Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-04-22 23:42:06 +00:00)
Better error message

Signed-off-by: Adrien Gallouët <angt@huggingface.co>

Parent: 2242d1a67c
Commit: 0d01a89f0f
@@ -209,31 +209,27 @@ async fn main() -> Result<(), RouterError> {
         Tokenizer::from_pretrained(&args.model_id, Some(params))?
     };
 
-    let model_gguf = match args.model_gguf {
-        Some(model_gguf) => model_gguf,
-        None => {
-            let make_gguf = match std::env::var("MAKE_GGUF") {
-                Ok(make_gguf) => make_gguf,
-                Err(e) => {
-                    error!("Missing env: MAKE_GGUF");
-                    return Err(RouterError::VarError(e));
-                }
-            };
-            let model_gguf = "models/model.gguf".to_string();
-
-            let status = Command::new(make_gguf)
-                .arg(&model_gguf)
-                .arg(&args.model_id)
-                .arg(&args.revision)
-                .spawn()?
-                .wait()
-                .await?;
-
-            if !status.success() {
-                error!("Failed to generate GGUF");
-            }
-            model_gguf
-        }
+    let model_gguf = if let Some(model_gguf) = args.model_gguf {
+        model_gguf
+    } else {
+        let make_gguf = std::env::var("MAKE_GGUF").map_err(|e| {
+            error!("No GGUF model given and environment variable MAKE_GGUF is missing.");
+            RouterError::VarError(e)
+        })?;
+        let model_gguf = "models/model.gguf".to_string();
+
+        let status = Command::new(make_gguf)
+            .arg(&model_gguf)
+            .arg(&args.model_id)
+            .arg(&args.revision)
+            .spawn()?
+            .wait()
+            .await?;
+
+        if !status.success() {
+            error!("Failed to generate GGUF");
+        }
+        model_gguf
     };
 
     let (backend, ok, shutdown) = LlamacppBackend::new(
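The core of the change is replacing the explicit match on std::env::var("MAKE_GGUF") with map_err, so the more descriptive message is logged and the error is then propagated with `?` in a single expression; the surrounding match on args.model_gguf becomes an `if let ... else` for the same flattening effect. Below is a minimal, self-contained sketch of the map_err pattern only, not the router's actual code: it assumes a stand-in RouterError enum and uses plain eprintln! in place of the router's tracing error! macro.

use std::env;

// Stand-in for the router's RouterError, used only to illustrate the pattern.
#[derive(Debug)]
enum RouterError {
    VarError(env::VarError),
}

fn main() -> Result<(), RouterError> {
    // Before: an explicit match that logs, then returns the error by hand.
    //
    //     let make_gguf = match env::var("MAKE_GGUF") {
    //         Ok(make_gguf) => make_gguf,
    //         Err(e) => {
    //             eprintln!("Missing env: MAKE_GGUF");
    //             return Err(RouterError::VarError(e));
    //         }
    //     };
    //
    // After: map_err logs the richer message and wraps the error,
    // and `?` propagates it in one expression.
    let make_gguf = env::var("MAKE_GGUF").map_err(|e| {
        eprintln!("No GGUF model given and environment variable MAKE_GGUF is missing.");
        RouterError::VarError(e)
    })?;

    println!("MAKE_GGUF = {make_gguf}");
    Ok(())
}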