mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-09-11 04:14:52 +00:00)
fix: simplify error handling
commit 19bb3bf355 (parent 4c8bf7f5b8)
@@ -1824,7 +1824,7 @@ pub async fn run(
     let tokenizer: Result<Tokenizer, WebServerError> = {
         use pyo3::prelude::*;
-        match Python::with_gil(|py| -> PyResult<()> {
+        let res = Python::with_gil(|py| -> PyResult<()> {
             py_resolve_tokenizer(py, &tokenizer_name, revision.as_deref(), trust_remote_code)?;
             Ok(())
         })
@@ -1834,13 +1834,11 @@ pub async fn run(
         .or_else(|err| {
             let out = legacy_tokenizer_handle(config_filename.as_ref());
             out.ok_or(err)
-        }) {
-            Ok(_) => {}
-            Err(_) => {
-                return Err(WebServerError::Tokenizer(
-                    "Unable to load tokenizer.".to_string(),
-                ));
-            }
-        }
+        }).map_err(|_|WebServerError::Tokenizer(
+            "Unable to load tokenizer.".to_string(),
+        ));
+        if res.is_err(){
+            return res
+        }
         let filename = "out/tokenizer.json";
         if let Ok(tok) = tokenizers::Tokenizer::from_file(filename) {
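
The change collapses a match over the tokenizer-loading result into map_err plus a single early return. The sketch below illustrates that refactor in isolation under assumed names: AppError, load_primary, and load_fallback are hypothetical stand-ins for WebServerError, the pyo3 tokenizer resolution, and legacy_tokenizer_handle, not code from text-generation-inference.

    // Hypothetical stand-ins, not items from the repo.
    #[derive(Debug)]
    struct AppError(String);

    // Fallible "primary" loader, standing in for the pyo3 tokenizer resolution.
    fn load_primary(ok: bool) -> Result<(), String> {
        if ok {
            Ok(())
        } else {
            Err("primary loader failed".to_string())
        }
    }

    // Fallback loader, standing in for legacy_tokenizer_handle.
    fn load_fallback() -> Option<()> {
        None
    }

    // Before: an explicit match whose only job is to translate the error type
    // and return early; the Ok arm is empty.
    fn run_before(ok: bool) -> Result<(), AppError> {
        match load_primary(ok).or_else(|err| load_fallback().ok_or(err)) {
            Ok(_) => {}
            Err(_) => {
                return Err(AppError("Unable to load tokenizer.".to_string()));
            }
        }
        // ... continue with the rest of the setup ...
        Ok(())
    }

    // After: map_err converts the error type on the Result chain, and a single
    // early return keeps the happy path flat, mirroring the
    // `let res = ...; if res.is_err() { return res; }` shape in the diff.
    fn run_after(ok: bool) -> Result<(), AppError> {
        let res = load_primary(ok)
            .or_else(|err| load_fallback().ok_or(err))
            .map_err(|_| AppError("Unable to load tokenizer.".to_string()));
        if res.is_err() {
            return res;
        }
        // ... continue with the rest of the setup ...
        Ok(())
    }

    fn main() {
        assert!(run_before(true).is_ok() && run_after(true).is_ok());
        // Both shapes surface the same error when primary and fallback fail.
        if let Err(e) = run_after(false) {
            println!("fallback exhausted: {:?}", e);
        }
        assert!(run_before(false).is_err());
    }

Both shapes behave identically; the map_err form simply moves the error-type conversion onto the Result chain so the failure can be propagated with one early return instead of a match whose success arm does nothing.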