use crate::ffi::{
    create_single_worker_backend, GenerationParams, LlamaCppBackendImpl, SamplingParams,
};
use async_trait::async_trait;
use cxx::{Exception, UniquePtr};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::thread::spawn;
use text_generation_router::infer::{Backend, InferError, InferStreamResponse};
use text_generation_router::validation::ValidGenerateRequest;
use thiserror::Error;
use tokio_stream::wrappers::UnboundedReceiverStream;
use tracing::info;

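// `LlamaCppBackendImpl` is an opaque cxx type, so it is not `Send` by default; asserting
// `Send` lets the `UniquePtr` holding it be moved into the scheduler thread spawned below.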
unsafe impl Send for LlamaCppBackendImpl {}

#[derive(Debug, Error)]
pub enum LlamaCppBackendError {
    #[error("Provided GGUF model path {0} doesn't exist")]
    ModelFileDoesntExist(String),

    #[error("Failed to initialize model from GGUF file {0}: {1}")]
    ModelInitializationFailed(PathBuf, String),
}

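// The backend struct carries no state yet: `new` validates the GGUF path, builds the C++
// worker over FFI, and then drives the placeholder scheduler loop below.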
pub struct LlamaCppBackend {}

impl LlamaCppBackend {
    pub fn new<P: AsRef<Path> + Send>(model_path: P) -> Result<Self, LlamaCppBackendError> {
        let path = Arc::new(model_path.as_ref());
        if !path.exists() {
            return Err(LlamaCppBackendError::ModelFileDoesntExist(
                path.display().to_string(),
            ));
        }

        let mut backend = create_single_worker_backend(path.to_str().unwrap()).map_err(|err| {
            LlamaCppBackendError::ModelInitializationFailed(
                path.to_path_buf(),
                err.what().to_string(),
            )
        })?;

        info!(
            "Successfully initialized llama.cpp backend from {}",
            path.display()
        );

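        // Spawn the scheduler loop on its own thread; joining here blocks `new` until the
        // loop's single hard-coded generation finishes (placeholder wiring for now).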
        let j = spawn(|| scheduler_loop(backend));
        j.join().ok();
        Ok(Self {})
    }
}

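// Placeholder scheduler: runs one hard-coded generation through the FFI backend and
// prints the resulting token ids.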
fn scheduler_loop(mut backend: UniquePtr<LlamaCppBackendImpl>) {
    println!("Scheduler loop");
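    // Hard-coded prompt token ids used purely as a smoke test (they appear to be a
    // Llama 3 begin-of-text token followed by a short prompt).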
    let tokens = [128000u32, 5159, 836, 374, 23809];
    let mut generated = vec![0u32; 16];
    let generation_params = GenerationParams {
        max_new_tokens: generated.len() as u32,
    };
    let sampling_params = SamplingParams::default();

    match backend.pin_mut().generate(
        &tokens,
        &mut generated,
        &generation_params,
        &sampling_params,
        |new_token_id: u32, is_eos: bool| println!("Generated {new_token_id} (is_eos: {is_eos})"),
    ) {
        Ok(n_tokens) => {
            generated.truncate(n_tokens);
            println!("Generated {} tokens -> {:?}", n_tokens, generated);
        }
        Err(err) => println!("Error: {}", err),
    }
}

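// Router-facing `Backend` implementation; request scheduling is not wired to the C++
// worker yet, so `schedule` always returns an error.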
#[async_trait]
impl Backend for LlamaCppBackend {
    fn schedule(
        &self,
        _request: ValidGenerateRequest,
    ) -> Result<UnboundedReceiverStream<Result<InferStreamResponse, InferError>>, InferError> {
        Err(InferError::GenerationError("Not implemented yet".into()))
    }

    async fn health(&self, _: bool) -> bool {
        true
    }
}