2023-04-26 18:23:54 +00:00
|
|
|
mod health;
|
2023-02-02 13:59:27 +00:00
|
|
|
/// Text Generation Inference Webserver
|
2023-02-02 14:02:04 +00:00
|
|
|
mod infer;
|
2023-02-02 13:59:27 +00:00
|
|
|
mod queue;
|
2022-10-17 16:27:33 +00:00
|
|
|
pub mod server;
|
2022-10-18 13:19:03 +00:00
|
|
|
mod validation;
|
2022-10-17 12:59:00 +00:00
|
|
|
|
feat: supports openai chat completions API (#1427)
This PR adds support to make TGI a drop-in replacement for OpenAI
clients by exposing the same HTTP interface.
Notes
- TGI initializes a single model at startup, so the `model` field is unused in
HTTP requests.
- `max_tokens` and `stream` should work as expected, but other params may
be unimplemented or unsupported.
General approach
- fetch the `tokenizer_config` at startup from the hub
- pass the `tokenizer_config` into `Infer` so we have it at request time
- use the `chat_template` on the config to format the chat request
- parse the Jinja template and render the chat string (see the sketch below)
- pass the inputs into the existing generate function
- wrap the generation output in the expected structure before returning
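As a rough sketch of the template-rendering step (an illustration only, not the actual router code), assuming the `minijinja` crate and a simplified stand-in `Message` type:
```rust
// Illustrative sketch: render a Hub `chat_template` over the request messages.
// Assumes the `minijinja` and `serde` crates; `render_chat` and this `Message`
// are hypothetical, and the real router may pass extra template variables.
use minijinja::{context, Environment};
use serde::Serialize;

#[derive(Serialize)]
struct Message {
    role: String,
    content: String,
}

fn render_chat(chat_template: &str, messages: &[Message]) -> Result<String, minijinja::Error> {
    let mut env = Environment::new();
    env.add_template("chat", chat_template)?;
    // The rendered string becomes the `inputs` handed to the existing generate path.
    env.get_template("chat")?.render(context!(messages => messages))
}
```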
# How to test
### Streaming curl
```bash
curl localhost:3000/v1/chat/completions \
    -X POST \
    -d '{
        "model": "tgi",
        "messages": [
            {
                "role": "system",
                "content": "You are a helpful assistant."
            },
            {
                "role": "user",
                "content": "What is deep learning?"
            }
        ],
        "stream": true,
        "max_tokens": 20
    }' \
    -H 'Content-Type: application/json'
```
It is also possible to use the `openai` Python library by changing the base URL:
### 🌊 STREAMING REQUEST
```python
from openai import OpenAI

# init the client but point it to TGI
client = OpenAI(
    base_url="http://localhost:3000/v1",
    api_key="not needed for a local LLM"
)

chat_completion = client.chat.completions.create(
    model="tgi",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is deep learning?"}
    ],
    stream=True
)

# iterate over and print the stream
for message in chat_completion:
    print(message)
# ChatCompletionChunk(id='', choices=[Choice(delta=ChoiceDelta(content=' that', function_call=None, role='assistant', tool_calls=None), finish_reason=None, index=2, logprobs=None)], created=1704486761, model='', object='text_completion', system_fingerprint='')
```
### 🚗 SYNCHRONOUS REQUEST
```python
from openai import OpenAI

# init the client but point it to TGI
client = OpenAI(
    base_url="http://localhost:3000/v1",
    api_key="not needed for a local LLM"
)

chat_completion = client.chat.completions.create(
    model="tgi",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is deep learning?"}
    ],
    stream=False
)

print(chat_completion)
# ChatCompletion(id='', choices=[Choice(finish_reason=None, index=0, logprobs=None, message=ChatCompletionMessage(content='\nDeep learning is a new field of research that has been gaining traction in the last ...', role='assistant', function_call=None, tool_calls=None))], created=1704486762, model='', object='text_completion', system_fingerprint='', usage=CompletionUsage(completion_tokens=100, prompt_tokens=76, total_tokens=176))
```
## How to run dev
```bash
cd text-generation-inference/server
MASTER_ADDR=127.0.0.1 MASTER_PORT=5555 text-generation-server serve --trust-remote-code gpt2
```
**Note:** many of the existing `chat_templates` use non-standard Jinja
(i.e. adding a `raise` to the template), which will throw an error when
parsing; hence the example below uses `upstage/SOLAR-10.7B-Instruct-v1.0`,
since it has a valid template.
```bash
cd text-generation-inference/router
cargo run -- --tokenizer-name upstage/SOLAR-10.7B-Instruct-v1.0
```
Trigger a request:
```bash
curl localhost:3000/v1/chat/completions \
-X POST \
-d '{ "model": "gpt-3.5-turbo", "messages": [ { "role": "system", "content": "You are a helpful assistant." }, { "role": "user", "content": "What is the IP address of the Google DNS servers?" } ], "stream": true, "max_tokens": 20, "logprobs": true }' \
-H 'Content-Type: application/json'
```
^ supports `stream: true` and `stream: false` requests
2024-01-16 10:07:41 +00:00
|
|
|
use infer::{Infer, InferError, InferStreamResponse};
|
2023-02-02 13:59:27 +00:00
|
|
|
use queue::{Entry, Queue};
|
2022-10-18 13:19:03 +00:00
|
|
|
use serde::{Deserialize, Serialize};
|
2024-01-16 10:07:41 +00:00
|
|
|
use tokio::sync::OwnedSemaphorePermit;
|
|
|
|
use tokio_stream::wrappers::UnboundedReceiverStream;
|
2023-02-03 11:43:37 +00:00
|
|
|
use utoipa::ToSchema;
|
2022-10-17 12:59:00 +00:00
|
|
|
use validation::Validation;
|
2022-10-18 13:19:03 +00:00
|
|
|
|
2024-01-16 10:07:41 +00:00
|
|
|
/// Type alias for generation responses
|
|
|
|
pub(crate) type GenerateStreamResponse = (
|
|
|
|
OwnedSemaphorePermit,
|
|
|
|
u32, // input_length
|
|
|
|
UnboundedReceiverStream<Result<InferStreamResponse, InferError>>,
|
|
|
|
);
|
|
|
|
|
2023-04-18 14:16:06 +00:00
|
|
|
/// Hub type
|
|
|
|
#[derive(Clone, Debug, Deserialize)]
|
2023-04-21 13:36:29 +00:00
|
|
|
pub struct HubModelInfo {
|
2023-04-18 14:16:06 +00:00
|
|
|
#[serde(rename(deserialize = "id"))]
|
|
|
|
pub model_id: String,
|
|
|
|
pub sha: Option<String>,
|
|
|
|
pub pipeline_tag: Option<String>,
|
|
|
|
}
|
|
|
|
|
2024-01-16 10:07:41 +00:00
|
|
|
#[derive(Clone, Deserialize, Default)]
|
|
|
|
pub struct HubTokenizerConfig {
|
|
|
|
#[serde(default)]
|
|
|
|
pub chat_template: Option<String>,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl HubTokenizerConfig {
|
|
|
|
pub fn from_file(filename: &str) -> Self {
|
|
|
|
let content = std::fs::read_to_string(filename).unwrap();
|
|
|
|
serde_json::from_str(&content).unwrap_or_default()
|
|
|
|
}
|
|
|
|
}
|
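A hypothetical usage sketch (editorial illustration, not part of the file): a minimal `tokenizer_config.json` containing only a `chat_template` entry deserializes into `HubTokenizerConfig`, with any missing or malformed content falling back to defaults via `#[serde(default)]` and `unwrap_or_default`:
```rust
// Hypothetical sketch only; the template body shown here is made up.
#[cfg(test)]
mod hub_tokenizer_config_sketch {
    use super::HubTokenizerConfig;

    #[test]
    fn parses_minimal_config() {
        let raw = r#"{ "chat_template": "{% for m in messages %}{{ m.content }}{% endfor %}" }"#;
        let config: HubTokenizerConfig = serde_json::from_str(raw).unwrap_or_default();
        assert!(config.chat_template.is_some());
    }
}
```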
|
|
|
|
2023-04-18 14:16:06 +00:00
|
|
|
#[derive(Clone, Debug, Serialize, ToSchema)]
|
|
|
|
pub struct Info {
|
2023-04-25 11:11:18 +00:00
|
|
|
/// Model info
|
2023-04-18 14:16:06 +00:00
|
|
|
#[schema(example = "bigscience/blomm-560m")]
|
|
|
|
pub model_id: String,
|
|
|
|
#[schema(nullable = true, example = "e985a63cdc139290c5f700ff1929f0b5942cced2")]
|
|
|
|
pub model_sha: Option<String>,
|
2023-04-21 13:36:29 +00:00
|
|
|
#[schema(example = "torch.float16")]
|
|
|
|
pub model_dtype: String,
|
|
|
|
#[schema(example = "cuda")]
|
|
|
|
pub model_device_type: String,
|
2023-04-18 14:16:06 +00:00
|
|
|
#[schema(nullable = true, example = "text-generation")]
|
|
|
|
pub model_pipeline_tag: Option<String>,
|
2023-04-25 11:11:18 +00:00
|
|
|
/// Router Parameters
|
|
|
|
#[schema(example = "128")]
|
|
|
|
pub max_concurrent_requests: usize,
|
|
|
|
#[schema(example = "2")]
|
|
|
|
pub max_best_of: usize,
|
|
|
|
#[schema(example = "4")]
|
|
|
|
pub max_stop_sequences: usize,
|
|
|
|
#[schema(example = "1024")]
|
|
|
|
pub max_input_length: usize,
|
|
|
|
#[schema(example = "2048")]
|
|
|
|
pub max_total_tokens: usize,
|
|
|
|
#[schema(example = "1.2")]
|
|
|
|
pub waiting_served_ratio: f32,
|
|
|
|
#[schema(example = "32000")]
|
|
|
|
pub max_batch_total_tokens: u32,
|
|
|
|
#[schema(example = "20")]
|
|
|
|
pub max_waiting_tokens: usize,
|
|
|
|
#[schema(example = "2")]
|
|
|
|
pub validation_workers: usize,
|
|
|
|
/// Router Info
|
2023-04-18 14:16:06 +00:00
|
|
|
#[schema(example = "0.5.0")]
|
|
|
|
pub version: &'static str,
|
|
|
|
#[schema(nullable = true, example = "null")]
|
|
|
|
pub sha: Option<&'static str>,
|
2023-05-02 13:43:19 +00:00
|
|
|
#[schema(nullable = true, example = "null")]
|
|
|
|
pub docker_label: Option<&'static str>,
|
2023-04-18 14:16:06 +00:00
|
|
|
}
|
|
|
|
|
2023-02-03 11:43:37 +00:00
|
|
|
#[derive(Clone, Debug, Deserialize, ToSchema)]
|
2022-10-18 13:19:03 +00:00
|
|
|
pub(crate) struct GenerateParameters {
|
2023-03-09 14:30:54 +00:00
|
|
|
#[serde(default)]
|
|
|
|
#[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 1)]
|
|
|
|
pub best_of: Option<usize>,
|
2023-02-03 11:43:37 +00:00
|
|
|
#[serde(default)]
|
|
|
|
#[schema(
|
|
|
|
exclusive_minimum = 0.0,
|
|
|
|
nullable = true,
|
|
|
|
default = "null",
|
|
|
|
example = 0.5
|
|
|
|
)]
|
|
|
|
pub temperature: Option<f32>,
|
|
|
|
#[serde(default)]
|
|
|
|
#[schema(
|
|
|
|
exclusive_minimum = 0.0,
|
|
|
|
nullable = true,
|
|
|
|
default = "null",
|
|
|
|
example = 1.03
|
|
|
|
)]
|
|
|
|
pub repetition_penalty: Option<f32>,
|
|
|
|
#[serde(default)]
|
|
|
|
#[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 10)]
|
|
|
|
pub top_k: Option<i32>,
|
|
|
|
#[serde(default)]
|
|
|
|
#[schema(
|
|
|
|
exclusive_minimum = 0.0,
|
|
|
|
maximum = 1.0,
|
|
|
|
nullable = true,
|
|
|
|
default = "null",
|
|
|
|
example = 0.95
|
|
|
|
)]
|
|
|
|
pub top_p: Option<f32>,
|
2023-02-28 09:19:32 +00:00
|
|
|
#[serde(default)]
|
2023-03-09 10:33:57 +00:00
|
|
|
#[schema(
|
|
|
|
exclusive_minimum = 0.0,
|
|
|
|
maximum = 1.0,
|
|
|
|
nullable = true,
|
|
|
|
default = "null",
|
|
|
|
example = 0.95
|
|
|
|
)]
|
|
|
|
pub typical_p: Option<f32>,
|
|
|
|
#[serde(default)]
|
2023-02-03 11:43:37 +00:00
|
|
|
#[schema(default = "false", example = true)]
|
2022-10-18 13:19:03 +00:00
|
|
|
pub do_sample: bool,
|
|
|
|
#[serde(default = "default_max_new_tokens")]
|
2023-12-13 08:19:19 +00:00
|
|
|
#[schema(nullable = true, default = "100", example = "20")]
|
2023-10-04 15:38:42 +00:00
|
|
|
pub max_new_tokens: Option<u32>,
|
2022-12-15 16:03:56 +00:00
|
|
|
#[serde(default)]
|
2023-03-09 14:30:54 +00:00
|
|
|
#[schema(nullable = true, default = "null", example = false)]
|
2023-02-28 09:19:32 +00:00
|
|
|
pub return_full_text: Option<bool>,
|
|
|
|
#[serde(default)]
|
2023-02-27 13:56:58 +00:00
|
|
|
#[schema(inline, max_items = 4, example = json ! (["photographer"]))]
|
2022-12-12 17:25:22 +00:00
|
|
|
pub stop: Vec<String>,
|
2022-12-15 16:03:56 +00:00
|
|
|
#[serde(default)]
|
2023-03-09 14:30:54 +00:00
|
|
|
#[schema(nullable = true, default = "null", example = "null")]
|
2023-03-09 12:10:30 +00:00
|
|
|
pub truncate: Option<usize>,
|
|
|
|
#[serde(default)]
|
2023-03-02 11:30:41 +00:00
|
|
|
#[schema(default = "false", example = true)]
|
|
|
|
pub watermark: bool,
|
|
|
|
#[serde(default)]
|
2023-02-03 11:43:37 +00:00
|
|
|
#[schema(default = "true")]
|
2022-12-15 16:03:56 +00:00
|
|
|
pub details: bool,
|
2023-01-30 14:36:16 +00:00
|
|
|
#[serde(default)]
|
2023-06-02 15:12:30 +00:00
|
|
|
#[schema(default = "true")]
|
|
|
|
pub decoder_input_details: bool,
|
|
|
|
#[serde(default)]
|
2023-03-09 14:30:54 +00:00
|
|
|
#[schema(
|
|
|
|
exclusive_minimum = 0,
|
|
|
|
nullable = true,
|
|
|
|
default = "null",
|
|
|
|
example = "null"
|
|
|
|
)]
|
2023-01-30 14:36:16 +00:00
|
|
|
pub seed: Option<u64>,
|
2023-08-28 09:43:47 +00:00
|
|
|
#[serde(default)]
|
|
|
|
#[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 5)]
|
|
|
|
pub top_n_tokens: Option<u32>,
|
2022-10-18 13:19:03 +00:00
|
|
|
}
|
|
|
|
|
2023-10-04 15:38:42 +00:00
|
|
|
fn default_max_new_tokens() -> Option<u32> {
|
2023-12-13 08:19:19 +00:00
|
|
|
Some(100)
|
2022-10-18 13:19:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
fn default_parameters() -> GenerateParameters {
|
|
|
|
GenerateParameters {
|
2023-03-09 14:30:54 +00:00
|
|
|
best_of: None,
|
2023-02-03 11:43:37 +00:00
|
|
|
temperature: None,
|
|
|
|
repetition_penalty: None,
|
|
|
|
top_k: None,
|
|
|
|
top_p: None,
|
2023-03-09 10:33:57 +00:00
|
|
|
typical_p: None,
|
2024-01-16 10:07:41 +00:00
|
|
|
do_sample: true,
|
2022-10-18 13:19:03 +00:00
|
|
|
max_new_tokens: default_max_new_tokens(),
|
2023-02-28 09:19:32 +00:00
|
|
|
return_full_text: None,
|
2023-03-02 11:30:41 +00:00
|
|
|
stop: Vec::new(),
|
2023-03-09 12:10:30 +00:00
|
|
|
truncate: None,
|
2023-03-02 11:30:41 +00:00
|
|
|
watermark: false,
|
2022-12-15 16:03:56 +00:00
|
|
|
details: false,
|
2023-06-02 15:12:30 +00:00
|
|
|
decoder_input_details: false,
|
2023-01-30 14:36:16 +00:00
|
|
|
seed: None,
|
2023-08-28 09:43:47 +00:00
|
|
|
top_n_tokens: None,
|
2022-10-18 13:19:03 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-01-16 10:07:41 +00:00
|
|
|
#[derive(Clone, Deserialize, Serialize)]
|
|
|
|
pub(crate) struct ChatCompletion {
|
|
|
|
pub id: String,
|
|
|
|
pub object: String,
|
|
|
|
pub created: u64,
|
|
|
|
pub model: String,
|
|
|
|
pub system_fingerprint: String,
|
|
|
|
pub choices: Vec<ChatCompletionComplete>,
|
|
|
|
pub usage: Usage,
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Clone, Deserialize, Serialize)]
|
|
|
|
pub(crate) struct ChatCompletionComplete {
|
|
|
|
pub index: u32,
|
|
|
|
pub message: Message,
|
|
|
|
pub logprobs: Option<Vec<f32>>,
|
|
|
|
pub finish_reason: String,
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Clone, Deserialize, Serialize)]
|
|
|
|
pub(crate) struct Usage {
|
|
|
|
pub prompt_tokens: u32,
|
|
|
|
pub completion_tokens: u32,
|
|
|
|
pub total_tokens: u32,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl ChatCompletion {
|
|
|
|
pub(crate) fn new(
|
|
|
|
model: String,
|
|
|
|
system_fingerprint: String,
|
|
|
|
output: String,
|
|
|
|
created: u64,
|
|
|
|
details: Details,
|
|
|
|
return_logprobs: bool,
|
|
|
|
) -> Self {
|
|
|
|
Self {
|
|
|
|
id: String::new(),
|
|
|
|
object: "text_completion".into(),
|
|
|
|
created,
|
|
|
|
model,
|
|
|
|
system_fingerprint,
|
|
|
|
choices: vec![ChatCompletionComplete {
|
|
|
|
index: 0,
|
|
|
|
message: Message {
|
|
|
|
role: "assistant".into(),
|
|
|
|
content: output,
|
|
|
|
},
|
|
|
|
logprobs: return_logprobs
|
|
|
|
.then(|| details.tokens.iter().map(|t| t.logprob).collect()),
|
|
|
|
finish_reason: details.finish_reason.to_string(),
|
|
|
|
}],
|
|
|
|
usage: Usage {
|
|
|
|
prompt_tokens: details.prefill.len() as u32,
|
|
|
|
completion_tokens: details.generated_tokens,
|
|
|
|
total_tokens: details.prefill.len() as u32 + details.generated_tokens,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Clone, Deserialize, Serialize)]
|
|
|
|
pub(crate) struct ChatCompletionChunk {
|
|
|
|
pub id: String,
|
|
|
|
pub object: String,
|
|
|
|
pub created: u64,
|
|
|
|
pub model: String,
|
|
|
|
pub system_fingerprint: String,
|
|
|
|
pub choices: Vec<ChatCompletionChoice>,
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Clone, Deserialize, Serialize)]
|
|
|
|
pub(crate) struct ChatCompletionChoice {
|
|
|
|
pub index: u32,
|
|
|
|
pub delta: ChatCompletionDelta,
|
|
|
|
pub logprobs: Option<f32>,
|
|
|
|
pub finish_reason: Option<String>,
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Clone, Debug, Deserialize, Serialize)]
|
|
|
|
pub(crate) struct ChatCompletionDelta {
|
|
|
|
pub role: String,
|
|
|
|
pub content: String,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl ChatCompletionChunk {
|
|
|
|
pub(crate) fn new(
|
|
|
|
model: String,
|
|
|
|
system_fingerprint: String,
|
|
|
|
delta: String,
|
|
|
|
created: u64,
|
|
|
|
index: u32,
|
|
|
|
logprobs: Option<f32>,
|
|
|
|
finish_reason: Option<String>,
|
|
|
|
) -> Self {
|
|
|
|
Self {
|
|
|
|
id: String::new(),
|
|
|
|
object: "text_completion".to_string(),
|
|
|
|
created,
|
|
|
|
model,
|
|
|
|
system_fingerprint,
|
|
|
|
choices: vec![ChatCompletionChoice {
|
|
|
|
index,
|
|
|
|
delta: ChatCompletionDelta {
|
|
|
|
role: "assistant".to_string(),
|
|
|
|
content: delta,
|
|
|
|
},
|
|
|
|
logprobs,
|
|
|
|
finish_reason,
|
|
|
|
}],
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn default_request_messages() -> Vec<Message> {
|
|
|
|
vec![Message {
|
|
|
|
role: "user".to_string(),
|
|
|
|
content: "My name is David and I".to_string(),
|
|
|
|
}]
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Clone, Deserialize, ToSchema, Serialize)]
|
|
|
|
pub(crate) struct ChatRequest {
|
|
|
|
/// UNUSED
|
|
|
|
#[schema(example = "bigscience/blomm-560m")]
|
|
|
|
/// ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.
|
|
|
|
pub model: String, /* NOTE: UNUSED */
|
|
|
|
|
|
|
|
/// A list of messages comprising the conversation so far.
|
|
|
|
#[serde(default = "default_request_messages")]
|
|
|
|
pub messages: Vec<Message>,
|
|
|
|
|
|
|
|
/// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,
|
|
|
|
/// decreasing the model's likelihood to repeat the same line verbatim.
|
|
|
|
#[serde(default)]
|
|
|
|
pub frequency_penalty: Option<f32>,
|
|
|
|
|
|
|
|
/// UNUSED
|
|
|
|
/// Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens
|
|
|
|
/// (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,
|
|
|
|
/// the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,
|
|
|
|
/// but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should
|
|
|
|
/// result in a ban or exclusive selection of the relevant token.
|
|
|
|
#[serde(default)]
|
|
|
|
pub logit_bias: Option<Vec<f32>>,
|
|
|
|
|
|
|
|
/// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each
|
|
|
|
/// output token returned in the content of message.
|
|
|
|
#[serde(default)]
|
|
|
|
pub logprobs: Option<bool>,
|
|
|
|
|
|
|
|
/// UNUSED
|
|
|
|
/// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with
|
|
|
|
/// an associated log probability. logprobs must be set to true if this parameter is used.
|
|
|
|
#[serde(default)]
|
|
|
|
pub top_logprobs: Option<u32>,
|
|
|
|
|
|
|
|
/// The maximum number of tokens that can be generated in the chat completion.
|
|
|
|
#[serde(default)]
|
|
|
|
pub max_tokens: Option<u32>,
|
|
|
|
|
|
|
|
/// UNUSED
|
|
|
|
/// How many chat completion choices to generate for each input message. Note that you will be charged based on the
|
|
|
|
/// number of generated tokens across all of the choices. Keep n as 1 to minimize costs.
|
|
|
|
#[serde(default)]
|
|
|
|
pub n: Option<u32>,
|
|
|
|
|
|
|
|
/// UNUSED
|
|
|
|
/// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,
|
|
|
|
/// increasing the model's likelihood to talk about new topics.
|
|
|
|
#[serde(default)]
|
|
|
|
pub presence_penalty: Option<f32>,
|
|
|
|
|
|
|
|
#[serde(default = "bool::default")]
|
|
|
|
pub stream: bool,
|
|
|
|
|
|
|
|
#[schema(nullable = true, example = 42)]
|
|
|
|
pub seed: Option<u64>,
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Clone, Deserialize, ToSchema, Serialize)]
|
|
|
|
pub(crate) struct Message {
|
|
|
|
#[schema(example = "user")]
|
|
|
|
pub role: String,
|
|
|
|
#[schema(example = "My name is David and I")]
|
|
|
|
pub content: String,
|
|
|
|
}
|
|
|
|
|
2023-02-03 11:43:37 +00:00
|
|
|
#[derive(Clone, Debug, Deserialize, ToSchema)]
|
2022-10-18 13:19:03 +00:00
|
|
|
pub(crate) struct GenerateRequest {
|
2023-02-03 11:43:37 +00:00
|
|
|
#[schema(example = "My name is Olivier and I")]
|
2022-10-18 13:19:03 +00:00
|
|
|
pub inputs: String,
|
|
|
|
#[serde(default = "default_parameters")]
|
|
|
|
pub parameters: GenerateParameters,
|
|
|
|
}
|
|
|
|
|
2023-02-27 13:56:58 +00:00
|
|
|
#[derive(Clone, Debug, Deserialize, ToSchema)]
|
|
|
|
pub(crate) struct CompatGenerateRequest {
|
|
|
|
#[schema(example = "My name is Olivier and I")]
|
|
|
|
pub inputs: String,
|
|
|
|
#[serde(default = "default_parameters")]
|
|
|
|
pub parameters: GenerateParameters,
|
|
|
|
#[serde(default)]
|
2023-06-05 16:16:08 +00:00
|
|
|
#[schema(default = "false")]
|
2023-02-27 13:56:58 +00:00
|
|
|
pub stream: bool,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl From<CompatGenerateRequest> for GenerateRequest {
|
|
|
|
fn from(req: CompatGenerateRequest) -> Self {
|
|
|
|
Self {
|
|
|
|
inputs: req.inputs,
|
|
|
|
parameters: req.parameters,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-02-24 14:55:57 +00:00
|
|
|
#[derive(Debug, Serialize, ToSchema)]
|
|
|
|
pub struct PrefillToken {
|
|
|
|
#[schema(example = 0)]
|
|
|
|
id: u32,
|
|
|
|
#[schema(example = "test")]
|
|
|
|
text: String,
|
2023-02-27 13:56:58 +00:00
|
|
|
#[schema(nullable = true, example = - 0.34)]
|
2023-02-24 14:55:57 +00:00
|
|
|
logprob: f32,
|
|
|
|
}
|
|
|
|
|
2023-02-03 11:43:37 +00:00
|
|
|
#[derive(Debug, Serialize, ToSchema)]
|
|
|
|
pub struct Token {
|
|
|
|
#[schema(example = 0)]
|
|
|
|
id: u32,
|
|
|
|
#[schema(example = "test")]
|
|
|
|
text: String,
|
2023-02-27 13:56:58 +00:00
|
|
|
#[schema(nullable = true, example = - 0.34)]
|
2023-02-03 11:43:37 +00:00
|
|
|
logprob: f32,
|
2023-02-24 14:55:57 +00:00
|
|
|
#[schema(example = "false")]
|
|
|
|
special: bool,
|
2023-02-03 11:43:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Serialize, ToSchema)]
|
|
|
|
#[serde(rename_all(serialize = "snake_case"))]
|
|
|
|
pub(crate) enum FinishReason {
|
|
|
|
#[schema(rename = "length")]
|
|
|
|
Length,
|
|
|
|
#[serde(rename = "eos_token")]
|
|
|
|
#[schema(rename = "eos_token")]
|
|
|
|
EndOfSequenceToken,
|
|
|
|
#[schema(rename = "stop_sequence")]
|
|
|
|
StopSequence,
|
|
|
|
}
|
2023-01-31 16:04:00 +00:00
|
|
|
|
2024-01-16 10:07:41 +00:00
|
|
|
impl std::fmt::Display for FinishReason {
|
|
|
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
|
|
match self {
|
|
|
|
FinishReason::Length => write!(f, "length"),
|
|
|
|
FinishReason::EndOfSequenceToken => write!(f, "eos_token"),
|
|
|
|
FinishReason::StopSequence => write!(f, "stop_sequence"),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-03-09 14:30:54 +00:00
|
|
|
#[derive(Serialize, ToSchema)]
|
|
|
|
pub(crate) struct BestOfSequence {
|
|
|
|
#[schema(example = "test")]
|
|
|
|
pub generated_text: String,
|
|
|
|
#[schema(example = "length")]
|
|
|
|
pub finish_reason: FinishReason,
|
|
|
|
#[schema(example = 1)]
|
|
|
|
pub generated_tokens: u32,
|
|
|
|
#[schema(nullable = true, example = 42)]
|
|
|
|
pub seed: Option<u64>,
|
|
|
|
pub prefill: Vec<PrefillToken>,
|
|
|
|
pub tokens: Vec<Token>,
|
2023-08-28 09:43:47 +00:00
|
|
|
#[serde(skip_serializing_if = "Vec::is_empty")]
|
|
|
|
pub top_tokens: Vec<Vec<Token>>,
|
2023-03-09 14:30:54 +00:00
|
|
|
}
|
|
|
|
|
2023-02-03 11:43:37 +00:00
|
|
|
#[derive(Serialize, ToSchema)]
|
2022-12-15 16:03:56 +00:00
|
|
|
pub(crate) struct Details {
|
2023-02-03 11:43:37 +00:00
|
|
|
#[schema(example = "length")]
|
|
|
|
pub finish_reason: FinishReason,
|
|
|
|
#[schema(example = 1)]
|
2022-12-15 16:03:56 +00:00
|
|
|
pub generated_tokens: u32,
|
2023-03-09 14:30:54 +00:00
|
|
|
#[schema(nullable = true, example = 42)]
|
2023-01-30 14:36:16 +00:00
|
|
|
pub seed: Option<u64>,
|
2023-03-07 17:52:22 +00:00
|
|
|
pub prefill: Vec<PrefillToken>,
|
|
|
|
pub tokens: Vec<Token>,
|
2023-03-09 14:30:54 +00:00
|
|
|
#[serde(skip_serializing_if = "Option::is_none")]
|
|
|
|
pub best_of_sequences: Option<Vec<BestOfSequence>>,
|
2023-08-28 09:43:47 +00:00
|
|
|
#[serde(skip_serializing_if = "Vec::is_empty")]
|
|
|
|
pub top_tokens: Vec<Vec<Token>>,
|
2022-12-15 16:03:56 +00:00
|
|
|
}
|
|
|
|
|
2023-02-03 11:43:37 +00:00
|
|
|
#[derive(Serialize, ToSchema)]
|
2023-01-31 16:04:00 +00:00
|
|
|
pub(crate) struct GenerateResponse {
|
2023-02-03 11:43:37 +00:00
|
|
|
#[schema(example = "test")]
|
2022-10-18 13:19:03 +00:00
|
|
|
pub generated_text: String,
|
2022-12-15 16:03:56 +00:00
|
|
|
#[serde(skip_serializing_if = "Option::is_none")]
|
|
|
|
pub details: Option<Details>,
|
2022-10-18 13:19:03 +00:00
|
|
|
}
|
2022-10-27 12:25:29 +00:00
|
|
|
|
2023-02-03 11:43:37 +00:00
|
|
|
#[derive(Serialize, ToSchema)]
|
|
|
|
pub(crate) struct StreamDetails {
|
|
|
|
#[schema(example = "length")]
|
|
|
|
pub finish_reason: FinishReason,
|
|
|
|
#[schema(example = 1)]
|
|
|
|
pub generated_tokens: u32,
|
2023-03-09 14:30:54 +00:00
|
|
|
#[schema(nullable = true, example = 42)]
|
2023-02-03 11:43:37 +00:00
|
|
|
pub seed: Option<u64>,
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Serialize, ToSchema)]
|
2023-01-31 16:04:00 +00:00
|
|
|
pub(crate) struct StreamResponse {
|
2024-01-16 10:07:41 +00:00
|
|
|
pub index: u32,
|
2023-01-31 16:04:00 +00:00
|
|
|
pub token: Token,
|
2023-08-28 09:43:47 +00:00
|
|
|
#[serde(skip_serializing_if = "Vec::is_empty")]
|
|
|
|
pub top_tokens: Vec<Token>,
|
2023-02-03 11:43:37 +00:00
|
|
|
#[schema(nullable = true, default = "null", example = "test")]
|
2023-01-31 16:04:00 +00:00
|
|
|
pub generated_text: Option<String>,
|
2023-02-03 11:43:37 +00:00
|
|
|
#[schema(nullable = true, default = "null")]
|
|
|
|
pub details: Option<StreamDetails>,
|
2023-01-31 16:04:00 +00:00
|
|
|
}
|
|
|
|
|
2023-02-03 11:43:37 +00:00
|
|
|
#[derive(Serialize, ToSchema)]
|
2022-10-27 12:25:29 +00:00
|
|
|
pub(crate) struct ErrorResponse {
|
|
|
|
pub error: String,
|
2023-03-07 17:52:22 +00:00
|
|
|
pub error_type: String,
|
2022-10-27 12:25:29 +00:00
|
|
|
}
|
2023-04-26 14:14:40 +00:00
|
|
|
|
|
|
|
#[cfg(test)]
|
2023-04-26 18:23:54 +00:00
|
|
|
mod tests {
|
2023-04-26 14:14:40 +00:00
|
|
|
use std::io::Write;
|
|
|
|
use tokenizers::Tokenizer;
|
|
|
|
|
2023-04-26 18:23:54 +00:00
|
|
|
pub(crate) async fn get_tokenizer() -> Tokenizer {
|
2023-11-23 15:42:48 +00:00
|
|
|
let filename = std::path::Path::new("tokenizer.json");
|
|
|
|
if !filename.exists() {
|
2023-04-26 18:23:54 +00:00
|
|
|
let content = reqwest::get("https://huggingface.co/gpt2/raw/main/tokenizer.json")
|
|
|
|
.await
|
|
|
|
.unwrap()
|
|
|
|
.bytes()
|
|
|
|
.await
|
|
|
|
.unwrap();
|
2023-11-23 15:42:48 +00:00
|
|
|
let tmp_filename = "tokenizer.json.temp";
|
|
|
|
let mut file = std::fs::File::create(tmp_filename).unwrap();
|
2023-04-26 14:14:40 +00:00
|
|
|
file.write_all(&content).unwrap();
|
2023-11-23 15:42:48 +00:00
|
|
|
// Re-check in case another process has written this file in the meantime.
|
|
|
|
if !filename.exists() {
|
|
|
|
std::fs::rename(tmp_filename, filename).unwrap()
|
|
|
|
}
|
2023-04-26 14:14:40 +00:00
|
|
|
}
|
|
|
|
Tokenizer::from_file("tokenizer.json").unwrap()
|
|
|
|
}
|
|
|
|
}
|