Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-06-02 20:52:07 +00:00)
Before this change, the generation input was sent to the backend as a single string, encoding images as Base64 and packing them in Markdown-style links. This change adds a new chunked input representation that separates text chunks from image chunks. Image chunks contain binary data (for smaller message sizes) and the image's MIME type. The stringly-typed inputs are still sent to support backends that do not support chunked inputs yet.
22 lines · 454 B · TOML
[package]
name = "text-generation-client"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true
|
[dependencies]
base64 = { workspace = true }
futures = "^0.3"
grpc-metadata = { path = "../grpc-metadata" }
prost = "^0.12"
thiserror = "^1.0"
tokio = { version = "^1.32", features = ["sync"] }
tonic = "^0.10"
tower = "^0.4"
tracing = "^0.1"
|
[build-dependencies]
tonic-build = "0.10.1"
prost-build = "0.12.1"