diff --git a/docs/openapi.json b/docs/openapi.json
index d1b60f4d4..957fe246b 100644
--- a/docs/openapi.json
+++ b/docs/openapi.json
@@ -2186,4 +2186,4 @@
       "description": "Hugging Face Text Generation Inference API"
     }
   ]
-}
+}
\ No newline at end of file
diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml
index b883b36d6..4876f7c58 100644
--- a/docs/source/_toctree.yml
+++ b/docs/source/_toctree.yml
@@ -3,6 +3,8 @@
     title: Text Generation Inference
   - local: quicktour
     title: Quick Tour
+  - local: supported_models
+    title: Supported Models
   - local: installation_nvidia
     title: Using TGI with Nvidia GPUs
   - local: installation_amd
@@ -15,8 +17,7 @@
     title: Using TGI with Intel GPUs
   - local: installation
     title: Installation from source
-  - local: supported_models
-    title: Supported Models and Hardware
+
   - local: architecture
     title: Internal Architecture
   - local: usage_statistics
diff --git a/docs/source/supported_models.md b/docs/source/supported_models.md
index be280a2ba..28008bcd1 100644
--- a/docs/source/supported_models.md
+++ b/docs/source/supported_models.md
@@ -1,9 +1,7 @@
-# Supported Models and Hardware
+# Supported Models
 
-Text Generation Inference enables serving optimized models on specific hardware for the highest performance. The following sections list which models (VLMs & LLMs) are supported.
-
-## Supported Models
+Text Generation Inference enables serving optimized models. The following sections list which models (VLMs & LLMs) are supported.
 
 - [Deepseek V2](https://huggingface.co/deepseek-ai/DeepSeek-V2)
 - [Idefics 2](https://huggingface.co/HuggingFaceM4/idefics2-8b) (Multimodal)
@@ -38,6 +36,7 @@
 - [Mllama](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct) (Multimodal)
 
 
+
 If the above list lacks the model you would like to serve, depending on the model's pipeline type, you can try to initialize and serve the model anyways to see how well it performs, but performance isn't guaranteed for non-optimized models:
 
 ```python
diff --git a/update_doc.py b/update_doc.py
index 3fb0d3143..203aaced0 100644
--- a/update_doc.py
+++ b/update_doc.py
@@ -5,14 +5,13 @@
 import json
 import os
 TEMPLATE = """
-# Supported Models and Hardware
+# Supported Models
 
-Text Generation Inference enables serving optimized models on specific hardware for the highest performance. The following sections list which models (VLMs & LLMs) are supported.
-
-## Supported Models
+Text Generation Inference enables serving optimized models. The following sections list which models (VLMs & LLMs) are supported.
 
 SUPPORTED_MODELS
 
+
 If the above list lacks the model you would like to serve, depending on the model's pipeline type, you can try to initialize and serve the model anyways to see how well it performs, but performance isn't guaranteed for non-optimized models:
 
 ```python
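
The ```python fence at the end of both markdown hunks opens a snippet that this diff does not touch and therefore truncates. For reference, here is a minimal sketch of the fallback the surrounding prose describes, initializing an unlisted model directly through `transformers`, assuming a causal-LM checkpoint; the model id below is an illustrative placeholder, not something named in the diff:

```python
# Minimal sketch: serve-anyway initialization for a model that is not on the
# optimized list. All names here are illustrative; substitute your own
# checkpoint id.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "gpt2"  # placeholder; any text-generation checkpoint works
tokenizer = AutoTokenizer.from_pretrained(model_id)
# device_map="auto" spreads weights across available devices (needs `accelerate`)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

inputs = tokenizer("Hello, world", return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=16)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```

For text-to-text generation models the analogous entry point is `AutoModelForSeq2SeqLM`; performance is not guaranteed for non-optimized models either way.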
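The `update_doc.py` hunk edits only the `TEMPLATE` literal; the substitution of the `SUPPORTED_MODELS` placeholder happens elsewhere in that script and is not shown here. A hedged sketch of that rendering step, with an abbreviated template and an assumed two-model list (the real script derives the list from the server's model registry):

```python
# Sketch of the rendering step implied by TEMPLATE (assumed mechanics, not the
# script's actual code): the SUPPORTED_MODELS placeholder is replaced with a
# generated bullet list and the result is written out as markdown.
import os

TEMPLATE = """
# Supported Models

Text Generation Inference enables serving optimized models. The following sections list which models (VLMs & LLMs) are supported.

SUPPORTED_MODELS
"""

# Illustrative subset; the real script builds this from the server's models.
models = {
    "Deepseek V2": "https://huggingface.co/deepseek-ai/DeepSeek-V2",
    "Idefics 2": "https://huggingface.co/HuggingFaceM4/idefics2-8b",
}
bullets = "\n".join(f"- [{name}]({url})" for name, url in models.items())

os.makedirs("docs/source", exist_ok=True)
with open("docs/source/supported_models.md", "w") as f:
    f.write(TEMPLATE.replace("SUPPORTED_MODELS", bullets))
```

Keeping the prose inside `TEMPLATE` in lockstep with `docs/source/supported_models.md`, as this diff does, is what lets the page be regenerated rather than edited by hand.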