fix: improve messages api docs content and formatting (#1506)

This PR simply updates the messages api docs to address content changes
and make format consistent
This commit is contained in:
drbh 2024-01-31 11:26:22 -05:00 committed by Karol Damaszke
parent bf72c03d0e
commit 11d8e7132f

View File

@ -1,8 +1,8 @@
# Messages API
Text Generation Inference (TGI) now supports the Messages API, which is fully compatible with the OpenAI Chat Completion API. This feature is available starting from version 1.4.0. You can use OpenAI's client libraries or third-party libraries expecting OpenAI schema to interact with TGI's Messages API. Below are some examples of how to utilize this compatibility.

> **Note:** The Messages API is supported from TGI version 1.4.0 and above. Ensure you are using a compatible version to access this feature.
## Making a Request
@ -98,30 +98,30 @@ import boto3
from sagemaker.huggingface import HuggingFaceModel, get_huggingface_llm_image_uri

try:
    role = sagemaker.get_execution_role()
except ValueError:
    iam = boto3.client('iam')
    role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']

# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID':'HuggingFaceH4/zephyr-7b-beta',
    'SM_NUM_GPUS': json.dumps(1),
    'MESSAGES_API_ENABLED': True
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    image_uri=get_huggingface_llm_image_uri("huggingface",version="1.4.0"),
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,
    instance_type="ml.g5.2xlarge",
    container_startup_health_check_timeout=300,
)

# send request