From fc2631547e7eb337dc061742c4cb69e906f4cc2f Mon Sep 17 00:00:00 2001 From: Julien Chaumond Date: Thu, 1 May 2025 16:40:51 +0200 Subject: [PATCH] good catch from copilot --- docs/source/basic_tutorials/visual_language_models.md | 6 +++--- docs/source/conceptual/streaming.md | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/source/basic_tutorials/visual_language_models.md b/docs/source/basic_tutorials/visual_language_models.md index 53b8fcbe..f3c8c836 100644 --- a/docs/source/basic_tutorials/visual_language_models.md +++ b/docs/source/basic_tutorials/visual_language_models.md @@ -139,12 +139,12 @@ npm install @huggingface/inference Whether you use Inference Providers (our serverless API), or Inference Endpoints, you can call `InferenceClient`. -We can create a `HfInferenceEndpoint` providing our endpoint URL and We can create a `HfInferenceEndpoint` providing our endpoint URL and [Hugging Face access token](https://huggingface.co/settings/tokens). +We can create an `InferenceClient` providing our endpoint URL and [Hugging Face access token](https://huggingface.co/settings/tokens). 
```js -import { HfInferenceEndpoint } from "@huggingface/inference"; +import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceEndpoint('hf_YOUR_TOKEN', { endpointUrl: 'https://YOUR_ENDPOINT.endpoints.huggingface.cloud' }); +const client = new InferenceClient('hf_YOUR_TOKEN', { endpointUrl: 'https://YOUR_ENDPOINT.endpoints.huggingface.cloud' }); const prompt = "![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png)What is this a picture of?\n\n"; diff --git a/docs/source/conceptual/streaming.md b/docs/source/conceptual/streaming.md index 6e037f25..dfa03bab 100644 --- a/docs/source/conceptual/streaming.md +++ b/docs/source/conceptual/streaming.md @@ -134,9 +134,9 @@ Whether you use Inference Providers (our serverless API), or Inference Endpoints ```js -import { InferenceEndpoint } from '@huggingface/inference'; +import { InferenceClient } from '@huggingface/inference'; -const client = new InferenceEndpoint('hf_YOUR_TOKEN', { endpointUrl: 'https://YOUR_ENDPOINT.endpoints.huggingface.cloud' }); +const client = new InferenceClient('hf_YOUR_TOKEN', { endpointUrl: 'https://YOUR_ENDPOINT.endpoints.huggingface.cloud' }); // prompt const prompt = 'What can you do in Nuremberg, Germany? Give me 3 Tips';