mirror of
https://github.com/huggingface/text-generation-inference.git
synced 2025-04-21 14:52:20 +00:00
This PR adds paligemma modeling code Blog post: https://huggingface.co/blog/paligemma Transformers PR: https://github.com/huggingface/transformers/pull/30814 install the latest changes and run with ```bash # get the weights # text-generation-server download-weights gv-hf/PaliGemma-base-224px-hf # run TGI text-generation-launcher --model-id gv-hf/PaliGemma-base-224px-hf ``` basic example sending various requests ```python from huggingface_hub import InferenceClient client = InferenceClient("http://127.0.0.1:3000") images = [ "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png", "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png", ] prompts = [ "What animal is in this image?", "Name three colors in this image.", "What are 10 colors in this image?", "Where is the cow standing?", "answer en Where is the cow standing?", "Is there a bird in the image?", "Is there a cow in the image?", "Is there a rabbit in the image?", "how many birds are in the image?", "how many rabbits are in the image?", ] for img in images: print(f"\nImage: {img.split('/')[-1]}") for prompt in prompts: inputs = f"{prompt}\n" json_data = { "inputs": inputs, "parameters": { "max_new_tokens": 30, "do_sample": False, }, } generated_output = client.text_generation(prompt, max_new_tokens=30, stream=False) print([f"{prompt}\n{generated_output}"]) ``` --------- Co-authored-by: Nicolas Patry <patry.nicolas@protonmail.com>
40 lines
1.1 KiB
Python
40 lines
1.1 KiB
Python
import pytest
|
|
import requests
|
|
import io
|
|
import base64
|
|
|
|
|
|
@pytest.fixture(scope="module")
def flash_pali_gemma_handle(launcher):
    """Launch a single-shard TGI server for PaliGemma and yield its handle.

    Uses the ``float16`` revision of ``google/paligemma-3b-pt-224``; the
    server lives for the whole test module and is torn down when the
    ``with`` block exits.
    """
    launch_kwargs = {
        "num_shard": 1,
        "revision": "float16",
        "max_input_length": 4000,
        "max_total_tokens": 4096,
    }
    with launcher("google/paligemma-3b-pt-224", **launch_kwargs) as handle:
        yield handle
|
@pytest.fixture(scope="module")
async def flash_pali_gemma(flash_pali_gemma_handle):
    """Block until the launched server reports healthy (300s budget), then
    hand the tests its client object."""
    handle = flash_pali_gemma_handle
    await handle.health(300)
    return handle.client
|
def get_cow_beach(image_path="integration-tests/images/cow_beach.png"):
    """Return a PNG test image encoded as a base64 ``data:`` URI.

    Args:
        image_path: Path to the PNG file to embed. Defaults to the
            cow-on-a-beach fixture used by these integration tests; the
            parameter is provided so other tests can reuse this helper
            for different fixture images. NOTE(review): the default path
            is relative to the repository root, so it assumes pytest is
            run from there.

    Returns:
        A ``data:image/png;base64,...`` string suitable for inlining an
        image into a prompt.
    """
    with open(image_path, "rb") as image_file:
        encoded = base64.b64encode(image_file.read())
    return f"data:image/png;base64,{encoded.decode('utf-8')}"
|
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_pali_gemma(flash_pali_gemma, response_snapshot):
    """Greedy-decode a visual-question prompt and compare to the snapshot.

    Bug fix: the base64 image URI was built (``cow``) but never included
    in the prompt, so the model was asked "Where is the cow standing?"
    with no image while the test asserted the image-grounded answer
    "beach". The image is now embedded in ``inputs`` using the
    ``![](...)`` markdown form TGI accepts for image inputs.
    """
    cow = get_cow_beach()
    # Embed the image data URI ahead of the question text.
    inputs = f"![]({cow})Where is the cow standing?\n"
    response = await flash_pali_gemma.generate(inputs, max_new_tokens=20)

    # Greedy decoding should ground the answer in the picture.
    assert response.generated_text == "beach"
    assert response == response_snapshot