Mirror of https://github.com/huggingface/text-generation-inference.git
Synced 2025-09-11 20:34:54 +00:00

Commit 0d496baaa4: Merge branch 'main' into lora-internal

.github/workflows/build.yaml (vendored, 159 lines changed)
@@ -1,46 +1,29 @@
 name: Build and push docker image to internal registry
 
 on:
-  workflow_dispatch:
-  push:
-    branches:
-      - 'main'
-    tags:
-      - 'v*'
-  pull_request:
-    paths:
-      - ".github/workflows/build.yaml"
-      - "integration-tests/**"
-      - "server/**"
-      - "proto/**"
-      - "router/**"
-      - "launcher/**"
-      - "Cargo.lock"
-      - "rust-toolchain.toml"
-      - "Dockerfile"
-      - "Dockerfile_amd"
-      - "Dockerfile_intel"
-    branches:
-      - 'main'
+  workflow_call:
+    inputs:
+      hardware:
+        type: string
+        description: Hardware
+        # options:
+        # - cuda
+        # - rocm
+        # - intel
+        required: true
 
 jobs:
-  build-and-push-image:
+  build-and-push:
+    outputs:
+      docker_image: ${{ steps.final.outputs.docker_image }}
+      docker_devices: ${{ steps.final.outputs.docker_devices }}
+      runs_on: ${{ steps.final.outputs.runs_on }}
+      label: ${{ steps.final.outputs.label }}
     concurrency:
-      group: ${{ github.workflow }}-build-and-push-image-${{ matrix.name }}-${{ github.head_ref || github.run_id }}
+      group: ${{ github.workflow }}-build-and-push-image-${{ inputs.hardware }}-${{ github.head_ref || github.run_id }}
       cancel-in-progress: true
+    # TODO see with @Glegendre to get CPU runner here instead
     runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci]
-    strategy:
-      matrix:
-        include:
-          - name: "cuda"
-            label: ""
-            dockerfile: "Dockerfile"
-          - name: "amd"
-            label: "-rocm"
-            dockerfile: "Dockerfile_amd"
-          - name: "intel"
-            label: "-intel"
-            dockerfile: "Dockerfile_intel"
     permissions:
       contents: write
       packages: write
@@ -50,10 +33,43 @@ jobs:
       security-events: write
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Inject slug/short variables
         uses: rlespinasse/github-slug-action@v4.4.1
+      - name: Construct harware variables
+        shell: bash
+        run: |
+          case ${{ inputs.hardware }} in
+            cuda)
+              export dockerfile="Dockerfile"
+              export label_extension=""
+              export docker_devices=""
+              export runs_on="nvidia-gpu"
+              ;;
+            rocm)
+              export dockerfile="Dockerfile_amd"
+              export label_extension="-rocm"
+              export docker_devices="/dev/kfd,/dev/dri"
+              # TODO Re-enable when they pass.
+              # export runs_on="amd-gpu-tgi"
+              export runs_on="ubuntu-latest"
+              ;;
+            intel)
+              export dockerfile="Dockerfile_intel"
+              export label_extension="-intel"
+              export docker_devices=""
+              export runs_on="ubuntu-latest"
+              ;;
+          esac
+          echo $dockerfile
+          echo "Dockerfile=${dockerfile}"
+          echo $label_extension
+          echo $docker_devices
+          echo $runs_on
+          echo "DOCKERFILE=${dockerfile}" >> $GITHUB_ENV
+          echo "LABEL=${label_extension}" >> $GITHUB_ENV
+          echo "DOCKER_DEVICES=${docker_devices}" >> $GITHUB_ENV
+          echo "RUNS_ON=${runs_on}" >> $GITHUB_ENV
       - name: Tailscale
         uses: huggingface/tailscale-action@main
         with:
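For reference, the mapping that the "Construct harware variables" step encodes is small enough to summarize in one place. The sketch below is editorial illustration only (it is not part of this commit); it simply mirrors the values the case statement writes to $GITHUB_ENV, so the dictionary keys and the helper name are not from the repository.

# Illustrative only: the hardware -> build configuration mapping used by the step above.
HARDWARE_CONFIG = {
    "cuda": {
        "dockerfile": "Dockerfile",
        "label": "",
        "docker_devices": "",
        "runs_on": "nvidia-gpu",
    },
    "rocm": {
        "dockerfile": "Dockerfile_amd",
        "label": "-rocm",
        "docker_devices": "/dev/kfd,/dev/dri",
        # runs_on is temporarily "ubuntu-latest" until the amd-gpu-tgi runners pass.
        "runs_on": "ubuntu-latest",
    },
    "intel": {
        "dockerfile": "Dockerfile_intel",
        "label": "-intel",
        "docker_devices": "",
        "runs_on": "ubuntu-latest",
    },
}


def github_env_lines(hardware: str) -> list:
    """Render the same KEY=value lines the workflow appends to $GITHUB_ENV."""
    cfg = HARDWARE_CONFIG[hardware]
    return [
        f"DOCKERFILE={cfg['dockerfile']}",
        f"LABEL={cfg['label']}",
        f"DOCKER_DEVICES={cfg['docker_devices']}",
        f"RUNS_ON={cfg['runs_on']}",
    ]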
@@ -61,25 +77,25 @@ jobs:
           slackChannel: ${{ secrets.SLACK_CIFEEDBACK_CHANNEL }}
           slackToken: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
       - name: Initialize Docker Buildx
-        uses: docker/setup-buildx-action@v2.0.0
+        uses: docker/setup-buildx-action@v3
         with:
           install: true
       - name: Login to GitHub Container Registry
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
       - name: Login to internal Container Registry
-        uses: docker/login-action@v2.1.0
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.TAILSCALE_DOCKER_USERNAME }}
           password: ${{ secrets.TAILSCALE_DOCKER_PASSWORD }}
           registry: registry.internal.huggingface.tech
       - name: Login to Azure Container Registry
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@v2.1.0
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.AZURE_DOCKER_USERNAME }}
           password: ${{ secrets.AZURE_DOCKER_PASSWORD }}
@@ -88,12 +104,12 @@ jobs:
       - name: Extract metadata (tags, labels) for Docker
         if: ${{ github.event_name == 'pull_request' }}
         id: meta-pr
-        uses: docker/metadata-action@v4.3.0
+        uses: docker/metadata-action@v5
        with:
          images: |
            registry.internal.huggingface.tech/api-inference/community/text-generation-inference
          tags: |
-            type=raw,value=sha-${{ env.GITHUB_SHA_SHORT }}${{ matrix.label }}
+            type=raw,value=sha-${{ env.GITHUB_SHA_SHORT }}${{ env.LABEL }}
       # If main, release or tag
       - name: Extract metadata (tags, labels) for Docker
         if: ${{ github.event_name != 'pull_request' }}
@@ -107,44 +123,61 @@ jobs:
             ghcr.io/huggingface/text-generation-inference
             db4c2190dd824d1f950f5d1555fbadf0.azurecr.io/text-generation-inference
           tags: |
-            type=semver,pattern={{version}}${{ matrix.label }}
-            type=semver,pattern={{major}}.{{minor}}${{ matrix.label }}
-            type=raw,value=latest${{ matrix.label }},enable=${{ github.ref == format('refs/heads/{0}', github.event.repository.default_branch) }}
-            type=raw,value=sha-${{ env.GITHUB_SHA_SHORT }}${{ matrix.label }}
+            type=semver,pattern={{version}}${{ env.LABEL }}
+            type=semver,pattern={{major}}.{{minor}}${{ env.LABEL }}
+            type=raw,value=latest${{ env.LABEL }},enable=${{ github.ref == format('refs/heads/{0}', github.event.repository.default_branch) }}
+            type=raw,value=sha-${{ env.GITHUB_SHA_SHORT }}${{ env.LABEL }}
       - name: Build and push Docker image
         id: build-and-push
         uses: docker/build-push-action@v4
         with:
           context: .
-          file: ${{ matrix.dockerfile }}
+          file: ${{ env.DOCKERFILE }}
           push: true
           platforms: 'linux/amd64'
           build-args: |
             GIT_SHA=${{ env.GITHUB_SHA }}
-            DOCKER_LABEL=sha-${{ env.GITHUB_SHA_SHORT }}${{ matrix.label }}
+            DOCKER_LABEL=sha-${{ env.GITHUB_SHA_SHORT }}${{ env.LABEL }}
           tags: ${{ steps.meta.outputs.tags || steps.meta-pr.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels || steps.meta-pr.outputs.labels }}
-          network: host
-          cache-from: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ matrix.label }},mode=min
-          cache-to: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ matrix.label }},mode=min
+          cache-from: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ env.LABEL }},mode=min
+          cache-to: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ env.LABEL }},mode=min
+      - name: Final
+        id: final
+        run: |
+          echo "docker_image=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:sha-${{ env.GITHUB_SHA_SHORT}}${{ env.LABEL }}" >> "$GITHUB_OUTPUT"
+          echo "docker_devices=${{ env.DOCKER_DEVICES }}" >> "$GITHUB_OUTPUT"
+          echo "runs_on=${{ env.RUNS_ON }}" >> "$GITHUB_OUTPUT"
+          echo "label=${{ env.LABEL }}" >> "$GITHUB_OUTPUT"
+  integration_tests:
+    concurrency:
+      group: ${{ github.workflow }}-${{ github.job }}-${{ needs.build-and-push.outputs.label }}-${{ github.head_ref || github.run_id }}
+      cancel-in-progress: true
+    needs: build-and-push
+    runs-on: ["self-hosted", "${{ needs.build-and-push.outputs.runs_on }}", "multi-gpu"]
+    if: needs.build-and-push.outputs.runs_on != 'ubuntu-latest'
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+      - name: Inject slug/short variables
+        uses: rlespinasse/github-slug-action@v4.4.1
       - name: Set up Python
-        if: matrix.name == 'cuda'
         uses: actions/setup-python@v4
         with:
-          python-version: 3.9
+          python-version: "3.10"
       - name: Install
-        if: matrix.name == 'cuda'
         run: |
           make install-integration-tests
+      - name: Tailscale
+        uses: huggingface/tailscale-action@main
+        if: needs.build-and-push.outputs.runs_on != 'amd-gpu-tgi'
+        with:
+          authkey: ${{ secrets.TAILSCALE_AUTHKEY }}
       - name: Run tests
-        if: matrix.name == 'cuda'
         run: |
           export DOCKER_VOLUME=/mnt/cache
-          export DOCKER_IMAGE=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:sha-${{ env.GITHUB_SHA_SHORT }}
-          export HUGGING_FACE_HUB_TOKEN=${{ secrets.HF_TOKEN }}
+          export DOCKER_IMAGE=${{ needs.build-and-push.outputs.docker_image }}
+          export DOCKER_DEVICES=${{ needs.build-and-push.outputs.docker_devices }}
+          export HUGGING_FACE_HUB_TOKEN=${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+          echo $DOCKER_IMAGE
           pytest -s -vv integration-tests
-      - name: Tailscale Wait
-        if: ${{ failure() || runner.debug == '1' }}
-        uses: huggingface/tailscale-action@main
-        with:
-          waitForSSH: true
@@ -11,7 +11,7 @@ concurrency:
 
 jobs:
   build:
-    uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main
+    uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yaml@main
     with:
       commit_sha: ${{ github.event.pull_request.head.sha }}
       pr_number: ${{ github.event.number }}
.github/workflows/ci_build.yaml (vendored, new file, 36 lines)

@@ -0,0 +1,36 @@
+name: CI build
+
+on:
+  push:
+    branches:
+      - 'main'
+    tags:
+      - 'v*'
+  pull_request:
+    paths:
+      - ".github/workflows/build.yaml"
+      - "integration-tests/**"
+      - "server/**"
+      - "proto/**"
+      - "router/**"
+      - "launcher/**"
+      - "Cargo.lock"
+      - "rust-toolchain.toml"
+      - "Dockerfile"
+      - "Dockerfile_amd"
+      - "Dockerfile_intel"
+    branches:
+      - 'main'
+
+jobs:
+  build:
+    strategy:
+      # super important if you want to see all results, even if one fails
+      # fail-fast is true by default
+      fail-fast: false
+      matrix:
+        hardware: ["cuda", "rocm", "intel"]
+    uses: ./.github/workflows/build.yaml # calls the one above ^
+    with:
+      hardware: ${{ matrix.hardware }}
+    secrets: inherit
.github/workflows/integration_tests.yaml (vendored, new file, 41 lines)

@@ -0,0 +1,41 @@
+name: Integration tests
+
+on:
+  workflow_call:
+    inputs:
+      docker_image:
+        type: string
+        description: Hardware
+        required: true
+      docker_devices:
+        type: string
+        description: Hardware
+      runs_on:
+        type: string
+        required: true
+        description: Hardware to run integration tests
+jobs:
+  integration_tests:
+    concurrency:
+      group: ${{ github.workflow }}-${{ github.job }}-${{ github.head_ref || github.run_id }}
+      cancel-in-progress: true
+    runs-on: ${{ inputs.runs_on }}
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+      - name: Inject slug/short variables
+        uses: rlespinasse/github-slug-action@v4.4.1
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: 3.9
+      - name: Install
+        run: |
+          make install-integration-tests
+      - name: Run tests
+        run: |
+          export DOCKER_VOLUME=/mnt/cache
+          export DOCKER_IMAGE=${{ inputs.docker_image }}
+          export DOCKER_DEVICES=${{ inputs.docker_devices }}
+          export HUGGING_FACE_HUB_TOKEN=${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+          pytest -s -vv integration-tests
.github/workflows/tests.yaml (vendored, 4 lines changed)

@@ -33,8 +33,8 @@ jobs:
       - name: Install Rust
         uses: actions-rs/toolchain@v1
         with:
-          # Released on: June 13, 2024
-          # https://releases.rs/docs/1.79.0/
+          # Released on: 02 May, 2024
+          # https://releases.rs/docs/1.78.0/
           toolchain: 1.79.0
           override: true
           components: rustfmt, clippy
@@ -5,6 +5,7 @@ WORKDIR /usr/src
 ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse
 
 FROM chef as planner
+COPY Cargo.lock Cargo.lock
 COPY Cargo.toml Cargo.toml
 COPY rust-toolchain.toml rust-toolchain.toml
 COPY proto proto
@@ -5,6 +5,7 @@ WORKDIR /usr/src
 ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse
 
 FROM chef as planner
+COPY Cargo.lock Cargo.lock
 COPY Cargo.toml Cargo.toml
 COPY rust-toolchain.toml rust-toolchain.toml
 COPY proto proto
@@ -4,6 +4,7 @@ WORKDIR /usr/src
 ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse
 
 FROM chef as planner
+COPY Cargo.lock Cargo.lock
 COPY Cargo.toml Cargo.toml
 COPY rust-toolchain.toml rust-toolchain.toml
 COPY proto proto
@@ -34,6 +34,7 @@ from text_generation.types import (
 DOCKER_IMAGE = os.getenv("DOCKER_IMAGE", None)
 HUGGING_FACE_HUB_TOKEN = os.getenv("HUGGING_FACE_HUB_TOKEN", None)
 DOCKER_VOLUME = os.getenv("DOCKER_VOLUME", "/data")
+DOCKER_DEVICES = os.getenv("DOCKER_DEVICES")
 
 
 class ResponseComparator(JSONSnapshotExtension):
@@ -453,6 +454,18 @@ def launcher(event_loop):
         if DOCKER_VOLUME:
             volumes = [f"{DOCKER_VOLUME}:/data"]
 
+        if DOCKER_DEVICES:
+            devices = DOCKER_DEVICES.split(",")
+            visible = os.getenv("ROCR_VISIBLE_DEVICES")
+            if visible:
+                env["ROCR_VISIBLE_DEVICES"] = visible
+            device_requests = []
+        else:
+            devices = []
+            device_requests = [
+                docker.types.DeviceRequest(count=gpu_count, capabilities=[["gpu"]])
+            ]
+
         container = client.containers.run(
             DOCKER_IMAGE,
             command=args,
@@ -460,9 +473,8 @@ def launcher(event_loop):
             environment=env,
             auto_remove=False,
             detach=True,
-            device_requests=[
-                docker.types.DeviceRequest(count=gpu_count, capabilities=[["gpu"]])
-            ],
+            device_requests=device_requests,
+            devices=devices,
             volumes=volumes,
             ports={"80/tcp": port},
             shm_size="1G",
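The new DOCKER_DEVICES plumbing above picks between two docker-py mechanisms: when a comma-separated device list is set (the ROCm case, "/dev/kfd,/dev/dri"), the raw device nodes are passed through via `devices=`, and no NVIDIA device request is made; otherwise the container keeps using a generic GPU `DeviceRequest`. A minimal standalone sketch of the same decision is shown below; it assumes docker-py is installed, and the image name, command, and gpu_count are placeholders rather than values from the diff.

import os

import docker

client = docker.from_env()

# e.g. "/dev/kfd,/dev/dri" on ROCm runners, unset on CUDA runners.
docker_devices = os.getenv("DOCKER_DEVICES")
gpu_count = 1  # placeholder

if docker_devices:
    # ROCm: pass the device nodes straight through; no NVIDIA device request.
    devices = docker_devices.split(",")
    device_requests = []
else:
    # CUDA: rely on the NVIDIA runtime via a generic GPU device request.
    devices = []
    device_requests = [
        docker.types.DeviceRequest(count=gpu_count, capabilities=[["gpu"]])
    ]

container = client.containers.run(
    "my-tgi-image",  # placeholder image name, not from the diff
    command=None,    # placeholder: the fixture passes the launcher args here
    devices=devices,
    device_requests=device_requests,
    detach=True,
)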
@@ -340,7 +340,7 @@ def quantize(
         logger_level=logger_level,
         json_output=json_output,
     )
-    from text_generation_server.utils.gptq.quantize import quantize
+    from text_generation_server.layers.gptq.quantize import quantize
 
     quantize(
         model_id=model_id,
@@ -12,7 +12,7 @@ from huggingface_hub import HfApi
 from accelerate import init_empty_weights
 from text_generation_server.utils import initialize_torch_distributed, Weights
 from text_generation_server.utils.hub import weight_files
-from text_generation_server.utils.gptq.quant_linear import QuantLinear
+from text_generation_server.layers.gptq.quant_linear import QuantLinear
 from loguru import logger
 from typing import Optional
 
@@ -40,31 +40,12 @@ def _load_gqa(config, prefix: str, weights):
     assert config.hidden_size % config.num_attention_heads == 0
     assert config.num_attention_heads % weights.process_group.size() == 0
 
-    weight = weights.get_multi_weights_col(
+    return TensorParallelColumnLinear.load_multi(
+        config,
         prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
-        quantize=config.quantize,
         dim=0,
-    )
-
-    if config.quantize not in ["gptq", "awq", "marlin"]:
-        weight = weight.to(dtype=weights.dtype).to(device=weights.device)
-
-        head_size = config.hidden_size // config.num_attention_heads
-        num_heads = config.num_attention_heads // weights.process_group.size()
-        num_key_value_heads = config.num_key_value_heads // weights.process_group.size()
-        assert list(weight.shape) == [
-            (num_heads + 2 * num_key_value_heads) * head_size,
-            config.hidden_size,
-        ], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}"
-
-    w = [
-        weights.get_sharded(f"{p}.bias", dim=0)
-        for p in [f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"]
-    ]
-    bias = torch.cat(w, dim=0).to(dtype=weights.dtype).to(device=weights.device)
-
-    return TensorParallelColumnLinear(
-        get_linear(weight, bias=bias, quantize=config.quantize)
+        weights=weights,
+        bias=True,
     )
 
 
@@ -16,6 +16,13 @@ if cuda_graphs is not None:
 else:
     cuda_graphs = None
 
+
+# sorting the cuda graphs in descending order helps reduce the
+# memory impact and results in less memory usage
+if cuda_graphs is not None:
+    cuda_graphs.sort(reverse=True)
+
+
 CUDA_GRAPHS = cuda_graphs
 
 # This is overridden at model loading.
@@ -130,29 +130,57 @@ class Weights:
         ), f"The choosen size {size} is not compatible with sharding on {world_size} shards"
         return self.get_partial_sharded(tensor_name, dim)
 
-    def _get_qweight(self, name: str, block_sizes: Union[int, List[int]]):
-        slice_ = self._get_slice(name)
-        total_size = slice_.get_shape()[1]
+    def get_packed_sharded(
+        self, tensor_name: str, dim: int, block_sizes: Union[int, List[int]]
+    ) -> torch.Tensor:
+        """
+        Get a shard from a tensor that packs multiple tensors.
+
+        When a tensor packs multiple tensors (such as QKV or an up
+        projection + gate projection), sharding with `get_sharded` is not
+        safe since it would not split the packed tensors across shards.
+
+        This method shards a tensor, such that the packed tensors are
+        split across shards.
+
+        The columns are split in equally sized blocks when blocks is an `int`, or
+        in blocks proportional given to the sizes. For instance `[2, 1, 1]` will
+        divide an input with dimensionality `1024` in `[512, 256, 256]`. This is
+        convenient for e.g. splitting QKV without knowing the storage details of
+        quantized weights.
+        """
+        slice_ = self._get_slice(tensor_name)
+        total_size = slice_.get_shape()[dim]
         block_sizes = _blocks_to_block_sizes(total_size=total_size, blocks=block_sizes)
 
         world_size = self.process_group.size()
         rank = self.process_group.rank()
 
-        weights = []
+        tensors = []
         block_offset = 0
         for block_size in block_sizes:
             assert (
                 block_size % world_size == 0
-            ), f"Prepacked qkv cannot be sharded across {world_size} shards"
+            ), f"Prepacked tensor cannot be sharded across {world_size} shards"
             shard_block_size = block_size // world_size
             start = rank * shard_block_size
             stop = (rank + 1) * shard_block_size
-            weights.append(slice_[:, block_offset + start : block_offset + stop])
+            if dim == 0:
+                tensor = slice_[block_offset + start : block_offset + stop]
+            elif dim == 1:
+                tensor = slice_[:, block_offset + start : block_offset + stop]
+            else:
+                raise NotImplementedError("Currently only dim=0 or dim=1 is supported")
+            tensors.append(tensor)
             block_offset += block_size
+        tensor = torch.cat(tensors, dim=dim)
+        tensor = tensor.to(device=self.device)
 
-        weight = torch.cat(weights, dim=1)
-        weight = weight.to(device=self.device)
-        return weight
+        # Avoid casting quantizer dtypes.
+        if tensor.dtype not in [torch.int16, torch.int32, torch.int64]:
+            tensor = tensor.to(dtype=self.dtype)
+
+        return tensor
 
     def get_weights_col_packed_qkv(
         self,
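The docstring's `[2, 1, 1]` example is easier to follow with concrete numbers. The sketch below is illustrative and not from the repository: `blocks_to_block_sizes` is a stand-in for the `_blocks_to_block_sizes` helper the method calls, and `packed_shard_slices` only reproduces the index arithmetic of the loop above for a packed dimension of 1024 split across 2 shards.

from typing import List, Union


def blocks_to_block_sizes(total_size: int, blocks: Union[int, List[int]]) -> List[int]:
    """Stand-in for _blocks_to_block_sizes: turn a block count or proportions into sizes."""
    if isinstance(blocks, int):
        assert total_size % blocks == 0
        return [total_size // blocks] * blocks
    total = sum(blocks)
    assert total_size % total == 0
    unit = total_size // total
    return [unit * b for b in blocks]


def packed_shard_slices(total_size, blocks, world_size, rank):
    """Return the (start, stop) ranges this rank reads from each packed block."""
    slices = []
    block_offset = 0
    for block_size in blocks_to_block_sizes(total_size, blocks):
        assert block_size % world_size == 0
        shard = block_size // world_size
        start = block_offset + rank * shard
        stop = block_offset + (rank + 1) * shard
        slices.append((start, stop))
        block_offset += block_size
    return slices


# [2, 1, 1] over 1024 -> blocks of [512, 256, 256]; with 2 shards, rank 0 reads
# [0:256], [512:640], [768:896] and rank 1 reads [256:512], [640:768], [896:1024].
print(packed_shard_slices(1024, [2, 1, 1], world_size=2, rank=0))
print(packed_shard_slices(1024, [2, 1, 1], world_size=2, rank=1))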
@@ -185,7 +213,9 @@ class Weights:
             from text_generation_server.layers.gptq import GPTQWeight
 
             try:
-                qweight = self._get_qweight(f"{prefix}.qweight", block_sizes)
+                qweight = self.get_packed_sharded(
+                    f"{prefix}.qweight", dim=1, block_sizes=block_sizes
+                )
             except RuntimeError:
                 raise RuntimeError(
                     f"Cannot load `{quantize}` weight, make sure the model is already quantized."
@@ -193,8 +223,12 @@ class Weights:
 
             gptq_params = self._get_gptq_params()
 
-            qzeros = self._get_qweight(f"{prefix}.qzeros", block_sizes)
-            scales = self._get_qweight(f"{prefix}.scales", block_sizes)
+            qzeros = self.get_packed_sharded(
+                f"{prefix}.qzeros", dim=1, block_sizes=block_sizes
+            )
+            scales = self.get_packed_sharded(
+                f"{prefix}.scales", dim=1, block_sizes=block_sizes
+            )
             scales = scales.to(dtype=self.dtype)
 
             if quantize == "gptq" and gptq_params.quant_method == "gptq":
@@ -237,13 +271,17 @@ class Weights:
             if quant_method == "gptq":
                 gptq_params = self._get_gptq_params()
                 try:
-                    qweight = self._get_qweight(f"{prefix}.qweight", block_sizes)
+                    qweight = self.get_packed_sharded(
+                        f"{prefix}.qweight", dim=1, block_sizes=block_sizes
+                    )
                 except RuntimeError:
                     raise RuntimeError(
                         f"Cannot load `{quantize}` weight for GPTQ -> Marlin repacking, make sure the model is already quantized"
                     )
 
-                scales = self._get_qweight(f"{prefix}.scales", block_sizes)
+                scales = self.get_packed_sharded(
+                    f"{prefix}.scales", dim=1, block_sizes=block_sizes
+                )
                 g_idx = self.get_tensor(f"{prefix}.g_idx")
                 weight = repack_gptq_for_marlin(
                     qweight=qweight,
@@ -257,34 +295,17 @@ class Weights:
                 )
 
             else:
-                B = self._get_qweight(f"{prefix}.B", block_sizes)
-                s = self._get_qweight(f"{prefix}.s", block_sizes)
+                B = self.get_packed_sharded(
+                    f"{prefix}.B", dim=1, block_sizes=block_sizes
+                )
+                s = self.get_packed_sharded(
+                    f"{prefix}.s", dim=1, block_sizes=block_sizes
+                )
                 weight = MarlinWeight(B=B, s=s)
         else:
-            slice_ = self._get_slice(f"{prefix}.weight")
-            total_size = slice_.get_shape()[0]
-            block_sizes = _blocks_to_block_sizes(
-                total_size=total_size, blocks=block_sizes
-            )
-
-            world_size = self.process_group.size()
-            rank = self.process_group.rank()
-
-            tensors = []
-            block_offset = 0
-            for block_size in block_sizes:
-                assert (
-                    block_size % world_size == 0
-                ), f"Prepacked weights cannot be sharded across {world_size} shards"
-                shard_block_size = block_size // world_size
-                start = rank * shard_block_size
-                stop = (rank + 1) * shard_block_size
-                tensor = slice_[block_offset + start : block_offset + stop]
-                tensors.append(tensor)
-                block_offset += block_size
-            weight = torch.cat(tensors, dim=0)
-            weight = weight.to(device=self.device)
-            weight = weight.to(dtype=self.dtype)
+            weight = self.get_packed_sharded(
+                f"{prefix}.weight", dim=0, block_sizes=block_sizes
+            )
             return weight
 
     def get_weights_col(self, prefix: str, quantize: str):