diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 00000000..dd688493 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,26 @@ +--- +name: 报告问题 +about: 使用简练详细的语言描述你遇到的问题 +title: '' +labels: bug +assignees: '' + +--- + +**例行检查** + +[//]: # (方框内删除已有的空格,填 x 号) ++ [ ] 我已确认目前没有类似 issue ++ [ ] 我已确认我已升级到最新版本 ++ [ ] 我已完整查看过项目 README,尤其是常见问题部分 ++ [ ] 我理解并愿意跟进此 issue,协助测试和提供反馈 ++ [ ] 我理解并认可上述内容,并理解项目维护者精力有限,**不遵循规则的 issue 可能会被无视或直接关闭** + +**问题描述** + +**复现步骤** + +**预期结果** + +**相关截图** +如果没有的话,请删除此节。 \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000..83a0f3f4 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: false +contact_links: + - name: 项目群聊 + url: https://openai.justsong.cn/ + about: QQ 群:828520184,自动审核,备注 One API + - name: 赞赏支持 + url: https://iamazing.cn/page/reward + about: 请作者喝杯咖啡,以激励作者持续开发 diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000..049d89c8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,21 @@ +--- +name: 功能请求 +about: 使用简练详细的语言描述希望加入的新功能 +title: '' +labels: enhancement +assignees: '' + +--- + +**例行检查** + +[//]: # (方框内删除已有的空格,填 x 号) ++ [ ] 我已确认目前没有类似 issue ++ [ ] 我已确认我已升级到最新版本 ++ [ ] 我已完整查看过项目 README,已确定现有版本无法满足需求 ++ [ ] 我理解并愿意跟进此 issue,协助测试和提供反馈 ++ [ ] 我理解并认可上述内容,并理解项目维护者精力有限,**不遵循规则的 issue 可能会被无视或直接关闭** + +**功能描述** + +**应用场景** diff --git a/.github/workflows/docker-image-amd64-en.yml b/.github/workflows/docker-image-amd64-en.yml new file mode 100644 index 00000000..44dc0bc0 --- /dev/null +++ b/.github/workflows/docker-image-amd64-en.yml @@ -0,0 +1,49 @@ +name: Publish Docker image (amd64, English) + +on: + push: + tags: + - '*' + workflow_dispatch: + inputs: + name: + description: 'reason' + required: false +jobs: + push_to_registries: + name: Push Docker image to multiple registries + runs-on: ubuntu-latest + permissions: + packages: write + contents: read + steps: + - name: Check out the repo + uses: actions/checkout@v3 + + - name: Save version info + run: | + git describe --tags > VERSION + + - name: Translate + run: | + python ./i18n/translate.py --repository_path . --json_file_path ./i18n/en.json + - name: Log in to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@v4 + with: + images: | + justsong/one-api-en + + - name: Build and push Docker images + uses: docker/build-push-action@v3 + with: + context: . 
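+          # The build context is the checked-out tree after the Translate step
+          # above has rewritten UI strings in place using i18n/en.json.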
+ push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} \ No newline at end of file diff --git a/.github/workflows/docker-image-arm64.yml b/.github/workflows/docker-image-arm64.yml index 7304e5c9..d6449eb8 100644 --- a/.github/workflows/docker-image-arm64.yml +++ b/.github/workflows/docker-image-arm64.yml @@ -4,6 +4,7 @@ on: push: tags: - '*' + - '!*-alpha*' workflow_dispatch: inputs: name: diff --git a/.github/workflows/linux-release.yml b/.github/workflows/linux-release.yml index 6833a901..364b83ae 100644 --- a/.github/workflows/linux-release.yml +++ b/.github/workflows/linux-release.yml @@ -6,6 +6,7 @@ on: push: tags: - '*' + - '!*-alpha*' jobs: release: runs-on: ubuntu-latest diff --git a/.github/workflows/macos-release.yml b/.github/workflows/macos-release.yml index 5ec789c1..bdd0d208 100644 --- a/.github/workflows/macos-release.yml +++ b/.github/workflows/macos-release.yml @@ -6,6 +6,7 @@ on: push: tags: - '*' + - '!*-alpha*' jobs: release: runs-on: macos-latest diff --git a/.github/workflows/windows-release.yml b/.github/workflows/windows-release.yml index fa5bb995..33193a89 100644 --- a/.github/workflows/windows-release.yml +++ b/.github/workflows/windows-release.yml @@ -6,6 +6,7 @@ on: push: tags: - '*' + - '!*-alpha*' jobs: release: runs-on: windows-latest diff --git a/.gitignore b/.gitignore index 0b2856cc..60abb13e 100644 --- a/.gitignore +++ b/.gitignore @@ -4,4 +4,6 @@ upload *.exe *.db build -*.db-journal \ No newline at end of file +*.db-journal +logs +data \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 4afbf100..ffb8c21b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,10 +1,11 @@ FROM node:16 as builder WORKDIR /build +COPY web/package.json . +RUN npm install COPY ./web . COPY ./VERSION . -RUN npm install -RUN REACT_APP_VERSION=$(cat VERSION) npm run build +RUN DISABLE_ESLINT_PLUGIN='true' REACT_APP_VERSION=$(cat VERSION) npm run build FROM golang AS builder2 @@ -13,9 +14,10 @@ ENV GO111MODULE=on \ GOOS=linux WORKDIR /build +ADD go.mod go.sum ./ +RUN go mod download COPY . . COPY --from=builder /build/build ./web/build -RUN go mod download RUN go build -ldflags "-s -w -X 'one-api/common.Version=$(cat VERSION)' -extldflags '-static'" -o one-api FROM alpine diff --git a/README.en.md b/README.en.md new file mode 100644 index 00000000..9345a219 --- /dev/null +++ b/README.en.md @@ -0,0 +1,299 @@ +
+ Deployment Tutorial + · + Usage + · + Feedback + · + Screenshots + · + Live Demo + · + FAQ + · + Related Projects + · + Donate +
+
+> **Warning**: This README is translated by ChatGPT. Please feel free to submit a PR if you find any translation errors.
+
+> **Warning**: The Docker image for English version is `justsong/one-api-en`.
+
+> **Note**: The latest image pulled from Docker may be an `alpha` release. Specify the version manually if you require stability.
+
+## Features
+1. Support for multiple large models:
+   + [x] [OpenAI ChatGPT Series Models](https://platform.openai.com/docs/guides/gpt/chat-completions-api) (Supports [Azure OpenAI API](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference))
+   + [x] [Anthropic Claude Series Models](https://anthropic.com)
+   + [x] [Google PaLM2 Series Models](https://developers.generativeai.google)
+   + [x] [Baidu Wenxin Yiyan Series Models](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html)
+   + [x] [Alibaba Tongyi Qianwen Series Models](https://help.aliyun.com/document_detail/2400395.html)
+   + [x] [Zhipu ChatGLM Series Models](https://bigmodel.cn)
+2. Supports access to multiple channels through **load balancing**.
+3. Supports **stream mode**, which renders responses with a typewriter-like effect via streaming.
+4. Supports **multi-machine deployment**. [See here](#multi-machine-deployment) for more details.
+5. Supports **token management** that allows setting token expiration time and usage count.
+6. Supports **voucher management** that enables batch generation and export of vouchers. Vouchers can be used to top up account balances.
+7. Supports **channel management** that allows bulk creation of channels.
+8. Supports **user grouping** and **channel grouping** for setting different rates for different groups.
+9. Supports channel **model list configuration**.
+10. Supports **quota details checking**.
+11. Supports **user invite rewards**.
+12. Allows display of balance in USD.
+13. Supports announcement publishing, recharge link setting, and initial balance setting for new users.
+14. Offers rich **customization** options:
+    1. Supports customization of system name, logo, and footer.
+    2. Supports customization of homepage and about page using HTML & Markdown code, or embedding a standalone webpage through iframe.
+15. Supports management API access through system access tokens.
+16. Supports Cloudflare Turnstile user verification.
+17. Supports user management and multiple user login/registration methods:
+    + Email login/registration and password reset via email.
+    + [GitHub OAuth](https://github.com/settings/applications/new).
+    + WeChat Official Account authorization (requires additional deployment of [WeChat Server](https://github.com/songquanpeng/wechat-server)).
+18. Immediate support and encapsulation of other major model APIs as they become available.
+
+## Deployment
+### Docker Deployment
+Deployment command: `docker run --name one-api -d --restart always -p 3000:3000 -e TZ=Asia/Shanghai -v /home/ubuntu/data/one-api:/data justsong/one-api-en`
+
+Update command: `docker run --rm -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower -cR`
+
+The first `3000` in `-p 3000:3000` is the host port, which can be modified as needed.
+
+Data will be saved in the `/home/ubuntu/data/one-api` directory on the host. Ensure that the directory exists and has write permissions, or change it to a suitable directory.
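+For higher concurrency, the Chinese README in this same patch recommends backing One API with MySQL instead of the default SQLite. A minimal sketch of the equivalent command for the English image; the `SQL_DSN` user, password, host, and database name below are placeholders, so adjust them to your own MySQL instance:
+
+```shell
+# Same flags as above, plus a MySQL DSN in the form user:password@tcp(host:port)/dbname
+docker run --name one-api -d --restart always -p 3000:3000 \
+  -e SQL_DSN="root:123456@tcp(localhost:3306)/oneapi" \
+  -e TZ=Asia/Shanghai \
+  -v /home/ubuntu/data/one-api:/data \
+  justsong/one-api-en
+```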
+ +Nginx reference configuration: +``` +server{ + server_name openai.justsong.cn; # Modify your domain name accordingly + + location / { + client_max_body_size 64m; + proxy_http_version 1.1; + proxy_pass http://localhost:3000; # Modify your port accordingly + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_cache_bypass $http_upgrade; + proxy_set_header Accept-Encoding gzip; + } +} +``` + +Next, configure HTTPS with Let's Encrypt certbot: +```bash +# Install certbot on Ubuntu: +sudo snap install --classic certbot +sudo ln -s /snap/bin/certbot /usr/bin/certbot +# Generate certificates & modify Nginx configuration +sudo certbot --nginx +# Follow the prompts +# Restart Nginx +sudo service nginx restart +``` + +The initial account username is `root` and password is `123456`. + +### Manual Deployment +1. Download the executable file from [GitHub Releases](https://github.com/songquanpeng/one-api/releases/latest) or compile from source: + ```shell + git clone https://github.com/songquanpeng/one-api.git + + # Build the frontend + cd one-api/web + npm install + npm run build + + # Build the backend + cd .. + go mod download + go build -ldflags "-s -w" -o one-api + ``` +2. Run: + ```shell + chmod u+x one-api + ./one-api --port 3000 --log-dir ./logs + ``` +3. Access [http://localhost:3000/](http://localhost:3000/) and log in. The initial account username is `root` and password is `123456`. + +For more detailed deployment tutorials, please refer to [this page](https://iamazing.cn/page/how-to-deploy-a-website). + +### Multi-machine Deployment +1. Set the same `SESSION_SECRET` for all servers. +2. Set `SQL_DSN` and use MySQL instead of SQLite. All servers should connect to the same database. +3. Set the `NODE_TYPE` for all non-master nodes to `slave`. +4. Set `SYNC_FREQUENCY` for servers to periodically sync configurations from the database. +5. Non-master nodes can optionally set `FRONTEND_BASE_URL` to redirect page requests to the master server. +6. Install Redis separately on non-master nodes, and configure `REDIS_CONN_STRING` so that the database can be accessed with zero latency when the cache has not expired. +7. If the main server also has high latency accessing the database, Redis must be enabled and `SYNC_FREQUENCY` must be set to periodically sync configurations from the database. + +Please refer to the [environment variables](#environment-variables) section for details on using environment variables. + +### Deployment on Control Panels (e.g., Baota) +Refer to [#175](https://github.com/songquanpeng/one-api/issues/175) for detailed instructions. + +If you encounter a blank page after deployment, refer to [#97](https://github.com/songquanpeng/one-api/issues/97) for possible solutions. + +### Deployment on Third-Party Platforms +
diff --git a/README.ja.md b/README.ja.md
+ デプロイチュートリアル + · + 使用方法 + · + フィードバック + · + スクリーンショット + · + ライブデモ + · + FAQ + · + 関連プロジェクト + · + 寄付 +
+
+> **警告**: この README は ChatGPT によって翻訳されています。翻訳ミスを発見した場合は遠慮なく PR を投稿してください。
+
+> **警告**: 英語版の Docker イメージは `justsong/one-api-en` です。
+
+> **注**: Docker からプルされた最新のイメージは、`alpha` リリースかもしれません。安定性が必要な場合は、手動でバージョンを指定してください。
+
+## 特徴
+1. 複数の大型モデルをサポート:
+   + [x] [OpenAI ChatGPT シリーズモデル](https://platform.openai.com/docs/guides/gpt/chat-completions-api) ([Azure OpenAI API](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference) をサポート)
+   + [x] [Anthropic Claude シリーズモデル](https://anthropic.com)
+   + [x] [Google PaLM2 シリーズモデル](https://developers.generativeai.google)
+   + [x] [Baidu Wenxin Yiyan シリーズモデル](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html)
+   + [x] [Alibaba Tongyi Qianwen シリーズモデル](https://help.aliyun.com/document_detail/2400395.html)
+   + [x] [Zhipu ChatGLM シリーズモデル](https://bigmodel.cn)
+2. **ロードバランシング**による複数チャンネルへのアクセスをサポート。
+3. ストリーム伝送によるタイプライター的効果を可能にする**ストリームモード**に対応。
+4. **マルチマシンデプロイ**に対応。[詳細はこちら](#multi-machine-deployment)を参照。
+5. トークンの有効期限や使用回数を設定できる**トークン管理**に対応しています。
+6. **バウチャー管理**に対応しており、バウチャーの一括生成やエクスポートが可能です。バウチャーは口座残高の補充に利用できます。
+7. **チャンネル管理**に対応し、チャンネルの一括作成が可能。
+8. グループごとに異なるレートを設定するための**ユーザーグループ**と**チャンネルグループ**をサポートしています。
+9. チャンネル**モデルリスト設定**に対応。
+10. **クォータ詳細チェック**をサポート。
+11. **ユーザー招待報酬**をサポートします。
+12. 米ドルでの残高表示が可能。
+13. 新規ユーザー向けのお知らせ公開、リチャージリンク設定、初期残高設定に対応。
+14. 豊富な**カスタマイズ**オプションを提供します:
+    1. システム名、ロゴ、フッターのカスタマイズが可能。
+    2. HTML と Markdown コードを使用したホームページとアバウトページのカスタマイズ、または iframe を介したスタンドアロンウェブページの埋め込みをサポートしています。
+15. システム・アクセストークンによる管理 API アクセスをサポートする。
+16. Cloudflare Turnstile によるユーザー認証に対応。
+17. ユーザー管理と複数のユーザーログイン/登録方法をサポート:
+    + 電子メールによるログイン/登録とパスワードリセット。
+    + [GitHub OAuth](https://github.com/settings/applications/new)。
+    + WeChat 公式アカウントの認証([WeChat Server](https://github.com/songquanpeng/wechat-server)の追加導入が必要)。
+18. 他の主要なモデル API が利用可能になった場合、即座にサポートし、カプセル化する。
+
+## デプロイメント
+### Docker デプロイメント
+デプロイコマンド: `docker run --name one-api -d --restart always -p 3000:3000 -e TZ=Asia/Shanghai -v /home/ubuntu/data/one-api:/data justsong/one-api-en`。
+
+更新コマンド: `docker run --rm -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower -cR`。
+
+`-p 3000:3000` の最初の `3000` はホストのポートで、必要に応じて変更できます。
+
+データはホストの `/home/ubuntu/data/one-api` ディレクトリに保存されます。このディレクトリが存在し、書き込み権限があることを確認するか、適切なディレクトリに変更してください。
+
+Nginx リファレンス設定:
+```
+server{
+   server_name openai.justsong.cn;  # ドメイン名は適宜変更
+
+   location / {
+          client_max_body_size  64m;
+          proxy_http_version 1.1;
+          proxy_pass http://localhost:3000;  # それに応じてポートを変更
+          proxy_set_header Host $host;
+          proxy_set_header X-Forwarded-For $remote_addr;
+          proxy_cache_bypass $http_upgrade;
+          proxy_set_header Accept-Encoding gzip;
+          proxy_read_timeout 300s;  # GPT-4 はより長いタイムアウトが必要
+   }
+}
+```
+
+次に、Let's Encrypt certbot を使って HTTPS を設定します:
+```bash
+# Ubuntu に certbot をインストール:
+sudo snap install --classic certbot
+sudo ln -s /snap/bin/certbot /usr/bin/certbot
+# 証明書の生成と Nginx 設定の変更
+sudo certbot --nginx
+# プロンプトに従う
+# Nginx を再起動
+sudo service nginx restart
+```
+
+初期アカウントのユーザー名は `root` で、パスワードは `123456` です。
+
+### マニュアルデプロイ
+1. [GitHub Releases](https://github.com/songquanpeng/one-api/releases/latest) から実行ファイルをダウンロードする、もしくはソースからコンパイルする:
+   ```shell
+   git clone https://github.com/songquanpeng/one-api.git
+
+   # フロントエンドのビルド
+   cd one-api/web
+   npm install
+   npm run build
+
+   # バックエンドのビルド
+   cd ..
+   go mod download
+   go build -ldflags "-s -w" -o one-api
+   ```
+2. 実行:
+   ```shell
+   chmod u+x one-api
+   ./one-api --port 3000 --log-dir ./logs
+   ```
+3. [http://localhost:3000/](http://localhost:3000/) にアクセスし、ログインします。初期アカウントのユーザー名は `root`、パスワードは `123456` です。
+
+より詳細なデプロイのチュートリアルについては、[このページ](https://iamazing.cn/page/how-to-deploy-a-website) を参照してください。
+
+### マルチマシンデプロイ
+1. すべてのサーバに同じ `SESSION_SECRET` を設定する。
+2. `SQL_DSN` を設定し、SQLite の代わりに MySQL を使用する。すべてのサーバは同じデータベースに接続する。
+3. マスターノード以外のノードの `NODE_TYPE` を `slave` に設定する。
+4. データベースから定期的に設定を同期するサーバーには `SYNC_FREQUENCY` を設定する。
+5. マスター以外のノードでは、オプションで `FRONTEND_BASE_URL` を設定して、ページ要求をマスターサーバーにリダイレクトすることができます。
+6. マスター以外のノードには Redis を個別にインストールし、`REDIS_CONN_STRING` を設定して、キャッシュの有効期限が切れていないときにデータベースにゼロレイテンシーでアクセスできるようにする。
+7. メインサーバーでもデータベースへのアクセスが高レイテンシになる場合は、Redis を有効にし、`SYNC_FREQUENCY` を設定してデータベースから定期的に設定を同期する必要がある。
+
+環境変数の詳しい使い方については、[環境変数](#environment-variables)のセクションを参照してください。
+
+### コントロールパネル(例: Baota)への展開
+詳しい手順は [#175](https://github.com/songquanpeng/one-api/issues/175) を参照してください。
+
+デプロイ後に空白のページが表示される場合は、[#97](https://github.com/songquanpeng/one-api/issues/97) を参照してください。
+
+### サードパーティプラットフォームへのデプロイ
diff --git a/README.md b/README.md
-          程序下载
-        ·
          部署教程
        ·
+          使用方法
+        ·
          意见反馈
        ·
          截图展示
@@ -40,47 +45,88 @@ _✨ All in one 的 OpenAI 接口,整合各种 API 访问方式,开箱即用
          在线演示
        ·
          常见问题
+        ·
+          相关项目
+        ·
+          赞赏支持
-> **Warning**:从 `v0.2` 版本升级到 `v0.3` 版本需要手动迁移数据库,请手动执行[数据库迁移脚本](./bin/migration_v0.2-v0.3.sql)。 +> **Note** +> 本项目为开源项目,使用者必须在遵循 OpenAI 的[使用条款](https://openai.com/policies/terms-of-use)以及**法律法规**的情况下使用,不得用于非法用途。 +> +> 根据[《生成式人工智能服务管理暂行办法》](http://www.cac.gov.cn/2023-07/13/c_1690898327029107.htm)的要求,请勿对中国地区公众提供一切未经备案的生成式人工智能服务。 +> **Warning** +> 使用 Docker 拉取的最新镜像可能是 `alpha` 版本,如果追求稳定性请手动指定版本。 + +> **Warning** +> 使用 root 用户初次登录系统后,务必修改默认密码 `123456`! ## 功能 -1. 支持多种 API 访问渠道,欢迎 PR 或提 issue 添加更多渠道: - + [x] OpenAI 官方通道 - + [x] **Azure OpenAI API** +1. 支持多种大模型: + + [x] [OpenAI ChatGPT 系列模型](https://platform.openai.com/docs/guides/gpt/chat-completions-api)(支持 [Azure OpenAI API](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference)) + + [x] [Anthropic Claude 系列模型](https://anthropic.com) + + [x] [Google PaLM2 系列模型](https://developers.generativeai.google) + + [x] [百度文心一言系列模型](https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html) + + [x] [阿里通义千问系列模型](https://help.aliyun.com/document_detail/2400395.html) + + [x] [讯飞星火认知大模型](https://www.xfyun.cn/doc/spark/Web.html) + + [x] [智谱 ChatGLM 系列模型](https://bigmodel.cn) + + [x] [360 智脑](https://ai.360.cn) + + [x] [腾讯混元大模型](https://cloud.tencent.com/document/product/1729) +2. 支持配置镜像以及众多第三方代理服务: + + [x] [OpenAI-SB](https://openai-sb.com) + + [x] [CloseAI](https://referer.shadowai.xyz/r/2412) + [x] [API2D](https://api2d.com/r/197971) + [x] [OhMyGPT](https://aigptx.top?aff=uFpUl2Kf) + [x] [AI Proxy](https://aiproxy.io/?i=OneAPI) (邀请码:`OneAPI`) - + [x] [AI.LS](https://ai.ls) - + [x] [OpenAI Max](https://openaimax.com) - + [x] [OpenAI-SB](https://openai-sb.com) - + [x] [CloseAI](https://console.openai-asia.com/r/2412) - + [x] 自定义渠道:例如使用自行搭建的 OpenAI 代理 -2. 支持通过**负载均衡**的方式访问多个渠道。 -3. 支持 **stream 模式**,可以通过流式传输实现打字机效果。 -4. 支持**多机部署**,[详见此处](#多机部署)。 -5. 支持**令牌管理**,设置令牌的过期时间和使用次数。 -6. 支持**兑换码管理**,支持批量生成和导出兑换码,可使用兑换码为账户进行充值。 -7. 支持**通道管理**,批量创建通道。 -8. 支持发布公告,设置充值链接,设置新用户初始额度。 -9. 支持丰富的**自定义**设置, - 1. 支持自定义系统名称,logo 以及页脚。 - 2. 支持自定义首页和关于页面,可以选择使用 HTML & Markdown 代码进行自定义,或者使用一个单独的网页通过 iframe 嵌入。 -10. 支持通过系统访问令牌访问管理 API。 -11. 支持用户管理,支持**多种用户登录注册方式**: - + 邮箱登录注册以及通过邮箱进行密码重置。 + + [x] 自定义渠道:例如各种未收录的第三方代理服务 +3. 支持通过**负载均衡**的方式访问多个渠道。 +4. 支持 **stream 模式**,可以通过流式传输实现打字机效果。 +5. 支持**多机部署**,[详见此处](#多机部署)。 +6. 支持**令牌管理**,设置令牌的过期时间和额度。 +7. 支持**兑换码管理**,支持批量生成和导出兑换码,可使用兑换码为账户进行充值。 +8. 支持**通道管理**,批量创建通道。 +9. 支持**用户分组**以及**渠道分组**,支持为不同分组设置不同的倍率。 +10. 支持渠道**设置模型列表**。 +11. 支持**查看额度明细**。 +12. 支持**用户邀请奖励**。 +13. 支持以美元为单位显示额度。 +14. 支持发布公告,设置充值链接,设置新用户初始额度。 +15. 支持模型映射,重定向用户的请求模型。 +16. 支持失败自动重试。 +17. 支持绘图接口。 +18. 支持 [Cloudflare AI Gateway](https://developers.cloudflare.com/ai-gateway/providers/openai/),渠道设置的代理部分填写 `https://gateway.ai.cloudflare.com/v1/ACCOUNT_TAG/GATEWAY/openai` 即可。 +19. 支持丰富的**自定义**设置, + 1. 支持自定义系统名称,logo 以及页脚。 + 2. 支持自定义首页和关于页面,可以选择使用 HTML & Markdown 代码进行自定义,或者使用一个单独的网页通过 iframe 嵌入。 +20. 支持通过系统访问令牌访问管理 API。 +21. 支持 Cloudflare Turnstile 用户校验。 +22. 支持用户管理,支持**多种用户登录注册方式**: + + 邮箱登录注册(支持注册邮箱白名单)以及通过邮箱进行密码重置。 + [GitHub 开放授权](https://github.com/settings/applications/new)。 + 微信公众号授权(需要额外部署 [WeChat Server](https://github.com/songquanpeng/wechat-server))。 -12. 
未来其他大模型开放 API 后,将第一时间支持,并将其封装成同样的 API 访问方式。 ## 部署 ### 基于 Docker 进行部署 -执行:`docker run -d --restart always -p 3000:3000 -v /home/ubuntu/data/one-api:/data justsong/one-api` +```shell +# 使用 SQLite 的部署命令: +docker run --name one-api -d --restart always -p 3000:3000 -e TZ=Asia/Shanghai -v /home/ubuntu/data/one-api:/data justsong/one-api +# 使用 MySQL 的部署命令,在上面的基础上添加 `-e SQL_DSN="root:123456@tcp(localhost:3306)/oneapi"`,请自行修改数据库连接参数,不清楚如何修改请参见下面环境变量一节。 +# 例如: +docker run --name one-api -d --restart always -p 3000:3000 -e SQL_DSN="root:123456@tcp(localhost:3306)/oneapi" -e TZ=Asia/Shanghai -v /home/ubuntu/data/one-api:/data justsong/one-api +``` -`-p 3000:3000` 中的第一个 `3000` 是宿主机的端口,可以根据需要进行修改。 +其中,`-p 3000:3000` 中的第一个 `3000` 是宿主机的端口,可以根据需要进行修改。 -数据将会保存在宿主机的 `/home/ubuntu/data/one-api` 目录,请确保该目录存在且具有写入权限,或者更改为合适的目录。 +数据和日志将会保存在宿主机的 `/home/ubuntu/data/one-api` 目录,请确保该目录存在且具有写入权限,或者更改为合适的目录。 + +如果启动失败,请添加 `--privileged=true`,具体参考 https://github.com/songquanpeng/one-api/issues/482 。 + +如果上面的镜像无法拉取,可以尝试使用 GitHub 的 Docker 镜像,将上面的 `justsong/one-api` 替换为 `ghcr.io/songquanpeng/one-api` 即可。 + +如果你的并发量较大,**务必**设置 `SQL_DSN`,详见下面[环境变量](#环境变量)一节。 + +更新命令:`docker run --rm -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower -cR` Nginx 的参考配置: ``` @@ -95,6 +141,7 @@ server{ proxy_set_header X-Forwarded-For $remote_addr; proxy_cache_bypass $http_upgrade; proxy_set_header Accept-Encoding gzip; + proxy_read_timeout 300s; # GPT-4 需要较长的超时时间,请自行调整 } } ``` @@ -111,6 +158,21 @@ sudo certbot --nginx sudo service nginx restart ``` +初始账号用户名为 `root`,密码为 `123456`。 + + +### 基于 Docker Compose 进行部署 + +> 仅启动方式不同,参数设置不变,请参考基于 Docker 部署部分 + +```shell +# 目前支持 MySQL 启动,数据存储在 ./data/mysql 文件夹内 +docker-compose up -d + +# 查看部署状态 +docker-compose ps +``` + ### 手动部署 1. 从 [GitHub Releases](https://github.com/songquanpeng/one-api/releases/latest) 下载可执行文件或者从源码编译: ```shell @@ -120,7 +182,7 @@ sudo service nginx restart cd one-api/web npm install npm run build - + # 构建后端 cd .. go mod download @@ -137,12 +199,95 @@ sudo service nginx restart ### 多机部署 1. 所有服务器 `SESSION_SECRET` 设置一样的值。 -2. 必须设置 `SQL_DSN`,使用 MySQL 数据库而非 SQLite,请自行配置主备数据库同步。 -3. 所有从服务器必须设置 `SYNC_FREQUENCY`,以定期从数据库同步配置。 -4. 从服务器可以选择设置 `FRONTEND_BASE_URL`,以重定向页面请求到主服务器。 +2. 必须设置 `SQL_DSN`,使用 MySQL 数据库而非 SQLite,所有服务器连接同一个数据库。 +3. 所有从服务器必须设置 `NODE_TYPE` 为 `slave`,不设置则默认为主服务器。 +4. 设置 `SYNC_FREQUENCY` 后服务器将定期从数据库同步配置,在使用远程数据库的情况下,推荐设置该项并启用 Redis,无论主从。 +5. 从服务器可以选择设置 `FRONTEND_BASE_URL`,以重定向页面请求到主服务器。 +6. 从服务器上**分别**装好 Redis,设置好 `REDIS_CONN_STRING`,这样可以做到在缓存未过期的情况下数据库零访问,可以减少延迟。 +7. 
如果主服务器访问数据库延迟也比较高,则也需要启用 Redis,并设置 `SYNC_FREQUENCY`,以定期从数据库同步配置。 环境变量的具体使用方法详见[此处](#环境变量)。 +### 宝塔部署教程 + +详见 [#175](https://github.com/songquanpeng/one-api/issues/175)。 + +如果部署后访问出现空白页面,详见 [#97](https://github.com/songquanpeng/one-api/issues/97)。 + +### 部署第三方服务配合 One API 使用 +> 欢迎 PR 添加更多示例。 + +#### ChatGPT Next Web +项目主页:https://github.com/Yidadaa/ChatGPT-Next-Web + +```bash +docker run --name chat-next-web -d -p 3001:3000 yidadaa/chatgpt-next-web +``` + +注意修改端口号,之后在页面上设置接口地址(例如:https://openai.justsong.cn/ )和 API Key 即可。 + +#### ChatGPT Web +项目主页:https://github.com/Chanzhaoyu/chatgpt-web + +```bash +docker run --name chatgpt-web -d -p 3002:3002 -e OPENAI_API_BASE_URL=https://openai.justsong.cn -e OPENAI_API_KEY=sk-xxx chenzhaoyu94/chatgpt-web +``` + +注意修改端口号、`OPENAI_API_BASE_URL` 和 `OPENAI_API_KEY`。 + +#### QChatGPT - QQ机器人 +项目主页:https://github.com/RockChinQ/QChatGPT + +根据文档完成部署后,在`config.py`设置配置项`openai_config`的`reverse_proxy`为 One API 后端地址,设置`api_key`为 One API 生成的key,并在配置项`completion_api_params`的`model`参数设置为 One API 支持的模型名称。 + +可安装 [Switcher 插件](https://github.com/RockChinQ/Switcher)在运行时切换所使用的模型。 + +### 部署到第三方平台 +您好,你正在进行%s密码重置。
"+ - "点击此处进行密码重置。
"+ - "重置链接 %d 分钟内有效,如果不是本人操作,请忽略。
", common.SystemName, link, common.VerificationValidMinutes) + "点击 此处 进行密码重置。
"+ + "如果链接无法点击,请尝试点击下面的链接或将其复制到浏览器中打开:
%s
重置链接 %d 分钟内有效,如果不是本人操作,请忽略。
", common.SystemName, link, link, common.VerificationValidMinutes) err := common.SendEmail(subject, email, content) if err != nil { c.JSON(http.StatusOK, gin.H{ diff --git a/controller/model.go b/controller/model.go index 9825b4ab..7bd9d097 100644 --- a/controller/model.go +++ b/controller/model.go @@ -2,6 +2,7 @@ package controller import ( "fmt" + "github.com/gin-gonic/gin" ) @@ -23,20 +24,21 @@ type OpenAIModelPermission struct { } type OpenAIModels struct { - Id string `json:"id"` - Object string `json:"object"` - Created int `json:"created"` - OwnedBy string `json:"owned_by"` - Permission OpenAIModelPermission `json:"permission"` - Root string `json:"root"` - Parent *string `json:"parent"` + Id string `json:"id"` + Object string `json:"object"` + Created int `json:"created"` + OwnedBy string `json:"owned_by"` + Permission []OpenAIModelPermission `json:"permission"` + Root string `json:"root"` + Parent *string `json:"parent"` } var openAIModels []OpenAIModels var openAIModelsMap map[string]OpenAIModels func init() { - permission := OpenAIModelPermission{ + var permission []OpenAIModelPermission + permission = append(permission, OpenAIModelPermission{ Id: "modelperm-LwHkVFn8AcMItP432fKKDIKJ", Object: "model_permission", Created: 1626777600, @@ -49,9 +51,27 @@ func init() { Organization: "*", Group: nil, IsBlocking: false, - } + }) // https://platform.openai.com/docs/models/model-endpoint-compatibility openAIModels = []OpenAIModels{ + { + Id: "dall-e", + Object: "model", + Created: 1677649963, + OwnedBy: "openai", + Permission: permission, + Root: "dall-e", + Parent: nil, + }, + { + Id: "whisper-1", + Object: "model", + Created: 1677649963, + OwnedBy: "openai", + Permission: permission, + Root: "whisper-1", + Parent: nil, + }, { Id: "gpt-3.5-turbo", Object: "model", @@ -70,6 +90,51 @@ func init() { Root: "gpt-3.5-turbo-0301", Parent: nil, }, + { + Id: "gpt-3.5-turbo-0613", + Object: "model", + Created: 1677649963, + OwnedBy: "openai", + Permission: permission, + Root: "gpt-3.5-turbo-0613", + Parent: nil, + }, + { + Id: "gpt-3.5-turbo-16k", + Object: "model", + Created: 1677649963, + OwnedBy: "openai", + Permission: permission, + Root: "gpt-3.5-turbo-16k", + Parent: nil, + }, + { + Id: "gpt-3.5-turbo-16k-0613", + Object: "model", + Created: 1677649963, + OwnedBy: "openai", + Permission: permission, + Root: "gpt-3.5-turbo-16k-0613", + Parent: nil, + }, + { + Id: "gpt-3.5-turbo-1106", + Object: "model", + Created: 1699593571, + OwnedBy: "openai", + Permission: permission, + Root: "gpt-3.5-turbo-1106", + Parent: nil, + }, + { + Id: "gpt-3.5-turbo-instruct", + Object: "model", + Created: 1677649963, + OwnedBy: "openai", + Permission: permission, + Root: "gpt-3.5-turbo-instruct", + Parent: nil, + }, { Id: "gpt-4", Object: "model", @@ -88,6 +153,15 @@ func init() { Root: "gpt-4-0314", Parent: nil, }, + { + Id: "gpt-4-0613", + Object: "model", + Created: 1677649963, + OwnedBy: "openai", + Permission: permission, + Root: "gpt-4-0613", + Parent: nil, + }, { Id: "gpt-4-32k", Object: "model", @@ -107,12 +181,30 @@ func init() { Parent: nil, }, { - Id: "gpt-3.5-turbo", + Id: "gpt-4-32k-0613", Object: "model", Created: 1677649963, OwnedBy: "openai", Permission: permission, - Root: "gpt-3.5-turbo", + Root: "gpt-4-32k-0613", + Parent: nil, + }, + { + Id: "gpt-4-1106-preview", + Object: "model", + Created: 1699593571, + OwnedBy: "openai", + Permission: permission, + Root: "gpt-4-1106-preview", + Parent: nil, + }, + { + Id: "gpt-4-vision-preview", + Object: "model", + Created: 1699593571, + OwnedBy: 
"openai", + Permission: permission, + Root: "gpt-4-vision-preview", Parent: nil, }, { @@ -124,6 +216,267 @@ func init() { Root: "text-embedding-ada-002", Parent: nil, }, + { + Id: "text-davinci-003", + Object: "model", + Created: 1677649963, + OwnedBy: "openai", + Permission: permission, + Root: "text-davinci-003", + Parent: nil, + }, + { + Id: "text-davinci-002", + Object: "model", + Created: 1677649963, + OwnedBy: "openai", + Permission: permission, + Root: "text-davinci-002", + Parent: nil, + }, + { + Id: "text-curie-001", + Object: "model", + Created: 1677649963, + OwnedBy: "openai", + Permission: permission, + Root: "text-curie-001", + Parent: nil, + }, + { + Id: "text-babbage-001", + Object: "model", + Created: 1677649963, + OwnedBy: "openai", + Permission: permission, + Root: "text-babbage-001", + Parent: nil, + }, + { + Id: "text-ada-001", + Object: "model", + Created: 1677649963, + OwnedBy: "openai", + Permission: permission, + Root: "text-ada-001", + Parent: nil, + }, + { + Id: "text-moderation-latest", + Object: "model", + Created: 1677649963, + OwnedBy: "openai", + Permission: permission, + Root: "text-moderation-latest", + Parent: nil, + }, + { + Id: "text-moderation-stable", + Object: "model", + Created: 1677649963, + OwnedBy: "openai", + Permission: permission, + Root: "text-moderation-stable", + Parent: nil, + }, + { + Id: "text-davinci-edit-001", + Object: "model", + Created: 1677649963, + OwnedBy: "openai", + Permission: permission, + Root: "text-davinci-edit-001", + Parent: nil, + }, + { + Id: "code-davinci-edit-001", + Object: "model", + Created: 1677649963, + OwnedBy: "openai", + Permission: permission, + Root: "code-davinci-edit-001", + Parent: nil, + }, + { + Id: "claude-instant-1", + Object: "model", + Created: 1677649963, + OwnedBy: "anthropic", + Permission: permission, + Root: "claude-instant-1", + Parent: nil, + }, + { + Id: "claude-2", + Object: "model", + Created: 1677649963, + OwnedBy: "anthropic", + Permission: permission, + Root: "claude-2", + Parent: nil, + }, + { + Id: "ERNIE-Bot", + Object: "model", + Created: 1677649963, + OwnedBy: "baidu", + Permission: permission, + Root: "ERNIE-Bot", + Parent: nil, + }, + { + Id: "ERNIE-Bot-turbo", + Object: "model", + Created: 1677649963, + OwnedBy: "baidu", + Permission: permission, + Root: "ERNIE-Bot-turbo", + Parent: nil, + }, + { + Id: "ERNIE-Bot-4", + Object: "model", + Created: 1677649963, + OwnedBy: "baidu", + Permission: permission, + Root: "ERNIE-Bot-4", + Parent: nil, + }, + { + Id: "Embedding-V1", + Object: "model", + Created: 1677649963, + OwnedBy: "baidu", + Permission: permission, + Root: "Embedding-V1", + Parent: nil, + }, + { + Id: "PaLM-2", + Object: "model", + Created: 1677649963, + OwnedBy: "google", + Permission: permission, + Root: "PaLM-2", + Parent: nil, + }, + { + Id: "chatglm_turbo", + Object: "model", + Created: 1677649963, + OwnedBy: "zhipu", + Permission: permission, + Root: "chatglm_turbo", + Parent: nil, + }, + { + Id: "chatglm_pro", + Object: "model", + Created: 1677649963, + OwnedBy: "zhipu", + Permission: permission, + Root: "chatglm_pro", + Parent: nil, + }, + { + Id: "chatglm_std", + Object: "model", + Created: 1677649963, + OwnedBy: "zhipu", + Permission: permission, + Root: "chatglm_std", + Parent: nil, + }, + { + Id: "chatglm_lite", + Object: "model", + Created: 1677649963, + OwnedBy: "zhipu", + Permission: permission, + Root: "chatglm_lite", + Parent: nil, + }, + { + Id: "qwen-turbo", + Object: "model", + Created: 1677649963, + OwnedBy: "ali", + Permission: permission, + Root: 
"qwen-turbo", + Parent: nil, + }, + { + Id: "qwen-plus", + Object: "model", + Created: 1677649963, + OwnedBy: "ali", + Permission: permission, + Root: "qwen-plus", + Parent: nil, + }, + { + Id: "text-embedding-v1", + Object: "model", + Created: 1677649963, + OwnedBy: "ali", + Permission: permission, + Root: "text-embedding-v1", + Parent: nil, + }, + { + Id: "SparkDesk", + Object: "model", + Created: 1677649963, + OwnedBy: "xunfei", + Permission: permission, + Root: "SparkDesk", + Parent: nil, + }, + { + Id: "360GPT_S2_V9", + Object: "model", + Created: 1677649963, + OwnedBy: "360", + Permission: permission, + Root: "360GPT_S2_V9", + Parent: nil, + }, + { + Id: "embedding-bert-512-v1", + Object: "model", + Created: 1677649963, + OwnedBy: "360", + Permission: permission, + Root: "embedding-bert-512-v1", + Parent: nil, + }, + { + Id: "embedding_s1_v1", + Object: "model", + Created: 1677649963, + OwnedBy: "360", + Permission: permission, + Root: "embedding_s1_v1", + Parent: nil, + }, + { + Id: "semantic_similarity_s1_v1", + Object: "model", + Created: 1677649963, + OwnedBy: "360", + Permission: permission, + Root: "semantic_similarity_s1_v1", + Parent: nil, + }, + { + Id: "hunyuan", + Object: "model", + Created: 1677649963, + OwnedBy: "tencent", + Permission: permission, + Root: "hunyuan", + Parent: nil, + }, } openAIModelsMap = make(map[string]OpenAIModels) for _, model := range openAIModels { @@ -132,7 +485,10 @@ func init() { } func ListModels(c *gin.Context) { - c.JSON(200, openAIModels) + c.JSON(200, gin.H{ + "object": "list", + "data": openAIModels, + }) } func RetrieveModel(c *gin.Context) { diff --git a/controller/option.go b/controller/option.go index b5b675c6..bbf83578 100644 --- a/controller/option.go +++ b/controller/option.go @@ -2,18 +2,19 @@ package controller import ( "encoding/json" - "github.com/gin-gonic/gin" "net/http" "one-api/common" "one-api/model" "strings" + + "github.com/gin-gonic/gin" ) func GetOptions(c *gin.Context) { var options []*model.Option common.OptionMapRWMutex.Lock() for k, v := range common.OptionMap { - if strings.Contains(k, "Token") || strings.Contains(k, "Secret") { + if strings.HasSuffix(k, "Token") || strings.HasSuffix(k, "Secret") { continue } options = append(options, &model.Option{ @@ -45,7 +46,15 @@ func UpdateOption(c *gin.Context) { if option.Value == "true" && common.GitHubClientId == "" { c.JSON(http.StatusOK, gin.H{ "success": false, - "message": "无法启用 GitHub OAuth,请先填入 GitHub Client ID 以及 GitHub Client Secret!", + "message": "无法启用 GitHub OAuth,请先填入 GitHub Client Id 以及 GitHub Client Secret!", + }) + return + } + case "EmailDomainRestrictionEnabled": + if option.Value == "true" && len(common.EmailDomainWhitelist) == 0 { + c.JSON(http.StatusOK, gin.H{ + "success": false, + "message": "无法启用邮箱域名限制,请先填入限制的邮箱域名!", }) return } diff --git a/controller/relay-aiproxy.go b/controller/relay-aiproxy.go new file mode 100644 index 00000000..d0159ce8 --- /dev/null +++ b/controller/relay-aiproxy.go @@ -0,0 +1,220 @@ +package controller + +import ( + "bufio" + "encoding/json" + "fmt" + "github.com/gin-gonic/gin" + "io" + "net/http" + "one-api/common" + "strconv" + "strings" +) + +// https://docs.aiproxy.io/dev/library#使用已经定制好的知识库进行对话问答 + +type AIProxyLibraryRequest struct { + Model string `json:"model"` + Query string `json:"query"` + LibraryId string `json:"libraryId"` + Stream bool `json:"stream"` +} + +type AIProxyLibraryError struct { + ErrCode int `json:"errCode"` + Message string `json:"message"` +} + +type AIProxyLibraryDocument struct { + Title 
string `json:"title"` + URL string `json:"url"` +} + +type AIProxyLibraryResponse struct { + Success bool `json:"success"` + Answer string `json:"answer"` + Documents []AIProxyLibraryDocument `json:"documents"` + AIProxyLibraryError +} + +type AIProxyLibraryStreamResponse struct { + Content string `json:"content"` + Finish bool `json:"finish"` + Model string `json:"model"` + Documents []AIProxyLibraryDocument `json:"documents"` +} + +func requestOpenAI2AIProxyLibrary(request GeneralOpenAIRequest) *AIProxyLibraryRequest { + query := "" + if len(request.Messages) != 0 { + query = request.Messages[len(request.Messages)-1].Content + } + return &AIProxyLibraryRequest{ + Model: request.Model, + Stream: request.Stream, + Query: query, + } +} + +func aiProxyDocuments2Markdown(documents []AIProxyLibraryDocument) string { + if len(documents) == 0 { + return "" + } + content := "\n\n参考文档:\n" + for i, document := range documents { + content += fmt.Sprintf("%d. [%s](%s)\n", i+1, document.Title, document.URL) + } + return content +} + +func responseAIProxyLibrary2OpenAI(response *AIProxyLibraryResponse) *OpenAITextResponse { + content := response.Answer + aiProxyDocuments2Markdown(response.Documents) + choice := OpenAITextResponseChoice{ + Index: 0, + Message: Message{ + Role: "assistant", + Content: content, + }, + FinishReason: "stop", + } + fullTextResponse := OpenAITextResponse{ + Id: common.GetUUID(), + Object: "chat.completion", + Created: common.GetTimestamp(), + Choices: []OpenAITextResponseChoice{choice}, + } + return &fullTextResponse +} + +func documentsAIProxyLibrary(documents []AIProxyLibraryDocument) *ChatCompletionsStreamResponse { + var choice ChatCompletionsStreamResponseChoice + choice.Delta.Content = aiProxyDocuments2Markdown(documents) + choice.FinishReason = &stopFinishReason + return &ChatCompletionsStreamResponse{ + Id: common.GetUUID(), + Object: "chat.completion.chunk", + Created: common.GetTimestamp(), + Model: "", + Choices: []ChatCompletionsStreamResponseChoice{choice}, + } +} + +func streamResponseAIProxyLibrary2OpenAI(response *AIProxyLibraryStreamResponse) *ChatCompletionsStreamResponse { + var choice ChatCompletionsStreamResponseChoice + choice.Delta.Content = response.Content + return &ChatCompletionsStreamResponse{ + Id: common.GetUUID(), + Object: "chat.completion.chunk", + Created: common.GetTimestamp(), + Model: response.Model, + Choices: []ChatCompletionsStreamResponseChoice{choice}, + } +} + +func aiProxyLibraryStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) { + var usage Usage + scanner := bufio.NewScanner(resp.Body) + scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + if i := strings.Index(string(data), "\n"); i >= 0 { + return i + 1, data[0:i], nil + } + if atEOF { + return len(data), data, nil + } + return 0, nil, nil + }) + dataChan := make(chan string) + stopChan := make(chan bool) + go func() { + for scanner.Scan() { + data := scanner.Text() + if len(data) < 5 { // ignore blank line or wrong format + continue + } + if data[:5] != "data:" { + continue + } + data = data[5:] + dataChan <- data + } + stopChan <- true + }() + setEventStreamHeaders(c) + var documents []AIProxyLibraryDocument + c.Stream(func(w io.Writer) bool { + select { + case data := <-dataChan: + var AIProxyLibraryResponse AIProxyLibraryStreamResponse + err := json.Unmarshal([]byte(data), &AIProxyLibraryResponse) + if err != nil { + common.SysError("error 
unmarshalling stream response: " + err.Error()) + return true + } + if len(AIProxyLibraryResponse.Documents) != 0 { + documents = AIProxyLibraryResponse.Documents + } + response := streamResponseAIProxyLibrary2OpenAI(&AIProxyLibraryResponse) + jsonResponse, err := json.Marshal(response) + if err != nil { + common.SysError("error marshalling stream response: " + err.Error()) + return true + } + c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)}) + return true + case <-stopChan: + response := documentsAIProxyLibrary(documents) + jsonResponse, err := json.Marshal(response) + if err != nil { + common.SysError("error marshalling stream response: " + err.Error()) + return true + } + c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)}) + c.Render(-1, common.CustomEvent{Data: "data: [DONE]"}) + return false + } + }) + err := resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil + } + return nil, &usage +} + +func aiProxyLibraryHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) { + var AIProxyLibraryResponse AIProxyLibraryResponse + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil + } + err = resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil + } + err = json.Unmarshal(responseBody, &AIProxyLibraryResponse) + if err != nil { + return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil + } + if AIProxyLibraryResponse.ErrCode != 0 { + return &OpenAIErrorWithStatusCode{ + OpenAIError: OpenAIError{ + Message: AIProxyLibraryResponse.Message, + Type: strconv.Itoa(AIProxyLibraryResponse.ErrCode), + Code: AIProxyLibraryResponse.ErrCode, + }, + StatusCode: resp.StatusCode, + }, nil + } + fullTextResponse := responseAIProxyLibrary2OpenAI(&AIProxyLibraryResponse) + jsonResponse, err := json.Marshal(fullTextResponse) + if err != nil { + return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil + } + c.Writer.Header().Set("Content-Type", "application/json") + c.Writer.WriteHeader(resp.StatusCode) + _, err = c.Writer.Write(jsonResponse) + return nil, &fullTextResponse.Usage +} diff --git a/controller/relay-ali.go b/controller/relay-ali.go new file mode 100644 index 00000000..50dc743c --- /dev/null +++ b/controller/relay-ali.go @@ -0,0 +1,329 @@ +package controller + +import ( + "bufio" + "encoding/json" + "github.com/gin-gonic/gin" + "io" + "net/http" + "one-api/common" + "strings" +) + +// https://help.aliyun.com/document_detail/613695.html?spm=a2c4g.2399480.0.0.1adb778fAdzP9w#341800c0f8w0r + +type AliMessage struct { + User string `json:"user"` + Bot string `json:"bot"` +} + +type AliInput struct { + Prompt string `json:"prompt"` + History []AliMessage `json:"history"` +} + +type AliParameters struct { + TopP float64 `json:"top_p,omitempty"` + TopK int `json:"top_k,omitempty"` + Seed uint64 `json:"seed,omitempty"` + EnableSearch bool `json:"enable_search,omitempty"` +} + +type AliChatRequest struct { + Model string `json:"model"` + Input AliInput `json:"input"` + Parameters AliParameters `json:"parameters,omitempty"` +} + +type AliEmbeddingRequest struct { + Model string `json:"model"` + Input struct { + Texts []string `json:"texts"` + } `json:"input"` + Parameters *struct { + TextType string 
`json:"text_type,omitempty"` + } `json:"parameters,omitempty"` +} + +type AliEmbedding struct { + Embedding []float64 `json:"embedding"` + TextIndex int `json:"text_index"` +} + +type AliEmbeddingResponse struct { + Output struct { + Embeddings []AliEmbedding `json:"embeddings"` + } `json:"output"` + Usage AliUsage `json:"usage"` + AliError +} + +type AliError struct { + Code string `json:"code"` + Message string `json:"message"` + RequestId string `json:"request_id"` +} + +type AliUsage struct { + InputTokens int `json:"input_tokens"` + OutputTokens int `json:"output_tokens"` + TotalTokens int `json:"total_tokens"` +} + +type AliOutput struct { + Text string `json:"text"` + FinishReason string `json:"finish_reason"` +} + +type AliChatResponse struct { + Output AliOutput `json:"output"` + Usage AliUsage `json:"usage"` + AliError +} + +func requestOpenAI2Ali(request GeneralOpenAIRequest) *AliChatRequest { + messages := make([]AliMessage, 0, len(request.Messages)) + prompt := "" + for i := 0; i < len(request.Messages); i++ { + message := request.Messages[i] + if message.Role == "system" { + messages = append(messages, AliMessage{ + User: message.Content, + Bot: "Okay", + }) + continue + } else { + if i == len(request.Messages)-1 { + prompt = message.Content + break + } + messages = append(messages, AliMessage{ + User: message.Content, + Bot: request.Messages[i+1].Content, + }) + i++ + } + } + return &AliChatRequest{ + Model: request.Model, + Input: AliInput{ + Prompt: prompt, + History: messages, + }, + //Parameters: AliParameters{ // ChatGPT's parameters are not compatible with Ali's + // TopP: request.TopP, + // TopK: 50, + // //Seed: 0, + // //EnableSearch: false, + //}, + } +} + +func embeddingRequestOpenAI2Ali(request GeneralOpenAIRequest) *AliEmbeddingRequest { + return &AliEmbeddingRequest{ + Model: "text-embedding-v1", + Input: struct { + Texts []string `json:"texts"` + }{ + Texts: request.ParseInput(), + }, + } +} + +func aliEmbeddingHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) { + var aliResponse AliEmbeddingResponse + err := json.NewDecoder(resp.Body).Decode(&aliResponse) + if err != nil { + return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil + } + + err = resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil + } + + if aliResponse.Code != "" { + return &OpenAIErrorWithStatusCode{ + OpenAIError: OpenAIError{ + Message: aliResponse.Message, + Type: aliResponse.Code, + Param: aliResponse.RequestId, + Code: aliResponse.Code, + }, + StatusCode: resp.StatusCode, + }, nil + } + + fullTextResponse := embeddingResponseAli2OpenAI(&aliResponse) + jsonResponse, err := json.Marshal(fullTextResponse) + if err != nil { + return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil + } + c.Writer.Header().Set("Content-Type", "application/json") + c.Writer.WriteHeader(resp.StatusCode) + _, err = c.Writer.Write(jsonResponse) + return nil, &fullTextResponse.Usage +} + +func embeddingResponseAli2OpenAI(response *AliEmbeddingResponse) *OpenAIEmbeddingResponse { + openAIEmbeddingResponse := OpenAIEmbeddingResponse{ + Object: "list", + Data: make([]OpenAIEmbeddingResponseItem, 0, len(response.Output.Embeddings)), + Model: "text-embedding-v1", + Usage: Usage{TotalTokens: response.Usage.TotalTokens}, + } + + for _, item := range response.Output.Embeddings { + openAIEmbeddingResponse.Data = 
append(openAIEmbeddingResponse.Data, OpenAIEmbeddingResponseItem{ + Object: `embedding`, + Index: item.TextIndex, + Embedding: item.Embedding, + }) + } + return &openAIEmbeddingResponse +} + +func responseAli2OpenAI(response *AliChatResponse) *OpenAITextResponse { + choice := OpenAITextResponseChoice{ + Index: 0, + Message: Message{ + Role: "assistant", + Content: response.Output.Text, + }, + FinishReason: response.Output.FinishReason, + } + fullTextResponse := OpenAITextResponse{ + Id: response.RequestId, + Object: "chat.completion", + Created: common.GetTimestamp(), + Choices: []OpenAITextResponseChoice{choice}, + Usage: Usage{ + PromptTokens: response.Usage.InputTokens, + CompletionTokens: response.Usage.OutputTokens, + TotalTokens: response.Usage.InputTokens + response.Usage.OutputTokens, + }, + } + return &fullTextResponse +} + +func streamResponseAli2OpenAI(aliResponse *AliChatResponse) *ChatCompletionsStreamResponse { + var choice ChatCompletionsStreamResponseChoice + choice.Delta.Content = aliResponse.Output.Text + if aliResponse.Output.FinishReason != "null" { + finishReason := aliResponse.Output.FinishReason + choice.FinishReason = &finishReason + } + response := ChatCompletionsStreamResponse{ + Id: aliResponse.RequestId, + Object: "chat.completion.chunk", + Created: common.GetTimestamp(), + Model: "ernie-bot", + Choices: []ChatCompletionsStreamResponseChoice{choice}, + } + return &response +} + +func aliStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) { + var usage Usage + scanner := bufio.NewScanner(resp.Body) + scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + if i := strings.Index(string(data), "\n"); i >= 0 { + return i + 1, data[0:i], nil + } + if atEOF { + return len(data), data, nil + } + return 0, nil, nil + }) + dataChan := make(chan string) + stopChan := make(chan bool) + go func() { + for scanner.Scan() { + data := scanner.Text() + if len(data) < 5 { // ignore blank line or wrong format + continue + } + if data[:5] != "data:" { + continue + } + data = data[5:] + dataChan <- data + } + stopChan <- true + }() + setEventStreamHeaders(c) + lastResponseText := "" + c.Stream(func(w io.Writer) bool { + select { + case data := <-dataChan: + var aliResponse AliChatResponse + err := json.Unmarshal([]byte(data), &aliResponse) + if err != nil { + common.SysError("error unmarshalling stream response: " + err.Error()) + return true + } + if aliResponse.Usage.OutputTokens != 0 { + usage.PromptTokens = aliResponse.Usage.InputTokens + usage.CompletionTokens = aliResponse.Usage.OutputTokens + usage.TotalTokens = aliResponse.Usage.InputTokens + aliResponse.Usage.OutputTokens + } + response := streamResponseAli2OpenAI(&aliResponse) + response.Choices[0].Delta.Content = strings.TrimPrefix(response.Choices[0].Delta.Content, lastResponseText) + lastResponseText = aliResponse.Output.Text + jsonResponse, err := json.Marshal(response) + if err != nil { + common.SysError("error marshalling stream response: " + err.Error()) + return true + } + c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)}) + return true + case <-stopChan: + c.Render(-1, common.CustomEvent{Data: "data: [DONE]"}) + return false + } + }) + err := resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil + } + return nil, &usage +} + +func aliHandler(c *gin.Context, resp *http.Response) 
(*OpenAIErrorWithStatusCode, *Usage) { + var aliResponse AliChatResponse + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil + } + err = resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil + } + err = json.Unmarshal(responseBody, &aliResponse) + if err != nil { + return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil + } + if aliResponse.Code != "" { + return &OpenAIErrorWithStatusCode{ + OpenAIError: OpenAIError{ + Message: aliResponse.Message, + Type: aliResponse.Code, + Param: aliResponse.RequestId, + Code: aliResponse.Code, + }, + StatusCode: resp.StatusCode, + }, nil + } + fullTextResponse := responseAli2OpenAI(&aliResponse) + jsonResponse, err := json.Marshal(fullTextResponse) + if err != nil { + return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil + } + c.Writer.Header().Set("Content-Type", "application/json") + c.Writer.WriteHeader(resp.StatusCode) + _, err = c.Writer.Write(jsonResponse) + return nil, &fullTextResponse.Usage +} diff --git a/controller/relay-audio.go b/controller/relay-audio.go new file mode 100644 index 00000000..53833108 --- /dev/null +++ b/controller/relay-audio.go @@ -0,0 +1,151 @@ +package controller + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "github.com/gin-gonic/gin" + "io" + "net/http" + "one-api/common" + "one-api/model" +) + +func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode { + audioModel := "whisper-1" + + tokenId := c.GetInt("token_id") + channelType := c.GetInt("channel") + channelId := c.GetInt("channel_id") + userId := c.GetInt("id") + group := c.GetString("group") + + preConsumedTokens := common.PreConsumedQuota + modelRatio := common.GetModelRatio(audioModel) + groupRatio := common.GetGroupRatio(group) + ratio := modelRatio * groupRatio + preConsumedQuota := int(float64(preConsumedTokens) * ratio) + userQuota, err := model.CacheGetUserQuota(userId) + if err != nil { + return errorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError) + } + if userQuota-preConsumedQuota < 0 { + return errorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden) + } + err = model.CacheDecreaseUserQuota(userId, preConsumedQuota) + if err != nil { + return errorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError) + } + if userQuota > 100*preConsumedQuota { + // in this case, we do not pre-consume quota + // because the user has enough quota + preConsumedQuota = 0 + } + if preConsumedQuota > 0 { + err := model.PreConsumeTokenQuota(tokenId, preConsumedQuota) + if err != nil { + return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden) + } + } + + // map model name + modelMapping := c.GetString("model_mapping") + if modelMapping != "" { + modelMap := make(map[string]string) + err := json.Unmarshal([]byte(modelMapping), &modelMap) + if err != nil { + return errorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError) + } + if modelMap[audioModel] != "" { + audioModel = modelMap[audioModel] + } + } + + baseURL := common.ChannelBaseURLs[channelType] + requestURL := c.Request.URL.String() + if c.GetString("base_url") != "" { + baseURL = c.GetString("base_url") + } + + fullRequestURL := getFullRequestURL(baseURL, requestURL, channelType) 
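+	// Forward the client's multipart body to the upstream verbatim; the relay
+	// never parses or re-encodes the audio payload itself.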
+ requestBody := c.Request.Body + + req, err := http.NewRequest(c.Request.Method, fullRequestURL, requestBody) + if err != nil { + return errorWrapper(err, "new_request_failed", http.StatusInternalServerError) + } + req.Header.Set("Authorization", c.Request.Header.Get("Authorization")) + req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type")) + req.Header.Set("Accept", c.Request.Header.Get("Accept")) + + resp, err := httpClient.Do(req) + if err != nil { + return errorWrapper(err, "do_request_failed", http.StatusInternalServerError) + } + + err = req.Body.Close() + if err != nil { + return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError) + } + err = c.Request.Body.Close() + if err != nil { + return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError) + } + var audioResponse AudioResponse + + defer func(ctx context.Context) { + go func() { + quota := countTokenText(audioResponse.Text, audioModel) + quotaDelta := quota - preConsumedQuota + err := model.PostConsumeTokenQuota(tokenId, quotaDelta) + if err != nil { + common.SysError("error consuming token remain quota: " + err.Error()) + } + err = model.CacheUpdateUserQuota(userId) + if err != nil { + common.SysError("error update user quota cache: " + err.Error()) + } + if quota != 0 { + tokenName := c.GetString("token_name") + logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio) + model.RecordConsumeLog(ctx, userId, channelId, 0, 0, audioModel, tokenName, quota, logContent) + model.UpdateUserUsedQuotaAndRequestCount(userId, quota) + channelId := c.GetInt("channel_id") + model.UpdateChannelUsedQuota(channelId, quota) + } + }() + }(c.Request.Context()) + + responseBody, err := io.ReadAll(resp.Body) + + if err != nil { + return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError) + } + err = resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError) + } + err = json.Unmarshal(responseBody, &audioResponse) + if err != nil { + return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError) + } + + resp.Body = io.NopCloser(bytes.NewBuffer(responseBody)) + + for k, v := range resp.Header { + c.Writer.Header().Set(k, v[0]) + } + c.Writer.WriteHeader(resp.StatusCode) + + _, err = io.Copy(c.Writer, resp.Body) + if err != nil { + return errorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError) + } + err = resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError) + } + return nil +} diff --git a/controller/relay-baidu.go b/controller/relay-baidu.go new file mode 100644 index 00000000..ed08ac04 --- /dev/null +++ b/controller/relay-baidu.go @@ -0,0 +1,359 @@ +package controller + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "github.com/gin-gonic/gin" + "io" + "net/http" + "one-api/common" + "strings" + "sync" + "time" +) + +// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/flfmc9do2 + +type BaiduTokenResponse struct { + ExpiresIn int `json:"expires_in"` + AccessToken string `json:"access_token"` +} + +type BaiduMessage struct { + Role string `json:"role"` + Content string `json:"content"` +} + +type BaiduChatRequest struct { + Messages []BaiduMessage `json:"messages"` + Stream bool `json:"stream"` + UserId string `json:"user_id,omitempty"` +} + +type BaiduError struct { + ErrorCode int `json:"error_code"` + ErrorMsg string `json:"error_msg"` +} + +type 
BaiduChatResponse struct { + Id string `json:"id"` + Object string `json:"object"` + Created int64 `json:"created"` + Result string `json:"result"` + IsTruncated bool `json:"is_truncated"` + NeedClearHistory bool `json:"need_clear_history"` + Usage Usage `json:"usage"` + BaiduError +} + +type BaiduChatStreamResponse struct { + BaiduChatResponse + SentenceId int `json:"sentence_id"` + IsEnd bool `json:"is_end"` +} + +type BaiduEmbeddingRequest struct { + Input []string `json:"input"` +} + +type BaiduEmbeddingData struct { + Object string `json:"object"` + Embedding []float64 `json:"embedding"` + Index int `json:"index"` +} + +type BaiduEmbeddingResponse struct { + Id string `json:"id"` + Object string `json:"object"` + Created int64 `json:"created"` + Data []BaiduEmbeddingData `json:"data"` + Usage Usage `json:"usage"` + BaiduError +} + +type BaiduAccessToken struct { + AccessToken string `json:"access_token"` + Error string `json:"error,omitempty"` + ErrorDescription string `json:"error_description,omitempty"` + ExpiresIn int64 `json:"expires_in,omitempty"` + ExpiresAt time.Time `json:"-"` +} + +var baiduTokenStore sync.Map + +func requestOpenAI2Baidu(request GeneralOpenAIRequest) *BaiduChatRequest { + messages := make([]BaiduMessage, 0, len(request.Messages)) + for _, message := range request.Messages { + if message.Role == "system" { + messages = append(messages, BaiduMessage{ + Role: "user", + Content: message.Content, + }) + messages = append(messages, BaiduMessage{ + Role: "assistant", + Content: "Okay", + }) + } else { + messages = append(messages, BaiduMessage{ + Role: message.Role, + Content: message.Content, + }) + } + } + return &BaiduChatRequest{ + Messages: messages, + Stream: request.Stream, + } +} + +func responseBaidu2OpenAI(response *BaiduChatResponse) *OpenAITextResponse { + choice := OpenAITextResponseChoice{ + Index: 0, + Message: Message{ + Role: "assistant", + Content: response.Result, + }, + FinishReason: "stop", + } + fullTextResponse := OpenAITextResponse{ + Id: response.Id, + Object: "chat.completion", + Created: response.Created, + Choices: []OpenAITextResponseChoice{choice}, + Usage: response.Usage, + } + return &fullTextResponse +} + +func streamResponseBaidu2OpenAI(baiduResponse *BaiduChatStreamResponse) *ChatCompletionsStreamResponse { + var choice ChatCompletionsStreamResponseChoice + choice.Delta.Content = baiduResponse.Result + if baiduResponse.IsEnd { + choice.FinishReason = &stopFinishReason + } + response := ChatCompletionsStreamResponse{ + Id: baiduResponse.Id, + Object: "chat.completion.chunk", + Created: baiduResponse.Created, + Model: "ernie-bot", + Choices: []ChatCompletionsStreamResponseChoice{choice}, + } + return &response +} + +func embeddingRequestOpenAI2Baidu(request GeneralOpenAIRequest) *BaiduEmbeddingRequest { + return &BaiduEmbeddingRequest{ + Input: request.ParseInput(), + } +} + +func embeddingResponseBaidu2OpenAI(response *BaiduEmbeddingResponse) *OpenAIEmbeddingResponse { + openAIEmbeddingResponse := OpenAIEmbeddingResponse{ + Object: "list", + Data: make([]OpenAIEmbeddingResponseItem, 0, len(response.Data)), + Model: "baidu-embedding", + Usage: response.Usage, + } + for _, item := range response.Data { + openAIEmbeddingResponse.Data = append(openAIEmbeddingResponse.Data, OpenAIEmbeddingResponseItem{ + Object: item.Object, + Index: item.Index, + Embedding: item.Embedding, + }) + } + return &openAIEmbeddingResponse +} + +func baiduStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) { + var usage Usage 
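+	// Baidu pushes SSE lines of the form `data: {...}`; the custom split function
+	// below emits one line per event, and the read loop strips the 6-byte "data: " prefix.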
+ scanner := bufio.NewScanner(resp.Body) + scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + if i := strings.Index(string(data), "\n"); i >= 0 { + return i + 1, data[0:i], nil + } + if atEOF { + return len(data), data, nil + } + return 0, nil, nil + }) + dataChan := make(chan string) + stopChan := make(chan bool) + go func() { + for scanner.Scan() { + data := scanner.Text() + if len(data) < 6 { // ignore blank line or wrong format + continue + } + data = data[6:] + dataChan <- data + } + stopChan <- true + }() + setEventStreamHeaders(c) + c.Stream(func(w io.Writer) bool { + select { + case data := <-dataChan: + var baiduResponse BaiduChatStreamResponse + err := json.Unmarshal([]byte(data), &baiduResponse) + if err != nil { + common.SysError("error unmarshalling stream response: " + err.Error()) + return true + } + if baiduResponse.Usage.TotalTokens != 0 { + usage.TotalTokens = baiduResponse.Usage.TotalTokens + usage.PromptTokens = baiduResponse.Usage.PromptTokens + usage.CompletionTokens = baiduResponse.Usage.TotalTokens - baiduResponse.Usage.PromptTokens + } + response := streamResponseBaidu2OpenAI(&baiduResponse) + jsonResponse, err := json.Marshal(response) + if err != nil { + common.SysError("error marshalling stream response: " + err.Error()) + return true + } + c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)}) + return true + case <-stopChan: + c.Render(-1, common.CustomEvent{Data: "data: [DONE]"}) + return false + } + }) + err := resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil + } + return nil, &usage +} + +func baiduHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) { + var baiduResponse BaiduChatResponse + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil + } + err = resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil + } + err = json.Unmarshal(responseBody, &baiduResponse) + if err != nil { + return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil + } + if baiduResponse.ErrorMsg != "" { + return &OpenAIErrorWithStatusCode{ + OpenAIError: OpenAIError{ + Message: baiduResponse.ErrorMsg, + Type: "baidu_error", + Param: "", + Code: baiduResponse.ErrorCode, + }, + StatusCode: resp.StatusCode, + }, nil + } + fullTextResponse := responseBaidu2OpenAI(&baiduResponse) + jsonResponse, err := json.Marshal(fullTextResponse) + if err != nil { + return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil + } + c.Writer.Header().Set("Content-Type", "application/json") + c.Writer.WriteHeader(resp.StatusCode) + _, err = c.Writer.Write(jsonResponse) + return nil, &fullTextResponse.Usage +} + +func baiduEmbeddingHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) { + var baiduResponse BaiduEmbeddingResponse + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil + } + err = resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil + } + err = json.Unmarshal(responseBody, &baiduResponse) + if err != nil { + return 
errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil + } + if baiduResponse.ErrorMsg != "" { + return &OpenAIErrorWithStatusCode{ + OpenAIError: OpenAIError{ + Message: baiduResponse.ErrorMsg, + Type: "baidu_error", + Param: "", + Code: baiduResponse.ErrorCode, + }, + StatusCode: resp.StatusCode, + }, nil + } + fullTextResponse := embeddingResponseBaidu2OpenAI(&baiduResponse) + jsonResponse, err := json.Marshal(fullTextResponse) + if err != nil { + return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil + } + c.Writer.Header().Set("Content-Type", "application/json") + c.Writer.WriteHeader(resp.StatusCode) + _, err = c.Writer.Write(jsonResponse) + return nil, &fullTextResponse.Usage +} + +func getBaiduAccessToken(apiKey string) (string, error) { + if val, ok := baiduTokenStore.Load(apiKey); ok { + var accessToken BaiduAccessToken + if accessToken, ok = val.(BaiduAccessToken); ok { + // soon this will expire + if time.Now().Add(time.Hour).After(accessToken.ExpiresAt) { + go func() { + _, _ = getBaiduAccessTokenHelper(apiKey) + }() + } + return accessToken.AccessToken, nil + } + } + accessToken, err := getBaiduAccessTokenHelper(apiKey) + if err != nil { + return "", err + } + if accessToken == nil { + return "", errors.New("getBaiduAccessToken return a nil token") + } + return (*accessToken).AccessToken, nil +} + +func getBaiduAccessTokenHelper(apiKey string) (*BaiduAccessToken, error) { + parts := strings.Split(apiKey, "|") + if len(parts) != 2 { + return nil, errors.New("invalid baidu apikey") + } + req, err := http.NewRequest("POST", fmt.Sprintf("https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=%s&client_secret=%s", + parts[0], parts[1]), nil) + if err != nil { + return nil, err + } + req.Header.Add("Content-Type", "application/json") + req.Header.Add("Accept", "application/json") + res, err := impatientHTTPClient.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + + var accessToken BaiduAccessToken + err = json.NewDecoder(res.Body).Decode(&accessToken) + if err != nil { + return nil, err + } + if accessToken.Error != "" { + return nil, errors.New(accessToken.Error + ": " + accessToken.ErrorDescription) + } + if accessToken.AccessToken == "" { + return nil, errors.New("getBaiduAccessTokenHelper get empty access token") + } + accessToken.ExpiresAt = time.Now().Add(time.Duration(accessToken.ExpiresIn) * time.Second) + baiduTokenStore.Store(apiKey, accessToken) + return &accessToken, nil +} diff --git a/controller/relay-claude.go b/controller/relay-claude.go new file mode 100644 index 00000000..1f4a3e7b --- /dev/null +++ b/controller/relay-claude.go @@ -0,0 +1,220 @@ +package controller + +import ( + "bufio" + "encoding/json" + "fmt" + "github.com/gin-gonic/gin" + "io" + "net/http" + "one-api/common" + "strings" +) + +type ClaudeMetadata struct { + UserId string `json:"user_id"` +} + +type ClaudeRequest struct { + Model string `json:"model"` + Prompt string `json:"prompt"` + MaxTokensToSample int `json:"max_tokens_to_sample"` + StopSequences []string `json:"stop_sequences,omitempty"` + Temperature float64 `json:"temperature,omitempty"` + TopP float64 `json:"top_p,omitempty"` + TopK int `json:"top_k,omitempty"` + //ClaudeMetadata `json:"metadata,omitempty"` + Stream bool `json:"stream,omitempty"` +} + +type ClaudeError struct { + Type string `json:"type"` + Message string `json:"message"` +} + +type ClaudeResponse struct { + Completion string `json:"completion"` + 
StopReason string `json:"stop_reason"` + Model string `json:"model"` + Error ClaudeError `json:"error"` +} + +func stopReasonClaude2OpenAI(reason string) string { + switch reason { + case "stop_sequence": + return "stop" + case "max_tokens": + return "length" + default: + return reason + } +} + +func requestOpenAI2Claude(textRequest GeneralOpenAIRequest) *ClaudeRequest { + claudeRequest := ClaudeRequest{ + Model: textRequest.Model, + Prompt: "", + MaxTokensToSample: textRequest.MaxTokens, + StopSequences: nil, + Temperature: textRequest.Temperature, + TopP: textRequest.TopP, + Stream: textRequest.Stream, + } + if claudeRequest.MaxTokensToSample == 0 { + claudeRequest.MaxTokensToSample = 1000000 + } + prompt := "" + for _, message := range textRequest.Messages { + if message.Role == "user" { + prompt += fmt.Sprintf("\n\nHuman: %s", message.Content) + } else if message.Role == "assistant" { + prompt += fmt.Sprintf("\n\nAssistant: %s", message.Content) + } else if message.Role == "system" { + prompt += fmt.Sprintf("\n\nSystem: %s", message.Content) + } + } + prompt += "\n\nAssistant:" + claudeRequest.Prompt = prompt + return &claudeRequest +} + +func streamResponseClaude2OpenAI(claudeResponse *ClaudeResponse) *ChatCompletionsStreamResponse { + var choice ChatCompletionsStreamResponseChoice + choice.Delta.Content = claudeResponse.Completion + finishReason := stopReasonClaude2OpenAI(claudeResponse.StopReason) + if finishReason != "null" { + choice.FinishReason = &finishReason + } + var response ChatCompletionsStreamResponse + response.Object = "chat.completion.chunk" + response.Model = claudeResponse.Model + response.Choices = []ChatCompletionsStreamResponseChoice{choice} + return &response +} + +func responseClaude2OpenAI(claudeResponse *ClaudeResponse) *OpenAITextResponse { + choice := OpenAITextResponseChoice{ + Index: 0, + Message: Message{ + Role: "assistant", + Content: strings.TrimPrefix(claudeResponse.Completion, " "), + Name: nil, + }, + FinishReason: stopReasonClaude2OpenAI(claudeResponse.StopReason), + } + fullTextResponse := OpenAITextResponse{ + Id: fmt.Sprintf("chatcmpl-%s", common.GetUUID()), + Object: "chat.completion", + Created: common.GetTimestamp(), + Choices: []OpenAITextResponseChoice{choice}, + } + return &fullTextResponse +} + +func claudeStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, string) { + responseText := "" + responseId := fmt.Sprintf("chatcmpl-%s", common.GetUUID()) + createdTime := common.GetTimestamp() + scanner := bufio.NewScanner(resp.Body) + scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + if i := strings.Index(string(data), "\r\n\r\n"); i >= 0 { + return i + 4, data[0:i], nil + } + if atEOF { + return len(data), data, nil + } + return 0, nil, nil + }) + dataChan := make(chan string) + stopChan := make(chan bool) + go func() { + for scanner.Scan() { + data := scanner.Text() + if !strings.HasPrefix(data, "event: completion") { + continue + } + data = strings.TrimPrefix(data, "event: completion\r\ndata: ") + dataChan <- data + } + stopChan <- true + }() + setEventStreamHeaders(c) + c.Stream(func(w io.Writer) bool { + select { + case data := <-dataChan: + // some implementations may add \r at the end of data + data = strings.TrimSuffix(data, "\r") + var claudeResponse ClaudeResponse + err := json.Unmarshal([]byte(data), &claudeResponse) + if err != nil { + common.SysError("error unmarshalling stream response: " + err.Error()) + return 
true + } + responseText += claudeResponse.Completion + response := streamResponseClaude2OpenAI(&claudeResponse) + response.Id = responseId + response.Created = createdTime + jsonStr, err := json.Marshal(response) + if err != nil { + common.SysError("error marshalling stream response: " + err.Error()) + return true + } + c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonStr)}) + return true + case <-stopChan: + c.Render(-1, common.CustomEvent{Data: "data: [DONE]"}) + return false + } + }) + err := resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), "" + } + return nil, responseText +} + +func claudeHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) { + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil + } + err = resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil + } + var claudeResponse ClaudeResponse + err = json.Unmarshal(responseBody, &claudeResponse) + if err != nil { + return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil + } + if claudeResponse.Error.Type != "" { + return &OpenAIErrorWithStatusCode{ + OpenAIError: OpenAIError{ + Message: claudeResponse.Error.Message, + Type: claudeResponse.Error.Type, + Param: "", + Code: claudeResponse.Error.Type, + }, + StatusCode: resp.StatusCode, + }, nil + } + fullTextResponse := responseClaude2OpenAI(&claudeResponse) + completionTokens := countTokenText(claudeResponse.Completion, model) + usage := Usage{ + PromptTokens: promptTokens, + CompletionTokens: completionTokens, + TotalTokens: promptTokens + completionTokens, + } + fullTextResponse.Usage = usage + jsonResponse, err := json.Marshal(fullTextResponse) + if err != nil { + return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil + } + c.Writer.Header().Set("Content-Type", "application/json") + c.Writer.WriteHeader(resp.StatusCode) + _, err = c.Writer.Write(jsonResponse) + return nil, &usage +} diff --git a/controller/relay-image.go b/controller/relay-image.go new file mode 100644 index 00000000..ccd52dce --- /dev/null +++ b/controller/relay-image.go @@ -0,0 +1,177 @@ +package controller + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "github.com/gin-gonic/gin" + "io" + "net/http" + "one-api/common" + "one-api/model" +) + +func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode { + imageModel := "dall-e" + + tokenId := c.GetInt("token_id") + channelType := c.GetInt("channel") + channelId := c.GetInt("channel_id") + userId := c.GetInt("id") + consumeQuota := c.GetBool("consume_quota") + group := c.GetString("group") + + var imageRequest ImageRequest + if consumeQuota { + err := common.UnmarshalBodyReusable(c, &imageRequest) + if err != nil { + return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest) + } + } + + // Prompt validation + if imageRequest.Prompt == "" { + return errorWrapper(errors.New("prompt is required"), "required_field_missing", http.StatusBadRequest) + } + + // Not "256x256", "512x512", or "1024x1024" + if imageRequest.Size != "" && imageRequest.Size != "256x256" && imageRequest.Size != "512x512" && imageRequest.Size != "1024x1024" { + return errorWrapper(errors.New("size must be one of 256x256, 512x512, or 
1024x1024"), "invalid_field_value", http.StatusBadRequest) + } + + // N should between 1 and 10 + if imageRequest.N != 0 && (imageRequest.N < 1 || imageRequest.N > 10) { + return errorWrapper(errors.New("n must be between 1 and 10"), "invalid_field_value", http.StatusBadRequest) + } + + // map model name + modelMapping := c.GetString("model_mapping") + isModelMapped := false + if modelMapping != "" { + modelMap := make(map[string]string) + err := json.Unmarshal([]byte(modelMapping), &modelMap) + if err != nil { + return errorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError) + } + if modelMap[imageModel] != "" { + imageModel = modelMap[imageModel] + isModelMapped = true + } + } + baseURL := common.ChannelBaseURLs[channelType] + requestURL := c.Request.URL.String() + if c.GetString("base_url") != "" { + baseURL = c.GetString("base_url") + } + fullRequestURL := getFullRequestURL(baseURL, requestURL, channelType) + var requestBody io.Reader + if isModelMapped { + jsonStr, err := json.Marshal(imageRequest) + if err != nil { + return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError) + } + requestBody = bytes.NewBuffer(jsonStr) + } else { + requestBody = c.Request.Body + } + + modelRatio := common.GetModelRatio(imageModel) + groupRatio := common.GetGroupRatio(group) + ratio := modelRatio * groupRatio + userQuota, err := model.CacheGetUserQuota(userId) + + sizeRatio := 1.0 + // Size + if imageRequest.Size == "256x256" { + sizeRatio = 1 + } else if imageRequest.Size == "512x512" { + sizeRatio = 1.125 + } else if imageRequest.Size == "1024x1024" { + sizeRatio = 1.25 + } + quota := int(ratio*sizeRatio*1000) * imageRequest.N + + if consumeQuota && userQuota-quota < 0 { + return errorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden) + } + + req, err := http.NewRequest(c.Request.Method, fullRequestURL, requestBody) + if err != nil { + return errorWrapper(err, "new_request_failed", http.StatusInternalServerError) + } + req.Header.Set("Authorization", c.Request.Header.Get("Authorization")) + + req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type")) + req.Header.Set("Accept", c.Request.Header.Get("Accept")) + + resp, err := httpClient.Do(req) + if err != nil { + return errorWrapper(err, "do_request_failed", http.StatusInternalServerError) + } + + err = req.Body.Close() + if err != nil { + return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError) + } + err = c.Request.Body.Close() + if err != nil { + return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError) + } + var textResponse ImageResponse + + defer func(ctx context.Context) { + if consumeQuota { + err := model.PostConsumeTokenQuota(tokenId, quota) + if err != nil { + common.SysError("error consuming token remain quota: " + err.Error()) + } + err = model.CacheUpdateUserQuota(userId) + if err != nil { + common.SysError("error update user quota cache: " + err.Error()) + } + if quota != 0 { + tokenName := c.GetString("token_name") + logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio) + model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageModel, tokenName, quota, logContent) + model.UpdateUserUsedQuotaAndRequestCount(userId, quota) + channelId := c.GetInt("channel_id") + model.UpdateChannelUsedQuota(channelId, quota) + } + } + }(c.Request.Context()) + + if consumeQuota { + responseBody, err := io.ReadAll(resp.Body) + + if err != nil { + return 
errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError) + } + err = resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError) + } + err = json.Unmarshal(responseBody, &textResponse) + if err != nil { + return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError) + } + + resp.Body = io.NopCloser(bytes.NewBuffer(responseBody)) + } + + for k, v := range resp.Header { + c.Writer.Header().Set(k, v[0]) + } + c.Writer.WriteHeader(resp.StatusCode) + + _, err = io.Copy(c.Writer, resp.Body) + if err != nil { + return errorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError) + } + err = resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError) + } + return nil +} diff --git a/controller/relay-openai.go b/controller/relay-openai.go new file mode 100644 index 00000000..6bdfbc08 --- /dev/null +++ b/controller/relay-openai.go @@ -0,0 +1,144 @@ +package controller + +import ( + "bufio" + "bytes" + "encoding/json" + "github.com/gin-gonic/gin" + "io" + "net/http" + "one-api/common" + "strings" +) + +func openaiStreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*OpenAIErrorWithStatusCode, string) { + responseText := "" + scanner := bufio.NewScanner(resp.Body) + scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + if i := strings.Index(string(data), "\n"); i >= 0 { + return i + 1, data[0:i], nil + } + if atEOF { + return len(data), data, nil + } + return 0, nil, nil + }) + dataChan := make(chan string) + stopChan := make(chan bool) + go func() { + for scanner.Scan() { + data := scanner.Text() + if len(data) < 6 { // ignore blank line or wrong format + continue + } + if data[:6] != "data: " && data[:6] != "[DONE]" { + continue + } + dataChan <- data + data = data[6:] + if !strings.HasPrefix(data, "[DONE]") { + switch relayMode { + case RelayModeChatCompletions: + var streamResponse ChatCompletionsStreamResponse + err := json.Unmarshal([]byte(data), &streamResponse) + if err != nil { + common.SysError("error unmarshalling stream response: " + err.Error()) + continue // just ignore the error + } + for _, choice := range streamResponse.Choices { + responseText += choice.Delta.Content + } + case RelayModeCompletions: + var streamResponse CompletionsStreamResponse + err := json.Unmarshal([]byte(data), &streamResponse) + if err != nil { + common.SysError("error unmarshalling stream response: " + err.Error()) + continue + } + for _, choice := range streamResponse.Choices { + responseText += choice.Text + } + } + } + } + stopChan <- true + }() + setEventStreamHeaders(c) + c.Stream(func(w io.Writer) bool { + select { + case data := <-dataChan: + if strings.HasPrefix(data, "data: [DONE]") { + data = data[:12] + } + // some implementations may add \r at the end of data + data = strings.TrimSuffix(data, "\r") + c.Render(-1, common.CustomEvent{Data: data}) + return true + case <-stopChan: + return false + } + }) + err := resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), "" + } + return nil, responseText +} + +func openaiHandler(c *gin.Context, resp *http.Response, consumeQuota bool, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) { + var textResponse TextResponse + if consumeQuota { + responseBody, err := 
io.ReadAll(resp.Body) + if err != nil { + return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil + } + err = resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil + } + err = json.Unmarshal(responseBody, &textResponse) + if err != nil { + return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil + } + if textResponse.Error.Type != "" { + return &OpenAIErrorWithStatusCode{ + OpenAIError: textResponse.Error, + StatusCode: resp.StatusCode, + }, nil + } + // Reset response body + resp.Body = io.NopCloser(bytes.NewBuffer(responseBody)) + } + // We shouldn't set the header before we parse the response body, because the parse part may fail. + // And then we will have to send an error response, but in this case, the header has already been set. + // So the httpClient will be confused by the response. + // For example, Postman will report error, and we cannot check the response at all. + for k, v := range resp.Header { + c.Writer.Header().Set(k, v[0]) + } + c.Writer.WriteHeader(resp.StatusCode) + _, err := io.Copy(c.Writer, resp.Body) + if err != nil { + return errorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError), nil + } + err = resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil + } + + if textResponse.Usage.TotalTokens == 0 { + completionTokens := 0 + for _, choice := range textResponse.Choices { + completionTokens += countTokenText(choice.Message.Content, model) + } + textResponse.Usage = Usage{ + PromptTokens: promptTokens, + CompletionTokens: completionTokens, + TotalTokens: promptTokens + completionTokens, + } + } + return nil, &textResponse.Usage +} diff --git a/controller/relay-palm.go b/controller/relay-palm.go index ae739ca0..a705b318 100644 --- a/controller/relay-palm.go +++ b/controller/relay-palm.go @@ -1,10 +1,17 @@ package controller import ( + "encoding/json" "fmt" "github.com/gin-gonic/gin" + "io" + "net/http" + "one-api/common" ) +// https://developers.generativeai.google/api/rest/generativelanguage/models/generateMessage#request-body +// https://developers.generativeai.google/api/rest/generativelanguage/models/generateMessage#response-body + type PaLMChatMessage struct { Author string `json:"author"` Content string `json:"content"` @@ -15,45 +22,184 @@ type PaLMFilter struct { Message string `json:"message"` } -// https://developers.generativeai.google/api/rest/generativelanguage/models/generateMessage#request-body +type PaLMPrompt struct { + Messages []PaLMChatMessage `json:"messages"` +} + type PaLMChatRequest struct { - Prompt []Message `json:"prompt"` - Temperature float64 `json:"temperature"` - CandidateCount int `json:"candidateCount"` - TopP float64 `json:"topP"` - TopK int `json:"topK"` + Prompt PaLMPrompt `json:"prompt"` + Temperature float64 `json:"temperature,omitempty"` + CandidateCount int `json:"candidateCount,omitempty"` + TopP float64 `json:"topP,omitempty"` + TopK int `json:"topK,omitempty"` +} + +type PaLMError struct { + Code int `json:"code"` + Message string `json:"message"` + Status string `json:"status"` } -// https://developers.generativeai.google/api/rest/generativelanguage/models/generateMessage#response-body type PaLMChatResponse struct { - Candidates []Message `json:"candidates"` - Messages []Message `json:"messages"` - Filters []PaLMFilter `json:"filters"` + Candidates []PaLMChatMessage 
`json:"candidates"` + Messages []Message `json:"messages"` + Filters []PaLMFilter `json:"filters"` + Error PaLMError `json:"error"` } -func relayPaLM(openAIRequest GeneralOpenAIRequest, c *gin.Context) *OpenAIErrorWithStatusCode { - // https://developers.generativeai.google/api/rest/generativelanguage/models/generateMessage - messages := make([]PaLMChatMessage, 0, len(openAIRequest.Messages)) - for _, message := range openAIRequest.Messages { - var author string - if message.Role == "user" { - author = "0" - } else { - author = "1" - } - messages = append(messages, PaLMChatMessage{ - Author: author, +func requestOpenAI2PaLM(textRequest GeneralOpenAIRequest) *PaLMChatRequest { + palmRequest := PaLMChatRequest{ + Prompt: PaLMPrompt{ + Messages: make([]PaLMChatMessage, 0, len(textRequest.Messages)), + }, + Temperature: textRequest.Temperature, + CandidateCount: textRequest.N, + TopP: textRequest.TopP, + TopK: textRequest.MaxTokens, + } + for _, message := range textRequest.Messages { + palmMessage := PaLMChatMessage{ Content: message.Content, - }) + } + if message.Role == "user" { + palmMessage.Author = "0" + } else { + palmMessage.Author = "1" + } + palmRequest.Prompt.Messages = append(palmRequest.Prompt.Messages, palmMessage) } - request := PaLMChatRequest{ - Prompt: nil, - Temperature: openAIRequest.Temperature, - CandidateCount: openAIRequest.N, - TopP: openAIRequest.TopP, - TopK: openAIRequest.MaxTokens, - } - // TODO: forward request to PaLM & convert response - fmt.Print(request) - return nil + return &palmRequest +} + +func responsePaLM2OpenAI(response *PaLMChatResponse) *OpenAITextResponse { + fullTextResponse := OpenAITextResponse{ + Choices: make([]OpenAITextResponseChoice, 0, len(response.Candidates)), + } + for i, candidate := range response.Candidates { + choice := OpenAITextResponseChoice{ + Index: i, + Message: Message{ + Role: "assistant", + Content: candidate.Content, + }, + FinishReason: "stop", + } + fullTextResponse.Choices = append(fullTextResponse.Choices, choice) + } + return &fullTextResponse +} + +func streamResponsePaLM2OpenAI(palmResponse *PaLMChatResponse) *ChatCompletionsStreamResponse { + var choice ChatCompletionsStreamResponseChoice + if len(palmResponse.Candidates) > 0 { + choice.Delta.Content = palmResponse.Candidates[0].Content + } + choice.FinishReason = &stopFinishReason + var response ChatCompletionsStreamResponse + response.Object = "chat.completion.chunk" + response.Model = "palm2" + response.Choices = []ChatCompletionsStreamResponseChoice{choice} + return &response +} + +func palmStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, string) { + responseText := "" + responseId := fmt.Sprintf("chatcmpl-%s", common.GetUUID()) + createdTime := common.GetTimestamp() + dataChan := make(chan string) + stopChan := make(chan bool) + go func() { + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + common.SysError("error reading stream response: " + err.Error()) + stopChan <- true + return + } + err = resp.Body.Close() + if err != nil { + common.SysError("error closing stream response: " + err.Error()) + stopChan <- true + return + } + var palmResponse PaLMChatResponse + err = json.Unmarshal(responseBody, &palmResponse) + if err != nil { + common.SysError("error unmarshalling stream response: " + err.Error()) + stopChan <- true + return + } + fullTextResponse := streamResponsePaLM2OpenAI(&palmResponse) + fullTextResponse.Id = responseId + fullTextResponse.Created = createdTime + if len(palmResponse.Candidates) > 0 { + 
responseText = palmResponse.Candidates[0].Content
+        }
+        jsonResponse, err := json.Marshal(fullTextResponse)
+        if err != nil {
+            common.SysError("error marshalling stream response: " + err.Error())
+            stopChan <- true
+            return
+        }
+        dataChan <- string(jsonResponse)
+        stopChan <- true
+    }()
+    setEventStreamHeaders(c)
+    c.Stream(func(w io.Writer) bool {
+        select {
+        case data := <-dataChan:
+            c.Render(-1, common.CustomEvent{Data: "data: " + data})
+            return true
+        case <-stopChan:
+            c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
+            return false
+        }
+    })
+    err := resp.Body.Close()
+    if err != nil {
+        return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
+    }
+    return nil, responseText
+}
+
+func palmHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) {
+    responseBody, err := io.ReadAll(resp.Body)
+    if err != nil {
+        return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
+    }
+    err = resp.Body.Close()
+    if err != nil {
+        return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
+    }
+    var palmResponse PaLMChatResponse
+    err = json.Unmarshal(responseBody, &palmResponse)
+    if err != nil {
+        return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
+    }
+    if palmResponse.Error.Code != 0 || len(palmResponse.Candidates) == 0 {
+        return &OpenAIErrorWithStatusCode{
+            OpenAIError: OpenAIError{
+                Message: palmResponse.Error.Message,
+                Type:    palmResponse.Error.Status,
+                Param:   "",
+                Code:    palmResponse.Error.Code,
+            },
+            StatusCode: resp.StatusCode,
+        }, nil
+    }
+    fullTextResponse := responsePaLM2OpenAI(&palmResponse)
+    completionTokens := countTokenText(palmResponse.Candidates[0].Content, model)
+    usage := Usage{
+        PromptTokens:     promptTokens,
+        CompletionTokens: completionTokens,
+        TotalTokens:      promptTokens + completionTokens,
+    }
+    fullTextResponse.Usage = usage
+    jsonResponse, err := json.Marshal(fullTextResponse)
+    if err != nil {
+        return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
+    }
+    c.Writer.Header().Set("Content-Type", "application/json")
+    c.Writer.WriteHeader(resp.StatusCode)
+    _, err = c.Writer.Write(jsonResponse)
+    return nil, &usage
+}
diff --git a/controller/relay-tencent.go b/controller/relay-tencent.go
new file mode 100644
index 00000000..024468bc
--- /dev/null
+++ b/controller/relay-tencent.go
@@ -0,0 +1,287 @@
+package controller
+
+import (
+    "bufio"
+    "crypto/hmac"
+    "crypto/sha1"
+    "encoding/base64"
+    "encoding/json"
+    "errors"
+    "fmt"
+    "github.com/gin-gonic/gin"
+    "io"
+    "net/http"
+    "one-api/common"
+    "sort"
+    "strconv"
+    "strings"
+)
+
+// https://cloud.tencent.com/document/product/1729/97732
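+
+// The channel key for a Hunyuan channel is the triple "appId|secretId|secretKey"
+// (see parseTencentConfig below). A minimal sketch of the signing flow used by
+// relay-text.go, with made-up credentials:
+//
+//	appId, secretId, secretKey, _ := parseTencentConfig("1234|AKIDxxxx|xxxx")
+//	tencentRequest := requestOpenAI2Tencent(textRequest)
+//	tencentRequest.AppId = appId
+//	tencentRequest.SecretId = secretId
+//	sign := getTencentSign(*tencentRequest, secretKey) // base64(HMAC-SHA1) over the sorted param string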
+type TencentMessage struct {
+    Role    string `json:"role"`
+    Content string `json:"content"`
+}
+
+type TencentChatRequest struct {
+    AppId    int64  `json:"app_id"`    // APPID of the Tencent Cloud account
+    SecretId string `json:"secret_id"` // SecretId from the Tencent Cloud console
+    // Timestamp is the current UNIX timestamp in seconds, recording when the
+    // API request was made, e.g. 1529223702; a value too far from the current
+    // time triggers a signature-expired error
+    Timestamp int64 `json:"timestamp"`
+    // Expired is the signature's expiry, a UNIX epoch timestamp in seconds;
+    // Expired must be greater than Timestamp, and Expired-Timestamp must be
+    // less than 90 days
+    Expired int64  `json:"expired"`
+    QueryID string `json:"query_id"` // request id, used for troubleshooting
+    // Temperature: higher values make the output more random, lower values
+    // make it more focused and deterministic; defaults to 1.0, range [0.0, 2.0];
+    // not recommended to change unless necessary, unreasonable values degrade
+    // results; set only one of this and top_p, do not change both
+    Temperature float64 `json:"temperature"`
+    // TopP controls the diversity of the output: the larger the value, the
+    // more diverse the generated text; defaults to 1.0, range [0.0, 1.0];
+    // not recommended to change unless necessary, unreasonable values degrade
+    // results; set only one of this and temperature, do not change both
+    TopP float64 `json:"top_p"`
+    // Stream: 0 for synchronous, 1 for streaming (the default; protocol: SSE);
+    // synchronous requests time out after 60s, streaming is recommended for long content
+    Stream int `json:"stream"`
+    // Messages holds the conversation, at most 40 entries, ordered from
+    // oldest to newest; the input content supports at most 3000 tokens in total
+    Messages []TencentMessage `json:"messages"`
+}
+
+type TencentError struct {
+    Code    int    `json:"code"`
+    Message string `json:"message"`
+}
+
+type TencentUsage struct {
+    InputTokens  int `json:"input_tokens"`
+    OutputTokens int `json:"output_tokens"`
+    TotalTokens  int `json:"total_tokens"`
+}
+
+type TencentResponseChoices struct {
+    FinishReason string         `json:"finish_reason,omitempty"` // streaming end flag; "stop" marks the final packet
+    Messages     TencentMessage `json:"messages,omitempty"`      // content returned in synchronous mode, null in stream mode; output content supports at most 1024 tokens
+    Delta        TencentMessage `json:"delta,omitempty"`         // content returned in stream mode, null in synchronous mode; output content supports at most 1024 tokens
+}
+
+type TencentChatResponse struct {
+    Choices []TencentResponseChoices `json:"choices,omitempty"` // results
+    Created string                   `json:"created,omitempty"` // unix timestamp as a string
+    Id      string                   `json:"id,omitempty"`      // conversation id
+    Usage   Usage                    `json:"usage,omitempty"`   // token counts
+    Error   TencentError             `json:"error,omitempty"`   // error info; note: this field may be null, meaning no valid value was available
+    Note    string                   `json:"note,omitempty"`    // remarks
+    ReqID   string                   `json:"req_id,omitempty"`  // unique request id, returned with every request; include it when reporting API issues
+}
+
+func requestOpenAI2Tencent(request GeneralOpenAIRequest) *TencentChatRequest {
+    messages := make([]TencentMessage, 0, len(request.Messages))
+    for i := 0; i < len(request.Messages); i++ {
+        message := request.Messages[i]
+        if message.Role == "system" {
+            messages = append(messages, TencentMessage{
+                Role:    "user",
+                Content: message.Content,
+            })
+            messages = append(messages, TencentMessage{
+                Role:    "assistant",
+                Content: "Okay",
+            })
+            continue
+        }
+        messages = append(messages, TencentMessage{
+            Content: message.Content,
+            Role:    message.Role,
+        })
+    }
+    stream := 0
+    if request.Stream {
+        stream = 1
+    }
+    return &TencentChatRequest{
+        Timestamp:   common.GetTimestamp(),
+        Expired:     common.GetTimestamp() + 24*60*60,
+        QueryID:     common.GetUUID(),
+        Temperature: request.Temperature,
+        TopP:        request.TopP,
+        Stream:      stream,
+        Messages:    messages,
+    }
+}
+
+func responseTencent2OpenAI(response *TencentChatResponse) *OpenAITextResponse {
+    fullTextResponse := OpenAITextResponse{
+        Object:  "chat.completion",
+        Created: common.GetTimestamp(),
+        Usage:   response.Usage,
+    }
+    if len(response.Choices) > 0 {
+        choice := OpenAITextResponseChoice{
+            Index: 0,
+            Message: Message{
+                Role:    "assistant",
+                Content: response.Choices[0].Messages.Content,
+            },
+            FinishReason: response.Choices[0].FinishReason,
+        }
+        fullTextResponse.Choices = append(fullTextResponse.Choices, choice)
+    }
+    return &fullTextResponse
+}
+
+func streamResponseTencent2OpenAI(TencentResponse *TencentChatResponse) *ChatCompletionsStreamResponse {
+    response := ChatCompletionsStreamResponse{
+        Object:  "chat.completion.chunk",
+        Created: common.GetTimestamp(),
+        Model:   "tencent-hunyuan",
+    }
+    if len(TencentResponse.Choices) > 0 {
+        var choice ChatCompletionsStreamResponseChoice
+        choice.Delta.Content = TencentResponse.Choices[0].Delta.Content
+        if TencentResponse.Choices[0].FinishReason == "stop" {
+            choice.FinishReason = &stopFinishReason
+        }
+        response.Choices = append(response.Choices, choice)
+    }
+    return &response
+}
+
+func tencentStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, string) {
+    var responseText string
+    scanner := 
bufio.NewScanner(resp.Body) + scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + if i := strings.Index(string(data), "\n"); i >= 0 { + return i + 1, data[0:i], nil + } + if atEOF { + return len(data), data, nil + } + return 0, nil, nil + }) + dataChan := make(chan string) + stopChan := make(chan bool) + go func() { + for scanner.Scan() { + data := scanner.Text() + if len(data) < 5 { // ignore blank line or wrong format + continue + } + if data[:5] != "data:" { + continue + } + data = data[5:] + dataChan <- data + } + stopChan <- true + }() + setEventStreamHeaders(c) + c.Stream(func(w io.Writer) bool { + select { + case data := <-dataChan: + var TencentResponse TencentChatResponse + err := json.Unmarshal([]byte(data), &TencentResponse) + if err != nil { + common.SysError("error unmarshalling stream response: " + err.Error()) + return true + } + response := streamResponseTencent2OpenAI(&TencentResponse) + if len(response.Choices) != 0 { + responseText += response.Choices[0].Delta.Content + } + jsonResponse, err := json.Marshal(response) + if err != nil { + common.SysError("error marshalling stream response: " + err.Error()) + return true + } + c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)}) + return true + case <-stopChan: + c.Render(-1, common.CustomEvent{Data: "data: [DONE]"}) + return false + } + }) + err := resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), "" + } + return nil, responseText +} + +func tencentHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) { + var TencentResponse TencentChatResponse + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil + } + err = resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil + } + err = json.Unmarshal(responseBody, &TencentResponse) + if err != nil { + return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil + } + if TencentResponse.Error.Code != 0 { + return &OpenAIErrorWithStatusCode{ + OpenAIError: OpenAIError{ + Message: TencentResponse.Error.Message, + Code: TencentResponse.Error.Code, + }, + StatusCode: resp.StatusCode, + }, nil + } + fullTextResponse := responseTencent2OpenAI(&TencentResponse) + jsonResponse, err := json.Marshal(fullTextResponse) + if err != nil { + return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil + } + c.Writer.Header().Set("Content-Type", "application/json") + c.Writer.WriteHeader(resp.StatusCode) + _, err = c.Writer.Write(jsonResponse) + return nil, &fullTextResponse.Usage +} + +func parseTencentConfig(config string) (appId int64, secretId string, secretKey string, err error) { + parts := strings.Split(config, "|") + if len(parts) != 3 { + err = errors.New("invalid tencent config") + return + } + appId, err = strconv.ParseInt(parts[0], 10, 64) + secretId = parts[1] + secretKey = parts[2] + return +} + +func getTencentSign(req TencentChatRequest, secretKey string) string { + params := make([]string, 0) + params = append(params, "app_id="+strconv.FormatInt(req.AppId, 10)) + params = append(params, "secret_id="+req.SecretId) + params = append(params, "timestamp="+strconv.FormatInt(req.Timestamp, 10)) + params = append(params, 
"query_id="+req.QueryID) + params = append(params, "temperature="+strconv.FormatFloat(req.Temperature, 'f', -1, 64)) + params = append(params, "top_p="+strconv.FormatFloat(req.TopP, 'f', -1, 64)) + params = append(params, "stream="+strconv.Itoa(req.Stream)) + params = append(params, "expired="+strconv.FormatInt(req.Expired, 10)) + + var messageStr string + for _, msg := range req.Messages { + messageStr += fmt.Sprintf(`{"role":"%s","content":"%s"},`, msg.Role, msg.Content) + } + messageStr = strings.TrimSuffix(messageStr, ",") + params = append(params, "messages=["+messageStr+"]") + + sort.Sort(sort.StringSlice(params)) + url := "hunyuan.cloud.tencent.com/hyllm/v1/chat/completions?" + strings.Join(params, "&") + mac := hmac.New(sha1.New, []byte(secretKey)) + signURL := url + mac.Write([]byte(signURL)) + sign := mac.Sum([]byte(nil)) + return base64.StdEncoding.EncodeToString(sign) +} diff --git a/controller/relay-text.go b/controller/relay-text.go new file mode 100644 index 00000000..a61c6f7c --- /dev/null +++ b/controller/relay-text.go @@ -0,0 +1,645 @@ +package controller + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "net/http" + "one-api/common" + "one-api/model" + "strings" + "time" + + "github.com/gin-gonic/gin" +) + +const ( + APITypeOpenAI = iota + APITypeClaude + APITypePaLM + APITypeBaidu + APITypeZhipu + APITypeAli + APITypeXunfei + APITypeAIProxyLibrary + APITypeTencent +) + +var httpClient *http.Client +var impatientHTTPClient *http.Client + +func init() { + if common.RelayTimeout == 0 { + httpClient = &http.Client{} + } else { + httpClient = &http.Client{ + Timeout: time.Duration(common.RelayTimeout) * time.Second, + } + } + + impatientHTTPClient = &http.Client{ + Timeout: 5 * time.Second, + } +} + +func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode { + channelType := c.GetInt("channel") + channelId := c.GetInt("channel_id") + tokenId := c.GetInt("token_id") + userId := c.GetInt("id") + consumeQuota := c.GetBool("consume_quota") + group := c.GetString("group") + var textRequest GeneralOpenAIRequest + if consumeQuota || channelType == common.ChannelTypeAzure || channelType == common.ChannelTypePaLM { + err := common.UnmarshalBodyReusable(c, &textRequest) + if err != nil { + return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest) + } + } + if relayMode == RelayModeModerations && textRequest.Model == "" { + textRequest.Model = "text-moderation-latest" + } + if relayMode == RelayModeEmbeddings && textRequest.Model == "" { + textRequest.Model = c.Param("model") + } + // request validation + if textRequest.Model == "" { + return errorWrapper(errors.New("model is required"), "required_field_missing", http.StatusBadRequest) + } + switch relayMode { + case RelayModeCompletions: + if textRequest.Prompt == "" { + return errorWrapper(errors.New("field prompt is required"), "required_field_missing", http.StatusBadRequest) + } + case RelayModeChatCompletions: + if textRequest.Messages == nil || len(textRequest.Messages) == 0 { + return errorWrapper(errors.New("field messages is required"), "required_field_missing", http.StatusBadRequest) + } + case RelayModeEmbeddings: + case RelayModeModerations: + if textRequest.Input == "" { + return errorWrapper(errors.New("field input is required"), "required_field_missing", http.StatusBadRequest) + } + case RelayModeEdits: + if textRequest.Instruction == "" { + return errorWrapper(errors.New("field instruction is required"), "required_field_missing", 
http.StatusBadRequest) + } + } + // map model name + modelMapping := c.GetString("model_mapping") + isModelMapped := false + if modelMapping != "" && modelMapping != "{}" { + modelMap := make(map[string]string) + err := json.Unmarshal([]byte(modelMapping), &modelMap) + if err != nil { + return errorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError) + } + if modelMap[textRequest.Model] != "" { + textRequest.Model = modelMap[textRequest.Model] + isModelMapped = true + } + } + apiType := APITypeOpenAI + switch channelType { + case common.ChannelTypeAnthropic: + apiType = APITypeClaude + case common.ChannelTypeBaidu: + apiType = APITypeBaidu + case common.ChannelTypePaLM: + apiType = APITypePaLM + case common.ChannelTypeZhipu: + apiType = APITypeZhipu + case common.ChannelTypeAli: + apiType = APITypeAli + case common.ChannelTypeXunfei: + apiType = APITypeXunfei + case common.ChannelTypeAIProxyLibrary: + apiType = APITypeAIProxyLibrary + case common.ChannelTypeTencent: + apiType = APITypeTencent + } + baseURL := common.ChannelBaseURLs[channelType] + requestURL := c.Request.URL.String() + if c.GetString("base_url") != "" { + baseURL = c.GetString("base_url") + } + fullRequestURL := getFullRequestURL(baseURL, requestURL, channelType) + switch apiType { + case APITypeOpenAI: + if channelType == common.ChannelTypeAzure { + // https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api + query := c.Request.URL.Query() + apiVersion := query.Get("api-version") + if apiVersion == "" { + apiVersion = c.GetString("api_version") + } + requestURL := strings.Split(requestURL, "?")[0] + requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, apiVersion) + baseURL = c.GetString("base_url") + task := strings.TrimPrefix(requestURL, "/v1/") + model_ := textRequest.Model + model_ = strings.Replace(model_, ".", "", -1) + // https://github.com/songquanpeng/one-api/issues/67 + model_ = strings.TrimSuffix(model_, "-0301") + model_ = strings.TrimSuffix(model_, "-0314") + model_ = strings.TrimSuffix(model_, "-0613") + fullRequestURL = fmt.Sprintf("%s/openai/deployments/%s/%s", baseURL, model_, task) + } + case APITypeClaude: + fullRequestURL = "https://api.anthropic.com/v1/complete" + if baseURL != "" { + fullRequestURL = fmt.Sprintf("%s/v1/complete", baseURL) + } + case APITypeBaidu: + switch textRequest.Model { + case "ERNIE-Bot": + fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions" + case "ERNIE-Bot-turbo": + fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/eb-instant" + case "ERNIE-Bot-4": + fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro" + case "BLOOMZ-7B": + fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/bloomz_7b1" + case "Embedding-V1": + fullRequestURL = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings/embedding-v1" + } + apiKey := c.Request.Header.Get("Authorization") + apiKey = strings.TrimPrefix(apiKey, "Bearer ") + var err error + if apiKey, err = getBaiduAccessToken(apiKey); err != nil { + return errorWrapper(err, "invalid_baidu_config", http.StatusInternalServerError) + } + fullRequestURL += "?access_token=" + apiKey + case APITypePaLM: + fullRequestURL = "https://generativelanguage.googleapis.com/v1beta2/models/chat-bison-001:generateMessage" + if baseURL != "" { + fullRequestURL = 
fmt.Sprintf("%s/v1beta2/models/chat-bison-001:generateMessage", baseURL) + } + apiKey := c.Request.Header.Get("Authorization") + apiKey = strings.TrimPrefix(apiKey, "Bearer ") + fullRequestURL += "?key=" + apiKey + case APITypeZhipu: + method := "invoke" + if textRequest.Stream { + method = "sse-invoke" + } + fullRequestURL = fmt.Sprintf("https://open.bigmodel.cn/api/paas/v3/model-api/%s/%s", textRequest.Model, method) + case APITypeAli: + fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation" + if relayMode == RelayModeEmbeddings { + fullRequestURL = "https://dashscope.aliyuncs.com/api/v1/services/embeddings/text-embedding/text-embedding" + } + case APITypeTencent: + fullRequestURL = "https://hunyuan.cloud.tencent.com/hyllm/v1/chat/completions" + case APITypeAIProxyLibrary: + fullRequestURL = fmt.Sprintf("%s/api/library/ask", baseURL) + } + var promptTokens int + var completionTokens int + switch relayMode { + case RelayModeChatCompletions: + promptTokens = countTokenMessages(textRequest.Messages, textRequest.Model) + case RelayModeCompletions: + promptTokens = countTokenInput(textRequest.Prompt, textRequest.Model) + case RelayModeModerations: + promptTokens = countTokenInput(textRequest.Input, textRequest.Model) + } + preConsumedTokens := common.PreConsumedQuota + if textRequest.MaxTokens != 0 { + preConsumedTokens = promptTokens + textRequest.MaxTokens + } + modelRatio := common.GetModelRatio(textRequest.Model) + groupRatio := common.GetGroupRatio(group) + ratio := modelRatio * groupRatio + preConsumedQuota := int(float64(preConsumedTokens) * ratio) + userQuota, err := model.CacheGetUserQuota(userId) + if err != nil { + return errorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError) + } + if userQuota-preConsumedQuota < 0 { + return errorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden) + } + err = model.CacheDecreaseUserQuota(userId, preConsumedQuota) + if err != nil { + return errorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError) + } + if userQuota > 100*preConsumedQuota { + // in this case, we do not pre-consume quota + // because the user has enough quota + preConsumedQuota = 0 + common.LogInfo(c.Request.Context(), fmt.Sprintf("user %d has enough quota %d, trusted and no need to pre-consume", userId, userQuota)) + } + if consumeQuota && preConsumedQuota > 0 { + err := model.PreConsumeTokenQuota(tokenId, preConsumedQuota) + if err != nil { + return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden) + } + } + var requestBody io.Reader + if isModelMapped { + jsonStr, err := json.Marshal(textRequest) + if err != nil { + return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError) + } + requestBody = bytes.NewBuffer(jsonStr) + } else { + requestBody = c.Request.Body + } + switch apiType { + case APITypeClaude: + claudeRequest := requestOpenAI2Claude(textRequest) + jsonStr, err := json.Marshal(claudeRequest) + if err != nil { + return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError) + } + requestBody = bytes.NewBuffer(jsonStr) + case APITypeBaidu: + var jsonData []byte + var err error + switch relayMode { + case RelayModeEmbeddings: + baiduEmbeddingRequest := embeddingRequestOpenAI2Baidu(textRequest) + jsonData, err = json.Marshal(baiduEmbeddingRequest) + default: + baiduRequest := requestOpenAI2Baidu(textRequest) + jsonData, err = json.Marshal(baiduRequest) + } + if err != 
nil { + return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError) + } + requestBody = bytes.NewBuffer(jsonData) + case APITypePaLM: + palmRequest := requestOpenAI2PaLM(textRequest) + jsonStr, err := json.Marshal(palmRequest) + if err != nil { + return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError) + } + requestBody = bytes.NewBuffer(jsonStr) + case APITypeZhipu: + zhipuRequest := requestOpenAI2Zhipu(textRequest) + jsonStr, err := json.Marshal(zhipuRequest) + if err != nil { + return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError) + } + requestBody = bytes.NewBuffer(jsonStr) + case APITypeAli: + var jsonStr []byte + var err error + switch relayMode { + case RelayModeEmbeddings: + aliEmbeddingRequest := embeddingRequestOpenAI2Ali(textRequest) + jsonStr, err = json.Marshal(aliEmbeddingRequest) + default: + aliRequest := requestOpenAI2Ali(textRequest) + jsonStr, err = json.Marshal(aliRequest) + } + if err != nil { + return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError) + } + requestBody = bytes.NewBuffer(jsonStr) + case APITypeTencent: + apiKey := c.Request.Header.Get("Authorization") + apiKey = strings.TrimPrefix(apiKey, "Bearer ") + appId, secretId, secretKey, err := parseTencentConfig(apiKey) + if err != nil { + return errorWrapper(err, "invalid_tencent_config", http.StatusInternalServerError) + } + tencentRequest := requestOpenAI2Tencent(textRequest) + tencentRequest.AppId = appId + tencentRequest.SecretId = secretId + jsonStr, err := json.Marshal(tencentRequest) + if err != nil { + return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError) + } + sign := getTencentSign(*tencentRequest, secretKey) + c.Request.Header.Set("Authorization", sign) + requestBody = bytes.NewBuffer(jsonStr) + case APITypeAIProxyLibrary: + aiProxyLibraryRequest := requestOpenAI2AIProxyLibrary(textRequest) + aiProxyLibraryRequest.LibraryId = c.GetString("library_id") + jsonStr, err := json.Marshal(aiProxyLibraryRequest) + if err != nil { + return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError) + } + requestBody = bytes.NewBuffer(jsonStr) + } + + var req *http.Request + var resp *http.Response + isStream := textRequest.Stream + + if apiType != APITypeXunfei { // cause xunfei use websocket + req, err = http.NewRequest(c.Request.Method, fullRequestURL, requestBody) + if err != nil { + return errorWrapper(err, "new_request_failed", http.StatusInternalServerError) + } + apiKey := c.Request.Header.Get("Authorization") + apiKey = strings.TrimPrefix(apiKey, "Bearer ") + switch apiType { + case APITypeOpenAI: + if channelType == common.ChannelTypeAzure { + req.Header.Set("api-key", apiKey) + } else { + req.Header.Set("Authorization", c.Request.Header.Get("Authorization")) + if channelType == common.ChannelTypeOpenRouter { + req.Header.Set("HTTP-Referer", "https://github.com/songquanpeng/one-api") + req.Header.Set("X-Title", "One API") + } + } + case APITypeClaude: + req.Header.Set("x-api-key", apiKey) + anthropicVersion := c.Request.Header.Get("anthropic-version") + if anthropicVersion == "" { + anthropicVersion = "2023-06-01" + } + req.Header.Set("anthropic-version", anthropicVersion) + case APITypeZhipu: + token := getZhipuToken(apiKey) + req.Header.Set("Authorization", token) + case APITypeAli: + req.Header.Set("Authorization", "Bearer "+apiKey) + if textRequest.Stream { + req.Header.Set("X-DashScope-SSE", "enable") + } + case 
APITypeTencent: + req.Header.Set("Authorization", apiKey) + default: + req.Header.Set("Authorization", "Bearer "+apiKey) + } + req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type")) + req.Header.Set("Accept", c.Request.Header.Get("Accept")) + if isStream && c.Request.Header.Get("Accept") == "" { + req.Header.Set("Accept", "text/event-stream") + } + //req.Header.Set("Connection", c.Request.Header.Get("Connection")) + resp, err = httpClient.Do(req) + if err != nil { + return errorWrapper(err, "do_request_failed", http.StatusInternalServerError) + } + err = req.Body.Close() + if err != nil { + return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError) + } + err = c.Request.Body.Close() + if err != nil { + return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError) + } + isStream = isStream || strings.HasPrefix(resp.Header.Get("Content-Type"), "text/event-stream") + + if resp.StatusCode != http.StatusOK { + if preConsumedQuota != 0 { + go func(ctx context.Context) { + // return pre-consumed quota + err := model.PostConsumeTokenQuota(tokenId, -preConsumedQuota) + if err != nil { + common.LogError(ctx, "error return pre-consumed quota: "+err.Error()) + } + }(c.Request.Context()) + } + return relayErrorHandler(resp) + } + } + + var textResponse TextResponse + tokenName := c.GetString("token_name") + + defer func(ctx context.Context) { + // c.Writer.Flush() + go func() { + if consumeQuota { + quota := 0 + completionRatio := common.GetCompletionRatio(textRequest.Model) + promptTokens = textResponse.Usage.PromptTokens + completionTokens = textResponse.Usage.CompletionTokens + quota = int(math.Ceil((float64(promptTokens) + float64(completionTokens)*completionRatio) * ratio)) + if ratio != 0 && quota <= 0 { + quota = 1 + } + totalTokens := promptTokens + completionTokens + if totalTokens == 0 { + // in this case, must be some error happened + // we cannot just return, because we may have to return the pre-consumed quota + quota = 0 + } + quotaDelta := quota - preConsumedQuota + err := model.PostConsumeTokenQuota(tokenId, quotaDelta) + if err != nil { + common.LogError(ctx, "error consuming token remain quota: "+err.Error()) + } + err = model.CacheUpdateUserQuota(userId) + if err != nil { + common.LogError(ctx, "error update user quota cache: "+err.Error()) + } + if quota != 0 { + logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio) + model.RecordConsumeLog(ctx, userId, channelId, promptTokens, completionTokens, textRequest.Model, tokenName, quota, logContent) + model.UpdateUserUsedQuotaAndRequestCount(userId, quota) + model.UpdateChannelUsedQuota(channelId, quota) + } + } + }() + }(c.Request.Context()) + switch apiType { + case APITypeOpenAI: + if isStream { + err, responseText := openaiStreamHandler(c, resp, relayMode) + if err != nil { + return err + } + textResponse.Usage.PromptTokens = promptTokens + textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model) + return nil + } else { + err, usage := openaiHandler(c, resp, consumeQuota, promptTokens, textRequest.Model) + if err != nil { + return err + } + if usage != nil { + textResponse.Usage = *usage + } + return nil + } + case APITypeClaude: + if isStream { + err, responseText := claudeStreamHandler(c, resp) + if err != nil { + return err + } + textResponse.Usage.PromptTokens = promptTokens + textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model) + return nil + } else { + err, usage := claudeHandler(c, 
resp, promptTokens, textRequest.Model) + if err != nil { + return err + } + if usage != nil { + textResponse.Usage = *usage + } + return nil + } + case APITypeBaidu: + if isStream { + err, usage := baiduStreamHandler(c, resp) + if err != nil { + return err + } + if usage != nil { + textResponse.Usage = *usage + } + return nil + } else { + var err *OpenAIErrorWithStatusCode + var usage *Usage + switch relayMode { + case RelayModeEmbeddings: + err, usage = baiduEmbeddingHandler(c, resp) + default: + err, usage = baiduHandler(c, resp) + } + if err != nil { + return err + } + if usage != nil { + textResponse.Usage = *usage + } + return nil + } + case APITypePaLM: + if textRequest.Stream { // PaLM2 API does not support stream + err, responseText := palmStreamHandler(c, resp) + if err != nil { + return err + } + textResponse.Usage.PromptTokens = promptTokens + textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model) + return nil + } else { + err, usage := palmHandler(c, resp, promptTokens, textRequest.Model) + if err != nil { + return err + } + if usage != nil { + textResponse.Usage = *usage + } + return nil + } + case APITypeZhipu: + if isStream { + err, usage := zhipuStreamHandler(c, resp) + if err != nil { + return err + } + if usage != nil { + textResponse.Usage = *usage + } + // zhipu's API does not return prompt tokens & completion tokens + textResponse.Usage.PromptTokens = textResponse.Usage.TotalTokens + return nil + } else { + err, usage := zhipuHandler(c, resp) + if err != nil { + return err + } + if usage != nil { + textResponse.Usage = *usage + } + // zhipu's API does not return prompt tokens & completion tokens + textResponse.Usage.PromptTokens = textResponse.Usage.TotalTokens + return nil + } + case APITypeAli: + if isStream { + err, usage := aliStreamHandler(c, resp) + if err != nil { + return err + } + if usage != nil { + textResponse.Usage = *usage + } + return nil + } else { + var err *OpenAIErrorWithStatusCode + var usage *Usage + switch relayMode { + case RelayModeEmbeddings: + err, usage = aliEmbeddingHandler(c, resp) + default: + err, usage = aliHandler(c, resp) + } + if err != nil { + return err + } + if usage != nil { + textResponse.Usage = *usage + } + return nil + } + case APITypeXunfei: + auth := c.Request.Header.Get("Authorization") + auth = strings.TrimPrefix(auth, "Bearer ") + splits := strings.Split(auth, "|") + if len(splits) != 3 { + return errorWrapper(errors.New("invalid auth"), "invalid_auth", http.StatusBadRequest) + } + var err *OpenAIErrorWithStatusCode + var usage *Usage + if isStream { + err, usage = xunfeiStreamHandler(c, textRequest, splits[0], splits[1], splits[2]) + } else { + err, usage = xunfeiHandler(c, textRequest, splits[0], splits[1], splits[2]) + } + if err != nil { + return err + } + if usage != nil { + textResponse.Usage = *usage + } + return nil + case APITypeAIProxyLibrary: + if isStream { + err, usage := aiProxyLibraryStreamHandler(c, resp) + if err != nil { + return err + } + if usage != nil { + textResponse.Usage = *usage + } + return nil + } else { + err, usage := aiProxyLibraryHandler(c, resp) + if err != nil { + return err + } + if usage != nil { + textResponse.Usage = *usage + } + return nil + } + case APITypeTencent: + if isStream { + err, responseText := tencentStreamHandler(c, resp) + if err != nil { + return err + } + textResponse.Usage.PromptTokens = promptTokens + textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model) + return nil + } else { + err, usage := 
tencentHandler(c, resp)
+			if err != nil {
+				return err
+			}
+			if usage != nil {
+				textResponse.Usage = *usage
+			}
+			return nil
+		}
+	default:
+		return errorWrapper(errors.New("unknown api type"), "unknown_api_type", http.StatusInternalServerError)
+	}
+}
diff --git a/controller/relay-utils.go b/controller/relay-utils.go
index bb25fa3b..cf5d9b69 100644
--- a/controller/relay-utils.go
+++ b/controller/relay-utils.go
@@ -1,28 +1,68 @@
 package controller
 
 import (
+	"encoding/json"
 	"fmt"
+	"github.com/gin-gonic/gin"
 	"github.com/pkoukk/tiktoken-go"
+	"io"
+	"net/http"
 	"one-api/common"
+	"strconv"
 	"strings"
 )
 
-var tokenEncoderMap = map[string]*tiktoken.Tiktoken{}
+var stopFinishReason = "stop"
 
-func getTokenEncoder(model string) *tiktoken.Tiktoken {
-	if tokenEncoder, ok := tokenEncoderMap[model]; ok {
-		return tokenEncoder
-	}
-	tokenEncoder, err := tiktoken.EncodingForModel(model)
+// tokenEncoderMap won't grow after initialization
+var tokenEncoderMap = map[string]*tiktoken.Tiktoken{}
+var defaultTokenEncoder *tiktoken.Tiktoken
+
+func InitTokenEncoders() {
+	common.SysLog("initializing token encoders")
+	gpt35TokenEncoder, err := tiktoken.EncodingForModel("gpt-3.5-turbo")
 	if err != nil {
-		common.SysError(fmt.Sprintf("failed to get token encoder for model %s: %s, using encoder for gpt-3.5-turbo", model, err.Error()))
-		tokenEncoder, err = tiktoken.EncodingForModel("gpt-3.5-turbo")
-		if err != nil {
-			common.FatalLog(fmt.Sprintf("failed to get token encoder for model gpt-3.5-turbo: %s", err.Error()))
+		common.FatalLog(fmt.Sprintf("failed to get gpt-3.5-turbo token encoder: %s", err.Error()))
+	}
+	defaultTokenEncoder = gpt35TokenEncoder
+	gpt4TokenEncoder, err := tiktoken.EncodingForModel("gpt-4")
+	if err != nil {
+		common.FatalLog(fmt.Sprintf("failed to get gpt-4 token encoder: %s", err.Error()))
+	}
+	for model := range common.ModelRatio {
+		if strings.HasPrefix(model, "gpt-3.5") {
+			tokenEncoderMap[model] = gpt35TokenEncoder
+		} else if strings.HasPrefix(model, "gpt-4") {
+			tokenEncoderMap[model] = gpt4TokenEncoder
+		} else {
+			tokenEncoderMap[model] = nil
 		}
 	}
-	tokenEncoderMap[model] = tokenEncoder
-	return tokenEncoder
+	common.SysLog("token encoders initialized")
+}
+
+func getTokenEncoder(model string) *tiktoken.Tiktoken {
+	tokenEncoder, ok := tokenEncoderMap[model]
+	if ok && tokenEncoder != nil {
+		return tokenEncoder
+	}
+	if ok {
+		tokenEncoder, err := tiktoken.EncodingForModel(model)
+		if err != nil {
+			common.SysError(fmt.Sprintf("failed to get token encoder for model %s: %s, using encoder for gpt-3.5-turbo", model, err.Error()))
+			tokenEncoder = defaultTokenEncoder
+		}
+		tokenEncoderMap[model] = tokenEncoder
+		return tokenEncoder
+	}
+	return defaultTokenEncoder
+}
+
+func getTokenNum(tokenEncoder *tiktoken.Tiktoken, text string) int {
+	if common.ApproximateTokenEnabled {
+		return int(float64(len(text)) * 0.38)
+	}
+	return len(tokenEncoder.Encode(text, nil, nil))
 }
 
 func countTokenMessages(messages []Message, model string) int {
@@ -34,12 +74,9 @@ func countTokenMessages(messages []Message, model string) int {
 	// Every message follows <|start|>{role/name}\n{content}<|end|>\n
 	var tokensPerMessage int
 	var tokensPerName int
-	if strings.HasPrefix(model, "gpt-3.5") {
+	if model == "gpt-3.5-turbo-0301" {
 		tokensPerMessage = 4
 		tokensPerName = -1 // If there's a name, the role is omitted
-	} else if strings.HasPrefix(model, "gpt-4") {
-		tokensPerMessage = 3
-		tokensPerName = 1
 	} else {
 		tokensPerMessage = 3
 		tokensPerName = 1
@@ -47,19 +84,105 @@ func countTokenMessages(messages 
[]Message, model string) int { tokenNum := 0 for _, message := range messages { tokenNum += tokensPerMessage - tokenNum += len(tokenEncoder.Encode(message.Content, nil, nil)) - tokenNum += len(tokenEncoder.Encode(message.Role, nil, nil)) + tokenNum += getTokenNum(tokenEncoder, message.Content) + tokenNum += getTokenNum(tokenEncoder, message.Role) if message.Name != nil { tokenNum += tokensPerName - tokenNum += len(tokenEncoder.Encode(*message.Name, nil, nil)) + tokenNum += getTokenNum(tokenEncoder, *message.Name) } } tokenNum += 3 // Every reply is primed with <|start|>assistant<|message|> return tokenNum } +func countTokenInput(input any, model string) int { + switch input.(type) { + case string: + return countTokenText(input.(string), model) + case []string: + text := "" + for _, s := range input.([]string) { + text += s + } + return countTokenText(text, model) + } + return 0 +} + func countTokenText(text string, model string) int { tokenEncoder := getTokenEncoder(model) - token := tokenEncoder.Encode(text, nil, nil) - return len(token) + return getTokenNum(tokenEncoder, text) +} + +func errorWrapper(err error, code string, statusCode int) *OpenAIErrorWithStatusCode { + openAIError := OpenAIError{ + Message: err.Error(), + Type: "one_api_error", + Code: code, + } + return &OpenAIErrorWithStatusCode{ + OpenAIError: openAIError, + StatusCode: statusCode, + } +} + +func shouldDisableChannel(err *OpenAIError, statusCode int) bool { + if !common.AutomaticDisableChannelEnabled { + return false + } + if err == nil { + return false + } + if statusCode == http.StatusUnauthorized { + return true + } + if err.Type == "insufficient_quota" || err.Code == "invalid_api_key" || err.Code == "account_deactivated" { + return true + } + return false +} + +func setEventStreamHeaders(c *gin.Context) { + c.Writer.Header().Set("Content-Type", "text/event-stream") + c.Writer.Header().Set("Cache-Control", "no-cache") + c.Writer.Header().Set("Connection", "keep-alive") + c.Writer.Header().Set("Transfer-Encoding", "chunked") + c.Writer.Header().Set("X-Accel-Buffering", "no") +} + +func relayErrorHandler(resp *http.Response) (openAIErrorWithStatusCode *OpenAIErrorWithStatusCode) { + openAIErrorWithStatusCode = &OpenAIErrorWithStatusCode{ + StatusCode: resp.StatusCode, + OpenAIError: OpenAIError{ + Message: fmt.Sprintf("bad response status code %d", resp.StatusCode), + Type: "upstream_error", + Code: "bad_response_status_code", + Param: strconv.Itoa(resp.StatusCode), + }, + } + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + return + } + err = resp.Body.Close() + if err != nil { + return + } + var textResponse TextResponse + err = json.Unmarshal(responseBody, &textResponse) + if err != nil { + return + } + openAIErrorWithStatusCode.OpenAIError = textResponse.Error + return +} + +func getFullRequestURL(baseURL string, requestURL string, channelType int) string { + fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL) + if channelType == common.ChannelTypeOpenAI { + if strings.HasPrefix(baseURL, "https://gateway.ai.cloudflare.com") { + fullRequestURL = fmt.Sprintf("%s%s", baseURL, strings.TrimPrefix(requestURL, "/v1")) + } + } + return fullRequestURL } diff --git a/controller/relay-xunfei.go b/controller/relay-xunfei.go new file mode 100644 index 00000000..91fb6042 --- /dev/null +++ b/controller/relay-xunfei.go @@ -0,0 +1,306 @@ +package controller + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "github.com/gin-gonic/gin" + "github.com/gorilla/websocket" 
+ "io" + "net/http" + "net/url" + "one-api/common" + "strings" + "time" +) + +// https://console.xfyun.cn/services/cbm +// https://www.xfyun.cn/doc/spark/Web.html + +type XunfeiMessage struct { + Role string `json:"role"` + Content string `json:"content"` +} + +type XunfeiChatRequest struct { + Header struct { + AppId string `json:"app_id"` + } `json:"header"` + Parameter struct { + Chat struct { + Domain string `json:"domain,omitempty"` + Temperature float64 `json:"temperature,omitempty"` + TopK int `json:"top_k,omitempty"` + MaxTokens int `json:"max_tokens,omitempty"` + Auditing bool `json:"auditing,omitempty"` + } `json:"chat"` + } `json:"parameter"` + Payload struct { + Message struct { + Text []XunfeiMessage `json:"text"` + } `json:"message"` + } `json:"payload"` +} + +type XunfeiChatResponseTextItem struct { + Content string `json:"content"` + Role string `json:"role"` + Index int `json:"index"` +} + +type XunfeiChatResponse struct { + Header struct { + Code int `json:"code"` + Message string `json:"message"` + Sid string `json:"sid"` + Status int `json:"status"` + } `json:"header"` + Payload struct { + Choices struct { + Status int `json:"status"` + Seq int `json:"seq"` + Text []XunfeiChatResponseTextItem `json:"text"` + } `json:"choices"` + Usage struct { + //Text struct { + // QuestionTokens string `json:"question_tokens"` + // PromptTokens string `json:"prompt_tokens"` + // CompletionTokens string `json:"completion_tokens"` + // TotalTokens string `json:"total_tokens"` + //} `json:"text"` + Text Usage `json:"text"` + } `json:"usage"` + } `json:"payload"` +} + +func requestOpenAI2Xunfei(request GeneralOpenAIRequest, xunfeiAppId string, domain string) *XunfeiChatRequest { + messages := make([]XunfeiMessage, 0, len(request.Messages)) + for _, message := range request.Messages { + if message.Role == "system" { + messages = append(messages, XunfeiMessage{ + Role: "user", + Content: message.Content, + }) + messages = append(messages, XunfeiMessage{ + Role: "assistant", + Content: "Okay", + }) + } else { + messages = append(messages, XunfeiMessage{ + Role: message.Role, + Content: message.Content, + }) + } + } + xunfeiRequest := XunfeiChatRequest{} + xunfeiRequest.Header.AppId = xunfeiAppId + xunfeiRequest.Parameter.Chat.Domain = domain + xunfeiRequest.Parameter.Chat.Temperature = request.Temperature + xunfeiRequest.Parameter.Chat.TopK = request.N + xunfeiRequest.Parameter.Chat.MaxTokens = request.MaxTokens + xunfeiRequest.Payload.Message.Text = messages + return &xunfeiRequest +} + +func responseXunfei2OpenAI(response *XunfeiChatResponse) *OpenAITextResponse { + if len(response.Payload.Choices.Text) == 0 { + response.Payload.Choices.Text = []XunfeiChatResponseTextItem{ + { + Content: "", + }, + } + } + choice := OpenAITextResponseChoice{ + Index: 0, + Message: Message{ + Role: "assistant", + Content: response.Payload.Choices.Text[0].Content, + }, + FinishReason: stopFinishReason, + } + fullTextResponse := OpenAITextResponse{ + Object: "chat.completion", + Created: common.GetTimestamp(), + Choices: []OpenAITextResponseChoice{choice}, + Usage: response.Payload.Usage.Text, + } + return &fullTextResponse +} + +func streamResponseXunfei2OpenAI(xunfeiResponse *XunfeiChatResponse) *ChatCompletionsStreamResponse { + if len(xunfeiResponse.Payload.Choices.Text) == 0 { + xunfeiResponse.Payload.Choices.Text = []XunfeiChatResponseTextItem{ + { + Content: "", + }, + } + } + var choice ChatCompletionsStreamResponseChoice + choice.Delta.Content = xunfeiResponse.Payload.Choices.Text[0].Content + if 
xunfeiResponse.Payload.Choices.Status == 2 {
+		choice.FinishReason = &stopFinishReason
+	}
+	response := ChatCompletionsStreamResponse{
+		Object:  "chat.completion.chunk",
+		Created: common.GetTimestamp(),
+		Model:   "SparkDesk",
+		Choices: []ChatCompletionsStreamResponseChoice{choice},
+	}
+	return &response
+}
+
+func buildXunfeiAuthUrl(hostUrl string, apiKey, apiSecret string) string {
+	HmacWithShaToBase64 := func(algorithm, data, key string) string {
+		mac := hmac.New(sha256.New, []byte(key))
+		mac.Write([]byte(data))
+		encodeData := mac.Sum(nil)
+		return base64.StdEncoding.EncodeToString(encodeData)
+	}
+	ul, err := url.Parse(hostUrl)
+	if err != nil {
+		fmt.Println(err)
+	}
+	date := time.Now().UTC().Format(time.RFC1123)
+	signString := []string{"host: " + ul.Host, "date: " + date, "GET " + ul.Path + " HTTP/1.1"}
+	sign := strings.Join(signString, "\n")
+	sha := HmacWithShaToBase64("hmac-sha256", sign, apiSecret)
+	authUrl := fmt.Sprintf("hmac username=\"%s\", algorithm=\"%s\", headers=\"%s\", signature=\"%s\"", apiKey,
+		"hmac-sha256", "host date request-line", sha)
+	authorization := base64.StdEncoding.EncodeToString([]byte(authUrl))
+	v := url.Values{}
+	v.Add("host", ul.Host)
+	v.Add("date", date)
+	v.Add("authorization", authorization)
+	callUrl := hostUrl + "?" + v.Encode()
+	return callUrl
+}
+
+func xunfeiStreamHandler(c *gin.Context, textRequest GeneralOpenAIRequest, appId string, apiSecret string, apiKey string) (*OpenAIErrorWithStatusCode, *Usage) {
+	domain, authUrl := getXunfeiAuthUrl(c, apiKey, apiSecret)
+	dataChan, stopChan, err := xunfeiMakeRequest(textRequest, domain, authUrl, appId)
+	if err != nil {
+		return errorWrapper(err, "make_xunfei_request_failed", http.StatusInternalServerError), nil
+	}
+	setEventStreamHeaders(c)
+	var usage Usage
+	c.Stream(func(w io.Writer) bool {
+		select {
+		case xunfeiResponse := <-dataChan:
+			usage.PromptTokens += xunfeiResponse.Payload.Usage.Text.PromptTokens
+			usage.CompletionTokens += xunfeiResponse.Payload.Usage.Text.CompletionTokens
+			usage.TotalTokens += xunfeiResponse.Payload.Usage.Text.TotalTokens
+			response := streamResponseXunfei2OpenAI(&xunfeiResponse)
+			jsonResponse, err := json.Marshal(response)
+			if err != nil {
+				common.SysError("error marshalling stream response: " + err.Error())
+				return true
+			}
+			c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
+			return true
+		case <-stopChan:
+			c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
+			return false
+		}
+	})
+	return nil, &usage
+}
+
+func xunfeiHandler(c *gin.Context, textRequest GeneralOpenAIRequest, appId string, apiSecret string, apiKey string) (*OpenAIErrorWithStatusCode, *Usage) {
+	domain, authUrl := getXunfeiAuthUrl(c, apiKey, apiSecret)
+	dataChan, stopChan, err := xunfeiMakeRequest(textRequest, domain, authUrl, appId)
+	if err != nil {
+		return errorWrapper(err, "make_xunfei_request_failed", http.StatusInternalServerError), nil
+	}
+	var usage Usage
+	var content string
+	var xunfeiResponse XunfeiChatResponse
+	stop := false
+	for !stop {
+		select {
+		case xunfeiResponse = <-dataChan:
+			if len(xunfeiResponse.Payload.Choices.Text) == 0 {
+				continue
+			}
+			content += xunfeiResponse.Payload.Choices.Text[0].Content
+			usage.PromptTokens += xunfeiResponse.Payload.Usage.Text.PromptTokens
+			usage.CompletionTokens += xunfeiResponse.Payload.Usage.Text.CompletionTokens
+			usage.TotalTokens += xunfeiResponse.Payload.Usage.Text.TotalTokens
+		case stop = <-stopChan:
+		}
+	}
+
+	// replace the slice wholesale: Text may still be empty here if the upstream never sent any content, and indexing it would panic
+	xunfeiResponse.Payload.Choices.Text = []XunfeiChatResponseTextItem{{Content: content}}
+
+	response := 
responseXunfei2OpenAI(&xunfeiResponse) + jsonResponse, err := json.Marshal(response) + if err != nil { + return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil + } + c.Writer.Header().Set("Content-Type", "application/json") + _, _ = c.Writer.Write(jsonResponse) + return nil, &usage +} + +func xunfeiMakeRequest(textRequest GeneralOpenAIRequest, domain, authUrl, appId string) (chan XunfeiChatResponse, chan bool, error) { + d := websocket.Dialer{ + HandshakeTimeout: 5 * time.Second, + } + conn, resp, err := d.Dial(authUrl, nil) + if err != nil || resp.StatusCode != 101 { + return nil, nil, err + } + data := requestOpenAI2Xunfei(textRequest, appId, domain) + err = conn.WriteJSON(data) + if err != nil { + return nil, nil, err + } + + dataChan := make(chan XunfeiChatResponse) + stopChan := make(chan bool) + go func() { + for { + _, msg, err := conn.ReadMessage() + if err != nil { + common.SysError("error reading stream response: " + err.Error()) + break + } + var response XunfeiChatResponse + err = json.Unmarshal(msg, &response) + if err != nil { + common.SysError("error unmarshalling stream response: " + err.Error()) + break + } + dataChan <- response + if response.Payload.Choices.Status == 2 { + err := conn.Close() + if err != nil { + common.SysError("error closing websocket connection: " + err.Error()) + } + break + } + } + stopChan <- true + }() + + return dataChan, stopChan, nil +} + +func getXunfeiAuthUrl(c *gin.Context, apiKey string, apiSecret string) (string, string) { + query := c.Request.URL.Query() + apiVersion := query.Get("api-version") + if apiVersion == "" { + apiVersion = c.GetString("api_version") + } + if apiVersion == "" { + apiVersion = "v1.1" + common.SysLog("api_version not found, use default: " + apiVersion) + } + domain := "general" + if apiVersion != "v1.1" { + domain += strings.Split(apiVersion, ".")[0] + } + authUrl := buildXunfeiAuthUrl(fmt.Sprintf("wss://spark-api.xf-yun.com/%s/chat", apiVersion), apiKey, apiSecret) + return domain, authUrl +} diff --git a/controller/relay-zhipu.go b/controller/relay-zhipu.go new file mode 100644 index 00000000..7a4a582d --- /dev/null +++ b/controller/relay-zhipu.go @@ -0,0 +1,301 @@ +package controller + +import ( + "bufio" + "encoding/json" + "github.com/gin-gonic/gin" + "github.com/golang-jwt/jwt" + "io" + "net/http" + "one-api/common" + "strings" + "sync" + "time" +) + +// https://open.bigmodel.cn/doc/api#chatglm_std +// chatglm_std, chatglm_lite +// https://open.bigmodel.cn/api/paas/v3/model-api/chatglm_std/invoke +// https://open.bigmodel.cn/api/paas/v3/model-api/chatglm_std/sse-invoke + +type ZhipuMessage struct { + Role string `json:"role"` + Content string `json:"content"` +} + +type ZhipuRequest struct { + Prompt []ZhipuMessage `json:"prompt"` + Temperature float64 `json:"temperature,omitempty"` + TopP float64 `json:"top_p,omitempty"` + RequestId string `json:"request_id,omitempty"` + Incremental bool `json:"incremental,omitempty"` +} + +type ZhipuResponseData struct { + TaskId string `json:"task_id"` + RequestId string `json:"request_id"` + TaskStatus string `json:"task_status"` + Choices []ZhipuMessage `json:"choices"` + Usage `json:"usage"` +} + +type ZhipuResponse struct { + Code int `json:"code"` + Msg string `json:"msg"` + Success bool `json:"success"` + Data ZhipuResponseData `json:"data"` +} + +type ZhipuStreamMetaResponse struct { + RequestId string `json:"request_id"` + TaskId string `json:"task_id"` + TaskStatus string `json:"task_status"` + Usage `json:"usage"` +} + +type 
zhipuTokenData struct { + Token string + ExpiryTime time.Time +} + +var zhipuTokens sync.Map +var expSeconds int64 = 24 * 3600 + +func getZhipuToken(apikey string) string { + data, ok := zhipuTokens.Load(apikey) + if ok { + tokenData := data.(zhipuTokenData) + if time.Now().Before(tokenData.ExpiryTime) { + return tokenData.Token + } + } + + split := strings.Split(apikey, ".") + if len(split) != 2 { + common.SysError("invalid zhipu key: " + apikey) + return "" + } + + id := split[0] + secret := split[1] + + expMillis := time.Now().Add(time.Duration(expSeconds)*time.Second).UnixNano() / 1e6 + expiryTime := time.Now().Add(time.Duration(expSeconds) * time.Second) + + timestamp := time.Now().UnixNano() / 1e6 + + payload := jwt.MapClaims{ + "api_key": id, + "exp": expMillis, + "timestamp": timestamp, + } + + token := jwt.NewWithClaims(jwt.SigningMethodHS256, payload) + + token.Header["alg"] = "HS256" + token.Header["sign_type"] = "SIGN" + + tokenString, err := token.SignedString([]byte(secret)) + if err != nil { + return "" + } + + zhipuTokens.Store(apikey, zhipuTokenData{ + Token: tokenString, + ExpiryTime: expiryTime, + }) + + return tokenString +} + +func requestOpenAI2Zhipu(request GeneralOpenAIRequest) *ZhipuRequest { + messages := make([]ZhipuMessage, 0, len(request.Messages)) + for _, message := range request.Messages { + if message.Role == "system" { + messages = append(messages, ZhipuMessage{ + Role: "system", + Content: message.Content, + }) + messages = append(messages, ZhipuMessage{ + Role: "user", + Content: "Okay", + }) + } else { + messages = append(messages, ZhipuMessage{ + Role: message.Role, + Content: message.Content, + }) + } + } + return &ZhipuRequest{ + Prompt: messages, + Temperature: request.Temperature, + TopP: request.TopP, + Incremental: false, + } +} + +func responseZhipu2OpenAI(response *ZhipuResponse) *OpenAITextResponse { + fullTextResponse := OpenAITextResponse{ + Id: response.Data.TaskId, + Object: "chat.completion", + Created: common.GetTimestamp(), + Choices: make([]OpenAITextResponseChoice, 0, len(response.Data.Choices)), + Usage: response.Data.Usage, + } + for i, choice := range response.Data.Choices { + openaiChoice := OpenAITextResponseChoice{ + Index: i, + Message: Message{ + Role: choice.Role, + Content: strings.Trim(choice.Content, "\""), + }, + FinishReason: "", + } + if i == len(response.Data.Choices)-1 { + openaiChoice.FinishReason = "stop" + } + fullTextResponse.Choices = append(fullTextResponse.Choices, openaiChoice) + } + return &fullTextResponse +} + +func streamResponseZhipu2OpenAI(zhipuResponse string) *ChatCompletionsStreamResponse { + var choice ChatCompletionsStreamResponseChoice + choice.Delta.Content = zhipuResponse + response := ChatCompletionsStreamResponse{ + Object: "chat.completion.chunk", + Created: common.GetTimestamp(), + Model: "chatglm", + Choices: []ChatCompletionsStreamResponseChoice{choice}, + } + return &response +} + +func streamMetaResponseZhipu2OpenAI(zhipuResponse *ZhipuStreamMetaResponse) (*ChatCompletionsStreamResponse, *Usage) { + var choice ChatCompletionsStreamResponseChoice + choice.Delta.Content = "" + choice.FinishReason = &stopFinishReason + response := ChatCompletionsStreamResponse{ + Id: zhipuResponse.RequestId, + Object: "chat.completion.chunk", + Created: common.GetTimestamp(), + Model: "chatglm", + Choices: []ChatCompletionsStreamResponseChoice{choice}, + } + return &response, &zhipuResponse.Usage +} + +func zhipuStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) { + var 
usage *Usage + scanner := bufio.NewScanner(resp.Body) + scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + if i := strings.Index(string(data), "\n\n"); i >= 0 && strings.Index(string(data), ":") >= 0 { + return i + 2, data[0:i], nil + } + if atEOF { + return len(data), data, nil + } + return 0, nil, nil + }) + dataChan := make(chan string) + metaChan := make(chan string) + stopChan := make(chan bool) + go func() { + for scanner.Scan() { + data := scanner.Text() + lines := strings.Split(data, "\n") + for i, line := range lines { + if len(line) < 5 { + continue + } + if line[:5] == "data:" { + dataChan <- line[5:] + if i != len(lines)-1 { + dataChan <- "\n" + } + } else if line[:5] == "meta:" { + metaChan <- line[5:] + } + } + } + stopChan <- true + }() + setEventStreamHeaders(c) + c.Stream(func(w io.Writer) bool { + select { + case data := <-dataChan: + response := streamResponseZhipu2OpenAI(data) + jsonResponse, err := json.Marshal(response) + if err != nil { + common.SysError("error marshalling stream response: " + err.Error()) + return true + } + c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)}) + return true + case data := <-metaChan: + var zhipuResponse ZhipuStreamMetaResponse + err := json.Unmarshal([]byte(data), &zhipuResponse) + if err != nil { + common.SysError("error unmarshalling stream response: " + err.Error()) + return true + } + response, zhipuUsage := streamMetaResponseZhipu2OpenAI(&zhipuResponse) + jsonResponse, err := json.Marshal(response) + if err != nil { + common.SysError("error marshalling stream response: " + err.Error()) + return true + } + usage = zhipuUsage + c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)}) + return true + case <-stopChan: + c.Render(-1, common.CustomEvent{Data: "data: [DONE]"}) + return false + } + }) + err := resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil + } + return nil, usage +} + +func zhipuHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, *Usage) { + var zhipuResponse ZhipuResponse + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil + } + err = resp.Body.Close() + if err != nil { + return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil + } + err = json.Unmarshal(responseBody, &zhipuResponse) + if err != nil { + return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil + } + if !zhipuResponse.Success { + return &OpenAIErrorWithStatusCode{ + OpenAIError: OpenAIError{ + Message: zhipuResponse.Msg, + Type: "zhipu_error", + Param: "", + Code: zhipuResponse.Code, + }, + StatusCode: resp.StatusCode, + }, nil + } + fullTextResponse := responseZhipu2OpenAI(&zhipuResponse) + jsonResponse, err := json.Marshal(fullTextResponse) + if err != nil { + return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil + } + c.Writer.Header().Set("Content-Type", "application/json") + c.Writer.WriteHeader(resp.StatusCode) + _, err = c.Writer.Write(jsonResponse) + return nil, &fullTextResponse.Usage +} diff --git a/controller/relay.go b/controller/relay.go index 81497d81..1926110e 100644 --- a/controller/relay.go +++ b/controller/relay.go @@ -1,16 +1,13 @@ package controller import ( - "bufio" - "bytes" - 
"encoding/json" "fmt" - "github.com/gin-gonic/gin" - "io" "net/http" "one-api/common" - "one-api/model" + "strconv" "strings" + + "github.com/gin-gonic/gin" ) type Message struct { @@ -19,17 +16,51 @@ type Message struct { Name *string `json:"name,omitempty"` } +const ( + RelayModeUnknown = iota + RelayModeChatCompletions + RelayModeCompletions + RelayModeEmbeddings + RelayModeModerations + RelayModeImagesGenerations + RelayModeEdits + RelayModeAudio +) + // https://platform.openai.com/docs/api-reference/chat type GeneralOpenAIRequest struct { - Model string `json:"model"` - Messages []Message `json:"messages"` - Prompt string `json:"prompt"` - Stream bool `json:"stream"` - MaxTokens int `json:"max_tokens"` - Temperature float64 `json:"temperature"` - TopP float64 `json:"top_p"` - N int `json:"n"` + Model string `json:"model,omitempty"` + Messages []Message `json:"messages,omitempty"` + Prompt any `json:"prompt,omitempty"` + Stream bool `json:"stream,omitempty"` + MaxTokens int `json:"max_tokens,omitempty"` + Temperature float64 `json:"temperature,omitempty"` + TopP float64 `json:"top_p,omitempty"` + N int `json:"n,omitempty"` + Input any `json:"input,omitempty"` + Instruction string `json:"instruction,omitempty"` + Size string `json:"size,omitempty"` + Functions any `json:"functions,omitempty"` +} + +func (r GeneralOpenAIRequest) ParseInput() []string { + if r.Input == nil { + return nil + } + var input []string + switch r.Input.(type) { + case string: + input = []string{r.Input.(string)} + case []any: + input = make([]string, 0, len(r.Input.([]any))) + for _, item := range r.Input.([]any) { + if str, ok := item.(string); ok { + input = append(input, str) + } + } + } + return input } type ChatRequest struct { @@ -46,6 +77,16 @@ type TextRequest struct { //Stream bool `json:"stream"` } +type ImageRequest struct { + Prompt string `json:"prompt"` + N int `json:"n"` + Size string `json:"size"` +} + +type AudioResponse struct { + Text string `json:"text,omitempty"` +} + type Usage struct { PromptTokens int `json:"prompt_tokens"` CompletionTokens int `json:"completion_tokens"` @@ -56,7 +97,7 @@ type OpenAIError struct { Message string `json:"message"` Type string `json:"type"` Param string `json:"param"` - Code string `json:"code"` + Code any `json:"code"` } type OpenAIErrorWithStatusCode struct { @@ -65,32 +106,117 @@ type OpenAIErrorWithStatusCode struct { } type TextResponse struct { - Usage `json:"usage"` - Error OpenAIError `json:"error"` + Choices []OpenAITextResponseChoice `json:"choices"` + Usage `json:"usage"` + Error OpenAIError `json:"error"` } -type StreamResponse struct { +type OpenAITextResponseChoice struct { + Index int `json:"index"` + Message `json:"message"` + FinishReason string `json:"finish_reason"` +} + +type OpenAITextResponse struct { + Id string `json:"id"` + Object string `json:"object"` + Created int64 `json:"created"` + Choices []OpenAITextResponseChoice `json:"choices"` + Usage `json:"usage"` +} + +type OpenAIEmbeddingResponseItem struct { + Object string `json:"object"` + Index int `json:"index"` + Embedding []float64 `json:"embedding"` +} + +type OpenAIEmbeddingResponse struct { + Object string `json:"object"` + Data []OpenAIEmbeddingResponseItem `json:"data"` + Model string `json:"model"` + Usage `json:"usage"` +} + +type ImageResponse struct { + Created int `json:"created"` + Data []struct { + Url string `json:"url"` + } +} + +type ChatCompletionsStreamResponseChoice struct { + Delta struct { + Content string `json:"content"` + } `json:"delta"` + FinishReason 
*string `json:"finish_reason"` +} + +type ChatCompletionsStreamResponse struct { + Id string `json:"id"` + Object string `json:"object"` + Created int64 `json:"created"` + Model string `json:"model"` + Choices []ChatCompletionsStreamResponseChoice `json:"choices"` +} + +type CompletionsStreamResponse struct { Choices []struct { - Delta struct { - Content string `json:"content"` - } `json:"delta"` + Text string `json:"text"` FinishReason string `json:"finish_reason"` } `json:"choices"` } func Relay(c *gin.Context) { - err := relayHelper(c) + relayMode := RelayModeUnknown + if strings.HasPrefix(c.Request.URL.Path, "/v1/chat/completions") { + relayMode = RelayModeChatCompletions + } else if strings.HasPrefix(c.Request.URL.Path, "/v1/completions") { + relayMode = RelayModeCompletions + } else if strings.HasPrefix(c.Request.URL.Path, "/v1/embeddings") { + relayMode = RelayModeEmbeddings + } else if strings.HasSuffix(c.Request.URL.Path, "embeddings") { + relayMode = RelayModeEmbeddings + } else if strings.HasPrefix(c.Request.URL.Path, "/v1/moderations") { + relayMode = RelayModeModerations + } else if strings.HasPrefix(c.Request.URL.Path, "/v1/images/generations") { + relayMode = RelayModeImagesGenerations + } else if strings.HasPrefix(c.Request.URL.Path, "/v1/edits") { + relayMode = RelayModeEdits + } else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio") { + relayMode = RelayModeAudio + } + var err *OpenAIErrorWithStatusCode + switch relayMode { + case RelayModeImagesGenerations: + err = relayImageHelper(c, relayMode) + case RelayModeAudio: + err = relayAudioHelper(c, relayMode) + default: + err = relayTextHelper(c, relayMode) + } if err != nil { - if err.StatusCode == http.StatusTooManyRequests { - err.OpenAIError.Message = "负载已满,请稍后再试,或升级账户以提升服务质量。" + requestId := c.GetString(common.RequestIdKey) + retryTimesStr := c.Query("retry") + retryTimes, _ := strconv.Atoi(retryTimesStr) + if retryTimesStr == "" { + retryTimes = common.RetryTimes + } + if retryTimes > 0 { + c.Redirect(http.StatusTemporaryRedirect, fmt.Sprintf("%s?retry=%d", c.Request.URL.Path, retryTimes-1)) + } else { + if err.StatusCode == http.StatusTooManyRequests { + err.OpenAIError.Message = "当前分组上游负载已饱和,请稍后再试" + } + err.OpenAIError.Message = common.MessageWithRequestId(err.OpenAIError.Message, requestId) + c.JSON(err.StatusCode, gin.H{ + "error": err.OpenAIError, + }) } - c.JSON(err.StatusCode, gin.H{ - "error": err.OpenAIError, - }) channelId := c.GetInt("channel_id") - common.SysError(fmt.Sprintf("Relay error (channel #%d): %s", channelId, err.Message)) + common.LogError(c.Request.Context(), fmt.Sprintf("relay error (channel #%d): %s", channelId, err.Message)) // https://platform.openai.com/docs/guides/error-codes/api-errors - if common.AutomaticDisableChannelEnabled && (err.Type == "insufficient_quota" || err.Code == "invalid_api_key") { + if shouldDisableChannel(&err.OpenAIError, err.StatusCode) { channelId := c.GetInt("channel_id") channelName := c.GetString("channel_name") disableChannel(channelId, channelName, err.Message) @@ -98,241 +224,6 @@ func Relay(c *gin.Context) { } } -func errorWrapper(err error, code string, statusCode int) *OpenAIErrorWithStatusCode { - openAIError := OpenAIError{ - Message: err.Error(), - Type: "one_api_error", - Code: code, - } - return &OpenAIErrorWithStatusCode{ - OpenAIError: openAIError, - StatusCode: statusCode, - } -} - -func relayHelper(c *gin.Context) *OpenAIErrorWithStatusCode { - channelType := c.GetInt("channel") - tokenId := c.GetInt("token_id") - consumeQuota := 
c.GetBool("consume_quota") - var textRequest GeneralOpenAIRequest - if consumeQuota || channelType == common.ChannelTypeAzure || channelType == common.ChannelTypePaLM { - requestBody, err := io.ReadAll(c.Request.Body) - if err != nil { - return errorWrapper(err, "read_request_body_failed", http.StatusBadRequest) - } - err = c.Request.Body.Close() - if err != nil { - return errorWrapper(err, "close_request_body_failed", http.StatusBadRequest) - } - err = json.Unmarshal(requestBody, &textRequest) - if err != nil { - return errorWrapper(err, "unmarshal_request_body_failed", http.StatusBadRequest) - } - // Reset request body - c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody)) - } - baseURL := common.ChannelBaseURLs[channelType] - requestURL := c.Request.URL.String() - if channelType == common.ChannelTypeCustom { - baseURL = c.GetString("base_url") - } - fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL) - if channelType == common.ChannelTypeAzure { - // https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api - query := c.Request.URL.Query() - apiVersion := query.Get("api-version") - if apiVersion == "" { - apiVersion = c.GetString("api_version") - } - requestURL := strings.Split(requestURL, "?")[0] - requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, apiVersion) - baseURL = c.GetString("base_url") - task := strings.TrimPrefix(requestURL, "/v1/") - model_ := textRequest.Model - model_ = strings.Replace(model_, ".", "", -1) - // https://github.com/songquanpeng/one-api/issues/67 - model_ = strings.TrimSuffix(model_, "-0301") - model_ = strings.TrimSuffix(model_, "-0314") - fullRequestURL = fmt.Sprintf("%s/openai/deployments/%s/%s", baseURL, model_, task) - } else if channelType == common.ChannelTypePaLM { - err := relayPaLM(textRequest, c) - return err - } - - promptTokens := countTokenMessages(textRequest.Messages, textRequest.Model) - preConsumedTokens := common.PreConsumedQuota - if textRequest.MaxTokens != 0 { - preConsumedTokens = promptTokens + textRequest.MaxTokens - } - ratio := common.GetModelRatio(textRequest.Model) - preConsumedQuota := int(float64(preConsumedTokens) * ratio) - if consumeQuota { - err := model.PreConsumeTokenQuota(tokenId, preConsumedQuota) - if err != nil { - return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusOK) - } - } - req, err := http.NewRequest(c.Request.Method, fullRequestURL, c.Request.Body) - if err != nil { - return errorWrapper(err, "new_request_failed", http.StatusOK) - } - if channelType == common.ChannelTypeAzure { - key := c.Request.Header.Get("Authorization") - key = strings.TrimPrefix(key, "Bearer ") - req.Header.Set("api-key", key) - } else { - req.Header.Set("Authorization", c.Request.Header.Get("Authorization")) - } - req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type")) - req.Header.Set("Accept", c.Request.Header.Get("Accept")) - req.Header.Set("Connection", c.Request.Header.Get("Connection")) - client := &http.Client{} - resp, err := client.Do(req) - if err != nil { - return errorWrapper(err, "do_request_failed", http.StatusOK) - } - err = req.Body.Close() - if err != nil { - return errorWrapper(err, "close_request_body_failed", http.StatusOK) - } - err = c.Request.Body.Close() - if err != nil { - return errorWrapper(err, "close_request_body_failed", http.StatusOK) - } - var textResponse TextResponse - isStream := strings.HasPrefix(resp.Header.Get("Content-Type"), "text/event-stream") - var streamResponseText string - 
- defer func() { - if consumeQuota { - quota := 0 - usingGPT4 := strings.HasPrefix(textRequest.Model, "gpt-4") - completionRatio := 1 - if usingGPT4 { - completionRatio = 2 - } - if isStream { - responseTokens := countTokenText(streamResponseText, textRequest.Model) - quota = promptTokens + responseTokens*completionRatio - } else { - quota = textResponse.Usage.PromptTokens + textResponse.Usage.CompletionTokens*completionRatio - } - quota = int(float64(quota) * ratio) - quotaDelta := quota - preConsumedQuota - err := model.PostConsumeTokenQuota(tokenId, quotaDelta) - if err != nil { - common.SysError("Error consuming token remain quota: " + err.Error()) - } - } - }() - - if isStream { - scanner := bufio.NewScanner(resp.Body) - scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) { - if atEOF && len(data) == 0 { - return 0, nil, nil - } - - if i := strings.Index(string(data), "\n\n"); i >= 0 { - return i + 2, data[0:i], nil - } - - if atEOF { - return len(data), data, nil - } - - return 0, nil, nil - }) - dataChan := make(chan string) - stopChan := make(chan bool) - go func() { - for scanner.Scan() { - data := scanner.Text() - if len(data) < 6 { // must be something wrong! - common.SysError("Invalid stream response: " + data) - continue - } - dataChan <- data - data = data[6:] - if !strings.HasPrefix(data, "[DONE]") { - var streamResponse StreamResponse - err = json.Unmarshal([]byte(data), &streamResponse) - if err != nil { - common.SysError("Error unmarshalling stream response: " + err.Error()) - return - } - for _, choice := range streamResponse.Choices { - streamResponseText += choice.Delta.Content - } - } - } - stopChan <- true - }() - c.Writer.Header().Set("Content-Type", "text/event-stream") - c.Writer.Header().Set("Cache-Control", "no-cache") - c.Writer.Header().Set("Connection", "keep-alive") - c.Writer.Header().Set("Transfer-Encoding", "chunked") - c.Writer.Header().Set("X-Accel-Buffering", "no") - c.Stream(func(w io.Writer) bool { - select { - case data := <-dataChan: - if strings.HasPrefix(data, "data: [DONE]") { - data = data[:12] - } - c.Render(-1, common.CustomEvent{Data: data}) - return true - case <-stopChan: - return false - } - }) - err = resp.Body.Close() - if err != nil { - return errorWrapper(err, "close_response_body_failed", http.StatusOK) - } - return nil - } else { - if consumeQuota { - responseBody, err := io.ReadAll(resp.Body) - if err != nil { - return errorWrapper(err, "read_response_body_failed", http.StatusOK) - } - err = resp.Body.Close() - if err != nil { - return errorWrapper(err, "close_response_body_failed", http.StatusOK) - } - err = json.Unmarshal(responseBody, &textResponse) - if err != nil { - return errorWrapper(err, "unmarshal_response_body_failed", http.StatusOK) - } - if textResponse.Error.Type != "" { - return &OpenAIErrorWithStatusCode{ - OpenAIError: textResponse.Error, - StatusCode: resp.StatusCode, - } - } - // Reset response body - resp.Body = io.NopCloser(bytes.NewBuffer(responseBody)) - } - // We shouldn't set the header before we parse the response body, because the parse part may fail. - // And then we will have to send an error response, but in this case, the header has already been set. - // So the client will be confused by the response. - // For example, Postman will report error, and we cannot check the response at all. 
- for k, v := range resp.Header { - c.Writer.Header().Set(k, v[0]) - } - c.Writer.WriteHeader(resp.StatusCode) - _, err = io.Copy(c.Writer, resp.Body) - if err != nil { - return errorWrapper(err, "copy_response_body_failed", http.StatusOK) - } - err = resp.Body.Close() - if err != nil { - return errorWrapper(err, "close_response_body_failed", http.StatusOK) - } - return nil - } -} - func RelayNotImplemented(c *gin.Context) { err := OpenAIError{ Message: "API not implemented", @@ -340,7 +231,19 @@ func RelayNotImplemented(c *gin.Context) { Param: "", Code: "api_not_implemented", } - c.JSON(http.StatusOK, gin.H{ + c.JSON(http.StatusNotImplemented, gin.H{ + "error": err, + }) +} + +func RelayNotFound(c *gin.Context) { + err := OpenAIError{ + Message: fmt.Sprintf("Invalid URL (%s %s)", c.Request.Method, c.Request.URL.Path), + Type: "invalid_request_error", + Param: "", + Code: "", + } + c.JSON(http.StatusNotFound, gin.H{ "error": err, }) } diff --git a/controller/token.go b/controller/token.go index 180c4259..8642122c 100644 --- a/controller/token.go +++ b/controller/token.go @@ -109,17 +109,17 @@ func AddToken(c *gin.Context) { }) return } - if len(token.Name) == 0 || len(token.Name) > 20 { + if len(token.Name) > 30 { c.JSON(http.StatusOK, gin.H{ "success": false, - "message": "令牌名称长度必须在1-20之间", + "message": "令牌名称过长", }) return } cleanToken := model.Token{ UserId: c.GetInt("id"), Name: token.Name, - Key: common.GetUUID(), + Key: common.GenerateKey(), CreatedTime: common.GetTimestamp(), AccessedTime: common.GetTimestamp(), ExpiredTime: token.ExpiredTime, @@ -171,6 +171,13 @@ func UpdateToken(c *gin.Context) { }) return } + if len(token.Name) > 30 { + c.JSON(http.StatusOK, gin.H{ + "success": false, + "message": "令牌名称过长", + }) + return + } cleanToken, err := model.GetTokenByIds(token.Id, userId) if err != nil { c.JSON(http.StatusOK, gin.H{ @@ -180,10 +187,10 @@ func UpdateToken(c *gin.Context) { return } if token.Status == common.TokenStatusEnabled { - if cleanToken.Status == common.TokenStatusExpired && cleanToken.ExpiredTime <= common.GetTimestamp() { + if cleanToken.Status == common.TokenStatusExpired && cleanToken.ExpiredTime <= common.GetTimestamp() && cleanToken.ExpiredTime != -1 { c.JSON(http.StatusOK, gin.H{ "success": false, - "message": "令牌已过期,无法启用,请先修改令牌过期时间", + "message": "令牌已过期,无法启用,请先修改令牌过期时间,或者设置为永不过期", }) return } diff --git a/controller/user.go b/controller/user.go index b6241d36..8fd10b82 100644 --- a/controller/user.go +++ b/controller/user.go @@ -2,12 +2,14 @@ package controller import ( "encoding/json" - "github.com/gin-contrib/sessions" - "github.com/gin-gonic/gin" + "fmt" "net/http" "one-api/common" "one-api/model" "strconv" + + "github.com/gin-contrib/sessions" + "github.com/gin-gonic/gin" ) type LoginRequest struct { @@ -149,15 +151,18 @@ func Register(c *gin.Context) { return } } + affCode := user.AffCode // this code is the inviter's code, not the user's own code + inviterId, _ := model.GetUserIdByAffCode(affCode) cleanUser := model.User{ Username: user.Username, Password: user.Password, DisplayName: user.Username, + InviterId: inviterId, } if common.EmailVerificationEnabled { cleanUser.Email = user.Email } - if err := cleanUser.Insert(); err != nil { + if err := cleanUser.Insert(inviterId); err != nil { c.JSON(http.StatusOK, gin.H{ "success": false, "message": err.Error(), @@ -228,7 +233,7 @@ func GetUser(c *gin.Context) { return } myRole := c.GetInt("role") - if myRole <= user.Role { + if myRole <= user.Role && myRole != common.RoleRootUser { c.JSON(http.StatusOK, 
gin.H{ "success": false, "message": "无权获取同级或更高等级用户的信息", @@ -255,7 +260,7 @@ func GenerateAccessToken(c *gin.Context) { } user.AccessToken = common.GetUUID() - if model.DB.Where("token = ?", user.AccessToken).First(user).RowsAffected != 0 { + if model.DB.Where("access_token = ?", user.AccessToken).First(user).RowsAffected != 0 { c.JSON(http.StatusOK, gin.H{ "success": false, "message": "请重试,系统生成的 UUID 竟然重复了!", @@ -279,6 +284,34 @@ func GenerateAccessToken(c *gin.Context) { return } +func GetAffCode(c *gin.Context) { + id := c.GetInt("id") + user, err := model.GetUserById(id, true) + if err != nil { + c.JSON(http.StatusOK, gin.H{ + "success": false, + "message": err.Error(), + }) + return + } + if user.AffCode == "" { + user.AffCode = common.GetRandomString(4) + if err := user.Update(false); err != nil { + c.JSON(http.StatusOK, gin.H{ + "success": false, + "message": err.Error(), + }) + return + } + } + c.JSON(http.StatusOK, gin.H{ + "success": true, + "message": "", + "data": user.AffCode, + }) + return +} + func GetSelf(c *gin.Context) { id := c.GetInt("id") user, err := model.GetUserById(id, false) @@ -326,14 +359,14 @@ func UpdateUser(c *gin.Context) { return } myRole := c.GetInt("role") - if myRole <= originUser.Role { + if myRole <= originUser.Role && myRole != common.RoleRootUser { c.JSON(http.StatusOK, gin.H{ "success": false, "message": "无权更新同权限等级或更高权限等级的用户信息", }) return } - if myRole <= updatedUser.Role { + if myRole <= updatedUser.Role && myRole != common.RoleRootUser { c.JSON(http.StatusOK, gin.H{ "success": false, "message": "无权将其他用户权限等级提升到大于等于自己的权限等级", @@ -351,6 +384,9 @@ func UpdateUser(c *gin.Context) { }) return } + if originUser.Quota != updatedUser.Quota { + model.RecordLog(originUser.Id, model.LogTypeManage, fmt.Sprintf("管理员将用户额度从 %s修改为 %s", common.LogQuota(originUser.Quota), common.LogQuota(updatedUser.Quota))) + } c.JSON(http.StatusOK, gin.H{ "success": true, "message": "", @@ -442,6 +478,16 @@ func DeleteUser(c *gin.Context) { func DeleteSelf(c *gin.Context) { id := c.GetInt("id") + user, _ := model.GetUserById(id, false) + + if user.Role == common.RoleRootUser { + c.JSON(http.StatusOK, gin.H{ + "success": false, + "message": "不能删除超级管理员账户", + }) + return + } + err := model.DeleteUserById(id) if err != nil { c.JSON(http.StatusOK, gin.H{ @@ -491,7 +537,7 @@ func CreateUser(c *gin.Context) { Password: user.Password, DisplayName: user.DisplayName, } - if err := cleanUser.Insert(); err != nil { + if err := cleanUser.Insert(0); err != nil { c.JSON(http.StatusOK, gin.H{ "success": false, "message": err.Error(), @@ -655,6 +701,9 @@ func EmailBind(c *gin.Context) { }) return } + if user.Role == common.RoleRootUser { + common.RootUserEmail = email + } c.JSON(http.StatusOK, gin.H{ "success": true, "message": "", diff --git a/controller/wechat.go b/controller/wechat.go index 5620e8d3..ff4c9fb6 100644 --- a/controller/wechat.go +++ b/controller/wechat.go @@ -85,7 +85,7 @@ func WeChatAuth(c *gin.Context) { user.Role = common.RoleCommonUser user.Status = common.UserStatusEnabled - if err := user.Insert(); err != nil { + if err := user.Insert(0); err != nil { c.JSON(http.StatusOK, gin.H{ "success": false, "message": err.Error(), diff --git a/docker-compose.yml b/docker-compose.yml index 0fef5b82..30edb281 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,21 +2,48 @@ version: '3.4' services: one-api: - image: ghcr.io/songquanpeng/one-api:latest + image: justsong/one-api:latest container_name: one-api restart: always command: --log-dir /app/logs ports: - "3000:3000" 
     volumes:
-      - /home/ubuntu/data/one-api:/data
-      - /home/ubuntu/data/one-api/logs:/app/logs
-    # environment:
-    #    REDIS_CONN_STRING: redis://default:redispw@localhost:49153
-    #    SESSION_SECRET: random_string
-    #    SQL_DSN: root:123456@tcp(localhost:3306)/one-api
+      - ./data/oneapi:/data
+      - ./logs:/app/logs
+    environment:
+      - SQL_DSN=oneapi:123456@tcp(db:3306)/one-api  # modify this line, or comment it out to use SQLite as the database
+      - REDIS_CONN_STRING=redis://redis
+      - SESSION_SECRET=random_string  # change this to a random string
+      - TZ=Asia/Shanghai
+#      - NODE_TYPE=slave  # uncomment this line on slave nodes in multi-node deployments
+#      - SYNC_FREQUENCY=60  # uncomment this line if data should be reloaded from the database periodically
+#      - FRONTEND_BASE_URL=https://openai.justsong.cn  # uncomment this line on slave nodes in multi-node deployments
+    depends_on:
+      - redis
+      - db
     healthcheck:
-      test: ["CMD-SHELL", "curl -s http://localhost:3000/api/status | grep -o '\"success\":\\s*true' | awk '{print $2}' | grep 'true'"]
+      test: [ "CMD-SHELL", "wget -q -O - http://localhost:3000/api/status | grep -o '\"success\":\\s*true'" ]  # end the pipeline with grep so its exit status decides health
       interval: 30s
       timeout: 10s
       retries: 3
+
+  redis:
+    image: redis:latest
+    container_name: redis
+    restart: always
+
+  db:
+    image: mysql:8.2.0
+    restart: always
+    container_name: mysql
+    volumes:
+      - ./data/mysql:/var/lib/mysql  # mount a host directory for persistent storage
+    ports:
+      - '3306:3306'
+    environment:
+      TZ: Asia/Shanghai  # set the time zone
+      MYSQL_ROOT_PASSWORD: 'OneAPI@justsong'  # root user password
+      MYSQL_USER: oneapi  # create a dedicated user
+      MYSQL_PASSWORD: '123456'  # the dedicated user's password
+      MYSQL_DATABASE: one-api  # database created automatically
\ No newline at end of file
diff --git a/go.mod b/go.mod
index 0280586a..10b78d68 100644
--- a/go.mod
+++ b/go.mod
@@ -8,49 +8,54 @@ require (
 	github.com/gin-contrib/gzip v0.0.6
 	github.com/gin-contrib/sessions v0.0.5
 	github.com/gin-contrib/static v0.0.1
-	github.com/gin-gonic/gin v1.9.0
-	github.com/go-playground/validator/v10 v10.12.0
+	github.com/gin-gonic/gin v1.9.1
+	github.com/go-playground/validator/v10 v10.14.0
 	github.com/go-redis/redis/v8 v8.11.5
+	github.com/golang-jwt/jwt v3.2.2+incompatible
 	github.com/google/uuid v1.3.0
-	github.com/pkoukk/tiktoken-go v0.1.1
-	golang.org/x/crypto v0.8.0
+	github.com/gorilla/websocket v1.5.0
+	github.com/pkoukk/tiktoken-go v0.1.5
+	golang.org/x/crypto v0.14.0
 	gorm.io/driver/mysql v1.4.3
+	gorm.io/driver/postgres v1.5.2
 	gorm.io/driver/sqlite v1.4.3
-	gorm.io/gorm v1.24.0
+	gorm.io/gorm v1.25.0
 )
 
 require (
-	github.com/boj/redistore v0.0.0-20180917114910-cd5dcc76aeff // indirect
-	github.com/bytedance/sonic v1.8.8 // indirect
+	github.com/bytedance/sonic v1.9.1 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
 	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
-	github.com/dlclark/regexp2 v1.8.1 // indirect
+	github.com/dlclark/regexp2 v1.10.0 // indirect
+	github.com/gabriel-vasile/mimetype v1.4.2 // indirect
 	github.com/gin-contrib/sse v0.1.0 // indirect
 	github.com/go-playground/locales v0.14.1 // indirect
 	github.com/go-playground/universal-translator v0.18.1 // indirect
 	github.com/go-sql-driver/mysql v1.6.0 // indirect
 	github.com/goccy/go-json v0.10.2 // indirect
-	github.com/gomodule/redigo v2.0.0+incompatible // indirect
 	github.com/gorilla/context v1.1.1 // indirect
 	github.com/gorilla/securecookie v1.1.1 // indirect
 	github.com/gorilla/sessions v1.2.1 // indirect
+	github.com/jackc/pgpassfile v1.0.0 // indirect
+	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
+	github.com/jackc/pgx/v5 v5.3.1 // indirect
 	github.com/jinzhu/inflection v1.0.0 // indirect
 	github.com/jinzhu/now v1.1.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/cpuid/v2 v2.2.4 // indirect - github.com/leodido/go-urn v1.2.3 // indirect - github.com/mattn/go-isatty v0.0.18 // indirect + github.com/leodido/go-urn v1.2.4 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/pelletier/go-toml/v2 v2.0.7 // indirect + github.com/pelletier/go-toml/v2 v2.0.8 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.11 // indirect golang.org/x/arch v0.3.0 // indirect - golang.org/x/net v0.9.0 // indirect - golang.org/x/sys v0.7.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect google.golang.org/protobuf v1.30.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index e6bad42f..4865bcaa 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,6 @@ -github.com/boj/redistore v0.0.0-20180917114910-cd5dcc76aeff h1:RmdPFa+slIr4SCBg4st/l/vZWVe9QJKMXGO60Bxbe04= -github.com/boj/redistore v0.0.0-20180917114910-cd5dcc76aeff/go.mod h1:+RTT1BOk5P97fT2CiHkbFQwkK3mjsFAP6zCYV2aXtjw= github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= -github.com/bytedance/sonic v1.8.8 h1:Kj4AYbZSeENfyXicsYppYKO0K2YWab+i2UTSY7Ukz9Q= -github.com/bytedance/sonic v1.8.8/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= +github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= +github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= @@ -14,9 +12,11 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/dlclark/regexp2 v1.8.1 h1:6Lcdwya6GjPUNsBct8Lg/yRPwMhABj269AAzdGSiR+0= -github.com/dlclark/regexp2 v1.8.1/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0= +github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= +github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= github.com/gin-contrib/cors v1.4.0 h1:oJ6gwtUl3lqV0WEIwM/LxPF1QZ5qe2lGWdY2+bz7y0g= github.com/gin-contrib/cors v1.4.0/go.mod h1:bs9pNM0x/UsmHPBWT2xZz9ROh8xYjYkiURUfmBoMlcs= github.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4= @@ -29,8 +29,8 @@ github.com/gin-contrib/static v0.0.1 h1:JVxuvHPuUfkoul12N7dtQw7KRn/pSMq7Ue1Va9Sw github.com/gin-contrib/static v0.0.1/go.mod 
h1:CSxeF+wep05e0kCOsqWdAWbSszmc31zTIbD8TvWl7Hs= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= -github.com/gin-gonic/gin v1.9.0 h1:OjyFBKICoexlu99ctXNR2gg+c5pKrKMuyjgARg9qeY8= -github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH89961k= +github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= +github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= @@ -43,8 +43,8 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos= -github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI= -github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA= +github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js= +github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= @@ -52,10 +52,10 @@ github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LB github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= -github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -65,9 +65,16 @@ github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= -github.com/gorilla/sessions v1.1.1/go.mod 
h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.3.1 h1:Fcr8QJ1ZeLi5zsPZqQeUZhNhxfkkKBOgJuYkJHoBOtU= +github.com/jackc/pgx/v5 v5.3.1/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= @@ -89,12 +96,12 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= -github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA= -github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= +github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= -github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= @@ -108,11 +115,11 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= -github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= -github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= +github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod 
h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkoukk/tiktoken-go v0.1.1 h1:jtkYlIECjyM9OW1w4rjPmTohK4arORP9V25y6TM6nXo= -github.com/pkoukk/tiktoken-go v0.1.1/go.mod h1:boMWvk9pQCOTx11pgu0DrIdrAKgQzzJKUP6vLXaz7Rw= +github.com/pkoukk/tiktoken-go v0.1.5 h1:hAlT4dCf6Uk50x8E7HQrddhH3EWMKUN+LArExQQsQx4= +github.com/pkoukk/tiktoken-go v0.1.5/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= @@ -128,8 +135,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= @@ -142,11 +150,11 @@ golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUu golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= -golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -154,14 +162,14 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 
h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -185,9 +193,12 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/mysql v1.4.3 h1:/JhWJhO2v17d8hjApTltKNADm7K7YI2ogkR7avJUL3k= gorm.io/driver/mysql v1.4.3/go.mod h1:sSIebwZAVPiT+27jK9HIwvsqOGKx3YMPmrA3mBJR10c= +gorm.io/driver/postgres v1.5.2 h1:ytTDxxEv+MplXOfFe3Lzm7SjG09fcdb3Z/c056DTBx0= +gorm.io/driver/postgres v1.5.2/go.mod h1:fmpX0m2I1PKuR7mKZiEluwrP3hbs+ps7JIGMUBpCgl8= gorm.io/driver/sqlite v1.4.3 h1:HBBcZSDnWi5BW3B3rwvVTc510KGkBkexlOg0QrmLUuU= gorm.io/driver/sqlite v1.4.3/go.mod h1:0Aq3iPO+v9ZKbcdiz8gLWRw5VOPcBOPUQJFLq5e2ecI= gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= -gorm.io/gorm v1.24.0 h1:j/CoiSm6xpRpmzbFJsQHYj+I8bGYWLXVHeYEyyKlF74= gorm.io/gorm v1.24.0/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= +gorm.io/gorm v1.25.0 h1:+KtYtb2roDz14EQe4bla8CbQlmb9dN3VejSai3lprfU= +gorm.io/gorm v1.25.0/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/i18n/en.json b/i18n/en.json new file mode 100644 index 00000000..9b2ca4c8 --- /dev/null +++ b/i18n/en.json @@ -0,0 +1,528 @@ +{ + "$%.6f 额度": "$%.6f quota", + "%d 点额度": "%d point quota", + "尚未实现": "Not yet implemented", + "余额不足": "Insufficient balance", + "危险操作": "Hazardous operations", + "输入你的账户名": "Enter your account name", + "确认删除": "Confirm Delete", + "确认绑定": "Confirm Binding", + "您正在删除自己的帐户,将清空所有数据且不可恢复": "You are deleting your account, all data will be cleared and unrecoverable.", + "\"通道「%s」(#%d)已被禁用\"": "\"Channel %s (#%d) has been disabled\"", + "通道「%s」(#%d)已被禁用,原因:%s": "Channel %s (#%d) has been disabled, reason: %s", + "测试已在运行中": "Test is already running", + "响应时间 %.2fs 超过阈值 %.2fs": "Response time %.2fs exceeds threshold %.2fs", + "通道测试完成": "Channel test completed", + "通道测试完成,如果没有收到禁用通知,说明所有通道都正常": "Channel test completed, if you have not received the disable notification, it means that all channels are normal", + "无法连接至 GitHub 服务器,请稍后重试!": "Unable to connect to GitHub server, please try again later!", + "返回值非法,用户字段为空,请稍后重试!": "The return value is illegal, the user field is empty, please try again later!", + "管理员未开启通过 GitHub 登录以及注册": "The 
administrator has not enabled login and registration via GitHub",
+ "管理员关闭了新用户注册": "The administrator has turned off new user registration",
+ "用户已被封禁": "User has been banned",
+ "该 GitHub 账户已被绑定": "This GitHub account has already been bound",
+ "邮箱地址已被占用": "Email address is already in use",
+ "%s邮箱验证邮件": "%s Email Verification",
+ "您好,你正在进行%s邮箱验证。": "Hello, you are performing %s email verification.",
+ "您的验证码为: %s": "Your verification code is: %s",
+ "验证码 %d 分钟内有效,如果不是本人操作,请忽略。": "The verification code is valid for %d minutes. If you did not initiate this, please ignore it.",
+ "无效的参数": "Invalid parameter",
+ "该邮箱地址未注册": "The email address is not registered",
+ "%s密码重置": "%s Password Reset",
+ "您好,你正在进行%s密码重置。": "Hello, you are resetting your %s password.",
+ "点击此处进行密码重置。": "Click here to reset your password.",
+ "重置链接 %d 分钟内有效,如果不是本人操作,请忽略。": "The reset link is valid for %d minutes. If you did not initiate this, please ignore it.",
+ "重置链接非法或已过期": "The reset link is invalid or has expired",
+ "无法启用 GitHub OAuth,请先填入 GitHub Client ID 以及 GitHub Client Secret!": "Unable to enable GitHub OAuth, please fill in the GitHub Client ID and GitHub Client Secret first!",
+ "无法启用微信登录,请先填入微信登录相关配置信息!": "Unable to enable WeChat login, please fill in the relevant WeChat login configuration first!",
+ "无法启用 Turnstile 校验,请先填入 Turnstile 校验相关配置信息!": "Unable to enable Turnstile verification, please fill in the relevant Turnstile configuration first!",
+ "兑换码名称长度必须在1-20之间": "The length of the redemption code name must be between 1 and 20",
+ "兑换码个数必须大于0": "The number of redemption codes must be greater than 0",
+ "一次兑换码批量生成的个数不能大于 100": "The number of redemption codes generated in one batch cannot be greater than 100",
+ "通过令牌「%s」使用模型 %s 消耗 %s(模型倍率 %.2f,分组倍率 %.2f)": "Using model %s with token %s consumes %s (model rate %.2f, group rate %.2f)",
+ "当前分组上游负载已饱和,请稍后再试": "The upstream load of the current group is saturated, please try again later",
+ "令牌名称过长": "Token name is too long",
+ "令牌已过期,无法启用,请先修改令牌过期时间,或者设置为永不过期": "The token has expired and cannot be enabled. Please modify the token's expiration time, or set it to never expire.",
+ "令牌可用额度已用尽,无法启用,请先修改令牌剩余额度,或者设置为无限额度": "The token's available quota has been used up and it cannot be enabled. Please modify the token's remaining quota, or set it to unlimited quota",
+ "管理员关闭了密码登录": "The administrator has turned off password login",
+ "无法保存会话信息,请重试": "Unable to save session information, please try again",
+ "管理员关闭了通过密码进行注册,请使用第三方账户验证的形式进行注册": "The administrator has turned off registration via password. Please register through third-party account verification instead.",
+ "输入不合法 ": "Invalid input ",
+ "管理员开启了邮箱验证,请输入邮箱地址和验证码": "The administrator has enabled email verification, please enter the email address and verification code",
+ "验证码错误或已过期": "The verification code is incorrect or has expired",
+ "无权获取同级或更高等级用户的信息": "No permission to get information of users at the same level or higher",
+ "请重试,系统生成的 UUID 竟然重复了!": "Please try again, the system generated a duplicate UUID!",
+ "输入不合法": "Invalid input",
+ "无权更新同权限等级或更高权限等级的用户信息": "No permission to update information of users at the same permission level or higher",
+ "管理员将用户额度从 %s修改为 %s": "The administrator changed the user quota from %s to %s",
+ "无权删除同权限等级或更高权限等级的用户": "No permission to delete users at the same permission level or higher",
+ "无法创建权限大于等于自己的用户": "Unable to create users with permissions greater than or equal to your own",
+ "用户不存在": "User does not exist",
+ "无法禁用超级管理员用户": "Unable to disable the super administrator user",
+ "无法删除超级管理员用户": "Unable to delete the super administrator user",
+ "普通管理员用户无法提升其他用户为管理员": "Ordinary administrator users cannot promote other users to administrator",
+ "该用户已经是管理员": "The user is already an administrator",
+ "无法降级超级管理员用户": "Unable to demote the super administrator user",
+ "该用户已经是普通用户": "The user is already an ordinary user",
+ "管理员未开启通过微信登录以及注册": "The administrator has not enabled login and registration via WeChat",
+ "该微信账号已被绑定": "This WeChat account has already been bound",
+ "无权进行此操作,未登录且未提供 access token": "No permission to perform this operation: not logged in and no access token provided",
+ "无权进行此操作,access token 无效": "No permission to perform this operation: the access token is invalid",
+ "无权进行此操作,权限不足": "No permission to perform this operation: insufficient permissions",
+ "普通用户不支持指定渠道": "Ordinary users cannot specify a channel",
+ "无效的渠道 ID": "Invalid channel ID", + "该渠道已被禁用": "The channel has been disabled", + "无效的请求": "Invalid request", + "无可用渠道": "No available channels", + "Turnstile token 为空": "Turnstile token is empty", + "Turnstile 校验失败,请刷新重试!": "Turnstile verification failed, please refresh and try again!", + "id 为空!": "id is empty!", + "未提供兑换码": "No redemption code provided", + "无效的 user id": "Invalid user id", + "无效的兑换码": "Invalid redemption code", + "该兑换码已被使用": "The redemption code has been used", + "通过兑换码充值 %s": "Recharge %s through redemption code", + "未提供令牌": "No token provided", + "该令牌状态不可用": "The token status is not available", + "该令牌已过期": "The token has expired", + "该令牌额度已用尽": "The token quota has been used up", + "无效的令牌": "Invalid token", + "id 或 userId 为空!": "id or userId is empty!", + "quota 不能为负数!": "quota cannot be negative!", + "令牌额度不足": "Insufficient token quota", + "用户额度不足": "Insufficient user quota", + "您的额度即将用尽": "Your quota is about to run out", + "您的额度已用尽": "Your quota has been used up", + "%s,当前剩余额度为 %d,为了不影响您的使用,请及时充值。