Merge branch 'main' of https://github.com/songquanpeng/one-api
commit aa904af903
@@ -51,15 +51,15 @@ _✨ Access every major LLM through the standard OpenAI API format, ready to use out of the box ✨_
 <a href="https://iamazing.cn/page/reward">Sponsor</a>
 </p>

-> **Note**
+> [!NOTE]
 > This is an open-source project. Users must comply with OpenAI's [Terms of Use](https://openai.com/policies/terms-of-use) and all **applicable laws and regulations**; it must not be used for illegal purposes.
 >
 > Per the [Interim Measures for the Management of Generative AI Services](http://www.cac.gov.cn/2023-07/13/c_1690898327029107.htm), do not provide any unregistered generative AI service to the public in mainland China.

-> **Warning**
+> [!WARNING]
 > The latest image pulled via Docker may be an `alpha` build; pin a specific version if you need stability.

-> **Warning**
+> [!WARNING]
 > After logging in as root for the first time, be sure to change the default password `123456`!

 ## Features
@@ -7,6 +7,7 @@ import (
     "fmt"
     "net/smtp"
     "strings"
+    "time"
 )

 func SendEmail(subject string, receiver string, content string) error {
@@ -33,9 +34,9 @@ func SendEmail(subject string, receiver string, content string) error {
         "From: %s<%s>\r\n"+
         "Subject: %s\r\n"+
         "Message-ID: %s\r\n"+ // add Message-ID header to avoid being treated as spam, RFC 5322
+        "Date: %s\r\n"+
         "Content-Type: text/html; charset=UTF-8\r\n\r\n%s\r\n",
-        receiver, SystemName, SMTPFrom, encodedSubject, messageId, content))
+        receiver, SystemName, SMTPFrom, encodedSubject, messageId, time.Now().Format(time.RFC1123Z), content))

     auth := smtp.PlainAuth("", SMTPAccount, SMTPToken, SMTPServer)
     addr := fmt.Sprintf("%s:%d", SMTPServer, SMTPPort)
     to := strings.Split(receiver, ";")
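The new `Date` header uses Go's `time.RFC1123Z` layout, which matches the date format RFC 5322 expects in email headers. A minimal, standalone sketch of just the formatting call (not the project's code):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// RFC 5322 wants a date like "Mon, 02 Jan 2006 15:04:05 -0700",
	// which is exactly the layout time.RFC1123Z produces.
	fmt.Printf("Date: %s\r\n", time.Now().Format(time.RFC1123Z))
}
```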
@@ -76,6 +76,8 @@ var ModelRatio = map[string]float64{
     "dall-e-3":         20,     // $0.040 - $0.120 / image
     "claude-instant-1": 0.815,  // $1.63 / 1M tokens
     "claude-2":         5.51,   // $11.02 / 1M tokens
+    "claude-2.0":       5.51,   // $11.02 / 1M tokens
+    "claude-2.1":       5.51,   // $11.02 / 1M tokens
     "ERNIE-Bot":        0.8572, // ¥0.012 / 1k tokens
     "ERNIE-Bot-turbo":  0.5715, // ¥0.008 / 1k tokens
     "ERNIE-Bot-4":      8.572,  // ¥0.12 / 1k tokens
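A quick sanity check of the new Claude entries, assuming (as the surrounding comments suggest) that a ratio of 1 corresponds to $0.002 per 1K tokens, i.e. $2 per 1M tokens:

```go
package main

import "fmt"

func main() {
	ratio := 5.51 // claude-2.0 / claude-2.1, kept identical to claude-2
	// assumption: ratio 1 == $0.002 / 1K tokens == $2 / 1M tokens
	usdPerMillionTokens := ratio * 2.0
	fmt.Printf("$%.2f / 1M tokens\n", usdPerMillionTokens) // $11.02, matching the comment
}
```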
@@ -360,6 +360,24 @@ func init() {
         Root:       "claude-2",
         Parent:     nil,
     },
+    {
+        Id:         "claude-2.1",
+        Object:     "model",
+        Created:    1677649963,
+        OwnedBy:    "anthropic",
+        Permission: permission,
+        Root:       "claude-2.1",
+        Parent:     nil,
+    },
+    {
+        Id:         "claude-2.0",
+        Object:     "model",
+        Created:    1677649963,
+        OwnedBy:    "anthropic",
+        Permission: permission,
+        Root:       "claude-2.0",
+        Parent:     nil,
+    },
     {
         Id:         "ERNIE-Bot",
         Object:     "model",
@@ -5,11 +5,13 @@ import (
     "context"
     "encoding/json"
     "errors"
+    "fmt"
     "github.com/gin-gonic/gin"
     "io"
     "net/http"
     "one-api/common"
     "one-api/model"
+    "strings"
 )

 func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
@@ -37,41 +39,40 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
         }
     }

-    preConsumedTokens := common.PreConsumedQuota
     modelRatio := common.GetModelRatio(audioModel)
     groupRatio := common.GetGroupRatio(group)
     ratio := modelRatio * groupRatio
-    preConsumedQuota := int(float64(preConsumedTokens) * ratio)
+    var quota int
+    var preConsumedQuota int
+    switch relayMode {
+    case RelayModeAudioSpeech:
+        preConsumedQuota = int(float64(len(ttsRequest.Input)) * ratio)
+        quota = preConsumedQuota
+    default:
+        preConsumedQuota = int(float64(common.PreConsumedQuota) * ratio)
+    }
     userQuota, err := model.CacheGetUserQuota(userId)
     if err != nil {
         return errorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError)
     }

-    quota := 0
     // Check if user quota is enough
-    if relayMode == RelayModeAudioSpeech {
-        quota = int(float64(len(ttsRequest.Input)) * modelRatio * groupRatio)
-        if quota > userQuota {
-            return errorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
-        }
-    } else {
-        if userQuota-preConsumedQuota < 0 {
-            return errorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
-        }
-        err = model.CacheDecreaseUserQuota(userId, preConsumedQuota)
-        if err != nil {
-            return errorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError)
-        }
-        if userQuota > 100*preConsumedQuota {
-            // in this case, we do not pre-consume quota
-            // because the user has enough quota
-            preConsumedQuota = 0
-        }
-        if preConsumedQuota > 0 {
-            err := model.PreConsumeTokenQuota(tokenId, preConsumedQuota)
-            if err != nil {
-                return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
-            }
-        }
-    }
+    if userQuota-preConsumedQuota < 0 {
+        return errorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
+    }
+    err = model.CacheDecreaseUserQuota(userId, preConsumedQuota)
+    if err != nil {
+        return errorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError)
+    }
+    if userQuota > 100*preConsumedQuota {
+        // in this case, we do not pre-consume quota
+        // because the user has enough quota
+        preConsumedQuota = 0
+    }
+    if preConsumedQuota > 0 {
+        err := model.PreConsumeTokenQuota(tokenId, preConsumedQuota)
+        if err != nil {
+            return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
+        }
+    }

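The new switch separates the two audio modes: for speech (TTS) the cost is already known from the input length, so the exact amount is charged up front; for transcription the real cost is only known after the response, so a flat amount is reserved and settled later. A simplified, self-contained sketch of that decision, using an assumed stand-in constant rather than the project's `common.PreConsumedQuota`:

```go
package main

import "fmt"

const preConsumedQuotaDefault = 500 // assumed stand-in for common.PreConsumedQuota

func reserveQuota(isSpeech bool, inputLen int, ratio float64) (preConsumed int, exact bool) {
	if isSpeech {
		// TTS: price depends only on the input text, so it can be charged exactly.
		return int(float64(inputLen) * ratio), true
	}
	// Transcription: reserve a flat amount now, settle against real token usage later.
	return int(preConsumedQuotaDefault * ratio), false
}

func main() {
	fmt.Println(reserveQuota(true, 120, 15)) // 1800 true
	fmt.Println(reserveQuota(false, 0, 15))  // 7500 false
}
```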
@@ -95,13 +96,33 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
     }

     fullRequestURL := getFullRequestURL(baseURL, requestURL, channelType)
+    if relayMode == RelayModeAudioTranscription && channelType == common.ChannelTypeAzure {
+        // https://learn.microsoft.com/en-us/azure/ai-services/openai/whisper-quickstart?tabs=command-line#rest-api
+        query := c.Request.URL.Query()
+        apiVersion := query.Get("api-version")
+        if apiVersion == "" {
+            apiVersion = c.GetString("api_version")
+        }
+        baseURL = c.GetString("base_url")
+        fullRequestURL = fmt.Sprintf("%s/openai/deployments/%s/audio/transcriptions?api-version=%s", baseURL, audioModel, apiVersion)
+    }
+
     requestBody := c.Request.Body

     req, err := http.NewRequest(c.Request.Method, fullRequestURL, requestBody)
     if err != nil {
         return errorWrapper(err, "new_request_failed", http.StatusInternalServerError)
     }
-    req.Header.Set("Authorization", c.Request.Header.Get("Authorization"))
+
+    if relayMode == RelayModeAudioTranscription && channelType == common.ChannelTypeAzure {
+        // https://learn.microsoft.com/en-us/azure/ai-services/openai/whisper-quickstart?tabs=command-line#rest-api
+        apiKey := c.Request.Header.Get("Authorization")
+        apiKey = strings.TrimPrefix(apiKey, "Bearer ")
+        req.Header.Set("api-key", apiKey)
+        req.ContentLength = c.Request.ContentLength
+    } else {
+        req.Header.Set("Authorization", c.Request.Header.Get("Authorization"))
+    }
     req.Header.Set("Content-Type", c.Request.Header.Get("Content-Type"))
     req.Header.Set("Accept", c.Request.Header.Get("Accept"))

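For Azure transcription the request is rebuilt against the deployments endpoint. With illustrative values (the resource URL, deployment name, and api-version below are assumptions, not taken from the diff), the resulting URL looks like this:

```go
package main

import "fmt"

func main() {
	baseURL := "https://my-resource.openai.azure.com" // assumed example channel base_url
	audioModel := "whisper-1"                         // deployment name mapped from the requested model
	apiVersion := "2023-09-01-preview"                // assumed example api-version
	fullRequestURL := fmt.Sprintf("%s/openai/deployments/%s/audio/transcriptions?api-version=%s",
		baseURL, audioModel, apiVersion)
	fmt.Println(fullRequestURL)
	// https://my-resource.openai.azure.com/openai/deployments/whisper-1/audio/transcriptions?api-version=2023-09-01-preview
}
```

Azure also authenticates with an `api-key` header instead of `Authorization: Bearer ...`, which is why the header handling branches in the hunk above.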
@@ -119,11 +140,7 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
         return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
     }

-    if relayMode == RelayModeAudioSpeech {
-        defer func(ctx context.Context) {
-            go postConsumeQuota(ctx, tokenId, quota, userId, channelId, modelRatio, groupRatio, audioModel, tokenName)
-        }(c.Request.Context())
-    } else {
+    if relayMode != RelayModeAudioSpeech {
         responseBody, err := io.ReadAll(resp.Body)
         if err != nil {
             return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
@@ -137,13 +154,29 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
         if err != nil {
             return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError)
         }
-        defer func(ctx context.Context) {
-            quota := countTokenText(whisperResponse.Text, audioModel)
-            quotaDelta := quota - preConsumedQuota
-            go postConsumeQuota(ctx, tokenId, quotaDelta, userId, channelId, modelRatio, groupRatio, audioModel, tokenName)
-        }(c.Request.Context())
+        quota = countTokenText(whisperResponse.Text, audioModel)
         resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
     }
+    if resp.StatusCode != http.StatusOK {
+        if preConsumedQuota > 0 {
+            // we need to roll back the pre-consumed quota
+            defer func(ctx context.Context) {
+                go func() {
+                    // negative means add quota back for token & user
+                    err := model.PostConsumeTokenQuota(tokenId, -preConsumedQuota)
+                    if err != nil {
+                        common.LogError(ctx, fmt.Sprintf("error rollback pre-consumed quota: %s", err.Error()))
+                    }
+                }()
+            }(c.Request.Context())
+        }
+        return relayErrorHandler(resp)
+    }
+    quotaDelta := quota - preConsumedQuota
+    defer func(ctx context.Context) {
+        go postConsumeQuota(ctx, tokenId, quotaDelta, quota, userId, channelId, modelRatio, groupRatio, audioModel, tokenName)
+    }(c.Request.Context())

     for k, v := range resp.Header {
         c.Writer.Header().Set(k, v[0])
     }
@@ -70,7 +70,9 @@ func requestOpenAI2Claude(textRequest GeneralOpenAIRequest) *ClaudeRequest {
         } else if message.Role == "assistant" {
             prompt += fmt.Sprintf("\n\nAssistant: %s", message.Content)
         } else if message.Role == "system" {
-            prompt += fmt.Sprintf("\n\nSystem: %s", message.Content)
+            if prompt == "" {
+                prompt = message.StringContent()
+            }
         }
     }
     prompt += "\n\nAssistant:"
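With this change a system message is only used verbatim as the opening of the Claude prompt (when nothing has been emitted yet) instead of being appended as "\n\nSystem: ...". A simplified sketch of the mapping with assumed types; the real converter also handles richer message content:

```go
package main

import "fmt"

type message struct{ Role, Content string }

func buildClaudePrompt(messages []message) string {
	prompt := ""
	for _, m := range messages {
		switch m.Role {
		case "user":
			prompt += fmt.Sprintf("\n\nHuman: %s", m.Content)
		case "assistant":
			prompt += fmt.Sprintf("\n\nAssistant: %s", m.Content)
		case "system":
			if prompt == "" { // only honored when it is the very first content
				prompt = m.Content
			}
		}
	}
	return prompt + "\n\nAssistant:"
}

func main() {
	fmt.Println(buildClaudePrompt([]message{
		{Role: "system", Content: "You are a helpful assistant."},
		{Role: "user", Content: "Hello"},
	}))
}
```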
@@ -35,15 +35,12 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
     channelId := c.GetInt("channel_id")
     apiVersion := c.GetString("api_version")
     userId := c.GetInt("id")
-    consumeQuota := c.GetBool("consume_quota")
     group := c.GetString("group")

     var imageRequest ImageRequest
-    if consumeQuota {
-        err := common.UnmarshalBodyReusable(c, &imageRequest)
-        if err != nil {
-            return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
-        }
+    err := common.UnmarshalBodyReusable(c, &imageRequest)
+    if err != nil {
+        return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
     }

     // Size validation
@@ -131,7 +128,7 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode

     quota := int(ratio*imageCostRatio*1000) * imageRequest.N

-    if consumeQuota && userQuota-quota < 0 {
+    if userQuota-quota < 0 {
         return errorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
     }

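A back-of-the-envelope check of the image quota formula, using the dall-e-3 ratio of 20 from ModelRatio above; the group ratio of 1, the imageCostRatio of 1 for a standard 1024x1024 image, and the $0.002 per 1K quota conversion are assumptions for illustration, not values taken from the diff:

```go
package main

import "fmt"

func main() {
	ratio := 20.0         // dall-e-3 model ratio, group ratio assumed 1
	imageCostRatio := 1.0 // assumed: standard quality, 1024x1024
	n := 2                // imageRequest.N
	quota := int(ratio*imageCostRatio*1000) * n
	fmt.Println(quota) // 40000, i.e. roughly $0.040 per image at $0.002 per 1K quota,
	// which lines up with the "$0.040 - $0.120 / image" comment in ModelRatio
}
```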
@@ -166,43 +163,39 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
     var textResponse ImageResponse

     defer func(ctx context.Context) {
-        if consumeQuota {
-            err := model.PostConsumeTokenQuota(tokenId, quota)
-            if err != nil {
-                common.SysError("error consuming token remain quota: " + err.Error())
-            }
-            err = model.CacheUpdateUserQuota(userId)
-            if err != nil {
-                common.SysError("error update user quota cache: " + err.Error())
-            }
-            if quota != 0 {
-                tokenName := c.GetString("token_name")
-                logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
-                model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageModel, tokenName, quota, logContent)
-                model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
-                channelId := c.GetInt("channel_id")
-                model.UpdateChannelUsedQuota(channelId, quota)
-            }
+        err := model.PostConsumeTokenQuota(tokenId, quota)
+        if err != nil {
+            common.SysError("error consuming token remain quota: " + err.Error())
+        }
+        err = model.CacheUpdateUserQuota(userId)
+        if err != nil {
+            common.SysError("error update user quota cache: " + err.Error())
+        }
+        if quota != 0 {
+            tokenName := c.GetString("token_name")
+            logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
+            model.RecordConsumeLog(ctx, userId, channelId, 0, 0, imageModel, tokenName, quota, logContent)
+            model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
+            channelId := c.GetInt("channel_id")
+            model.UpdateChannelUsedQuota(channelId, quota)
         }
     }(c.Request.Context())

-    if consumeQuota {
-        responseBody, err := io.ReadAll(resp.Body)
+    responseBody, err := io.ReadAll(resp.Body)

-        if err != nil {
-            return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
-        }
-        err = resp.Body.Close()
-        if err != nil {
-            return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
-        }
-        err = json.Unmarshal(responseBody, &textResponse)
-        if err != nil {
-            return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError)
-        }
-
-        resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
+    if err != nil {
+        return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
+    }
+    err = resp.Body.Close()
+    if err != nil {
+        return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
+    }
+    err = json.Unmarshal(responseBody, &textResponse)
+    if err != nil {
+        return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError)
     }

+    resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
+
     for k, v := range resp.Header {
         c.Writer.Header().Set(k, v[0])
@@ -88,30 +88,29 @@ func openaiStreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*OpenAIErrorWithStatusCode, string) {
     return nil, responseText
 }

-func openaiHandler(c *gin.Context, resp *http.Response, consumeQuota bool, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) {
+func openaiHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) {
     var textResponse TextResponse
-    if consumeQuota {
-        responseBody, err := io.ReadAll(resp.Body)
-        if err != nil {
-            return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
-        }
-        err = resp.Body.Close()
-        if err != nil {
-            return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-        }
-        err = json.Unmarshal(responseBody, &textResponse)
-        if err != nil {
-            return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
-        }
-        if textResponse.Error.Type != "" {
-            return &OpenAIErrorWithStatusCode{
-                OpenAIError: textResponse.Error,
-                StatusCode:  resp.StatusCode,
-            }, nil
-        }
-        // Reset response body
-        resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
+    responseBody, err := io.ReadAll(resp.Body)
+    if err != nil {
+        return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
+    }
+    err = resp.Body.Close()
+    if err != nil {
+        return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
+    }
+    err = json.Unmarshal(responseBody, &textResponse)
+    if err != nil {
+        return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
+    }
+    if textResponse.Error.Type != "" {
+        return &OpenAIErrorWithStatusCode{
+            OpenAIError: textResponse.Error,
+            StatusCode:  resp.StatusCode,
+        }, nil
     }
+    // Reset response body
+    resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))

     // We shouldn't set the header before we parse the response body, because the parse part may fail.
     // And then we will have to send an error response, but in this case, the header has already been set.
     // So the httpClient will be confused by the response.
@@ -120,7 +119,7 @@ func openaiHandler(c *gin.Context, resp *http.Response, consumeQuota bool, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) {
         c.Writer.Header().Set(k, v[0])
     }
     c.Writer.WriteHeader(resp.StatusCode)
-    _, err := io.Copy(c.Writer, resp.Body)
+    _, err = io.Copy(c.Writer, resp.Body)
     if err != nil {
         return errorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError), nil
     }
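The comment above is the reason the handler reads and validates the body before touching the response writer: once a status has been written it cannot be taken back. A tiny illustration of that behavior using the standard library's test recorder (not project code):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	rec := httptest.NewRecorder()
	rec.WriteHeader(http.StatusOK)
	rec.WriteHeader(http.StatusInternalServerError) // ignored: the status is already committed
	fmt.Println(rec.Code)                           // 200
}
```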
@@ -51,14 +51,11 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
     channelId := c.GetInt("channel_id")
     tokenId := c.GetInt("token_id")
     userId := c.GetInt("id")
-    consumeQuota := c.GetBool("consume_quota")
     group := c.GetString("group")
     var textRequest GeneralOpenAIRequest
-    if consumeQuota || channelType == common.ChannelTypeAzure || channelType == common.ChannelTypePaLM {
-        err := common.UnmarshalBodyReusable(c, &textRequest)
-        if err != nil {
-            return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
-        }
+    err := common.UnmarshalBodyReusable(c, &textRequest)
+    if err != nil {
+        return errorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
     }
     if relayMode == RelayModeModerations && textRequest.Model == "" {
         textRequest.Model = "text-moderation-latest"
@@ -235,7 +232,7 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
         preConsumedQuota = 0
         common.LogInfo(c.Request.Context(), fmt.Sprintf("user %d has enough quota %d, trusted and no need to pre-consume", userId, userQuota))
     }
-    if consumeQuota && preConsumedQuota > 0 {
+    if preConsumedQuota > 0 {
         err := model.PreConsumeTokenQuota(tokenId, preConsumedQuota)
         if err != nil {
             return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
@@ -414,37 +411,36 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
     defer func(ctx context.Context) {
         // c.Writer.Flush()
         go func() {
-            if consumeQuota {
-                quota := 0
-                completionRatio := common.GetCompletionRatio(textRequest.Model)
-                promptTokens = textResponse.Usage.PromptTokens
-                completionTokens = textResponse.Usage.CompletionTokens
-                quota = int(math.Ceil((float64(promptTokens) + float64(completionTokens)*completionRatio) * ratio))
-                if ratio != 0 && quota <= 0 {
-                    quota = 1
-                }
-                totalTokens := promptTokens + completionTokens
-                if totalTokens == 0 {
-                    // in this case, must be some error happened
-                    // we cannot just return, because we may have to return the pre-consumed quota
-                    quota = 0
-                }
-                quotaDelta := quota - preConsumedQuota
-                err := model.PostConsumeTokenQuota(tokenId, quotaDelta)
-                if err != nil {
-                    common.LogError(ctx, "error consuming token remain quota: "+err.Error())
-                }
-                err = model.CacheUpdateUserQuota(userId)
-                if err != nil {
-                    common.LogError(ctx, "error update user quota cache: "+err.Error())
-                }
-                if quota != 0 {
-                    logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
-                    model.RecordConsumeLog(ctx, userId, channelId, promptTokens, completionTokens, textRequest.Model, tokenName, quota, logContent)
-                    model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
-                    model.UpdateChannelUsedQuota(channelId, quota)
-                }
+            quota := 0
+            completionRatio := common.GetCompletionRatio(textRequest.Model)
+            promptTokens = textResponse.Usage.PromptTokens
+            completionTokens = textResponse.Usage.CompletionTokens
+            quota = int(math.Ceil((float64(promptTokens) + float64(completionTokens)*completionRatio) * ratio))
+            if ratio != 0 && quota <= 0 {
+                quota = 1
+            }
+            totalTokens := promptTokens + completionTokens
+            if totalTokens == 0 {
+                // in this case, must be some error happened
+                // we cannot just return, because we may have to return the pre-consumed quota
+                quota = 0
+            }
+            quotaDelta := quota - preConsumedQuota
+            err := model.PostConsumeTokenQuota(tokenId, quotaDelta)
+            if err != nil {
+                common.LogError(ctx, "error consuming token remain quota: "+err.Error())
+            }
+            err = model.CacheUpdateUserQuota(userId)
+            if err != nil {
+                common.LogError(ctx, "error update user quota cache: "+err.Error())
+            }
+            if quota != 0 {
+                logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
+                model.RecordConsumeLog(ctx, userId, channelId, promptTokens, completionTokens, textRequest.Model, tokenName, quota, logContent)
+                model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
+                model.UpdateChannelUsedQuota(channelId, quota)
             }
+
         }()
     }(c.Request.Context())
     switch apiType {
@@ -458,7 +454,7 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
         textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
         return nil
     } else {
-        err, usage := openaiHandler(c, resp, consumeQuota, promptTokens, textRequest.Model)
+        err, usage := openaiHandler(c, resp, promptTokens, textRequest.Model)
         if err != nil {
             return err
         }
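The settlement math is unchanged but now always runs: quota = ceil((promptTokens + completionTokens * completionRatio) * ratio), after which the pre-consumed amount is subtracted and only the delta is applied. A worked example with made-up numbers (the completion ratio and combined model/group ratio are illustrative values only):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	promptTokens := 100
	completionTokens := 50
	completionRatio := 1.33 // assumed example value; looked up per model in the real code
	ratio := 15.0           // modelRatio * groupRatio, assumed example value
	quota := int(math.Ceil((float64(promptTokens) + float64(completionTokens)*completionRatio) * ratio))
	fmt.Println(quota) // 2498; the pre-consumed amount is then subtracted before settling
}
```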
@@ -200,8 +200,9 @@ func getFullRequestURL(baseURL string, requestURL string, channelType int) string {
     return fullRequestURL
 }

-func postConsumeQuota(ctx context.Context, tokenId int, quota int, userId int, channelId int, modelRatio float64, groupRatio float64, modelName string, tokenName string) {
-    err := model.PostConsumeTokenQuota(tokenId, quota)
+func postConsumeQuota(ctx context.Context, tokenId int, quotaDelta int, totalQuota int, userId int, channelId int, modelRatio float64, groupRatio float64, modelName string, tokenName string) {
+    // quotaDelta is remaining quota to be consumed
+    err := model.PostConsumeTokenQuota(tokenId, quotaDelta)
     if err != nil {
         common.SysError("error consuming token remain quota: " + err.Error())
     }
@@ -209,10 +210,14 @@ func postConsumeQuota(ctx context.Context, tokenId int, quota int, userId int, channelId int, modelRatio float64, groupRatio float64, modelName string, tokenName string) {
     if err != nil {
         common.SysError("error update user quota cache: " + err.Error())
     }
-    if quota != 0 {
+    // totalQuota is total quota consumed
+    if totalQuota != 0 {
         logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
-        model.RecordConsumeLog(ctx, userId, channelId, 0, 0, modelName, tokenName, quota, logContent)
-        model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
-        model.UpdateChannelUsedQuota(channelId, quota)
+        model.RecordConsumeLog(ctx, userId, channelId, totalQuota, 0, modelName, tokenName, totalQuota, logContent)
+        model.UpdateUserUsedQuotaAndRequestCount(userId, totalQuota)
+        model.UpdateChannelUsedQuota(channelId, totalQuota)
+    }
+    if totalQuota <= 0 {
+        common.LogError(ctx, fmt.Sprintf("totalQuota consumed is %d, something is wrong", totalQuota))
     }
 }
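postConsumeQuota now takes both the delta to settle and the total to record: quotaDelta adjusts the token/user balance against what was reserved up front (a negative delta is a refund), while totalQuota is what gets logged and added to the usage counters. A stubbed sketch of the calling convention, with illustrative values and a stand-in function rather than the project's model package:

```go
package main

import "fmt"

// stand-in for model.PostConsumeTokenQuota: a negative delta gives quota back
func postConsumeTokenQuota(tokenId int, quotaDelta int) {
	fmt.Printf("token %d: apply delta %+d\n", tokenId, quotaDelta)
}

func main() {
	preConsumedQuota := 1000 // reserved before calling upstream
	quota := 800             // real cost computed from the response
	quotaDelta := quota - preConsumedQuota

	postConsumeTokenQuota(42, quotaDelta) // -200 => the over-reservation is refunded
	fmt.Println("logged / counted against usage:", quota)
}
```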
|
@ -106,12 +106,6 @@ func TokenAuth() func(c *gin.Context) {
|
|||||||
c.Set("id", token.UserId)
|
c.Set("id", token.UserId)
|
||||||
c.Set("token_id", token.Id)
|
c.Set("token_id", token.Id)
|
||||||
c.Set("token_name", token.Name)
|
c.Set("token_name", token.Name)
|
||||||
requestURL := c.Request.URL.String()
|
|
||||||
consumeQuota := true
|
|
||||||
if strings.HasPrefix(requestURL, "/v1/models") {
|
|
||||||
consumeQuota = false
|
|
||||||
}
|
|
||||||
c.Set("consume_quota", consumeQuota)
|
|
||||||
if len(parts) > 1 {
|
if len(parts) > 1 {
|
||||||
if model.IsAdmin(token.UserId) {
|
if model.IsAdmin(token.UserId) {
|
||||||
c.Set("channelId", parts[1])
|
c.Set("channelId", parts[1])
|
||||||
|
3
pull_request_template.md
Normal file
3
pull_request_template.md
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
close #issue_number
|
||||||
|
|
||||||
|
我已确认该 PR 已自测通过,相关截图如下:
|
@@ -60,7 +60,7 @@ const EditChannel = () => {
       let localModels = [];
       switch (value) {
         case 14:
-          localModels = ['claude-instant-1', 'claude-2'];
+          localModels = ['claude-instant-1', 'claude-2', 'claude-2.0', 'claude-2.1'];
           break;
         case 11:
           localModels = ['PaLM-2'];