🎨 Restructure the relay controllers

parent 2114bc1982
commit be364ae09b
@@ -96,7 +96,7 @@ func SendRequest(req *http.Request, response any, outputResp bool) (*http.Respon
 	// Send the request
 	resp, err := HttpClient.Do(req)
 	if err != nil {
-		return nil, types.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError)
+		return nil, ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError)
 	}

 	if !outputResp {
@@ -120,7 +120,7 @@ func SendRequest(req *http.Request, response any, outputResp bool) (*http.Respon
 		err = DecodeResponse(resp.Body, response)
 	}
 	if err != nil {
-		return nil, types.ErrorWrapper(err, "decode_response_failed", http.StatusInternalServerError)
+		return nil, ErrorWrapper(err, "decode_response_failed", http.StatusInternalServerError)
 	}

 	if outputResp {
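The hunk above switches SendRequest's error wrapping from types.ErrorWrapper to the common package's own ErrorWrapper. Below is a minimal sketch of a caller, assuming SendRequest is exported from the common package (the unqualified HttpClient and ErrorWrapper calls suggest it lives there) and that its second return value is the *types.OpenAIErrorWithStatusCode produced by ErrorWrapper; the helper name fetchJSON is illustrative, and the request is built with the standard library to avoid guessing at other helpers.

package example

import (
	"context"
	"io"
	"net/http"

	"one-api/common"
	"one-api/types"
)

// fetchJSON is a hypothetical caller: it posts a payload and lets
// common.SendRequest decode the JSON response body into out. Passing
// outputResp=false means we only want the decoded struct, not the raw
// *http.Response.
func fetchJSON(ctx context.Context, url string, payload io.Reader, out any) *types.OpenAIErrorWithStatusCode {
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, payload)
	if err != nil {
		return common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}
	req.Header.Set("Content-Type", "application/json")

	_, errWithCode := common.SendRequest(req, out, false)
	return errWithCode
}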
@@ -2,11 +2,12 @@ package common

 import (
 	"bytes"
 	"encoding/json"
 	"fmt"
 	"io"
 	"strings"
 	"one-api/types"

 	"github.com/gin-gonic/gin"
 	"github.com/go-playground/validator/v10"
 )

 func UnmarshalBodyReusable(c *gin.Context, v any) error {
@@ -18,17 +19,43 @@ func UnmarshalBodyReusable(c *gin.Context, v any) error {
 	if err != nil {
 		return err
 	}
+	contentType := c.Request.Header.Get("Content-Type")
+	if strings.HasPrefix(contentType, "application/json") {
+		err = json.Unmarshal(requestBody, &v)
+	} else if strings.HasPrefix(contentType, "multipart/form-data") {
+		c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
+		err = c.ShouldBind(v)
+	}
-	c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
-	err = c.ShouldBind(v)
 	if err != nil {
 		if errs, ok := err.(validator.ValidationErrors); ok {
 			// Return the name of the first invalid field
 			return fmt.Errorf("field %s is required", errs[0].Field())
 		}
 		return err
 	}
 	// Reset request body
 	c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
 	return nil
 }

+func ErrorWrapper(err error, code string, statusCode int) *types.OpenAIErrorWithStatusCode {
+	return StringErrorWrapper(err.Error(), code, statusCode)
+}
+
+func StringErrorWrapper(err string, code string, statusCode int) *types.OpenAIErrorWithStatusCode {
+	openAIError := types.OpenAIError{
+		Message: err,
+		Type:    "one_api_error",
+		Code:    code,
+	}
+	return &types.OpenAIErrorWithStatusCode{
+		OpenAIError: openAIError,
+		StatusCode:  statusCode,
+	}
+}
+
+func AbortWithMessage(c *gin.Context, statusCode int, message string) {
+	c.JSON(statusCode, gin.H{
+		"error": gin.H{
+			"message": message,
+			"type":    "one_api_error",
+		},
+	})
+	c.Abort()
+	LogError(c.Request.Context(), message)
+}
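The three helpers above (ErrorWrapper, StringErrorWrapper, AbortWithMessage) are the common-package replacements for the old types.ErrorWrapper call sites touched throughout this commit. The sketch below shows what they produce and the early-exit pattern the new controllers below rely on; the field values follow directly from the function bodies above, and the demo function names are illustrative only.

package example

import (
	"net/http"

	"one-api/common"
	"one-api/types"

	"github.com/gin-gonic/gin"
)

// demoErrorWrapper shows the shape of the wrapped error built by StringErrorWrapper.
func demoErrorWrapper() *types.OpenAIErrorWithStatusCode {
	errWithCode := common.StringErrorWrapper("user quota is not enough", "insufficient_user_quota", http.StatusForbidden)
	// errWithCode.OpenAIError.Message == "user quota is not enough"
	// errWithCode.OpenAIError.Type    == "one_api_error"
	// errWithCode.OpenAIError.Code    == "insufficient_user_quota"
	// errWithCode.StatusCode          == http.StatusForbidden
	return errWithCode
}

// demoBind shows the early-exit pattern used by the new Relay* controllers:
// bind the reusable request body, abort with the JSON error envelope on failure.
func demoBind(c *gin.Context) {
	var req types.ChatCompletionRequest
	if err := common.UnmarshalBodyReusable(c, &req); err != nil {
		common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
		return
	}
	// ... hand req to a provider here ...
}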
controller/relay-chat.go (new file, 91 lines)
@ -0,0 +1,91 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelayChat(c *gin.Context) {
|
||||
|
||||
var chatRequest types.ChatCompletionRequest
|
||||
if err := common.UnmarshalBodyReusable(c, &chatRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, chatRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
// 写入渠道信息
|
||||
setChannelToContext(c, channel)
|
||||
|
||||
// 解析模型映射
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(c.GetString("model_mapping"))
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[chatRequest.Model] != "" {
|
||||
chatRequest.Model = modelMap[chatRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// 获取供应商
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeChatCompletions)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
chatProvider, ok := provider.(providersBase.ChatInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// 获取Input Tokens
|
||||
promptTokens := common.CountTokenMessages(chatRequest.Messages, chatRequest.Model)
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, chatRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = chatProvider.ChatAction(&chatRequest, isModelMapped, promptTokens)
|
||||
|
||||
// 如果报错,则退还配额
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// 如果没有报错,则消费配额
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
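RelayChat is the first of the per-endpoint controllers that replace the single Relay() dispatcher changed later in this commit. The router wiring itself is not part of this diff; the sketch below is an assumption about how these handlers would be mounted, reusing the paths the old dispatcher matched by prefix and assuming the group is registered at /v1.

package example

import (
	"one-api/controller"

	"github.com/gin-gonic/gin"
)

// registerRelayRoutes is hypothetical router wiring: it mounts the controllers
// added in this commit on the OpenAI-compatible paths the old Relay() matched.
func registerRelayRoutes(v1 *gin.RouterGroup) {
	v1.POST("/chat/completions", controller.RelayChat)
	v1.POST("/completions", controller.RelayCompletions)
	v1.POST("/embeddings", controller.RelayEmbeddings)
	v1.POST("/moderations", controller.RelayModerations)
	v1.POST("/audio/speech", controller.RelaySpeech)
	v1.POST("/audio/transcriptions", controller.RelayTranscriptions)
	v1.POST("/audio/translations", controller.RelayTranslations)
	v1.POST("/images/generations", controller.RelayImageGenerations)
	v1.POST("/images/edits", controller.RelayImageEdits)
	v1.POST("/images/variations", controller.RelayImageVariations)
}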
controller/relay-completions.go (new file, 91 lines)
@ -0,0 +1,91 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelayCompletions(c *gin.Context) {
|
||||
|
||||
var completionRequest types.CompletionRequest
|
||||
if err := common.UnmarshalBodyReusable(c, &completionRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, completionRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
// 写入渠道信息
|
||||
setChannelToContext(c, channel)
|
||||
|
||||
// 解析模型映射
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(c.GetString("model_mapping"))
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[completionRequest.Model] != "" {
|
||||
completionRequest.Model = modelMap[completionRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// 获取供应商
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeCompletions)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
completionProvider, ok := provider.(providersBase.CompletionInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// 获取Input Tokens
|
||||
promptTokens := common.CountTokenInput(completionRequest.Prompt, completionRequest.Model)
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, completionRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = completionProvider.CompleteAction(&completionRequest, isModelMapped, promptTokens)
|
||||
|
||||
// 如果报错,则退还配额
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// 如果没有报错,则消费配额
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
controller/relay-embeddings.go (new file, 96 lines)
@ -0,0 +1,96 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
"strings"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelayEmbeddings(c *gin.Context) {
|
||||
|
||||
var embeddingsRequest types.EmbeddingRequest
|
||||
if strings.HasSuffix(c.Request.URL.Path, "embeddings") {
|
||||
embeddingsRequest.Model = c.Param("model")
|
||||
}
|
||||
|
||||
if err := common.UnmarshalBodyReusable(c, &embeddingsRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, embeddingsRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
// 写入渠道信息
|
||||
setChannelToContext(c, channel)
|
||||
|
||||
// 解析模型映射
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(c.GetString("model_mapping"))
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[embeddingsRequest.Model] != "" {
|
||||
embeddingsRequest.Model = modelMap[embeddingsRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// 获取供应商
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeEmbeddings)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
embeddingsProvider, ok := provider.(providersBase.EmbeddingsInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// 获取Input Tokens
|
||||
promptTokens := common.CountTokenInput(embeddingsRequest.Input, embeddingsRequest.Model)
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, embeddingsRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = embeddingsProvider.EmbeddingsAction(&embeddingsRequest, isModelMapped, promptTokens)
|
||||
|
||||
// 如果报错,则退还配额
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// 如果没有报错,则消费配额
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
@@ -25,16 +25,16 @@ func relayHelper(c *gin.Context, relayMode int) *types.OpenAIErrorWithStatusCode
 	// Get the provider
 	provider := providers.GetProvider(channelType, c)
 	if provider == nil {
-		return types.ErrorWrapper(errors.New("channel not found"), "channel_not_found", http.StatusNotImplemented)
+		return common.ErrorWrapper(errors.New("channel not found"), "channel_not_found", http.StatusNotImplemented)
 	}

 	if !provider.SupportAPI(relayMode) {
-		return types.ErrorWrapper(errors.New("channel does not support this API"), "channel_not_support_api", http.StatusNotImplemented)
+		return common.ErrorWrapper(errors.New("channel does not support this API"), "channel_not_support_api", http.StatusNotImplemented)
 	}

 	modelMap, err := parseModelMapping(c.GetString("model_mapping"))
 	if err != nil {
-		return types.ErrorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError)
+		return common.ErrorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError)
 	}

 	quotaInfo := &QuotaInfo{
@@ -70,7 +70,7 @@ func relayHelper(c *gin.Context, relayMode int) *types.OpenAIErrorWithStatusCode
 	case common.RelayModeImagesVariations:
 		usage, openAIErrorWithStatusCode = handleImageEdits(c, provider, modelMap, quotaInfo, group, "variation")
 	default:
-		return types.ErrorWrapper(errors.New("invalid relay mode"), "invalid_relay_mode", http.StatusBadRequest)
+		return common.ErrorWrapper(errors.New("invalid relay mode"), "invalid_relay_mode", http.StatusBadRequest)
 	}

 	if openAIErrorWithStatusCode != nil {
@ -105,16 +105,16 @@ func handleChatCompletions(c *gin.Context, provider providers_base.ProviderInter
|
||||
|
||||
chatProvider, ok := provider.(providers_base.ChatInterface)
|
||||
if !ok {
|
||||
return nil, types.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
return nil, common.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
err := common.UnmarshalBodyReusable(c, &chatRequest)
|
||||
if err != nil {
|
||||
return nil, types.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
|
||||
return nil, common.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
if chatRequest.Messages == nil || len(chatRequest.Messages) == 0 {
|
||||
return nil, types.ErrorWrapper(errors.New("field messages is required"), "required_field_missing", http.StatusBadRequest)
|
||||
return nil, common.ErrorWrapper(errors.New("field messages is required"), "required_field_missing", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
if modelMap != nil && modelMap[chatRequest.Model] != "" {
|
||||
@ -138,16 +138,16 @@ func handleCompletions(c *gin.Context, provider providers_base.ProviderInterface
|
||||
isModelMapped := false
|
||||
completionProvider, ok := provider.(providers_base.CompletionInterface)
|
||||
if !ok {
|
||||
return nil, types.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
return nil, common.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
err := common.UnmarshalBodyReusable(c, &completionRequest)
|
||||
if err != nil {
|
||||
return nil, types.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
|
||||
return nil, common.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
if completionRequest.Prompt == "" {
|
||||
return nil, types.ErrorWrapper(errors.New("field prompt is required"), "required_field_missing", http.StatusBadRequest)
|
||||
return nil, common.ErrorWrapper(errors.New("field prompt is required"), "required_field_missing", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
if modelMap != nil && modelMap[completionRequest.Model] != "" {
|
||||
@ -171,16 +171,16 @@ func handleEmbeddings(c *gin.Context, provider providers_base.ProviderInterface,
|
||||
isModelMapped := false
|
||||
embeddingsProvider, ok := provider.(providers_base.EmbeddingsInterface)
|
||||
if !ok {
|
||||
return nil, types.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
return nil, common.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
err := common.UnmarshalBodyReusable(c, &embeddingsRequest)
|
||||
if err != nil {
|
||||
return nil, types.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
|
||||
return nil, common.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
if embeddingsRequest.Input == "" {
|
||||
return nil, types.ErrorWrapper(errors.New("field input is required"), "required_field_missing", http.StatusBadRequest)
|
||||
return nil, common.ErrorWrapper(errors.New("field input is required"), "required_field_missing", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
if modelMap != nil && modelMap[embeddingsRequest.Model] != "" {
|
||||
@ -204,16 +204,16 @@ func handleModerations(c *gin.Context, provider providers_base.ProviderInterface
|
||||
isModelMapped := false
|
||||
moderationProvider, ok := provider.(providers_base.ModerationInterface)
|
||||
if !ok {
|
||||
return nil, types.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
return nil, common.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
err := common.UnmarshalBodyReusable(c, &moderationRequest)
|
||||
if err != nil {
|
||||
return nil, types.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
|
||||
return nil, common.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
if moderationRequest.Input == "" {
|
||||
return nil, types.ErrorWrapper(errors.New("field input is required"), "required_field_missing", http.StatusBadRequest)
|
||||
return nil, common.ErrorWrapper(errors.New("field input is required"), "required_field_missing", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
if moderationRequest.Model == "" {
|
||||
@ -241,16 +241,16 @@ func handleSpeech(c *gin.Context, provider providers_base.ProviderInterface, mod
|
||||
isModelMapped := false
|
||||
speechProvider, ok := provider.(providers_base.SpeechInterface)
|
||||
if !ok {
|
||||
return nil, types.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
return nil, common.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
err := common.UnmarshalBodyReusable(c, &speechRequest)
|
||||
if err != nil {
|
||||
return nil, types.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
|
||||
return nil, common.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
if speechRequest.Input == "" {
|
||||
return nil, types.ErrorWrapper(errors.New("field input is required"), "required_field_missing", http.StatusBadRequest)
|
||||
return nil, common.ErrorWrapper(errors.New("field input is required"), "required_field_missing", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
if modelMap != nil && modelMap[speechRequest.Model] != "" {
|
||||
@ -274,17 +274,17 @@ func handleTranscriptions(c *gin.Context, provider providers_base.ProviderInterf
|
||||
isModelMapped := false
|
||||
speechProvider, ok := provider.(providers_base.TranscriptionsInterface)
|
||||
if !ok {
|
||||
return nil, types.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
return nil, common.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
err := common.UnmarshalBodyReusable(c, &audioRequest)
|
||||
if err != nil {
|
||||
return nil, types.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
|
||||
return nil, common.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
if audioRequest.File == nil {
|
||||
fmt.Println(audioRequest)
|
||||
return nil, types.ErrorWrapper(errors.New("field file is required"), "required_field_missing", http.StatusBadRequest)
|
||||
return nil, common.ErrorWrapper(errors.New("field file is required"), "required_field_missing", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
if modelMap != nil && modelMap[audioRequest.Model] != "" {
|
||||
@ -308,17 +308,17 @@ func handleTranslations(c *gin.Context, provider providers_base.ProviderInterfac
|
||||
isModelMapped := false
|
||||
speechProvider, ok := provider.(providers_base.TranslationInterface)
|
||||
if !ok {
|
||||
return nil, types.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
return nil, common.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
err := common.UnmarshalBodyReusable(c, &audioRequest)
|
||||
if err != nil {
|
||||
return nil, types.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
|
||||
return nil, common.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
if audioRequest.File == nil {
|
||||
fmt.Println(audioRequest)
|
||||
return nil, types.ErrorWrapper(errors.New("field file is required"), "required_field_missing", http.StatusBadRequest)
|
||||
return nil, common.ErrorWrapper(errors.New("field file is required"), "required_field_missing", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
if modelMap != nil && modelMap[audioRequest.Model] != "" {
|
||||
@ -342,12 +342,12 @@ func handleImageGenerations(c *gin.Context, provider providers_base.ProviderInte
|
||||
isModelMapped := false
|
||||
imageGenerationsProvider, ok := provider.(providers_base.ImageGenerationsInterface)
|
||||
if !ok {
|
||||
return nil, types.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
return nil, common.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
err := common.UnmarshalBodyReusable(c, &imageRequest)
|
||||
if err != nil {
|
||||
return nil, types.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
|
||||
return nil, common.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
if imageRequest.Model == "" {
|
||||
@ -368,7 +368,7 @@ func handleImageGenerations(c *gin.Context, provider providers_base.ProviderInte
|
||||
}
|
||||
promptTokens, err := common.CountTokenImage(imageRequest)
|
||||
if err != nil {
|
||||
return nil, types.ErrorWrapper(err, "count_token_image_failed", http.StatusInternalServerError)
|
||||
return nil, common.ErrorWrapper(err, "count_token_image_failed", http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
quotaInfo.modelName = imageRequest.Model
|
||||
@ -390,18 +390,18 @@ func handleImageEdits(c *gin.Context, provider providers_base.ProviderInterface,
|
||||
if imageType == "edit" {
|
||||
imageEditsProvider, ok = provider.(providers_base.ImageEditsInterface)
|
||||
if !ok {
|
||||
return nil, types.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
return nil, common.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
}
|
||||
} else {
|
||||
imageVariations, ok = provider.(providers_base.ImageVariationsInterface)
|
||||
if !ok {
|
||||
return nil, types.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
return nil, common.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
|
||||
}
|
||||
}
|
||||
|
||||
err := common.UnmarshalBodyReusable(c, &imageEditRequest)
|
||||
if err != nil {
|
||||
return nil, types.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
|
||||
return nil, common.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
|
||||
}
|
||||
|
||||
if imageEditRequest.Model == "" {
|
||||
@ -418,7 +418,7 @@ func handleImageEdits(c *gin.Context, provider providers_base.ProviderInterface,
|
||||
}
|
||||
promptTokens, err := common.CountTokenImage(imageEditRequest)
|
||||
if err != nil {
|
||||
return nil, types.ErrorWrapper(err, "count_token_image_failed", http.StatusInternalServerError)
|
||||
return nil, common.ErrorWrapper(err, "count_token_image_failed", http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
quotaInfo.modelName = imageEditRequest.Model
|
||||
|
controller/relay-image-edits.go (new file, 109 lines)
@ -0,0 +1,109 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelayImageEdits(c *gin.Context) {
|
||||
|
||||
var imageEditRequest types.ImageEditRequest
|
||||
|
||||
if err := common.UnmarshalBodyReusable(c, &imageEditRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if imageEditRequest.Prompt == "" {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, "field prompt is required")
|
||||
return
|
||||
}
|
||||
|
||||
if imageEditRequest.Model == "" {
|
||||
imageEditRequest.Model = "dall-e-2"
|
||||
}
|
||||
|
||||
if imageEditRequest.Size == "" {
|
||||
imageEditRequest.Size = "1024x1024"
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, imageEditRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
// 写入渠道信息
|
||||
setChannelToContext(c, channel)
|
||||
|
||||
// 解析模型映射
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(c.GetString("model_mapping"))
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[imageEditRequest.Model] != "" {
|
||||
imageEditRequest.Model = modelMap[imageEditRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// 获取供应商
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeImagesEdits)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
imageEditsProvider, ok := provider.(providersBase.ImageEditsInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// 获取Input Tokens
|
||||
promptTokens, err := common.CountTokenImage(imageEditRequest)
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, imageEditRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = imageEditsProvider.ImageEditsAction(&imageEditRequest, isModelMapped, promptTokens)
|
||||
|
||||
// 如果报错,则退还配额
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// 如果没有报错,则消费配额
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
controller/relay-image-generations.go (new file, 108 lines)
@ -0,0 +1,108 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelayImageGenerations(c *gin.Context) {
|
||||
|
||||
var imageRequest types.ImageRequest
|
||||
|
||||
if err := common.UnmarshalBodyReusable(c, &imageRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if imageRequest.Model == "" {
|
||||
imageRequest.Model = "dall-e-2"
|
||||
}
|
||||
|
||||
if imageRequest.Size == "" {
|
||||
imageRequest.Size = "1024x1024"
|
||||
}
|
||||
|
||||
if imageRequest.Quality == "" {
|
||||
imageRequest.Quality = "standard"
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, imageRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
// 写入渠道信息
|
||||
setChannelToContext(c, channel)
|
||||
|
||||
// 解析模型映射
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(c.GetString("model_mapping"))
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[imageRequest.Model] != "" {
|
||||
imageRequest.Model = modelMap[imageRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// 获取供应商
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeImagesGenerations)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
imageGenerationsProvider, ok := provider.(providersBase.ImageGenerationsInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// 获取Input Tokens
|
||||
promptTokens, err := common.CountTokenImage(imageRequest)
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, imageRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = imageGenerationsProvider.ImageGenerationsAction(&imageRequest, isModelMapped, promptTokens)
|
||||
|
||||
// 如果报错,则退还配额
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// 如果没有报错,则消费配额
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
controller/relay-image-variationsy.go (new file, 104 lines)
@ -0,0 +1,104 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelayImageVariations(c *gin.Context) {
|
||||
|
||||
var imageEditRequest types.ImageEditRequest
|
||||
|
||||
if err := common.UnmarshalBodyReusable(c, &imageEditRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if imageEditRequest.Model == "" {
|
||||
imageEditRequest.Model = "dall-e-2"
|
||||
}
|
||||
|
||||
if imageEditRequest.Size == "" {
|
||||
imageEditRequest.Size = "1024x1024"
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, imageEditRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
// 写入渠道信息
|
||||
setChannelToContext(c, channel)
|
||||
|
||||
// 解析模型映射
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(c.GetString("model_mapping"))
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[imageEditRequest.Model] != "" {
|
||||
imageEditRequest.Model = modelMap[imageEditRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// 获取供应商
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeImagesVariations)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
imageVariations, ok := provider.(providersBase.ImageVariationsInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// 获取Input Tokens
|
||||
promptTokens, err := common.CountTokenImage(imageEditRequest)
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, imageEditRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = imageVariations.ImageVariationsAction(&imageEditRequest, isModelMapped, promptTokens)
|
||||
|
||||
// 如果报错,则退还配额
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// 如果没有报错,则消费配额
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
controller/relay-moderations.go (new file, 96 lines)
@ -0,0 +1,96 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelayModerations(c *gin.Context) {
|
||||
|
||||
var moderationRequest types.ModerationRequest
|
||||
|
||||
if err := common.UnmarshalBodyReusable(c, &moderationRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if moderationRequest.Model == "" {
|
||||
moderationRequest.Model = "text-moderation-stable"
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, moderationRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
// 写入渠道信息
|
||||
setChannelToContext(c, channel)
|
||||
|
||||
// 解析模型映射
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(c.GetString("model_mapping"))
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[moderationRequest.Model] != "" {
|
||||
moderationRequest.Model = modelMap[moderationRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// 获取供应商
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeModerations)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
moderationProvider, ok := provider.(providersBase.ModerationInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// 获取Input Tokens
|
||||
promptTokens := common.CountTokenInput(moderationRequest.Input, moderationRequest.Model)
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, moderationRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = moderationProvider.ModerationAction(&moderationRequest, isModelMapped, promptTokens)
|
||||
|
||||
// 如果报错,则退还配额
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// 如果没有报错,则消费配额
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
controller/relay-speech.go (new file, 92 lines)
@ -0,0 +1,92 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelaySpeech(c *gin.Context) {
|
||||
|
||||
var speechRequest types.SpeechAudioRequest
|
||||
|
||||
if err := common.UnmarshalBodyReusable(c, &speechRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, speechRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
// 写入渠道信息
|
||||
setChannelToContext(c, channel)
|
||||
|
||||
// 解析模型映射
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(c.GetString("model_mapping"))
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[speechRequest.Model] != "" {
|
||||
speechRequest.Model = modelMap[speechRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// 获取供应商
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeAudioSpeech)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
speechProvider, ok := provider.(providersBase.SpeechInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// 获取Input Tokens
|
||||
promptTokens := len(speechRequest.Input)
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, speechRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = speechProvider.SpeechAction(&speechRequest, isModelMapped, promptTokens)
|
||||
|
||||
// 如果报错,则退还配额
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// 如果没有报错,则消费配额
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
controller/relay-transcriptions.go (new file, 92 lines)
@ -0,0 +1,92 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelayTranscriptions(c *gin.Context) {
|
||||
|
||||
var audioRequest types.AudioRequest
|
||||
|
||||
if err := common.UnmarshalBodyReusable(c, &audioRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, audioRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
// 写入渠道信息
|
||||
setChannelToContext(c, channel)
|
||||
|
||||
// 解析模型映射
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(c.GetString("model_mapping"))
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[audioRequest.Model] != "" {
|
||||
audioRequest.Model = modelMap[audioRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// 获取供应商
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeAudioTranscription)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
transcriptionsProvider, ok := provider.(providersBase.TranscriptionsInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// 获取Input Tokens
|
||||
promptTokens := 0
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, audioRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = transcriptionsProvider.TranscriptionsAction(&audioRequest, isModelMapped, promptTokens)
|
||||
|
||||
// 如果报错,则退还配额
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// 如果没有报错,则消费配额
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
controller/relay-translations.go (new file, 92 lines)
@ -0,0 +1,92 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RelayTranslations(c *gin.Context) {
|
||||
|
||||
var audioRequest types.AudioRequest
|
||||
|
||||
if err := common.UnmarshalBodyReusable(c, &audioRequest); err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
channel, pass := fetchChannel(c, audioRequest.Model)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
|
||||
// 写入渠道信息
|
||||
setChannelToContext(c, channel)
|
||||
|
||||
// 解析模型映射
|
||||
var isModelMapped bool
|
||||
modelMap, err := parseModelMapping(c.GetString("model_mapping"))
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
if modelMap != nil && modelMap[audioRequest.Model] != "" {
|
||||
audioRequest.Model = modelMap[audioRequest.Model]
|
||||
isModelMapped = true
|
||||
}
|
||||
|
||||
// 获取供应商
|
||||
provider, pass := getProvider(c, channel.Type, common.RelayModeAudioTranslation)
|
||||
if pass {
|
||||
return
|
||||
}
|
||||
translationProvider, ok := provider.(providersBase.TranslationInterface)
|
||||
if !ok {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
// 获取Input Tokens
|
||||
promptTokens := 0
|
||||
|
||||
var quotaInfo *QuotaInfo
|
||||
var errWithCode *types.OpenAIErrorWithStatusCode
|
||||
var usage *types.Usage
|
||||
quotaInfo, errWithCode = generateQuotaInfo(c, audioRequest.Model, promptTokens)
|
||||
if errWithCode != nil {
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
}
|
||||
|
||||
usage, errWithCode = translationProvider.TranslationAction(&audioRequest, isModelMapped, promptTokens)
|
||||
|
||||
// 如果报错,则退还配额
|
||||
if errWithCode != nil {
|
||||
tokenId := c.GetInt("token_id")
|
||||
if quotaInfo.HandelStatus {
|
||||
go func(ctx context.Context) {
|
||||
// return pre-consumed quota
|
||||
err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota)
|
||||
if err != nil {
|
||||
common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
errorHelper(c, errWithCode)
|
||||
return
|
||||
} else {
|
||||
tokenName := c.GetString("token_name")
|
||||
// 如果没有报错,则消费配额
|
||||
go func(ctx context.Context) {
|
||||
err = quotaInfo.completedQuotaConsumption(usage, tokenName, ctx)
|
||||
if err != nil {
|
||||
common.LogError(ctx, err.Error())
|
||||
}
|
||||
}(c.Request.Context())
|
||||
}
|
||||
}
|
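Every controller above ends with the same bookkeeping: on a provider error, asynchronously refund any pre-consumed quota and hand the error to errorHelper; on success, asynchronously record the final consumption. The sketch below is not part of the commit, only a condensed restatement of that duplicated tail as a hypothetical helper; it would have to live in the controller package because it touches the unexported QuotaInfo fields.

package controller

import (
	"context"

	"one-api/common"
	"one-api/model"
	"one-api/types"

	"github.com/gin-gonic/gin"
)

// settleQuota is a hypothetical helper: it is the post-action block that every
// Relay* controller in this commit repeats verbatim.
func settleQuota(c *gin.Context, quotaInfo *QuotaInfo, usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode) {
	if errWithCode != nil {
		// On failure, return the pre-consumed quota if any was reserved ...
		if quotaInfo.HandelStatus {
			tokenId := c.GetInt("token_id")
			go func(ctx context.Context) {
				if err := model.PostConsumeTokenQuota(tokenId, -quotaInfo.preConsumedQuota); err != nil {
					common.LogError(ctx, "error return pre-consumed quota: "+err.Error())
				}
			}(c.Request.Context())
		}
		// ... then emit the error (or retry redirect) exactly as the controllers do.
		errorHelper(c, errWithCode)
		return
	}

	// On success, record the final consumption asynchronously.
	tokenName := c.GetString("token_name")
	go func(ctx context.Context) {
		if err := quotaInfo.completedQuotaConsumption(usage, tokenName, ctx); err != nil {
			common.LogError(ctx, err.Error())
		}
	}(c.Request.Context())
}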
@ -9,9 +9,104 @@ import (
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
"one-api/providers"
|
||||
providersBase "one-api/providers/base"
|
||||
"one-api/types"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/go-playground/validator/v10"
|
||||
)
|
||||
|
||||
func GetValidFieldName(err error, obj interface{}) string {
|
||||
getObj := reflect.TypeOf(obj)
|
||||
if errs, ok := err.(validator.ValidationErrors); ok {
|
||||
for _, e := range errs {
|
||||
if f, exist := getObj.Elem().FieldByName(e.Field()); exist {
|
||||
return f.Name
|
||||
}
|
||||
}
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
func fetchChannel(c *gin.Context, modelName string) (*model.Channel, bool) {
|
||||
channelId, ok := c.Get("channelId")
|
||||
if ok {
|
||||
return fetchChannelById(c, channelId.(int))
|
||||
}
|
||||
return fetchChannelByModel(c, modelName)
|
||||
|
||||
}
|
||||
|
||||
func fetchChannelById(c *gin.Context, channelId any) (*model.Channel, bool) {
|
||||
id, err := strconv.Atoi(channelId.(string))
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, "无效的渠道 Id")
|
||||
return nil, true
|
||||
}
|
||||
channel, err := model.GetChannelById(id, true)
|
||||
if err != nil {
|
||||
common.AbortWithMessage(c, http.StatusBadRequest, "无效的渠道 Id")
|
||||
return nil, true
|
||||
}
|
||||
if channel.Status != common.ChannelStatusEnabled {
|
||||
common.AbortWithMessage(c, http.StatusForbidden, "该渠道已被禁用")
|
||||
return nil, true
|
||||
}
|
||||
|
||||
return channel, false
|
||||
}
|
||||
|
||||
func fetchChannelByModel(c *gin.Context, modelName string) (*model.Channel, bool) {
|
||||
group := c.GetString("group")
|
||||
channel, err := model.CacheGetRandomSatisfiedChannel(group, modelName)
|
||||
if err != nil {
|
||||
message := fmt.Sprintf("当前分组 %s 下对于模型 %s 无可用渠道", group, modelName)
|
||||
if channel != nil {
|
||||
common.SysError(fmt.Sprintf("渠道不存在:%d", channel.Id))
|
||||
message = "数据库一致性已被破坏,请联系管理员"
|
||||
}
|
||||
common.AbortWithMessage(c, http.StatusServiceUnavailable, message)
|
||||
return nil, true
|
||||
}
|
||||
|
||||
return channel, false
|
||||
}
|
||||
|
||||
func getProvider(c *gin.Context, channelType int, relayMode int) (providersBase.ProviderInterface, bool) {
|
||||
provider := providers.GetProvider(channelType, c)
|
||||
if provider == nil {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel not found")
|
||||
return nil, true
|
||||
}
|
||||
|
||||
if !provider.SupportAPI(relayMode) {
|
||||
common.AbortWithMessage(c, http.StatusNotImplemented, "channel does not support this API")
|
||||
return nil, true
|
||||
}
|
||||
|
||||
return provider, false
|
||||
}
|
||||
|
||||
func setChannelToContext(c *gin.Context, channel *model.Channel) {
|
||||
c.Set("channel", channel.Type)
|
||||
c.Set("channel_id", channel.Id)
|
||||
c.Set("channel_name", channel.Name)
|
||||
c.Set("model_mapping", channel.GetModelMapping())
|
||||
c.Set("api_key", channel.Key)
|
||||
c.Set("base_url", channel.GetBaseURL())
|
||||
switch channel.Type {
|
||||
case common.ChannelTypeAzure:
|
||||
c.Set("api_version", channel.Other)
|
||||
case common.ChannelTypeXunfei:
|
||||
c.Set("api_version", channel.Other)
|
||||
case common.ChannelTypeAIProxyLibrary:
|
||||
c.Set("library_id", channel.Other)
|
||||
}
|
||||
}
|
||||
|
||||
func shouldDisableChannel(err *types.OpenAIError, statusCode int) bool {
|
||||
if !common.AutomaticDisableChannelEnabled {
|
||||
return false
|
||||
@ -68,6 +163,26 @@ type QuotaInfo struct {
|
||||
userId int
|
||||
channelId int
|
||||
tokenId int
|
||||
HandelStatus bool
|
||||
}
|
||||
|
||||
func generateQuotaInfo(c *gin.Context, modelName string, promptTokens int) (*QuotaInfo, *types.OpenAIErrorWithStatusCode) {
|
||||
quotaInfo := &QuotaInfo{
|
||||
modelName: modelName,
|
||||
promptTokens: promptTokens,
|
||||
userId: c.GetInt("id"),
|
||||
channelId: c.GetInt("channel_id"),
|
||||
tokenId: c.GetInt("token_id"),
|
||||
HandelStatus: false,
|
||||
}
|
||||
quotaInfo.initQuotaInfo(c.GetString("group"))
|
||||
|
||||
errWithCode := quotaInfo.preQuotaConsumption()
|
||||
if errWithCode != nil {
|
||||
return nil, errWithCode
|
||||
}
|
||||
|
||||
return quotaInfo, nil
|
||||
}
|
||||
|
||||
func (q *QuotaInfo) initQuotaInfo(groupName string) {
|
||||
@ -89,16 +204,16 @@ func (q *QuotaInfo) initQuotaInfo(groupName string) {
|
||||
func (q *QuotaInfo) preQuotaConsumption() *types.OpenAIErrorWithStatusCode {
|
||||
userQuota, err := model.CacheGetUserQuota(q.userId)
|
||||
if err != nil {
|
||||
return types.ErrorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError)
|
||||
return common.ErrorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
if userQuota < q.preConsumedQuota {
|
||||
return types.ErrorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
|
||||
return common.ErrorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
|
||||
}
|
||||
|
||||
err = model.CacheDecreaseUserQuota(q.userId, q.preConsumedQuota)
|
||||
if err != nil {
|
||||
return types.ErrorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError)
|
||||
return common.ErrorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
if userQuota > 100*q.preConsumedQuota {
|
||||
@ -111,8 +226,9 @@ func (q *QuotaInfo) preQuotaConsumption() *types.OpenAIErrorWithStatusCode {
|
||||
if q.preConsumedQuota > 0 {
|
||||
err := model.PreConsumeTokenQuota(q.tokenId, q.preConsumedQuota)
|
||||
if err != nil {
|
||||
return types.ErrorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
|
||||
return common.ErrorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
|
||||
}
|
||||
q.HandelStatus = true
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -6,73 +6,10 @@ import (
|
||||
"one-api/common"
|
||||
"one-api/types"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func Relay(c *gin.Context) {
|
||||
defer c.Request.Body.Close()
|
||||
var err *types.OpenAIErrorWithStatusCode
|
||||
|
||||
relayMode := common.RelayModeUnknown
|
||||
if strings.HasPrefix(c.Request.URL.Path, "/v1/chat/completions") {
|
||||
relayMode = common.RelayModeChatCompletions
|
||||
} else if strings.HasPrefix(c.Request.URL.Path, "/v1/completions") {
|
||||
relayMode = common.RelayModeCompletions
|
||||
} else if strings.HasPrefix(c.Request.URL.Path, "/v1/embeddings") {
|
||||
		relayMode = common.RelayModeEmbeddings
	} else if strings.HasSuffix(c.Request.URL.Path, "embeddings") {
		relayMode = common.RelayModeEmbeddings
	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/moderations") {
		relayMode = common.RelayModeModerations
	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/speech") {
		relayMode = common.RelayModeAudioSpeech
	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/transcriptions") {
		relayMode = common.RelayModeAudioTranscription
	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/translations") {
		relayMode = common.RelayModeAudioTranslation
	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/images/generations") {
		relayMode = common.RelayModeImagesGenerations
	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/images/edits") {
		relayMode = common.RelayModeImagesEdits
	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/images/variations") {
		relayMode = common.RelayModeImagesVariations
	}
	// } else if strings.HasPrefix(c.Request.URL.Path, "/v1/edits") {
	// relayMode = RelayModeEdits

	err = relayHelper(c, relayMode)

	if err != nil {
		requestId := c.GetString(common.RequestIdKey)
		retryTimesStr := c.Query("retry")
		retryTimes, _ := strconv.Atoi(retryTimesStr)
		if retryTimesStr == "" {
			retryTimes = common.RetryTimes
		}
		if retryTimes > 0 {
			c.Redirect(http.StatusTemporaryRedirect, fmt.Sprintf("%s?retry=%d", c.Request.URL.Path, retryTimes-1))
		} else {
			if err.StatusCode == http.StatusTooManyRequests {
				err.OpenAIError.Message = "当前分组上游负载已饱和,请稍后再试"
			}
			err.OpenAIError.Message = common.MessageWithRequestId(err.OpenAIError.Message, requestId)
			c.JSON(err.StatusCode, gin.H{
				"error": err.OpenAIError,
			})
		}
		channelId := c.GetInt("channel_id")
		common.LogError(c.Request.Context(), fmt.Sprintf("relay error (channel #%d): %s", channelId, err.Message))
		// https://platform.openai.com/docs/guides/error-codes/api-errors
		if shouldDisableChannel(&err.OpenAIError, err.StatusCode) {
			channelId := c.GetInt("channel_id")
			channelName := c.GetString("channel_name")
			disableChannel(channelId, channelName, err.Message)
		}
	}
}

func RelayNotImplemented(c *gin.Context) {
	err := types.OpenAIError{
		Message: "API not implemented",
@ -96,3 +33,31 @@ func RelayNotFound(c *gin.Context) {
		"error": err,
	})
}

func errorHelper(c *gin.Context, err *types.OpenAIErrorWithStatusCode) {
	requestId := c.GetString(common.RequestIdKey)
	retryTimesStr := c.Query("retry")
	retryTimes, _ := strconv.Atoi(retryTimesStr)
	if retryTimesStr == "" {
		retryTimes = common.RetryTimes
	}
	if retryTimes > 0 {
		c.Redirect(http.StatusTemporaryRedirect, fmt.Sprintf("%s?retry=%d", c.Request.URL.Path, retryTimes-1))
	} else {
		if err.StatusCode == http.StatusTooManyRequests {
			err.OpenAIError.Message = "当前分组上游负载已饱和,请稍后再试"
		}
		err.OpenAIError.Message = common.MessageWithRequestId(err.OpenAIError.Message, requestId)
		c.JSON(err.StatusCode, gin.H{
			"error": err.OpenAIError,
		})
	}
	channelId := c.GetInt("channel_id")
	common.LogError(c.Request.Context(), fmt.Sprintf("relay error (channel #%d): %s", channelId, err.Message))
	// https://platform.openai.com/docs/guides/error-codes/api-errors
	if shouldDisableChannel(&err.OpenAIError, err.StatusCode) {
		channelId := c.GetInt("channel_id")
		channelName := c.GetString("channel_name")
		disableChannel(channelId, channelName, err.Message)
	}
}
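For orientation, a minimal sketch of how a per-endpoint controller can hand a provider failure to the new errorHelper. The handler name, the hard-coded error, and the "one-api" module path are illustrative assumptions, not code from this commit; only errorHelper, types.OpenAIError and types.OpenAIErrorWithStatusCode come from the change itself.

package controller

import (
	"net/http"

	"one-api/types"

	"github.com/gin-gonic/gin"
)

// RelayFooSketch is a hypothetical handler showing the intended call pattern:
// build (or receive) a *types.OpenAIErrorWithStatusCode and pass it to errorHelper,
// which applies the ?retry=N redirect loop, request-id stamping, logging and
// channel disabling shown above.
func RelayFooSketch(c *gin.Context) {
	// Pretend an upstream provider call came back with HTTP 429.
	errWithCode := &types.OpenAIErrorWithStatusCode{
		StatusCode: http.StatusTooManyRequests,
		OpenAIError: types.OpenAIError{
			Message: "upstream saturated",
			Type:    "one_api_error",
			Code:    "http_request_failed",
		},
	}
	errorHelper(c, errWithCode)
}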

@ -97,7 +97,7 @@ func (p *AliProvider) ChatAction(request *types.ChatCompletionRequest, isModelMa
	client := common.NewClient()
	req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	if request.Stream {
@ -157,7 +157,7 @@ func (p *AliProvider) sendStreamRequest(req *http.Request) (usage *types.Usage,
	// 发送请求
	resp, err := common.HttpClient.Do(req)
	if err != nil {
		return nil, types.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError)
	}

	if common.IsFailureStatusCode(resp) {

@ -59,7 +59,7 @@ func (p *AliProvider) EmbeddingsAction(request *types.EmbeddingRequest, isModelM
	client := common.NewClient()
	req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	aliEmbeddingResponse := &AliEmbeddingResponse{}

@ -25,13 +25,13 @@ func (c *ImageAzureResponse) ResponseHandler(resp *http.Response) (OpenAIRespons

	operation_location := resp.Header.Get("operation-location")
	if operation_location == "" {
		return nil, types.ErrorWrapper(errors.New("image url is empty"), "get_images_url_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(errors.New("image url is empty"), "get_images_url_failed", http.StatusInternalServerError)
	}

	client := common.NewClient()
	req, err := client.NewRequest("GET", operation_location, common.WithHeader(c.Header))
	if err != nil {
		return nil, types.ErrorWrapper(err, "get_images_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "get_images_request_failed", http.StatusInternalServerError)
	}

	getImageAzureResponse := ImageAzureResponse{}
@ -59,14 +59,14 @@ func (c *ImageAzureResponse) ResponseHandler(resp *http.Response) (OpenAIRespons
		}
	}

	return nil, types.ErrorWrapper(errors.New("get image Timeout"), "get_images_url_failed", http.StatusInternalServerError)
	return nil, common.ErrorWrapper(errors.New("get image Timeout"), "get_images_url_failed", http.StatusInternalServerError)
}

func (p *AzureProvider) ImageGenerationsAction(request *types.ImageRequest, isModelMapped bool, promptTokens int) (usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode) {

	requestBody, err := p.GetRequestBody(&request, isModelMapped)
	if err != nil {
		return nil, types.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
	}

	fullRequestURL := p.GetFullRequestURL(p.ImagesGenerations, request.Model)
@ -75,7 +75,7 @@ func (p *AzureProvider) ImageGenerationsAction(request *types.ImageRequest, isMo
	client := common.NewClient()
	req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	if request.Model == "dall-e-2" {

@ -73,7 +73,7 @@ func (p *BaiduProvider) ChatAction(request *types.ChatCompletionRequest, isModel
	requestBody := p.getChatRequestBody(request)
	fullRequestURL := p.GetFullRequestURL(p.ChatCompletions, request.Model)
	if fullRequestURL == "" {
		return nil, types.ErrorWrapper(nil, "invalid_baidu_config", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(nil, "invalid_baidu_config", http.StatusInternalServerError)
	}

	headers := p.GetRequestHeaders()
@ -84,7 +84,7 @@ func (p *BaiduProvider) ChatAction(request *types.ChatCompletionRequest, isModel
	client := common.NewClient()
	req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	if request.Stream {
@ -130,7 +130,7 @@ func (p *BaiduProvider) sendStreamRequest(req *http.Request) (usage *types.Usage
	// 发送请求
	resp, err := common.HttpClient.Do(req)
	if err != nil {
		return nil, types.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError)
	}

	if common.IsFailureStatusCode(resp) {

@ -48,14 +48,14 @@ func (p *BaiduProvider) EmbeddingsAction(request *types.EmbeddingRequest, isMode
	requestBody := p.getEmbeddingsRequestBody(request)
	fullRequestURL := p.GetFullRequestURL(p.Embeddings, request.Model)
	if fullRequestURL == "" {
		return nil, types.ErrorWrapper(nil, "invalid_baidu_config", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(nil, "invalid_baidu_config", http.StatusInternalServerError)
	}

	headers := p.GetRequestHeaders()
	client := common.NewClient()
	req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	baiduEmbeddingResponse := &BaiduEmbeddingResponse{}

@ -79,19 +79,19 @@ func (p *BaseProvider) SendRequest(req *http.Request, response ProviderResponseH
		p.Context.Writer.WriteHeader(resp.StatusCode)
		_, err := io.Copy(p.Context.Writer, resp.Body)
		if err != nil {
			return types.ErrorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError)
			return common.ErrorWrapper(err, "copy_response_body_failed", http.StatusInternalServerError)
		}
	} else {
		jsonResponse, err := json.Marshal(openAIResponse)
		if err != nil {
			return types.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError)
			return common.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError)
		}
		p.Context.Writer.Header().Set("Content-Type", "application/json")
		p.Context.Writer.WriteHeader(resp.StatusCode)
		_, err = p.Context.Writer.Write(jsonResponse)

		if err != nil {
			return types.ErrorWrapper(err, "write_response_body_failed", http.StatusInternalServerError)
			return common.ErrorWrapper(err, "write_response_body_failed", http.StatusInternalServerError)
		}
	}

@ -104,7 +104,7 @@ func (p *BaseProvider) SendRequestRaw(req *http.Request) (openAIErrorWithStatusC
	// 发送请求
	resp, err := common.HttpClient.Do(req)
	if err != nil {
		return types.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError)
		return common.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError)
	}

	defer resp.Body.Close()
@ -122,7 +122,7 @@ func (p *BaseProvider) SendRequestRaw(req *http.Request) (openAIErrorWithStatusC

	_, err = io.Copy(p.Context.Writer, resp.Body)
	if err != nil {
		return types.ErrorWrapper(err, "write_response_body_failed", http.StatusInternalServerError)
		return common.ErrorWrapper(err, "write_response_body_failed", http.StatusInternalServerError)
	}

	return nil

@ -88,7 +88,7 @@ func (p *ClaudeProvider) ChatAction(request *types.ChatCompletionRequest, isMode
	client := common.NewClient()
	req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	if request.Stream {
@ -139,7 +139,7 @@ func (p *ClaudeProvider) sendStreamRequest(req *http.Request) (*types.OpenAIErro
	// 发送请求
	resp, err := common.HttpClient.Do(req)
	if err != nil {
		return types.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError), ""
		return common.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError), ""
	}

	if common.IsFailureStatusCode(resp) {

@ -106,7 +106,7 @@ func (p *OpenAIProvider) sendStreamRequest(req *http.Request, response OpenAIPro

	resp, err := common.HttpClient.Do(req)
	if err != nil {
		return types.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError), ""
		return common.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError), ""
	}

	if common.IsFailureStatusCode(resp) {

@ -28,7 +28,7 @@ func (c *OpenAIProviderChatStreamResponse) responseStreamHandler() (responseText
func (p *OpenAIProvider) ChatAction(request *types.ChatCompletionRequest, isModelMapped bool, promptTokens int) (usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode) {
	requestBody, err := p.GetRequestBody(&request, isModelMapped)
	if err != nil {
		return nil, types.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
	}

	fullRequestURL := p.GetFullRequestURL(p.ChatCompletions, request.Model)
@ -40,7 +40,7 @@ func (p *OpenAIProvider) ChatAction(request *types.ChatCompletionRequest, isMode
	client := common.NewClient()
	req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	if request.Stream {

@ -28,7 +28,7 @@ func (c *OpenAIProviderCompletionResponse) responseStreamHandler() (responseText
func (p *OpenAIProvider) CompleteAction(request *types.CompletionRequest, isModelMapped bool, promptTokens int) (usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode) {
	requestBody, err := p.GetRequestBody(&request, isModelMapped)
	if err != nil {
		return nil, types.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
	}

	fullRequestURL := p.GetFullRequestURL(p.Completions, request.Model)
@ -40,7 +40,7 @@ func (p *OpenAIProvider) CompleteAction(request *types.CompletionRequest, isMode
	client := common.NewClient()
	req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	openAIProviderCompletionResponse := &OpenAIProviderCompletionResponse{}

@ -21,7 +21,7 @@ func (p *OpenAIProvider) EmbeddingsAction(request *types.EmbeddingRequest, isMod

	requestBody, err := p.GetRequestBody(&request, isModelMapped)
	if err != nil {
		return nil, types.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
	}

	fullRequestURL := p.GetFullRequestURL(p.Embeddings, request.Model)
@ -30,7 +30,7 @@ func (p *OpenAIProvider) EmbeddingsAction(request *types.EmbeddingRequest, isMod
	client := common.NewClient()
	req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	openAIProviderEmbeddingsResponse := &OpenAIProviderEmbeddingsResponse{}

@ -20,7 +20,7 @@ func (p *OpenAIProvider) ImageEditsAction(request *types.ImageEditRequest, isMod
	if isModelMapped {
		builder := client.CreateFormBuilder(&formBody)
		if err := imagesEditsMultipartForm(request, builder); err != nil {
			return nil, types.ErrorWrapper(err, "create_form_builder_failed", http.StatusInternalServerError)
			return nil, common.ErrorWrapper(err, "create_form_builder_failed", http.StatusInternalServerError)
		}
		req, err = client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(&formBody), common.WithHeader(headers), common.WithContentType(builder.FormDataContentType()))
		req.ContentLength = int64(formBody.Len())
@ -31,7 +31,7 @@ func (p *OpenAIProvider) ImageEditsAction(request *types.ImageEditRequest, isMod
	}

	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	openAIProviderImageResponseResponse := &OpenAIProviderImageResponseResponse{}

@ -21,7 +21,7 @@ func (p *OpenAIProvider) ImageGenerationsAction(request *types.ImageRequest, isM

	requestBody, err := p.GetRequestBody(&request, isModelMapped)
	if err != nil {
		return nil, types.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
	}

	fullRequestURL := p.GetFullRequestURL(p.ImagesGenerations, request.Model)
@ -30,7 +30,7 @@ func (p *OpenAIProvider) ImageGenerationsAction(request *types.ImageRequest, isM
	client := common.NewClient()
	req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	openAIProviderImageResponseResponse := &OpenAIProviderImageResponseResponse{}

@ -19,7 +19,7 @@ func (p *OpenAIProvider) ImageVariationsAction(request *types.ImageEditRequest,
	if isModelMapped {
		builder := client.CreateFormBuilder(&formBody)
		if err := imagesEditsMultipartForm(request, builder); err != nil {
			return nil, types.ErrorWrapper(err, "create_form_builder_failed", http.StatusInternalServerError)
			return nil, common.ErrorWrapper(err, "create_form_builder_failed", http.StatusInternalServerError)
		}
		req, err = client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(&formBody), common.WithHeader(headers), common.WithContentType(builder.FormDataContentType()))
		req.ContentLength = int64(formBody.Len())
@ -30,7 +30,7 @@ func (p *OpenAIProvider) ImageVariationsAction(request *types.ImageEditRequest,
	}

	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	openAIProviderImageResponseResponse := &OpenAIProviderImageResponseResponse{}

@ -21,7 +21,7 @@ func (p *OpenAIProvider) ModerationAction(request *types.ModerationRequest, isMo

	requestBody, err := p.GetRequestBody(&request, isModelMapped)
	if err != nil {
		return nil, types.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
	}

	fullRequestURL := p.GetFullRequestURL(p.Moderation, request.Model)
@ -30,7 +30,7 @@ func (p *OpenAIProvider) ModerationAction(request *types.ModerationRequest, isMo
	client := common.NewClient()
	req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	openAIProviderModerationResponse := &OpenAIProviderModerationResponse{}

@ -10,7 +10,7 @@ func (p *OpenAIProvider) SpeechAction(request *types.SpeechAudioRequest, isModel

	requestBody, err := p.GetRequestBody(&request, isModelMapped)
	if err != nil {
		return nil, types.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
	}

	fullRequestURL := p.GetFullRequestURL(p.AudioSpeech, request.Model)
@ -19,7 +19,7 @@ func (p *OpenAIProvider) SpeechAction(request *types.SpeechAudioRequest, isModel
	client := common.NewClient()
	req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	errWithCode = p.SendRequestRaw(req)

@ -39,7 +39,7 @@ func (p *OpenAIProvider) TranscriptionsAction(request *types.AudioRequest, isMod
	if isModelMapped {
		builder := client.CreateFormBuilder(&formBody)
		if err := audioMultipartForm(request, builder); err != nil {
			return nil, types.ErrorWrapper(err, "create_form_builder_failed", http.StatusInternalServerError)
			return nil, common.ErrorWrapper(err, "create_form_builder_failed", http.StatusInternalServerError)
		}
		req, err = client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(&formBody), common.WithHeader(headers), common.WithContentType(builder.FormDataContentType()))
		req.ContentLength = int64(formBody.Len())
@ -50,7 +50,7 @@ func (p *OpenAIProvider) TranscriptionsAction(request *types.AudioRequest, isMod
	}

	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	var textResponse string

@ -19,7 +19,7 @@ func (p *OpenAIProvider) TranslationAction(request *types.AudioRequest, isModelM
	if isModelMapped {
		builder := client.CreateFormBuilder(&formBody)
		if err := audioMultipartForm(request, builder); err != nil {
			return nil, types.ErrorWrapper(err, "create_form_builder_failed", http.StatusInternalServerError)
			return nil, common.ErrorWrapper(err, "create_form_builder_failed", http.StatusInternalServerError)
		}
		req, err = client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(&formBody), common.WithHeader(headers), common.WithContentType(builder.FormDataContentType()))
		req.ContentLength = int64(formBody.Len())
@ -30,7 +30,7 @@ func (p *OpenAIProvider) TranslationAction(request *types.AudioRequest, isModelM
	}

	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	var textResponse string

@ -82,7 +82,7 @@ func (p *PalmProvider) ChatAction(request *types.ChatCompletionRequest, isModelM
	client := common.NewClient()
	req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	if request.Stream {
@ -133,7 +133,7 @@ func (p *PalmProvider) sendStreamRequest(req *http.Request) (*types.OpenAIErrorW
	// 发送请求
	resp, err := common.HttpClient.Do(req)
	if err != nil {
		return types.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError), ""
		return common.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError), ""
	}

	if common.IsFailureStatusCode(resp) {

@ -82,7 +82,7 @@ func (p *TencentProvider) ChatAction(request *types.ChatCompletionRequest, isMod
	requestBody := p.getChatRequestBody(request)
	sign := p.getTencentSign(*requestBody)
	if sign == "" {
		return nil, types.ErrorWrapper(errors.New("get tencent sign failed"), "get_tencent_sign_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(errors.New("get tencent sign failed"), "get_tencent_sign_failed", http.StatusInternalServerError)
	}

	fullRequestURL := p.GetFullRequestURL(p.ChatCompletions, request.Model)
@ -95,7 +95,7 @@ func (p *TencentProvider) ChatAction(request *types.ChatCompletionRequest, isMod
	client := common.NewClient()
	req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	if request.Stream {
@ -144,7 +144,7 @@ func (p *TencentProvider) sendStreamRequest(req *http.Request) (*types.OpenAIErr
	// 发送请求
	resp, err := common.HttpClient.Do(req)
	if err != nil {
		return types.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError), ""
		return common.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError), ""
	}

	if common.IsFailureStatusCode(resp) {

@ -26,7 +26,7 @@ func (p *XunfeiProvider) sendRequest(request *types.ChatCompletionRequest, authU
	usage = &types.Usage{}
	dataChan, stopChan, err := p.xunfeiMakeRequest(request, authUrl)
	if err != nil {
		return nil, types.ErrorWrapper(err, "make xunfei request err", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "make xunfei request err", http.StatusInternalServerError)
	}

	var content string
@ -51,7 +51,7 @@ func (p *XunfeiProvider) sendRequest(request *types.ChatCompletionRequest, authU
	response := p.responseXunfei2OpenAI(&xunfeiResponse)
	jsonResponse, err := json.Marshal(response)
	if err != nil {
		return nil, types.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError)
	}
	p.Context.Writer.Header().Set("Content-Type", "application/json")
	_, _ = p.Context.Writer.Write(jsonResponse)
@ -62,7 +62,7 @@ func (p *XunfeiProvider) sendStreamRequest(request *types.ChatCompletionRequest,
	usage = &types.Usage{}
	dataChan, stopChan, err := p.xunfeiMakeRequest(request, authUrl)
	if err != nil {
		return nil, types.ErrorWrapper(err, "make xunfei request err", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "make xunfei request err", http.StatusInternalServerError)
	}
	common.SetEventStreamHeaders(p.Context)
	p.Context.Stream(func(w io.Writer) bool {

@ -90,7 +90,7 @@ func (p *ZhipuProvider) ChatAction(request *types.ChatCompletionRequest, isModel
	client := common.NewClient()
	req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
	if err != nil {
		return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	if request.Stream {
@ -144,7 +144,7 @@ func (p *ZhipuProvider) sendStreamRequest(req *http.Request) (*types.OpenAIError
	// 发送请求
	resp, err := common.HttpClient.Do(req)
	if err != nil {
		return types.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError), nil
		return common.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError), nil
	}

	if common.IsFailureStatusCode(resp) {

@ -19,17 +19,18 @@ func SetRelayRouter(router *gin.Engine) {
	relayV1Router := router.Group("/v1")
	relayV1Router.Use(middleware.TokenAuth(), middleware.Distribute())
	{
		relayV1Router.POST("/completions", controller.Relay)
		relayV1Router.POST("/chat/completions", controller.Relay)
		relayV1Router.POST("/edits", controller.Relay)
		relayV1Router.POST("/images/generations", controller.Relay)
		relayV1Router.POST("/images/edits", controller.Relay)
		relayV1Router.POST("/images/variations", controller.Relay)
		relayV1Router.POST("/embeddings", controller.Relay)
		relayV1Router.POST("/engines/:model/embeddings", controller.Relay)
		relayV1Router.POST("/audio/transcriptions", controller.Relay)
		relayV1Router.POST("/audio/translations", controller.Relay)
		relayV1Router.POST("/audio/speech", controller.Relay)
		relayV1Router.POST("/completions", controller.RelayCompletions)
		relayV1Router.POST("/chat/completions", controller.RelayChat)
		// relayV1Router.POST("/edits", controller.Relay)
		relayV1Router.POST("/images/generations", controller.RelayImageGenerations)
		relayV1Router.POST("/images/edits", controller.RelayImageEdits)
		relayV1Router.POST("/images/variations", controller.RelayImageVariations)
		relayV1Router.POST("/embeddings", controller.RelayEmbeddings)
		// relayV1Router.POST("/engines/:model/embeddings", controller.RelayEmbeddings)
		relayV1Router.POST("/audio/transcriptions", controller.RelayTranscriptions)
		relayV1Router.POST("/audio/translations", controller.RelayTranslations)
		relayV1Router.POST("/audio/speech", controller.RelaySpeech)
		relayV1Router.POST("/moderations", controller.RelayModerations)
		relayV1Router.GET("/files", controller.RelayNotImplemented)
		relayV1Router.POST("/files", controller.RelayNotImplemented)
		relayV1Router.DELETE("/files/:id", controller.RelayNotImplemented)
@ -41,6 +42,5 @@ func SetRelayRouter(router *gin.Engine) {
		relayV1Router.POST("/fine-tunes/:id/cancel", controller.RelayNotImplemented)
		relayV1Router.GET("/fine-tunes/:id/events", controller.RelayNotImplemented)
		relayV1Router.DELETE("/models/:model", controller.RelayNotImplemented)
		relayV1Router.POST("/moderations", controller.Relay)
	}
}
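The route table above now fans out to one controller per endpoint. Below is a small, self-contained smoke-test sketch of that layout; the stub handlers stand in for the real controller functions, and the test name and file are illustrative, not part of the repository.

package router_test

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"

	"github.com/gin-gonic/gin"
)

// TestRelayRouteLayoutSketch wires a /v1 group the same way the router above does,
// but with inline stubs instead of controller.RelayChat / controller.RelayModerations,
// and checks that a POST to one of the per-endpoint routes is dispatched.
func TestRelayRouteLayoutSketch(t *testing.T) {
	gin.SetMode(gin.TestMode)
	r := gin.New()
	v1 := r.Group("/v1")
	{
		v1.POST("/chat/completions", func(c *gin.Context) { c.Status(http.StatusOK) })
		v1.POST("/moderations", func(c *gin.Context) { c.Status(http.StatusOK) })
	}

	req := httptest.NewRequest(http.MethodPost, "/v1/moderations", strings.NewReader(`{"input":"hi"}`))
	w := httptest.NewRecorder()
	r.ServeHTTP(w, req)
	if w.Code != http.StatusOK {
		t.Fatalf("expected 200 from stub handler, got %d", w.Code)
	}
}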

@ -3,16 +3,16 @@ package types
import "mime/multipart"

type SpeechAudioRequest struct {
	Model string `json:"model"`
	Input string `json:"input"`
	Voice string `json:"voice"`
	Model string `json:"model" binding:"required"`
	Input string `json:"input" binding:"required"`
	Voice string `json:"voice" binding:"required"`
	ResponseFormat string `json:"response_format,omitempty"`
	Speed float64 `json:"speed,omitempty"`
}

type AudioRequest struct {
	File *multipart.FileHeader `form:"file"`
	Model string `form:"model"`
	File *multipart.FileHeader `form:"file" binding:"required"`
	Model string `form:"model" binding:"required"`
	Language string `form:"language"`
	Prompt string `form:"prompt"`
	ResponseFormat string `form:"response_format"`

@ -49,8 +49,8 @@ type ChatCompletionResponseFormat struct {
}

type ChatCompletionRequest struct {
	Model string `json:"model"`
	Messages []ChatCompletionMessage `json:"messages"`
	Model string `json:"model" binding:"required"`
	Messages []ChatCompletionMessage `json:"messages" binding:"required"`
	MaxTokens int `json:"max_tokens,omitempty"`
	Temperature float64 `json:"temperature,omitempty"`
	TopP float64 `json:"top_p,omitempty"`

@ -22,19 +22,3 @@ type OpenAIErrorWithStatusCode struct {
type OpenAIErrorResponse struct {
	Error OpenAIError `json:"error,omitempty"`
}

func ErrorWrapper(err error, code string, statusCode int) *OpenAIErrorWithStatusCode {
	openAIError := OpenAIError{
		Message: err.Error(),
		Type: "one_api_error",
		Code: code,
	}
	return &OpenAIErrorWithStatusCode{
		OpenAIError: openAIError,
		StatusCode: statusCode,
	}
}

// type GeneralErrorHandling interface {
// HandleError(resp *http.Response) (openAIErrorWithStatusCode *OpenAIErrorWithStatusCode)
// }

@ -1,8 +1,8 @@
package types

type CompletionRequest struct {
	Model string `json:"model"`
	Prompt any `json:"prompt,omitempty"`
	Model string `json:"model" binding:"required"`
	Prompt any `json:"prompt" binding:"required"`
	Suffix string `json:"suffix,omitempty"`
	MaxTokens int `json:"max_tokens,omitempty"`
	Temperature float32 `json:"temperature,omitempty"`

@ -1,8 +1,8 @@
package types

type EmbeddingRequest struct {
	Model string `json:"model"`
	Input any `json:"input"`
	Model string `json:"model" binding:"required"`
	Input any `json:"input" binding:"required"`
	EncodingFormat string `json:"encoding_format,omitempty"`
	User string `json:"user,omitempty"`
}

@ -3,7 +3,7 @@ package types
import "mime/multipart"

type ImageRequest struct {
	Prompt string `json:"prompt,omitempty"`
	Prompt string `json:"prompt,omitempty" binding:"required"`
	Model string `json:"model,omitempty"`
	N int `json:"n,omitempty"`
	Quality string `json:"quality,omitempty"`
@ -25,7 +25,7 @@ type ImageResponseDataInner struct {
}

type ImageEditRequest struct {
	Image *multipart.FileHeader `form:"image"`
	Image *multipart.FileHeader `form:"image" binding:"required"`
	Mask *multipart.FileHeader `form:"mask"`
	Model string `form:"model"`
	Prompt string `form:"prompt"`

@ -1,7 +1,7 @@
package types

type ModerationRequest struct {
	Input string `json:"input,omitempty"`
	Input string `json:"input,omitempty" binding:"required"`
	Model string `json:"model,omitempty"`
}
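The binding:"required" tags added above are enforced by gin's built-in go-playground/validator integration whenever the request body is bound. A standalone sketch, independent of this repository and with a simplified field set, of how a missing required field surfaces as a validation error:

package main

import (
	"fmt"
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/go-playground/validator/v10"
)

// chatRequestDemo mirrors the idea of the tags above with a simplified message type.
type chatRequestDemo struct {
	Model    string   `json:"model" binding:"required"`
	Messages []string `json:"messages" binding:"required"`
}

func main() {
	r := gin.New()
	r.POST("/v1/chat/completions", func(c *gin.Context) {
		var req chatRequestDemo
		if err := c.ShouldBindJSON(&req); err != nil {
			// With a body like {"model":"gpt-3.5-turbo"} and no "messages", gin returns
			// validator.ValidationErrors whose first entry names the Messages field.
			if errs, ok := err.(validator.ValidationErrors); ok {
				c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("missing required field: %s", errs[0].Field())})
				return
			}
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		c.JSON(http.StatusOK, gin.H{"model": req.Model})
	})
	_ = r.Run(":8080") // demo only
}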