♻️ refactor: refactor the speech interface

Martial BE 2023-11-29 18:11:15 +08:00
parent 1c7c2d40bb
commit 96dc7614e6
6 changed files with 125 additions and 6 deletions

View File

@@ -103,6 +103,15 @@ func (c *Client) SendRequest(req *http.Request, response any) error {
return nil
}
func (c *Client) SendRequestRaw(req *http.Request) (body io.ReadCloser, err error) {
resp, err := HttpClient.Do(req)
if err != nil {
return
}
return resp.Body, nil
}
func IsFailureStatusCode(resp *http.Response) bool {
return resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest
}
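Unlike SendRequest, the new SendRequestRaw hands back resp.Body unread and unclosed, so the caller owns the stream and must close it. A minimal sketch of the intended call pattern, assuming a *common.Client and an already-built *http.Request (the helper below is hypothetical and not part of this commit):

// streamRaw is a hypothetical helper illustrating SendRequestRaw usage:
// the caller closes the returned body once it has been copied.
func streamRaw(client *common.Client, req *http.Request, dst io.Writer) error {
	body, err := client.SendRequestRaw(req)
	if err != nil {
		return err
	}
	defer body.Close()

	_, err = io.Copy(dst, body)
	return err
}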

View File

@@ -13,7 +13,7 @@ import (
"github.com/gin-gonic/gin"
)
func relayTextHelper(c *gin.Context, relayMode int) *types.OpenAIErrorWithStatusCode {
func relayHelper(c *gin.Context, relayMode int) *types.OpenAIErrorWithStatusCode {
// Get the request parameters
channelType := c.GetInt("channel")
channelId := c.GetInt("channel_id")
@@ -36,10 +36,9 @@ func relayTextHelper(c *gin.Context, relayMode int) *types.OpenAIErrorWithStatus
return types.ErrorWrapper(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError)
}
var promptTokens int
quotaInfo := &QuotaInfo{
modelName: "",
promptTokens: promptTokens,
promptTokens: 0,
userId: userId,
channelId: channelId,
tokenId: tokenId,
@@ -57,6 +56,8 @@ func relayTextHelper(c *gin.Context, relayMode int) *types.OpenAIErrorWithStatus
usage, openAIErrorWithStatusCode = handleEmbeddings(c, provider, modelMap, quotaInfo, group)
case common.RelayModeModerations:
usage, openAIErrorWithStatusCode = handleModerations(c, provider, modelMap, quotaInfo, group)
case common.RelayModeAudioSpeech:
usage, openAIErrorWithStatusCode = handleSpeech(c, provider, modelMap, quotaInfo, group)
default:
return types.ErrorWrapper(errors.New("invalid relay mode"), "invalid_relay_mode", http.StatusBadRequest)
}
@@ -112,6 +113,7 @@ func handleChatCompletions(c *gin.Context, provider providers_base.ProviderInter
promptTokens := common.CountTokenMessages(chatRequest.Messages, chatRequest.Model)
quotaInfo.modelName = chatRequest.Model
quotaInfo.promptTokens = promptTokens
quotaInfo.initQuotaInfo(group)
quota_err := quotaInfo.preQuotaConsumption()
if quota_err != nil {
@@ -144,6 +146,7 @@ func handleCompletions(c *gin.Context, provider providers_base.ProviderInterface
promptTokens := common.CountTokenInput(completionRequest.Prompt, completionRequest.Model)
quotaInfo.modelName = completionRequest.Model
quotaInfo.promptTokens = promptTokens
quotaInfo.initQuotaInfo(group)
quota_err := quotaInfo.preQuotaConsumption()
if quota_err != nil {
@@ -176,6 +179,7 @@ func handleEmbeddings(c *gin.Context, provider providers_base.ProviderInterface,
promptTokens := common.CountTokenInput(embeddingsRequest.Input, embeddingsRequest.Model)
quotaInfo.modelName = embeddingsRequest.Model
quotaInfo.promptTokens = promptTokens
quotaInfo.initQuotaInfo(group)
quota_err := quotaInfo.preQuotaConsumption()
if quota_err != nil {
@@ -212,6 +216,7 @@ func handleModerations(c *gin.Context, provider providers_base.ProviderInterface
promptTokens := common.CountTokenInput(moderationRequest.Input, moderationRequest.Model)
quotaInfo.modelName = moderationRequest.Model
quotaInfo.promptTokens = promptTokens
quotaInfo.initQuotaInfo(group)
quota_err := quotaInfo.preQuotaConsumption()
if quota_err != nil {
@@ -219,3 +224,36 @@ func handleModerations(c *gin.Context, provider providers_base.ProviderInterface
}
return moderationProvider.ModerationAction(&moderationRequest, isModelMapped, promptTokens)
}
func handleSpeech(c *gin.Context, provider providers_base.ProviderInterface, modelMap map[string]string, quotaInfo *QuotaInfo, group string) (*types.Usage, *types.OpenAIErrorWithStatusCode) {
var speechRequest types.SpeechAudioRequest
isModelMapped := false
speechProvider, ok := provider.(providers_base.SpeechInterface)
if !ok {
return nil, types.ErrorWrapper(errors.New("channel not implemented"), "channel_not_implemented", http.StatusNotImplemented)
}
err := common.UnmarshalBodyReusable(c, &speechRequest)
if err != nil {
return nil, types.ErrorWrapper(err, "bind_request_body_failed", http.StatusBadRequest)
}
if speechRequest.Input == "" {
return nil, types.ErrorWrapper(errors.New("field input is required"), "required_field_missing", http.StatusBadRequest)
}
if modelMap != nil && modelMap[speechRequest.Model] != "" {
speechRequest.Model = modelMap[speechRequest.Model]
isModelMapped = true
}
promptTokens := len(speechRequest.Input)
quotaInfo.modelName = speechRequest.Model
quotaInfo.promptTokens = promptTokens
quotaInfo.initQuotaInfo(group)
quota_err := quotaInfo.preQuotaConsumption()
if quota_err != nil {
return nil, quota_err
}
return speechProvider.SpeechAction(&speechRequest, isModelMapped, promptTokens)
}
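handleSpeech charges quota up front using len(speechRequest.Input), i.e. the byte length of the input string rather than a tokenizer count. The types.SpeechAudioRequest definition is not part of this diff; a plausible shape, inferred from OpenAI's /v1/audio/speech payload, would be:

// Assumed layout of types.SpeechAudioRequest (not shown in this commit); the
// fields mirror OpenAI's speech endpoint and may differ from the real struct.
type SpeechAudioRequest struct {
	Model          string  `json:"model"`
	Input          string  `json:"input"`
	Voice          string  `json:"voice"`
	ResponseFormat string  `json:"response_format,omitempty"`
	Speed          float64 `json:"speed,omitempty"`
}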

View File

@@ -235,13 +235,13 @@ func Relay(c *gin.Context) {
relayMode = common.RelayModeEmbeddings
} else if strings.HasPrefix(c.Request.URL.Path, "/v1/moderations") {
relayMode = common.RelayModeModerations
} else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/speech") {
relayMode = common.RelayModeAudioSpeech
}
// } else if strings.HasPrefix(c.Request.URL.Path, "/v1/images/generations") {
// relayMode = RelayModeImagesGenerations
// } else if strings.HasPrefix(c.Request.URL.Path, "/v1/edits") {
// relayMode = RelayModeEdits
// } else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/speech") {
// relayMode = RelayModeAudioSpeech
// } else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/transcriptions") {
// relayMode = RelayModeAudioTranscription
// } else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/translations") {
@@ -257,7 +257,7 @@ func Relay(c *gin.Context) {
// case RelayModeAudioTranscription:
// err = relayAudioHelper(c, relayMode)
default:
err = relayTextHelper(c, relayMode)
err = relayHelper(c, relayMode)
}
if err != nil {
requestId := c.GetString(common.RequestIdKey)

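This change only maps the /v1/audio/speech path prefix to common.RelayModeAudioSpeech inside Relay; registering the route itself is outside this diff. A hedged sketch of how such a route would typically be wired up in the Gin router (the helper name is hypothetical, and the real setup may attach auth or distribution middleware before controller.Relay):

// registerSpeechRoute is a hypothetical sketch; the actual router setup
// lives elsewhere in the project and is not changed by this commit.
func registerSpeechRoute(router *gin.Engine) {
	v1 := router.Group("/v1")
	v1.POST("/audio/speech", controller.Relay)
}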
View File

@@ -95,6 +95,35 @@ func (p *BaseProvider) SendRequest(req *http.Request, response ProviderResponseH
return nil
}
func (p *BaseProvider) SendRequestRaw(req *http.Request) (openAIErrorWithStatusCode *types.OpenAIErrorWithStatusCode) {
// Send the request
resp, err := common.HttpClient.Do(req)
if err != nil {
return types.ErrorWrapper(err, "http_request_failed", http.StatusInternalServerError)
}
defer resp.Body.Close()
// Handle the response
if common.IsFailureStatusCode(resp) {
return p.HandleErrorResp(resp)
}
for k, v := range resp.Header {
p.Context.Writer.Header().Set(k, v[0])
}
p.Context.Writer.WriteHeader(resp.StatusCode)
_, err = io.Copy(p.Context.Writer, resp.Body)
if err != nil {
return types.ErrorWrapper(err, "write_response_body_failed", http.StatusInternalServerError)
}
return nil
}
// Handle error responses
func (p *BaseProvider) HandleErrorResp(resp *http.Response) (openAIErrorWithStatusCode *types.OpenAIErrorWithStatusCode) {
openAIErrorWithStatusCode = &types.OpenAIErrorWithStatusCode{

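BaseProvider.SendRequestRaw forwards only the first value of each upstream header (v[0]), mirrors the status code, and streams the body straight into the Gin writer. A hedged test sketch of that pass-through behaviour, assuming the test lives in the same package as BaseProvider and that the provider only needs its exported Context field to stream a response:

func TestSendRequestRawPassThrough(t *testing.T) {
	// Fake upstream that returns a small "audio" payload.
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "audio/mpeg")
		w.Write([]byte("fake-audio-bytes"))
	}))
	defer upstream.Close()

	rec := httptest.NewRecorder()
	ginCtx, _ := gin.CreateTestContext(rec)

	// Assumption: a BaseProvider with just a Context is enough for this path.
	p := &BaseProvider{Context: ginCtx}
	req, _ := http.NewRequest(http.MethodGet, upstream.URL, nil)

	if errWithCode := p.SendRequestRaw(req); errWithCode != nil {
		t.Fatalf("unexpected error: %+v", errWithCode)
	}
	if rec.Header().Get("Content-Type") != "audio/mpeg" {
		t.Errorf("Content-Type header was not forwarded")
	}
	if rec.Body.String() != "fake-audio-bytes" {
		t.Errorf("response body was not streamed through")
	}
}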
View File

@@ -38,6 +38,12 @@ type ModerationInterface interface {
ModerationAction(request *types.ModerationRequest, isModelMapped bool, promptTokens int) (usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode)
}
// Text-to-speech interface
type SpeechInterface interface {
ProviderInterface
SpeechAction(request *types.SpeechAudioRequest, isModelMapped bool, promptTokens int) (usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode)
}
// Balance interface
type BalanceInterface interface {
BalanceAction(channel *model.Channel) (float64, error)

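Because SpeechInterface embeds ProviderInterface, an existing provider only needs to add a SpeechAction method for the type assertion in relayHelper to pick it up. A minimal, hypothetical skeleton (the provider name and embedded base are illustrative only; a real implementation would build and stream the upstream request, as the OpenAI provider below does):

// ExampleSpeechProvider is a hypothetical provider used only to illustrate
// how the new interface is satisfied; it is not part of this commit.
type ExampleSpeechProvider struct {
	providers_base.BaseProvider
}

func (p *ExampleSpeechProvider) SpeechAction(request *types.SpeechAudioRequest, isModelMapped bool, promptTokens int) (*types.Usage, *types.OpenAIErrorWithStatusCode) {
	// ... call the upstream text-to-speech API and stream the audio back ...
	return &types.Usage{PromptTokens: promptTokens, TotalTokens: promptTokens}, nil
}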
View File

@@ -0,0 +1,37 @@
package openai
import (
"net/http"
"one-api/common"
"one-api/types"
)
func (p *OpenAIProvider) SpeechAction(request *types.SpeechAudioRequest, isModelMapped bool, promptTokens int) (usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode) {
requestBody, err := p.getRequestBody(&request, isModelMapped)
if err != nil {
return nil, types.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
}
fullRequestURL := p.GetFullRequestURL(p.AudioSpeech, request.Model)
headers := p.GetRequestHeaders()
client := common.NewClient()
req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
if err != nil {
return nil, types.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
}
errWithCode = p.SendRequestRaw(req)
if errWithCode != nil {
return
}
usage = &types.Usage{
PromptTokens: promptTokens,
CompletionTokens: 0,
TotalTokens: promptTokens,
}
return
}
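With the relay mode, the provider action, and the raw pass-through in place, a client can post an OpenAI-style speech request to the proxy and receive the audio bytes unmodified. A hypothetical end-to-end call; the base URL, API key, and output path are placeholders, and the payload fields follow OpenAI's speech endpoint:

package main

import (
	"bytes"
	"io"
	"net/http"
	"os"
)

func main() {
	// Placeholder endpoint and token; adjust to the actual deployment.
	payload := []byte(`{"model":"tts-1","input":"Hello from one-api","voice":"alloy"}`)
	req, err := http.NewRequest(http.MethodPost, "http://localhost:3000/v1/audio/speech", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer sk-your-token")
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The proxy streams the upstream audio back verbatim, so just save it.
	out, err := os.Create("speech.mp3")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, resp.Body); err != nil {
		panic(err)
	}
}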