Merge branch 'main' into private
commit 341c21e4cb
@@ -6,6 +6,29 @@ import (
 	"time"
 )
 
+var DalleSizeRatios = map[string]map[string]float64{
+	"dall-e-2": {
+		"256x256":   1,
+		"512x512":   1.125,
+		"1024x1024": 1.25,
+	},
+	"dall-e-3": {
+		"1024x1024": 1,
+		"1024x1792": 2,
+		"1792x1024": 2,
+	},
+}
+
+var DalleGenerationImageAmounts = map[string][2]int{
+	"dall-e-2": {1, 10},
+	"dall-e-3": {1, 1}, // OpenAI allows n=1 currently.
+}
+
+var DalleImagePromptLengthLimitations = map[string]int{
+	"dall-e-2": 1000,
+	"dall-e-3": 4000,
+}
+
 // ModelRatio
 // https://platform.openai.com/docs/models/model-endpoint-compatibility
 // https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Blfmc9dlf
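
Note: the three tables above drive the image-relay changes later in this diff. DalleSizeRatios scales cost by resolution, DalleGenerationImageAmounts bounds the n parameter, and DalleImagePromptLengthLimitations caps prompt length. A minimal standalone sketch (illustrative values) of how a size ratio combines with the model and group ratios, mirroring the quota line in the relay-image hunk below:

package main

import "fmt"

// Trimmed copy of the DalleSizeRatios table above, for illustration only.
var dalleSizeRatios = map[string]map[string]float64{
	"dall-e-3": {"1024x1024": 1, "1024x1792": 2, "1792x1024": 2},
}

func main() {
	modelRatio := 20.0 // the "dall-e-3" entry added to ModelRatio below
	groupRatio := 1.0  // assume the default group ratio
	imageCostRatio := dalleSizeRatios["dall-e-3"]["1024x1792"]
	n := 1
	// Mirrors `quota := int(ratio*imageCostRatio*1000) * imageRequest.N` in relay-image.go.
	quota := int(modelRatio*groupRatio*imageCostRatio*1000) * n
	fmt.Println(quota) // 40000
}
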
@@ -36,7 +59,11 @@ var ModelRatio = map[string]float64{
 	"text-davinci-003":        10,
 	"text-davinci-edit-001":   10,
 	"code-davinci-edit-001":   10,
 	"whisper-1":               15,  // $0.006 / minute -> $0.006 / 150 words -> $0.006 / 200 tokens -> $0.03 / 1k tokens
+	"tts-1":                   7.5, // $0.015 / 1K characters
+	"tts-1-1106":              7.5,
+	"tts-1-hd":                15,  // $0.030 / 1K characters
+	"tts-1-hd-1106":           15,
 	"davinci":                 10,
 	"curie":                   10,
 	"babbage":                 10,
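
Note: a quick sanity check on these ratios, assuming one-api's usual convention that a ratio of 1.0 corresponds to $0.002 per 1K units: 7.5 × $0.002 = $0.015 per 1K characters for tts-1, and 15 × $0.002 = $0.030 per 1K for tts-1-hd, matching the inline comments; the same arithmetic reproduces whisper-1's $0.03 / 1k tokens.
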
@@ -45,7 +72,8 @@ var ModelRatio = map[string]float64{
 	"text-search-ada-doc-001": 10,
 	"text-moderation-stable":  0.1,
 	"text-moderation-latest":  0.1,
-	"dall-e":                  8,
+	"dall-e-2":                8,  // $0.016 - $0.020 / image
+	"dall-e-3":                20, // $0.040 - $0.120 / image
 	"claude-instant-1":        0.815,  // $1.63 / 1M tokens
 	"claude-2":                5.51,   // $11.02 / 1M tokens
 	"ERNIE-Bot":               0.8572, // ¥0.012 / 1k tokens
@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"io"
 	"net/http"
 	"one-api/common"
 	"one-api/model"
@@ -45,8 +46,8 @@ func testChannel(channel *model.Channel, request ChatRequest) (err error, openai
 	if channel.Type == common.ChannelTypeAzure {
 		requestURL = getFullRequestURL(channel.GetBaseURL(), fmt.Sprintf("/openai/deployments/%s/chat/completions?api-version=2023-03-15-preview", request.Model), channel.Type)
 	} else {
-		if channel.GetBaseURL() != "" {
-			requestURL = channel.GetBaseURL()
+		if baseURL := channel.GetBaseURL(); len(baseURL) > 0 {
+			requestURL = baseURL
 		}
 
 		requestURL = getFullRequestURL(requestURL, "/v1/chat/completions", channel.Type)
@@ -71,10 +72,14 @@ func testChannel(channel *model.Channel, request ChatRequest) (err error, openai
 	}
 	defer resp.Body.Close()
 	var response TextResponse
-	err = json.NewDecoder(resp.Body).Decode(&response)
+	body, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return err, nil
 	}
+	err = json.Unmarshal(body, &response)
+	if err != nil {
+		return fmt.Errorf("Error: %s\nResp body: %s", err, body), nil
+	}
 	if response.Usage.CompletionTokens == 0 {
 		return errors.New(fmt.Sprintf("type %s, code %v, message %s", response.Error.Type, response.Error.Code, response.Error.Message)), &response.Error
 	}
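
Note: switching from json.NewDecoder(resp.Body).Decode to io.ReadAll plus json.Unmarshal is about error reporting. Once a streaming decoder chokes on a non-JSON upstream reply, the body is gone, whereas holding the raw bytes lets the error include them. A self-contained sketch of the failure mode (the body here is a made-up upstream error page):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	body := []byte("<html>502 Bad Gateway</html>") // what a broken channel might return
	var response struct {
		Usage struct {
			CompletionTokens int `json:"completion_tokens"`
		} `json:"usage"`
	}
	if err := json.Unmarshal(body, &response); err != nil {
		// Same shape as the new testChannel error: including the raw body turns
		// a bare `invalid character '<' ...` into something diagnosable.
		fmt.Printf("Error: %s\nResp body: %s\n", err, body)
	}
}
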
@@ -55,12 +55,21 @@ func init() {
 	// https://platform.openai.com/docs/models/model-endpoint-compatibility
 	openAIModels = []OpenAIModels{
 		{
-			Id:         "dall-e",
+			Id:         "dall-e-2",
 			Object:     "model",
 			Created:    1677649963,
 			OwnedBy:    "openai",
 			Permission: permission,
-			Root:       "dall-e",
+			Root:       "dall-e-2",
+			Parent:     nil,
+		},
+		{
+			Id:         "dall-e-3",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "openai",
+			Permission: permission,
+			Root:       "dall-e-3",
 			Parent:     nil,
 		},
 		{
@@ -72,6 +81,42 @@ func init() {
 			Root:       "whisper-1",
 			Parent:     nil,
 		},
+		{
+			Id:         "tts-1",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "openai",
+			Permission: permission,
+			Root:       "tts-1",
+			Parent:     nil,
+		},
+		{
+			Id:         "tts-1-1106",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "openai",
+			Permission: permission,
+			Root:       "tts-1-1106",
+			Parent:     nil,
+		},
+		{
+			Id:         "tts-1-hd",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "openai",
+			Permission: permission,
+			Root:       "tts-1-hd",
+			Parent:     nil,
+		},
+		{
+			Id:         "tts-1-hd-1106",
+			Object:     "model",
+			Created:    1677649963,
+			OwnedBy:    "openai",
+			Permission: permission,
+			Root:       "tts-1-hd-1106",
+			Parent:     nil,
+		},
 		{
 			Id:         "gpt-3.5-turbo",
 			Object:     "model",
@@ -5,7 +5,6 @@ import (
 	"context"
 	"encoding/json"
 	"errors"
-	"fmt"
 	"github.com/gin-gonic/gin"
 	"io"
 	"net/http"
@@ -21,6 +20,22 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 	channelId := c.GetInt("channel_id")
 	userId := c.GetInt("id")
 	group := c.GetString("group")
+	tokenName := c.GetString("token_name")
+
+	var ttsRequest TextToSpeechRequest
+	if relayMode == RelayModeAudioSpeech {
+		// Read JSON
+		err := common.UnmarshalBodyReusable(c, &ttsRequest)
+		// Check if JSON is valid
+		if err != nil {
+			return errorWrapper(err, "invalid_json", http.StatusBadRequest)
+		}
+		audioModel = ttsRequest.Model
+		// Check if text is too long 4096
+		if len(ttsRequest.Input) > 4096 {
+			return errorWrapper(errors.New("input is too long (over 4096 characters)"), "text_too_long", http.StatusBadRequest)
+		}
+	}
 
 	preConsumedTokens := common.PreConsumedQuota
 	modelRatio := common.GetModelRatio(audioModel)
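
Note: for reference, a speech request body that passes the validation above, parsed into the TextToSpeechRequest struct defined later in this diff (values are illustrative; "alloy" is one of OpenAI's documented voices):

{
  "model": "tts-1",
  "input": "The quick brown fox jumped over the lazy dog.",
  "voice": "alloy"
}
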
@@ -31,22 +46,32 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 	if err != nil {
 		return errorWrapper(err, "get_user_quota_failed", http.StatusInternalServerError)
 	}
-	if userQuota-preConsumedQuota < 0 {
-		return errorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
-	}
-	err = model.CacheDecreaseUserQuota(userId, preConsumedQuota)
-	if err != nil {
-		return errorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError)
-	}
-	if userQuota > 100*preConsumedQuota {
-		// in this case, we do not pre-consume quota
-		// because the user has enough quota
-		preConsumedQuota = 0
-	}
-	if preConsumedQuota > 0 {
-		err := model.PreConsumeTokenQuota(tokenId, preConsumedQuota)
-		if err != nil {
-			return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
+
+	quota := 0
+	// Check if user quota is enough
+	if relayMode == RelayModeAudioSpeech {
+		quota = int(float64(len(ttsRequest.Input)) * modelRatio * groupRatio)
+		if quota > userQuota {
+			return errorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
+		}
+	} else {
+		if userQuota-preConsumedQuota < 0 {
+			return errorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
+		}
+		err = model.CacheDecreaseUserQuota(userId, preConsumedQuota)
+		if err != nil {
+			return errorWrapper(err, "decrease_user_quota_failed", http.StatusInternalServerError)
+		}
+		if userQuota > 100*preConsumedQuota {
+			// in this case, we do not pre-consume quota
+			// because the user has enough quota
+			preConsumedQuota = 0
+		}
+		if preConsumedQuota > 0 {
+			err := model.PreConsumeTokenQuota(tokenId, preConsumedQuota)
+			if err != nil {
+				return errorWrapper(err, "pre_consume_token_quota_failed", http.StatusForbidden)
+			}
 		}
 	}
 
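
Note the asymmetry this introduces: speech is priced exactly up front and rejected outright if it exceeds the balance, e.g. a 1,000-character tts-1 input at model ratio 7.5 and group ratio 1 costs int(1000 × 7.5 × 1) = 7500 quota, while transcription keeps the old estimate-then-settle flow of pre-consuming and refunding.
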
@@ -93,47 +118,32 @@ func relayAudioHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 	if err != nil {
 		return errorWrapper(err, "close_request_body_failed", http.StatusInternalServerError)
 	}
-	var audioResponse AudioResponse
-
-	defer func(ctx context.Context) {
-		go func() {
-			quota := countTokenText(audioResponse.Text, audioModel)
-			quotaDelta := quota - preConsumedQuota
-			err := model.PostConsumeTokenQuota(tokenId, quotaDelta)
-			if err != nil {
-				common.SysError("error consuming token remain quota: " + err.Error())
-			}
-			err = model.CacheUpdateUserQuota(userId)
-			if err != nil {
-				common.SysError("error update user quota cache: " + err.Error())
-			}
-			if quota != 0 {
-				tokenName := c.GetString("token_name")
-				logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
-				model.RecordConsumeLog(ctx, userId, channelId, 0, 0, audioModel, tokenName, quota, logContent)
-				model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
-				channelId := c.GetInt("channel_id")
-				model.UpdateChannelUsedQuota(channelId, quota)
-			}
-		}()
-	}(c.Request.Context())
-
-	responseBody, err := io.ReadAll(resp.Body)
-
-	if err != nil {
-		return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
-	}
-	err = resp.Body.Close()
-	if err != nil {
-		return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
-	}
-	err = json.Unmarshal(responseBody, &audioResponse)
-	if err != nil {
-		return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError)
-	}
-
-	resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
+	if relayMode == RelayModeAudioSpeech {
+		defer func(ctx context.Context) {
+			go postConsumeQuota(ctx, tokenId, quota, userId, channelId, modelRatio, groupRatio, audioModel, tokenName)
+		}(c.Request.Context())
+	} else {
+		responseBody, err := io.ReadAll(resp.Body)
+		if err != nil {
+			return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
+		}
+		err = resp.Body.Close()
+		if err != nil {
+			return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
+		}
+		var whisperResponse WhisperResponse
+		err = json.Unmarshal(responseBody, &whisperResponse)
+		if err != nil {
+			return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError)
+		}
+		defer func(ctx context.Context) {
+			quota := countTokenText(whisperResponse.Text, audioModel)
+			quotaDelta := quota - preConsumedQuota
+			go postConsumeQuota(ctx, tokenId, quotaDelta, userId, channelId, modelRatio, groupRatio, audioModel, tokenName)
+		}(c.Request.Context())
+		resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
+	}
 
 	for k, v := range resp.Header {
 		c.Writer.Header().Set(k, v[0])
 	}
@@ -6,15 +6,28 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"github.com/gin-gonic/gin"
 	"io"
 	"net/http"
 	"one-api/common"
 	"one-api/model"
+
+	"github.com/gin-gonic/gin"
 )
 
+func isWithinRange(element string, value int) bool {
+	if _, ok := common.DalleGenerationImageAmounts[element]; !ok {
+		return false
+	}
+
+	min := common.DalleGenerationImageAmounts[element][0]
+	max := common.DalleGenerationImageAmounts[element][1]
+
+	return value >= min && value <= max
+}
+
 func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
-	imageModel := "dall-e"
+	imageModel := "dall-e-2"
+	imageSize := "1024x1024"
+
 	tokenId := c.GetInt("token_id")
 	channelType := c.GetInt("channel")
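
Note: a standalone sketch of the new isWithinRange check, with the bounds table copied from the common package hunk above:

package main

import "fmt"

// Copy of common.DalleGenerationImageAmounts from this diff, for illustration.
var amounts = map[string][2]int{
	"dall-e-2": {1, 10},
	"dall-e-3": {1, 1},
}

func isWithinRange(element string, value int) bool {
	bounds, ok := amounts[element]
	if !ok {
		return false
	}
	return value >= bounds[0] && value <= bounds[1]
}

func main() {
	fmt.Println(isWithinRange("dall-e-2", 4)) // true: n may be 1..10
	fmt.Println(isWithinRange("dall-e-3", 2)) // false: DALL-E 3 only allows n=1
	fmt.Println(isWithinRange("dall-e-x", 1)) // false: unknown model
}
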
@@ -31,19 +44,44 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 		}
 	}
 
+	// Size validation
+	if imageRequest.Size != "" {
+		imageSize = imageRequest.Size
+	}
+
+	// Model validation
+	if imageRequest.Model != "" {
+		imageModel = imageRequest.Model
+	}
+
+	imageCostRatio, hasValidSize := common.DalleSizeRatios[imageModel][imageSize]
+
+	// Check if model is supported
+	if hasValidSize {
+		if imageRequest.Quality == "hd" && imageModel == "dall-e-3" {
+			if imageSize == "1024x1024" {
+				imageCostRatio *= 2
+			} else {
+				imageCostRatio *= 1.5
+			}
+		}
+	} else {
+		return errorWrapper(errors.New("size not supported for this image model"), "size_not_supported", http.StatusBadRequest)
+	}
+
 	// Prompt validation
 	if imageRequest.Prompt == "" {
-		return errorWrapper(errors.New("prompt is required"), "required_field_missing", http.StatusBadRequest)
+		return errorWrapper(errors.New("prompt is required"), "prompt_missing", http.StatusBadRequest)
 	}
 
-	// Not "256x256", "512x512", or "1024x1024"
-	if imageRequest.Size != "" && imageRequest.Size != "256x256" && imageRequest.Size != "512x512" && imageRequest.Size != "1024x1024" {
-		return errorWrapper(errors.New("size must be one of 256x256, 512x512, or 1024x1024"), "invalid_field_value", http.StatusBadRequest)
+	// Check prompt length
+	if len(imageRequest.Prompt) > common.DalleImagePromptLengthLimitations[imageModel] {
+		return errorWrapper(errors.New("prompt is too long"), "prompt_too_long", http.StatusBadRequest)
 	}
 
-	// N should between 1 and 10
-	if imageRequest.N != 0 && (imageRequest.N < 1 || imageRequest.N > 10) {
-		return errorWrapper(errors.New("n must be between 1 and 10"), "invalid_field_value", http.StatusBadRequest)
+	// Number of generated images validation
+	if isWithinRange(imageModel, imageRequest.N) == false {
+		return errorWrapper(errors.New("invalid value of n"), "n_not_within_range", http.StatusBadRequest)
 	}
 
 	// map model name
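
Note: worked through with the tables above, the HD multiplier lines up with OpenAI's published DALL-E 3 prices: standard 1024x1024 keeps cost ratio 1 ($0.040), HD 1024x1024 becomes 1 × 2 = 2 ($0.080), and HD 1024x1792 or 1792x1024 becomes 2 × 1.5 = 3 ($0.120), consistent with the "$0.040 - $0.120 / image" comment in the ModelRatio hunk.
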
@@ -82,16 +120,7 @@ func relayImageHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode
 	ratio := modelRatio * groupRatio
 	userQuota, err := model.CacheGetUserQuota(userId)
 
-	sizeRatio := 1.0
-	// Size
-	if imageRequest.Size == "256x256" {
-		sizeRatio = 1
-	} else if imageRequest.Size == "512x512" {
-		sizeRatio = 1.125
-	} else if imageRequest.Size == "1024x1024" {
-		sizeRatio = 1.25
-	}
-	quota := int(ratio*sizeRatio*1000) * imageRequest.N
+	quota := int(ratio*imageCostRatio*1000) * imageRequest.N
 
 	if consumeQuota && userQuota-quota < 0 {
 		return errorWrapper(errors.New("user quota is not enough"), "insufficient_user_quota", http.StatusForbidden)
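
Note: one standard dall-e-3 1024x1024 image with modelRatio 20, groupRatio 1 and imageCostRatio 1 now costs int(20 × 1 × 1 × 1000) × 1 = 20000 quota; assuming one-api's default scale of 500,000 quota per dollar, that is $0.04, matching the pricing comment. The old hard-coded sizeRatio ladder only knew the DALL-E 2 sizes, which is why it is dropped here.
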
@@ -369,6 +369,8 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 		}
 	case APITypeTencent:
 		req.Header.Set("Authorization", apiKey)
+	case APITypePaLM:
+		// do not set Authorization header
 	default:
 		req.Header.Set("Authorization", "Bearer "+apiKey)
 	}
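
Note: the empty PaLM case is presumably here because Google's PaLM API authenticates via a key query parameter rather than an Authorization header; making the case explicit keeps it from falling into the default Bearer branch.
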
@@ -1,11 +1,13 @@
 package controller
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"io"
 	"net/http"
 	"one-api/common"
+	"one-api/model"
 	"strconv"
 	"strings"
 
@@ -192,3 +194,20 @@ func getFullRequestURL(baseURL string, requestURL string, channelType int) string {
 
 	return fullRequestURL
 }
+
+func postConsumeQuota(ctx context.Context, tokenId int, quota int, userId int, channelId int, modelRatio float64, groupRatio float64, modelName string, tokenName string) {
+	err := model.PostConsumeTokenQuota(tokenId, quota)
+	if err != nil {
+		common.SysError("error consuming token remain quota: " + err.Error())
+	}
+	err = model.CacheUpdateUserQuota(userId)
+	if err != nil {
+		common.SysError("error update user quota cache: " + err.Error())
+	}
+	if quota != 0 {
+		logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
+		model.RecordConsumeLog(ctx, userId, channelId, 0, 0, modelName, tokenName, quota, logContent)
+		model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
+		model.UpdateChannelUsedQuota(channelId, quota)
+	}
+}
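
Note: this helper consolidates the settle-and-log sequence that relay-audio.go previously inlined (visible in the removed lines of the @@ -93,47 hunk above): settle the token quota, refresh the cached user quota, then record the consume log and usage counters.
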
@@ -24,7 +24,9 @@ const (
 	RelayModeModerations
 	RelayModeImagesGenerations
 	RelayModeEdits
-	RelayModeAudio
+	RelayModeAudioSpeech
+	RelayModeAudioTranscription
+	RelayModeAudioTranslation
 )
 
 // https://platform.openai.com/docs/api-reference/chat
@@ -77,16 +79,30 @@ type TextRequest struct {
 	//Stream bool `json:"stream"`
 }
 
+// ImageRequest docs: https://platform.openai.com/docs/api-reference/images/create
 type ImageRequest struct {
-	Prompt string `json:"prompt"`
-	N      int    `json:"n"`
-	Size   string `json:"size"`
+	Model          string `json:"model"`
+	Prompt         string `json:"prompt" binding:"required"`
+	N              int    `json:"n"`
+	Size           string `json:"size"`
+	Quality        string `json:"quality"`
+	ResponseFormat string `json:"response_format"`
+	Style          string `json:"style"`
+	User           string `json:"user"`
 }
 
-type AudioResponse struct {
+type WhisperResponse struct {
 	Text string `json:"text,omitempty"`
 }
 
+type TextToSpeechRequest struct {
+	Model          string  `json:"model" binding:"required"`
+	Input          string  `json:"input" binding:"required"`
+	Voice          string  `json:"voice" binding:"required"`
+	Speed          float64 `json:"speed"`
+	ResponseFormat string  `json:"response_format"`
+}
+
 type Usage struct {
 	PromptTokens     int `json:"prompt_tokens"`
 	CompletionTokens int `json:"completion_tokens"`
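
Note: an image request exercising the new fields might look like this (values are illustrative; quality and style follow OpenAI's image API):

{
  "model": "dall-e-3",
  "prompt": "a watercolor fox",
  "n": 1,
  "size": "1024x1792",
  "quality": "hd",
  "style": "vivid"
}
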
@@ -183,14 +199,22 @@ func Relay(c *gin.Context) {
 		relayMode = RelayModeImagesGenerations
 	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/edits") {
 		relayMode = RelayModeEdits
-	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio") {
-		relayMode = RelayModeAudio
+	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/speech") {
+		relayMode = RelayModeAudioSpeech
+	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/transcription") {
+		relayMode = RelayModeAudioTranscription
+	} else if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/translation") {
+		relayMode = RelayModeAudioTranslation
 	}
 	var err *OpenAIErrorWithStatusCode
 	switch relayMode {
 	case RelayModeImagesGenerations:
 		err = relayImageHelper(c, relayMode)
-	case RelayModeAudio:
+	case RelayModeAudioSpeech:
+		fallthrough
+	case RelayModeAudioTranslation:
+		fallthrough
+	case RelayModeAudioTranscription:
 		err = relayAudioHelper(c, relayMode)
 	default:
 		err = relayTextHelper(c, relayMode)
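
Note: the fallthrough chain is deliberate; Go's switch does not fall through by default, so the explicit fallthrough statements route all three audio modes into relayAudioHelper, which then branches on relayMode internally, as shown in the relay-audio.go hunks above.
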
@@ -40,10 +40,7 @@ func Distribute() func(c *gin.Context) {
 		} else {
 			// Select a channel for the user
 			var modelRequest ModelRequest
-			var err error
-			if !strings.HasPrefix(c.Request.URL.Path, "/v1/audio") {
-				err = common.UnmarshalBodyReusable(c, &modelRequest)
-			}
+			err := common.UnmarshalBodyReusable(c, &modelRequest)
 			if err != nil {
 				abortWithMessage(c, http.StatusBadRequest, "无效的请求")
 				return
@@ -60,10 +57,10 @@ func Distribute() func(c *gin.Context) {
 			}
 			if strings.HasPrefix(c.Request.URL.Path, "/v1/images/generations") {
 				if modelRequest.Model == "" {
-					modelRequest.Model = "dall-e"
+					modelRequest.Model = "dall-e-2"
 				}
 			}
-			if strings.HasPrefix(c.Request.URL.Path, "/v1/audio") {
+			if strings.HasPrefix(c.Request.URL.Path, "/v1/audio/transcriptions") || strings.HasPrefix(c.Request.URL.Path, "/v1/audio/translations") {
 				if modelRequest.Model == "" {
 					modelRequest.Model = "whisper-1"
 				}
@@ -29,6 +29,7 @@ func SetRelayRouter(router *gin.Engine) {
 	relayV1Router.POST("/engines/:model/embeddings", controller.Relay)
 	relayV1Router.POST("/audio/transcriptions", controller.Relay)
 	relayV1Router.POST("/audio/translations", controller.Relay)
+	relayV1Router.POST("/audio/speech", controller.Relay)
 	relayV1Router.GET("/files", controller.RelayNotImplemented)
 	relayV1Router.POST("/files", controller.RelayNotImplemented)
 	relayV1Router.DELETE("/files/:id", controller.RelayNotImplemented)
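
Note: with the route registered, a quick end-to-end check could look like the following (host, port and key are placeholders):

curl http://localhost:3000/v1/audio/speech \
  -H "Authorization: Bearer $ONE_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{"model": "tts-1", "input": "hello world", "voice": "alloy"}' \
  --output speech.mp3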