diff --git a/.github/workflows/linux-release.yml b/.github/workflows/linux-release.yml
index 98edc471..e81ab09f 100644
--- a/.github/workflows/linux-release.yml
+++ b/.github/workflows/linux-release.yml
@@ -38,7 +38,7 @@ jobs:
       - name: Build Backend (amd64)
         run: |
           go mod download
-          go build -ldflags "-s -w -X 'one-api/common.Version=$(git describe --tags)' -extldflags '-static'" -o one-api
+          go build -ldflags "-s -w -X 'github.com/songquanpeng/one-api/common.Version=$(git describe --tags)' -extldflags '-static'" -o one-api
       - name: Build Backend (arm64)
         run: |
diff --git a/.github/workflows/macos-release.yml b/.github/workflows/macos-release.yml
index 9142609f..13415276 100644
--- a/.github/workflows/macos-release.yml
+++ b/.github/workflows/macos-release.yml
@@ -38,7 +38,7 @@ jobs:
       - name: Build Backend
         run: |
           go mod download
-          go build -ldflags "-X 'one-api/common.Version=$(git describe --tags)'" -o one-api-macos
+          go build -ldflags "-X 'github.com/songquanpeng/one-api/common.Version=$(git describe --tags)'" -o one-api-macos
       - name: Release
         uses: softprops/action-gh-release@v1
         if: startsWith(github.ref, 'refs/tags/')
diff --git a/.github/workflows/windows-release.yml b/.github/workflows/windows-release.yml
index c058f41d..8b1160b4 100644
--- a/.github/workflows/windows-release.yml
+++ b/.github/workflows/windows-release.yml
@@ -41,7 +41,7 @@ jobs:
      - name: Build Backend
        run: |
          go mod download
-         go build -ldflags "-s -w -X 'one-api/common.Version=$(git describe --tags)'" -o one-api.exe
+         go build -ldflags "-s -w -X 'github.com/songquanpeng/one-api/common.Version=$(git describe --tags)'" -o one-api.exe
      - name: Release
        uses: softprops/action-gh-release@v1
        if: startsWith(github.ref, 'refs/tags/')
diff --git a/README.md b/README.md
index ff1fffd2..a92142ae 100644
--- a/README.md
+++ b/README.md
@@ -74,8 +74,9 @@ _✨ 通过标准的 OpenAI API 格式访问所有的大模型,开箱即用
    + [x] [360 智脑](https://ai.360.cn)
    + [x] [腾讯混元大模型](https://cloud.tencent.com/document/product/1729)
    + [x] [Moonshot AI](https://platform.moonshot.cn/)
+   + [x] [百川大模型](https://platform.baichuan-ai.com)
    + [ ] [字节云雀大模型](https://www.volcengine.com/product/ark) (WIP)
-   + [ ] [MINIMAX](https://api.minimax.chat/) (WIP)
+   + [x] [MINIMAX](https://api.minimax.chat/)
 2. 支持配置镜像以及众多[第三方代理服务](https://iamazing.cn/page/openai-api-third-party-services)。
 3. 支持通过**负载均衡**的方式访问多个渠道。
 4. 支持 **stream 模式**,可以通过流式传输实现打字机效果。
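
Reviewer note on the workflow changes: Go's `-X` linker flag addresses its target variable by full import path and silently does nothing when that path is wrong, so the module rename to `github.com/songquanpeng/one-api` has to be mirrored in every release workflow. A minimal sketch of the mechanism (the file contents are illustrative, not the actual `common` package):

```go
// Illustrative sketch of the -X injection target; the real common package
// declares an exported Version string along these lines.
package common

// Version keeps this placeholder unless the linker overrides it at build time:
//   go build -ldflags "-X 'github.com/songquanpeng/one-api/common.Version=$(git describe --tags)'"
// With the old pre-rename path ("one-api/common.Version") the flag no longer
// matches any package, and released binaries would report the placeholder.
var Version = "v0.0.0-dev"
```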
diff --git a/common/constants.go b/common/constants.go
index ccaa3560..f67dc146 100644
--- a/common/constants.go
+++ b/common/constants.go
@@ -64,6 +64,8 @@ const (
 	ChannelTypeTencent  = 23
 	ChannelTypeGemini   = 24
 	ChannelTypeMoonshot = 25
+	ChannelTypeBaichuan = 26
+	ChannelTypeMinimax  = 27
 )

 var ChannelBaseURLs = []string{
@@ -93,6 +95,8 @@ var ChannelBaseURLs = []string{
 	"https://hunyuan.cloud.tencent.com",         // 23
 	"https://generativelanguage.googleapis.com", // 24
 	"https://api.moonshot.cn",                   // 25
+	"https://api.baichuan-ai.com",               // 26
+	"https://api.minimax.chat",                  // 27
 }

 const (
diff --git a/common/gin.go b/common/gin.go
index bed2c2b1..b6ef96a6 100644
--- a/common/gin.go
+++ b/common/gin.go
@@ -8,12 +8,24 @@ import (
 	"strings"
 )

-func UnmarshalBodyReusable(c *gin.Context, v any) error {
+const KeyRequestBody = "key_request_body"
+
+func GetRequestBody(c *gin.Context) ([]byte, error) {
+	requestBody, _ := c.Get(KeyRequestBody)
+	if requestBody != nil {
+		return requestBody.([]byte), nil
+	}
 	requestBody, err := io.ReadAll(c.Request.Body)
 	if err != nil {
-		return err
+		return nil, err
 	}
-	err = c.Request.Body.Close()
+	_ = c.Request.Body.Close()
+	c.Set(KeyRequestBody, requestBody)
+	return requestBody.([]byte), nil
+}
+
+func UnmarshalBodyReusable(c *gin.Context, v any) error {
+	requestBody, err := GetRequestBody(c)
 	if err != nil {
 		return err
 	}
diff --git a/common/logger/logger.go b/common/logger/logger.go
index f970ee61..8232b2fc 100644
--- a/common/logger/logger.go
+++ b/common/logger/logger.go
@@ -13,6 +13,7 @@ import (
 )

 const (
+	loggerDEBUG = "DEBUG"
 	loggerINFO  = "INFO"
 	loggerWarn  = "WARN"
 	loggerError = "ERR"
@@ -55,6 +56,10 @@ func SysError(s string) {
 	_, _ = fmt.Fprintf(gin.DefaultErrorWriter, "[SYS] %v | %s \n", t.Format("2006/01/02 - 15:04:05"), s)
 }

+func Debug(ctx context.Context, msg string) {
+	logHelper(ctx, loggerDEBUG, msg)
+}
+
 func Info(ctx context.Context, msg string) {
 	logHelper(ctx, loggerINFO, msg)
 }
@@ -67,6 +72,10 @@ func Error(ctx context.Context, msg string) {
 	logHelper(ctx, loggerError, msg)
 }

+func Debugf(ctx context.Context, format string, a ...any) {
+	Debug(ctx, fmt.Sprintf(format, a...))
+}
+
 func Infof(ctx context.Context, format string, a ...any) {
 	Info(ctx, fmt.Sprintf(format, a...))
 }
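
The new `GetRequestBody` stores the raw body bytes in the gin context under `KeyRequestBody`, so later readers get the cached copy instead of an already-drained `c.Request.Body`. A hypothetical handler sketch showing the body safely being consumed twice:

```go
package main

import (
	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/common"
	"github.com/songquanpeng/one-api/common/logger"
)

// demoHandler is hypothetical; it exists only to show the caching behavior.
func demoHandler(c *gin.Context) {
	// First call drains c.Request.Body and caches the bytes in the context.
	raw, err := common.GetRequestBody(c)
	if err == nil {
		logger.Debugf(c.Request.Context(), "request body: %s", string(raw))
	}
	// Second read is served from the cache, so unmarshalling still works.
	var req map[string]any
	_ = common.UnmarshalBodyReusable(c, &req)
}
```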
diff --git a/common/model-ratio.go b/common/model-ratio.go
index 3f2e1fb1..1af80cb1 100644
--- a/common/model-ratio.go
+++ b/common/model-ratio.go
@@ -94,14 +94,18 @@ var ModelRatio = map[string]float64{
 	"claude-2.0": 5.51, // $11.02 / 1M tokens
 	"claude-2.1": 5.51, // $11.02 / 1M tokens
 	// https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7
-	"ERNIE-Bot":         0.8572,     // ¥0.012 / 1k tokens
-	"ERNIE-Bot-turbo":   0.5715,     // ¥0.008 / 1k tokens
-	"ERNIE-Bot-4":       0.12 * RMB, // ¥0.12 / 1k tokens
-	"ERNIE-Bot-8k":      0.024 * RMB,
-	"Embedding-V1":      0.1429, // ¥0.002 / 1k tokens
-	"PaLM-2":            1,
-	"gemini-pro":        1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
-	"gemini-pro-vision": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
+	"ERNIE-Bot":         0.8572,     // ¥0.012 / 1k tokens
+	"ERNIE-Bot-turbo":   0.5715,     // ¥0.008 / 1k tokens
+	"ERNIE-Bot-4":       0.12 * RMB, // ¥0.12 / 1k tokens
+	"ERNIE-Bot-8k":      0.024 * RMB,
+	"Embedding-V1":      0.1429, // ¥0.002 / 1k tokens
+	"PaLM-2":            1,
+	"gemini-pro":        1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
+	"gemini-pro-vision": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
+	// https://open.bigmodel.cn/pricing
+	"glm-4":       0.1 * RMB,
+	"glm-4v":      0.1 * RMB,
+	"glm-3-turbo": 0.005 * RMB,
 	"chatglm_turbo": 0.3572, // ¥0.005 / 1k tokens
 	"chatglm_pro":   0.7143, // ¥0.01 / 1k tokens
 	"chatglm_std":   0.3572, // ¥0.005 / 1k tokens
@@ -127,7 +131,25 @@ var ModelRatio = map[string]float64{
 	"moonshot-v1-8k":   0.012 * RMB,
 	"moonshot-v1-32k":  0.024 * RMB,
 	"moonshot-v1-128k": 0.06 * RMB,
+	"embedding-001":    0.01 * RMB,
+	// https://platform.baichuan-ai.com/price
+	"Baichuan2-Turbo":      0.008 * RMB,
+	"Baichuan2-Turbo-192k": 0.016 * RMB,
+	"Baichuan2-53B":        0.02 * RMB,
+	// https://api.minimax.chat/document/price
+	"abab6-chat":    0.1 * RMB,
+	"abab5.5-chat":  0.015 * RMB,
+	"abab5.5s-chat": 0.005 * RMB,
+}
+
+var DefaultModelRatio map[string]float64
+
+func init() {
+	DefaultModelRatio = make(map[string]float64)
+	for k, v := range ModelRatio {
+		DefaultModelRatio[k] = v
+	}
 }

 func ModelRatio2JSONString() string {
@@ -148,6 +170,9 @@ func GetModelRatio(name string) float64 {
 		name = strings.TrimSuffix(name, "-internet")
 	}
 	ratio, ok := ModelRatio[name]
+	if !ok {
+		ratio, ok = DefaultModelRatio[name]
+	}
 	if !ok {
 		logger.SysError("model ratio not found: " + name)
 		return 30
diff --git a/controller/model.go b/controller/model.go
index f5760901..0f33f919 100644
--- a/controller/model.go
+++ b/controller/model.go
@@ -4,6 +4,8 @@ import (
 	"fmt"
 	"github.com/gin-gonic/gin"
 	"github.com/songquanpeng/one-api/relay/channel/ai360"
+	"github.com/songquanpeng/one-api/relay/channel/baichuan"
+	"github.com/songquanpeng/one-api/relay/channel/minimax"
 	"github.com/songquanpeng/one-api/relay/channel/moonshot"
 	"github.com/songquanpeng/one-api/relay/constant"
 	"github.com/songquanpeng/one-api/relay/helper"
@@ -98,6 +100,28 @@ func init() {
 			Parent:     nil,
 		})
 	}
+	for _, modelName := range baichuan.ModelList {
+		openAIModels = append(openAIModels, OpenAIModels{
+			Id:         modelName,
+			Object:     "model",
+			Created:    1626777600,
+			OwnedBy:    "baichuan",
+			Permission: permission,
+			Root:       modelName,
+			Parent:     nil,
+		})
+	}
+	for _, modelName := range minimax.ModelList {
+		openAIModels = append(openAIModels, OpenAIModels{
+			Id:         modelName,
+			Object:     "model",
+			Created:    1626777600,
+			OwnedBy:    "minimax",
+			Permission: permission,
+			Root:       modelName,
+			Parent:     nil,
+		})
+	}
 	openAIModelsMap = make(map[string]OpenAIModels)
 	for _, model := range openAIModels {
 		openAIModelsMap[model.Id] = model
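
On `DefaultModelRatio`: the live `ModelRatio` map can be replaced wholesale by operator-configured options, so the snapshot taken in `init()` keeps prices resolvable for models added after such an override. A standalone sketch of the resulting lookup order in `GetModelRatio` (simplified signature, not the actual function):

```go
// lookupRatio mirrors the fallback chain: operator-overridden map first,
// compiled-in defaults second, then the same hard-coded 30 the existing
// code already falls back to.
func lookupRatio(live, defaults map[string]float64, name string) float64 {
	if ratio, ok := live[name]; ok {
		return ratio
	}
	if ratio, ok := defaults[name]; ok {
		return ratio
	}
	return 30
}
```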
diff --git a/controller/relay.go b/controller/relay.go
index 240042b6..278c0b32 100644
--- a/controller/relay.go
+++ b/controller/relay.go
@@ -1,9 +1,11 @@
 package controller

 import (
+	"bytes"
 	"context"
 	"fmt"
 	"github.com/gin-gonic/gin"
+	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/common/config"
 	"github.com/songquanpeng/one-api/common/helper"
 	"github.com/songquanpeng/one-api/common/logger"
@@ -13,6 +15,7 @@ import (
 	"github.com/songquanpeng/one-api/relay/controller"
 	"github.com/songquanpeng/one-api/relay/model"
 	"github.com/songquanpeng/one-api/relay/util"
+	"io"
 	"net/http"
 )

@@ -38,6 +41,10 @@ func relay(c *gin.Context, relayMode int) *model.ErrorWithStatusCode {
 func Relay(c *gin.Context) {
 	ctx := c.Request.Context()
 	relayMode := constant.Path2RelayMode(c.Request.URL.Path)
+	if config.DebugEnabled {
+		requestBody, _ := common.GetRequestBody(c)
+		logger.Debugf(ctx, "request body: %s", string(requestBody))
+	}
 	bizErr := relay(c, relayMode)
 	if bizErr == nil {
 		return
@@ -50,8 +57,8 @@ func Relay(c *gin.Context) {
 	go processChannelRelayError(ctx, channelId, channelName, bizErr)
 	requestId := c.GetString(logger.RequestIdKey)
 	retryTimes := config.RetryTimes
-	if !shouldRetry(bizErr.StatusCode) {
-		logger.Errorf(ctx, "relay error happen, but status code is %d, won't retry in this case", bizErr.StatusCode)
+	if !shouldRetry(c, bizErr.StatusCode) {
+		logger.Errorf(ctx, "relay error happened, status code is %d, won't retry in this case", bizErr.StatusCode)
 		retryTimes = 0
 	}
 	for i := retryTimes; i > 0; i-- {
@@ -65,6 +72,8 @@ func Relay(c *gin.Context) {
 			continue
 		}
 		middleware.SetupContextForSelectedChannel(c, channel, originalModel)
+		requestBody, err := common.GetRequestBody(c)
+		c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
 		bizErr = relay(c, relayMode)
 		if bizErr == nil {
 			return
@@ -85,7 +94,10 @@ func Relay(c *gin.Context) {
 	}
 }

-func shouldRetry(statusCode int) bool {
+func shouldRetry(c *gin.Context, statusCode int) bool {
+	if _, ok := c.Get("specific_channel_id"); ok {
+		return false
+	}
 	if statusCode == http.StatusTooManyRequests {
 		return true
 	}
diff --git a/relay/channel/baichuan/constants.go b/relay/channel/baichuan/constants.go
new file mode 100644
index 00000000..cb20a1ff
--- /dev/null
+++ b/relay/channel/baichuan/constants.go
@@ -0,0 +1,7 @@
+package baichuan
+
+var ModelList = []string{
+	"Baichuan2-Turbo",
+	"Baichuan2-Turbo-192k",
+	"Baichuan-Text-Embedding",
+}
diff --git a/relay/channel/minimax/constants.go b/relay/channel/minimax/constants.go
new file mode 100644
index 00000000..c3da5b2d
--- /dev/null
+++ b/relay/channel/minimax/constants.go
@@ -0,0 +1,7 @@
+package minimax
+
+var ModelList = []string{
+	"abab5.5s-chat",
+	"abab5.5-chat",
+	"abab6-chat",
+}
diff --git a/relay/channel/minimax/main.go b/relay/channel/minimax/main.go
new file mode 100644
index 00000000..a01821c2
--- /dev/null
+++ b/relay/channel/minimax/main.go
@@ -0,0 +1,14 @@
+package minimax
+
+import (
+	"fmt"
+	"github.com/songquanpeng/one-api/relay/constant"
+	"github.com/songquanpeng/one-api/relay/util"
+)
+
+func GetRequestURL(meta *util.RelayMeta) (string, error) {
+	if meta.Mode == constant.RelayModeChatCompletions {
+		return fmt.Sprintf("%s/v1/text/chatcompletion_v2", meta.BaseURL), nil
+	}
+	return "", fmt.Errorf("unsupported relay mode %d for minimax", meta.Mode)
+}
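
Quick check of the MiniMax URL mapping: only the chat-completions relay mode is supported; everything else returns an error. An illustrative usage sketch (the meta values here are made up):

```go
package main

import (
	"fmt"

	"github.com/songquanpeng/one-api/relay/channel/minimax"
	"github.com/songquanpeng/one-api/relay/constant"
	"github.com/songquanpeng/one-api/relay/util"
)

func main() {
	meta := &util.RelayMeta{
		Mode:    constant.RelayModeChatCompletions,
		BaseURL: "https://api.minimax.chat",
	}
	url, err := minimax.GetRequestURL(meta)
	// Prints: https://api.minimax.chat/v1/text/chatcompletion_v2 <nil>
	fmt.Println(url, err)
}
```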
diff --git a/relay/channel/openai/adaptor.go b/relay/channel/openai/adaptor.go
index 1313e317..27d0fc27 100644
--- a/relay/channel/openai/adaptor.go
+++ b/relay/channel/openai/adaptor.go
@@ -7,6 +7,8 @@ import (
 	"github.com/songquanpeng/one-api/common"
 	"github.com/songquanpeng/one-api/relay/channel"
 	"github.com/songquanpeng/one-api/relay/channel/ai360"
+	"github.com/songquanpeng/one-api/relay/channel/baichuan"
+	"github.com/songquanpeng/one-api/relay/channel/minimax"
 	"github.com/songquanpeng/one-api/relay/channel/moonshot"
 	"github.com/songquanpeng/one-api/relay/model"
 	"github.com/songquanpeng/one-api/relay/util"
@@ -24,7 +26,8 @@ func (a *Adaptor) Init(meta *util.RelayMeta) {
 }

 func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
-	if meta.ChannelType == common.ChannelTypeAzure {
+	switch meta.ChannelType {
+	case common.ChannelTypeAzure:
 		// https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api
 		requestURL := strings.Split(meta.RequestURLPath, "?")[0]
 		requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, meta.APIVersion)
@@ -38,8 +41,11 @@ func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {

 		requestURL = fmt.Sprintf("/openai/deployments/%s/%s", model_, task)
 		return util.GetFullRequestURL(meta.BaseURL, requestURL, meta.ChannelType), nil
+	case common.ChannelTypeMinimax:
+		return minimax.GetRequestURL(meta)
+	default:
+		return util.GetFullRequestURL(meta.BaseURL, meta.RequestURLPath, meta.ChannelType), nil
 	}
-	return util.GetFullRequestURL(meta.BaseURL, meta.RequestURLPath, meta.ChannelType), nil
 }

 func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Request, meta *util.RelayMeta) error {
@@ -70,7 +76,7 @@ func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io
 func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
 	if meta.IsStream {
 		var responseText string
-		err, responseText = StreamHandler(c, resp, meta.Mode)
+		err, responseText, _ = StreamHandler(c, resp, meta.Mode)
 		usage = ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
 	} else {
 		err, usage = Handler(c, resp, meta.PromptTokens, meta.ActualModelName)
@@ -84,6 +90,10 @@ func (a *Adaptor) GetModelList() []string {
 		return ai360.ModelList
 	case common.ChannelTypeMoonshot:
 		return moonshot.ModelList
+	case common.ChannelTypeBaichuan:
+		return baichuan.ModelList
+	case common.ChannelTypeMinimax:
+		return minimax.ModelList
 	default:
 		return ModelList
 	}
@@ -97,6 +107,10 @@ func (a *Adaptor) GetChannelName() string {
 		return "360"
 	case common.ChannelTypeMoonshot:
 		return "moonshot"
+	case common.ChannelTypeBaichuan:
+		return "baichuan"
+	case common.ChannelTypeMinimax:
+		return "minimax"
 	default:
 		return "openai"
 	}
diff --git a/relay/channel/openai/main.go b/relay/channel/openai/main.go
index fbe55cf9..d47cd164 100644
--- a/relay/channel/openai/main.go
+++ b/relay/channel/openai/main.go
@@ -14,7 +14,7 @@ import (
 	"strings"
 )

-func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*model.ErrorWithStatusCode, string) {
+func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*model.ErrorWithStatusCode, string, *model.Usage) {
 	responseText := ""
 	scanner := bufio.NewScanner(resp.Body)
 	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
@@ -31,6 +31,7 @@ func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*model.E
 	})
 	dataChan := make(chan string)
 	stopChan := make(chan bool)
+	var usage *model.Usage
 	go func() {
 		for scanner.Scan() {
 			data := scanner.Text()
@@ -54,6 +55,9 @@ func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*model.E
 				for _, choice := range streamResponse.Choices {
 					responseText += choice.Delta.Content
 				}
+				if streamResponse.Usage != nil {
+					usage = streamResponse.Usage
+				}
 			case constant.RelayModeCompletions:
 				var streamResponse CompletionsStreamResponse
 				err := json.Unmarshal([]byte(data), &streamResponse)
@@ -86,9 +90,9 @@ func StreamHandler(c *gin.Context, resp *http.Response, relayMode int) (*model.E
 	})
 	err := resp.Body.Close()
 	if err != nil {
-		return ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
+		return ErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), "", nil
 	}
-	return nil, responseText
+	return nil, responseText, usage
 }

 func Handler(c *gin.Context, resp *http.Response, promptTokens int, modelName string) (*model.ErrorWithStatusCode, *model.Usage) {
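
`StreamHandler` now surfaces the usage reported in stream chunks as a third return value. The OpenAI adaptor above still discards it, but a caller could prefer the upstream figure and only fall back to the token-count estimate when the stream carried none. A sketch of such a caller, not part of this PR:

```go
package openai

import (
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/songquanpeng/one-api/relay/model"
	"github.com/songquanpeng/one-api/relay/util"
)

// streamUsage sketches how the third return value could be consumed.
func streamUsage(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (*model.Usage, *model.ErrorWithStatusCode) {
	relayErr, responseText, usage := StreamHandler(c, resp, meta.Mode)
	if relayErr != nil {
		return nil, relayErr
	}
	if usage == nil {
		// Most upstreams omit usage in stream mode; estimate from the text.
		usage = ResponseText2Usage(responseText, meta.ActualModelName, meta.PromptTokens)
	}
	return usage, nil
}
```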
diff --git a/relay/channel/openai/model.go b/relay/channel/openai/model.go
index b24485a8..6c0b2c53 100644
--- a/relay/channel/openai/model.go
+++ b/relay/channel/openai/model.go
@@ -132,6 +132,7 @@ type ChatCompletionsStreamResponse struct {
 	Created int64                                 `json:"created"`
 	Model   string                                `json:"model"`
 	Choices []ChatCompletionsStreamResponseChoice `json:"choices"`
+	Usage   *model.Usage                          `json:"usage"`
 }

 type CompletionsStreamResponse struct {
diff --git a/relay/channel/tencent/main.go b/relay/channel/tencent/main.go
index 05edac20..fa26651b 100644
--- a/relay/channel/tencent/main.go
+++ b/relay/channel/tencent/main.go
@@ -81,6 +81,7 @@ func responseTencent2OpenAI(response *ChatResponse) *openai.TextResponse {

 func streamResponseTencent2OpenAI(TencentResponse *ChatResponse) *openai.ChatCompletionsStreamResponse {
 	response := openai.ChatCompletionsStreamResponse{
+		Id:      fmt.Sprintf("chatcmpl-%s", helper.GetUUID()),
 		Object:  "chat.completion.chunk",
 		Created: helper.GetTimestamp(),
 		Model:   "tencent-hunyuan",
diff --git a/relay/channel/zhipu/adaptor.go b/relay/channel/zhipu/adaptor.go
index 7a822853..90cc79d3 100644
--- a/relay/channel/zhipu/adaptor.go
+++ b/relay/channel/zhipu/adaptor.go
@@ -5,20 +5,35 @@ import (
 	"fmt"
 	"github.com/gin-gonic/gin"
 	"github.com/songquanpeng/one-api/relay/channel"
+	"github.com/songquanpeng/one-api/relay/channel/openai"
 	"github.com/songquanpeng/one-api/relay/model"
 	"github.com/songquanpeng/one-api/relay/util"
 	"io"
 	"net/http"
+	"strings"
 )

 type Adaptor struct {
+	APIVersion string
 }

 func (a *Adaptor) Init(meta *util.RelayMeta) {

 }

+func (a *Adaptor) SetVersionByModelName(modelName string) {
+	if strings.HasPrefix(modelName, "glm-") {
+		a.APIVersion = "v4"
+	} else {
+		a.APIVersion = "v3"
+	}
+}
+
 func (a *Adaptor) GetRequestURL(meta *util.RelayMeta) (string, error) {
+	a.SetVersionByModelName(meta.ActualModelName)
+	if a.APIVersion == "v4" {
+		return fmt.Sprintf("%s/api/paas/v4/chat/completions", meta.BaseURL), nil
+	}
 	method := "invoke"
 	if meta.IsStream {
 		method = "sse-invoke"
@@ -37,6 +52,13 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, relayMode int, request *model.G
 	if request == nil {
 		return nil, errors.New("request is nil")
 	}
+	if request.TopP >= 1 {
+		request.TopP = 0.99
+	}
+	a.SetVersionByModelName(request.Model)
+	if a.APIVersion == "v4" {
+		return request, nil
+	}
 	return ConvertRequest(*request), nil
 }

@@ -44,7 +66,19 @@ func (a *Adaptor) DoRequest(c *gin.Context, meta *util.RelayMeta, requestBody io
 	return channel.DoRequestHelper(a, c, meta, requestBody)
 }

+func (a *Adaptor) DoResponseV4(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
+	if meta.IsStream {
+		err, _, usage = openai.StreamHandler(c, resp, meta.Mode)
+	} else {
+		err, usage = openai.Handler(c, resp, meta.PromptTokens, meta.ActualModelName)
+	}
+	return
+}
+
 func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, meta *util.RelayMeta) (usage *model.Usage, err *model.ErrorWithStatusCode) {
+	if a.APIVersion == "v4" {
+		return a.DoResponseV4(c, resp, meta)
+	}
 	if meta.IsStream {
 		err, usage = StreamHandler(c, resp)
 	} else {
diff --git a/relay/channel/zhipu/constants.go b/relay/channel/zhipu/constants.go
index f0367b82..1655a59d 100644
--- a/relay/channel/zhipu/constants.go
+++ b/relay/channel/zhipu/constants.go
@@ -2,4 +2,5 @@ package zhipu

 var ModelList = []string{
 	"chatglm_turbo", "chatglm_pro", "chatglm_std", "chatglm_lite",
+	"glm-4", "glm-4v", "glm-3-turbo",
 }
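
The Zhipu adaptor now routes by model-name prefix: `glm-*` models go to the OpenAI-compatible v4 endpoint and reuse the openai package handlers, while legacy `chatglm_*` models stay on the v3 invoke/sse-invoke API. A quick illustrative check:

```go
package main

import (
	"fmt"

	"github.com/songquanpeng/one-api/relay/channel/zhipu"
)

func main() {
	a := &zhipu.Adaptor{}
	a.SetVersionByModelName("glm-4")
	fmt.Println(a.APIVersion) // v4: OpenAI-compatible /api/paas/v4/chat/completions
	a.SetVersionByModelName("chatglm_turbo")
	fmt.Println(a.APIVersion) // v3: legacy invoke / sse-invoke endpoints
}
```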
diff --git a/relay/controller/text.go b/relay/controller/text.go
index cc460511..59c5f637 100644
--- a/relay/controller/text.go
+++ b/relay/controller/text.go
@@ -55,7 +55,8 @@ func RelayTextHelper(c *gin.Context) *model.ErrorWithStatusCode {
 	var requestBody io.Reader
 	if meta.APIType == constant.APITypeOpenAI {
 		// no need to convert request for openai
-		if isModelMapped {
+		shouldResetRequestBody := isModelMapped || meta.ChannelType == common.ChannelTypeBaichuan // frequency_penalty 0 is not acceptable for baichuan
+		if shouldResetRequestBody {
 			jsonStr, err := json.Marshal(textRequest)
 			if err != nil {
 				return openai.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
diff --git a/web/berry/src/constants/ChannelConstants.js b/web/berry/src/constants/ChannelConstants.js
index aeff5190..98ceaebf 100644
--- a/web/berry/src/constants/ChannelConstants.js
+++ b/web/berry/src/constants/ChannelConstants.js
@@ -71,6 +71,18 @@ export const CHANNEL_OPTIONS = {
     value: 23,
     color: 'default'
   },
+  26: {
+    key: 26,
+    text: '百川大模型',
+    value: 26,
+    color: 'default'
+  },
+  27: {
+    key: 27,
+    text: 'MiniMax',
+    value: 27,
+    color: 'default'
+  },
   8: {
     key: 8,
     text: '自定义渠道',
diff --git a/web/berry/src/views/Channel/type/Config.js b/web/berry/src/views/Channel/type/Config.js
index a091c8d6..4dec33de 100644
--- a/web/berry/src/views/Channel/type/Config.js
+++ b/web/berry/src/views/Channel/type/Config.js
@@ -67,7 +67,7 @@ const typeConfig = {
   },
   16: {
     input: {
-      models: ["chatglm_turbo", "chatglm_pro", "chatglm_std", "chatglm_lite"],
+      models: ["glm-4", "glm-4v", "glm-3-turbo", "chatglm_turbo", "chatglm_pro", "chatglm_std", "chatglm_lite"],
     },
     modelGroup: "zhipu",
   },
@@ -145,6 +145,24 @@ const typeConfig = {
     },
     modelGroup: "google gemini",
   },
+  25: {
+    input: {
+      models: ['moonshot-v1-8k', 'moonshot-v1-32k', 'moonshot-v1-128k'],
+    },
+    modelGroup: "moonshot",
+  },
+  26: {
+    input: {
+      models: ['Baichuan2-Turbo', 'Baichuan2-Turbo-192k', 'Baichuan-Text-Embedding'],
+    },
+    modelGroup: "baichuan",
+  },
+  27: {
+    input: {
+      models: ['abab5.5s-chat', 'abab5.5-chat', 'abab6-chat'],
+    },
+    modelGroup: "minimax",
+  },
 };

 export { defaultConfig, typeConfig };
diff --git a/web/default/src/constants/channel.constants.js b/web/default/src/constants/channel.constants.js
index 16da1b97..beb0adb1 100644
--- a/web/default/src/constants/channel.constants.js
+++ b/web/default/src/constants/channel.constants.js
@@ -11,6 +11,8 @@ export const CHANNEL_OPTIONS = [
   { key: 19, text: '360 智脑', value: 19, color: 'blue' },
   { key: 25, text: 'Moonshot AI', value: 25, color: 'black' },
   { key: 23, text: '腾讯混元', value: 23, color: 'teal' },
+  { key: 26, text: '百川大模型', value: 26, color: 'orange' },
+  { key: 27, text: 'MiniMax', value: 27, color: 'red' },
   { key: 8, text: '自定义渠道', value: 8, color: 'pink' },
   { key: 22, text: '知识库:FastGPT', value: 22, color: 'blue' },
   { key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple' },
diff --git a/web/default/src/pages/Channel/EditChannel.js b/web/default/src/pages/Channel/EditChannel.js
index 4f4633ff..693242f9 100644
--- a/web/default/src/pages/Channel/EditChannel.js
+++ b/web/default/src/pages/Channel/EditChannel.js
@@ -79,7 +79,7 @@ const EditChannel = () => {
         localModels = [...localModels, ...withInternetVersion];
         break;
       case 16:
-        localModels = ['chatglm_turbo', 'chatglm_pro', 'chatglm_std', 'chatglm_lite'];
+        localModels = ['glm-4', 'glm-4v', 'glm-3-turbo', 'chatglm_turbo', 'chatglm_pro', 'chatglm_std', 'chatglm_lite'];
         break;
       case 18:
         localModels = [
@@ -102,6 +102,12 @@ const EditChannel = () => {
       case 25:
         localModels = ['moonshot-v1-8k', 'moonshot-v1-32k', 'moonshot-v1-128k'];
         break;
+      case 26:
+        localModels = ['Baichuan2-Turbo', 'Baichuan2-Turbo-192k', 'Baichuan-Text-Embedding'];
+        break;
+      case 27:
+        localModels = ['abab5.5s-chat', 'abab5.5-chat', 'abab6-chat'];
+        break;
     }
     setInputs((inputs) => ({ ...inputs, models: localModels }));
   }
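
On the Baichuan workaround in `relay/controller/text.go`: re-marshalling the parsed request means zero-valued optional fields are dropped before the body is forwarded, assuming the request type tags them `omitempty` (an assumption here, not something this diff shows). A standalone illustration with a hypothetical struct:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// request is a stand-in for the relay's parsed request type, not one-api's
// actual GeneralOpenAIRequest definition.
type request struct {
	Model            string  `json:"model"`
	FrequencyPenalty float64 `json:"frequency_penalty,omitempty"`
}

func main() {
	in := []byte(`{"model":"Baichuan2-Turbo","frequency_penalty":0}`)
	var r request
	_ = json.Unmarshal(in, &r)
	out, _ := json.Marshal(r)
	fmt.Println(string(out)) // {"model":"Baichuan2-Turbo"}: the zero penalty is gone
}
```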