🎨 Support Baichuan2

This commit is contained in:
Martial BE 2023-12-29 15:23:05 +08:00
parent c5aa59e297
commit 61c47a3b08
No known key found for this signature in database
GPG Key ID: D06C32DF0EDB9084
12 changed files with 190 additions and 5 deletions

View File

@ -189,6 +189,7 @@ const (
ChannelTypeTencent = 23 ChannelTypeTencent = 23
ChannelTypeAzureSpeech = 24 ChannelTypeAzureSpeech = 24
ChannelTypeGemini = 25 ChannelTypeGemini = 25
ChannelTypeBaichuan = 26
) )
var ChannelBaseURLs = []string{ var ChannelBaseURLs = []string{
@ -218,6 +219,7 @@ var ChannelBaseURLs = []string{
"https://hunyuan.cloud.tencent.com", //23 "https://hunyuan.cloud.tencent.com", //23
"", //24 "", //24
"", //25 "", //25
"https://api.baichuan-ai.com", //26
} }
const ( const (

View File

@ -101,6 +101,10 @@ var ModelRatio = map[string]float64{
"embedding_s1_v1": 0.0715, // ¥0.001 / 1k tokens "embedding_s1_v1": 0.0715, // ¥0.001 / 1k tokens
"semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens "semantic_similarity_s1_v1": 0.0715, // ¥0.001 / 1k tokens
"hunyuan": 7.143, // ¥0.1 / 1k tokens // https://cloud.tencent.com/document/product/1729/97731#e0e6be58-60c8-469f-bdeb-6c264ce3b4d0 "hunyuan": 7.143, // ¥0.1 / 1k tokens // https://cloud.tencent.com/document/product/1729/97731#e0e6be58-60c8-469f-bdeb-6c264ce3b4d0
"Baichuan2-Turbo": 0.5715, // ¥0.008 / 1k tokens
"Baichuan2-Turbo-192k": 1.143, // ¥0.016 / 1k tokens
"Baichuan2-53B": 1.4286, // ¥0.02 / 1k tokens
"Baichuan-Text-Embedding": 0.0357, // ¥0.0005 / 1k tokens
} }
func ModelRatio2JSONString() string { func ModelRatio2JSONString() string {

View File

@ -190,13 +190,13 @@ func countImageTokens(url string, detail string) (_ int, err error) {
func CountTokenInput(input any, model string) int { func CountTokenInput(input any, model string) int {
switch v := input.(type) { switch v := input.(type) {
case string: case string:
return CountTokenInput(v, model) return CountTokenText(v, model)
case []string: case []string:
text := "" text := ""
for _, s := range v { for _, s := range v {
text += s text += s
} }
return CountTokenInput(text, model) return CountTokenText(text, model)
} }
return 0 return 0
} }

View File

@ -0,0 +1,30 @@
package baichuan
import (
"one-api/providers/base"
"one-api/providers/openai"
"github.com/gin-gonic/gin"
)
// BaichuanProviderFactory creates BaichuanProvider instances for the
// provider registry (provider factory).
type BaichuanProviderFactory struct{}

// Create builds a BaichuanProvider bound to the given request context.
// Baichuan exposes an OpenAI-compatible HTTP API, so the provider embeds
// OpenAIProvider and only sets the Baichuan base URL and endpoint paths.
// API reference: https://platform.baichuan-ai.com/docs/api
func (f BaichuanProviderFactory) Create(c *gin.Context) base.ProviderInterface {
	return &BaichuanProvider{
		OpenAIProvider: openai.OpenAIProvider{
			BaseProvider: base.BaseProvider{
				// NOTE(review): this URL duplicates the entry added to
				// common.ChannelBaseURLs for channel 26 — keep the two in sync.
				BaseURL:         "https://api.baichuan-ai.com",
				ChatCompletions: "/v1/chat/completions",
				Embeddings:      "/v1/embeddings",
				Context:         c,
			},
		},
	}
}

// BaichuanProvider reuses the OpenAI provider implementation against the
// Baichuan endpoint.
type BaichuanProvider struct {
	openai.OpenAIProvider
}

100
providers/baichuan/chat.go Normal file
View File

@ -0,0 +1,100 @@
package baichuan
import (
"net/http"
"one-api/common"
"one-api/providers/openai"
"one-api/types"
"strings"
)
// ResponseHandler maps a decoded Baichuan chat response onto the generic
// OpenAI-style response types. A non-empty error message in the payload is
// forwarded together with the upstream HTTP status code; otherwise the
// completion is repackaged as a types.ChatCompletionResponse.
func (baichuanResponse *BaichuanChatResponse) ResponseHandler(resp *http.Response) (OpenAIResponse any, errWithCode *types.OpenAIErrorWithStatusCode) {
	// Upstream failures are signalled in-band via the error message field.
	if msg := baichuanResponse.Error.Message; msg != "" {
		return nil, &types.OpenAIErrorWithStatusCode{
			OpenAIError: baichuanResponse.Error,
			StatusCode:  resp.StatusCode,
		}
	}

	return types.ChatCompletionResponse{
		ID:      baichuanResponse.ID,
		Object:  baichuanResponse.Object,
		Created: baichuanResponse.Created,
		Model:   baichuanResponse.Model,
		Choices: baichuanResponse.Choices,
		Usage:   baichuanResponse.Usage,
	}, nil
}
// getChatRequestBody converts a generic chat completion request into the
// Baichuan wire format. Baichuan accepts only the "user" and "assistant"
// roles, so "system" messages are folded into "assistant" and any other
// role becomes "user".
func (p *BaichuanProvider) getChatRequestBody(request *types.ChatCompletionRequest) *BaichuanChatRequest {
	converted := make([]BaichuanMessage, 0, len(request.Messages))
	for _, msg := range request.Messages {
		role := "user"
		if msg.Role == "system" || msg.Role == "assistant" {
			role = "assistant"
		}
		converted = append(converted, BaichuanMessage{
			Content: msg.StringContent(),
			// ToLower kept for parity with the original mapping, even though
			// both produced role values are already lower case.
			Role: strings.ToLower(role),
		})
	}

	return &BaichuanChatRequest{
		Model:       request.Model,
		Messages:    converted,
		Stream:      request.Stream,
		Temperature: request.Temperature,
		TopP:        request.TopP,
		// NOTE(review): request.N (number of completions) is mapped onto
		// Baichuan's top_k sampling parameter — confirm this is intentional.
		TopK: request.N,
	}
}
// ChatAction executes a chat completion request against the Baichuan API.
// For streaming requests, completion tokens are counted locally from the
// accumulated stream text; for blocking requests, the usage block reported
// by Baichuan is returned as-is.
//
// NOTE(review): isModelMapped is currently unused here — confirm whether
// model-mapping handling was intended, as in other providers.
func (p *BaichuanProvider) ChatAction(request *types.ChatCompletionRequest, isModelMapped bool, promptTokens int) (usage *types.Usage, errWithCode *types.OpenAIErrorWithStatusCode) {
	requestBody := p.getChatRequestBody(request)
	fullRequestURL := p.GetFullRequestURL(p.ChatCompletions, request.Model)
	headers := p.GetRequestHeaders()
	if request.Stream {
		headers["Accept"] = "text/event-stream"
	}

	client := common.NewClient()
	req, err := client.NewRequest(p.Context.Request.Method, fullRequestURL, common.WithBody(requestBody), common.WithHeader(headers))
	if err != nil {
		return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
	}

	if request.Stream {
		openAIProviderChatStreamResponse := &openai.OpenAIProviderChatStreamResponse{}
		var textResponse string
		errWithCode, textResponse = p.SendStreamRequest(req, openAIProviderChatStreamResponse)
		if errWithCode != nil {
			return
		}

		// Tokenize the streamed completion text once (the original counted
		// it twice: once for CompletionTokens and again for TotalTokens).
		completionTokens := common.CountTokenText(textResponse, request.Model)
		usage = &types.Usage{
			PromptTokens:     promptTokens,
			CompletionTokens: completionTokens,
			TotalTokens:      promptTokens + completionTokens,
		}
	} else {
		baichuanResponse := &BaichuanChatResponse{}
		errWithCode = p.SendRequest(req, baichuanResponse, false)
		if errWithCode != nil {
			return
		}
		usage = baichuanResponse.Usage
	}
	return
}

View File

@ -0,0 +1,36 @@
package baichuan
import "one-api/providers/openai"
// BaichuanMessage is a single chat turn. The provider only ever sends the
// "user" and "assistant" roles (see getChatRequestBody).
type BaichuanMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

// BaichuanKnowledgeBase selects knowledge-base documents for retrieval.
// NOTE(review): the field holds a slice but the JSON tag is the singular
// "id" — verify against the Baichuan API docs whether it should be "ids".
type BaichuanKnowledgeBase struct {
	Ids []string `json:"id"`
}

// BaichuanChatRequest is the request body for Baichuan's chat endpoint.
type BaichuanChatRequest struct {
	Model             string            `json:"model"`
	Messages          []BaichuanMessage `json:"messages"`
	Stream            bool              `json:"stream,omitempty"`
	Temperature       float64           `json:"temperature,omitempty"`
	TopP              float64           `json:"top_p,omitempty"`
	TopK              int               `json:"top_k,omitempty"`
	WithSearchEnhance bool              `json:"with_search_enhance,omitempty"`
	// NOTE(review): omitempty has no effect on a non-pointer struct field —
	// knowledge_base is always serialized; consider *BaichuanKnowledgeBase.
	KnowledgeBase BaichuanKnowledgeBase `json:"knowledge_base,omitempty"`
}

// BaichuanKnowledgeBaseResponse carries the citations Baichuan returns when
// knowledge-base retrieval was used.
type BaichuanKnowledgeBaseResponse struct {
	Cites []struct {
		Title   string `json:"title"`
		Content string `json:"content"`
		FileId  string `json:"file_id"`
	} `json:"cites"`
}

// BaichuanChatResponse is the OpenAI-compatible chat response extended with
// Baichuan's knowledge-base citation payload.
type BaichuanChatResponse struct {
	openai.OpenAIProviderChatResponse
	KnowledgeBase BaichuanKnowledgeBaseResponse `json:"knowledge_base,omitempty"`
}

View File

@ -108,7 +108,7 @@ func (p *OpenAIProvider) GetRequestBody(request any, isModelMapped bool) (reques
} }
// 发送流式请求 // 发送流式请求
func (p *OpenAIProvider) sendStreamRequest(req *http.Request, response OpenAIProviderStreamResponseHandler) (openAIErrorWithStatusCode *types.OpenAIErrorWithStatusCode, responseText string) { func (p *OpenAIProvider) SendStreamRequest(req *http.Request, response OpenAIProviderStreamResponseHandler) (openAIErrorWithStatusCode *types.OpenAIErrorWithStatusCode, responseText string) {
defer req.Body.Close() defer req.Body.Close()
client := common.GetHttpClient(p.Channel.Proxy) client := common.GetHttpClient(p.Channel.Proxy)

View File

@ -46,7 +46,7 @@ func (p *OpenAIProvider) ChatAction(request *types.ChatCompletionRequest, isMode
if request.Stream { if request.Stream {
openAIProviderChatStreamResponse := &OpenAIProviderChatStreamResponse{} openAIProviderChatStreamResponse := &OpenAIProviderChatStreamResponse{}
var textResponse string var textResponse string
errWithCode, textResponse = p.sendStreamRequest(req, openAIProviderChatStreamResponse) errWithCode, textResponse = p.SendStreamRequest(req, openAIProviderChatStreamResponse)
if errWithCode != nil { if errWithCode != nil {
return return
} }

View File

@ -47,7 +47,7 @@ func (p *OpenAIProvider) CompleteAction(request *types.CompletionRequest, isMode
if request.Stream { if request.Stream {
// TODO // TODO
var textResponse string var textResponse string
errWithCode, textResponse = p.sendStreamRequest(req, openAIProviderCompletionResponse) errWithCode, textResponse = p.SendStreamRequest(req, openAIProviderCompletionResponse)
if errWithCode != nil { if errWithCode != nil {
return return
} }

View File

@ -10,6 +10,7 @@ import (
"one-api/providers/api2gpt" "one-api/providers/api2gpt"
"one-api/providers/azure" "one-api/providers/azure"
azurespeech "one-api/providers/azureSpeech" azurespeech "one-api/providers/azureSpeech"
"one-api/providers/baichuan"
"one-api/providers/baidu" "one-api/providers/baidu"
"one-api/providers/base" "one-api/providers/base"
"one-api/providers/claude" "one-api/providers/claude"
@ -52,6 +53,7 @@ func init() {
providerFactories[common.ChannelTypeAPI2GPT] = api2gpt.Api2gptProviderFactory{} providerFactories[common.ChannelTypeAPI2GPT] = api2gpt.Api2gptProviderFactory{}
providerFactories[common.ChannelTypeAzureSpeech] = azurespeech.AzureSpeechProviderFactory{} providerFactories[common.ChannelTypeAzureSpeech] = azurespeech.AzureSpeechProviderFactory{}
providerFactories[common.ChannelTypeGemini] = gemini.GeminiProviderFactory{} providerFactories[common.ChannelTypeGemini] = gemini.GeminiProviderFactory{}
providerFactories[common.ChannelTypeBaichuan] = baichuan.BaichuanProviderFactory{}
} }

View File

@ -65,6 +65,12 @@ export const CHANNEL_OPTIONS = {
value: 23, value: 23,
color: 'default' color: 'default'
}, },
26: {
key: 26,
text: '百川',
value: 26,
color: 'orange'
},
24: { 24: {
key: 24, key: 24,
text: 'Azure Speech', text: 'Azure Speech',

View File

@ -130,6 +130,11 @@ const typeConfig = {
prompt: { prompt: {
other: '请输入版本号例如v1' other: '请输入版本号例如v1'
} }
},
26: {
input: {
models: ['Baichuan2-Turbo', 'Baichuan2-Turbo-192k', 'Baichuan2-53B', 'Baichuan-Text-Embedding']
}
} }
}; };