add custom test model

Author: Martial BE
Date: 2023-12-29 16:23:25 +08:00
parent 61c47a3b08
commit 9c0a49b97a
4 changed files with 63 additions and 31 deletions

View File

@@ -18,6 +18,10 @@ import (
)
func testChannel(channel *model.Channel, request types.ChatCompletionRequest) (err error, openaiErr *types.OpenAIError) {
if channel.TestModel == "" {
return errors.New("请填写测速模型后再试"), nil
}
// Create an http.Request
req, err := http.NewRequest("POST", "/v1/chat/completions", nil)
if err != nil {
@@ -28,26 +32,7 @@ func testChannel(channel *model.Channel, request types.ChatCompletionRequest) (e
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
c.Request = req
// Build the mapping
channelTypeToModel := map[int]string{
common.ChannelTypePaLM: "PaLM-2",
common.ChannelTypeAnthropic: "claude-2",
common.ChannelTypeBaidu: "ERNIE-Bot",
common.ChannelTypeZhipu: "chatglm_lite",
common.ChannelTypeAli: "qwen-turbo",
common.ChannelType360: "360GPT_S2_V9",
common.ChannelTypeXunfei: "SparkDesk",
common.ChannelTypeTencent: "hunyuan",
common.ChannelTypeAzure: "gpt-3.5-turbo",
}
// Look up the model name from the mapping
model, ok := channelTypeToModel[channel.Type]
if !ok {
model = "gpt-3.5-turbo" // 默认值
}
request.Model = model
request.Model = channel.TestModel
provider := providers.GetProvider(channel, c)
if provider == nil {
@@ -69,13 +54,15 @@ func testChannel(channel *model.Channel, request types.ChatCompletionRequest) (e
promptTokens := common.CountTokenMessages(request.Messages, request.Model)
Usage, openAIErrorWithStatusCode := chatProvider.ChatAction(&request, true, promptTokens)
if openAIErrorWithStatusCode != nil {
return nil, &openAIErrorWithStatusCode.OpenAIError
return errors.New(openAIErrorWithStatusCode.Message), &openAIErrorWithStatusCode.OpenAIError
}
if Usage.CompletionTokens == 0 {
return fmt.Errorf("channel %s, message 补全 tokens 非预期返回 0", channel.Name), nil
}
common.SysLog(fmt.Sprintf("测试模型 %s 返回内容为:%s", channel.Name, w.Body.String()))
return nil, nil
}

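For context, a minimal sketch of how the new guard behaves; it is not part of this commit. With TestModel left empty, testChannel now fails before building the HTTP request or touching a provider. The test function name and the empty ChatCompletionRequest literal are illustrative assumptions; only the testChannel signature and the Channel fields come from the hunk above.

```go
// Illustrative sketch only; assumes it sits in the same package as testChannel,
// with the same imports as the file above plus "testing".
func TestChannelRequiresTestModel(t *testing.T) {
	ch := &model.Channel{Name: "demo-channel"} // TestModel deliberately left empty
	req := types.ChatCompletionRequest{}       // never reaches a provider

	err, openaiErr := testChannel(ch, req)
	if err == nil {
		t.Fatal("expected an error when TestModel is empty")
	}
	if openaiErr != nil {
		t.Fatalf("expected no provider error on the guard path, got %+v", openaiErr)
	}
}
```

Because the guard runs before the httptest recorder is created, a channel without a test model is rejected without any network or provider work.
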
View File

@@ -26,6 +26,7 @@ type Channel struct {
ModelMapping *string `json:"model_mapping" gorm:"type:varchar(1024);default:''"`
Priority *int64 `json:"priority" gorm:"bigint;default:0"`
Proxy string `json:"proxy" gorm:"type:varchar(255);default:''"`
TestModel string `json:"test_model" gorm:"type:varchar(50);default:''"`
}
func GetAllChannels(startIdx int, num int, selectAll bool) ([]*Channel, error) {

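The new TestModel column is described entirely by the struct tag above. Below is a minimal, self-contained sketch of how it would reach the database, assuming the project relies on GORM auto-migration (the migration call itself is not part of this diff, and the sqlite driver and DSN are placeholders):

```go
package main

import (
	"log"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

// Trimmed stand-in for the real Channel model shown in the hunk above.
type Channel struct {
	Proxy     string `json:"proxy" gorm:"type:varchar(255);default:''"`
	TestModel string `json:"test_model" gorm:"type:varchar(50);default:''"`
}

func main() {
	db, err := gorm.Open(sqlite.Open("one-api.db"), &gorm.Config{})
	if err != nil {
		log.Fatalln(err)
	}
	// AutoMigrate adds the missing test_model column (varchar(50), default '')
	// without touching existing rows.
	if err := db.AutoMigrate(&Channel{}); err != nil {
		log.Fatalln(err)
	}
}
```
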
View File

@@ -36,6 +36,7 @@ const validationSchema = Yup.object().shape({
key: Yup.string().when('is_edit', { is: false, then: Yup.string().required('密钥 不能为空') }),
other: Yup.string(),
proxy: Yup.string(),
test_model: Yup.string(),
models: Yup.array().min(1, '模型 不能为空'),
groups: Yup.array().min(1, '用户组 不能为空'),
base_url: Yup.string().when('type', {
@@ -90,7 +91,7 @@ const EditModal = ({ open, channelId, onCancel, onOk }) => {
if (newInput) {
Object.keys(newInput).forEach((key) => {
if (
(!Array.isArray(values[key]) && values[key] !== null && values[key] !== undefined) ||
(!Array.isArray(values[key]) && values[key] !== null && values[key] !== undefined && values[key] !== '') ||
(Array.isArray(values[key]) && values[key].length > 0)
) {
return;
@@ -464,6 +465,29 @@ const EditModal = ({ open, channelId, onCancel, onOk }) => {
<FormHelperText id="helper-tex-channel-proxy-label"> {inputPrompt.proxy} </FormHelperText>
)}
</FormControl>
{inputPrompt.test_model && (
<FormControl fullWidth error={Boolean(touched.test_model && errors.test_model)} sx={{ ...theme.typography.otherInput }}>
<InputLabel htmlFor="channel-test_model-label">{inputLabel.test_model}</InputLabel>
<OutlinedInput
id="channel-test_model-label"
label={inputLabel.test_model}
type="text"
value={values.test_model}
name="test_model"
onBlur={handleBlur}
onChange={handleChange}
inputProps={{}}
aria-describedby="helper-text-channel-test_model-label"
/>
{touched.test_model && errors.test_model ? (
<FormHelperText error id="helper-tex-channel-test_model-label">
{errors.test_model}
</FormHelperText>
) : (
<FormHelperText id="helper-tex-channel-test_model-label"> {inputPrompt.test_model} </FormHelperText>
)}
</FormControl>
)}
<DialogActions>
<Button onClick={onCancel}>取消</Button>
<Button disableElevation disabled={isSubmitting} type="submit" variant="contained" color="primary">

View File

@@ -6,6 +6,7 @@ const defaultConfig = {
base_url: '',
other: '',
proxy: '',
test_model: '',
model_mapping: '',
models: [],
groups: ['default']
@@ -17,6 +18,7 @@ const defaultConfig = {
key: '密钥',
other: '其他参数',
proxy: '代理地址',
test_model: '测速模型',
models: '模型',
model_mapping: '模型映射关系',
groups: '用户组'
@@ -28,6 +30,7 @@ const defaultConfig = {
key: '请输入渠道对应的鉴权密钥',
other: '',
proxy: '单独设置代理地址支持http和socks5例如http://127.0.0.1:1080',
test_model: '用于测试使用的模型,为空时无法测速,如gpt-3.5-turbo',
models: '请选择该渠道所支持的模型',
model_mapping:
'请输入要修改的模型映射关系格式为api请求模型ID:实际转发给渠道的模型ID使用JSON数组表示例如{"gpt-3.5": "gpt-35"}',
@@ -48,17 +51,20 @@ const typeConfig = {
},
11: {
input: {
models: ['PaLM-2']
models: ['PaLM-2'],
test_model: 'PaLM-2'
}
},
14: {
input: {
models: ['claude-instant-1', 'claude-2', 'claude-2.0', 'claude-2.1']
models: ['claude-instant-1', 'claude-2', 'claude-2.0', 'claude-2.1'],
test_model: 'claude-2'
}
},
15: {
input: {
models: ['ERNIE-Bot', 'ERNIE-Bot-turbo', 'ERNIE-Bot-4', 'Embedding-V1']
models: ['ERNIE-Bot', 'ERNIE-Bot-turbo', 'ERNIE-Bot-4', 'Embedding-V1'],
test_model: 'ERNIE-Bot'
},
prompt: {
key: '按照如下格式输入APIKey|SecretKey'
@@ -66,7 +72,8 @@
},
16: {
input: {
models: ['chatglm_turbo', 'chatglm_pro', 'chatglm_std', 'chatglm_lite']
models: ['chatglm_turbo', 'chatglm_pro', 'chatglm_std', 'chatglm_lite'],
test_model: 'chatglm_lite'
}
},
17: {
@@ -84,7 +91,8 @@
'qwen-plus-internet',
'qwen-max-internet',
'qwen-max-longcontext-internet'
]
],
test_model: 'qwen-turbo'
},
prompt: {
other: '请输入插件参数,即 X-DashScope-Plugin 请求头的取值'
@@ -104,7 +112,8 @@
},
19: {
input: {
models: ['360GPT_S2_V9', 'embedding-bert-512-v1', 'embedding_s1_v1', 'semantic_similarity_s1_v1']
models: ['360GPT_S2_V9', 'embedding-bert-512-v1', 'embedding_s1_v1', 'semantic_similarity_s1_v1'],
test_model: '360GPT_S2_V9'
}
},
22: {
@@ -114,7 +123,8 @@
},
23: {
input: {
models: ['hunyuan']
models: ['hunyuan'],
test_model: 'hunyuan'
},
prompt: {
key: '按照如下格式输入AppId|SecretId|SecretKey'
@@ -125,7 +135,8 @@
other: '版本号'
},
input: {
models: ['gemini-pro']
models: ['gemini-pro', 'gemini-pro-vision'],
test_model: 'gemini-pro'
},
prompt: {
other: '请输入版本号例如v1'
@@ -133,7 +144,16 @@
},
26: {
input: {
models: ['Baichuan2-Turbo', 'Baichuan2-Turbo-192k', 'Baichuan2-53B', 'Baichuan-Text-Embedding']
models: ['Baichuan2-Turbo', 'Baichuan2-Turbo-192k', 'Baichuan2-53B', 'Baichuan-Text-Embedding'],
test_model: 'Baichuan2-Turbo'
}
},
24: {
input: {
models: ['tts-1', 'tts-1-hd']
},
prompt: {
test_model: ''
}
}
};
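
End to end, the test_model value typed into the form (or pre-filled from the per-type defaults above) is submitted as plain JSON and lands in Channel.TestModel through the json tag from the model hunk. A minimal, self-contained sketch of that decoding step follows; the payload literal is illustrative, and only the test_model tag comes from this diff:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Trimmed stand-in for the Channel model; only the json tag matters here.
type Channel struct {
	TestModel string `json:"test_model"`
}

func main() {
	// e.g. what the edit form could submit after picking the type-14 default above.
	payload := []byte(`{"test_model":"claude-2"}`)

	var ch Channel
	if err := json.Unmarshal(payload, &ch); err != nil {
		log.Fatalln(err)
	}
	fmt.Println(ch.TestModel) // claude-2
}
```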