Zhipu V4: carry the exact model name in stream responses
commit d72ebbda0b
parent 31b85ded54
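
Previously, every streamed chunk converted from a Zhipu V4 response reported a hardcoded Model of "glm-4". This commit threads the model name from the original request through StreamHandler into StreamResponseZhipuV42OpenAI and LastStreamResponseZhipuV42OpenAI, so the OpenAI-compatible stream chunks echo the model that was actually requested.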
@@ -99,7 +99,7 @@ func ConvertRequest(request openai.GeneralOpenAIRequest) *Request {
 	}
 }
 
-func StreamResponseZhipuV42OpenAI(zhipuResponse *StreamResponse) (*openai.ChatCompletionsStreamResponse) {
+func StreamResponseZhipuV42OpenAI(zhipuResponse *StreamResponse, reqModel string) *openai.ChatCompletionsStreamResponse {
 	var choice openai.ChatCompletionsStreamResponseChoice
 	choice.Delta.Content = zhipuResponse.Choices[0].Delta.Content
 	choice.Delta.Role = zhipuResponse.Choices[0].Delta.Role
@@ -110,18 +110,18 @@ func StreamResponseZhipuV42OpenAI(zhipuResponse *StreamResponse) (*openai.ChatCo
 		Id:      zhipuResponse.Id,
 		Object:  "chat.completion.chunk",
 		Created: zhipuResponse.Created,
-		Model:   "glm-4",
+		Model:   reqModel,
 		Choices: []openai.ChatCompletionsStreamResponseChoice{choice},
 	}
 	return &response
 }
 
-func LastStreamResponseZhipuV42OpenAI(zhipuResponse *StreamResponse) (*openai.ChatCompletionsStreamResponse, *openai.Usage) {
-	response := StreamResponseZhipuV42OpenAI(zhipuResponse)
+func LastStreamResponseZhipuV42OpenAI(zhipuResponse *StreamResponse, reqModel string) (*openai.ChatCompletionsStreamResponse, *openai.Usage) {
+	response := StreamResponseZhipuV42OpenAI(zhipuResponse, reqModel)
 	return response, &zhipuResponse.Usage
 }
 
-func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatusCode, *openai.Usage) {
+func StreamHandler(c *gin.Context, resp *http.Response, reqModel string) (*openai.ErrorWithStatusCode, *openai.Usage) {
 	var usage *openai.Usage
 	scanner := bufio.NewScanner(resp.Body)
 	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
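
Pieced together from the first two hunks, the converter now reads roughly as below. A few lines between the hunks (old lines 106-109, including the opening of the response struct literal) are not visible in this diff, so treat this as a sketch rather than the verbatim file:

func StreamResponseZhipuV42OpenAI(zhipuResponse *StreamResponse, reqModel string) *openai.ChatCompletionsStreamResponse {
	var choice openai.ChatCompletionsStreamResponseChoice
	choice.Delta.Content = zhipuResponse.Choices[0].Delta.Content
	choice.Delta.Role = zhipuResponse.Choices[0].Delta.Role
	// ... a few lines not shown in the diff; the struct-literal opening
	// below is inferred from the second hunk, not visible in the commit ...
	response := openai.ChatCompletionsStreamResponse{
		Id:      zhipuResponse.Id,
		Object:  "chat.completion.chunk",
		Created: zhipuResponse.Created,
		Model:   reqModel, // previously hardcoded to "glm-4"
		Choices: []openai.ChatCompletionsStreamResponseChoice{choice},
	}
	return &response
}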
@@ -168,9 +168,9 @@ func StreamHandler(c *gin.Context, resp *http.Response) (*openai.ErrorWithStatus
 		}
 		var response *openai.ChatCompletionsStreamResponse
 		if strings.Contains(data, "prompt_tokens") {
-			response, usage = LastStreamResponseZhipuV42OpenAI(&streamResponse)
+			response, usage = LastStreamResponseZhipuV42OpenAI(&streamResponse, reqModel)
 		} else {
-			response = StreamResponseZhipuV42OpenAI(&streamResponse)
+			response = StreamResponseZhipuV42OpenAI(&streamResponse, reqModel)
 		}
 		jsonResponse, err := json.Marshal(response)
 		if err != nil {
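In the handler, the strings.Contains(data, "prompt_tokens") check picks out the final stream event, which is the one carrying usage accounting. Both the final and the intermediate conversion paths now receive reqModel, so every emitted chunk names the same model.
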
@@ -301,7 +301,7 @@ func DoResponse(c *gin.Context, textRequest *openai.GeneralOpenAIRequest, resp *
 		}
 	case constant.APITypeZhipu_v4:
 		if isStream {
-			err, usage = zhipu_v4.StreamHandler(c, resp)
+			err, usage = zhipu_v4.StreamHandler(c, resp, textRequest.Model)
 		} else {
 			err, usage = zhipu_v4.Handler(c, resp)
 		}
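The visible effect at the call site: DoResponse forwards textRequest.Model, so a stream request for, say, glm-4-plus is echoed back as-is. A minimal illustration from inside the zhipu_v4 package (the model name here is hypothetical):

	chunk := StreamResponseZhipuV42OpenAI(&streamResponse, "glm-4-plus")
	fmt.Println(chunk.Model) // "glm-4-plus", no longer always "glm-4"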