disable MaxTokens

This commit is contained in:
parent d086f34cff
commit 90cd939df1
@@ -67,8 +67,8 @@ func testChannel(channel *model.Channel, request *ChatRequest) error {
 func buildTestRequest(c *gin.Context) *ChatRequest {
 	model_ := c.Query("model")
 	testRequest := &ChatRequest{
 		Model: model_,
-		MaxTokens: 1,
+		//MaxTokens: 1,
 	}
 	testMessage := Message{
 		Role: "user",
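Note on the hunk above: with MaxTokens commented out, the channel test probe now carries only the model name and a single user message, so the output limit is left to the upstream provider's default. A minimal sketch of the resulting request body, using simplified stand-in types (the literal model name and message content below are placeholders, not the repo's values):

package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-ins for the repo's Message and ChatRequest types.
type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

type ChatRequest struct {
	Model    string    `json:"model"`
	Messages []Message `json:"messages"`
}

func main() {
	// Mirrors buildTestRequest after this change: no max_tokens field at all.
	testRequest := &ChatRequest{
		Model:    "gpt-3.5-turbo", // placeholder model name
		Messages: []Message{{Role: "user", Content: "hi"}},
	}
	body, _ := json.Marshal(testRequest)
	fmt.Println(string(body))
	// {"model":"gpt-3.5-turbo","messages":[{"role":"user","content":"hi"}]}
}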
@@ -51,7 +51,7 @@ func relayPaLM(openAIRequest GeneralOpenAIRequest, c *gin.Context) *OpenAIErrorWithStatusCode {
 		Temperature: openAIRequest.Temperature,
 		CandidateCount: openAIRequest.N,
 		TopP: openAIRequest.TopP,
-		TopK: openAIRequest.MaxTokens,
+		//TopK: openAIRequest.MaxTokens,
 	}
 	// TODO: forward request to PaLM & convert response
 	fmt.Print(request)
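Note: TopK was being filled from the OpenAI-style MaxTokens, which are unrelated parameters (sampling cutoff vs. output length), so after this change the PaLM-style request simply leaves TopK at its zero value. A rough sketch of the mapping as it now stands, with stand-in types whose field names are assumptions for illustration:

package main

import "fmt"

// Stand-ins for the repo's request types; field names are illustrative only.
type GeneralOpenAIRequest struct {
	Temperature float64
	TopP        float64
	N           int
	MaxTokens   int
}

type PaLMChatRequest struct {
	Temperature    float64
	CandidateCount int
	TopP           float64
	TopK           int
}

func main() {
	openAIRequest := GeneralOpenAIRequest{Temperature: 0.7, TopP: 0.9, N: 1, MaxTokens: 256}
	// Mirrors the mapping after this change: TopK is no longer derived from MaxTokens.
	request := PaLMChatRequest{
		Temperature:    openAIRequest.Temperature,
		CandidateCount: openAIRequest.N,
		TopP:           openAIRequest.TopP,
		//TopK:         openAIRequest.MaxTokens,
	}
	fmt.Printf("%+v\n", request) // TopK stays 0, i.e. left to the provider default
}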
@@ -30,28 +30,28 @@ const (
 // https://platform.openai.com/docs/api-reference/chat
 
 type GeneralOpenAIRequest struct {
 	Model string `json:"model"`
 	Messages []Message `json:"messages"`
 	Prompt string `json:"prompt"`
 	Stream bool `json:"stream"`
-	MaxTokens int `json:"max_tokens"`
+	//MaxTokens int `json:"max_tokens"`
 	Temperature float64 `json:"temperature"`
 	TopP float64 `json:"top_p"`
 	N int `json:"n"`
 	Input any `json:"input"`
 }
 
 type ChatRequest struct {
 	Model string `json:"model"`
 	Messages []Message `json:"messages"`
-	MaxTokens int `json:"max_tokens"`
+	//MaxTokens int `json:"max_tokens"`
 }
 
 type TextRequest struct {
 	Model string `json:"model"`
 	Messages []Message `json:"messages"`
 	Prompt string `json:"prompt"`
-	MaxTokens int `json:"max_tokens"`
+	//MaxTokens int `json:"max_tokens"`
 	//Stream bool `json:"stream"`
 }
 
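Note: commenting out the struct fields themselves means encoding/json now silently drops any max_tokens key a client still sends; binding succeeds, the value is simply never read. A small sketch of that behaviour with a trimmed-down request type:

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down GeneralOpenAIRequest mirroring the change: MaxTokens is gone.
type GeneralOpenAIRequest struct {
	Model  string `json:"model"`
	Prompt string `json:"prompt"`
	//MaxTokens int `json:"max_tokens"`
}

func main() {
	body := []byte(`{"model":"gpt-3.5-turbo","prompt":"hello","max_tokens":128}`)
	var req GeneralOpenAIRequest
	if err := json.Unmarshal(body, &req); err != nil {
		panic(err)
	}
	// max_tokens is accepted but ignored: there is no longer a field to bind it to.
	fmt.Printf("%+v\n", req)
	// {Model:gpt-3.5-turbo Prompt:hello}
}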
@@ -193,9 +193,9 @@ func relayHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 		promptTokens = countTokenInput(textRequest.Input, textRequest.Model)
 	}
 	preConsumedTokens := common.PreConsumedQuota
-	if textRequest.MaxTokens != 0 {
-		preConsumedTokens = promptTokens + textRequest.MaxTokens
-	}
+	//if textRequest.MaxTokens != 0 {
+	// preConsumedTokens = promptTokens + textRequest.MaxTokens
+	//}
 	modelRatio := common.GetModelRatio(textRequest.Model)
 	groupRatio := common.GetGroupRatio(group)
 	ratio := modelRatio * groupRatio
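Note: with this branch disabled, the pre-consumed quota is always the flat common.PreConsumedQuota scaled by the ratios, no matter how large a completion the request asked for. A hedged sketch of the simplified step (the parameter names below are placeholders for the repo's globals and helpers, and the final ratio scaling is an assumption):

package main

import "fmt"

// Sketch of the pre-consumption step after this change; preConsumedDefault
// stands in for common.PreConsumedQuota, and the ratio scaling is assumed.
func estimatePreConsumedQuota(promptTokens, preConsumedDefault int, modelRatio, groupRatio float64) int {
	preConsumedTokens := preConsumedDefault
	// Previously, a request with max_tokens set would raise the estimate:
	//if maxTokens != 0 {
	//	preConsumedTokens = promptTokens + maxTokens
	//}
	ratio := modelRatio * groupRatio
	return int(float64(preConsumedTokens) * ratio)
}

func main() {
	// The prompt size no longer changes the up-front estimate.
	fmt.Println(estimatePreConsumedQuota(50, 500, 1.0, 1.0))   // 500
	fmt.Println(estimatePreConsumedQuota(5000, 500, 1.0, 1.0)) // 500
}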