feat: save chat prompt and completion content when using the OpenAI model.

This commit is contained in:
tangfei-china 2023-10-30 21:05:32 +08:00
parent d2e1791812
commit 04cabfac18
3 changed files with 41 additions and 0 deletions

View File

@ -198,10 +198,12 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
fullRequestURL = fmt.Sprintf("%s/api/library/ask", baseURL) fullRequestURL = fmt.Sprintf("%s/api/library/ask", baseURL)
} }
var promptTokens int var promptTokens int
var prompt string
var completionTokens int var completionTokens int
switch relayMode { switch relayMode {
case RelayModeChatCompletions: case RelayModeChatCompletions:
promptTokens = countTokenMessages(textRequest.Messages, textRequest.Model) promptTokens = countTokenMessages(textRequest.Messages, textRequest.Model)
prompt = textRequest.Messages[len(textRequest.Messages)-1].Content
case RelayModeCompletions: case RelayModeCompletions:
promptTokens = countTokenInput(textRequest.Prompt, textRequest.Model) promptTokens = countTokenInput(textRequest.Prompt, textRequest.Model)
case RelayModeModerations: case RelayModeModerations:
@ -406,6 +408,8 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
var textResponse TextResponse var textResponse TextResponse
tokenName := c.GetString("token_name") tokenName := c.GetString("token_name")
var response string
defer func(ctx context.Context) { defer func(ctx context.Context) {
// c.Writer.Flush() // c.Writer.Flush()
go func() { go func() {
@ -438,6 +442,7 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
if quota != 0 { if quota != 0 {
logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio) logContent := fmt.Sprintf("模型倍率 %.2f,分组倍率 %.2f", modelRatio, groupRatio)
model.RecordConsumeLog(ctx, userId, channelId, promptTokens, completionTokens, textRequest.Model, tokenName, quota, logContent) model.RecordConsumeLog(ctx, userId, channelId, promptTokens, completionTokens, textRequest.Model, tokenName, quota, logContent)
model.RecordConsumeText(userId, tokenName, prompt, response)
model.UpdateUserUsedQuotaAndRequestCount(userId, quota) model.UpdateUserUsedQuotaAndRequestCount(userId, quota)
model.UpdateChannelUsedQuota(channelId, quota) model.UpdateChannelUsedQuota(channelId, quota)
} }
@ -453,6 +458,7 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
} }
textResponse.Usage.PromptTokens = promptTokens textResponse.Usage.PromptTokens = promptTokens
textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model) textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
response = responseText
return nil return nil
} else { } else {
err, usage := openaiHandler(c, resp, consumeQuota, promptTokens, textRequest.Model) err, usage := openaiHandler(c, resp, consumeQuota, promptTokens, textRequest.Model)

31
model/log-text.go Normal file
View File

@ -0,0 +1,31 @@
package model
import (
"one-api/common"
)
// LogText is one persisted chat exchange: the prompt sent to the model and
// the completion it returned, together with who made the request and when.
// Rows are written by RecordConsumeText and the table is created by
// AutoMigrate during database initialization.
type LogText struct {
	Id         int    `json:"id"`                                 // primary key (GORM default for a field named Id)
	UserId     int    `json:"user_id" gorm:"index"`               // owning user; indexed for per-user queries
	CreatedAt  int64  `json:"created_at" gorm:"index"`            // unix timestamp (common.GetTimestamp()); indexed for time-range queries
	Username   string `json:"username" gorm:"index;default:''"`   // username denormalized at write time
	TokenName  string `json:"token_name" gorm:"index;default:''"` // name of the API token used for the request
	Prompt     string `json:"prompt" gorm:"type:text"`            // prompt text sent to the model
	Completion string `json:"completion" gorm:"type:text"`        // response text returned by the model
}
// RecordConsumeText persists a single prompt/completion pair for the given
// user and token. Insert failures are logged via common.SysError and not
// returned, so a failed write never interrupts the request that produced it.
func RecordConsumeText(userId int, token string, prompt string, completion string) {
	entry := LogText{
		UserId:     userId,
		Username:   GetUsernameById(userId), // denormalize the username at write time
		CreatedAt:  common.GetTimestamp(),
		TokenName:  token,
		Prompt:     prompt,
		Completion: completion,
	}
	if err := DB.Create(&entry).Error; err != nil {
		common.SysError("failed to record text: " + err.Error())
	}
}

View File

@ -111,6 +111,10 @@ func InitDB() (err error) {
if err != nil { if err != nil {
return err return err
} }
err = db.AutoMigrate(&LogText{})
if err != nil {
return err
}
common.SysLog("database migrated") common.SysLog("database migrated")
err = createRootAccountIfNeed() err = createRootAccountIfNeed()
return err return err