package controller

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math"
	"net/http"
	"one-api/common"
	"one-api/common/image"
	"one-api/model"
	"strconv"
	"strings"

	"github.com/gin-gonic/gin"
	"github.com/pkoukk/tiktoken-go"
)

var stopFinishReason = "stop"

// tokenEncoderMap won't grow after initialization
var tokenEncoderMap = map[string]*tiktoken.Tiktoken{}
var defaultTokenEncoder *tiktoken.Tiktoken

func InitTokenEncoders() {
	common.SysLog("initializing token encoders")
	gpt35TokenEncoder, err := tiktoken.EncodingForModel("gpt-3.5-turbo")
	if err != nil {
		common.FatalLog(fmt.Sprintf("failed to get gpt-3.5-turbo token encoder: %s", err.Error()))
	}
	defaultTokenEncoder = gpt35TokenEncoder
	gpt4TokenEncoder, err := tiktoken.EncodingForModel("gpt-4")
	if err != nil {
		common.FatalLog(fmt.Sprintf("failed to get gpt-4 token encoder: %s", err.Error()))
	}
	for model := range common.ModelRatio {
		if strings.HasPrefix(model, "gpt-3.5") {
			tokenEncoderMap[model] = gpt35TokenEncoder
		} else if strings.HasPrefix(model, "gpt-4") {
			tokenEncoderMap[model] = gpt4TokenEncoder
		} else {
			tokenEncoderMap[model] = nil
		}
	}
	common.SysLog("token encoders initialized")
}

func getTokenEncoder(model string) *tiktoken.Tiktoken {
	tokenEncoder, ok := tokenEncoderMap[model]
	if ok && tokenEncoder != nil {
		return tokenEncoder
	}
	if ok {
		// The model is known but has no prebuilt encoder: resolve it lazily,
		// falling back to the gpt-3.5-turbo encoder on failure.
		tokenEncoder, err := tiktoken.EncodingForModel(model)
		if err != nil {
			common.SysError(fmt.Sprintf("failed to get token encoder for model %s: %s, using encoder for gpt-3.5-turbo", model, err.Error()))
			tokenEncoder = defaultTokenEncoder
		}
		tokenEncoderMap[model] = tokenEncoder
		return tokenEncoder
	}
	return defaultTokenEncoder
}

func getTokenNum(tokenEncoder *tiktoken.Tiktoken, text string) int {
	if common.ApproximateTokenEnabled {
		// Rough heuristic: estimate the token count as a fixed fraction of the text length.
		return int(float64(len(text)) * 0.38)
	}
	return len(tokenEncoder.Encode(text, nil, nil))
}
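
// exampleTokenCount is an illustrative sketch (hypothetical, not called
// anywhere in this file) of the two counting paths in getTokenNum. With
// common.ApproximateTokenEnabled set, a 100-character string is estimated at
// int(100*0.38) = 38 tokens; otherwise the exact tiktoken encoding is used.
func exampleTokenCount() int {
	encoder := getTokenEncoder("gpt-3.5-turbo")
	return getTokenNum(encoder, "The quick brown fox jumps over the lazy dog.")
}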

func countTokenMessages(messages []Message, model string) int {
	tokenEncoder := getTokenEncoder(model)
	// Reference:
	// https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
	// https://github.com/pkoukk/tiktoken-go/issues/6
	//
	// Every message follows <|start|>{role/name}\n{content}<|end|>\n
	var tokensPerMessage int
	var tokensPerName int
	if model == "gpt-3.5-turbo-0301" {
		tokensPerMessage = 4
		tokensPerName = -1 // If there's a name, the role is omitted
	} else {
		tokensPerMessage = 3
		tokensPerName = 1
	}
	tokenNum := 0
	for _, message := range messages {
		tokenNum += tokensPerMessage
		switch v := message.Content.(type) {
		case string:
			tokenNum += getTokenNum(tokenEncoder, v)
		case []any:
			for _, it := range v {
				m := it.(map[string]any)
				switch m["type"] {
				case "text":
					tokenNum += getTokenNum(tokenEncoder, m["text"].(string))
				case "image_url":
					imageUrl, ok := m["image_url"].(map[string]any)
					if ok {
						url := imageUrl["url"].(string)
						detail := ""
						if imageUrl["detail"] != nil {
							detail = imageUrl["detail"].(string)
						}
						imageTokens, err := countImageTokens(url, detail)
						if err != nil {
							common.SysError("error counting image tokens: " + err.Error())
						} else {
							tokenNum += imageTokens
						}
					}
				}
			}
		}
		tokenNum += getTokenNum(tokenEncoder, message.Role)
		if message.Name != nil {
			tokenNum += tokensPerName
			tokenNum += getTokenNum(tokenEncoder, *message.Name)
		}
	}
	tokenNum += 3 // Every reply is primed with <|start|>assistant<|message|>
	return tokenNum
}
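
// exampleCountTokenMessages is a minimal sketch (hypothetical, not wired into
// any handler) of driving countTokenMessages with a plain-text conversation.
// It assumes the Message struct fields used above: Role string, Content any.
func exampleCountTokenMessages() int {
	messages := []Message{
		{Role: "system", Content: "You are a helpful assistant."},
		{Role: "user", Content: "Hello!"},
	}
	return countTokenMessages(messages, "gpt-3.5-turbo")
}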

const (
	lowDetailCost         = 85
	highDetailCostPerTile = 170
	additionalCost        = 85
)

// https://platform.openai.com/docs/guides/vision/calculating-costs
// https://github.com/openai/openai-cookbook/blob/05e3f9be4c7a2ae7ecf029a7c32065b024730ebe/examples/How_to_count_tokens_with_tiktoken.ipynb
func countImageTokens(url string, detail string) (_ int, err error) {
	var fetchSize = true
	var width, height int
	// Reference: https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding
	// How detail == "auto" works is not documented; the docs only say the
	// model will look at the image input size and decide whether to use the
	// low or high setting.
	// According to the official guide, "low" disables the high-res model and
	// the model only receives a low-res 512px x 512px version of the image,
	// which suggests an image is treated as low-res when it is smaller than
	// 512px x 512px. We could then assume anything larger is treated as
	// high-res and use the following logic:
	// if detail == "" || detail == "auto" {
	// 	width, height, err = image.GetImageSize(url)
	// 	if err != nil {
	// 		return 0, err
	// 	}
	// 	fetchSize = false
	// 	// not sure if this is correct
	// 	if width > 512 || height > 512 {
	// 		detail = "high"
	// 	} else {
	// 		detail = "low"
	// 	}
	// }
	// However, in testing, "auto" always seems to behave the same as "high".
	// The following image, which is 125x50, is still treated as high-res,
	// taking 255 tokens in the response of the non-stream chat completion API:
	// https://upload.wikimedia.org/wikipedia/commons/1/10/18_Infantry_Division_Messina.jpg
	if detail == "" || detail == "auto" {
		// assumed based on testing; not sure if this is correct
		detail = "high"
	}
	switch detail {
	case "low":
		return lowDetailCost, nil
	case "high":
		if fetchSize {
			width, height, err = image.GetImageSize(url)
			if err != nil {
				return 0, err
			}
		}
		if width > 2048 || height > 2048 { // max(width, height) > 2048
			ratio := float64(2048) / math.Max(float64(width), float64(height))
			width = int(float64(width) * ratio)
			height = int(float64(height) * ratio)
		}
		if width > 768 && height > 768 { // min(width, height) > 768
			ratio := float64(768) / math.Min(float64(width), float64(height))
			width = int(float64(width) * ratio)
			height = int(float64(height) * ratio)
		}
		numSquares := int(math.Ceil(float64(width)/512) * math.Ceil(float64(height)/512))
		result := numSquares*highDetailCostPerTile + additionalCost
		return result, nil
	default:
		return 0, errors.New("invalid detail option")
	}
}
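
// Worked example for the high-detail path above, following the calculation in
// the OpenAI vision guide linked above: a 2048x4096 image is scaled by
// 2048/4096 to 1024x2048 (longest side capped at 2048), then by 768/1024 to
// 768x1536 (shortest side capped at 768), giving
// ceil(768/512)*ceil(1536/512) = 2*3 = 6 tiles, i.e.
// 6*highDetailCostPerTile + additionalCost = 6*170 + 85 = 1105 tokens.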

func countTokenInput(input any, model string) int {
	switch v := input.(type) {
	case string:
		return countTokenText(v, model)
	case []string:
		text := ""
		for _, s := range v {
			text += s
		}
		return countTokenText(text, model)
	}
	return 0
}

func countTokenText(text string, model string) int {
	tokenEncoder := getTokenEncoder(model)
	return getTokenNum(tokenEncoder, text)
}

func errorWrapper(err error, code string, statusCode int) *OpenAIErrorWithStatusCode {
	openAIError := OpenAIError{
		Message: err.Error(),
		Type:    "one_api_error",
		Code:    code,
	}
	return &OpenAIErrorWithStatusCode{
		OpenAIError: openAIError,
		StatusCode:  statusCode,
	}
}

func shouldDisableChannel(err *OpenAIError, statusCode int) bool {
	if !common.AutomaticDisableChannelEnabled {
		return false
	}
	if err == nil {
		return false
	}
	if statusCode == http.StatusUnauthorized {
		return true
	}
	if err.Type == "insufficient_quota" || err.Code == "invalid_api_key" || err.Code == "account_deactivated" {
		return true
	}
	return false
}

func shouldEnableChannel(err error, openAIErr *OpenAIError) bool {
	if !common.AutomaticEnableChannelEnabled {
		return false
	}
	if err != nil {
		return false
	}
	if openAIErr != nil {
		return false
	}
	return true
}

func setEventStreamHeaders(c *gin.Context) {
	c.Writer.Header().Set("Content-Type", "text/event-stream")
	c.Writer.Header().Set("Cache-Control", "no-cache")
	c.Writer.Header().Set("Connection", "keep-alive")
	c.Writer.Header().Set("Transfer-Encoding", "chunked")
	c.Writer.Header().Set("X-Accel-Buffering", "no")
}
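
// exampleStreamHandler is a minimal sketch (hypothetical handler, not routed
// anywhere) of how setEventStreamHeaders pairs with gin's ResponseWriter to
// emit server-sent events: set the headers once, then write and flush each
// "data:" chunk.
func exampleStreamHandler(c *gin.Context) {
	setEventStreamHeaders(c)
	_, _ = c.Writer.WriteString("data: [DONE]\n\n")
	c.Writer.Flush()
}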

type GeneralErrorResponse struct {
	Error    OpenAIError `json:"error"`
	Message  string      `json:"message"`
	Msg      string      `json:"msg"`
	Err      string      `json:"err"`
	ErrorMsg string      `json:"error_msg"`
	Header   struct {
		Message string `json:"message"`
	} `json:"header"`
	Response struct {
		Error struct {
			Message string `json:"message"`
		} `json:"error"`
	} `json:"response"`
}

// ToMessage returns the first non-empty error message among the fields that
// different upstreams are known to use.
func (e GeneralErrorResponse) ToMessage() string {
	if e.Error.Message != "" {
		return e.Error.Message
	}
	if e.Message != "" {
		return e.Message
	}
	if e.Msg != "" {
		return e.Msg
	}
	if e.Err != "" {
		return e.Err
	}
	if e.ErrorMsg != "" {
		return e.ErrorMsg
	}
	if e.Header.Message != "" {
		return e.Header.Message
	}
	if e.Response.Error.Message != "" {
		return e.Response.Error.Message
	}
	return ""
}
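
// Sketch of the fallback order (hypothetical input, not used in production):
// an upstream body like {"msg":"rate limited"} has no OpenAI-style
// error.message, so ToMessage falls through to the Msg field.
func exampleToMessage() string {
	var errResponse GeneralErrorResponse
	_ = json.Unmarshal([]byte(`{"msg":"rate limited"}`), &errResponse)
	return errResponse.ToMessage() // "rate limited"
}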

func relayErrorHandler(resp *http.Response) (openAIErrorWithStatusCode *OpenAIErrorWithStatusCode) {
	openAIErrorWithStatusCode = &OpenAIErrorWithStatusCode{
		StatusCode: resp.StatusCode,
		OpenAIError: OpenAIError{
			Message: "",
			Type:    "upstream_error",
			Code:    "bad_response_status_code",
			Param:   strconv.Itoa(resp.StatusCode),
		},
	}
	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return
	}
	err = resp.Body.Close()
	if err != nil {
		return
	}
	var errResponse GeneralErrorResponse
	err = json.Unmarshal(responseBody, &errResponse)
	if err != nil {
		return
	}
	if errResponse.Error.Message != "" {
		// OpenAI-format error, so we override the default one
		openAIErrorWithStatusCode.OpenAIError = errResponse.Error
	} else {
		openAIErrorWithStatusCode.OpenAIError.Message = errResponse.ToMessage()
	}
	if openAIErrorWithStatusCode.OpenAIError.Message == "" {
		openAIErrorWithStatusCode.OpenAIError.Message = fmt.Sprintf("bad response status code %d", resp.StatusCode)
	}
	return
}

func getFullRequestURL(baseURL string, requestURL string, channelType int) string {
	fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)
	if strings.HasPrefix(baseURL, "https://gateway.ai.cloudflare.com") {
		switch channelType {
		case common.ChannelTypeOpenAI:
			fullRequestURL = fmt.Sprintf("%s%s", baseURL, strings.TrimPrefix(requestURL, "/v1"))
		case common.ChannelTypeAzure:
			fullRequestURL = fmt.Sprintf("%s%s", baseURL, strings.TrimPrefix(requestURL, "/openai/deployments"))
		}
	}
	return fullRequestURL
}
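
// Sketch of the Cloudflare AI Gateway rewrite above (the account and gateway
// path segments are hypothetical placeholders): for an OpenAI channel,
//
//	getFullRequestURL("https://gateway.ai.cloudflare.com/v1/ACCOUNT/GATEWAY/openai",
//		"/v1/chat/completions", common.ChannelTypeOpenAI)
//
// drops the "/v1" prefix from the request path and returns
// "https://gateway.ai.cloudflare.com/v1/ACCOUNT/GATEWAY/openai/chat/completions".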

func postConsumeQuota(ctx context.Context, tokenId int, quotaDelta int, totalQuota int, userId int, channelId int, modelRatio float64, groupRatio float64, modelName string, tokenName string) {
	// quotaDelta is the remaining quota to be consumed
	err := model.PostConsumeTokenQuota(tokenId, quotaDelta)
	if err != nil {
		common.SysError("error consuming token remaining quota: " + err.Error())
	}
	err = model.CacheUpdateUserQuota(userId)
	if err != nil {
		common.SysError("error updating user quota cache: " + err.Error())
	}
	// totalQuota is the total quota consumed
	if totalQuota != 0 {
		logContent := fmt.Sprintf("model ratio %.2f, group ratio %.2f", modelRatio, groupRatio)
		model.RecordConsumeLog(ctx, userId, channelId, totalQuota, 0, modelName, tokenName, totalQuota, logContent)
		model.UpdateUserUsedQuotaAndRequestCount(userId, totalQuota)
		model.UpdateChannelUsedQuota(channelId, totalQuota)
	}
	if totalQuota <= 0 {
		common.LogError(ctx, fmt.Sprintf("totalQuota consumed is %d, something is wrong", totalQuota))
	}
}
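
// Example of the two quota figures (values assumed for illustration): if 1000
// quota was pre-consumed when the request started but the request actually
// cost 1200, the caller passes quotaDelta = 200 (the remainder to deduct) and
// totalQuota = 1200 (the amount logged and attributed to the user and channel).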

func GetAPIVersion(c *gin.Context) string {
	query := c.Request.URL.Query()
	apiVersion := query.Get("api-version")
	if apiVersion == "" {
		apiVersion = c.GetString("api_version")
	}
	return apiVersion
}