Merge remote-tracking branch 'origin/main'
commit f9848d9d10
@@ -117,6 +117,8 @@ sudo certbot --nginx
 sudo service nginx restart
 ```
 
+初始账号用户名为 `root`,密码为 `123456`。
+
 ### 手动部署
 1. 从 [GitHub Releases](https://github.com/songquanpeng/one-api/releases/latest) 下载可执行文件或者从源码编译:
    ```shell
@@ -17,6 +17,7 @@ func GroupRatio2JSONString() string {
 }
 
 func UpdateGroupRatioByJSONString(jsonStr string) error {
+	GroupRatio = make(map[string]float64)
 	return json.Unmarshal([]byte(jsonStr), &GroupRatio)
 }
 
@@ -39,6 +39,7 @@ func ModelRatio2JSONString() string {
 }
 
 func UpdateModelRatioByJSONString(jsonStr string) error {
+	ModelRatio = make(map[string]float64)
 	return json.Unmarshal([]byte(jsonStr), &ModelRatio)
 }
 
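The two hunks above add the same guard: `encoding/json` reuses an existing non-nil map and only adds or overwrites keys, so unmarshalling new ratios without re-making the map would leave stale entries behind. A minimal, self-contained sketch of that behaviour (the model name and ratio values here are made up for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Start with a map that already holds a key not present in the new JSON.
	ratio := map[string]float64{"stale-model": 1.0}

	// Unmarshalling into the existing map only adds/overwrites keys,
	// so "stale-model" would survive without the reset.
	_ = json.Unmarshal([]byte(`{"gpt-3.5-turbo": 0.75}`), &ratio)
	fmt.Println(ratio) // map[gpt-3.5-turbo:0.75 stale-model:1]

	// Re-making the map first (as the diff does) drops stale entries.
	ratio = make(map[string]float64)
	_ = json.Unmarshal([]byte(`{"gpt-3.5-turbo": 0.75}`), &ratio)
	fmt.Println(ratio) // map[gpt-3.5-turbo:0.75]
}
```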
@@ -58,6 +58,20 @@ func countTokenMessages(messages []Message, model string) int {
 	return tokenNum
 }
 
+func countTokenInput(input any, model string) int {
+	switch input.(type) {
+	case string:
+		return countTokenText(input.(string), model)
+	case []string:
+		text := ""
+		for _, s := range input.([]string) {
+			text += s
+		}
+		return countTokenText(text, model)
+	}
+	return 0
+}
+
 func countTokenText(text string, model string) int {
 	tokenEncoder := getTokenEncoder(model)
 	token := tokenEncoder.Encode(text, nil, nil)
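The new `countTokenInput` dispatches on the dynamic type of its argument: a plain string is counted directly, a `[]string` is concatenated and then counted, and anything else yields zero. A runnable sketch of the same dispatch, with a stub word counter standing in for the repo's tiktoken-backed `countTokenText`:

```go
package main

import (
	"fmt"
	"strings"
)

// stubCountTokenText stands in for the repo's countTokenText; here it just
// counts whitespace-separated words for illustration.
func stubCountTokenText(text string, model string) int {
	return len(strings.Fields(text))
}

// countTokenInput mirrors the dispatch added in the diff: a single string is
// counted directly, a slice of strings is concatenated first, anything else
// counts as zero tokens.
func countTokenInput(input any, model string) int {
	switch v := input.(type) {
	case string:
		return stubCountTokenText(v, model)
	case []string:
		text := ""
		for _, s := range v {
			text += s
		}
		return stubCountTokenText(text, model)
	}
	return 0
}

func main() {
	fmt.Println(countTokenInput("flag this text", "gpt-3.5-turbo"))                         // 3
	fmt.Println(countTokenInput([]string{"first input ", "second input"}, "gpt-3.5-turbo")) // 4
	fmt.Println(countTokenInput(42, "gpt-3.5-turbo"))                                       // 0
}
```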
@@ -38,7 +38,7 @@ type GeneralOpenAIRequest struct {
 	Temperature float64 `json:"temperature"`
 	TopP float64 `json:"top_p"`
 	N int `json:"n"`
-	Input string `json:"input"`
+	Input any `json:"input"`
 }
 
 type ChatRequest struct {
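Widening `Input` from `string` to `any` lets the same field carry either a single string or an array, matching the string-or-array input shape of OpenAI-style moderation requests. A small sketch (using a trimmed-down request type, not the full `GeneralOpenAIRequest`) of how `encoding/json` decodes the two forms:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// moderationRequest is a trimmed-down stand-in for GeneralOpenAIRequest,
// keeping only the field touched by this hunk.
type moderationRequest struct {
	Input any `json:"input"`
}

func main() {
	var single, batch moderationRequest

	// A JSON string lands in the field as a Go string.
	_ = json.Unmarshal([]byte(`{"input": "some text"}`), &single)
	fmt.Printf("%T\n", single.Input) // string

	// A JSON array lands as []interface{} when the target type is any.
	_ = json.Unmarshal([]byte(`{"input": ["first", "second"]}`), &batch)
	fmt.Printf("%T\n", batch.Input) // []interface {}
}
```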
@@ -189,7 +189,7 @@ func relayHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
 	case RelayModeCompletions:
 		promptTokens = countTokenText(textRequest.Prompt, textRequest.Model)
 	case RelayModeModeration:
-		promptTokens = countTokenText(textRequest.Input, textRequest.Model)
+		promptTokens = countTokenInput(textRequest.Input, textRequest.Model)
 	}
 	preConsumedTokens := common.PreConsumedQuota
 	if textRequest.MaxTokens != 0 {
@@ -29,7 +29,7 @@ function renderType(type) {
 
 function renderBalance(type, balance) {
   if (type === 5) {
-    return <span>{balance.toFixed(2)}</span>
+    return <span>{balance.toFixed(5)}</span>
   }
   return <span>${balance.toFixed(2)}</span>
 }