package model

import (
	"encoding/json"
	"errors"
	"fmt"
	"math/rand"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/songquanpeng/one-api/common"
	"github.com/songquanpeng/one-api/common/config"
	"github.com/songquanpeng/one-api/common/logger"
)
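
// Cache TTLs for the Redis-backed lookups below, in seconds; they all default
// to config.SyncFrequency.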
var (
	TokenCacheSeconds         = config.SyncFrequency
	UserId2GroupCacheSeconds  = config.SyncFrequency
	UserId2QuotaCacheSeconds  = config.SyncFrequency
	UserId2StatusCacheSeconds = config.SyncFrequency
)
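
// CacheGetTokenByKey looks up a token by its key. When Redis is enabled it is
// consulted first; on a cache miss the token is loaded from the database and
// written back to Redis with the TokenCacheSeconds TTL.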
func CacheGetTokenByKey(key string) (*Token, error) {
	keyCol := "`key`"
	if common.UsingPostgreSQL {
		keyCol = `"key"`
	}
	var token Token
	if !common.RedisEnabled {
		err := DB.Where(keyCol+" = ?", key).First(&token).Error
		return &token, err
	}
	tokenObjectString, err := common.RedisGet(fmt.Sprintf("token:%s", key))
	if err != nil {
		err := DB.Where(keyCol+" = ?", key).First(&token).Error
		if err != nil {
			return nil, err
		}
		jsonBytes, err := json.Marshal(token)
		if err != nil {
			return nil, err
		}
		err = common.RedisSet(fmt.Sprintf("token:%s", key), string(jsonBytes), time.Duration(TokenCacheSeconds)*time.Second)
		if err != nil {
			logger.SysError("Redis set token error: " + err.Error())
		}
		return &token, nil
	}
	err = json.Unmarshal([]byte(tokenObjectString), &token)
	return &token, err
}
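
// CacheGetUserGroup returns the group of the given user, reading from Redis
// when enabled and falling back to the database (and repopulating the cache)
// on a miss.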
func CacheGetUserGroup(id int) (group string, err error) {
	if !common.RedisEnabled {
		return GetUserGroup(id)
	}
	group, err = common.RedisGet(fmt.Sprintf("user_group:%d", id))
	if err != nil {
		group, err = GetUserGroup(id)
		if err != nil {
			return "", err
		}
		err = common.RedisSet(fmt.Sprintf("user_group:%d", id), group, time.Duration(UserId2GroupCacheSeconds)*time.Second)
		if err != nil {
			logger.SysError("Redis set user group error: " + err.Error())
		}
	}
	return group, err
}
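
// CacheGetUserQuota returns the remaining quota of the given user, reading
// from Redis when enabled and falling back to the database (and repopulating
// the cache) on a miss.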
func CacheGetUserQuota(id int) (quota int, err error) {
	if !common.RedisEnabled {
		return GetUserQuota(id)
	}
	quotaString, err := common.RedisGet(fmt.Sprintf("user_quota:%d", id))
	if err != nil {
		quota, err = GetUserQuota(id)
		if err != nil {
			return 0, err
		}
		err = common.RedisSet(fmt.Sprintf("user_quota:%d", id), fmt.Sprintf("%d", quota), time.Duration(UserId2QuotaCacheSeconds)*time.Second)
		if err != nil {
			logger.SysError("Redis set user quota error: " + err.Error())
		}
		return quota, err
	}
	quota, err = strconv.Atoi(quotaString)
	return quota, err
}
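
// CacheUpdateUserQuota re-writes the user's quota entry in Redis with a fresh
// TTL; it is a no-op when Redis is disabled.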
func CacheUpdateUserQuota(id int) error {
	if !common.RedisEnabled {
		return nil
	}
	quota, err := CacheGetUserQuota(id)
	if err != nil {
		return err
	}
	err = common.RedisSet(fmt.Sprintf("user_quota:%d", id), fmt.Sprintf("%d", quota), time.Duration(UserId2QuotaCacheSeconds)*time.Second)
	return err
}
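
// CacheDecreaseUserQuota decrements the cached quota of the given user in
// Redis; it is a no-op when Redis is disabled.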
func CacheDecreaseUserQuota(id int, quota int) error {
	if !common.RedisEnabled {
		return nil
	}
	err := common.RedisDecrease(fmt.Sprintf("user_quota:%d", id), int64(quota))
	return err
}
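
// CacheIsUserEnabled reports whether the given user is enabled, using the
// cached "user_enabled" flag in Redis when available and falling back to the
// database otherwise.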
func CacheIsUserEnabled(userId int) (bool, error) {
	if !common.RedisEnabled {
		return IsUserEnabled(userId)
	}
	enabled, err := common.RedisGet(fmt.Sprintf("user_enabled:%d", userId))
	if err == nil {
		return enabled == "1", nil
	}

	userEnabled, err := IsUserEnabled(userId)
	if err != nil {
		return false, err
	}
	enabled = "0"
	if userEnabled {
		enabled = "1"
	}
	err = common.RedisSet(fmt.Sprintf("user_enabled:%d", userId), enabled, time.Duration(UserId2StatusCacheSeconds)*time.Second)
	if err != nil {
		logger.SysError("Redis set user enabled error: " + err.Error())
	}
	return userEnabled, err
}
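
// In-memory channel cache: group -> model -> channels (sorted by priority),
// guarded by channelSyncLock.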
var group2model2channels map[string]map[string][]*Channel
var channelSyncLock sync.RWMutex
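
// InitChannelCache rebuilds the group -> model -> channel map from the enabled
// channels in the database, sorts each slice by descending priority, and swaps
// the new map in under the write lock.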
func InitChannelCache() {
	newChannelId2channel := make(map[int]*Channel)
	var channels []*Channel
	DB.Where("status = ?", common.ChannelStatusEnabled).Find(&channels)
	for _, channel := range channels {
		newChannelId2channel[channel.Id] = channel
	}
	var abilities []*Ability
	DB.Find(&abilities)
	groups := make(map[string]bool)
	for _, ability := range abilities {
		groups[ability.Group] = true
	}
	newGroup2model2channels := make(map[string]map[string][]*Channel)
	for group := range groups {
		newGroup2model2channels[group] = make(map[string][]*Channel)
	}
	for _, channel := range channels {
		groups := strings.Split(channel.Group, ",")
		for _, group := range groups {
			models := strings.Split(channel.Models, ",")
			for _, model := range models {
				if _, ok := newGroup2model2channels[group][model]; !ok {
					newGroup2model2channels[group][model] = make([]*Channel, 0)
				}
				newGroup2model2channels[group][model] = append(newGroup2model2channels[group][model], channel)
			}
		}
	}

	// sort by priority
	for group, model2channels := range newGroup2model2channels {
		for model, channels := range model2channels {
			sort.Slice(channels, func(i, j int) bool {
				return channels[i].GetPriority() > channels[j].GetPriority()
			})
			newGroup2model2channels[group][model] = channels
		}
	}

	channelSyncLock.Lock()
	group2model2channels = newGroup2model2channels
	channelSyncLock.Unlock()
	logger.SysLog("channels synced from database")
}
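
// SyncChannelCache periodically rebuilds the channel cache, sleeping for the
// given number of seconds between refreshes.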
func SyncChannelCache(frequency int) {
	for {
		time.Sleep(time.Duration(frequency) * time.Second)
		logger.SysLog("syncing channels from database")
		InitChannelCache()
	}
}
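
// CacheGetRandomSatisfiedChannel picks a random enabled channel for the given
// group and model. With the memory cache enabled, the draw is normally limited
// to the leading (highest-priority) channels when that priority is non-zero;
// with ignoreFirstPriority set, it falls through to the lower-priority
// channels instead, when any exist.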
func CacheGetRandomSatisfiedChannel(group string, model string, ignoreFirstPriority bool) (*Channel, error) {
	if !config.MemoryCacheEnabled {
		return GetRandomSatisfiedChannel(group, model)
	}
	channelSyncLock.RLock()
	defer channelSyncLock.RUnlock()
	channels := group2model2channels[group][model]
	if len(channels) == 0 {
		return nil, errors.New("channel not found")
	}
	endIdx := len(channels)
	// choose by priority
	firstChannel := channels[0]
	if firstChannel.GetPriority() > 0 {
		for i := range channels {
			if channels[i].GetPriority() != firstChannel.GetPriority() {
				endIdx = i
				break
			}
		}
	}
	idx := rand.Intn(endIdx)
	if ignoreFirstPriority {
		if endIdx < len(channels) { // which means there is more than one priority
			idx = common.RandRange(endIdx, len(channels))
		}
	}
	return channels[idx], nil
}