ai-gateway/relay/controller/text.go
Laisky.Cai fc9a784950
feat: support aws bedrockruntime claude3 (#1328)
* feat: support aws bedrockruntime claude3

closes #622, closes #749, closes #1300

* fix: convert to aws claude model id

* fix: Update AWS adapter to handle stream completions and calculate usage metrics

- Add handling for stream completion events from AWS in relay/adaptor/aws/main.go
- Marshal the AWS response into OpenAI format and calculate usage metrics
- Implement a custom render function for streaming events (sketched below)
- Improve error handling for JSON marshalling and unmarshalling errors
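
A minimal sketch of such a render loop, using Gin's Stream helper; the completionChunk shape and the events channel are illustrative assumptions, not the adaptor's actual types:

package aws

import (
	"encoding/json"
	"fmt"
	"io"

	"github.com/gin-gonic/gin"
)

// completionChunk is an assumed, minimal OpenAI-style stream chunk.
type completionChunk struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Choices []struct {
		Delta struct {
			Content string `json:"content"`
		} `json:"delta"`
	} `json:"choices"`
}

// streamToClient re-emits upstream AWS events as OpenAI-compatible SSE.
func streamToClient(c *gin.Context, events <-chan []byte) {
	c.Writer.Header().Set("Content-Type", "text/event-stream")
	c.Stream(func(w io.Writer) bool {
		payload, ok := <-events
		if !ok {
			fmt.Fprint(w, "data: [DONE]\n\n") // OpenAI-style stream terminator
			return false
		}
		var chunk completionChunk
		if err := json.Unmarshal(payload, &chunk); err != nil {
			return false // stop on a malformed upstream event
		}
		out, err := json.Marshal(chunk)
		if err != nil {
			return false
		}
		fmt.Fprintf(w, "data: %s\n\n", out)
		return true // keep the connection open for the next event
	})
}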

* fix: Implement AWS handler with usage tracking and error handling

- Implement streaming response handling for the AWS handler
- Set the response content type to text/event-stream
- Add error handling for failed marshalling/unmarshalling
- Update return values to include `relaymodel.ErrorWithStatusCode` and `relaymodel.Usage` (see the sketch below)
- Improve error handling and response formatting for the AWS adaptor
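
A sketch of that return contract, assuming one-api's relaymodel types; the respond helper and its parameters are hypothetical stand-ins, not the adaptor's real API:

package aws

import (
	"net/http"

	"github.com/gin-gonic/gin"
	relaymodel "github.com/songquanpeng/one-api/relay/model"
)

// respond writes the final response and reports usage so the caller can bill
// the request; a nil error wrapper signals success.
func respond(c *gin.Context, body any, promptTokens, completionTokens int) (*relaymodel.ErrorWithStatusCode, *relaymodel.Usage) {
	usage := relaymodel.Usage{
		PromptTokens:     promptTokens,
		CompletionTokens: completionTokens,
		TotalTokens:      promptTokens + completionTokens,
	}
	c.JSON(http.StatusOK, body)
	return nil, &usage
}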

* fix: Refactor AWS Adapter for Improved Model Mapping and Error Handling

* Refactor AWS adapter to improve model management
  - Replace the hardcoded model list in `adapter.go` with a function that derives the models from `awsModelIDMap`
  - Update `GetModelList` to return the model list directly
  - Add `GetChannelName` to get the channel name from the `Adaptor` object
* Improve error handling and code organization in main.go (sketched below)
  - Replace the switch statement with a map from OpenAI model names to AWS model IDs
  - Return an error if the model is not found in the map
  - Use a single return statement in `awsModelID` instead of multiple wrapped returns
  - Add a new error message in `Handler` for when the model is missing from the map
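
A minimal sketch of that map-based lookup; the entries below are illustrative and may not match the adaptor's actual awsModelIDMap:

package aws

import "fmt"

// awsModelIDMap-style lookup: OpenAI model name -> Bedrock model ID.
var awsModelIDMap = map[string]string{
	"claude-3-sonnet-20240229": "anthropic.claude-3-sonnet-20240229-v1:0",
	"claude-3-haiku-20240307":  "anthropic.claude-3-haiku-20240307-v1:0",
}

// awsModelID resolves a request model name to a Bedrock model ID, using a
// single error path instead of a fallthrough switch.
func awsModelID(requestModel string) (string, error) {
	if id, ok := awsModelIDMap[requestModel]; ok {
		return id, nil
	}
	return "", fmt.Errorf("model %s not found", requestModel)
}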

* fix: bug fix

* chore: change variable name & package

* chore: change variable name

* perf: update config related code

---------

Co-authored-by: JustSong <songquanpeng@foxmail.com>
2024-04-20 00:40:47 +08:00

108 lines
3.7 KiB
Go

package controller

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"

	"github.com/gin-gonic/gin"

	"github.com/songquanpeng/one-api/common/logger"
	"github.com/songquanpeng/one-api/relay"
	"github.com/songquanpeng/one-api/relay/adaptor/openai"
	"github.com/songquanpeng/one-api/relay/apitype"
	"github.com/songquanpeng/one-api/relay/billing"
	billingratio "github.com/songquanpeng/one-api/relay/billing/ratio"
	"github.com/songquanpeng/one-api/relay/channeltype"
	"github.com/songquanpeng/one-api/relay/meta"
	"github.com/songquanpeng/one-api/relay/model"
)
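
// RelayTextHelper proxies a text request to the selected upstream channel: it
// validates the request, maps the model name, pre-consumes quota, lets the
// channel adaptor convert and send the request, and settles billing from the
// returned usage.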
func RelayTextHelper(c *gin.Context) *model.ErrorWithStatusCode {
	ctx := c.Request.Context()
	meta := meta.GetByContext(c)
	// get & validate textRequest
	textRequest, err := getAndValidateTextRequest(c, meta.Mode)
	if err != nil {
		logger.Errorf(ctx, "getAndValidateTextRequest failed: %s", err.Error())
		return openai.ErrorWrapper(err, "invalid_text_request", http.StatusBadRequest)
	}
	meta.IsStream = textRequest.Stream
	// map model name
	var isModelMapped bool
	meta.OriginModelName = textRequest.Model
	textRequest.Model, isModelMapped = getMappedModelName(textRequest.Model, meta.ModelMapping)
	meta.ActualModelName = textRequest.Model
	// get model ratio & group ratio
	modelRatio := billingratio.GetModelRatio(textRequest.Model)
	groupRatio := billingratio.GetGroupRatio(meta.Group)
	ratio := modelRatio * groupRatio
	// pre-consume quota
	promptTokens := getPromptTokens(textRequest, meta.Mode)
	meta.PromptTokens = promptTokens
	preConsumedQuota, bizErr := preConsumeQuota(ctx, textRequest, promptTokens, ratio, meta)
	if bizErr != nil {
		logger.Warnf(ctx, "preConsumeQuota failed: %+v", *bizErr)
		return bizErr
	}
	adaptor := relay.GetAdaptor(meta.APIType)
	if adaptor == nil {
		return openai.ErrorWrapper(fmt.Errorf("invalid api type: %d", meta.APIType), "invalid_api_type", http.StatusBadRequest)
	}
	// get request body
	var requestBody io.Reader
	if meta.APIType == apitype.OpenAI {
		// no need to convert request for openai
		shouldResetRequestBody := isModelMapped || meta.ChannelType == channeltype.Baichuan // frequency_penalty 0 is not acceptable for baichuan
		if shouldResetRequestBody {
			jsonStr, err := json.Marshal(textRequest)
			if err != nil {
				return openai.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
			}
			requestBody = bytes.NewBuffer(jsonStr)
		} else {
			requestBody = c.Request.Body
		}
	} else {
		convertedRequest, err := adaptor.ConvertRequest(c, meta.Mode, textRequest)
		if err != nil {
			return openai.ErrorWrapper(err, "convert_request_failed", http.StatusInternalServerError)
		}
		jsonData, err := json.Marshal(convertedRequest)
		if err != nil {
			return openai.ErrorWrapper(err, "json_marshal_failed", http.StatusInternalServerError)
		}
		logger.Debugf(ctx, "converted request: \n%s", string(jsonData))
		requestBody = bytes.NewBuffer(jsonData)
	}
	// do request
	resp, err := adaptor.DoRequest(c, meta, requestBody)
	if err != nil {
		logger.Errorf(ctx, "DoRequest failed: %s", err.Error())
		return openai.ErrorWrapper(err, "do_request_failed", http.StatusInternalServerError)
	}
	if resp != nil {
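		// a stream request answered with a plain JSON body means the
		// upstream returned an error payload instead of an event stream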
		errorHappened := (resp.StatusCode != http.StatusOK) || (meta.IsStream && resp.Header.Get("Content-Type") == "application/json")
		if errorHappened {
			billing.ReturnPreConsumedQuota(ctx, preConsumedQuota, meta.TokenId)
			return RelayErrorHandler(resp)
		}
	}
	// do response
	usage, respErr := adaptor.DoResponse(c, resp, meta)
	if respErr != nil {
		logger.Errorf(ctx, "respErr is not nil: %+v", respErr)
		billing.ReturnPreConsumedQuota(ctx, preConsumedQuota, meta.TokenId)
		return respErr
	}
	// post-consume quota
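	// billing runs asynchronously so it does not delay the client response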
	go postConsumeQuota(ctx, usage, meta, textRequest, ratio, preConsumedQuota, modelRatio, groupRatio)
	return nil
}