From 763c8883dad97b4b29a32fcfc743329cf2af1349 Mon Sep 17 00:00:00 2001
From: Buer <42402987+MartialBE@users.noreply.github.com>
Date: Wed, 24 Apr 2024 16:59:58 +0800
Subject: [PATCH] =?UTF-8?q?=E2=9C=A8=20feat:=20support=20Ollama=20(#?=
=?UTF-8?q?168)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
README.en.md | 51 ++---
README.md | 1 +
common/constants.go | 2 +
i18n/en.json | 6 +-
providers/ollama/base.go | 79 +++++++
providers/ollama/chat.go | 207 +++++++++++++++++++
providers/ollama/embeddings.go | 70 +++++++
providers/ollama/type.go | 48 +++++
providers/providers.go | 2 +
relay/util/type.go | 1 +
web/src/constants/ChannelConstants.js | 13 +-
web/src/views/Channel/component/EditModal.js | 1 +
web/src/views/Channel/type/Config.js | 9 +
web/src/views/Channel/type/Plugin.json | 20 ++
14 files changed, 481 insertions(+), 29 deletions(-)
create mode 100644 providers/ollama/base.go
create mode 100644 providers/ollama/chat.go
create mode 100644 providers/ollama/embeddings.go
create mode 100644 providers/ollama/type.go
diff --git a/README.en.md b/README.en.md
index d62ca652..ce3ca292 100644
--- a/README.en.md
+++ b/README.en.md
@@ -68,31 +68,32 @@ Please refer to the [documentation](https://github.com/MartialBE/one-api/wiki).
## Current Supported Providers
-| Provider | Chat | Embeddings | Audio | Images | Other |
-| --------------------------------------------------------------------- | ------------------------ | ---------- | ------ | ----------- | ---------------------------------------------------------------- |
-| [OpenAI](https://platform.openai.com/docs/api-reference/introduction) | ✅ | ✅ | ✅ | ✅ | - |
-| [Azure OpenAI](https://oai.azure.com/) | ✅ | ✅ | ✅ | ✅ | - |
-| [Azure Speech](https://portal.azure.com/) | - | - | ⚠️ tts | - | - |
-| [Anthropic](https://www.anthropic.com/) | ✅ | - | - | - | - |
-| [Gemini](https://aistudio.google.com/) | ✅ | - | - | - | - |
-| [百度文心](https://console.bce.baidu.com/qianfan/overview) | ✅ | ✅ | - | - | - |
-| [通义千问](https://dashscope.console.aliyun.com/overview) | ✅ | ✅ | - | - | - |
-| [讯飞星火](https://console.xfyun.cn/) | ✅ | - | - | - | - |
-| [智谱](https://open.bigmodel.cn/overview) | ✅ | ✅ | - | ⚠️ image | - |
-| [腾讯混元](https://cloud.tencent.com/product/hunyuan) | ✅ | - | - | - | - |
-| [百川](https://platform.baichuan-ai.com/console/apikey) | ✅ | ✅ | - | - | - |
-| [MiniMax](https://www.minimaxi.com/user-center/basic-information) | ✅ | ✅ | - | - | - |
-| [Deepseek](https://platform.deepseek.com/usage) | ✅ | - | - | - | - |
-| [Moonshot](https://moonshot.ai/) | ✅ | - | - | - | - |
-| [Mistral](https://mistral.ai/) | ✅ | ✅ | - | - | - |
-| [Groq](https://console.groq.com/keys) | ✅ | - | - | - | - |
-| [Amazon Bedrock](https://console.aws.amazon.com/bedrock/home) | ⚠️ Only support Anthropic models | - | - | - | - |
-| [零一万物](https://platform.lingyiwanwu.com/details) | ✅ | - | - | - | - |
-| [Cloudflare AI](https://ai.cloudflare.com/) | ✅ | - | ⚠️ stt | ⚠️ image | - |
-| [Midjourney](https://www.midjourney.com/) | - | - | - | - | [midjourney-proxy](https://github.com/novicezk/midjourney-proxy) |
-| [Cohere](https://cohere.com/) | ✅ | - | - | - | - |
-| [Stability AI](https://platform.stability.ai/account/credits) | - | - | - | ⚠️ image | - |
-| [Coze](https://www.coze.com/open/docs/chat?_lang=zh) | ✅ | - | - | - | - |
+| Provider | Chat | Embeddings | Audio | Images | Other |
+| --------------------------------------------------------------------- | -------------------------------- | ---------- | ------ | -------- | ---------------------------------------------------------------- |
+| [OpenAI](https://platform.openai.com/docs/api-reference/introduction) | ✅ | ✅ | ✅ | ✅ | - |
+| [Azure OpenAI](https://oai.azure.com/) | ✅ | ✅ | ✅ | ✅ | - |
+| [Azure Speech](https://portal.azure.com/) | - | - | ⚠️ tts | - | - |
+| [Anthropic](https://www.anthropic.com/) | ✅ | - | - | - | - |
+| [Gemini](https://aistudio.google.com/) | ✅ | - | - | - | - |
+| [百度文心](https://console.bce.baidu.com/qianfan/overview) | ✅ | ✅ | - | - | - |
+| [通义千问](https://dashscope.console.aliyun.com/overview) | ✅ | ✅ | - | - | - |
+| [讯飞星火](https://console.xfyun.cn/) | ✅ | - | - | - | - |
+| [智谱](https://open.bigmodel.cn/overview) | ✅ | ✅ | - | ⚠️ image | - |
+| [腾讯混元](https://cloud.tencent.com/product/hunyuan) | ✅ | - | - | - | - |
+| [百川](https://platform.baichuan-ai.com/console/apikey) | ✅ | ✅ | - | - | - |
+| [MiniMax](https://www.minimaxi.com/user-center/basic-information) | ✅ | ✅ | - | - | - |
+| [Deepseek](https://platform.deepseek.com/usage) | ✅ | - | - | - | - |
+| [Moonshot](https://moonshot.ai/) | ✅ | - | - | - | - |
+| [Mistral](https://mistral.ai/) | ✅ | ✅ | - | - | - |
+| [Groq](https://console.groq.com/keys) | ✅ | - | - | - | - |
+| [Amazon Bedrock](https://console.aws.amazon.com/bedrock/home) | ⚠️ Only support Anthropic models | - | - | - | - |
+| [零一万物](https://platform.lingyiwanwu.com/details) | ✅ | - | - | - | - |
+| [Cloudflare AI](https://ai.cloudflare.com/) | ✅ | - | ⚠️ stt | ⚠️ image | - |
+| [Midjourney](https://www.midjourney.com/) | - | - | - | - | [midjourney-proxy](https://github.com/novicezk/midjourney-proxy) |
+| [Cohere](https://cohere.com/) | ✅ | - | - | - | - |
+| [Stability AI](https://platform.stability.ai/account/credits) | - | - | - | ⚠️ image | - |
+| [Coze](https://www.coze.com/open/docs/chat?_lang=zh) | ✅ | - | - | - | - |
+| [Ollama](https://github.com/ollama/ollama) | ✅ | ✅ | - | - | - |
## Acknowledgements
diff --git a/README.md b/README.md
index 6033232e..a06a7e92 100644
--- a/README.md
+++ b/README.md
@@ -93,6 +93,7 @@ _本项目是基于[one-api](https://github.com/songquanpeng/one-api)二次开
| [Cohere](https://cohere.com/) | ✅ | - | - | - | - |
| [Stability AI](https://platform.stability.ai/account/credits) | - | - | - | ⚠️ 图片生成 | - |
| [Coze](https://www.coze.com/open/docs/chat?_lang=zh) | ✅ | - | - | - | - |
+| [Ollama](https://github.com/ollama/ollama) | ✅ | ✅ | - | - | - |
## 感谢
diff --git a/common/constants.go b/common/constants.go
index 2b9d4c2c..af24cdb5 100644
--- a/common/constants.go
+++ b/common/constants.go
@@ -176,6 +176,7 @@ const (
ChannelTypeCohere = 36
ChannelTypeStabilityAI = 37
ChannelTypeCoze = 38
+ ChannelTypeOllama = 39
)
var ChannelBaseURLs = []string{
@@ -218,6 +219,7 @@ var ChannelBaseURLs = []string{
"https://api.cohere.ai/v1", //36
"https://api.stability.ai/v2beta", //37
"https://api.coze.com/open_api", //38
+ "", //39
}
const (
diff --git a/i18n/en.json b/i18n/en.json
index e3d4dc27..d6113003 100644
--- a/i18n/en.json
+++ b/i18n/en.json
@@ -1124,5 +1124,9 @@
"位置/区域": "Location/Region",
"请输入你 Speech Studio 的位置/区域,例如:eastasia": "Please enter the location/region of your Speech Studio, for example: eastasia",
"必须指定渠道": "Channel must be specified",
- "中继": "Relay"
+ "中继": "Relay",
+ "请输入你部署的Ollama地址,例如:http://127.0.0.1:11434,如果你使用了cloudflare Zero Trust,可以在下方插件填入授权信息": "Please enter the Ollama address you deployed, for example: http://127.0.0.1:11434. If you are using Cloudflare Zero Trust, you can fill in the authorization information in the plugin below.",
+ "请随意填写": "Please fill in at will",
+ "Header 配置": "Header Configuration",
+ "本配置主要是用于使用cloudflare Zero Trust将端口暴露到公网时,需要配置的header": "This configuration is mainly used for the header that needs to be configured when using Cloudflare Zero Trust to expose the port to the public network"
}
diff --git a/providers/ollama/base.go b/providers/ollama/base.go
new file mode 100644
index 00000000..29a967ec
--- /dev/null
+++ b/providers/ollama/base.go
@@ -0,0 +1,79 @@
+package ollama
+
+import (
+ "encoding/json"
+ "net/http"
+ "one-api/common/requester"
+ "one-api/model"
+ "one-api/types"
+
+ "one-api/providers/base"
+)
+
+type OllamaProviderFactory struct{}
+
+type OllamaProvider struct {
+ base.BaseProvider
+}
+
+// 创建 OllamaProvider
+func (f OllamaProviderFactory) Create(channel *model.Channel) base.ProviderInterface {
+ config := getOllamaConfig()
+
+ return &OllamaProvider{
+ BaseProvider: base.BaseProvider{
+ Config: config,
+ Channel: channel,
+ Requester: requester.NewHTTPRequester(*channel.Proxy, RequestErrorHandle),
+ },
+ }
+}
+
+func getOllamaConfig() base.ProviderConfig {
+ return base.ProviderConfig{
+ BaseURL: "",
+ ChatCompletions: "/api/chat",
+ Embeddings: "/api/embeddings",
+ }
+}
+
+// 请求错误处理
+func RequestErrorHandle(resp *http.Response) *types.OpenAIError {
+ errorResponse := &OllamaError{}
+ err := json.NewDecoder(resp.Body).Decode(errorResponse)
+ if err != nil {
+ return nil
+ }
+
+ return errorHandle(errorResponse)
+}
+
+// 错误处理
+func errorHandle(OllamaError *OllamaError) *types.OpenAIError {
+ if OllamaError.Error == "" {
+ return nil
+ }
+ return &types.OpenAIError{
+ Message: OllamaError.Error,
+ Type: "Ollama Error",
+ }
+}
+
+// 获取请求头
+func (p *OllamaProvider) GetRequestHeaders() (headers map[string]string) {
+ headers = make(map[string]string)
+ p.CommonRequestHeaders(headers)
+
+ otherHeaders := p.Channel.Plugin.Data()["headers"]
+
+ for key, value := range otherHeaders {
+ headerValue, isString := value.(string)
+ if !isString || headerValue == "" {
+ continue
+ }
+
+ headers[key] = headerValue
+ }
+
+ return headers
+}
diff --git a/providers/ollama/chat.go b/providers/ollama/chat.go
new file mode 100644
index 00000000..0143b40b
--- /dev/null
+++ b/providers/ollama/chat.go
@@ -0,0 +1,207 @@
+package ollama
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "one-api/common"
+ "one-api/common/image"
+ "one-api/common/requester"
+ "one-api/types"
+ "strings"
+)
+
+type ollamaStreamHandler struct {
+ Usage *types.Usage
+ Request *types.ChatCompletionRequest
+}
+
+func (p *OllamaProvider) CreateChatCompletion(request *types.ChatCompletionRequest) (*types.ChatCompletionResponse, *types.OpenAIErrorWithStatusCode) {
+ req, errWithCode := p.getChatRequest(request)
+ if errWithCode != nil {
+ return nil, errWithCode
+ }
+ defer req.Body.Close()
+
+ response := &ChatResponse{}
+ // 发送请求
+ _, errWithCode = p.Requester.SendRequest(req, response, false)
+ if errWithCode != nil {
+ return nil, errWithCode
+ }
+
+ return p.convertToChatOpenai(response, request)
+}
+
+func (p *OllamaProvider) CreateChatCompletionStream(request *types.ChatCompletionRequest) (requester.StreamReaderInterface[string], *types.OpenAIErrorWithStatusCode) {
+ req, errWithCode := p.getChatRequest(request)
+ if errWithCode != nil {
+ return nil, errWithCode
+ }
+ defer req.Body.Close()
+
+ // 发送请求
+ resp, errWithCode := p.Requester.SendRequestRaw(req)
+ if errWithCode != nil {
+ return nil, errWithCode
+ }
+
+ chatHandler := &ollamaStreamHandler{
+ Usage: p.Usage,
+ Request: request,
+ }
+
+ return requester.RequestStream(p.Requester, resp, chatHandler.handlerStream)
+}
+
+func (p *OllamaProvider) getChatRequest(request *types.ChatCompletionRequest) (*http.Request, *types.OpenAIErrorWithStatusCode) {
+ url, errWithCode := p.GetSupportedAPIUri(common.RelayModeChatCompletions)
+ if errWithCode != nil {
+ return nil, errWithCode
+ }
+
+ // 获取请求地址
+ fullRequestURL := p.GetFullRequestURL(url, request.Model)
+
+ // 获取请求头
+ headers := p.GetRequestHeaders()
+
+ ollamaRequest, errWithCode := convertFromChatOpenai(request)
+ if errWithCode != nil {
+ return nil, errWithCode
+ }
+
+ // 创建请求
+ req, err := p.Requester.NewRequest(http.MethodPost, fullRequestURL, p.Requester.WithBody(ollamaRequest), p.Requester.WithHeader(headers))
+ if err != nil {
+ return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
+ }
+
+ return req, nil
+}
+
+func (p *OllamaProvider) convertToChatOpenai(response *ChatResponse, request *types.ChatCompletionRequest) (openaiResponse *types.ChatCompletionResponse, errWithCode *types.OpenAIErrorWithStatusCode) {
+ err := errorHandle(&response.OllamaError)
+ if err != nil {
+ errWithCode = &types.OpenAIErrorWithStatusCode{
+ OpenAIError: *err,
+ StatusCode: http.StatusBadRequest,
+ }
+ return
+ }
+
+ choices := types.ChatCompletionChoice{
+ Index: 0,
+ Message: types.ChatCompletionMessage{
+ Role: response.Message.Role,
+ Content: response.Message.Content,
+ },
+ FinishReason: types.FinishReasonStop,
+ }
+
+ openaiResponse = &types.ChatCompletionResponse{
+ ID: fmt.Sprintf("chatcmpl-%s", common.GetUUID()),
+ Object: "chat.completion",
+ Created: common.GetTimestamp(),
+ Model: request.Model,
+ Choices: []types.ChatCompletionChoice{choices},
+ Usage: &types.Usage{
+ PromptTokens: response.PromptEvalCount,
+ CompletionTokens: response.EvalCount,
+ TotalTokens: response.PromptEvalCount + response.EvalCount,
+ },
+ }
+
+ *p.Usage = *openaiResponse.Usage
+
+ return openaiResponse, nil
+}
+
+func convertFromChatOpenai(request *types.ChatCompletionRequest) (*ChatRequest, *types.OpenAIErrorWithStatusCode) {
+ ollamaRequest := &ChatRequest{
+ Model: request.Model,
+ Stream: request.Stream,
+ Messages: make([]Message, 0, len(request.Messages)),
+ Options: Option{
+ Temperature: request.Temperature,
+ TopP: request.TopP,
+ Seed: request.Seed,
+ },
+ }
+
+ for _, message := range request.Messages {
+ ollamaMessage := Message{
+ Role: message.Role,
+ Content: "",
+ }
+
+ openaiMessagePart := message.ParseContent()
+ for _, openaiPart := range openaiMessagePart {
+ if openaiPart.Type == types.ContentTypeText {
+ ollamaMessage.Content += openaiPart.Text
+ } else if openaiPart.Type == types.ContentTypeImageURL {
+ _, data, err := image.GetImageFromUrl(openaiPart.ImageURL.URL)
+ if err != nil {
+ return nil, common.ErrorWrapper(err, "image_url_invalid", http.StatusBadRequest)
+ }
+ ollamaMessage.Images = append(ollamaMessage.Images, data)
+ }
+ }
+ ollamaRequest.Messages = append(ollamaRequest.Messages, ollamaMessage)
+ }
+
+ return ollamaRequest, nil
+}
+
+// 转换为OpenAI聊天流式请求体
+func (h *ollamaStreamHandler) handlerStream(rawLine *[]byte, dataChan chan string, errChan chan error) {
+ if !strings.HasPrefix(string(*rawLine), "{") {
+ *rawLine = nil
+ return
+ }
+
+ var chatResponse ChatResponse
+ err := json.Unmarshal(*rawLine, &chatResponse)
+ if err != nil {
+ errChan <- common.ErrorToOpenAIError(err)
+ return
+ }
+
+ errWithCode := errorHandle(&chatResponse.OllamaError)
+ if errWithCode != nil {
+ errChan <- errWithCode
+ return
+ }
+
+ choice := types.ChatCompletionStreamChoice{
+ Index: 0,
+ }
+
+ if chatResponse.Message.Content != "" {
+ choice.Delta = types.ChatCompletionStreamChoiceDelta{
+ Role: types.ChatMessageRoleAssistant,
+ Content: chatResponse.Message.Content,
+ }
+ }
+
+ if chatResponse.Done {
+ choice.FinishReason = types.FinishReasonStop
+ }
+
+ if chatResponse.EvalCount > 0 {
+ h.Usage.PromptTokens = chatResponse.PromptEvalCount
+ h.Usage.CompletionTokens = chatResponse.EvalCount
+ h.Usage.TotalTokens = h.Usage.PromptTokens + chatResponse.EvalCount
+ }
+
+ chatCompletion := types.ChatCompletionStreamResponse{
+ ID: fmt.Sprintf("chatcmpl-%s", common.GetUUID()),
+ Object: "chat.completion.chunk",
+ Created: common.GetTimestamp(),
+ Model: h.Request.Model,
+ Choices: []types.ChatCompletionStreamChoice{choice},
+ }
+
+ responseBody, _ := json.Marshal(chatCompletion)
+ dataChan <- string(responseBody)
+}
diff --git a/providers/ollama/embeddings.go b/providers/ollama/embeddings.go
new file mode 100644
index 00000000..7231f9e4
--- /dev/null
+++ b/providers/ollama/embeddings.go
@@ -0,0 +1,70 @@
+package ollama
+
+import (
+ "net/http"
+ "one-api/common"
+ "one-api/types"
+)
+
+func (p *OllamaProvider) CreateEmbeddings(request *types.EmbeddingRequest) (*types.EmbeddingResponse, *types.OpenAIErrorWithStatusCode) {
+ url, errWithCode := p.GetSupportedAPIUri(common.RelayModeEmbeddings)
+ if errWithCode != nil {
+ return nil, errWithCode
+ }
+ // 获取请求地址
+ fullRequestURL := p.GetFullRequestURL(url, request.Model)
+ if fullRequestURL == "" {
+ return nil, common.ErrorWrapper(nil, "invalid_ollama_config", http.StatusInternalServerError)
+ }
+
+ // 获取请求头
+ headers := p.GetRequestHeaders()
+
+ ollamaRequest := &EmbeddingRequest{
+ Model: request.Model,
+ Prompt: request.ParseInputString(),
+ }
+
+ // 创建请求
+ req, err := p.Requester.NewRequest(http.MethodPost, fullRequestURL, p.Requester.WithBody(ollamaRequest), p.Requester.WithHeader(headers))
+ if err != nil {
+ return nil, common.ErrorWrapper(err, "new_request_failed", http.StatusInternalServerError)
+ }
+ defer req.Body.Close()
+
+ ollamaResponse := &EmbeddingResponse{}
+
+ // 发送请求
+ var res *http.Response
+ res, errWithCode = p.Requester.SendRequest(req, ollamaResponse, false)
+ if errWithCode != nil {
+ return nil, errWithCode
+ }
+
+ errWithOP := errorHandle(&ollamaResponse.OllamaError)
+ if errWithOP != nil {
+ return nil, &types.OpenAIErrorWithStatusCode{
+ OpenAIError: *errWithOP,
+ StatusCode: res.StatusCode,
+ }
+ }
+
+ response := &types.EmbeddingResponse{
+ Object: "list",
+ Model: request.Model,
+ Data: []types.Embedding{{
+ Object: "embedding",
+ Index: 0,
+ Embedding: ollamaResponse.Embedding,
+ }},
+ Usage: &types.Usage{
+ TotalTokens: 0,
+ CompletionTokens: 0,
+ PromptTokens: 0,
+ },
+ }
+
+ *p.Usage = *response.Usage
+
+ return response, nil
+}
diff --git a/providers/ollama/type.go b/providers/ollama/type.go
new file mode 100644
index 00000000..a49d4d32
--- /dev/null
+++ b/providers/ollama/type.go
@@ -0,0 +1,48 @@
+package ollama
+
+import "time"
+
+type OllamaError struct {
+ Error string `json:"error,omitempty"`
+}
+
+type ChatRequest struct {
+ Model string `json:"model"`
+ Messages []Message `json:"messages,omitempty"`
+ Stream bool `json:"stream"`
+ Format string `json:"format,omitempty"`
+ Options Option `json:"options,omitempty"`
+}
+
+type Option struct {
+ Temperature float64 `json:"temperature,omitempty"`
+ Seed *int `json:"seed,omitempty"`
+ TopP float64 `json:"top_p,omitempty"`
+ TopK int `json:"top_k,omitempty"`
+}
+
+type ChatResponse struct {
+ OllamaError
+ Model string `json:"model"`
+ CreatedAt time.Time `json:"created_at"`
+ Message Message `json:"message,omitempty"`
+ Done bool `json:"done"`
+ EvalCount int `json:"eval_count,omitempty"`
+ PromptEvalCount int `json:"prompt_eval_count,omitempty"`
+}
+
+type Message struct {
+ Role string `json:"role,omitempty"`
+ Content string `json:"content,omitempty"`
+ Images []string `json:"images,omitempty"`
+}
+
+type EmbeddingRequest struct {
+ Model string `json:"model"`
+ Prompt string `json:"prompt"`
+}
+
+type EmbeddingResponse struct {
+ OllamaError
+ Embedding []float64 `json:"embedding,omitempty"`
+}
diff --git a/providers/providers.go b/providers/providers.go
index 2e1f0f78..22eb9302 100644
--- a/providers/providers.go
+++ b/providers/providers.go
@@ -20,6 +20,7 @@ import (
"one-api/providers/midjourney"
"one-api/providers/minimax"
"one-api/providers/mistral"
+ "one-api/providers/ollama"
"one-api/providers/openai"
"one-api/providers/palm"
"one-api/providers/stabilityAI"
@@ -62,6 +63,7 @@ func init() {
providerFactories[common.ChannelTypeCohere] = cohere.CohereProviderFactory{}
providerFactories[common.ChannelTypeStabilityAI] = stabilityAI.StabilityAIProviderFactory{}
providerFactories[common.ChannelTypeCoze] = coze.CozeProviderFactory{}
+ providerFactories[common.ChannelTypeOllama] = ollama.OllamaProviderFactory{}
}
diff --git a/relay/util/type.go b/relay/util/type.go
index 1cea1065..0ab72196 100644
--- a/relay/util/type.go
+++ b/relay/util/type.go
@@ -28,5 +28,6 @@ func init() {
common.ChannelTypeCloudflareAI: "Cloudflare AI",
common.ChannelTypeCohere: "Cohere",
common.ChannelTypeStabilityAI: "Stability AI",
+ common.ChannelTypeOllama: "Ollama",
}
}
diff --git a/web/src/constants/ChannelConstants.js b/web/src/constants/ChannelConstants.js
index bc78c7e2..1998f6f3 100644
--- a/web/src/constants/ChannelConstants.js
+++ b/web/src/constants/ChannelConstants.js
@@ -144,27 +144,34 @@ export const CHANNEL_OPTIONS = {
text: 'Cloudflare AI',
value: 35,
color: 'orange',
- url: ''
+ url: 'https://ai.cloudflare.com/'
},
36: {
key: 36,
text: 'Cohere',
value: 36,
color: 'default',
- url: ''
+ url: 'https://cohere.com/'
},
37: {
key: 37,
text: 'Stability AI',
value: 37,
color: 'default',
- url: ''
+ url: 'https://platform.stability.ai/account/credits'
},
38: {
key: 38,
text: 'Coze',
value: 38,
color: 'primary',
+ url: 'https://www.coze.com/open/docs/chat?_lang=zh'
+ },
+ 39: {
+ key: 39,
+ text: 'Ollama',
+ value: 39,
+ color: 'orange',
url: ''
},
24: {
diff --git a/web/src/views/Channel/component/EditModal.js b/web/src/views/Channel/component/EditModal.js
index 53d7a3b8..069673b6 100644
--- a/web/src/views/Channel/component/EditModal.js
+++ b/web/src/views/Channel/component/EditModal.js
@@ -636,6 +636,7 @@ const EditModal = ({ open, channelId, onCancel, onOk, groupOptions }) => {
<>
{plugin.name}
+ {plugin.description}
{Object.keys(plugin.params).map((paramId) => {
const param = plugin.params[paramId];
const name = `plugin.${pluginId}.${paramId}`;
diff --git a/web/src/views/Channel/type/Config.js b/web/src/views/Channel/type/Config.js
index 10b4d7d7..74ca393d 100644
--- a/web/src/views/Channel/type/Config.js
+++ b/web/src/views/Channel/type/Config.js
@@ -315,6 +315,15 @@ const typeConfig = {
'模型名称映射, 你可以取一个容易记忆的名字来代替coze-{bot_id},例如:{"coze-translate": "coze-xxxxx"},注意:如果使用了模型映射,那么上面的模型名称必须使用映射前的名称,上述例子中,你应该在模型中填入coze-translate(如果已经使用了coze-*,可以忽略)。'
},
modelGroup: 'Coze'
+ },
+ 39: {
+ input: {
+ models: ['phi3', 'llama3']
+ },
+ prompt: {
+ base_url: '请输入你部署的Ollama地址,例如:http://127.0.0.1:11434,如果你使用了cloudflare Zero Trust,可以在下方插件填入授权信息',
+ key: '请随意填写'
+ }
}
};
diff --git a/web/src/views/Channel/type/Plugin.json b/web/src/views/Channel/type/Plugin.json
index da1030f5..cded6d50 100644
--- a/web/src/views/Channel/type/Plugin.json
+++ b/web/src/views/Channel/type/Plugin.json
@@ -88,5 +88,25 @@
}
}
}
+ },
+ "39": {
+ "headers": {
+ "name": "Header 配置",
+ "description": "本配置主要是用于使用cloudflare Zero Trust将端口暴露到公网时,需要配置的header",
+ "params": {
+ "CF-Access-Client-Id": {
+ "name": "CF-Access-Client-Id",
+ "description": "CF-Access-Client-Id",
+ "type": "string",
+ "required": true
+ },
+ "CF-Access-Client-Secret": {
+ "name": "CF-Access-Client-Secret",
+ "description": "CF-Access-Client-Secret",
+ "type": "string",
+ "required": true
+ }
+ }
+ }
}
}