diff --git a/README.md b/README.md
index 8221d34f..bc140949 100644
--- a/README.md
+++ b/README.md
@@ -102,6 +102,7 @@ AnythingLLM divides your documents into objects called `workspaces`. A Workspace
- [Z.AI (chat models)](https://z.ai/model-api)
- [Novita AI (chat models)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [PPIO](https://ppinfra.com?utm_source=github_anything-llm)
+- [Gitee AI](https://ai.gitee.com/)
- [Moonshot AI](https://www.moonshot.ai/)
- [Microsoft Foundry Local](https://github.com/microsoft/Foundry-Local)
- [CometAPI (chat models)](https://api.cometapi.com/)
diff --git a/docker/.env.example b/docker/.env.example
index 0cf05f3c..76131cff 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -157,6 +157,11 @@ GID='1000'
# FOUNDRY_MODEL_PREF='phi-3.5-mini'
# FOUNDRY_MODEL_TOKEN_LIMIT=4096
+# LLM_PROVIDER='giteeai'
+# GITEE_AI_API_KEY=
+# GITEE_AI_MODEL_PREF=
+# GITEE_AI_MODEL_TOKEN_LIMIT=
+
###########################################
######## Embedding API SElECTION ##########
###########################################
diff --git a/frontend/src/components/LLMSelection/GiteeAIOptions/index.jsx b/frontend/src/components/LLMSelection/GiteeAIOptions/index.jsx
new file mode 100644
index 00000000..fa6fa910
--- /dev/null
+++ b/frontend/src/components/LLMSelection/GiteeAIOptions/index.jsx
@@ -0,0 +1,116 @@
+import { useState, useEffect } from "react";
+import System from "@/models/system";
+
+export default function GiteeAIOptions({ settings }) {
+  return (
+    <div className="flex gap-[36px] mt-1.5">
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          GiteeAI API Key
+        </label>
+        <input
+          type="password"
+          name="GiteeAIApiKey"
+          className="border-none bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
+          placeholder="GiteeAI API Key"
+          defaultValue={settings?.GiteeAIApiKey ? "*".repeat(20) : ""}
+          required={true}
+          autoComplete="off"
+          spellCheck={false}
+        />
+      </div>
+      {!settings?.credentialsOnly && (
+        <>
+          <GiteeAIModelSelection settings={settings} />
+          <div className="flex flex-col w-60">
+            <label className="text-white text-sm font-semibold block mb-3">
+              Token context window
+            </label>
+            <input
+              type="number"
+              name="GiteeAITokenLimit"
+              className="border-none bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
+              placeholder="Content window limit (eg: 4096)"
+              min={1}
+              onScroll={(e) => e.target.blur()}
+              defaultValue={settings?.GiteeAITokenLimit}
+              required={true}
+              autoComplete="off"
+            />
+          </div>
+        </>
+      )}
+    </div>
+  );
+}
+
+function GiteeAIModelSelection({ settings }) {
+ const [groupedModels, setGroupedModels] = useState({});
+ const [loading, setLoading] = useState(true);
+
+ useEffect(() => {
+ async function findCustomModels() {
+ setLoading(true);
+ const { models = [] } = await System.customModels("giteeai");
+ if (models?.length > 0) {
+ const modelsByOrganization = models.reduce((acc, model) => {
+ acc[model.organization] = acc[model.organization] || [];
+ acc[model.organization].push(model);
+ return acc;
+ }, {});
+ setGroupedModels(modelsByOrganization);
+ }
+
+ setLoading(false);
+ }
+ findCustomModels();
+ }, []);
+
+ if (loading) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          Chat Model Selection
+        </label>
+        <select
+          name="GiteeAIModelPref"
+          disabled={true}
+          className="border-none bg-theme-settings-input-bg border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          <option disabled={true} selected={true}>
+            -- loading available models --
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-3">
+        Chat Model Selection
+      </label>
+      <select
+        name="GiteeAIModelPref"
+        required={true}
+        className="border-none bg-theme-settings-input-bg border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+      >
+        {Object.keys(groupedModels)
+          .sort()
+          .map((organization) => (
+            <optgroup key={organization} label={organization}>
+              {groupedModels[organization].map((model) => (
+                <option
+                  key={model.id}
+                  value={model.id}
+                  selected={settings?.GiteeAIModelPref === model.id}
+                >
+                  {model.name}
+                </option>
+              ))}
+            </optgroup>
+          ))}
+      </select>
+    </div>
+  );
+}
diff --git a/frontend/src/media/llmprovider/giteeai.png b/frontend/src/media/llmprovider/giteeai.png
new file mode 100644
index 00000000..a1ef5fda
Binary files /dev/null and b/frontend/src/media/llmprovider/giteeai.png differ
diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index 19177607..1a50539a 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -36,6 +36,7 @@ import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
import CometApiLogo from "@/media/llmprovider/cometapi.png";
import FoundryLogo from "@/media/llmprovider/foundry-local.png";
+import GiteeAILogo from "@/media/llmprovider/giteeai.png";
import PreLoader from "@/components/Preloader";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -69,6 +70,7 @@ import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
import MoonshotAiOptions from "@/components/LLMSelection/MoonshotAiOptions";
import FoundryOptions from "@/components/LLMSelection/FoundryOptions";
+import GiteeAIOptions from "@/components/LLMSelection/GiteeAIOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -345,6 +347,14 @@ export const AVAILABLE_LLM_PROVIDERS = [
description: "Run Z.AI's powerful GLM models.",
requiredConfig: ["ZAiApiKey"],
},
+ {
+ name: "GiteeAI",
+ value: "giteeai",
+ logo: GiteeAILogo,
+    options: (settings) => <GiteeAIOptions settings={settings} />,
+ description: "Run GiteeAI's powerful LLMs.",
+ requiredConfig: ["GiteeAIApiKey"],
+ },
{
name: "Generic OpenAI",
value: "generic-openai",
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index 3c804ec8..76568582 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -42,6 +42,7 @@ import DPAISLogo from "@/media/llmprovider/dpais.png";
import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
import CometApiLogo from "@/media/llmprovider/cometapi.png";
import FoundryLogo from "@/media/llmprovider/foundry-local.png";
+import GiteeAILogo from "@/media/llmprovider/giteeai.png";
import React, { useState, useEffect } from "react";
import paths from "@/utils/paths";
@@ -279,6 +280,13 @@ export const LLM_SELECTION_PRIVACY = {
],
logo: FoundryLogo,
},
+ giteeai: {
+ name: "GiteeAI",
+ description: [
+ "Your model and chat contents are visible to GiteeAI in accordance with their terms of service.",
+ ],
+ logo: GiteeAILogo,
+ },
};
export const VECTOR_DB_PRIVACY = {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index ed4b02f7..a0cf2ae8 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -30,6 +30,7 @@ import PPIOLogo from "@/media/llmprovider/ppio.png";
import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
import CometApiLogo from "@/media/llmprovider/cometapi.png";
+import GiteeAILogo from "@/media/llmprovider/giteeai.png";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
@@ -61,6 +62,7 @@ import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
import MoonshotAiOptions from "@/components/LLMSelection/MoonshotAiOptions";
import CometApiLLMOptions from "@/components/LLMSelection/CometApiLLMOptions";
+import GiteeAIOptions from "@/components/LLMSelection/GiteeAIOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import System from "@/models/system";
@@ -290,6 +292,13 @@ const LLMS = [
     options: (settings) => <CometApiLLMOptions settings={settings} />,
description: "500+ AI Models all in one API.",
},
+ {
+ name: "GiteeAI",
+ value: "giteeai",
+ logo: GiteeAILogo,
+    options: (settings) => <GiteeAIOptions settings={settings} />,
+ description: "Run GiteeAI's powerful LLMs.",
+ },
];
export default function LLMPreference({
diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
index a1309203..020c5016 100644
--- a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
@@ -35,6 +35,7 @@ const ENABLED_PROVIDERS = [
"cometapi",
"foundry",
"zai",
+ "giteeai",
// TODO: More agent support.
// "cohere", // Has tool calling and will need to build explicit support
// "huggingface" // Can be done but already has issues with no-chat templated. Needs to be tested.
diff --git a/server/.env.example b/server/.env.example
index c7c02a0a..47aef59d 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -156,6 +156,11 @@ SIG_SALT='salt' # Please generate random string at least 32 chars long.
# FOUNDRY_MODEL_PREF='phi-3.5-mini'
# FOUNDRY_MODEL_TOKEN_LIMIT=4096
+# LLM_PROVIDER='giteeai'
+# GITEE_AI_API_KEY=
+# GITEE_AI_MODEL_PREF=
+# GITEE_AI_MODEL_TOKEN_LIMIT=
+
###########################################
######## Embedding API SElECTION ##########
###########################################
diff --git a/server/endpoints/utils.js b/server/endpoints/utils.js
index 136245dc..fc5b4133 100644
--- a/server/endpoints/utils.js
+++ b/server/endpoints/utils.js
@@ -148,6 +148,9 @@ function getModelTag() {
case "zai":
model = process.env.ZAI_MODEL_PREF;
break;
+ case "giteeai":
+ model = process.env.GITEE_AI_MODEL_PREF;
+ break;
default:
model = "--";
break;
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index 34693396..28e44ca6 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -641,6 +641,11 @@ const SystemSettings = {
// Z.AI Keys
ZAiApiKey: !!process.env.ZAI_API_KEY,
ZAiModelPref: process.env.ZAI_MODEL_PREF,
+
+ // GiteeAI API Keys
+ GiteeAIApiKey: !!process.env.GITEE_AI_API_KEY,
+ GiteeAIModelPref: process.env.GITEE_AI_MODEL_PREF,
+ GiteeAITokenLimit: process.env.GITEE_AI_MODEL_TOKEN_LIMIT || 8192,
};
},
diff --git a/server/storage/models/.gitignore b/server/storage/models/.gitignore
index 9181a534..6bda7b7a 100644
--- a/server/storage/models/.gitignore
+++ b/server/storage/models/.gitignore
@@ -12,4 +12,5 @@ ppio
context-windows/*
MintplexLabs
cometapi
-fireworks
\ No newline at end of file
+fireworks
+giteeai
\ No newline at end of file
diff --git a/server/utils/AiProviders/giteeai/index.js b/server/utils/AiProviders/giteeai/index.js
new file mode 100644
index 00000000..e74a6d55
--- /dev/null
+++ b/server/utils/AiProviders/giteeai/index.js
@@ -0,0 +1,397 @@
+const fs = require("fs");
+const path = require("path");
+const { v4: uuidv4 } = require("uuid");
+const { safeJsonParse, toValidNumber } = require("../../http");
+const LEGACY_MODEL_MAP = require("../modelMap/legacy");
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const {
+ LLMPerformanceMonitor,
+} = require("../../helpers/chat/LLMPerformanceMonitor");
+const {
+ writeResponseChunk,
+ clientAbortedHandler,
+} = require("../../helpers/chat/responses");
+const cacheFolder = path.resolve(
+ process.env.STORAGE_DIR
+ ? path.resolve(process.env.STORAGE_DIR, "models", "giteeai")
+ : path.resolve(__dirname, `../../../storage/models/giteeai`)
+);
+
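+/**
+ * LLM provider for Gitee AI's OpenAI-compatible chat endpoint
+ * (https://ai.gitee.com/v1). Model metadata is cached on disk under
+ * storage/models/giteeai and refreshed once the cache is older than a week.
+ * Reasoning models that emit `reasoning_content` have that text wrapped in
+ * <think> tags before being returned or streamed to the client.
+ */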
+class GiteeAILLM {
+ constructor(embedder = null, modelPreference = null) {
+ if (!process.env.GITEE_AI_API_KEY)
+ throw new Error("No Gitee AI API key was set.");
+ const { OpenAI: OpenAIApi } = require("openai");
+
+ this.openai = new OpenAIApi({
+ apiKey: process.env.GITEE_AI_API_KEY,
+ baseURL: "https://ai.gitee.com/v1",
+ });
+ this.model = modelPreference || process.env.GITEE_AI_MODEL_PREF || "";
+ this.limits = {
+ history: this.promptWindowLimit() * 0.15,
+ system: this.promptWindowLimit() * 0.15,
+ user: this.promptWindowLimit() * 0.7,
+ };
+
+ this.embedder = embedder ?? new NativeEmbedder();
+ this.defaultTemp = 0.7;
+
+ if (!fs.existsSync(cacheFolder))
+ fs.mkdirSync(cacheFolder, { recursive: true });
+ this.cacheModelPath = path.resolve(cacheFolder, "models.json");
+ this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
+ this.log("Initialized with model:", this.model);
+ }
+
+ log(text, ...args) {
+ console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
+ }
+
+  // This checks if the .cached_at file has a timestamp that is more than 1 week (in millis)
+  // from the current date. If it is, then we will refetch from the API so that all the models
+  // are up to date.
+ #cacheIsStale() {
+ const MAX_STALE = 6.048e8; // 1 Week in MS
+ if (!fs.existsSync(this.cacheAtPath)) return true;
+ const now = Number(new Date());
+ const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
+ return now - timestampMs > MAX_STALE;
+ }
+
+ // This function fetches the models from the GiteeAI API and caches them locally.
+ async #syncModels() {
+ if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
+ return false;
+
+ this.log("Model cache is not present or stale. Fetching from GiteeAI API.");
+ await giteeAiModels();
+ return;
+ }
+
+ models() {
+ if (!fs.existsSync(this.cacheModelPath)) return {};
+ return safeJsonParse(
+ fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
+ {}
+ );
+ }
+
+ #appendContext(contextTexts = []) {
+ if (!contextTexts || !contextTexts.length) return "";
+ return (
+ "\nContext:\n" +
+ contextTexts
+ .map((text, i) => {
+ return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+ })
+ .join("")
+ );
+ }
+
+ streamingEnabled() {
+ return "streamGetChatCompletion" in this;
+ }
+
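+  /**
+   * Resolves the context window for a model: the GITEE_AI_MODEL_TOKEN_LIMIT
+   * env override wins, then the legacy model map entry, then an 8192 default.
+   * @param {string} model
+   * @returns {number}
+   */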
+ static promptWindowLimit(model) {
+ return (
+ toValidNumber(process.env.GITEE_AI_MODEL_TOKEN_LIMIT) ||
+ LEGACY_MODEL_MAP.giteeai[model] ||
+ 8192
+ );
+ }
+
+ promptWindowLimit() {
+ return (
+ toValidNumber(process.env.GITEE_AI_MODEL_TOKEN_LIMIT) ||
+ LEGACY_MODEL_MAP.giteeai[this.model] ||
+ 8192
+ );
+ }
+
+ async isValidChatCompletionModel(modelName = "") {
+ return true;
+ }
+
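+  /**
+   * Assembles the OpenAI-style message array: system prompt (with appended
+   * context texts), prior chat history, then the new user prompt.
+   */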
+ constructPrompt({
+ systemPrompt = "",
+ contextTexts = [],
+ chatHistory = [],
+ userPrompt = "",
+ }) {
+ const prompt = {
+ role: "system",
+ content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+ };
+ return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+ }
+
+ /**
+ * Parses and prepends reasoning from the response and returns the full text response.
+ * @param {Object} response
+ * @returns {string}
+ */
+ #parseReasoningFromResponse({ message }) {
+ let textResponse = message?.content;
+ if (
+ !!message?.reasoning_content &&
+ message.reasoning_content.trim().length > 0
+ )
+      textResponse = `<think>${message.reasoning_content}</think>${textResponse}`;
+ return textResponse;
+ }
+
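+  /**
+   * Non-streaming chat completion. Wraps the request in the performance
+   * monitor so token usage and duration metrics are returned alongside the
+   * parsed text response.
+   */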
+ async getChatCompletion(messages = null, { temperature = 0.7 }) {
+ const result = await LLMPerformanceMonitor.measureAsyncFunction(
+ this.openai.chat.completions
+ .create({
+ model: this.model,
+ messages,
+ temperature,
+ })
+ .catch((e) => {
+ throw new Error(e.message);
+ })
+ );
+
+ if (
+ !result?.output?.hasOwnProperty("choices") ||
+ result?.output?.choices?.length === 0
+ )
+ throw new Error(
+ `Invalid response body returned from GiteeAI: ${JSON.stringify(result.output)}`
+ );
+
+ return {
+ textResponse: this.#parseReasoningFromResponse(result.output.choices[0]),
+ metrics: {
+ prompt_tokens: result.output.usage.prompt_tokens || 0,
+ completion_tokens: result.output.usage.completion_tokens || 0,
+ total_tokens: result.output.usage.total_tokens || 0,
+ outputTps: result.output.usage.completion_tokens / result.duration,
+ duration: result.duration,
+ },
+ };
+ }
+
+ async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+ const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
+ this.openai.chat.completions.create({
+ model: this.model,
+ stream: true,
+ messages,
+ temperature,
+ }),
+ messages,
+ false
+ );
+
+ return measuredStreamRequest;
+ }
+
+ // TODO: This is a copy of the generic handleStream function in responses.js
+ // to specifically handle the GiteeAI reasoning model `reasoning_content` field.
+ // When or if ever possible, we should refactor this to be in the generic function.
+ handleStream(response, stream, responseProps) {
+ const { uuid = uuidv4(), sources = [] } = responseProps;
+ let hasUsageMetrics = false;
+ let usage = {
+ completion_tokens: 0,
+ };
+
+ return new Promise(async (resolve) => {
+ let fullText = "";
+ let reasoningText = "";
+
+ // Establish listener to early-abort a streaming response
+ // in case things go sideways or the user does not like the response.
+ // We preserve the generated text but continue as if chat was completed
+ // to preserve previously generated content.
+ const handleAbort = () => {
+ stream?.endMeasurement(usage);
+ clientAbortedHandler(resolve, fullText);
+ };
+ response.on("close", handleAbort);
+
+ try {
+ for await (const chunk of stream) {
+ const message = chunk?.choices?.[0];
+ const token = message?.delta?.content;
+ const reasoningToken = message?.delta?.reasoning_content;
+
+ if (
+ chunk.hasOwnProperty("usage") && // exists
+ !!chunk.usage && // is not null
+ Object.values(chunk.usage).length > 0 // has values
+ ) {
+ if (chunk.usage.hasOwnProperty("prompt_tokens")) {
+ usage.prompt_tokens = Number(chunk.usage.prompt_tokens);
+ }
+
+ if (chunk.usage.hasOwnProperty("completion_tokens")) {
+ hasUsageMetrics = true; // to stop estimating counter
+ usage.completion_tokens = Number(chunk.usage.completion_tokens);
+ }
+ }
+
+ // Reasoning models will always return the reasoning text before the token text.
+ if (reasoningToken) {
+ // If the reasoning text is empty (''), we need to initialize it
+ // and send the first chunk of reasoning text.
+ if (reasoningText.length === 0) {
+ writeResponseChunk(response, {
+ uuid,
+ sources: [],
+ type: "textResponseChunk",
+                textResponse: `<think>${reasoningToken}`,
+                close: false,
+                error: false,
+              });
+              reasoningText += `<think>${reasoningToken}`;
+ continue;
+ } else {
+ writeResponseChunk(response, {
+ uuid,
+ sources: [],
+ type: "textResponseChunk",
+ textResponse: reasoningToken,
+ close: false,
+ error: false,
+ });
+ reasoningText += reasoningToken;
+ }
+ }
+
+ // If the reasoning text is not empty, but the reasoning token is empty
+ // and the token text is not empty we need to close the reasoning text and begin sending the token text.
+ if (!!reasoningText && !reasoningToken && token) {
+ writeResponseChunk(response, {
+ uuid,
+ sources: [],
+ type: "textResponseChunk",
+            textResponse: `</think>`,
+            close: false,
+            error: false,
+          });
+          fullText += `${reasoningText}</think>`;
+ reasoningText = "";
+ }
+
+ if (token) {
+ fullText += token;
+ // If we never saw a usage metric, we can estimate them by number of completion chunks
+ if (!hasUsageMetrics) usage.completion_tokens++;
+ writeResponseChunk(response, {
+ uuid,
+ sources: [],
+ type: "textResponseChunk",
+ textResponse: token,
+ close: false,
+ error: false,
+ });
+ }
+
+ // LocalAi returns '' and others return null on chunks - the last chunk is not "" or null.
+ // Either way, the key `finish_reason` must be present to determine ending chunk.
+ if (
+ message?.hasOwnProperty("finish_reason") && // Got valid message and it is an object with finish_reason
+ message.finish_reason !== "" &&
+ message.finish_reason !== null
+ ) {
+ writeResponseChunk(response, {
+ uuid,
+ sources,
+ type: "textResponseChunk",
+ textResponse: "",
+ close: true,
+ error: false,
+ });
+ response.removeListener("close", handleAbort);
+ stream?.endMeasurement(usage);
+ resolve(fullText);
+ break; // Break streaming when a valid finish_reason is first encountered
+ }
+ }
+ } catch (e) {
+ console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`);
+ writeResponseChunk(response, {
+ uuid,
+ type: "abort",
+ textResponse: null,
+ sources: [],
+ close: true,
+ error: e.message,
+ });
+ stream?.endMeasurement(usage);
+ resolve(fullText); // Return what we currently have - if anything.
+ }
+ });
+ }
+
+ async embedTextInput(textInput) {
+ return await this.embedder.embedTextInput(textInput);
+ }
+
+ async embedChunks(textChunks = []) {
+ return await this.embedder.embedChunks(textChunks);
+ }
+
+ async compressMessages(promptArgs = {}, rawHistory = []) {
+ const { messageArrayCompressor } = require("../../helpers/chat");
+ const messageArray = this.constructPrompt(promptArgs);
+ return await messageArrayCompressor(this, messageArray, rawHistory);
+ }
+}
+
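+/**
+ * Fetches the available text2text chat models from the Gitee AI API and
+ * caches the result (plus a .cached_at timestamp) in the local model cache
+ * folder. Returns a map of model id -> { id, name, organization }, or an
+ * empty object if the request fails.
+ */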
+async function giteeAiModels() {
+ const url = new URL("https://ai.gitee.com/v1/models");
+ url.searchParams.set("type", "text2text");
+ return await fetch(url.toString(), {
+ method: "GET",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${process.env.GITEE_AI_API_KEY}`,
+ },
+ })
+ .then((res) => res.json())
+ .then(({ data = [] }) => data)
+ .then((models = []) => {
+ const validModels = {};
+ models.forEach(
+ (model) =>
+ (validModels[model.id] = {
+ id: model.id,
+ name: model.id,
+ organization: model.owned_by,
+ })
+ );
+ // Cache all response information
+ if (!fs.existsSync(cacheFolder))
+ fs.mkdirSync(cacheFolder, { recursive: true });
+ fs.writeFileSync(
+ path.resolve(cacheFolder, "models.json"),
+ JSON.stringify(validModels),
+ {
+ encoding: "utf-8",
+ }
+ );
+ fs.writeFileSync(
+ path.resolve(cacheFolder, ".cached_at"),
+ String(Number(new Date())),
+ {
+ encoding: "utf-8",
+ }
+ );
+
+ return validModels;
+ })
+ .catch((e) => {
+ console.error(e);
+ return {};
+ });
+}
+
+module.exports = {
+ GiteeAILLM,
+ giteeAiModels,
+};
diff --git a/server/utils/AiProviders/modelMap/legacy.js b/server/utils/AiProviders/modelMap/legacy.js
index 1187cf51..d8de4e54 100644
--- a/server/utils/AiProviders/modelMap/legacy.js
+++ b/server/utils/AiProviders/modelMap/legacy.js
@@ -120,5 +120,27 @@ const LEGACY_MODEL_MAP = {
xai: {
"grok-beta": 131072,
},
+ giteeai: {
+ "Qwen2.5-72B-Instruct": 16_384,
+ "Qwen2.5-14B-Instruct": 24_576,
+ "Qwen2-7B-Instruct": 24_576,
+ "Qwen2.5-32B-Instruct": 32_768,
+ "Qwen2-72B-Instruct": 32_768,
+ "Qwen2-VL-72B": 32_768,
+ "QwQ-32B-Preview": 32_768,
+ "Yi-34B-Chat": 4_096,
+ "glm-4-9b-chat": 32_768,
+ "deepseek-coder-33B-instruct": 8_192,
+ "codegeex4-all-9b": 32_768,
+ "InternVL2-8B": 32_768,
+ "InternVL2.5-26B": 32_768,
+ "InternVL2.5-78B": 32_768,
+ "DeepSeek-R1-Distill-Qwen-32B": 32_768,
+ "DeepSeek-R1-Distill-Qwen-1.5B": 32_768,
+ "DeepSeek-R1-Distill-Qwen-14B": 32_768,
+ "DeepSeek-R1-Distill-Qwen-7B": 32_768,
+ "DeepSeek-V3": 32_768,
+ "DeepSeek-R1": 32_768,
+ },
};
module.exports = LEGACY_MODEL_MAP;
diff --git a/server/utils/agents/aibitat/index.js b/server/utils/agents/aibitat/index.js
index 9edfbc4c..add1adb1 100644
--- a/server/utils/agents/aibitat/index.js
+++ b/server/utils/agents/aibitat/index.js
@@ -988,6 +988,8 @@ ${this.getHistory({ to: route.to })
return new Providers.CometApiProvider({ model: config.model });
case "foundry":
return new Providers.FoundryProvider({ model: config.model });
+ case "giteeai":
+ return new Providers.GiteeAIProvider({ model: config.model });
default:
throw new Error(
`Unknown provider: ${config.provider}. Please use a valid provider.`
diff --git a/server/utils/agents/aibitat/providers/ai-provider.js b/server/utils/agents/aibitat/providers/ai-provider.js
index ec9884a1..c1a41909 100644
--- a/server/utils/agents/aibitat/providers/ai-provider.js
+++ b/server/utils/agents/aibitat/providers/ai-provider.js
@@ -231,6 +231,14 @@ class Provider {
apiKey: process.env.COMETAPI_LLM_API_KEY ?? null,
...config,
});
+ case "giteeai":
+ return new ChatOpenAI({
+ configuration: {
+ baseURL: "https://ai.gitee.com/v1",
+ },
+ apiKey: process.env.GITEE_AI_API_KEY ?? null,
+ ...config,
+ });
// OSS Model Runners
// case "anythingllm_ollama":
// return new ChatOllama({
diff --git a/server/utils/agents/aibitat/providers/giteeai.js b/server/utils/agents/aibitat/providers/giteeai.js
new file mode 100644
index 00000000..261760a8
--- /dev/null
+++ b/server/utils/agents/aibitat/providers/giteeai.js
@@ -0,0 +1,85 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+
+class GiteeAIProvider extends InheritMultiple([Provider, UnTooled]) {
+ model;
+
+ constructor(config = {}) {
+ super();
+ const { model = "DeepSeek-R1" } = config;
+ this._client = new OpenAI({
+ baseURL: "https://ai.gitee.com/v1",
+ apiKey: process.env.GITEE_AI_API_KEY ?? null,
+ maxRetries: 3,
+ });
+ this.model = model;
+ this.verbose = true;
+ }
+
+ get client() {
+ return this._client;
+ }
+
+ get supportsAgentStreaming() {
+ return true;
+ }
+
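+  /**
+   * Runs a non-streaming chat completion and returns the assistant message
+   * content, or null if the request errors out.
+   */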
+ async #handleFunctionCallChat({ messages = [] }) {
+ return await this.client.chat.completions
+ .create({
+ model: this.model,
+ messages,
+ })
+ .then((result) => {
+ if (!result.hasOwnProperty("choices"))
+ throw new Error("GiteeAI chat: No results!");
+ if (result.choices.length === 0)
+ throw new Error("GiteeAI chat: No results length!");
+ return result.choices[0].message.content;
+ })
+ .catch((_) => {
+ return null;
+ });
+ }
+
+ async #handleFunctionCallStream({ messages = [] }) {
+ return await this.client.chat.completions.create({
+ model: this.model,
+ stream: true,
+ messages,
+ });
+ }
+
+ async stream(messages, functions = [], eventHandler = null) {
+ return await UnTooled.prototype.stream.call(
+ this,
+ messages,
+ functions,
+ this.#handleFunctionCallStream.bind(this),
+ eventHandler
+ );
+ }
+
+ async complete(messages, functions = []) {
+ return await UnTooled.prototype.complete.call(
+ this,
+ messages,
+ functions,
+ this.#handleFunctionCallChat.bind(this)
+ );
+ }
+
+ /**
+ * Get the cost of the completion.
+ *
+ * @param _usage The completion to get the cost for.
+ * @returns The cost of the completion.
+ */
+ getCost(_usage) {
+ return 0;
+ }
+}
+
+module.exports = GiteeAIProvider;
diff --git a/server/utils/agents/aibitat/providers/index.js b/server/utils/agents/aibitat/providers/index.js
index f927c82c..9ac8465f 100644
--- a/server/utils/agents/aibitat/providers/index.js
+++ b/server/utils/agents/aibitat/providers/index.js
@@ -27,6 +27,7 @@ const DellProAiStudioProvider = require("./dellProAiStudio.js");
const MoonshotAiProvider = require("./moonshotAi.js");
const CometApiProvider = require("./cometapi.js");
const FoundryProvider = require("./foundry.js");
+const GiteeAIProvider = require("./giteeai.js");
module.exports = {
OpenAIProvider,
@@ -58,4 +59,5 @@ module.exports = {
DellProAiStudioProvider,
MoonshotAiProvider,
FoundryProvider,
+ GiteeAIProvider,
};
diff --git a/server/utils/agents/index.js b/server/utils/agents/index.js
index 10972594..b2d95676 100644
--- a/server/utils/agents/index.js
+++ b/server/utils/agents/index.js
@@ -208,17 +208,17 @@ class AgentHandler {
if (!process.env.MOONSHOT_AI_MODEL_PREF)
throw new Error("Moonshot AI model must be set to use agents.");
break;
-
case "cometapi":
if (!process.env.COMETAPI_LLM_API_KEY)
throw new Error("CometAPI API Key must be provided to use agents.");
break;
-
case "foundry":
if (!process.env.FOUNDRY_BASE_PATH)
throw new Error("Foundry base path must be provided to use agents.");
break;
-
+      case "giteeai":
+        if (!process.env.GITEE_AI_API_KEY)
+          throw new Error("GiteeAI API Key must be provided to use agents.");
+        break;
default:
throw new Error(
"No workspace agent provider set. Please set your agent provider in the workspace's settings"
@@ -295,6 +295,8 @@ class AgentHandler {
return process.env.COMETAPI_LLM_MODEL_PREF ?? "gpt-5-mini";
case "foundry":
return process.env.FOUNDRY_MODEL_PREF ?? null;
+ case "giteeai":
+ return process.env.GITEE_AI_MODEL_PREF ?? null;
default:
return null;
}
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index 27371909..4a06a1b3 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -42,6 +42,7 @@ const SUPPORT_CUSTOM_MODELS = [
"foundry",
"cohere",
"zai",
+ "giteeai",
// Embedding Engines
"native-embedder",
"cohere-embedder",
@@ -113,6 +114,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
return await getCohereModels(apiKey, "embed");
case "openrouter-embedder":
return await getOpenRouterEmbeddingModels();
+ case "giteeai":
+ return await getGiteeAIModels(apiKey);
default:
return { models: [], error: "Invalid provider for custom models" };
}
@@ -596,6 +599,20 @@ async function getDeepSeekModels(apiKey = null) {
return { models, error: null };
}
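+// Builds the custom-model list for the settings UI by reusing the GiteeAI
+// provider's cached giteeAiModels() fetcher.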
+async function getGiteeAIModels() {
+ const { giteeAiModels } = require("../AiProviders/giteeai");
+ const modelMap = await giteeAiModels();
+  if (Object.keys(modelMap).length === 0) return { models: [], error: null };
+ const models = Object.values(modelMap).map((model) => {
+ return {
+ id: model.id,
+ organization: model.organization ?? "GiteeAI",
+ name: model.id,
+ };
+ });
+ return { models, error: null };
+}
+
async function getXAIModels(_apiKey = null) {
const { OpenAI: OpenAIApi } = require("openai");
const apiKey =
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index f1cc1fde..01e24926 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -225,6 +225,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
case "zai":
const { ZAiLLM } = require("../AiProviders/zai");
return new ZAiLLM(embedder, model);
+ case "giteeai":
+ const { GiteeAILLM } = require("../AiProviders/giteeai");
+ return new GiteeAILLM(embedder, model);
default:
throw new Error(
`ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
@@ -387,6 +390,9 @@ function getLLMProviderClass({ provider = null } = {}) {
case "zai":
const { ZAiLLM } = require("../AiProviders/zai");
return ZAiLLM;
+ case "giteeai":
+ const { GiteeAILLM } = require("../AiProviders/giteeai");
+ return GiteeAILLM;
default:
return null;
}
@@ -461,6 +467,8 @@ function getBaseLLMProviderModel({ provider = null } = {}) {
return process.env.FOUNDRY_MODEL_PREF;
case "zai":
return process.env.ZAI_MODEL_PREF;
+ case "giteeai":
+ return process.env.GITEE_AI_MODEL_PREF;
default:
return null;
}
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 5bfe58f1..43b48794 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -775,6 +775,20 @@ const KEY_MAPPING = {
envKey: "ZAI_MODEL_PREF",
checks: [isNotEmpty],
},
+
+ // GiteeAI Options
+ GiteeAIApiKey: {
+ envKey: "GITEE_AI_API_KEY",
+ checks: [isNotEmpty],
+ },
+ GiteeAIModelPref: {
+ envKey: "GITEE_AI_MODEL_PREF",
+ checks: [isNotEmpty],
+ },
+ GiteeAITokenLimit: {
+ envKey: "GITEE_AI_MODEL_TOKEN_LIMIT",
+ checks: [nonZero],
+ },
};
function isNotEmpty(input = "") {
@@ -887,6 +901,7 @@ function supportedLLM(input = "") {
"cometapi",
"foundry",
"zai",
+ "giteeai",
].includes(input);
return validSelection ? null : `${input} is not a valid LLM provider.`;
}