Support Gitee AI (LLM Provider) (#3361)

* Support Gitee AI (LLM Provider)

* refactor(server): rework the GiteeAI model context-window limit handling; hard-code the window limits for now, with a plan to source them from an external API with caching

* updates for Gitee AI

* use legacy lookup since the Gitee API does not expose token context windows

* add more missing records

* reorder imports

---------

Co-authored-by: 方程 <fangcheng@oschina.cn>
Co-authored-by: timothycarambat <rambat1010@gmail.com>
Authored by 方程 on 2025-11-26 06:19:32 +08:00; committed by GitHub
parent 66e44f65b4
commit 90e474abcb
22 changed files with 726 additions and 4 deletions

View File

@@ -102,6 +102,7 @@ AnythingLLM divides your documents into objects called `workspaces`. A Workspace
- [Z.AI (chat models)](https://z.ai/model-api)
- [Novita AI (chat models)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [PPIO](https://ppinfra.com?utm_source=github_anything-llm)
- [Gitee AI](https://ai.gitee.com/)
- [Moonshot AI](https://www.moonshot.ai/)
- [Microsoft Foundry Local](https://github.com/microsoft/Foundry-Local)
- [CometAPI (chat models)](https://api.cometapi.com/)

View File

@@ -157,6 +157,11 @@ GID='1000'
# FOUNDRY_MODEL_PREF='phi-3.5-mini'
# FOUNDRY_MODEL_TOKEN_LIMIT=4096
# LLM_PROVIDER='giteeai'
# GITEE_AI_API_KEY=
# GITEE_AI_MODEL_PREF=
# GITEE_AI_MODEL_TOKEN_LIMIT=
###########################################
######## Embedding API SELECTION ##########
###########################################
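These variables select Gitee AI as the LLM provider and point the server at its OpenAI-compatible endpoint. As a quick sanity check of a key, a minimal sketch (assumes GITEE_AI_API_KEY is exported; the fallback model name mirrors the agent provider's constructor default later in this diff):

const { OpenAI } = require("openai");

const client = new OpenAI({
  apiKey: process.env.GITEE_AI_API_KEY,
  baseURL: "https://ai.gitee.com/v1", // same base URL the provider uses
});

client.chat.completions
  .create({
    model: process.env.GITEE_AI_MODEL_PREF || "DeepSeek-R1",
    messages: [{ role: "user", content: "ping" }],
  })
  .then((res) => console.log(res.choices[0].message.content))
  .catch(console.error);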

View File

@@ -0,0 +1,116 @@
import { useState, useEffect } from "react";
import System from "@/models/system";
export default function GiteeAIOptions({ settings }) {
return (
<div className="flex gap-[36px] mt-1.5">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-3">
API Key
</label>
<input
type="password"
name="GiteeAIApiKey"
className="border-none bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
placeholder="GiteeAI API Key"
defaultValue={settings?.GiteeAIApiKey ? "*".repeat(20) : ""}
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
{!settings?.credentialsOnly && (
<>
<GiteeAIModelSelection settings={settings} />
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-2">
Token context window
</label>
<input
type="number"
name="GiteeAITokenLimit"
className="border-none bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
placeholder="Content window limit (eg: 8192)"
min={1}
onScroll={(e) => e.target.blur()}
defaultValue={settings?.GiteeAITokenLimit}
required={true}
autoComplete="off"
/>
</div>
</>
)}
</div>
);
}
function GiteeAIModelSelection({ settings }) {
const [groupedModels, setGroupedModels] = useState({});
const [loading, setLoading] = useState(true);
useEffect(() => {
async function findCustomModels() {
setLoading(true);
const { models = [] } = await System.customModels("giteeai");
if (models?.length > 0) {
const modelsByOrganization = models.reduce((acc, model) => {
acc[model.organization] = acc[model.organization] || [];
acc[model.organization].push(model);
return acc;
}, {});
setGroupedModels(modelsByOrganization);
}
setLoading(false);
}
findCustomModels();
}, []);
if (loading) {
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-3">
Chat Model Selection
</label>
<select
name="GiteeAIModelPref"
disabled={true}
className="border-none bg-theme-settings-input-bg border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
<option disabled={true} selected={true}>
-- loading available models --
</option>
</select>
</div>
);
}
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-3">
Chat Model Selection
</label>
<select
name="GiteeAIModelPref"
required={true}
className="border-none bg-theme-settings-input-bg border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{Object.keys(groupedModels)
.sort()
.map((organization) => (
<optgroup key={organization} label={organization}>
{groupedModels[organization].map((model) => (
<option
key={model.id}
value={model.id}
selected={settings?.GiteeAIModelPref === model.id}
>
{model.name}
</option>
))}
</optgroup>
))}
</select>
</div>
);
}

Binary file not shown (added image, 5.6 KiB)

View File

@@ -36,6 +36,7 @@ import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
import CometApiLogo from "@/media/llmprovider/cometapi.png";
import FoundryLogo from "@/media/llmprovider/foundry-local.png";
import GiteeAILogo from "@/media/llmprovider/giteeai.png";
import PreLoader from "@/components/Preloader";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -69,6 +70,7 @@ import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
import MoonshotAiOptions from "@/components/LLMSelection/MoonshotAiOptions";
import FoundryOptions from "@/components/LLMSelection/FoundryOptions";
import GiteeAIOptions from "@/components/LLMSelection/GiteeAIOptions/index.jsx";
import LLMItem from "@/components/LLMSelection/LLMItem";
import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -345,6 +347,14 @@ export const AVAILABLE_LLM_PROVIDERS = [
description: "Run Z.AI's powerful GLM models.",
requiredConfig: ["ZAiApiKey"],
},
{
name: "GiteeAI",
value: "giteeai",
logo: GiteeAILogo,
options: (settings) => <GiteeAIOptions settings={settings} />,
description: "Run GiteeAI's powerful LLMs.",
requiredConfig: ["GiteeAIApiKey"],
},
{
name: "Generic OpenAI",
value: "generic-openai",

View File

@@ -42,6 +42,7 @@ import DPAISLogo from "@/media/llmprovider/dpais.png";
import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
import CometApiLogo from "@/media/llmprovider/cometapi.png";
import FoundryLogo from "@/media/llmprovider/foundry-local.png";
import GiteeAILogo from "@/media/llmprovider/giteeai.png";
import React, { useState, useEffect } from "react";
import paths from "@/utils/paths";
@@ -279,6 +280,13 @@ export const LLM_SELECTION_PRIVACY = {
],
logo: FoundryLogo,
},
giteeai: {
name: "GiteeAI",
description: [
"Your model and chat contents are visible to GiteeAI in accordance with their terms of service.",
],
logo: GiteeAILogo,
},
};
export const VECTOR_DB_PRIVACY = {

View File

@@ -30,6 +30,7 @@ import PPIOLogo from "@/media/llmprovider/ppio.png";
import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
import CometApiLogo from "@/media/llmprovider/cometapi.png";
import GiteeAILogo from "@/media/llmprovider/giteeai.png";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
@@ -61,6 +62,7 @@ import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
import MoonshotAiOptions from "@/components/LLMSelection/MoonshotAiOptions";
import CometApiLLMOptions from "@/components/LLMSelection/CometApiLLMOptions";
import GiteeAiOptions from "@/components/LLMSelection/GiteeAIOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import System from "@/models/system";
@@ -290,6 +292,13 @@ const LLMS = [
options: (settings) => <CometApiLLMOptions settings={settings} />,
description: "500+ AI Models all in one API.",
},
{
name: "GiteeAI",
value: "giteeai",
logo: GiteeAILogo,
options: (settings) => <GiteeAiOptions settings={settings} />,
description: "Run GiteeAI's powerful LLMs.",
},
];
export default function LLMPreference({

View File

@@ -35,6 +35,7 @@ const ENABLED_PROVIDERS = [
"cometapi",
"foundry",
"zai",
"giteeai",
// TODO: More agent support.
// "cohere", // Has tool calling and will need to build explicit support
// "huggingface" // Can be done but already has issues with no-chat templated. Needs to be tested.

View File

@@ -156,6 +156,11 @@ SIG_SALT='salt' # Please generate random string at least 32 chars long.
# FOUNDRY_MODEL_PREF='phi-3.5-mini'
# FOUNDRY_MODEL_TOKEN_LIMIT=4096
# LLM_PROVIDER='giteeai'
# GITEE_AI_API_KEY=
# GITEE_AI_MODEL_PREF=
# GITEE_AI_MODEL_TOKEN_LIMIT=
###########################################
######## Embedding API SELECTION ##########
###########################################

View File

@@ -148,6 +148,9 @@ function getModelTag() {
case "zai":
model = process.env.ZAI_MODEL_PREF;
break;
case "giteeai":
model = process.env.GITEE_AI_MODEL_PREF;
break;
default:
model = "--";
break;

View File

@@ -641,6 +641,11 @@ const SystemSettings = {
// Z.AI Keys
ZAiApiKey: !!process.env.ZAI_API_KEY,
ZAiModelPref: process.env.ZAI_MODEL_PREF,
// GiteeAI API Keys
GiteeAIApiKey: !!process.env.GITEE_AI_API_KEY,
GiteeAIModelPref: process.env.GITEE_AI_MODEL_PREF,
GiteeAITokenLimit: process.env.GITEE_AI_MODEL_TOKEN_LIMIT || 8192,
};
},

View File

@@ -13,3 +13,4 @@ context-windows/*
MintplexLabs
cometapi
fireworks
giteeai

View File

@@ -0,0 +1,397 @@
const fs = require("fs");
const path = require("path");
const { v4: uuidv4 } = require("uuid");
const { safeJsonParse, toValidNumber } = require("../../http");
const LEGACY_MODEL_MAP = require("../modelMap/legacy");
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
writeResponseChunk,
clientAbortedHandler,
} = require("../../helpers/chat/responses");
const cacheFolder = path.resolve(
process.env.STORAGE_DIR
? path.resolve(process.env.STORAGE_DIR, "models", "giteeai")
: path.resolve(__dirname, `../../../storage/models/giteeai`)
);
class GiteeAILLM {
constructor(embedder = null, modelPreference = null) {
if (!process.env.GITEE_AI_API_KEY)
throw new Error("No Gitee AI API key was set.");
const { OpenAI: OpenAIApi } = require("openai");
this.openai = new OpenAIApi({
apiKey: process.env.GITEE_AI_API_KEY,
baseURL: "https://ai.gitee.com/v1",
});
this.model = modelPreference || process.env.GITEE_AI_MODEL_PREF || "";
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
user: this.promptWindowLimit() * 0.7,
};
this.embedder = embedder ?? new NativeEmbedder();
this.defaultTemp = 0.7;
if (!fs.existsSync(cacheFolder))
fs.mkdirSync(cacheFolder, { recursive: true });
this.cacheModelPath = path.resolve(cacheFolder, "models.json");
this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
this.log("Initialized with model:", this.model);
}
log(text, ...args) {
console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
}
// Checks whether the .cached_at timestamp is more than one week (in ms)
// older than the current date. If it is, we refetch from the GiteeAI API
// so that the cached model list stays up to date.
#cacheIsStale() {
const MAX_STALE = 6.048e8; // 1 Week in MS
if (!fs.existsSync(this.cacheAtPath)) return true;
const now = Number(new Date());
const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
return now - timestampMs > MAX_STALE;
}
// This function fetches the models from the GiteeAI API and caches them locally.
async #syncModels() {
if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
return false;
this.log("Model cache is not present or stale. Fetching from GiteeAI API.");
await giteeAiModels();
return;
}
models() {
if (!fs.existsSync(this.cacheModelPath)) return {};
return safeJsonParse(
fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
{}
);
}
#appendContext(contextTexts = []) {
if (!contextTexts || !contextTexts.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")
);
}
streamingEnabled() {
return "streamGetChatCompletion" in this;
}
static promptWindowLimit(model) {
return (
toValidNumber(process.env.GITEE_AI_MODEL_TOKEN_LIMIT) ||
LEGACY_MODEL_MAP.giteeai[model] ||
8192
);
}
promptWindowLimit() {
return (
toValidNumber(process.env.GITEE_AI_MODEL_TOKEN_LIMIT) ||
LEGACY_MODEL_MAP.giteeai[this.model] ||
8192
);
}
async isValidChatCompletionModel(modelName = "") {
return true;
}
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
}
/**
* Parses and prepends reasoning from the response and returns the full text response.
* @param {Object} response
* @returns {string}
*/
#parseReasoningFromResponse({ message }) {
let textResponse = message?.content;
if (
!!message?.reasoning_content &&
message.reasoning_content.trim().length > 0
)
textResponse = `<think>${message.reasoning_content}</think>${textResponse}`;
return textResponse;
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.openai.chat.completions
.create({
model: this.model,
messages,
temperature,
})
.catch((e) => {
throw new Error(e.message);
})
);
if (
!result?.output?.hasOwnProperty("choices") ||
result?.output?.choices?.length === 0
)
throw new Error(
`Invalid response body returned from GiteeAI: ${JSON.stringify(result.output)}`
);
return {
textResponse: this.#parseReasoningFromResponse(result.output.choices[0]),
metrics: {
prompt_tokens: result.output.usage.prompt_tokens || 0,
completion_tokens: result.output.usage.completion_tokens || 0,
total_tokens: result.output.usage.total_tokens || 0,
outputTps: result.output.usage.completion_tokens / result.duration,
duration: result.duration,
},
};
}
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
}),
messages,
false
);
return measuredStreamRequest;
}
// TODO: This is a copy of the generic handleStream function in responses.js
// to specifically handle the GiteeAI reasoning model `reasoning_content` field.
// When or if ever possible, we should refactor this to be in the generic function.
handleStream(response, stream, responseProps) {
const { uuid = uuidv4(), sources = [] } = responseProps;
let hasUsageMetrics = false;
let usage = {
completion_tokens: 0,
};
return new Promise(async (resolve) => {
let fullText = "";
let reasoningText = "";
// Establish listener to early-abort a streaming response
// in case things go sideways or the user does not like the response.
// We preserve the generated text but continue as if chat was completed
// to preserve previously generated content.
const handleAbort = () => {
stream?.endMeasurement(usage);
clientAbortedHandler(resolve, fullText);
};
response.on("close", handleAbort);
try {
for await (const chunk of stream) {
const message = chunk?.choices?.[0];
const token = message?.delta?.content;
const reasoningToken = message?.delta?.reasoning_content;
if (
chunk.hasOwnProperty("usage") && // exists
!!chunk.usage && // is not null
Object.values(chunk.usage).length > 0 // has values
) {
if (chunk.usage.hasOwnProperty("prompt_tokens")) {
usage.prompt_tokens = Number(chunk.usage.prompt_tokens);
}
if (chunk.usage.hasOwnProperty("completion_tokens")) {
hasUsageMetrics = true; // to stop estimating counter
usage.completion_tokens = Number(chunk.usage.completion_tokens);
}
}
// Reasoning models will always return the reasoning text before the token text.
if (reasoningToken) {
// If the reasoning text is empty (''), we need to initialize it
// and send the first chunk of reasoning text.
if (reasoningText.length === 0) {
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: `<think>${reasoningToken}`,
close: false,
error: false,
});
reasoningText += `<think>${reasoningToken}`;
continue;
} else {
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: reasoningToken,
close: false,
error: false,
});
reasoningText += reasoningToken;
}
}
// If the reasoning text is not empty, but the reasoning token is empty
// and the token text is not empty we need to close the reasoning text and begin sending the token text.
if (!!reasoningText && !reasoningToken && token) {
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: `</think>`,
close: false,
error: false,
});
fullText += `${reasoningText}</think>`;
reasoningText = "";
}
if (token) {
fullText += token;
// If we never saw a usage metric, we can estimate them by number of completion chunks
if (!hasUsageMetrics) usage.completion_tokens++;
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: token,
close: false,
error: false,
});
}
// LocalAi returns '' and others return null on chunks - the last chunk is not "" or null.
// Either way, the key `finish_reason` must be present to determine ending chunk.
if (
message?.hasOwnProperty("finish_reason") && // Got valid message and it is an object with finish_reason
message.finish_reason !== "" &&
message.finish_reason !== null
) {
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
response.removeListener("close", handleAbort);
stream?.endMeasurement(usage);
resolve(fullText);
break; // Break streaming when a valid finish_reason is first encountered
}
}
} catch (e) {
console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`);
writeResponseChunk(response, {
uuid,
type: "abort",
textResponse: null,
sources: [],
close: true,
error: e.message,
});
stream?.endMeasurement(usage);
resolve(fullText); // Return what we currently have - if anything.
}
});
}
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
}
async function giteeAiModels() {
const url = new URL("https://ai.gitee.com/v1/models");
url.searchParams.set("type", "text2text");
return await fetch(url.toString(), {
method: "GET",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${process.env.GITEE_AI_API_KEY}`,
},
})
.then((res) => res.json())
.then(({ data = [] }) => data)
.then((models = []) => {
const validModels = {};
models.forEach(
(model) =>
(validModels[model.id] = {
id: model.id,
name: model.id,
organization: model.owned_by,
})
);
// Cache all response information
if (!fs.existsSync(cacheFolder))
fs.mkdirSync(cacheFolder, { recursive: true });
fs.writeFileSync(
path.resolve(cacheFolder, "models.json"),
JSON.stringify(validModels),
{
encoding: "utf-8",
}
);
fs.writeFileSync(
path.resolve(cacheFolder, ".cached_at"),
String(Number(new Date())),
{
encoding: "utf-8",
}
);
return validModels;
})
.catch((e) => {
console.error(e);
return {};
});
}
module.exports = {
GiteeAILLM,
giteeAiModels,
};
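The fetcher above caches the model list at storage/models/giteeai/models.json, keyed by model id, with the one-week staleness window described earlier. A sketch of the cached shape (entry values illustrative; organization comes from the API's owned_by field):

{
  "DeepSeek-R1": {
    "id": "DeepSeek-R1",
    "name": "DeepSeek-R1",
    "organization": "deepseek-ai"
  }
}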

View File

@@ -120,5 +120,27 @@ const LEGACY_MODEL_MAP = {
xai: {
"grok-beta": 131072,
},
giteeai: {
"Qwen2.5-72B-Instruct": 16_384,
"Qwen2.5-14B-Instruct": 24_576,
"Qwen2-7B-Instruct": 24_576,
"Qwen2.5-32B-Instruct": 32_768,
"Qwen2-72B-Instruct": 32_768,
"Qwen2-VL-72B": 32_768,
"QwQ-32B-Preview": 32_768,
"Yi-34B-Chat": 4_096,
"glm-4-9b-chat": 32_768,
"deepseek-coder-33B-instruct": 8_192,
"codegeex4-all-9b": 32_768,
"InternVL2-8B": 32_768,
"InternVL2.5-26B": 32_768,
"InternVL2.5-78B": 32_768,
"DeepSeek-R1-Distill-Qwen-32B": 32_768,
"DeepSeek-R1-Distill-Qwen-1.5B": 32_768,
"DeepSeek-R1-Distill-Qwen-14B": 32_768,
"DeepSeek-R1-Distill-Qwen-7B": 32_768,
"DeepSeek-V3": 32_768,
"DeepSeek-R1": 32_768,
},
};
module.exports = LEGACY_MODEL_MAP;
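These entries back the legacy lookup mentioned in the commit message: promptWindowLimit checks the GITEE_AI_MODEL_TOKEN_LIMIT override first, then this map, then falls back to 8192. A sketch of the resolution (values follow directly from the map above):

// With no GITEE_AI_MODEL_TOKEN_LIMIT set:
GiteeAILLM.promptWindowLimit("DeepSeek-R1"); // 32768, from the legacy map
GiteeAILLM.promptWindowLimit("unlisted-model"); // 8192 fallback
// With GITEE_AI_MODEL_TOKEN_LIMIT=16000, both calls return 16000.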

View File

@@ -988,6 +988,8 @@ ${this.getHistory({ to: route.to })
return new Providers.CometApiProvider({ model: config.model });
case "foundry":
return new Providers.FoundryProvider({ model: config.model });
case "giteeai":
return new Providers.GiteeAIProvider({ model: config.model });
default:
throw new Error(
`Unknown provider: ${config.provider}. Please use a valid provider.`

View File

@@ -231,6 +231,14 @@ class Provider {
apiKey: process.env.COMETAPI_LLM_API_KEY ?? null,
...config,
});
case "giteeai":
return new ChatOpenAI({
configuration: {
baseURL: "https://ai.gitee.com/v1",
},
apiKey: process.env.GITEE_AI_API_KEY ?? null,
...config,
});
// OSS Model Runners
// case "anythingllm_ollama":
// return new ChatOllama({

View File

@@ -0,0 +1,85 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
class GiteeAIProvider extends InheritMultiple([Provider, UnTooled]) {
model;
constructor(config = {}) {
super();
const { model = "DeepSeek-R1" } = config;
this._client = new OpenAI({
baseURL: "https://ai.gitee.com/v1",
apiKey: process.env.GITEE_AI_API_KEY ?? null,
maxRetries: 3,
});
this.model = model;
this.verbose = true;
}
get client() {
return this._client;
}
get supportsAgentStreaming() {
return true;
}
async #handleFunctionCallChat({ messages = [] }) {
return await this.client.chat.completions
.create({
model: this.model,
messages,
})
.then((result) => {
if (!result.hasOwnProperty("choices"))
throw new Error("GiteeAI chat: No results!");
if (result.choices.length === 0)
throw new Error("GiteeAI chat: No results length!");
return result.choices[0].message.content;
})
.catch((_) => {
return null;
});
}
async #handleFunctionCallStream({ messages = [] }) {
return await this.client.chat.completions.create({
model: this.model,
stream: true,
messages,
});
}
async stream(messages, functions = [], eventHandler = null) {
return await UnTooled.prototype.stream.call(
this,
messages,
functions,
this.#handleFunctionCallStream.bind(this),
eventHandler
);
}
async complete(messages, functions = []) {
return await UnTooled.prototype.complete.call(
this,
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
}
/**
 * Get the cost of the completion.
 *
 * @param _usage The usage record for the completion.
 * @returns The cost of the completion.
 */
getCost(_usage) {
return 0;
}
}
module.exports = GiteeAIProvider;
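A usage sketch for the agent provider, assuming an async context; the model name is the constructor default, and the empty array passes no tool definitions:

const GiteeAIProvider = require("./giteeai.js");

const provider = new GiteeAIProvider({ model: "DeepSeek-R1" });
const completion = await provider.complete(
  [{ role: "user", content: "Hello" }],
  [] // functions: none
);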

View File

@@ -27,6 +27,7 @@ const DellProAiStudioProvider = require("./dellProAiStudio.js");
const MoonshotAiProvider = require("./moonshotAi.js");
const CometApiProvider = require("./cometapi.js");
const FoundryProvider = require("./foundry.js");
const GiteeAIProvider = require("./giteeai.js");
module.exports = {
OpenAIProvider,
@@ -58,4 +59,5 @@ module.exports = {
DellProAiStudioProvider,
MoonshotAiProvider,
FoundryProvider,
GiteeAIProvider,
};

View File

@@ -208,17 +208,17 @@ class AgentHandler {
if (!process.env.MOONSHOT_AI_MODEL_PREF)
throw new Error("Moonshot AI model must be set to use agents.");
break;
case "cometapi":
if (!process.env.COMETAPI_LLM_API_KEY)
throw new Error("CometAPI API Key must be provided to use agents.");
break;
case "foundry":
if (!process.env.FOUNDRY_BASE_PATH)
throw new Error("Foundry base path must be provided to use agents.");
break;
case "giteeai":
if (!process.env.GITEE_AI_API_KEY)
throw new Error("GiteeAI API Key must be provided to use agents.");
default:
throw new Error(
"No workspace agent provider set. Please set your agent provider in the workspace's settings"
@@ -295,6 +295,8 @@ class AgentHandler {
return process.env.COMETAPI_LLM_MODEL_PREF ?? "gpt-5-mini";
case "foundry":
return process.env.FOUNDRY_MODEL_PREF ?? null;
case "giteeai":
return process.env.GITEE_AI_MODEL_PREF ?? null;
default:
return null;
}

View File

@@ -42,6 +42,7 @@ const SUPPORT_CUSTOM_MODELS = [
"foundry",
"cohere",
"zai",
"giteeai",
// Embedding Engines
"native-embedder",
"cohere-embedder",
@@ -113,6 +114,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
return await getCohereModels(apiKey, "embed");
case "openrouter-embedder":
return await getOpenRouterEmbeddingModels();
case "giteeai":
return await getGiteeAIModels(apiKey);
default:
return { models: [], error: "Invalid provider for custom models" };
}
@@ -596,6 +599,20 @@ async function getDeepSeekModels(apiKey = null) {
return { models, error: null };
}
async function getGiteeAIModels(_apiKey = null) {
  const { giteeAiModels } = require("../AiProviders/giteeai");
  const modelMap = await giteeAiModels();
  if (Object.keys(modelMap).length === 0) return { models: [], error: null };
  const models = Object.values(modelMap).map((model) => {
    return {
      id: model.id,
      organization: model.organization ?? "GiteeAI",
      name: model.id,
    };
  });
  return { models, error: null };
}
async function getXAIModels(_apiKey = null) {
const { OpenAI: OpenAIApi } = require("openai");
const apiKey =

View File

@@ -225,6 +225,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
case "zai":
const { ZAiLLM } = require("../AiProviders/zai");
return new ZAiLLM(embedder, model);
case "giteeai":
const { GiteeAILLM } = require("../AiProviders/giteeai");
return new GiteeAILLM(embedder, model);
default:
throw new Error(
`ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
@@ -387,6 +390,9 @@ function getLLMProviderClass({ provider = null } = {}) {
case "zai":
const { ZAiLLM } = require("../AiProviders/zai");
return ZAiLLM;
case "giteeai":
const { GiteeAILLM } = require("../AiProviders/giteeai");
return GiteeAILLM;
default:
return null;
}
@@ -461,6 +467,8 @@ function getBaseLLMProviderModel({ provider = null } = {}) {
return process.env.FOUNDRY_MODEL_PREF;
case "zai":
return process.env.ZAI_MODEL_PREF;
case "giteeai":
return process.env.GITEE_AI_MODEL_PREF;
default:
return null;
}

View File

@@ -775,6 +775,20 @@ const KEY_MAPPING = {
envKey: "ZAI_MODEL_PREF",
checks: [isNotEmpty],
},
// GiteeAI Options
GiteeAIApiKey: {
envKey: "GITEE_AI_API_KEY",
checks: [isNotEmpty],
},
GiteeAIModelPref: {
envKey: "GITEE_AI_MODEL_PREF",
checks: [isNotEmpty],
},
GiteeAITokenLimit: {
envKey: "GITEE_AI_MODEL_TOKEN_LIMIT",
checks: [nonZero],
},
};
function isNotEmpty(input = "") {
@@ -887,6 +901,7 @@ function supportedLLM(input = "") {
"cometapi",
"foundry",
"zai",
"giteeai",
].includes(input);
return validSelection ? null : `${input} is not a valid LLM provider.`;
}