Z.ai LLM & agent provider (#4573)
* wip zai llm provider
* cleanup + add zai agent provider
* lint
* change how caching works for failed models

Co-authored-by: Timothy Carambat <rambat1010@gmail.com>

This commit is contained in:
parent 7a7ec969d7 · commit 49c29fb968
@@ -99,6 +99,7 @@ AnythingLLM divides your documents into objects called `workspaces`. A Workspace
- [Text Generation Web UI](https://github.com/oobabooga/text-generation-webui)
- [Apipie](https://apipie.ai/)
- [xAI](https://x.ai/)
- [Z.AI (chat models)](https://z.ai/model-api)
- [Novita AI (chat models)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [PPIO](https://ppinfra.com?utm_source=github_anything-llm)
- [Moonshot AI](https://www.moonshot.ai/)
@@ -131,6 +131,10 @@ GID='1000'
# XAI_LLM_API_KEY='xai-your-api-key-here'
# XAI_LLM_MODEL_PREF='grok-beta'

# LLM_PROVIDER='zai'
# ZAI_API_KEY="your-zai-api-key-here"
# ZAI_MODEL_PREF="glm-4.5"

# LLM_PROVIDER='nvidia-nim'
# NVIDIA_NIM_LLM_BASE_PATH='http://127.0.0.1:8000'
# NVIDIA_NIM_LLM_MODEL_PREF='meta/llama-3.2-3b-instruct'
114 frontend/src/components/LLMSelection/ZAiLLMOptions/index.jsx Normal file
@@ -0,0 +1,114 @@
import { useState, useEffect } from "react";
import System from "@/models/system";

export default function ZAiLLMOptions({ settings }) {
  const [inputValue, setInputValue] = useState(settings?.ZAiApiKey);
  const [apiKey, setApiKey] = useState(settings?.ZAiApiKey);

  return (
    <div className="flex gap-[36px] mt-1.5">
      <div className="flex flex-col w-60">
        <label className="text-white text-sm font-semibold block mb-3">
          Z.AI API Key
        </label>
        <input
          type="password"
          name="ZAiApiKey"
          className="border-none bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
          placeholder="Z.AI API Key"
          defaultValue={settings?.ZAiApiKey ? "*".repeat(20) : ""}
          required={true}
          autoComplete="off"
          spellCheck={false}
          onChange={(e) => setInputValue(e.target.value)}
          onBlur={() => setApiKey(inputValue)}
        />
      </div>

      {!settings?.credentialsOnly && (
        <ZAiModelSelection settings={settings} apiKey={apiKey} />
      )}
    </div>
  );
}

function ZAiModelSelection({ apiKey, settings }) {
  const [customModels, setCustomModels] = useState([]);
  const [loading, setLoading] = useState(true);

  useEffect(() => {
    async function findCustomModels() {
      if (!apiKey) {
        setCustomModels([]);
        setLoading(true);
        return;
      }

      try {
        setLoading(true);
        const { models } = await System.customModels("zai", apiKey);
        setCustomModels(models || []);
      } catch (error) {
        console.error("Failed to fetch custom models:", error);
        setCustomModels([]);
      } finally {
        setLoading(false);
      }
    }
    findCustomModels();
  }, [apiKey]);

  if (loading) {
    return (
      <div className="flex flex-col w-60">
        <label className="text-white text-sm font-semibold block mb-3">
          Chat Model Selection
        </label>
        <select
          name="ZAiModelPref"
          disabled={true}
          className="border-none bg-theme-settings-input-bg border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
        >
          <option disabled={true} selected={true}>
            --loading available models--
          </option>
        </select>
        <p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
          Enter a valid API key to view all available models for your account.
        </p>
      </div>
    );
  }

  return (
    <div className="flex flex-col w-60">
      <label className="text-white text-sm font-semibold block mb-3">
        Chat Model Selection
      </label>
      <select
        name="ZAiModelPref"
        required={true}
        className="border-none bg-theme-settings-input-bg border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
      >
        {customModels.length > 0 && (
          <optgroup label="Available models">
            {customModels.map((model) => {
              return (
                <option
                  key={model.id}
                  value={model.id}
                  selected={settings?.ZAiModelPref === model.id}
                >
                  {model.id}
                </option>
              );
            })}
          </optgroup>
        )}
      </select>
      <p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
        Select the Z.AI model you want to use for your conversations.
      </p>
    </div>
  );
}
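For context on the shape this component consumes: `System.customModels("zai", apiKey)` resolves to the payload built by the server-side `getZAiModels` helper later in this diff (`{ models, error }`), where each entry carries the `id` rendered into the `<option>` list. A hedged sketch of that payload, with placeholder model ids:

```js
// Assumed response shape for System.customModels("zai", apiKey); the model ids
// below are placeholders for illustration, not an official Z.AI model list.
const payload = {
  models: [{ id: "glm-4.5" }, { id: "glm-4-flash" }],
  error: null,
};
// <ZAiModelSelection /> maps payload.models to <option value={model.id}> entries.
```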
BIN frontend/src/media/llmprovider/zai.png Normal file
Binary file not shown. After Width: | Height: | Size: 7.8 KiB
@@ -29,6 +29,7 @@ import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
import APIPieLogo from "@/media/llmprovider/apipie.png";
import XAILogo from "@/media/llmprovider/xai.png";
import ZAiLogo from "@/media/llmprovider/zai.png";
import NvidiaNimLogo from "@/media/llmprovider/nvidia-nim.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
@@ -62,6 +63,7 @@ import AWSBedrockLLMOptions from "@/components/LLMSelection/AwsBedrockLLMOptions
import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";
import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";
import ZAiLLMOptions from "@/components/LLMSelection/ZAiLLMOptions";
import NvidiaNimOptions from "@/components/LLMSelection/NvidiaNimOptions";
import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
@@ -335,6 +337,14 @@ export const AVAILABLE_LLM_PROVIDERS = [
    description: "Run xAI's powerful LLMs like Grok-2 and more.",
    requiredConfig: ["XAIApiKey", "XAIModelPref"],
  },
  {
    name: "Z.AI",
    value: "zai",
    logo: ZAiLogo,
    options: (settings) => <ZAiLLMOptions settings={settings} />,
    description: "Run Z.AI's powerful GLM models.",
    requiredConfig: ["ZAiApiKey"],
  },
  {
    name: "Generic OpenAI",
    value: "generic-openai",
@@ -25,6 +25,7 @@ import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
import APIPieLogo from "@/media/llmprovider/apipie.png";
import XAILogo from "@/media/llmprovider/xai.png";
import ZAiLogo from "@/media/llmprovider/zai.png";
import CohereLogo from "@/media/llmprovider/cohere.png";
import ZillizLogo from "@/media/vectordbs/zilliz.png";
import AstraDBLogo from "@/media/vectordbs/astraDB.png";
@@ -231,6 +232,15 @@ export const LLM_SELECTION_PRIVACY = {
    ],
    logo: XAILogo,
  },
  zai: {
    name: "Z.AI",
    description: [
      "Your content is processed in real-time and not stored on Z.AI servers",
      "Your prompts and document text are visible to Z.AI during processing",
      "Data is processed in accordance with Z.AI's API Services terms",
    ],
    logo: ZAiLogo,
  },
  ppio: {
    name: "PPIO",
    description: [
@@ -23,6 +23,7 @@ import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
import APIPieLogo from "@/media/llmprovider/apipie.png";
import NovitaLogo from "@/media/llmprovider/novita.png";
import XAILogo from "@/media/llmprovider/xai.png";
import ZAiLogo from "@/media/llmprovider/zai.png";
import NvidiaNimLogo from "@/media/llmprovider/nvidia-nim.png";
import CohereLogo from "@/media/llmprovider/cohere.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
@@ -54,6 +55,7 @@ import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";
import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
import NovitaLLMOptions from "@/components/LLMSelection/NovitaLLMOptions";
import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";
import ZAiLLMOptions from "@/components/LLMSelection/ZAiLLMOptions";
import NvidiaNimOptions from "@/components/LLMSelection/NvidiaNimOptions";
import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
@@ -267,6 +269,13 @@ const LLMS = [
    options: (settings) => <XAILLMOptions settings={settings} />,
    description: "Run xAI's powerful LLMs like Grok-2 and more.",
  },
  {
    name: "Z.AI",
    value: "zai",
    logo: ZAiLogo,
    options: (settings) => <ZAiLLMOptions settings={settings} />,
    description: "Run Z.AI's powerful GLM models.",
  },
  {
    name: "Moonshot AI",
    value: "moonshotai",
@@ -34,6 +34,7 @@ const ENABLED_PROVIDERS = [
  "moonshotai",
  "cometapi",
  "foundry",
  "zai",
  // TODO: More agent support.
  // "cohere", // Has tool calling and will need to build explicit support
  // "huggingface" // Can be done but already has issues with no-chat templated. Needs to be tested.
@@ -102,6 +102,7 @@ AnythingLLM divides your documents into objects called `workspaces`
- [Text Generation Web UI](https://github.com/oobabooga/text-generation-webui)
- [Apipie](https://apipie.ai/)
- [xAI](https://x.ai/)
- [Z.AI (chat models)](https://z.ai/model-api)
- [Novita AI (chat models)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [PPIO](https://ppinfra.com?utm_source=github_anything-llm)
@@ -90,6 +90,8 @@ AnythingLLM divides your documents into objects called `workspaces`
- [Groq](https://groq.com/)
- [Cohere](https://cohere.com/)
- [KoboldCPP](https://github.com/LostRuins/koboldcpp)
- [xAI](https://x.ai/)
- [Z.AI (chat models)](https://z.ai/model-api)
- [PPIO](https://ppinfra.com?utm_source=github_anything-llm)
- [CometAPI (chat models)](https://api.cometapi.com/)
@@ -99,6 +99,7 @@ AnythingLLM divides your documents into objects called **"workspaces"**
- [Text Generation Web UI](https://github.com/oobabooga/text-generation-webui)
- [Apipie](https://apipie.ai/)
- [xAI](https://x.ai/)
- [Z.AI (chat models)](https://z.ai/model-api)
- [Novita AI (chat models)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [PPIO](https://ppinfra.com?utm_source=github_anything-llm)
@@ -98,6 +98,7 @@ AnythingLLM divides your documents into objects called `workspaces`.
- [Text Generation Web UI](https://github.com/oobabooga/text-generation-webui)
- [Apipie](https://apipie.ai/)
- [xAI](https://x.ai/)
- [Z.AI (chat models)](https://z.ai/model-api)
- [Novita AI (chat models)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [PPIO (chat models)](https://ppinfra.com?utm_source=github_anything-llm)
- [CometAPI (chat models)](https://api.cometapi.com/)
@@ -134,6 +134,10 @@ SIG_SALT='salt' # Please generate random string at least 32 chars long.
# XAI_LLM_API_KEY='xai-your-api-key-here'
# XAI_LLM_MODEL_PREF='grok-beta'

# LLM_PROVIDER='zai'
# ZAI_API_KEY="your-zai-api-key-here"
# ZAI_MODEL_PREF="glm-4.5"

# LLM_PROVIDER='nvidia-nim'
# NVIDIA_NIM_LLM_BASE_PATH='http://127.0.0.1:8000'
# NVIDIA_NIM_LLM_MODEL_PREF='meta/llama-3.2-3b-instruct'
@@ -145,6 +145,9 @@ function getModelTag() {
    case "moonshotai":
      model = process.env.MOONSHOT_AI_MODEL_PREF;
      break;
    case "zai":
      model = process.env.ZAI_MODEL_PREF;
      break;
    default:
      model = "--";
      break;
@@ -625,6 +625,10 @@ const SystemSettings = {
      CometApiLLMApiKey: !!process.env.COMETAPI_LLM_API_KEY,
      CometApiLLMModelPref: process.env.COMETAPI_LLM_MODEL_PREF,
      CometApiLLMTimeout: process.env.COMETAPI_LLM_TIMEOUT_MS,

      // Z.AI Keys
      ZAiApiKey: !!process.env.ZAI_API_KEY,
      ZAiModelPref: process.env.ZAI_MODEL_PREF,
    };
  },
@@ -19,6 +19,7 @@ class ContextWindowFinder {
    xai: "xai",
    deepseek: "deepseek",
    moonshot: "moonshot",
    zai: "vercel_ai_gateway", // Vercel has correct context windows for Z.AI models
  };
  static expiryMs = 1000 * 60 * 60 * 24 * 3; // 3 days
  static remoteUrl =
@@ -116,8 +117,9 @@ You can fix this by restarting AnythingLLM so the model map is re-pulled.
     });
     if (!remoteContexWindowMap) return null;
 
-    const modelMap = this.#formatModelMap(remoteContexWindowMap);
-    this.#validateModelMap(modelMap);
+    const modelMap = this.#validateModelMap(
+      this.#formatModelMap(remoteContexWindowMap)
+    );
     fs.writeFileSync(this.cacheFilePath, JSON.stringify(modelMap, null, 2));
     fs.writeFileSync(this.cacheFileExpiryPath, Date.now().toString());
     return modelMap;
@@ -139,13 +141,17 @@ You can fix this by restarting AnythingLLM so the model map is re-pulled.
 
       // Validate that the context window is a number
       for (const [model, contextWindow] of Object.entries(models)) {
-        if (isNaN(contextWindow) || contextWindow <= 0)
-          throw new Error(
-            `Invalid model map for ${provider} - context window is not a positive number for model ${model}`
+        if (isNaN(contextWindow) || contextWindow <= 0) {
+          this.log(
+            `${provider}:${model} - context window is not a positive number. Got ${contextWindow}.`
           );
+          delete models[model];
+          continue;
+        }
       }
     }
+    return modelMap;
   }
 
   /**
    * Formats the remote model map to a format that is compatible with how we store the model map
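This is the caching change called out in the commit message: an invalid context window no longer throws (which previously aborted the cache write for the whole map); it is logged and the offending model is pruned, and `#validateModelMap` now returns the cleaned map for caching. A minimal standalone sketch of the new behavior, using hypothetical map data:

```js
// Minimal sketch of the new validation pass (hypothetical data, not the real remote map).
// Bad entries are logged and dropped; the surviving map is still cached to disk.
const modelMap = {
  zai: { "glm-4.5": 131072, "glm-broken": "n/a" },
};

for (const [provider, models] of Object.entries(modelMap)) {
  for (const [model, contextWindow] of Object.entries(models)) {
    if (isNaN(contextWindow) || contextWindow <= 0) {
      console.log(
        `${provider}:${model} - context window is not a positive number. Got ${contextWindow}.`
      );
      delete models[model]; // prune the bad model instead of throwing
    }
  }
}

console.log(modelMap); // { zai: { 'glm-4.5': 131072 } } — still written to the cache file
```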
@@ -435,7 +435,8 @@ class OllamaAILLM {
         type: "textResponseChunk",
         textResponse: "",
         close: true,
-        error: `Ollama:streaming - could not stream chat. ${error?.cause ?? error.message
+        error: `Ollama:streaming - could not stream chat. ${
+          error?.cause ?? error.message
         }`,
       });
       response.removeListener("close", handleAbort);
179 server/utils/AiProviders/zai/index.js Normal file
@@ -0,0 +1,179 @@
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
  LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
  handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
const { MODEL_MAP } = require("../modelMap");

class ZAiLLM {
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.ZAI_API_KEY) throw new Error("No Z.AI API key was set.");
    this.className = "ZAiLLM";
    const { OpenAI: OpenAIApi } = require("openai");

    this.openai = new OpenAIApi({
      baseURL: "https://api.z.ai/api/paas/v4",
      apiKey: process.env.ZAI_API_KEY,
    });
    this.model = modelPreference || process.env.ZAI_MODEL_PREF || "glm-4.5";
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    this.log(
      `Initialized ${this.model} with context window ${this.promptWindowLimit()}`
    );
  }

  log(text, ...args) {
    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
  }

  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  static promptWindowLimit(modelName) {
    return MODEL_MAP.get("zai", modelName) ?? 131072;
  }

  promptWindowLimit() {
    return MODEL_MAP.get("zai", this.model) ?? 131072;
  }

  async isValidChatCompletionModel(modelName = "") {
    return !!modelName; // name just needs to exist
  }

  /**
   * Generates appropriate content array for a message + attachments.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) return userPrompt;

    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
        },
      });
    }
    return content.flat();
  }

  /**
   * Construct the user prompt for this model.
   * @param {{attachments: import("../../helpers").Attachment[]}} param0
   * @returns
   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [],
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...chatHistory,
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }

  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );

    if (
      !result.output.hasOwnProperty("choices") ||
      result.output.choices.length === 0
    )
      return null;

    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        prompt_tokens: result.output.usage?.prompt_tokens || 0,
        completion_tokens: result.output.usage?.completion_tokens || 0,
        total_tokens: result.output.usage?.total_tokens || 0,
        outputTps: result.output.usage?.completion_tokens / result.duration,
        duration: result.duration,
      },
    };
  }

  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
      this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
      }),
      messages,
      false
    );

    return measuredStreamRequest;
  }

  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}

module.exports = {
  ZAiLLM,
};
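A quick usage sketch of the new class; the require path and env setup are assumptions, while the method names and return shape come from the file above:

```js
// Hedged usage sketch of ZAiLLM (assumes ZAI_API_KEY is exported in the environment).
const { ZAiLLM } = require("./server/utils/AiProviders/zai");

const llm = new ZAiLLM(null, "glm-4.5"); // null embedder => falls back to NativeEmbedder
const messages = llm.constructPrompt({
  systemPrompt: "You are a helpful assistant.",
  contextTexts: [],
  chatHistory: [],
  userPrompt: "Hello!",
  attachments: [],
});

llm
  .getChatCompletion(messages, { temperature: llm.defaultTemp })
  .then((res) => {
    if (res) console.log(res.textResponse, res.metrics);
  });
```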
@@ -964,6 +964,8 @@ ${this.getHistory({ to: route.to })
      return new Providers.ApiPieProvider({ model: config.model });
    case "xai":
      return new Providers.XAIProvider({ model: config.model });
    case "zai":
      return new Providers.ZAIProvider({ model: config.model });
    case "novita":
      return new Providers.NovitaProvider({ model: config.model });
    case "ppio":
@@ -183,6 +183,14 @@ class Provider {
        apiKey: process.env.XAI_LLM_API_KEY ?? null,
        ...config,
      });
    case "zai":
      return new ChatOpenAI({
        configuration: {
          baseURL: "https://api.z.ai/api/paas/v4",
        },
        apiKey: process.env.ZAI_API_KEY ?? null,
        ...config,
      });
    case "novita":
      return new ChatOpenAI({
        configuration: {
@@ -18,6 +18,7 @@ const DeepSeekProvider = require("./deepseek.js");
const LiteLLMProvider = require("./litellm.js");
const ApiPieProvider = require("./apipie.js");
const XAIProvider = require("./xai.js");
const ZAIProvider = require("./zai.js");
const NovitaProvider = require("./novita.js");
const NvidiaNimProvider = require("./nvidiaNim.js");
const PPIOProvider = require("./ppio.js");
@@ -48,6 +49,7 @@ module.exports = {
  LiteLLMProvider,
  ApiPieProvider,
  XAIProvider,
  ZAIProvider,
  NovitaProvider,
  CometApiProvider,
  NvidiaNimProvider,
88 server/utils/agents/aibitat/providers/zai.js Normal file
@@ -0,0 +1,88 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");

class ZAIProvider extends InheritMultiple([Provider, UnTooled]) {
  model;

  constructor(config = {}) {
    const { model = "glm-4.5" } = config;
    super();
    const client = new OpenAI({
      baseURL: "https://api.z.ai/api/paas/v4",
      apiKey: process.env.ZAI_API_KEY,
      maxRetries: 3,
    });

    this._client = client;
    this.model = model;
    this.verbose = true;
  }

  /**
   * Create a completion based on the received messages.
   *
   * @param messages A list of messages to send to the API.
   * @param functions
   * @returns The completion.
   */
  get client() {
    return this._client;
  }

  get supportsAgentStreaming() {
    return true;
  }

  async #handleFunctionCallChat({ messages = [] }) {
    return await this.client.chat.completions
      .create({
        model: this.model,
        messages,
      })
      .then((result) => {
        if (!result.hasOwnProperty("choices"))
          throw new Error("Z.AI chat: No results!");
        if (result.choices.length === 0)
          throw new Error("Z.AI chat: No results length!");
        return result.choices[0].message.content;
      })
      .catch((_) => {
        return null;
      });
  }

  async #handleFunctionCallStream({ messages = [] }) {
    return await this.client.chat.completions.create({
      model: this.model,
      stream: true,
      messages,
    });
  }

  async stream(messages, functions = [], eventHandler = null) {
    return await UnTooled.prototype.stream.call(
      this,
      messages,
      functions,
      this.#handleFunctionCallStream.bind(this),
      eventHandler
    );
  }

  async complete(messages, functions = []) {
    return await UnTooled.prototype.complete.call(
      this,
      messages,
      functions,
      this.#handleFunctionCallChat.bind(this)
    );
  }

  getCost(_usage) {
    return 0;
  }
}

module.exports = ZAIProvider;
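For orientation, this agent-side class is resolved by the factory switch shown earlier (`case "zai": return new Providers.ZAIProvider(...)`). A small instantiation sketch; the `complete()`/`stream()` return shapes come from the shared `UnTooled` helper, which is not part of this diff, so they are left out:

```js
// Hedged sketch: instantiating the agent provider directly (require path assumed).
const ZAIProvider = require("./server/utils/agents/aibitat/providers/zai.js");

const provider = new ZAIProvider({ model: "glm-4.5" }); // default model per the constructor
console.log(provider.model); // "glm-4.5"
console.log(provider.supportsAgentStreaming); // true — agents stream via #handleFunctionCallStream
```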
@@ -172,6 +172,10 @@ class AgentHandler {
        if (!process.env.XAI_LLM_API_KEY)
          throw new Error("xAI API Key must be provided to use agents.");
        break;
      case "zai":
        if (!process.env.ZAI_API_KEY)
          throw new Error("Z.AI API Key must be provided to use agents.");
        break;
      case "novita":
        if (!process.env.NOVITA_LLM_API_KEY)
          throw new Error("Novita API Key must be provided to use agents.");
@@ -275,6 +279,8 @@ class AgentHandler {
        return process.env.APIPIE_LLM_MODEL_PREF ?? null;
      case "xai":
        return process.env.XAI_LLM_MODEL_PREF ?? "grok-beta";
      case "zai":
        return process.env.ZAI_MODEL_PREF ?? "glm-4.5";
      case "novita":
        return process.env.NOVITA_LLM_MODEL_PREF ?? "deepseek/deepseek-r1";
      case "nvidia-nim":
@@ -38,6 +38,7 @@ const SUPPORT_CUSTOM_MODELS = [
  "moonshotai",
  "foundry",
  "cohere",
  "zai",
  // Embedding Engines
  "native-embedder",
  "cohere-embedder",
@@ -100,6 +101,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
    case "foundry":
      return await getFoundryModels(basePath);
    case "cohere":
      return await getCohereModels(apiKey, "chat");
    case "zai":
      return await getZAiModels(apiKey);
    case "native-embedder":
      return await getNativeEmbedderModels();
    case "cohere-embedder":
@@ -798,6 +801,29 @@ async function getCohereModels(_apiKey = null, type = "chat") {
  return { models, error: null };
}

async function getZAiModels(_apiKey = null) {
  const { OpenAI: OpenAIApi } = require("openai");
  const apiKey =
    _apiKey === true
      ? process.env.ZAI_API_KEY
      : _apiKey || process.env.ZAI_API_KEY || null;
  const openai = new OpenAIApi({
    baseURL: "https://api.z.ai/api/paas/v4",
    apiKey,
  });
  const models = await openai.models
    .list()
    .then((results) => results.data)
    .catch((e) => {
      console.error(`Z.AI:listModels`, e.message);
      return [];
    });

  // Api Key was successful so lets save it for future uses
  if (models.length > 0 && !!apiKey) process.env.ZAI_API_KEY = apiKey;
  return { models, error: null };
}

module.exports = {
  getCustomModels,
  SUPPORT_CUSTOM_MODELS,
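To see the new branch end to end, a hedged sketch of calling `getCustomModels` for Z.AI; the module path is assumed, while the `{ models, error }` shape and the `openai.models.list()` source are from the diff above:

```js
// Hedged sketch — exercises the new "zai" case of getCustomModels (path assumed).
const { getCustomModels } = require("./server/utils/helpers/customModels");

(async () => {
  const { models, error } = await getCustomModels("zai", process.env.ZAI_API_KEY);
  if (error) return console.error(error);
  // Entries come from openai.models.list() against https://api.z.ai/api/paas/v4,
  // so each has an `id` — the same ids the frontend renders as <option> values.
  console.log(models.map((m) => m.id));
})();
```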
@@ -222,6 +222,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
    case "foundry":
      const { FoundryLLM } = require("../AiProviders/foundry");
      return new FoundryLLM(embedder, model);
    case "zai":
      const { ZAiLLM } = require("../AiProviders/zai");
      return new ZAiLLM(embedder, model);
    default:
      throw new Error(
        `ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
@@ -378,6 +381,9 @@ function getLLMProviderClass({ provider = null } = {}) {
    case "foundry":
      const { FoundryLLM } = require("../AiProviders/foundry");
      return FoundryLLM;
    case "zai":
      const { ZAiLLM } = require("../AiProviders/zai");
      return ZAiLLM;
    default:
      return null;
  }
@@ -450,6 +456,8 @@ function getBaseLLMProviderModel({ provider = null } = {}) {
      return process.env.COMETAPI_LLM_MODEL_PREF;
    case "foundry":
      return process.env.FOUNDRY_MODEL_PREF;
    case "zai":
      return process.env.ZAI_MODEL_PREF;
    default:
      return null;
  }
@@ -752,6 +752,16 @@ const KEY_MAPPING = {
    envKey: "COMETAPI_LLM_TIMEOUT_MS",
    checks: [],
  },

  // Z.AI Options
  ZAiApiKey: {
    envKey: "ZAI_API_KEY",
    checks: [isNotEmpty],
  },
  ZAiModelPref: {
    envKey: "ZAI_MODEL_PREF",
    checks: [isNotEmpty],
  },
};

function isNotEmpty(input = "") {
@@ -863,6 +873,7 @@ function supportedLLM(input = "") {
    "moonshotai",
    "cometapi",
    "foundry",
    "zai",
  ].includes(input);
  return validSelection ? null : `${input} is not a valid LLM provider.`;
}
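Finally, a sketch of how `KEY_MAPPING` entries like these are typically consumed when settings are saved. The `isNotEmpty` body and the `validate` helper below are assumptions for illustration; only the mapping entries and the `isNotEmpty` signature appear in this diff:

```js
// Hedged sketch of KEY_MAPPING-driven validation (validate() is hypothetical).
const KEY_MAPPING = {
  ZAiApiKey: { envKey: "ZAI_API_KEY", checks: [isNotEmpty] },
  ZAiModelPref: { envKey: "ZAI_MODEL_PREF", checks: [isNotEmpty] },
};

function isNotEmpty(input = "") {
  // Assumed implementation: each check returns an error string or null.
  return !input || !input.length ? "Value cannot be empty" : null;
}

function validate(key, value) {
  const errors = KEY_MAPPING[key].checks.map((c) => c(value)).filter(Boolean);
  return errors.length ? errors : null; // null => safe to write process.env[KEY_MAPPING[key].envKey]
}

console.log(validate("ZAiApiKey", "")); // [ "Value cannot be empty" ]
console.log(validate("ZAiModelPref", "glm-4.5")); // null
```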