Support PrivateModeAI Integration (#4937)

* Support PrivateModeAI Integration

* tooltip for proxy
Timothy Carambat 2026-01-29 12:01:11 -08:00 committed by GitHub
parent 9fa455c10a
commit b8dd7bc97e
19 changed files with 576 additions and 0 deletions

View File

@ -167,6 +167,10 @@ GID='1000'
# DOCKER_MODEL_RUNNER_LLM_MODEL_PREF='phi-3.5-mini'
# DOCKER_MODEL_RUNNER_LLM_MODEL_TOKEN_LIMIT=4096
# LLM_PROVIDER='privatemode'
# PRIVATEMODE_LLM_BASE_PATH='http://127.0.0.1:8080'
# PRIVATEMODE_LLM_MODEL_PREF='gemma-3-27b'
###########################################
######## Embedding API SELECTION ##########
###########################################
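For reference, the server-side provider resolves this base path to `${origin}/v1` before issuing requests (see parseBasePath in the new AiProviders/privatemode class later in this diff), so the proxy should answer on /v1/models. A minimal reachability sketch, assuming the default address from the example above:

// Quick smoke test against the Privatemode proxy's OpenAI-compatible API.
fetch("http://127.0.0.1:8080/v1/models") // address assumed from the env example above
  .then((res) => res.json())
  .then((list) => console.log(list.data?.map((m) => m.id)));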

View File

@ -0,0 +1,129 @@
import { useEffect, useState } from "react";
import { Info } from "@phosphor-icons/react";
import { Tooltip } from "react-tooltip";
import System from "@/models/system";
import { Link } from "react-router-dom";
export default function PrivateModeOptions({ settings }) {
const [models, setModels] = useState([]);
const [loading, setLoading] = useState(!!settings?.PrivateModeBasePath);
const [basePath, setBasePath] = useState(settings?.PrivateModeBasePath);
const [model, setModel] = useState(settings?.PrivateModeModelPref || "");
useEffect(() => {
setModel(settings?.PrivateModeModelPref || "");
}, [settings?.PrivateModeModelPref]);
useEffect(() => {
async function fetchModels() {
try {
setLoading(true);
if (!basePath) throw new Error("Base path is required");
const { models, error } = await System.customModels(
"privatemode",
null,
basePath
);
if (error) throw new Error(error);
setModels(models);
} catch (error) {
console.error("Error fetching Private Mode models:", error);
setModels([]);
} finally {
setLoading(false);
}
}
fetchModels();
}, [basePath]);
return (
<div className="flex flex-col gap-y-7">
<div className="flex gap-[36px] mt-1.5 flex-wrap">
<div className="flex flex-col w-60">
<div className="flex items-center gap-1 mb-2">
<label className="text-white text-sm font-semibold">
Privatemode Proxy URL
</label>
<Info
size={18}
className="text-theme-text-secondary cursor-pointer"
data-tooltip-id="private-mode-base-url"
/>
<Tooltip
id="private-mode-base-url"
place="top"
delayShow={300}
clickable={true}
className="tooltip !text-xs !opacity-100"
style={{
maxWidth: "250px",
whiteSpace: "normal",
wordWrap: "break-word",
}}
>
Enter the URL where the Privatemode proxy is running.
<br />
<br />
<Link
to="https://docs.privatemode.ai/quickstart#2-run-the-proxy"
target="_blank"
className="text-blue-500 hover:underline"
>
Learn more &rarr;
</Link>
</Tooltip>
</div>
<input
type="url"
name="PrivateModeBasePath"
className="border-none bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
placeholder="eg: http://127.0.0.1:8080"
defaultValue={settings?.PrivateModeBasePath}
required={true}
autoComplete="off"
spellCheck={false}
onChange={(e) => setBasePath(e.target.value)}
/>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-2">
Chat Model
</label>
{loading ? (
<select
name="PrivateModeModelPref"
required={true}
disabled={true}
className="border-none bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
>
<option>---- Loading ----</option>
</select>
) : (
<select
name="PrivateModeModelPref"
value={model}
onChange={(e) => setModel(e.target.value)}
required={true}
className="border-none bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
>
{models.length > 0 ? (
<>
<option value="">-- Select a model --</option>
{models.map((model) => (
<option key={model.id} value={model.id}>
{model.name}
</option>
))}
</>
) : (
<option disabled value="">
No models found
</option>
)}
</select>
)}
</div>
</div>
</div>
);
}
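For context, the model dropdown above expects System.customModels to resolve with the shape produced by the server's getPrivatemodeModels helper (shown later in this diff), roughly:

// Hypothetical resolved value for a proxy at http://127.0.0.1:8080:
// {
//   models: [{ id: "gemma-3-27b", organization: "Privatemode", name: "Gemma 3 27b" }],
//   error: null,
// }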

View File

@ -42,6 +42,7 @@ import CometApiLogo from "@/media/llmprovider/cometapi.png";
import FoundryLogo from "@/media/llmprovider/foundry-local.png";
import GiteeAILogo from "@/media/llmprovider/giteeai.png";
import DockerModelRunnerLogo from "@/media/llmprovider/docker-model-runner.png";
import PrivateModeLogo from "@/media/llmprovider/privatemode.png";
const LLM_PROVIDER_PRIVACY_MAP = {
openai: {
@ -232,6 +233,11 @@ const LLM_PROVIDER_PRIVACY_MAP = {
],
logo: DockerModelRunnerLogo,
},
privatemode: {
name: "Privatemode",
policyUrl: "https://docs.privatemode.ai/getting-started/faq#q2",
logo: PrivateModeLogo,
},
};
const VECTOR_DB_PROVIDER_PRIVACY_MAP = {

Binary file not shown (new image, 9.2 KiB).

View File

@ -38,6 +38,7 @@ import CometApiLogo from "@/media/llmprovider/cometapi.png";
import FoundryLogo from "@/media/llmprovider/foundry-local.png";
import GiteeAILogo from "@/media/llmprovider/giteeai.png";
import DockerModelRunnerLogo from "@/media/llmprovider/docker-model-runner.png";
import PrivateModeLogo from "@/media/llmprovider/privatemode.png";
import PreLoader from "@/components/Preloader";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@ -73,6 +74,7 @@ import MoonshotAiOptions from "@/components/LLMSelection/MoonshotAiOptions";
import FoundryOptions from "@/components/LLMSelection/FoundryOptions";
import GiteeAIOptions from "@/components/LLMSelection/GiteeAIOptions/index.jsx";
import DockerModelRunnerOptions from "@/components/LLMSelection/DockerModelRunnerOptions";
import PrivateModeOptions from "@/components/LLMSelection/PrivateModeOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@ -316,6 +318,14 @@ export const AVAILABLE_LLM_PROVIDERS = [
description: "Run Moonshot AI's powerful LLMs.",
requiredConfig: ["MoonshotAiApiKey"],
},
{
name: "Privatemode",
value: "privatemode",
logo: PrivateModeLogo,
options: (settings) => <PrivateModeOptions settings={settings} />,
description: "Run LLMs with end-to-end encryption.",
requiredConfig: ["PrivateModeBasePath"],
},
{
name: "Novita AI",
value: "novita",

View File

@ -32,6 +32,7 @@ import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
import CometApiLogo from "@/media/llmprovider/cometapi.png";
import GiteeAILogo from "@/media/llmprovider/giteeai.png";
import DockerModelRunnerLogo from "@/media/llmprovider/docker-model-runner.png";
import PrivateModeLogo from "@/media/llmprovider/privatemode.png";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
@ -65,6 +66,7 @@ import MoonshotAiOptions from "@/components/LLMSelection/MoonshotAiOptions";
import CometApiLLMOptions from "@/components/LLMSelection/CometApiLLMOptions";
import GiteeAiOptions from "@/components/LLMSelection/GiteeAIOptions";
import DockerModelRunnerOptions from "@/components/LLMSelection/DockerModelRunnerOptions";
import PrivateModeOptions from "@/components/LLMSelection/PrivateModeOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import System from "@/models/system";
@ -273,6 +275,13 @@ const LLMS = [
options: (settings) => <AWSBedrockLLMOptions settings={settings} />,
description: "Run powerful foundation models privately with AWS Bedrock.",
},
{
name: "Privatemode",
value: "privatemode",
logo: PrivateModeLogo,
options: (settings) => <PrivateModeOptions settings={settings} />,
description: "Run LLMs with end-to-end encryption.",
},
{
name: "xAI",
value: "xai",

View File

@ -38,6 +38,7 @@ const ENABLED_PROVIDERS = [
"giteeai",
"cohere",
"docker-model-runner",
"privatemode",
// TODO: More agent support.
// "huggingface" // Can be done but already has issues with no-chat templated. Needs to be tested.
];

View File

@ -166,6 +166,10 @@ SIG_SALT='salt' # Please generate random string at least 32 chars long.
# DOCKER_MODEL_RUNNER_LLM_MODEL_PREF='phi-3.5-mini'
# DOCKER_MODEL_RUNNER_LLM_MODEL_TOKEN_LIMIT=4096
# LLM_PROVIDER='privatemode'
# PRIVATEMODE_LLM_BASE_PATH='http://127.0.0.1:8080'
# PRIVATEMODE_LLM_MODEL_PREF='gemma-3-27b'
###########################################
######## Embedding API SELECTION ##########
###########################################

View File

@ -162,6 +162,9 @@ function getModelTag() {
case "docker-model-runner":
model = process.env.DOCKER_MODEL_RUNNER_LLM_MODEL_PREF;
break;
case "privatemode":
model = process.env.PRIVATEMODE_LLM_MODEL_PREF;
break;
default:
model = "--";
break;

View File

@ -674,6 +674,10 @@ const SystemSettings = {
process.env.DOCKER_MODEL_RUNNER_LLM_MODEL_PREF,
DockerModelRunnerModelTokenLimit:
process.env.DOCKER_MODEL_RUNNER_LLM_MODEL_TOKEN_LIMIT || 8192,
// Privatemode Keys
PrivateModeBasePath: process.env.PRIVATEMODE_LLM_BASE_PATH,
PrivateModeModelPref: process.env.PRIVATEMODE_LLM_MODEL_PREF,
};
},

View File

@ -0,0 +1,218 @@
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
handleDefaultStreamResponseV2,
formatChatHistory,
} = require("../../helpers/chat/responses");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
class PrivatemodeLLM {
static contextWindows = {
"leon-se/gemma-3-27b-it-fp8-dynamic": 128000,
"gemma-3-27b": 128000,
"qwen3-coder-30b-a3b": 128000,
"gpt-oss-120b": 128000,
"openai/gpt-oss-120b": 128000,
};
constructor(embedder = null, modelPreference = null) {
if (!process.env.PRIVATEMODE_LLM_BASE_PATH)
throw new Error("No Privatemode Base Path was set.");
this.className = "PrivatemodeLLM";
const { OpenAI: OpenAIApi } = require("openai");
this.client = new OpenAIApi({
baseURL: PrivatemodeLLM.parseBasePath(),
apiKey: null,
});
this.model = modelPreference || process.env.PRIVATEMODE_LLM_MODEL_PREF;
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
user: this.promptWindowLimit() * 0.7,
};
this.embedder = embedder ?? new NativeEmbedder();
this.defaultTemp = 0.7;
this.log(
`Privatemode LLM initialized with ${this.model}. ctx: ${this.promptWindowLimit()}`
);
}
/**
* Parse the base path for the Privatemode API
* so we can use it for inference requests
* @param {string} providedBasePath
* @returns {string}
*/
static parseBasePath(
providedBasePath = process.env.PRIVATEMODE_LLM_BASE_PATH
) {
try {
const baseURL = new URL(providedBasePath);
const basePath = `${baseURL.origin}/v1`;
return basePath;
} catch (e) {
return null;
}
}
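// Example (sketch): parseBasePath("http://127.0.0.1:8080/some/path") returns
// "http://127.0.0.1:8080/v1" since only the origin is kept; unparsable input
// (e.g. an unset env var) returns null instead of throwing.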
log(text, ...args) {
console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
}
#appendContext(contextTexts = []) {
if (!contextTexts || !contextTexts.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")
);
}
streamingEnabled() {
return "streamGetChatCompletion" in this;
}
static promptWindowLimit(_modelName) {
const limit = PrivatemodeLLM.contextWindows[_modelName] || 16384;
return Number(limit);
}
promptWindowLimit() {
const limit = PrivatemodeLLM.contextWindows[this.model] || 16384;
return Number(limit);
}
async isValidChatCompletionModel(_ = "") {
return true;
}
/**
* Generates appropriate content array for a message + attachments.
* @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
* @returns {string|object[]}
*/
#generateContent({ userPrompt, attachments = [] }) {
if (!attachments.length) return userPrompt;
const content = [{ type: "text", text: userPrompt }];
for (let attachment of attachments) {
content.push({
type: "image_url",
image_url: {
url: attachment.contentString,
detail: "auto",
},
});
}
return content.flat();
}
/**
* Construct the user prompt for this model.
* @param {{attachments: import("../../helpers").Attachment[]}} param0
* @returns
*/
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [],
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [
prompt,
...formatChatHistory(chatHistory, this.#generateContent),
{
role: "user",
content: this.#generateContent({ userPrompt, attachments }),
},
];
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!this.model)
throw new Error(
`Privatemode chat: ${this.model} is not a valid or defined model for chat completion!`
);
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.client.chat.completions.create({
model: this.model,
messages,
temperature,
})
);
if (
!result.output.hasOwnProperty("choices") ||
result.output.choices.length === 0
)
return null;
return {
textResponse: result.output.choices[0].message.content,
metrics: {
prompt_tokens: result.output.usage?.prompt_tokens || 0,
completion_tokens: result.output.usage?.completion_tokens || 0,
total_tokens: result.output.usage?.total_tokens || 0,
outputTps: result.output.usage?.completion_tokens / result.duration,
duration: result.duration,
model: this.model,
timestamp: new Date(),
},
};
}
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!this.model)
throw new Error(
`Privatemode chat: ${this.model} is not a valid or defined model for chat completion!`
);
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
func: this.client.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
}),
messages,
runPromptTokenCalculation: true,
modelTag: this.model,
});
return measuredStreamRequest;
}
handleStream(response, stream, responseProps) {
return handleDefaultStreamResponseV2(response, stream, responseProps);
}
// Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
}
module.exports = {
PrivatemodeLLM,
};
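A minimal usage sketch of the class above, assuming both PRIVATEMODE_LLM_BASE_PATH and PRIVATEMODE_LLM_MODEL_PREF are set (the require path is illustrative):

const { PrivatemodeLLM } = require("../AiProviders/privatemode"); // illustrative path
const llm = new PrivatemodeLLM();
const messages = llm.constructPrompt({
  systemPrompt: "You are a helpful assistant.",
  userPrompt: "Hello!",
});
// Resolves to { textResponse, metrics } or null when no choices come back.
llm
  .getChatCompletion(messages, { temperature: llm.defaultTemp })
  .then((res) => console.log(res?.textResponse));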

View File

@ -994,6 +994,8 @@ ${this.getHistory({ to: route.to })
return new Providers.CohereProvider({ model: config.model });
case "docker-model-runner":
return new Providers.DockerModelRunnerProvider({ model: config.model });
case "privatemode":
return new Providers.PrivatemodeProvider({ model: config.model });
default:
throw new Error(
`Unknown provider: ${config.provider}. Please use a valid provider.`

View File

@ -249,6 +249,14 @@ class Provider {
apiKey: process.env.COHERE_API_KEY ?? null,
...config,
});
case "privatemode":
return new ChatOpenAI({
configuration: {
baseURL: process.env.PRIVATEMODE_LLM_BASE_PATH,
},
apiKey: null,
...config,
});
// OSS Model Runners
// case "anythingllm_ollama":
// return new ChatOllama({

View File

@ -30,6 +30,7 @@ const FoundryProvider = require("./foundry.js");
const GiteeAIProvider = require("./giteeai.js");
const CohereProvider = require("./cohere.js");
const DockerModelRunnerProvider = require("./dockerModelRunner.js");
const PrivatemodeProvider = require("./privatemode.js");
module.exports = {
OpenAIProvider,
@ -64,4 +65,5 @@ module.exports = {
GiteeAIProvider,
CohereProvider,
DockerModelRunnerProvider,
PrivatemodeProvider,
};

View File

@ -0,0 +1,98 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
const { PrivatemodeLLM } = require("../../../AiProviders/privatemode/index.js");
/**
* The agent provider for the Privatemode provider.
* @extends {Provider}
* @extends {UnTooled}
*/
class PrivatemodeProvider extends InheritMultiple([Provider, UnTooled]) {
model;
constructor(config = {}) {
const { model = process.env.PRIVATEMODE_LLM_MODEL_PREF } = config;
super();
const client = new OpenAI({
baseURL: PrivatemodeLLM.parseBasePath(
process.env.PRIVATEMODE_LLM_BASE_PATH
),
apiKey: null,
maxRetries: 3,
});
this._client = client;
this.model = model;
this.verbose = true;
}
get client() {
return this._client;
}
get supportsAgentStreaming() {
return true;
}
async #handleFunctionCallChat({ messages = [] }) {
return await this.client.chat.completions
.create({
model: this.model,
messages,
user: this.executingUserId,
})
.then((result) => {
if (!result.hasOwnProperty("choices"))
throw new Error("Privatemodel chat: No results!");
if (result.choices.length === 0)
throw new Error("Privatemodel chat: No results length!");
return result.choices[0].message.content;
})
.catch((_) => {
return null;
});
}
async #handleFunctionCallStream({ messages = [] }) {
return await this.client.chat.completions.create({
model: this.model,
stream: true,
messages,
user: this.executingUserId,
});
}
async stream(messages, functions = [], eventHandler = null) {
return await UnTooled.prototype.stream.call(
this,
messages,
functions,
this.#handleFunctionCallStream.bind(this),
eventHandler
);
}
async complete(messages, functions = []) {
return await UnTooled.prototype.complete.call(
this,
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
}
/**
* Get the cost of the completion.
*
* @param _usage The completion to get the cost for.
* @returns The cost of the completion.
* Stubbed since Privatemode has no cost basis.
*/
getCost(_usage) {
return 0;
}
}
module.exports = PrivatemodeProvider;
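As with the other OpenAI-compatible providers in this folder, tool use here is emulated by the UnTooled helpers rather than native function calling. A rough consumption sketch, mirroring the aibitat switch shown earlier in this diff:

const PrivatemodeProvider = require("./privatemode.js");
// Hypothetical: config.model normally comes from the workspace agent settings.
const provider = new PrivatemodeProvider({ model: "gemma-3-27b" });
// provider.complete(messages, functions) and provider.stream(...) both route
// through UnTooled, which injects the tool definitions into the prompt text.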

View File

@ -223,6 +223,12 @@ class AgentHandler {
"Docker Model Runner base path must be provided to use agents."
);
break;
case "privatemode":
if (!process.env.PRIVATEMODE_LLM_BASE_PATH)
throw new Error(
"Privatemode base path must be provided to use agents."
);
break;
default:
throw new Error(
"No workspace agent provider set. Please set your agent provider in the workspace's settings"
@ -305,6 +311,8 @@ class AgentHandler {
return process.env.COHERE_MODEL_PREF ?? "command-r-08-2024";
case "docker-model-runner":
return process.env.DOCKER_MODEL_RUNNER_LLM_MODEL_PREF ?? null;
case "privatemode":
return process.env.PRIVATEMODE_LLM_MODEL_PREF ?? null;
default:
return null;
}

View File

@ -45,6 +45,7 @@ const SUPPORT_CUSTOM_MODELS = [
"zai",
"giteeai",
"docker-model-runner",
"privatemode",
// Embedding Engines
"native-embedder",
"cohere-embedder",
@ -120,6 +121,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
return await getGiteeAIModels(apiKey);
case "docker-model-runner":
return await getDockerModelRunnerModels(basePath);
case "privatemode":
return await getPrivatemodeModels(basePath, "generate");
default:
return { models: [], error: "Invalid provider for custom models" };
}
@ -881,6 +884,54 @@ async function getDockerModelRunnerModels(basePath = null) {
}
}
/**
* Get Privatemode models
* @param {string} basePath - The base path of the Privatemode endpoint.
* @param {'any' | 'generate' | 'embed' | 'transcribe'} task - The task to fetch the models for.
* @returns {Promise<{models: Array<{id: string, organization: string, name: string}>, error: string | null}>}
*/
async function getPrivatemodeModels(basePath = null, task = "any") {
try {
const { PrivatemodeLLM } = require("../AiProviders/privatemode");
const { OpenAI: OpenAIApi } = require("openai");
const openai = new OpenAIApi({
baseURL: PrivatemodeLLM.parseBasePath(
basePath || process.env.PRIVATEMODE_LLM_BASE_PATH
),
apiKey: null,
});
const models = await openai.models
.list()
.then((results) => results.data)
.then(
(models) =>
models
.filter((model) => !model.id.includes("/")) // remove legacy prefixed models
.filter((model) =>
task === "any" ? true : model.tasks.includes(task)
) // filter by task or show all if task is any
)
.then((models) =>
models.map((model) => ({
id: model.id,
organization: "Privatemode",
name: model.id
.split("-")
.map((word) => word.charAt(0).toUpperCase() + word.slice(1))
.join(" "),
}))
)
.catch((e) => {
console.error(`Privatemode:listModels`, e.message);
return [];
});
return { models, error: null };
} catch (e) {
console.error(`Privatemode:getPrivatemodeModels`, e.message);
return { models: [], error: "Could not fetch Privatemode Models" };
}
}
module.exports = {
getCustomModels,
SUPPORT_CUSTOM_MODELS,
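Note: the filter chain above assumes each entry in the proxy's /v1/models response carries a tasks array; hypothetical entries for illustration:

// { "id": "gemma-3-27b", "tasks": ["generate"] }                 -> kept for task "generate"
// { "id": "leon-se/gemma-3-27b-it-fp8-dynamic", "tasks": [...] } -> dropped (legacy "/" prefix)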

View File

@ -234,6 +234,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
DockerModelRunnerLLM,
} = require("../AiProviders/dockerModelRunner");
return new DockerModelRunnerLLM(embedder, model);
case "privatemode":
const { PrivatemodeLLM } = require("../AiProviders/privatemode");
return new PrivatemodeLLM(embedder, model);
default:
throw new Error(
`ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
@ -404,6 +407,9 @@ function getLLMProviderClass({ provider = null } = {}) {
DockerModelRunnerLLM,
} = require("../AiProviders/dockerModelRunner");
return DockerModelRunnerLLM;
case "privatemode":
const { PrivatemodeLLM } = require("../AiProviders/privatemode");
return PrivatemodeLLM;
default:
return null;
}
@ -482,6 +488,8 @@ function getBaseLLMProviderModel({ provider = null } = {}) {
return process.env.GITEE_AI_MODEL_PREF;
case "docker-model-runner":
return process.env.DOCKER_MODEL_RUNNER_LLM_MODEL_PREF;
case "privatemode":
return process.env.PRIVATEMODE_LLM_MODEL_PREF;
default:
return null;
}

View File

@ -799,6 +799,16 @@ const KEY_MAPPING = {
envKey: "DOCKER_MODEL_RUNNER_LLM_MODEL_TOKEN_LIMIT",
checks: [nonZero],
},
// Privatemode Options
PrivateModeBasePath: {
envKey: "PRIVATEMODE_LLM_BASE_PATH",
checks: [isValidURL],
},
PrivateModeModelPref: {
envKey: "PRIVATEMODE_LLM_MODEL_PREF",
checks: [isNotEmpty],
},
};
function isNotEmpty(input = "") {
@ -913,6 +923,7 @@ function supportedLLM(input = "") {
"zai",
"giteeai",
"docker-model-runner",
"privatemode",
].includes(input);
return validSelection ? null : `${input} is not a valid LLM provider.`;
}
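A quick sketch of what these checks accept, assuming isValidURL attempts new URL() construction (hypothetical inputs):

// { PrivateModeBasePath: "http://127.0.0.1:8080" } -> accepted by isValidURL
// { PrivateModeBasePath: "127.0.0.1:8080" }        -> rejected; new URL() cannot parse a schemeless host
// { PrivateModeModelPref: "" }                     -> rejected by isNotEmpty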