Merge branch 'master' of github.com:Mintplex-Labs/anything-llm

This commit is contained in:
timothycarambat 2025-05-14 15:26:20 -07:00
commit dbd8afe1d2
11 changed files with 134 additions and 11 deletions

View File

@ -31,6 +31,7 @@ import APIPieLogo from "@/media/llmprovider/apipie.png";
import XAILogo from "@/media/llmprovider/xai.png";
import NvidiaNimLogo from "@/media/llmprovider/nvidia-nim.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
import PreLoader from "@/components/Preloader";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@ -59,6 +60,7 @@ import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";
import NvidiaNimOptions from "@/components/LLMSelection/NvidiaNimOptions";
import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@ -128,13 +130,17 @@ export const AVAILABLE_LLM_PROVIDERS = [
requiredConfig: ["OllamaLLMBasePath"],
},
{
name: "Novita AI",
value: "novita",
logo: NovitaLogo,
options: (settings) => <NovitaLLMOptions settings={settings} />,
name: "Dell Pro AI Studio",
value: "dpais",
logo: DellProAiStudioLogo,
options: (settings) => <DellProAiStudioOptions settings={settings} />,
description:
"Reliable, Scalable, and Cost-Effective for LLMs from Novita AI",
requiredConfig: ["NovitaLLMApiKey"],
"Run powerful LLMs quickly on NPU powered by Dell Pro AI Studio.",
requiredConfig: [
"DellProAiStudioBasePath",
"DellProAiStudioModelPref",
"DellProAiStudioTokenLimit",
],
},
{
name: "LM Studio",
@ -153,6 +159,15 @@ export const AVAILABLE_LLM_PROVIDERS = [
description: "Run LLMs locally on your own machine.",
requiredConfig: ["LocalAiApiKey", "LocalAiBasePath", "LocalAiTokenLimit"],
},
{
name: "Novita AI",
value: "novita",
logo: NovitaLogo,
options: (settings) => <NovitaLLMOptions settings={settings} />,
description:
"Reliable, Scalable, and Cost-Effective for LLMs from Novita AI",
requiredConfig: ["NovitaLLMApiKey"],
},
{
name: "Together AI",
value: "togetherai",

View File

@ -37,6 +37,7 @@ import MilvusLogo from "@/media/vectordbs/milvus.png";
import VoyageAiLogo from "@/media/embeddingprovider/voyageai.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
import PGVectorLogo from "@/media/vectordbs/pgvector.png";
import DPAISLogo from "@/media/llmprovider/dpais.png";
import React, { useState, useEffect } from "react";
import paths from "@/utils/paths";
import { useNavigate } from "react-router-dom";
@ -234,6 +235,13 @@ export const LLM_SELECTION_PRIVACY = {
],
logo: PPIOLogo,
},
dpais: {
name: "Dell Pro AI Studio",
description: [
"Your model and chat contents are only accessible on the computer running Dell Pro AI Studio",
],
logo: DPAISLogo,
},
};
export const VECTOR_DB_PRIVACY = {

View File

@ -26,6 +26,7 @@ import XAILogo from "@/media/llmprovider/xai.png";
import NvidiaNimLogo from "@/media/llmprovider/nvidia-nim.png";
import CohereLogo from "@/media/llmprovider/cohere.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
@ -53,6 +54,7 @@ import NovitaLLMOptions from "@/components/LLMSelection/NovitaLLMOptions";
import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";
import NvidiaNimOptions from "@/components/LLMSelection/NvidiaNimOptions";
import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import System from "@/models/system";
@ -114,12 +116,12 @@ const LLMS = [
description: "Run LLMs locally on your own machine.",
},
{
name: "Novita AI",
value: "novita",
logo: NovitaLogo,
options: (settings) => <NovitaLLMOptions settings={settings} />,
name: "Dell Pro AI Studio",
value: "dpais",
logo: DellProAiStudioLogo,
options: (settings) => <DellProAiStudioOptions settings={settings} />,
description:
"Reliable, Scalable, and Cost-Effective for LLMs from Novita AI",
"Run powerful LLMs quickly on NPU powered by Dell Pro AI Studio.",
},
{
name: "LM Studio",
@ -136,6 +138,14 @@ const LLMS = [
options: (settings) => <LocalAiOptions settings={settings} />,
description: "Run LLMs locally on your own machine.",
},
{
name: "Novita AI",
value: "novita",
logo: NovitaLogo,
options: (settings) => <NovitaLLMOptions settings={settings} />,
description:
"Reliable, Scalable, and Cost-Effective for LLMs from Novita AI",
},
{
name: "KoboldCPP",
value: "koboldcpp",

View File

@ -37,6 +37,13 @@ export const LOCALAI_COMMON_URLS = [
"http://172.17.0.1:8080/v1",
];
// Candidate base URLs to auto-probe for a locally running Dell Pro AI Studio
// (DPAIS) service. All entries target port 8553; the host.docker.internal
// entry covers the case where this app runs inside Docker and DPAIS runs on
// the host machine.
export const DPAIS_COMMON_URLS = [
  "http://127.0.0.1:8553/v1",
  "http://0.0.0.0:8553/v1",
  "http://localhost:8553/v1",
  "http://host.docker.internal:8553/v1",
];
export const NVIDIA_NIM_COMMON_URLS = [
"http://127.0.0.1:8000/v1/version",
"http://localhost:8000/v1/version",

View File

@ -574,6 +574,12 @@ const SystemSettings = {
// PPIO API keys
PPIOApiKey: !!process.env.PPIO_API_KEY,
PPIOModelPref: process.env.PPIO_MODEL_PREF,
// Dell Pro AI Studio Keys
DellProAiStudioBasePath: process.env.DPAIS_LLM_BASE_PATH,
DellProAiStudioModelPref: process.env.DPAIS_LLM_MODEL_PREF,
DellProAiStudioTokenLimit:
process.env.DPAIS_LLM_MODEL_TOKEN_LIMIT ?? 4096,
};
},

View File

@ -797,6 +797,8 @@ ${this.getHistory({ to: route.to })
return new Providers.PPIOProvider({ model: config.model });
case "gemini":
return new Providers.GeminiProvider({ model: config.model });
case "dpais":
return new Providers.DellProAiStudioProvider({ model: config.model });
default:
throw new Error(
`Unknown provider: ${config.provider}. Please use a valid provider.`

View File

@ -22,6 +22,7 @@ const NovitaProvider = require("./novita.js");
const NvidiaNimProvider = require("./nvidiaNim.js");
const PPIOProvider = require("./ppio.js");
const GeminiProvider = require("./gemini.js");
const DellProAiStudioProvider = require("./dellProAiStudio.js");
module.exports = {
OpenAIProvider,
@ -48,4 +49,5 @@ module.exports = {
NvidiaNimProvider,
PPIOProvider,
GeminiProvider,
DellProAiStudioProvider,
};

View File

@ -189,6 +189,16 @@ class AgentHandler {
if (!process.env.GEMINI_API_KEY)
throw new Error("Gemini API key must be provided to use agents.");
break;
case "dpais":
if (!process.env.DPAIS_LLM_BASE_PATH)
throw new Error(
"Dell Pro AI Studio base path must be provided to use agents."
);
if (!process.env.DPAIS_LLM_MODEL_PREF)
throw new Error(
"Dell Pro AI Studio model must be set to use agents."
);
break;
default:
throw new Error(
@ -256,6 +266,8 @@ class AgentHandler {
return process.env.PPIO_MODEL_PREF ?? "qwen/qwen2.5-32b-instruct";
case "gemini":
return process.env.GEMINI_LLM_MODEL_PREF ?? "gemini-2.0-flash-lite";
case "dpais":
return process.env.DPAIS_LLM_MODEL_PREF;
default:
return null;
}

View File

@ -32,6 +32,7 @@ const SUPPORT_CUSTOM_MODELS = [
"xai",
"gemini",
"ppio",
"dpais",
];
async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@ -81,6 +82,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
return await getGeminiModels(apiKey);
case "ppio":
return await getPPIOModels(apiKey);
case "dpais":
return await getDellProAiStudioModels(basePath);
default:
return { models: [], error: "Invalid provider for custom models" };
}
@ -635,6 +638,43 @@ async function getPPIOModels() {
return { models, error: null };
}
/**
 * Lists the text-generation models available from a local Dell Pro AI Studio
 * (DPAIS) instance via its OpenAI-compatible API.
 * @param {string|null} basePath - Base URL of the DPAIS service
 *   (e.g. "http://127.0.0.1:8553/v1"). Falls back to the DPAIS_LLM_BASE_PATH
 *   env var — the same key mapped by DellProAiStudioBasePath elsewhere in
 *   the app.
 * @returns {Promise<{models: Array<{id: string, name: string, organization: string}>, error: string|null}>}
 *   Never rejects: on any failure (bad URL, unreachable service) resolves
 *   with an empty model list and a human-readable error string.
 */
async function getDellProAiStudioModels(basePath = null) {
  const { OpenAI: OpenAIApi } = require("openai");
  try {
    // Reduce the configured path to its origin: DPAIS mounts its
    // OpenAI-compatible endpoints under /v1/openai, not the plain /v1
    // path users configure. `new URL` also validates the input and throws
    // into our catch below when neither basePath nor the env var is set.
    // FIX: fallback previously read DELL_PRO_AI_STUDIO_BASE_PATH, an env key
    // written nowhere in the app (settings write DPAIS_LLM_BASE_PATH), so the
    // fallback could never resolve.
    const { origin } = new URL(basePath || process.env.DPAIS_LLM_BASE_PATH);
    const openai = new OpenAIApi({
      baseURL: `${origin}/v1/openai`,
      apiKey: null, // local service; no API key required
    });
    const models = await openai.models
      .list()
      .then((results) => results.data)
      .then((models) => {
        return models
          .filter((model) => model.capability === "TextToText") // Only include text-to-text models for this handler
          .map((model) => {
            return {
              id: model.id,
              name: model.name,
              organization: model.owned_by,
            };
          });
      })
      .catch((e) => {
        throw new Error(e.message);
      });
    return { models, error: null };
  } catch (e) {
    console.error(`getDellProAiStudioModels`, e.message);
    return {
      models: [],
      error: "Could not reach Dell Pro Ai Studio from the provided base path",
    };
  }
}
module.exports = {
getCustomModels,
};

View File

@ -203,6 +203,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
case "ppio":
const { PPIOLLM } = require("../AiProviders/ppio");
return new PPIOLLM(embedder, model);
case "dpais":
const { DellProAiStudioLLM } = require("../AiProviders/dellProAiStudio");
return new DellProAiStudioLLM(embedder, model);
default:
throw new Error(
`ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
@ -347,6 +350,9 @@ function getLLMProviderClass({ provider = null } = {}) {
case "ppio":
const { PPIOLLM } = require("../AiProviders/ppio");
return PPIOLLM;
case "dpais":
const { DellProAiStudioLLM } = require("../AiProviders/dellProAiStudio");
return DellProAiStudioLLM;
default:
return null;
}

View File

@ -262,6 +262,20 @@ const KEY_MAPPING = {
checks: [nonZero],
},
// Dell Pro AI Studio Settings
DellProAiStudioBasePath: {
envKey: "DPAIS_LLM_BASE_PATH",
checks: [isNotEmpty, validDockerizedUrl],
},
DellProAiStudioModelPref: {
envKey: "DPAIS_LLM_MODEL_PREF",
checks: [isNotEmpty],
},
DellProAiStudioTokenLimit: {
envKey: "DPAIS_LLM_MODEL_TOKEN_LIMIT",
checks: [nonZero],
},
EmbeddingEngine: {
envKey: "EMBEDDING_ENGINE",
checks: [supportedEmbeddingModel],
@ -765,6 +779,7 @@ function supportedLLM(input = "") {
"xai",
"nvidia-nim",
"ppio",
"dpais",
].includes(input);
return validSelection ? null : `${input} is not a valid LLM provider.`;
}