diff --git a/frontend/src/components/LLMSelection/DPAISOptions/index.jsx b/frontend/src/components/LLMSelection/DPAISOptions/index.jsx
new file mode 100644
index 00000000..e2c7187a
--- /dev/null
+++ b/frontend/src/components/LLMSelection/DPAISOptions/index.jsx
@@ -0,0 +1,181 @@
+import React, { useEffect, useState } from "react";
+import { CaretDown, CaretUp } from "@phosphor-icons/react";
+import System from "@/models/system";
+import PreLoader from "@/components/Preloader";
+import { DPAIS_COMMON_URLS } from "@/utils/constants";
+import useProviderEndpointAutoDiscovery from "@/hooks/useProviderEndpointAutoDiscovery";
+
+export default function DellProAIStudioOptions({
+ settings,
+ showAlert = false,
+}) {
+ const {
+ autoDetecting: loading,
+ basePath,
+ basePathValue,
+ showAdvancedControls,
+ setShowAdvancedControls,
+ handleAutoDetectClick,
+ } = useProviderEndpointAutoDiscovery({
+ provider: "dpais",
+ initialBasePath: settings?.DellProAiStudioBasePath,
+ ENDPOINTS: DPAIS_COMMON_URLS,
+ });
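+
+  // The auto-discovery hook probes DPAIS_COMMON_URLS for a responding
+  // endpoint so most users never have to type the base URL by hand; manual
+  // entry stays available under the advanced controls below.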
+
+  return (
+    <div className="w-full flex flex-col gap-y-7">
+      <div className="w-full flex items-start gap-[36px] mt-1.5">
+        {!settings?.credentialsOnly && (
+          <>
+            <DellProAiStudioModelSelection
+              settings={settings}
+              basePath={basePath.value}
+            />
+            <div className="flex flex-col w-60">
+              <label className="text-white text-sm font-semibold block mb-2">
+                Token context window
+              </label>
+              <input
+                type="number"
+                name="DellProAiStudioTokenLimit"
+                className="border-none bg-theme-settings-input-bg text-white text-sm rounded-lg block w-full p-2.5"
+                placeholder="4096"
+                min={1}
+                onScroll={(e) => e.target.blur()}
+                defaultValue={settings?.DellProAiStudioTokenLimit}
+                required={true}
+                autoComplete="off"
+              />
+            </div>
+          </>
+        )}
+      </div>
+      <div className="flex justify-start mt-4">
+        <button
+          onClick={(e) => {
+            e.preventDefault();
+            setShowAdvancedControls(!showAdvancedControls);
+          }}
+          className="border-none text-white hover:text-white/70 flex items-center text-sm"
+        >
+          {showAdvancedControls ? "Hide" : "Show"} advanced controls
+          {showAdvancedControls ? (
+            <CaretUp size={14} className="ml-1" />
+          ) : (
+            <CaretDown size={14} className="ml-1" />
+          )}
+        </button>
+      </div>
+      <div hidden={!showAdvancedControls}>
+        <div className="flex flex-col w-60">
+          <div className="flex justify-between items-center mb-2">
+            <label className="text-white text-sm font-semibold">
+              Dell Pro AI Studio Base URL
+            </label>
+            {loading ? (
+              <PreLoader size="6" />
+            ) : (
+              <>
+                {!basePathValue.value && (
+                  <button
+                    onClick={handleAutoDetectClick}
+                    className="bg-primary-button text-xs font-medium px-2 py-1 rounded-lg text-white hover:bg-secondary"
+                  >
+                    Auto-Detect
+                  </button>
+                )}
+              </>
+            )}
+          </div>
+          <input
+            type="url"
+            name="DellProAiStudioBasePath"
+            className="border-none bg-theme-settings-input-bg text-white text-sm rounded-lg block w-full p-2.5"
+            placeholder="http://localhost:8553"
+            value={basePathValue.value}
+            required={true}
+            autoComplete="off"
+            spellCheck={false}
+            onChange={basePath.onChange}
+            onBlur={basePath.onBlur}
+          />
+        </div>
+      </div>
+    </div>
+  );
+}
+
+function DellProAiStudioModelSelection({ settings, basePath = null }) {
+ const [customModels, setCustomModels] = useState([]);
+ const [loading, setLoading] = useState(true);
+
+ useEffect(() => {
+ async function findCustomModels() {
+ if (!basePath) {
+ setCustomModels([]);
+ setLoading(false);
+ return;
+ }
+ setLoading(true);
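+      // System.customModels(provider, apiKey, basePath, timeoutMs): ask the
+      // DPAIS endpoint directly which chat models it currently serves; the
+      // short 2s timeout keeps the settings UI responsive when DPAIS is down.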
+ const { models } = await System.customModels(
+ "dpais",
+ null,
+ basePath,
+ 2_000
+ );
+ setCustomModels(models || []);
+ setLoading(false);
+ }
+ findCustomModels();
+ }, [basePath]);
+
+  if (loading || customModels.length === 0) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-2">
+          Chat Model Selection
+        </label>
+        <select name="DellProAiStudioModelPref" disabled={true} className="border-none bg-theme-settings-input-bg text-white text-sm rounded-lg block w-full p-2.5">
+          <option disabled={true} selected={true}>
+            {basePath ? "--loading available models--" : "Enter Dell Pro AI Studio URL first"}
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-2">
+        Chat Model Selection
+      </label>
+      <select name="DellProAiStudioModelPref" required={true} className="border-none bg-theme-settings-input-bg text-white text-sm rounded-lg block w-full p-2.5">
+        {customModels.map((model) => (
+          <option key={model.id} value={model.id} selected={settings?.DellProAiStudioModelPref === model.id}>
+            {model.id}
+          </option>
+        ))}
+      </select>
+    </div>
+  );
+}
diff --git a/frontend/src/media/llmprovider/dpais.png b/frontend/src/media/llmprovider/dpais.png
new file mode 100644
index 00000000..3678f969
Binary files /dev/null and b/frontend/src/media/llmprovider/dpais.png differ
diff --git a/server/utils/AiProviders/dellProAiStudio/index.js b/server/utils/AiProviders/dellProAiStudio/index.js
new file mode 100644
index 00000000..2cca4c8b
--- /dev/null
+++ b/server/utils/AiProviders/dellProAiStudio/index.js
@@ -0,0 +1,210 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const {
+ handleDefaultStreamResponseV2,
+ formatChatHistory,
+} = require("../../helpers/chat/responses");
+const {
+ LLMPerformanceMonitor,
+} = require("../../helpers/chat/LLMPerformanceMonitor");
+
+// OpenAI-compatible chat completion client pointed at Dell Pro AI Studio's local endpoint
+class DellProAiStudioLLM {
+ constructor(embedder = null, modelPreference = null) {
+ if (!process.env.DPAIS_LLM_BASE_PATH)
+ throw new Error("No Dell Pro AI Studio Base Path was set.");
+
+ const { OpenAI: OpenAIApi } = require("openai");
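+    // DPAIS serves an OpenAI-compatible API locally, so the stock openai
+    // client is simply pointed at the parsed base path; no API key is passed.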
+ this.dpais = new OpenAIApi({
+ baseURL: DellProAiStudioLLM.parseBasePath(),
+ apiKey: null,
+ });
+
+ this.model = modelPreference || process.env.DPAIS_LLM_MODEL_PREF;
+ this.limits = {
+ history: this.promptWindowLimit() * 0.15,
+ system: this.promptWindowLimit() * 0.15,
+ user: this.promptWindowLimit() * 0.7,
+ };
+
+ this.embedder = embedder ?? new NativeEmbedder();
+ this.defaultTemp = 0.7;
+ this.log(
+ `Dell Pro AI Studio LLM initialized with ${this.model}. ctx: ${this.promptWindowLimit()}`
+ );
+ }
+
+ /**
+ * Parse the base path for the Dell Pro AI Studio API
+ * so we can use it for inference requests
+ * @param {string} providedBasePath
+ * @returns {string}
+ */
+ static parseBasePath(providedBasePath = process.env.DPAIS_LLM_BASE_PATH) {
+ try {
+ const baseURL = new URL(providedBasePath);
+ const basePath = `${baseURL.origin}/v1/openai`;
+ return basePath;
+ } catch (e) {
+ return null;
+ }
+ }
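+  // e.g. parseBasePath("http://localhost:8553")           => "http://localhost:8553/v1/openai"
+  //      parseBasePath("http://localhost:8553/v1/openai") => "http://localhost:8553/v1/openai"
+  //      parseBasePath("not a url")                       => null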
+
+ log(text, ...args) {
+ console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
+ }
+
+ #appendContext(contextTexts = []) {
+ if (!contextTexts || !contextTexts.length) return "";
+ return (
+ "\nContext:\n" +
+ contextTexts
+ .map((text, i) => {
+ return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+ })
+ .join("")
+ );
+ }
+
+ streamingEnabled() {
+ return "streamGetChatCompletion" in this;
+ }
+
+ static promptWindowLimit(_modelName) {
+ const limit = process.env.DPAIS_LLM_MODEL_TOKEN_LIMIT || 4096;
+ if (!limit || isNaN(Number(limit)))
+ throw new Error("No Dell Pro AI Studio token context limit was set.");
+ return Number(limit);
+ }
+
+  // Token limit defaults to a 4096-token window when unset; it only throws
+  // when the env var is present but not numeric.
+ promptWindowLimit() {
+ const limit = process.env.DPAIS_LLM_MODEL_TOKEN_LIMIT || 4096;
+ if (!limit || isNaN(Number(limit)))
+ throw new Error("No Dell Pro AI Studio token context limit was set.");
+ return Number(limit);
+ }
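+  // e.g. DPAIS_LLM_MODEL_TOKEN_LIMIT=8192  -> 8192
+  //      DPAIS_LLM_MODEL_TOKEN_LIMIT unset -> 4096 (default window)
+  //      DPAIS_LLM_MODEL_TOKEN_LIMIT=abc   -> throws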
+
+ async isValidChatCompletionModel(_ = "") {
+ return true;
+ }
+
+ /**
+ * Generates appropriate content array for a message + attachments.
+ * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
+ * @returns {string|object[]}
+ */
+ #generateContent({ userPrompt, attachments = [] }) {
+ if (!attachments.length) return userPrompt;
+
+ const content = [{ type: "text", text: userPrompt }];
+ for (let attachment of attachments) {
+ content.push({
+ type: "image_url",
+ image_url: {
+ url: attachment.contentString,
+ detail: "auto",
+ },
+ });
+ }
+ return content.flat();
+ }
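+  // Shape of the returned content for one image attachment (illustrative):
+  //   [
+  //     { type: "text", text: "Describe this image" },
+  //     { type: "image_url", image_url: { url: "data:image/png;base64,...", detail: "auto" } },
+  //   ]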
+
+  /**
+   * Construct the chat message array for this model.
+   * @param {{systemPrompt: string, contextTexts: string[], chatHistory: object[], userPrompt: string}} param0
+   * @returns {object[]}
+   */
+ constructPrompt({
+ systemPrompt = "",
+ contextTexts = [],
+ chatHistory = [],
+ userPrompt = "",
+ _attachments = [], // not used for Dell Pro AI Studio - `attachments` passed in is ignored
+ }) {
+ const prompt = {
+ role: "system",
+ content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+ };
+ return [
+ prompt,
+ ...formatChatHistory(chatHistory, this.#generateContent),
+ {
+ role: "user",
+        content: this.#generateContent({ userPrompt, attachments: [] }),
+ },
+ ];
+ }
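+  // Illustrative result:
+  //   [
+  //     { role: "system", content: "<systemPrompt>\nContext:\n[CONTEXT 0]:..." },
+  //     ...chatHistory mapped through #generateContent,
+  //     { role: "user", content: "<userPrompt>" },
+  //   ]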
+
+ async getChatCompletion(messages = null, { temperature = 0.7 }) {
+ if (!this.model)
+ throw new Error(
+        `Dell Pro AI Studio chat: ${this.model} is not a valid or defined model for chat completion!`
+ );
+
+ const result = await LLMPerformanceMonitor.measureAsyncFunction(
+ this.dpais.chat.completions.create({
+ model: this.model,
+ messages,
+ temperature,
+ })
+ );
+
+ if (
+ !result.output.hasOwnProperty("choices") ||
+ result.output.choices.length === 0
+ )
+ return null;
+
+ return {
+ textResponse: result.output.choices[0].message.content,
+ metrics: {
+ prompt_tokens: result.output.usage?.prompt_tokens || 0,
+ completion_tokens: result.output.usage?.completion_tokens || 0,
+ total_tokens: result.output.usage?.total_tokens || 0,
+        outputTps: (result.output.usage?.completion_tokens || 0) / result.duration,
+ duration: result.duration,
+ },
+ };
+ }
+
+ async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+ if (!this.model)
+ throw new Error(
+        `Dell Pro AI Studio chat: ${this.model} is not a valid or defined model for chat completion!`
+ );
+
+ const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
+ this.dpais.chat.completions.create({
+ model: this.model,
+ stream: true,
+ messages,
+ temperature,
+ }),
+ messages
+ );
+ return measuredStreamRequest;
+ }
+
+ handleStream(response, stream, responseProps) {
+ return handleDefaultStreamResponseV2(response, stream, responseProps);
+ }
+
+ // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+ async embedTextInput(textInput) {
+ return await this.embedder.embedTextInput(textInput);
+ }
+ async embedChunks(textChunks = []) {
+ return await this.embedder.embedChunks(textChunks);
+ }
+
+ async compressMessages(promptArgs = {}, rawHistory = []) {
+ const { messageArrayCompressor } = require("../../helpers/chat");
+ const messageArray = this.constructPrompt(promptArgs);
+ return await messageArrayCompressor(this, messageArray, rawHistory);
+ }
+}
+
+module.exports = {
+ DellProAiStudioLLM,
+};
diff --git a/server/utils/agents/aibitat/providers/dellProAiStudio.js b/server/utils/agents/aibitat/providers/dellProAiStudio.js
new file mode 100644
index 00000000..07f86416
--- /dev/null
+++ b/server/utils/agents/aibitat/providers/dellProAiStudio.js
@@ -0,0 +1,122 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+const {
+ DellProAiStudioLLM,
+} = require("../../../AiProviders/dellProAiStudio/index.js");
+
+/**
+ * The agent provider for Dell Pro AI Studio.
+ */
+class DellProAiStudioProvider extends InheritMultiple([Provider, UnTooled]) {
+ model;
+
+ /**
+ *
+ * @param {{model?: string}} config
+ */
+ constructor(config = {}) {
+ super();
+ const model = config?.model || process.env.DPAIS_LLM_MODEL_PREF;
+ const client = new OpenAI({
+ baseURL: DellProAiStudioLLM.parseBasePath(), // Will use process.env.DPAIS_LLM_BASE_PATH if not provided
+ apiKey: null,
+ });
+
+ this._client = client;
+ this.model = model;
+ this.verbose = true;
+ }
+
+ get client() {
+ return this._client;
+ }
+
+ async #handleFunctionCallChat({ messages = [] }) {
+ return await this.client.chat.completions
+ .create({
+ model: this.model,
+ messages,
+ })
+ .then((result) => {
+ if (!result.hasOwnProperty("choices"))
+ throw new Error("DellProAiStudio chat: No results!");
+ if (result.choices.length === 0)
+ throw new Error("DellProAiStudio chat: No results length!");
+ return result.choices[0].message.content;
+ })
+ .catch((_) => {
+ return null;
+ });
+ }
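+
+  // UnTooled.functionCall drives this: the model is prompted to emit its tool
+  // choice as plain JSON text, which is then parsed, since DPAIS models are
+  // not assumed to support native OpenAI-style tool calling.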
+
+ /**
+ * Create a completion based on the received messages.
+ *
+ * @param messages A list of messages to send to the API.
+ * @param functions
+ * @returns The completion.
+ */
+ async complete(messages, functions = []) {
+ try {
+ let completion;
+ if (functions.length > 0) {
+ const { toolCall, text } = await this.functionCall(
+ messages,
+ functions,
+ this.#handleFunctionCallChat.bind(this)
+ );
+
+ if (toolCall !== null) {
+ this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
+ this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
+ return {
+ result: null,
+ functionCall: {
+ name: toolCall.name,
+ arguments: toolCall.arguments,
+ },
+ cost: 0,
+ };
+ }
+ completion = { content: text };
+ }
+
+ if (!completion?.content) {
+ this.providerLog(
+ "Will assume chat completion without tool call inputs."
+ );
+ const response = await this.client.chat.completions.create({
+ model: this.model,
+ messages: this.cleanMsgs(messages),
+ });
+ completion = response.choices[0].message;
+ }
+
+ // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
+ // from calling the exact same function over and over in a loop within a single chat exchange
+ // _but_ we should enable it to call previously used tools in a new chat interaction.
+ this.deduplicator.reset("runs");
+ return {
+ result: completion.content,
+ cost: 0,
+ };
+ } catch (error) {
+ throw error;
+ }
+ }
+
+ /**
+ * Get the cost of the completion.
+ *
+ * @param _usage The completion to get the cost for.
+ * @returns The cost of the completion.
+   * Stubbed since Dell Pro AI Studio has no cost basis.
+ */
+ getCost(_usage) {
+ return 0;
+ }
+}
+
+module.exports = DellProAiStudioProvider;