* Add automatic chat mode with native tool calling support
Introduces a new automatic chat mode (now the default) that invokes tools on its own whenever the provider supports native tool calling, and shows or hides the @agent command depending on whether native tooling is available.
- Add supportsNativeToolCalling() to AI providers (OpenAI, Anthropic, and Azure always support it; others opt in via ENV; see the sketch after this commit list)
- Update all locale translations with new mode descriptions
- Enhance translator to preserve Trans component tags
- Remove deprecated ability tags UI
* rebase translations
* WIP on image attachments. Supports initial image attachment + subsequent attachments
* persist images
* Image attachments and updates for providers
* desktop pre-change
* always show command on failure
* add back gemini streaming detection
* move provider native tooling flag to Provider func
* whoops - forgot to delete
* strip "@agent" from prompts to prevent weird replies
* translations for automatic-mode (#5145)
* translations for automatic-mode
* rebase
* translations
* lint
* fix dead translations
* change default for now to chat mode just for rollout
* remove pfp for workspace
* passthrough workspace for showAgentCommand detection and rendering
* Agent API automatic mode support
* ephemeral attachments passthrough
* support reading of pinned documents in agent context
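The ENV opt-in mentioned in the first commit above can be pictured as a small helper that each provider calls with its own slug; the real OpenRouter provider calls this.supportsNativeToolCallingViaEnv("openrouter"), as the source below shows. This is a minimal sketch under assumptions: the AGENT_OPENROUTER_NATIVE_TOOL_CALLING variable name and the helper body are illustrative, not the repository's actual implementation.

```js
// Hypothetical sketch of an ENV-gated opt-in for native tool calling.
// Assumption: a per-provider variable such as AGENT_OPENROUTER_NATIVE_TOOL_CALLING=true.
// The real ENV key and helper in the repository may differ.
function supportsNativeToolCallingViaEnv(providerSlug) {
  const key = `AGENT_${providerSlug.toUpperCase()}_NATIVE_TOOL_CALLING`;
  // Default to false when the variable is unset or not exactly "true".
  return String(process.env[key] ?? "").toLowerCase() === "true";
}

// e.g. AGENT_OPENROUTER_NATIVE_TOOL_CALLING=true node check.js
console.log(supportsNativeToolCallingViaEnv("openrouter"));
```

Providers whose tool-calling support never varies (OpenAI, Anthropic, Azure) can simply return true from supportsNativeToolCalling() and skip the ENV check.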
196 lines · 5.3 KiB · JavaScript
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
const { tooledStream, tooledComplete } = require("./helpers/tooled.js");
const { RetryError } = require("../error.js");

/**
 * The agent provider for the OpenRouter provider.
 * Supports true OpenAI-compatible tool calling when enabled via ENV,
 * falling back to the UnTooled prompt-based approach otherwise.
 * @extends {Provider}
 * @extends {UnTooled}
 */
class OpenRouterProvider extends InheritMultiple([Provider, UnTooled]) {
  model;

  constructor(config = {}) {
    const { model = "openrouter/auto" } = config;
    super();
    const client = new OpenAI({
      baseURL: "https://openrouter.ai/api/v1",
      apiKey: process.env.OPENROUTER_API_KEY,
      maxRetries: 3,
      defaultHeaders: {
        "HTTP-Referer": "https://anythingllm.com",
        "X-Title": "AnythingLLM",
      },
    });

    this._client = client;
    this.model = model;
    this.verbose = true;
    this._supportsToolCalling = null;
  }

  get client() {
    return this._client;
  }

  get supportsAgentStreaming() {
    return true;
  }

  /**
   * Whether this provider supports native OpenAI-compatible tool calling.
   * - Since OpenRouter models vary in tool calling support, we check the ENV.
   * - If the ENV is not set, we default to false.
   * @returns {boolean}
   */
  supportsNativeToolCalling() {
    if (this._supportsToolCalling !== null) return this._supportsToolCalling;
    const supportsToolCalling =
      this.supportsNativeToolCallingViaEnv("openrouter");
    if (supportsToolCalling)
      this.providerLog(
        "OpenRouter supports native tool calling is ENABLED via ENV."
      );
    else
      this.providerLog(
        "OpenRouter supports native tool calling is DISABLED via ENV. Will use UnTooled instead."
      );
    this._supportsToolCalling = supportsToolCalling;
    return supportsToolCalling;
  }

  // Non-streaming completion handler used by the UnTooled (prompt-based) fallback path.
  async #handleFunctionCallChat({ messages = [] }) {
    return await this.client.chat.completions
      .create({
        model: this.model,
        messages,
        user: this.executingUserId,
      })
      .then((result) => {
        if (!result.hasOwnProperty("choices"))
          throw new Error("OpenRouter chat: No results!");
        if (result.choices.length === 0)
          throw new Error("OpenRouter chat: No results length!");
        return result.choices[0].message.content;
      })
      .catch((_) => {
        return null;
      });
  }

  // Streaming completion handler used by the UnTooled (prompt-based) fallback path.
  async #handleFunctionCallStream({ messages = [] }) {
    return await this.client.chat.completions.create({
      model: this.model,
      stream: true,
      messages,
      user: this.executingUserId,
    });
  }

  /**
   * Stream a chat completion with tool calling support.
   * Uses native tool calling when enabled via ENV, otherwise falls back to UnTooled.
   */
  async stream(messages, functions = [], eventHandler = null) {
    const useNative =
      functions.length > 0 && (await this.supportsNativeToolCalling());

    if (!useNative) {
      return await UnTooled.prototype.stream.call(
        this,
        messages,
        functions,
        this.#handleFunctionCallStream.bind(this),
        eventHandler
      );
    }

    this.providerLog(
      "Provider.stream (tooled) - will process this chat completion."
    );

    try {
      return await tooledStream(
        this.client,
        this.model,
        messages,
        functions,
        eventHandler,
        { provider: this }
      );
    } catch (error) {
      console.error(error.message, error);
      if (error instanceof OpenAI.AuthenticationError) throw error;
      if (
        error instanceof OpenAI.RateLimitError ||
        error instanceof OpenAI.InternalServerError ||
        error instanceof OpenAI.APIError
      ) {
        throw new RetryError(error.message);
      }
      throw error;
    }
  }

  /**
   * Create a non-streaming completion with tool calling support.
   * Uses native tool calling when enabled via ENV, otherwise falls back to UnTooled.
   */
  async complete(messages, functions = []) {
    const useNative =
      functions.length > 0 && (await this.supportsNativeToolCalling());

    if (!useNative) {
      return await UnTooled.prototype.complete.call(
        this,
        messages,
        functions,
        this.#handleFunctionCallChat.bind(this)
      );
    }

    try {
      const result = await tooledComplete(
        this.client,
        this.model,
        messages,
        functions,
        this.getCost.bind(this),
        { provider: this }
      );

      if (result.retryWithError) {
        return this.complete([...messages, result.retryWithError], functions);
      }

      return result;
    } catch (error) {
      if (error instanceof OpenAI.AuthenticationError) throw error;
      if (
        error instanceof OpenAI.RateLimitError ||
        error instanceof OpenAI.InternalServerError ||
        error instanceof OpenAI.APIError
      ) {
        throw new RetryError(error.message);
      }
      throw error;
    }
  }

  /**
   * Get the cost of the completion.
   * @param _usage The completion to get the cost for.
   * @returns The cost of the completion.
   */
  getCost(_usage) {
    return 0;
  }
}

module.exports = OpenRouterProvider;
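A minimal usage sketch for the provider above, assuming the file is saved as ./OpenRouterProvider.js alongside its helper modules and that OPENROUTER_API_KEY is set; the require path, the get_weather tool definition, and the message shape are illustrative assumptions, while the OpenRouterProvider API itself comes from the source.

```js
// Illustrative only: the require path and tool definition are assumptions;
// only the constructor and complete() signature are taken from the source above.
const OpenRouterProvider = require("./OpenRouterProvider.js");

async function main() {
  const provider = new OpenRouterProvider({ model: "openrouter/auto" });

  // Hypothetical tool in the OpenAI function-calling schema.
  const functions = [
    {
      name: "get_weather",
      description: "Look up the current weather for a city.",
      parameters: {
        type: "object",
        properties: { city: { type: "string" } },
        required: ["city"],
      },
    },
  ];

  // Falls back to the UnTooled prompt-based path unless native tool calling
  // was enabled for OpenRouter via ENV (see supportsNativeToolCalling above).
  const result = await provider.complete(
    [{ role: "user", content: "What's the weather in Berlin?" }],
    functions
  );
  console.log(result);
}

main().catch(console.error);
```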