merlyn/server/utils/AiProviders/moonshotAi/index.js

const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
  LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
  handleDefaultStreamResponseV2,
  formatChatHistory,
} = require("../../helpers/chat/responses");
const { MODEL_MAP } = require("../modelMap");
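
/**
 * LLM provider for Moonshot AI's OpenAI-compatible chat completion API.
 * Requires the MOONSHOT_AI_API_KEY environment variable; the model is taken
 * from the modelPreference argument, then MOONSHOT_AI_MODEL_PREF, and falls
 * back to "moonshot-v1-32k".
 */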
class MoonshotAiLLM {
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.MOONSHOT_AI_API_KEY)
      throw new Error("No Moonshot AI API key was set.");

    this.className = "MoonshotAiLLM";
    const { OpenAI: OpenAIApi } = require("openai");
    this.openai = new OpenAIApi({
      baseURL: "https://api.moonshot.ai/v1",
      apiKey: process.env.MOONSHOT_AI_API_KEY,
    });
    this.model =
      modelPreference ||
      process.env.MOONSHOT_AI_MODEL_PREF ||
      "moonshot-v1-32k";
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };
    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    this.log(
      `Initialized ${this.model} with context window ${this.promptWindowLimit()}`
    );
  }
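
  /** Namespaced console logger for this provider. */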
  log(text, ...args) {
    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
  }
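
  /**
   * Wraps retrieved context snippets in [CONTEXT n]...[END CONTEXT n] markers
   * under a "Context:" header for inclusion in the system prompt.
   */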
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  /**
   * Generates appropriate content array for a message + attachments.
   * @param {{userPrompt: string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }
    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
        },
      });
    }
    return content.flat();
  }
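
  /** Streaming is supported for Moonshot AI's chat completion endpoint. */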
  streamingEnabled() {
    return true;
  }
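
  /**
   * Context window (in tokens) for the current model, looked up from
   * MODEL_MAP with a conservative 8,192-token fallback.
   */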
  promptWindowLimit() {
    return MODEL_MAP.get("moonshot", this.model) ?? 8_192;
  }
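
  /**
   * Builds the OpenAI-style message array: a system message (system prompt
   * plus appended context), the formatted chat history, and the latest user
   * turn with any image attachments.
   */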
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [],
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent),
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }
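
  /**
   * Constructs the prompt and runs it through the shared message compressor
   * so the final message array fits within this model's context window.
   */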
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
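
  /**
   * Non-streaming chat completion. The request is wrapped in
   * LLMPerformanceMonitor so the response can report token usage, duration,
   * output tokens/sec, model, and a timestamp alongside the text response.
   * Returns null when the API responds without any choices.
   */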
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );

    if (
      !Object.prototype.hasOwnProperty.call(result.output, "choices") ||
      result.output.choices.length === 0
    )
      return null;

    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        prompt_tokens: result.output.usage.prompt_tokens || 0,
        completion_tokens: result.output.usage.completion_tokens || 0,
        total_tokens: result.output.usage.total_tokens || 0,
        outputTps: result.output.usage.completion_tokens / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }
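
  /**
   * Streaming chat completion. The stream is wrapped by
   * LLMPerformanceMonitor.measureStream so the same performance metrics can
   * be reported once the stream finishes.
   */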
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
      this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
      }),
      messages,
      true,
      this.model
    );
    return measuredStreamRequest;
  }
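
  /** Delegates stream chunk handling to the shared V2 stream response handler. */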
  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }
}
module.exports = { MoonshotAiLLM };
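
// Example usage (a minimal sketch, not part of this module): assumes
// MOONSHOT_AI_API_KEY is set in the environment and that callers follow the
// usual compressMessages -> getChatCompletion flow shared by providers here.
//
//   const { MoonshotAiLLM } = require("./index.js");
//   const llm = new MoonshotAiLLM(null, "moonshot-v1-32k");
//   // inside an async function:
//   const messages = await llm.compressMessages(
//     { systemPrompt: "You are a helpful assistant.", userPrompt: "Hello!" },
//     []
//   );
//   const { textResponse, metrics } = await llm.getChatCompletion(messages, {
//     temperature: llm.defaultTemp,
//   });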