+ The prompt that will be used on this workspace. Define the
+ context and instructions for the AI to generate a response.
+ You should provide a carefully crafted prompt so the AI can
+ generate a relevant and accurate response.
+
+
+
+
- please reload the page to see the results of the import.
+
+ please reload the page to see the results of the import.
+
diff --git a/server/utils/vectorDbProviders/lance/index.js b/server/utils/vectorDbProviders/lance/index.js
index 293e835a..21d962ff 100644
--- a/server/utils/vectorDbProviders/lance/index.js
+++ b/server/utils/vectorDbProviders/lance/index.js
@@ -5,6 +5,7 @@ const { RecursiveCharacterTextSplitter } = require("langchain/text_splitter");
const { storeVectorResult, cachedVectorInformation } = require("../../files");
const { Configuration, OpenAIApi } = require("openai");
const { v4: uuidv4 } = require("uuid");
+const { chatPrompt } = require("../../chats");

// Since we roll our own results for prompting we
// have to manually curate sources as well.
@@ -260,7 +261,7 @@ const LanceDb = {
);
const prompt = {
role: "system",
- content: `Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed.
+ content: `${chatPrompt(workspace)}
Context:
${contextTexts
.map((text, i) => {
@@ -309,7 +310,7 @@ const LanceDb = {
);
const prompt = {
role: "system",
- content: `Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed.
+ content: `${chatPrompt(workspace)}
Context:
${contextTexts
.map((text, i) => {
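
The chatPrompt helper itself is not part of this diff. Judging from how it replaces the hard-coded system prompt above, it presumably returns the workspace's custom prompt when one is configured and falls back to the previous default text otherwise. A minimal sketch, assuming the workspace field is named openAiPrompt (that name is not confirmed by this diff):

// server/utils/chats/index.js (sketch only, not part of this diff)
// Falls back to the old hard-coded system prompt when the workspace
// has no custom prompt configured.
function chatPrompt(workspace) {
  return (
    workspace?.openAiPrompt ??
    "Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed."
  );
}

module.exports = { chatPrompt };
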
diff --git a/server/utils/vectorDbProviders/pinecone/index.js b/server/utils/vectorDbProviders/pinecone/index.js
index dc984f7f..67e4d1ef 100644
--- a/server/utils/vectorDbProviders/pinecone/index.js
+++ b/server/utils/vectorDbProviders/pinecone/index.js
@@ -10,6 +10,7 @@ const { storeVectorResult, cachedVectorInformation } = require("../../files");
const { Configuration, OpenAIApi } = require("openai");
const { v4: uuidv4 } = require("uuid");
const { toChunks, curateSources } = require("../../helpers");
+const { chatPrompt } = require("../../chats");

const Pinecone = {
name: "Pinecone",
@@ -278,7 +279,7 @@ const Pinecone = {
});

const model = this.llm({
- temperature: workspace?.openAiTemp,
+ temperature: workspace?.openAiTemp ?? 0.7,
});
const chain = VectorDBQAChain.fromLLM(model, vectorStore, {
k: 5,
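
The temperature default above uses nullish coalescing rather than ||, which matters because 0 is a valid temperature: ?? only falls back when the value is null or undefined, so an explicit 0 is preserved. A quick illustration:

// ?? only falls back on null/undefined, so an explicit 0 survives:
console.log(0 ?? 0.7);         // 0
console.log(undefined ?? 0.7); // 0.7
console.log(0 || 0.7);         // 0.7 (why || would be wrong here)
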
@@ -318,14 +319,15 @@ const Pinecone = {
);
const prompt = {
role: "system",
- content: `Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed.
-Context:
-${contextTexts
- .map((text, i) => {
- return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
- })
- .join("")}`,
+ content: `${chatPrompt(workspace)}
+ Context:
+ ${contextTexts
+ .map((text, i) => {
+ return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+ })
+ .join("")}`,
};
+
const memory = [prompt, ...chatHistory, { role: "user", content: input }];
const responseText = await this.getChatCompletion(this.openai(), memory, {