diff --git a/README.md b/README.md
index f77cdf2b..68653bdd 100644
--- a/README.md
+++ b/README.md
@@ -82,6 +82,7 @@ Some cool features of AnythingLLM
- [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
- [LM Studio (all)](https://lmstudio.ai)
- [LocalAi (all)](https://localai.io/)
+- [Ollama (all)](https://ollama.ai/)

**Supported Vector Databases:**
diff --git a/docker/.env.example b/docker/.env.example
index 16413ad3..ba33bd5c 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -79,6 +79,11 @@ GID='1000'
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=1000 # The max chunk size in chars a string to embed can be

+# EMBEDDING_ENGINE='ollama'
+# EMBEDDING_BASE_PATH='http://127.0.0.1:11434'
+# EMBEDDING_MODEL_PREF='nomic-embed-text:latest'
+# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
+
###########################################
######## Vector Database Selection ########
###########################################
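Before wiring these values in, the endpoint they point at can be smoke-tested directly. A minimal sketch (assuming a local Ollama instance on the default port, with the model already pulled via `ollama pull nomic-embed-text`) that mirrors the exact request the new embedder makes:

```js
// Smoke test for Ollama's embeddings endpoint (Node 18+, global fetch).
// Assumes Ollama is running at 127.0.0.1:11434 and nomic-embed-text is pulled.
async function testOllamaEmbedding() {
  const res = await fetch("http://127.0.0.1:11434/api/embeddings", {
    method: "POST",
    body: JSON.stringify({
      model: "nomic-embed-text:latest",
      prompt: "Hello from AnythingLLM!",
    }),
  });
  const { embedding } = await res.json();
  console.log(`Received a ${embedding.length}-dimensional vector.`);
}

testOllamaEmbedding();
```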
diff --git a/frontend/src/components/EmbeddingSelection/OllamaOptions/index.jsx b/frontend/src/components/EmbeddingSelection/OllamaOptions/index.jsx
new file mode 100644
index 00000000..dff697f8
--- /dev/null
+++ b/frontend/src/components/EmbeddingSelection/OllamaOptions/index.jsx
@@ -0,0 +1,120 @@
+import React, { useEffect, useState } from "react";
+import System from "@/models/system";
+
+export default function OllamaEmbeddingOptions({ settings }) {
+ const [basePathValue, setBasePathValue] = useState(
+ settings?.EmbeddingBasePath
+ );
+ const [basePath, setBasePath] = useState(settings?.EmbeddingBasePath);
+
+  return (
+    <div className="w-full flex flex-col gap-y-4">
+      <div className="w-full flex items-center gap-4">
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-4">
+            Ollama Base URL
+          </label>
+          <input
+            type="url"
+            name="EmbeddingBasePath"
+            className="bg-zinc-900 text-white text-sm rounded-lg block w-full p-2.5"
+            placeholder="http://127.0.0.1:11434"
+            defaultValue={settings?.EmbeddingBasePath}
+            onChange={(e) => setBasePathValue(e.target.value)}
+            onBlur={() => setBasePath(basePathValue)}
+            required={true}
+            autoComplete="off"
+            spellCheck={false}
+          />
+        </div>
+        <OllamaLLMModelSelection settings={settings} basePath={basePath} />
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-4">
+            Max embedding chunk length
+          </label>
+          <input
+            type="number"
+            name="EmbeddingModelMaxChunkLength"
+            className="bg-zinc-900 text-white text-sm rounded-lg block w-full p-2.5"
+            placeholder="8192"
+            min={1}
+            onScroll={(e) => e.target.blur()}
+            defaultValue={settings?.EmbeddingModelMaxChunkLength}
+            required={false}
+            autoComplete="off"
+          />
+        </div>
+      </div>
+    </div>
+  );
+}
+
+function OllamaLLMModelSelection({ settings, basePath = null }) {
+ const [customModels, setCustomModels] = useState([]);
+ const [loading, setLoading] = useState(true);
+
+ useEffect(() => {
+ async function findCustomModels() {
+ if (!basePath) {
+ setCustomModels([]);
+ setLoading(false);
+ return;
+ }
+ setLoading(true);
+ const { models } = await System.customModels("ollama", null, basePath);
+ setCustomModels(models || []);
+ setLoading(false);
+ }
+ findCustomModels();
+ }, [basePath]);
+
+  if (loading || customModels.length == 0) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Embedding Model Selection
+        </label>
+        <select name="EmbeddingModelPref" disabled={true} className="bg-zinc-900 text-white text-sm rounded-lg block w-full p-2.5">
+          <option disabled={true} selected={true}>
+            {!!basePath ? "-- loading available models --" : "-- waiting for URL --"}
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-4">
+        Embedding Model Selection
+      </label>
+      <select name="EmbeddingModelPref" required={true} className="bg-zinc-900 text-white text-sm rounded-lg block w-full p-2.5">
+        {customModels.map((model) => (
+          <option key={model.id} value={model.id} selected={settings?.EmbeddingModelPref === model.id}>
+            {model.id}
+          </option>
+        ))}
+      </select>
+    </div>
+  );
+}
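A note on the component above: `basePathValue` tracks every keystroke while `basePath` is only committed on blur, so the model list is re-fetched once the user leaves the URL field rather than on each keypress. The dropdown expects `System.customModels("ollama", null, basePath)` to resolve with a `models` array; judging by how it is consumed, the shape is roughly the following (illustrative values, not an actual server response):

```js
// Hypothetical resolved value of System.customModels("ollama", null, basePath).
// Model ids are examples; the real list comes from the configured Ollama server.
const exampleResult = {
  models: [{ id: "nomic-embed-text:latest" }, { id: "all-minilm:latest" }],
};
```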
diff --git a/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx b/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
index 0629fb90..2e400ad6 100644
--- a/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
@@ -7,12 +7,14 @@ import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
import OpenAiLogo from "@/media/llmprovider/openai.png";
import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
+import OllamaLogo from "@/media/llmprovider/ollama.png";
import PreLoader from "@/components/Preloader";
import ChangeWarningModal from "@/components/ChangeWarning";
import OpenAiOptions from "@/components/EmbeddingSelection/OpenAiOptions";
import AzureAiOptions from "@/components/EmbeddingSelection/AzureAiOptions";
import LocalAiOptions from "@/components/EmbeddingSelection/LocalAiOptions";
import NativeEmbeddingOptions from "@/components/EmbeddingSelection/NativeEmbeddingOptions";
+import OllamaEmbeddingOptions from "@/components/EmbeddingSelection/OllamaOptions";
import EmbedderItem from "@/components/EmbeddingSelection/EmbedderItem";
import { MagnifyingGlass } from "@phosphor-icons/react";
import { useModal } from "@/hooks/useModal";
@@ -108,6 +110,13 @@ export default function GeneralEmbeddingPreference() {
      options: <LocalAiOptions settings={settings} />,
description: "Run embedding models locally on your own machine.",
},
+ {
+ name: "Ollama",
+ value: "ollama",
+ logo: OllamaLogo,
+      options: <OllamaEmbeddingOptions settings={settings} />,
+ description: "Run embedding models locally on your own machine.",
+ },
];
useEffect(() => {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index 51dc7300..5beec3c1 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -221,6 +221,13 @@ const EMBEDDING_ENGINE_PRIVACY = {
],
logo: LocalAiLogo,
},
+ ollama: {
+ name: "Ollama",
+ description: [
+ "Your document text is embedded privately on the server running Ollama",
+ ],
+ logo: OllamaLogo,
+ },
};
export default function DataHandling({ setHeader, setForwardBtn, setBackBtn }) {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/EmbeddingPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/EmbeddingPreference/index.jsx
index fa17eebd..1932309e 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/EmbeddingPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/EmbeddingPreference/index.jsx
@@ -4,10 +4,12 @@ import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
import OpenAiLogo from "@/media/llmprovider/openai.png";
import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
+import OllamaLogo from "@/media/llmprovider/ollama.png";
import NativeEmbeddingOptions from "@/components/EmbeddingSelection/NativeEmbeddingOptions";
import OpenAiOptions from "@/components/EmbeddingSelection/OpenAiOptions";
import AzureAiOptions from "@/components/EmbeddingSelection/AzureAiOptions";
import LocalAiOptions from "@/components/EmbeddingSelection/LocalAiOptions";
+import OllamaEmbeddingOptions from "@/components/EmbeddingSelection/OllamaOptions";
import EmbedderItem from "@/components/EmbeddingSelection/EmbedderItem";
import System from "@/models/system";
import paths from "@/utils/paths";
@@ -70,6 +72,13 @@ export default function EmbeddingPreference({
      options: <LocalAiOptions settings={settings} />,
description: "Run embedding models locally on your own machine.",
},
+ {
+ name: "Ollama",
+ value: "ollama",
+ logo: OllamaLogo,
+      options: <OllamaEmbeddingOptions settings={settings} />,
+ description: "Run embedding models locally on your own machine.",
+ },
];
function handleForward() {
diff --git a/server/.env.example b/server/.env.example
index bed94392..0ca826e8 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -76,6 +76,11 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=1000 # The max chunk size in chars a string to embed can be

+# EMBEDDING_ENGINE='ollama'
+# EMBEDDING_BASE_PATH='http://127.0.0.1:11434'
+# EMBEDDING_MODEL_PREF='nomic-embed-text:latest'
+# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
+
###########################################
######## Vector Database Selection ########
###########################################
diff --git a/server/utils/EmbeddingEngines/ollama/index.js b/server/utils/EmbeddingEngines/ollama/index.js
new file mode 100644
index 00000000..1f77c36e
--- /dev/null
+++ b/server/utils/EmbeddingEngines/ollama/index.js
@@ -0,0 +1,90 @@
+const { maximumChunkLength } = require("../../helpers");
+
+class OllamaEmbedder {
+ constructor() {
+ if (!process.env.EMBEDDING_BASE_PATH)
+ throw new Error("No embedding base path was set.");
+ if (!process.env.EMBEDDING_MODEL_PREF)
+ throw new Error("No embedding model was set.");
+
+ this.basePath = `${process.env.EMBEDDING_BASE_PATH}/api/embeddings`;
+ this.model = process.env.EMBEDDING_MODEL_PREF;
+    // Limit of how many strings we can process in a single pass to stay within resource or network limits
+ this.maxConcurrentChunks = 1;
+ this.embeddingMaxChunkLength = maximumChunkLength();
+ }
+
+ log(text, ...args) {
+ console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
+ }
+
+ async embedTextInput(textInput) {
+ const result = await this.embedChunks([textInput]);
+ return result?.[0] || [];
+ }
+
+ async embedChunks(textChunks = []) {
+ const embeddingRequests = [];
+ this.log(
+ `Embedding ${textChunks.length} chunks of text with ${this.model}.`
+ );
+
+ for (const chunk of textChunks) {
+ embeddingRequests.push(
+ new Promise((resolve) => {
+ fetch(this.basePath, {
+ method: "POST",
+ body: JSON.stringify({
+ model: this.model,
+ prompt: chunk,
+ }),
+ })
+ .then((res) => res.json())
+ .then(({ embedding }) => {
+ resolve({ data: embedding, error: null });
+ return;
+ })
+ .catch((error) => {
+ resolve({ data: [], error: error.message });
+ return;
+ });
+ })
+ );
+ }
+
+ const { data = [], error = null } = await Promise.all(
+ embeddingRequests
+ ).then((results) => {
+      // If any errors were returned from Ollama, abort the entire sequence
+      // because the embeddings will be incomplete.
+
+ const errors = results
+ .filter((res) => !!res.error)
+ .map((res) => res.error)
+ .flat();
+ if (errors.length > 0) {
+        // res.error is already a message string here, so de-duplicate directly.
+        const uniqueErrors = new Set(errors);
+
+ return {
+ data: [],
+ error: Array.from(uniqueErrors).join(", "),
+ };
+ }
+
+ return {
+ data: results.map((res) => res?.data || []),
+ error: null,
+ };
+ });
+
+    if (!!error) throw new Error(`Ollama failed to embed: ${error}`);
+ return data.length > 0 ? data : null;
+ }
+}
+
+module.exports = {
+ OllamaEmbedder,
+};
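For reference, a sketch of exercising the embedder standalone once the env vars above are set (hypothetical usage; inside the app the class is constructed via `getEmbeddingEngineSelection()` in `server/utils/helpers`, as shown in the next hunk):

```js
// Hypothetical standalone usage of OllamaEmbedder (run from the server/ directory).
process.env.EMBEDDING_BASE_PATH = "http://127.0.0.1:11434";
process.env.EMBEDDING_MODEL_PREF = "nomic-embed-text:latest";

const { OllamaEmbedder } = require("./utils/EmbeddingEngines/ollama");

(async () => {
  const embedder = new OllamaEmbedder();
  // embedTextInput returns a single vector...
  const vector = await embedder.embedTextInput("What is AnythingLLM?");
  // ...embedChunks returns one vector per input chunk (or throws on any failure).
  const vectors = await embedder.embedChunks(["chunk one", "chunk two"]);
  console.log(vector.length, vectors.length);
})();
```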
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 8bda716a..a31a3e4f 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -92,6 +92,9 @@ function getEmbeddingEngineSelection() {
case "localai":
const { LocalAiEmbedder } = require("../EmbeddingEngines/localAi");
return new LocalAiEmbedder();
+ case "ollama":
+ const { OllamaEmbedder } = require("../EmbeddingEngines/ollama");
+ return new OllamaEmbedder();
case "native":
const { NativeEmbedder } = require("../EmbeddingEngines/native");
console.log("\x1b[34m[INFO]\x1b[0m Using Native Embedder");
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 247e3ba4..1ca93682 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -135,7 +135,7 @@ const KEY_MAPPING = {
},
EmbeddingBasePath: {
envKey: "EMBEDDING_BASE_PATH",
- checks: [isNotEmpty, validLLMExternalBasePath, validDockerizedUrl],
+ checks: [isNotEmpty, validDockerizedUrl],
},
EmbeddingModelPref: {
envKey: "EMBEDDING_MODEL_PREF",
@@ -355,7 +355,7 @@ function validAnthropicModel(input = "") {
}
function supportedEmbeddingModel(input = "") {
- const supported = ["openai", "azure", "localai", "native"];
+ const supported = ["openai", "azure", "localai", "native", "ollama"];
return supported.includes(input)
? null
: `Invalid Embedding model type. Must be one of ${supported.join(", ")}.`;
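As a quick illustration of the validator change (hypothetical calls; return values follow from the function body above):

```js
// null means the engine name is accepted; otherwise an error string is returned.
supportedEmbeddingModel("ollama");
// => null
supportedEmbeddingModel("cohere");
// => "Invalid Embedding model type. Must be one of openai, azure, localai, native, ollama."
```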