Commit 2ffe992

Added support for Ollama in TS SDK (#2345)
Co-authored-by: Dev Khant <[email protected]>
1 parent 540ada4 commit 2ffe992

7 files changed: +299 −7 lines changed

mem0-ts/package.json

Lines changed: 8 additions & 4 deletions
@@ -31,9 +31,10 @@
     "dist"
   ],
   "scripts": {
-    "clean": "rm -rf dist",
-    "build": "npm run clean && prettier --check . && tsup",
-    "dev": "nodemon",
+    "clean": "rimraf dist",
+    "build": "npm run clean && npx prettier --check . && npx tsup",
+    "dev": "npx nodemon",
+    "start": "npx ts-node src/oss/examples/basic.ts",
     "test": "jest",
     "test:ts": "jest --config jest.config.js",
     "test:watch": "jest --config jest.config.js --watch",
@@ -74,7 +75,9 @@
     "dotenv": "^16.4.5",
     "fix-tsup-cjs": "^1.2.0",
     "jest": "^29.7.0",
+    "nodemon": "^3.0.1",
     "prettier": "^3.5.2",
+    "rimraf": "^5.0.5",
     "ts-jest": "^29.2.6",
     "ts-node": "^10.9.2",
     "tsup": "^8.3.0",
@@ -96,7 +99,8 @@
     "groq-sdk": "0.3.0",
     "pg": "8.11.3",
     "redis": "4.7.0",
-    "sqlite3": "5.1.7"
+    "sqlite3": "5.1.7",
+    "ollama": "^0.5.14"
   },
   "peerDependenciesMeta": {
     "posthog-node": {

mem0-ts/src/oss/examples/basic.ts

Lines changed: 34 additions & 1 deletion
@@ -116,6 +116,36 @@ async function runTests(memory: Memory) {
   }
 }
 
+async function demoLocalMemory() {
+  console.log("\n=== Testing In-Memory Vector Store with Ollama===\n");
+
+  const memory = new Memory({
+    version: "v1.1",
+    embedder: {
+      provider: "ollama",
+      config: {
+        model: "nomic-embed-text:latest",
+      },
+    },
+    vectorStore: {
+      provider: "memory",
+      config: {
+        collectionName: "memories",
+        dimension: 768, // 768 is the dimension of the nomic-embed-text model
+      },
+    },
+    llm: {
+      provider: "ollama",
+      config: {
+        model: "llama3.1:8b",
+      },
+    },
+    // historyDbPath: "memory.db",
+  });
+
+  await runTests(memory);
+}
+
 async function demoMemoryStore() {
   console.log("\n=== Testing In-Memory Vector Store ===\n");
 
@@ -346,6 +376,9 @@ async function main() {
   // Test in-memory store
   await demoMemoryStore();
 
+  // Test in-memory store with Ollama
+  await demoLocalMemory();
+
   // Test graph memory if Neo4j environment variables are set
   if (
     process.env.NEO4J_URL &&
@@ -384,4 +417,4 @@ async function main() {
   }
 }
 
-// main();
+main();
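
For readers skimming the diff: demoLocalMemory() hands the configured memory to the shared runTests() helper, whose body is not shown here. Below is a minimal sketch of the kind of calls such a test drives; the import path and method shapes are borrowed from the example files in this commit, while the helper name smokeTest and the sample data are illustrative:

// Hypothetical snippet; shapes follow the examples added in this commit.
import { Memory } from "../src";

async function smokeTest(memory: Memory) {
  // Store a short conversation; embeddings come from nomic-embed-text via Ollama.
  await memory.add(
    [
      { role: "user", content: "I am allergic to peanuts." },
      { role: "assistant", content: "Noted, I will avoid suggesting peanuts." },
    ],
    { userId: "sample_user" },
  );

  // Retrieve relevant memories from the in-memory vector store.
  const hits = await memory.search("What foods should I avoid?", {
    userId: "sample_user",
  });
  for (const entry of hits.results) {
    console.log(entry.memory);
  }
}
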
Lines changed: 93 additions & 0 deletions
@@ -0,0 +1,93 @@
+import { Memory } from "../src";
+import { Ollama } from "ollama";
+import * as readline from "readline";
+
+const memory = new Memory({
+  embedder: {
+    provider: "ollama",
+    config: {
+      model: "nomic-embed-text:latest",
+    },
+  },
+  vectorStore: {
+    provider: "memory",
+    config: {
+      collectionName: "memories",
+      dimension: 768, // since we are using nomic-embed-text
+    },
+  },
+  llm: {
+    provider: "ollama",
+    config: {
+      model: "llama3.1:8b",
+    },
+  },
+  historyDbPath: "local-llms.db",
+});
+
+async function chatWithMemories(message: string, userId = "default_user") {
+  const relevantMemories = await memory.search(message, { userId: userId });
+
+  const memoriesStr = relevantMemories.results
+    .map((entry) => `- ${entry.memory}`)
+    .join("\n");
+
+  const systemPrompt = `You are a helpful AI. Answer the question based on query and memories.
+User Memories:
+${memoriesStr}`;
+
+  const messages = [
+    { role: "system", content: systemPrompt },
+    { role: "user", content: message },
+  ];
+
+  const ollama = new Ollama();
+  const response = await ollama.chat({
+    model: "llama3.1:8b",
+    messages: messages,
+  });
+
+  const assistantResponse = response.message.content || "";
+
+  messages.push({ role: "assistant", content: assistantResponse });
+  await memory.add(messages, { userId: userId });
+
+  return assistantResponse;
+}
+
+async function main() {
+  const rl = readline.createInterface({
+    input: process.stdin,
+    output: process.stdout,
+  });
+
+  console.log("Chat with AI (type 'exit' to quit)");
+
+  const askQuestion = (): Promise<string> => {
+    return new Promise((resolve) => {
+      rl.question("You: ", (input) => {
+        resolve(input.trim());
+      });
+    });
+  };
+
+  try {
+    while (true) {
+      const userInput = await askQuestion();
+
+      if (userInput.toLowerCase() === "exit") {
+        console.log("Goodbye!");
+        rl.close();
+        break;
+      }
+
+      const response = await chatWithMemories(userInput, "sample_user");
+      console.log(`AI: ${response}`);
+    }
+  } catch (error) {
+    console.error("An error occurred:", error);
+    rl.close();
+  }
+}
+
+main().catch(console.error);
mem0-ts/src/oss/src/embeddings/ollama.ts

Lines changed: 52 additions & 0 deletions
@@ -0,0 +1,52 @@
+import { Ollama } from "ollama";
+import { Embedder } from "./base";
+import { EmbeddingConfig } from "../types";
+import { logger } from "../utils/logger";
+
+export class OllamaEmbedder implements Embedder {
+  private ollama: Ollama;
+  private model: string;
+  // Using this variable to avoid calling the Ollama server multiple times
+  private initialized: boolean = false;
+
+  constructor(config: EmbeddingConfig) {
+    this.ollama = new Ollama({
+      host: config.url || "http://localhost:11434",
+    });
+    this.model = config.model || "nomic-embed-text:latest";
+    this.ensureModelExists().catch((err) => {
+      logger.error(`Error ensuring model exists: ${err}`);
+    });
+  }
+
+  async embed(text: string): Promise<number[]> {
+    try {
+      await this.ensureModelExists();
+    } catch (err) {
+      logger.error(`Error ensuring model exists: ${err}`);
+    }
+    const response = await this.ollama.embeddings({
+      model: this.model,
+      prompt: text,
+    });
+    return response.embedding;
+  }
+
+  async embedBatch(texts: string[]): Promise<number[][]> {
+    const response = await Promise.all(texts.map((text) => this.embed(text)));
+    return response;
+  }
+
+  private async ensureModelExists(): Promise<boolean> {
+    if (this.initialized) {
+      return true;
+    }
+    const local_models = await this.ollama.list();
+    if (!local_models.models.find((m: any) => m.name === this.model)) {
+      logger.info(`Pulling model ${this.model}...`);
+      await this.ollama.pull({ model: this.model });
+    }
+    this.initialized = true;
+    return true;
+  }
+}
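
For context, a minimal sketch of how the new embedder could be used on its own, assuming a local Ollama server on the default port and that nomic-embed-text is (or will be) pulled. The import path is illustrative; inside the SDK the class is constructed through EmbedderFactory (see the factory diff below):

// Hypothetical standalone usage of OllamaEmbedder; not part of this commit.
import { OllamaEmbedder } from "./embeddings/ollama"; // illustrative path

async function embedExample() {
  // url is optional; the constructor falls back to http://localhost:11434.
  const embedder = new OllamaEmbedder({ model: "nomic-embed-text:latest" });

  // ensureModelExists() pulls the model on first use if it is missing locally.
  const vector = await embedder.embed("Ollama now works in the TS SDK");
  console.log(vector.length); // 768 for nomic-embed-text

  // embedBatch() simply fans embed() out over Promise.all.
  const vectors = await embedder.embedBatch(["first memory", "second memory"]);
  console.log(vectors.length); // 2
}

embedExample().catch(console.error);
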

mem0-ts/src/oss/src/llms/ollama.ts

Lines changed: 104 additions & 0 deletions
@@ -0,0 +1,104 @@
+import { Ollama } from "ollama";
+import { LLM, LLMResponse } from "./base";
+import { LLMConfig, Message } from "../types";
+import { logger } from "../utils/logger";
+
+export class OllamaLLM implements LLM {
+  private ollama: Ollama;
+  private model: string;
+  // Using this variable to avoid calling the Ollama server multiple times
+  private initialized: boolean = false;
+
+  constructor(config: LLMConfig) {
+    this.ollama = new Ollama({
+      host: config.config?.url || "http://localhost:11434",
+    });
+    this.model = config.model || "llama3.1:8b";
+    this.ensureModelExists().catch((err) => {
+      logger.error(`Error ensuring model exists: ${err}`);
+    });
+  }
+
+  async generateResponse(
+    messages: Message[],
+    responseFormat?: { type: string },
+    tools?: any[],
+  ): Promise<string | LLMResponse> {
+    try {
+      await this.ensureModelExists();
+    } catch (err) {
+      logger.error(`Error ensuring model exists: ${err}`);
+    }
+
+    const completion = await this.ollama.chat({
+      model: this.model,
+      messages: messages.map((msg) => {
+        const role = msg.role as "system" | "user" | "assistant";
+        return {
+          role,
+          content:
+            typeof msg.content === "string"
+              ? msg.content
+              : JSON.stringify(msg.content),
+        };
+      }),
+      ...(responseFormat?.type === "json_object" && { format: "json" }),
+      ...(tools && { tools, tool_choice: "auto" }),
+    });
+
+    const response = completion.message;
+
+    if (response.tool_calls) {
+      return {
+        content: response.content || "",
+        role: response.role,
+        toolCalls: response.tool_calls.map((call) => ({
+          name: call.function.name,
+          arguments: JSON.stringify(call.function.arguments),
+        })),
+      };
+    }
+
+    return response.content || "";
+  }
+
+  async generateChat(messages: Message[]): Promise<LLMResponse> {
+    try {
+      await this.ensureModelExists();
+    } catch (err) {
+      logger.error(`Error ensuring model exists: ${err}`);
+    }
+
+    const completion = await this.ollama.chat({
+      messages: messages.map((msg) => {
+        const role = msg.role as "system" | "user" | "assistant";
+        return {
+          role,
+          content:
+            typeof msg.content === "string"
+              ? msg.content
+              : JSON.stringify(msg.content),
+        };
+      }),
+      model: this.model,
+    });
+    const response = completion.message;
+    return {
+      content: response.content || "",
+      role: response.role,
+    };
+  }
+
+  private async ensureModelExists(): Promise<boolean> {
+    if (this.initialized) {
+      return true;
+    }
+    const local_models = await this.ollama.list();
+    if (!local_models.models.find((m: any) => m.name === this.model)) {
+      logger.info(`Pulling model ${this.model}...`);
+      await this.ollama.pull({ model: this.model });
+    }
+    this.initialized = true;
+    return true;
+  }
+}
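
A similar sketch for the LLM side, again hypothetical and not part of the commit. It exercises the two paths above: a plain generateResponse() call that returns a string, and one that requests JSON output via responseFormat, which maps to Ollama's format: "json". It assumes llama3.1:8b is available locally and that Message objects carry only role and content:

// Hypothetical standalone usage of OllamaLLM; not part of this commit.
import { OllamaLLM } from "./llms/ollama"; // illustrative path

async function llmExample() {
  // Host defaults to http://localhost:11434 (see config.config?.url in the constructor above).
  const llm = new OllamaLLM({ model: "llama3.1:8b" });

  // Plain text generation: returns a string when no tool calls come back.
  const answer = await llm.generateResponse([
    { role: "user", content: "In one sentence, what is a memory layer?" },
  ]);
  console.log(answer);

  // JSON mode: responseFormat { type: "json_object" } switches Ollama to format: "json".
  const json = await llm.generateResponse(
    [{ role: "user", content: 'Return {"ok": true} as JSON.' }],
    { type: "json_object" },
  );
  console.log(json);
}

llmExample().catch(console.error);
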

mem0-ts/src/oss/src/types/index.ts

Lines changed: 2 additions & 1 deletion
@@ -13,8 +13,9 @@ export interface Message {
 }
 
 export interface EmbeddingConfig {
-  apiKey: string;
+  apiKey?: string;
   model?: string;
+  url?: string;
 }
 
 export interface VectorStoreConfig {
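
The new providers rely on this widened interface: apiKey is now optional, so a purely local setup needs no key, and url (read as config.url by the embedder and config.config?.url by the LLM) overrides the default http://localhost:11434 host. A small sketch of a config literal against the new shape; the host name is illustrative:

// Hypothetical config literal for a keyless, non-default-host Ollama embedder.
import { EmbeddingConfig } from "../types"; // illustrative path

const localEmbeddingConfig: EmbeddingConfig = {
  // apiKey omitted: optional as of this change.
  model: "nomic-embed-text:latest",
  url: "http://ollama.internal:11434", // illustrative host
};
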

mem0-ts/src/oss/src/utils/factory.ts

Lines changed: 6 additions & 1 deletion
@@ -1,4 +1,5 @@
 import { OpenAIEmbedder } from "../embeddings/openai";
+import { OllamaEmbedder } from "../embeddings/ollama";
 import { OpenAILLM } from "../llms/openai";
 import { OpenAIStructuredLLM } from "../llms/openai_structured";
 import { AnthropicLLM } from "../llms/anthropic";
@@ -10,12 +11,14 @@ import { LLM } from "../llms/base";
 import { VectorStore } from "../vector_stores/base";
 import { Qdrant } from "../vector_stores/qdrant";
 import { RedisDB } from "../vector_stores/redis";
-
+import { OllamaLLM } from "../llms/ollama";
 export class EmbedderFactory {
   static create(provider: string, config: EmbeddingConfig): Embedder {
     switch (provider.toLowerCase()) {
       case "openai":
         return new OpenAIEmbedder(config);
+      case "ollama":
+        return new OllamaEmbedder(config);
       default:
         throw new Error(`Unsupported embedder provider: ${provider}`);
     }
@@ -33,6 +36,8 @@ export class LLMFactory {
         return new AnthropicLLM(config);
       case "groq":
         return new GroqLLM(config);
+      case "ollama":
+        return new OllamaLLM(config);
       default:
         throw new Error(`Unsupported LLM provider: ${provider}`);
     }
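
To tie the pieces together: the factories now resolve the string "ollama" (case-insensitively) to the new classes, which is how the provider: "ollama" entries in the examples above end up constructing OllamaEmbedder and OllamaLLM. A hedged sketch of calling the factories directly; the import path is illustrative, and the LLM factory is assumed to take (provider, config) like the embedder factory:

// Hypothetical direct use of the factories extended in this commit.
import { EmbedderFactory, LLMFactory } from "./utils/factory"; // illustrative path

const embedder = EmbedderFactory.create("ollama", {
  model: "nomic-embed-text:latest",
});
const llm = LLMFactory.create("ollama", { model: "llama3.1:8b" });

// Any unknown provider still falls through to the default case and throws,
// e.g. EmbedderFactory.create("unknown", {}) -> Error("Unsupported embedder provider: unknown").
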
