The Memory Harvester uses LLM-powered extraction to automatically identify and store memories from conversation transcripts. It detects facts, preferences, experiences, and skills mentioned in a chat and persists them to the Memory Fabric.
from cerebe import AsyncCerebeclient = AsyncCerebe(api_key="ck_live_...")result = await client.memory.harvest( session_id="session_abc", transcript="User: I really struggle with fractions\nAssistant: Let me try a visual approach...\nUser: Oh that makes much more sense! I love diagrams", metadata={"student_id": "student_456", "domain": "education"},)for memory in result.harvested_memories: print(f"[{memory.memory_type}] {memory.summary}") print(f" Confidence: {memory.confidence}")
import Cerebe from '@cerebe/sdk'

// Initialize the client with your API key.
const client = new Cerebe({ apiKey: 'ck_live_...' })

// Submit a conversation transcript; the service extracts memories
// (facts, preferences, experiences, skills) and stores them.
const result = await client.memory.harvest({
  sessionId: 'session_abc',
  transcript: 'User: I really struggle with fractions\nAssistant: Let me try a visual approach...\nUser: Oh that makes much more sense! I love diagrams',
  metadata: { studentId: 'student_456', domain: 'education' },
})

// Each harvested memory carries a type, a summary, and a confidence score.
for (const memory of result.harvestedMemories) {
  console.log(`[${memory.memoryType}] ${memory.summary}`)
  console.log(` Confidence: ${memory.confidence}`)
}
# Harvest memories from a transcript via the REST API.
# Authentication is via the X-API-Key header.
curl -X POST https://api.cerebe.ai/api/v1/memory/harvest \
  -H "X-API-Key: ck_live_..." \
  -H "Content-Type: application/json" \
  -d '{
    "session_id": "session_abc",
    "transcript": "User: I really struggle with fractions\nAssistant: Let me try a visual approach...\nUser: Oh that makes much more sense! I love diagrams",
    "metadata": {
      "student_id": "student_456",
      "domain": "education"
    }
  }'