Spaces organize memories for different contexts or users.
from memvai import Memv

# Initialize the client (credentials are picked up from the environment).
client = Memv()

# Create a space
response = client.spaces.create(
    name="My AI Assistant",
    description="Personal assistant memories",
)

# The new space's identifier is used by all subsequent memory calls.
space_id = response.space.id
print(f"Created space: {space_id}")
2
Add memories
Add text, upload files, or import from connectors.
# Add a text memory
memory = client.memories.add(
    space_id=space_id,
    content="User prefers concise responses without small talk",
)

# Upload a file
# The context manager guarantees the handle is closed after the upload call.
with open("meeting_notes.pdf", "rb") as file:
    batch = client.upload.batch.create(
        space_id=space_id,
        files=[file],
    )
3
Search memories
Query memories using natural language.
# Search for relevant memories
results = client.memories.search(
    space_id=space_id,
    query="What are the user's communication preferences?",
)

# Print each matching memory's content, one per line.
for memory in results.memories:
    print(f"- {memory.content}")
Here’s a complete example of a memory-enabled chatbot:
from memvai import Memv

# Initialize client
client = Memv()

# Create or get space
response = client.spaces.create(name="Chatbot Memory")
space_id = response.space.id


def chat_with_memory(user_message: str) -> str:
    """Process user message with memory context.

    Searches the space for memories relevant to ``user_message``, builds a
    context string from them, generates a reply (LLM call left as a stub),
    stores the exchange as a new memory, and returns the reply.
    """
    # 1. Search for relevant memories
    memories = client.memories.search(
        space_id=space_id,
        query=user_message,
        limit=5,
    )

    # 2. Build context from memories
    context = "\n".join([m.content for m in memories.memories])

    # 3. Generate response (using your LLM of choice).
    # NOTE: bind the result to a local name — the original example used the
    # module-level `response` (the space-creation result) by accident, so the
    # stored memory and return value were not the assistant's reply.
    # reply = your_llm.generate(
    #     prompt=f"Context: {context}\n\nUser: {user_message}",
    # )
    reply = f"[stub reply — replace with your LLM call; context: {context}]"

    # 4. Store the conversation as a new memory
    client.memories.add(
        space_id=space_id,
        content=f"User: {user_message}\nAssistant: {reply}",
    )
    return reply


# Use it
response = chat_with_memory("What features should we prioritize?")
print(response)