Documentation index: fetch the complete documentation index at https://docs.memv.ai/llms.txt
Use this file to discover all available pages before exploring further.
Explore advanced features of the Mem[v] SDK including custom HTTP clients, raw responses, proxies, and more.
Raw response access
from memvai import Memv

client = Memv()

# Get the raw response so HTTP headers are accessible alongside the body.
# NOTE(review): Stainless-generated SDKs expose `with_raw_response` as a
# property, not a method — confirm against the memvai SDK before publishing.
response = client.spaces.with_raw_response.list()

# Access headers
print(response.headers.get('X-Request-ID'))
print(response.headers.get('X-RateLimit-Remaining'))

# Parse the response body into the typed model
spaces = response.parse()
print(spaces.spaces)
Custom HTTP client
import httpx
from memvai import Memv, DefaultHttpxClient

# Custom HTTP client that routes all SDK traffic through a proxy.
client = Memv(
    http_client=DefaultHttpxClient(
        proxy="http://proxy.example.com:8080",
    ),
)
import Memv from 'memvai';
import fetch from 'node-fetch';

// Supply a custom fetch implementation (here: node-fetch).
const client = new Memv({
  fetch: fetch as any,
});
import httpx
from memvai import Memv, DefaultHttpxClient

# Route all SDK traffic through a local proxy (e.g. for debugging).
client = Memv(
    http_client=DefaultHttpxClient(
        proxy="http://localhost:8888",
    ),
)
import Memv from 'memvai';
import * as undici from 'undici';

// Node.js: route requests through a proxy via an undici dispatcher.
const proxyAgent = new undici.ProxyAgent('http://localhost:8888');

const client = new Memv({
  fetchOptions: {
    dispatcher: proxyAgent,
  },
});
import Memv from 'memvai';

// Pass a proxy URL directly via fetchOptions.
// NOTE(review): the `proxy` fetch option is runtime-specific (e.g. Bun) —
// confirm which runtimes this snippet targets.
const client = new Memv({
  fetchOptions: {
    proxy: 'http://localhost:8888',
  },
});
import Memv from 'npm:memvai';

// Deno: create an HTTP client bound to a proxy and hand it to fetch.
const httpClient = Deno.createHttpClient({
  proxy: { url: 'http://localhost:8888' },
});

const client = new Memv({
  fetchOptions: {
    client: httpClient,
  },
});
Concurrent operations
Parallel requests
import asyncio
from memvai import AsyncMemv


async def main():
    """Run two independent API requests concurrently."""
    client = AsyncMemv()

    # Start both requests first, then await them together so they
    # execute in parallel rather than sequentially.
    spaces_task = client.spaces.list()
    stats_task = client.spaces.get_stats()
    spaces, stats = await asyncio.gather(spaces_task, stats_task)

    print(f"Spaces: {len(spaces.spaces)}")
    print(f"Total memories: {stats.total_memories}")


asyncio.run(main())
Batch uploads
async def upload_files_parallel(space_id: str, file_paths: list[str]):
    """Upload multiple files concurrently and return their batch ids."""
    client = AsyncMemv()

    async def upload_file(file_path: str):
        # The file handle stays open for the duration of the upload,
        # then is closed by the context manager.
        with open(file_path, 'rb') as f:
            return await client.upload.batch.create(
                space_id=space_id,
                files=[f],
            )

    # Create one upload coroutine per file and run them all concurrently.
    tasks = [upload_file(path) for path in file_paths]
    results = await asyncio.gather(*tasks)
    return [r.batch_id for r in results]


# Usage
files = ["doc1.pdf", "doc2.pdf", "doc3.pdf"]
batch_ids = asyncio.run(upload_files_parallel("space_abc123", files))
Custom logging
import logging
from memvai import Memv

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

client = Memv()

# SDK logging is enabled automatically via the MEMV_LOG environment variable.
Best practices
Create one client instance and reuse it throughout your application.
Use async for I/O-bound operations
Async is more efficient for multiple concurrent requests.
Track rate limit headers to avoid throttling.
Implement proper error handling
Always handle potential errors gracefully.
Next steps
Error handling Comprehensive error handling
API Reference Complete API documentation