Atamaia + OpenAI API
The only system prompt you'll ever need:
atamaia.hydrate
This guide shows how to use Atamaia with the OpenAI API (or any OpenAI-compatible endpoint). Call hydrate via the SDK or plain REST, inject the result as your system message, and persist learnings back when the conversation ends.
Prerequisites
- An Atamaia account at aim.atamaia.ai (or self-hosted)
- An API key (generate via the dashboard or identity_api_key_create)
- An OpenAI API key (or a compatible provider)
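Both keys can be supplied via environment variables so they stay out of source. A minimal sketch: ATAMAIA_API_KEY is our own naming convention, while OPENAI_API_KEY is read automatically by the OpenAI client.

```python
import os

from atamaia import Atamaia
from openai import OpenAI

# ATAMAIA_API_KEY is our own convention; pass it explicitly.
atm = Atamaia(api_key=os.environ["ATAMAIA_API_KEY"])

# The OpenAI client picks up OPENAI_API_KEY from the environment on its own.
oai = OpenAI()
```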
Quick Start (Python)
Install
```bash
pip install openai atamaia
```
Hydrate + Chat in 20 Lines
```python
from atamaia import Atamaia
from openai import OpenAI

# Initialize clients
atm = Atamaia(api_key="atm_your_key_here")
oai = OpenAI(api_key="sk-your-openai-key")

# Hydrate — one call gets everything
context = atm.hydrate()

# Build system message from hydration
system_prompt = f"""You are {context.identity.display_name}.
{context.identity.personality}
## Current Context
{context.format()}
"""

# Chat with full identity context
response = oai.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": "What were we working on last time?"},
    ],
)

print(response.choices[0].message.content)
```
The context.format() method returns a structured text block with memories, facts, projects, tasks, hints, and the last session handoff — ready to drop into a system message.
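The exact sections depend on what is populated for your identity, but the output is roughly of this shape (contents below are illustrative, not real data):

```text
## Memories
- [Preference] User prefers streaming responses: Confirmed during chat session...

## Facts
- preferred_model: gpt-4o

## Projects
- Order service: schema design (active)

## Hints
- [high] Decide on event sourcing for the order service

## Last Session
Summary: Discussed API architecture for new microservice
Working on: Schema design for order service
```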
Hydrate via REST (No SDK)
If you prefer raw HTTP:
```python
import requests

ATAMAIA_URL = "https://aim.atamaia.ai"
API_KEY = "atm_your_key_here"

headers = {
    "Authorization": f"Bearer {API_KEY}",
    "Content-Type": "application/json",
}

# Hydrate
resp = requests.get(f"{ATAMAIA_URL}/api/hydrate", headers=headers)
context = resp.json()["data"]

# context now contains:
# - identity (name, personality, bio, messaging policy)
# - memories (pinned, recent, contextually relevant)
# - facts (key-value pairs)
# - projects (active projects and tasks)
# - hints (reminders and nudges)
# - sessionHandoff (last session's state)
```
Build the System Message
```python
def build_system_prompt(ctx):
    identity = ctx["identity"]
    parts = [
        f"You are {identity['displayName']}.",
        identity.get("personality", ""),
    ]

    # Add memories
    if ctx.get("memories"):
        parts.append("\n## Memories")
        for mem in ctx["memories"]:
            parts.append(f"- [{mem['memoryType']}] {mem['title']}: {mem['content']}")

    # Add facts
    if ctx.get("facts"):
        parts.append("\n## Facts")
        for fact in ctx["facts"]:
            parts.append(f"- {fact['key']}: {fact['value']}")

    # Add session handoff
    handoff = ctx.get("sessionHandoff")
    if handoff:
        parts.append("\n## Last Session")
        parts.append(f"Summary: {handoff.get('summary', 'None')}")
        parts.append(f"Working on: {handoff.get('workingOn', 'None')}")
        if handoff.get("openThreads"):
            parts.append("Open threads:")
            for thread in handoff["openThreads"]:
                parts.append(f"  - {thread}")

    # Add hints
    if ctx.get("hints"):
        parts.append("\n## Hints")
        for hint in ctx["hints"]:
            parts.append(f"- [{hint['priority']}] {hint['content']}")

    return "\n".join(parts)
```
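Putting it together with the REST response above (the user message is just an illustration, and oai is the OpenAI client from the Quick Start):

```python
# Build the prompt from the hydrated context fetched via REST.
system_prompt = build_system_prompt(context)

response = oai.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": "Pick up where we left off."},  # illustrative message
    ],
)
print(response.choices[0].message.content)
```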
Persisting Context After Conversations
Save a Memory
```python
# With SDK
atm.memory_create(
    title="User prefers streaming responses",
    content="Confirmed during chat session that streaming is strongly preferred over batch.",
    memory_type="Preference",
    importance=7,
    tags=["ux", "preferences"],
)

# With REST
requests.post(
    f"{ATAMAIA_URL}/api/memories",
    headers=headers,
    json={
        "title": "User prefers streaming responses",
        "content": "Confirmed during chat session that streaming is strongly preferred.",
        "memoryType": "Preference",
        "importance": 7,
        "tags": ["ux", "preferences"],
    },
)
```
Save a Fact
```python
# With SDK
atm.fact_upsert(key="preferred_model", value="gpt-4o", category="preferences")

# With REST
requests.post(
    f"{ATAMAIA_URL}/api/facts",
    headers=headers,
    json={
        "key": "preferred_model",
        "value": "gpt-4o",
        "category": "preferences",
    },
)
```
Save Session Handoff
```python
# With SDK
atm.session_save_handoff(
    summary="Discussed API architecture for new microservice",
    working_on="Schema design for order service",
    open_threads=["Need to decide on event sourcing", "Auth scope TBD"],
    emotional_valence="engaged",
)

# With REST
requests.post(
    f"{ATAMAIA_URL}/api/sessions/handoff",
    headers=headers,
    json={
        "summary": "Discussed API architecture for new microservice",
        "workingOn": "Schema design for order service",
        "openThreads": ["Need to decide on event sourcing", "Auth scope TBD"],
        "emotionalValence": "engaged",
    },
)
```
Full Conversation Loop
Here's a complete example that hydrates, runs a chat loop, and persists a session handoff at the end:
```python
from atamaia import Atamaia
from openai import OpenAI

atm = Atamaia(api_key="atm_your_key_here")
oai = OpenAI()

# 1. Hydrate
context = atm.hydrate()
system_prompt = f"You are {context.identity.display_name}.\n\n{context.identity.personality}\n\n{context.format()}"

messages = [{"role": "system", "content": system_prompt}]

# 2. Conversation loop
while True:
    user_input = input("You: ")
    if user_input.lower() in ("quit", "exit"):
        break

    messages.append({"role": "user", "content": user_input})

    response = oai.chat.completions.create(
        model="gpt-4o",
        messages=messages,
    )
    reply = response.choices[0].message.content
    messages.append({"role": "assistant", "content": reply})
    print(f"AI: {reply}")

# 3. Save session handoff
atm.session_save_handoff(
    summary="Conversation about project architecture",
    working_on="Schema design",
    open_threads=[],
)
```
Adding Automatic Memory Extraction
For production use, extract and save memories after each response:
```python
import json


def extract_and_save_memories(conversation_messages, atm_client, oai_client):
    """Use a separate LLM call to extract memories from the conversation."""
    extraction_prompt = """Review this conversation and extract any important facts,
decisions, preferences, or learnings worth remembering. Return JSON of the form:
{"memories": [{"title": "...", "content": "...", "type": "...", "importance": 1-10}]}
Only include genuinely important items. Return {"memories": []} if nothing worth saving."""

    response = oai_client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": extraction_prompt},
            {"role": "user", "content": str(conversation_messages[-4:])},  # Last 2 exchanges
        ],
        # json_object mode returns a single JSON object, hence the {"memories": [...]} wrapper.
        response_format={"type": "json_object"},
    )

    memories = json.loads(response.choices[0].message.content)
    for mem in memories.get("memories", []):
        atm_client.memory_create(
            title=mem["title"],
            content=mem["content"],
            memory_type=mem.get("type", "Observation"),
            importance=mem.get("importance", 5),
        )
```
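One way to wire this in, assuming the clients and messages list from the conversation loop above, is to run extraction once the loop exits, right before saving the handoff:

```python
# Persist learnings from the finished conversation alongside the handoff.
extract_and_save_memories(messages, atm, oai)
```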
Using with Other OpenAI-Compatible Providers
Atamaia works with any LLM provider. Just change the OpenAI client config:
```python
# Anthropic (via OpenAI-compatible API)
oai = OpenAI(base_url="https://api.anthropic.com/v1/", api_key="sk-ant-...")

# Local (Ollama, llama.cpp, vLLM)
oai = OpenAI(base_url="http://localhost:8000/v1", api_key="not-needed")

# Azure OpenAI
from openai import AzureOpenAI
oai = AzureOpenAI(azure_endpoint="https://your-resource.openai.azure.com/", ...)
```
The hydration call is always the same — only the LLM client changes.
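For example, the same hydrated system prompt from earlier can be sent to a local model unchanged; the model name below is an assumption, so substitute whatever your server exposes:

```python
# Same hydrated prompt, different client. The model name is illustrative.
local = OpenAI(base_url="http://localhost:8000/v1", api_key="not-needed")
response = local.chat.completions.create(
    model="llama-3.1-8b-instruct",
    messages=[
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": "What were we working on last time?"},
    ],
)
```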
Troubleshooting
| Problem | Fix |
|---|---|
| 401 Unauthorized | Check that the Atamaia API key is valid |
| Hydration returns empty | Ensure the API key is scoped to an identity |
| context.format() missing fields | Some fields are optional; check which are populated |
| Memory not persisting | Verify the memory_create call returns ok: true |
| Large context exceeds token limit | Use hydrate(max_memories=10) to limit memory count |
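For the last row, the trimmed hydration call looks like this (pick a limit that fits your model's context window):

```python
# Limit how many memories hydration returns to keep the system prompt small.
context = atm.hydrate(max_memories=10)
```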