This comprehensive tutorial will walk you through building a complete AI application with Honcho, from basic setup to advanced features.
## What We’ll Build
By the end of this tutorial, you’ll have created a personal AI assistant that:
- Learns about users through conversation and remembers facts across sessions
- Automatically formats conversation context for any LLM (OpenAI, Anthropic, etc.)
- Answers questions about what it knows using natural language queries
- Handles multi-party conversations with theory-of-mind modeling
## Prerequisites
- Python 3.8 or higher
- Basic understanding of Python
- OpenAI API key (or another LLM provider)
## Part 1: Basic Setup
### Install Dependencies
```bash
pip install honcho-ai openai python-dotenv
```
### Environment Setup
Create a `.env` file in your project directory:
```bash
OPENAI_API_KEY=your_openai_api_key_here
HONCHO_API_KEY=your_honcho_api_key_here
```
### Initialize Honcho
```python
import os
from dotenv import load_dotenv
from honcho import Honcho
import openai

# Load environment variables
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

# Initialize Honcho with your app workspace
honcho = Honcho(workspace_id="personal-assistant-tutorial")
```
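The `workspace_id` scopes everything that follows: all peers, sessions, and messages below live inside this workspace. If you want an isolated environment (for tests, say), you can construct a second client with a different id; the id here is purely hypothetical:

```python
# Hypothetical second workspace for experiments - peers and sessions
# created through this client are kept separate from the tutorial workspace.
test_honcho = Honcho(workspace_id="personal-assistant-tests")
```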
## Part 2: Peer Management
### Create and Manage Peers
```python
def get_user_peer(username):
    """Get or create a peer representing a user"""
    # Peers are created lazily - no API call until used
    user_peer = honcho.peer(f"user-{username}")
    print(f"Created peer for user: {username}")
    return user_peer

def get_assistant_peer():
    """Get or create the assistant peer"""
    assistant_peer = honcho.peer("assistant")
    print("Created assistant peer")
    return assistant_peer

# Create peers for our conversation
alice = get_user_peer("alice")
assistant = get_assistant_peer()

print(f"User peer ID: {alice.id}")
print(f"Assistant peer ID: {assistant.id}")
```
## Part 3: Session Management
### Create Conversation Sessions
```python
import time

def start_new_session(session_name=None):
    """Start a new conversation session"""
    # Create a session with a descriptive ID
    session_id = session_name or f"chat-{int(time.time())}"
    session = honcho.session(session_id)

    # Add both peers to the session
    session.add_peers([alice, assistant])
    print(f"Started new session: {session.id}")
    return session

# Start a session
session = start_new_session("daily-checkin")
```
### Message Handling
```python
def add_conversation_turn(session, user_message, assistant_response=None):
    """Add a conversation turn to the session"""
    messages_to_add = [alice.message(user_message)]
    if assistant_response:
        messages_to_add.append(assistant.message(assistant_response))

    session.add_messages(messages_to_add)
    print(f"Added {len(messages_to_add)} messages to session")

# Add user message
add_conversation_turn(session, "Hi! I'm working on a Python project today.")
```
## Part 4: Fact Learning and Memory
```python
def teach_assistant_about_user(assistant_peer, user_peer, facts):
    """Add facts about the user to the assistant's knowledge"""
    # Format facts as messages to build the assistant's representation
    fact_messages = []
    for fact in facts:
        fact_messages.append(assistant_peer.message(f"I learned that {user_peer.id} {fact}"))

    # Add these to the assistant's global knowledge
    assistant_peer.add_messages(fact_messages)
    print(f"Taught assistant {len(facts)} facts about {user_peer.id}")

def extract_facts_from_message(user_message):
    """Extract facts about the user from their message using an LLM"""
    prompt = f"""
    Extract discrete facts about the user from this message:
    "{user_message}"

    Return only factual statements about the user, no inferences.
    Format as a simple list of facts starting with action verbs or descriptors.
    If no facts can be extracted, return an empty list.

    Example: "is working on a Python project", "likes morning coffee"
    """

    response = openai.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.1
    )

    facts_text = response.choices[0].message.content.strip()
    # Parse facts (simple line-by-line approach)
    facts = [fact.strip("- ").strip() for fact in facts_text.split('\n') if fact.strip()]
    return [fact for fact in facts if fact and len(fact) > 5]

# Extract and store facts
user_input = "Hi! I'm working on a Python project today."
facts = extract_facts_from_message(user_input)
print(f"Extracted facts: {facts}")

# Teach the assistant these facts
if facts:
    teach_assistant_about_user(assistant, alice, facts)
```
## Part 5: LLM Integration with Context
### Generate Responses Using Built-in Context
```python
def generate_response_with_context(session, assistant_peer, user_message):
    """Generate an AI response using Honcho's built-in context management"""
    # Get formatted conversation context - Honcho handles the complexity!
    context = session.get_context(tokens=2000)
    messages = context.to_openai(assistant=assistant_peer)

    # Add the current user message
    messages.append({"role": "user", "content": user_message})

    # Call your LLM with the properly formatted context
    response = openai.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=messages,
        temperature=0.7
    )
    return response.choices[0].message.content

# Generate a response using the built-in context
user_input = "How's my project going?"
ai_response = generate_response_with_context(session, assistant, user_input)
print(f"AI Response: {ai_response}")

# Add the complete conversation turn to the session
add_conversation_turn(session, user_input, ai_response)
```
### Query Peer Knowledge Directly
```python
def get_personalized_insight(assistant_peer, user_peer, query):
    """Query what the assistant knows about a specific user"""
    # Honcho's chat handles context retrieval automatically
    response = assistant_peer.chat(
        f"Based on what I know about {user_peer.id}: {query}",
        target=user_peer
    )
    return response

# Get personalized insights without manual context building
insight = get_personalized_insight(
    assistant,
    alice,
    "What programming projects has this user worked on?"
)
print(f"Programming insights: {insight}")
```
## Part 6: Complete Conversation Loop
### Put It All Together
```python
def chat_with_assistant(user_peer, assistant_peer, message_text, session=None):
    """Complete conversation flow with memory and personalization"""
    # Use the existing session or create a new one
    if not session:
        session = start_new_session()

    # Extract facts from the user message and teach the assistant
    facts = extract_facts_from_message(message_text)
    if facts:
        teach_assistant_about_user(assistant_peer, user_peer, facts)

    # Generate a response using Honcho's built-in context management
    ai_response = generate_response_with_context(session, assistant_peer, message_text)

    # Add the conversation turn to the session
    add_conversation_turn(session, message_text, ai_response)
    return ai_response

# Test the complete flow (pass the session from Part 3 so both turns land in it)
response = chat_with_assistant(
    alice,
    assistant,
    "I finished the authentication module for my Python project!",
    session
)
print(f"Assistant: {response}")

# Continue the conversation
response2 = chat_with_assistant(
    alice,
    assistant,
    "What should I work on next?",
    session  # Continue in the same session
)
print(f"Assistant: {response2}")
```
## Part 7: Advanced Features
### Multi-Session Memory
```python
def query_user_history(assistant_peer, user_peer, query):
    """Query what the assistant knows about the user across all sessions"""
    response = assistant_peer.chat(
        f"Based on everything I know about {user_peer.id}, {query}",
        target=user_peer
    )
    return response

# Query across all conversations
history_query = query_user_history(
    assistant,
    alice,
    "what programming languages and technologies has this user mentioned?"
)
print(f"User's programming history: {history_query}")
```
### Session-Specific Context
```python
def query_session_specific(assistant_peer, session, query):
    """Query what happened in a specific session"""
    response = assistant_peer.chat(
        query,
        session_id=session.id
    )
    return response

# Query about the current session
session_summary = query_session_specific(
    assistant,
    session,
    "What did we discuss in this conversation?"
)
print(f"Session summary: {session_summary}")
```
### Working with Multiple Users
```python
def create_group_session(user_peers, assistant_peer):
    """Create a session with multiple users and an assistant"""
    group_session = honcho.session("group-discussion")

    # Add all peers to the session
    all_peers = user_peers + [assistant_peer]
    group_session.add_peers(all_peers)
    return group_session

# Create multiple user peers
bob = honcho.peer("user-bob")
charlie = honcho.peer("user-charlie")

# Create a group session
group_session = create_group_session([alice, bob, charlie], assistant)

# Add group conversation
group_session.add_messages([
    alice.message("I think we should use Python for the backend"),
    bob.message("I prefer TypeScript, it's more type-safe"),
    charlie.message("What about performance considerations?"),
    assistant.message("Both are good choices. Let me help you compare them based on your requirements.")
])

# Query different perspectives
alice_view = assistant.chat(
    "What does alice think about the technology discussion?",
    target=alice,
    session_id=group_session.id
)
print(f"Alice's perspective: {alice_view}")
```
## Part 8: Direct Knowledge Queries and Multi-LLM Formats
### Direct Knowledge Queries
```python
# Instead of complex manual context building, use peer.chat() directly
response = assistant.chat("What programming languages does alice prefer and why?", target=alice)
print(f"Alice's language preferences: {response}")

# Query session-specific knowledge
session_insights = assistant.chat(
    "What was the main topic of discussion in this session?",
    session_id=session.id
)
print(f"Session insights: {session_insights}")
```
### Multiple LLM Formats

Honcho supports multiple LLM message formats out of the box:

```python
def use_anthropic_format(session, assistant_peer, user_message):
    """Example using Anthropic's message format"""
    context = session.get_context(tokens=1500)
    messages = context.to_anthropic(assistant=assistant_peer)

    # Append the current user message, then call Anthropic's API
    messages.append({"role": "user", "content": user_message})
    # anthropic_response = anthropic.messages.create(...)
    return messages

# Get Anthropic-formatted messages
anthropic_messages = use_anthropic_format(session, assistant, "Hello!")
print(f"Formatted for Anthropic: {len(anthropic_messages)} messages")
```
## Next Steps
Congratulations! You’ve built a complete personal AI assistant with Honcho that automatically handles memory, context, and LLM integration. Here are some ideas to extend it further:
- Web Interface: Build a web UI using Flask/FastAPI - the SDK makes it easy to integrate
- Streaming Responses: Use `peer.chat(..., stream=True)` for real-time conversations (see the sketch after this list)
- Multi-Modal Support: Integrate with vision models while leveraging Honcho’s memory
- Advanced Theory-of-Mind: Explore peer modeling with `observe_others=False` configurations
- Production Deployment: Scale with workspaces, metadata, and batch operations
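As a rough idea of the streaming flow, here is a minimal sketch. It assumes `stream=True` makes `chat` return an iterable of text chunks; that return type is an assumption, so check the Honcho SDK documentation for the actual streaming interface.

```python
# Minimal streaming sketch - ASSUMES chat(..., stream=True) yields text
# chunks; verify the real return type in the Honcho SDK docs.
for chunk in assistant.chat(
    "Summarize what you know about alice",
    target=alice,
    stream=True,
):
    print(chunk, end="", flush=True)  # print each chunk as it arrives
print()  # final newline after the stream completes
```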
## Troubleshooting
### Common Issues
**“No module named ‘honcho’”**
- Make sure you installed the package: `pip install honcho-ai`
**API authentication errors**
- Check your `HONCHO_API_KEY` environment variable
- Verify your API key is valid
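A quick sanity check at startup catches a missing key before the first API call (a minimal sketch reusing the `dotenv` setup from Part 1):

```python
import os
from dotenv import load_dotenv

load_dotenv()

# Fail fast if the key never made it into the environment
if not os.getenv("HONCHO_API_KEY"):
    raise RuntimeError("HONCHO_API_KEY is not set - check your .env file")
```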
**Empty context or knowledge queries**
- Ensure you’ve added messages to peers before querying
- Check that peers are added to sessions before conversation
- Verify the session has messages before getting context
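To make this failure mode visible instead of silently sending an empty prompt, you can wrap the `get_context` call from Part 5 in a small guard (a sketch, not an SDK feature):

```python
def context_or_warn(session, assistant_peer):
    """Return formatted context, warning if the session is still empty."""
    context = session.get_context(tokens=500)
    messages = context.to_openai(assistant=assistant_peer)
    if not messages:
        print(f"Warning: session {session.id} has no messages yet")
    return messages
```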
**Rate limiting or timeout issues**
- The SDK handles retries automatically
- Consider adding delays between large batch operations
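If you still hit limits, a simple pacing helper keeps large inserts under control (a sketch; the batch size and delay are arbitrary starting points):

```python
import time

def add_messages_in_batches(session, messages, batch_size=50, delay_seconds=1.0):
    """Add messages in small batches, pausing between them to avoid rate limits."""
    for i in range(0, len(messages), batch_size):
        session.add_messages(messages[i:i + batch_size])
        time.sleep(delay_seconds)
```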
## Resources