The Honcho SDKs provide ergonomic interfaces for building agentic AI applications with Honcho in Python and TypeScript/JavaScript.

Installation

uv add honcho-ai

Quick Start

from honcho import Honcho

# Initialize client (using the default workspace)
honcho = Honcho()

# Create peers
alice = honcho.peer("alice")
assistant = honcho.peer("assistant")

# Create a session for conversation
session = honcho.session("conversation-1")

# Add messages to conversation
session.add_messages([
    alice.message("What's the weather like today?"),
    assistant.message("It's sunny and 75°F outside!")
])

# Query peer representations in natural language
response = alice.chat("What did the assistant tell this user about the weather?")

# Get conversation context for LLM completions
context = session.get_context()
openai_messages = context.to_openai(assistant=assistant)

Core Concepts

Peers and Representations

Representations are how Honcho models what peers know. Each peer has a global representation (everything they know across all sessions) and local representations (what other specific peers know about them, scoped by session or globally).

# Query alice's global knowledge
response = alice.chat("What does the user know about weather?")

# Query what alice knows about the assistant (local representation)
response = alice.chat("What does the user know about the assistant?", target=assistant)

# Query scoped to a specific session
response = alice.chat("What happened in our conversation?", session_id=session.id)

Core Classes

Honcho Client

The main entry point for workspace operations:

from honcho import Honcho

# Basic initialization (uses environment variables)
honcho = Honcho(workspace_id="my-app-name")

# Full configuration
honcho = Honcho(
    workspace_id="my-app-name",
    api_key="my-api-key",
    environment="production",  # or "local", "demo"
    base_url="https://api.honcho.dev",
    timeout=30.0,
    max_retries=3
)

Environment Variables:

  • HONCHO_API_KEY - API key for authentication
  • HONCHO_BASE_URL - Base URL for the Honcho API
  • HONCHO_WORKSPACE_ID - Default workspace ID

Key Methods:

# Get or create a peer
peer = honcho.peer(id)

# Get or create a session
session = honcho.session(id)

# List all peers in workspace
peers = honcho.get_peers()

# List all sessions in workspace
sessions = honcho.get_sessions()

# Search across all content in workspace
results = honcho.search(query)

# Workspace metadata management
metadata = honcho.get_metadata()
honcho.set_metadata(dict)

# Get list of all workspace IDs
workspaces = honcho.get_workspaces()

Peer and session creation is lazy - no API calls are made until you actually use the peer or session.

Peer

Represents an entity that can participate in conversations:

# Create peers (lazy creation - no API call yet)
alice = honcho.peer("alice")
assistant = honcho.peer("assistant")

# Create with immediate configuration
# This will make an API call to create the peer with the custom configuration and/or metadata
bob = honcho.peer("bob", config={"role": "user", "active": True}, metadata={"location": "NYC", "role": "developer"})

# Peer properties
print(f"Peer ID: {alice.id}")
print(f"Workspace: {alice.workspace_id}")

# Chat with peer's representations (supports streaming)
response = alice.chat("What did I have for breakfast?")
response = alice.chat("What do I know about Bob?", target="bob")
response = alice.chat("What happened in session-1?", session_id="session-1")

# Add content to peer's global representation
alice.add_messages("I love Python programming")
alice.add_messages([
    alice.message("Today I learned about async programming"),
    alice.message("I prefer functional programming patterns")
])

# Get peer's sessions and messages
sessions = alice.get_sessions()
messages = alice.get_messages()

# Search peer's content
results = alice.search("programming")

# Metadata management
metadata = alice.get_metadata()
metadata["location"] = "Paris"
alice.set_metadata(metadata)

Session

Manages multi-party conversations:

# Create session (like peers, lazy creation)
session = honcho.session("conversation-1")

# Create with immediate configuration
# This will make an API call to create the session with the custom configuration and/or metadata
session = honcho.session("meeting-1", config={"type": "meeting", "max_peers": 10})

# Session properties
print(f"Session ID: {session.id}")
print(f"Workspace: {session.workspace_id}")

# Peer management
session.add_peers([alice, assistant])
session.add_peers([(alice, SessionPeerConfig(observe_others=True))])
session.set_peers([alice, bob, charlie])  # Replace all peers
session.remove_peers([alice])

# Get session peers and their configurations
peers = session.get_peers()
peer_config = session.get_peer_config(alice)
session.set_peer_config(alice, SessionPeerConfig(observe_me=False))

# Message management
session.add_messages([
    alice.message("Hello everyone!"),
    assistant.message("Hi Alice! How can I help today?")
])

# Get messages
messages = session.get_messages()

# Get conversation context
context = session.get_context(summary=True, tokens=2000)

# Search session content
results = session.search("help")

# Working representation queries (scoped to this session)
session_rep = session.working_rep("alice")
targeted_rep = session.working_rep(alice, bob)

# Metadata management
session.set_metadata({"topic": "product planning", "status": "active"})
metadata = session.get_metadata()

Session-Level Theory of Mind Configuration:

Theory of Mind controls whether peers can form models of what other peers think. Use observe_others=False to prevent a peer from modeling others within a session, and observe_me=False to prevent others from modeling this peer within a session.

from honcho import SessionPeerConfig

# Configure peer observation settings
config = SessionPeerConfig(
    observe_others=False,  # Don't form theory-of-mind of other peers -- False by default
    observe_me=True        # Let others form theory-of-mind of me -- True by default
)

session.add_peers([(alice, config)])

SessionContext

Provides formatted conversation context for LLM integration:

# Get session context
context = session.get_context(summary=True, tokens=1500)

# Convert to LLM-friendly formats
openai_messages = context.to_openai(assistant=assistant)
anthropic_messages = context.to_anthropic(assistant=assistant)

Advanced Usage

Multi-Party Conversations

# Create multiple peers
users = [honcho.peer(f"user-{i}") for i in range(5)]
moderator = honcho.peer("moderator")

# Create group session
group_chat = honcho.session("group-discussion")
group_chat.add_peers(users + [moderator])

# Add messages from different peers
group_chat.add_messages([
    users[0].message("What's our agenda for today?"),
    moderator.message("We'll discuss the new feature roadmap"),
    users[1].message("I have some concerns about the timeline")
])

# Query different perspectives
user_perspective = users[0].chat("What are people's concerns?")
moderator_view = moderator.chat("What feedback am I getting?", session_id=group_chat.id)

LLM Integration

from openai import OpenAI

client = OpenAI()

# Get conversation context
context = session.get_context(tokens=3000)
messages = context.to_openai(assistant=assistant)

# Call OpenAI API
response = client.chat.completions.create(
    model="gpt-4",
    messages=messages + [
        {"role": "user", "content": "Summarize the key discussion points."}
    ]
)

Metadata and Filtering

See Using Filters for more examples on how to use filters.

# Add messages with metadata
session.add_messages([
    alice.message("Let's discuss the budget", metadata={
        "topic": "finance",
        "priority": "high"
    }),
    assistant.message("I'll prepare the financial report", metadata={
        "action_item": True,
        "due_date": "2024-01-15"
    })
])

# Filter messages by metadata
finance_messages = session.get_messages(filter={"metadata": {"topic": "finance"}})
action_items = session.get_messages(filter={"metadata": {"action_item": True}})

Pagination

# Iterate through all sessions
for session in honcho.get_sessions():
    print(f"Session: {session.id}")
    
    # Iterate through session messages
    for message in session.get_messages():  
        print(f"  {message.peer_id}: {message.content}")

Best Practices

Resource Management

# Peers and sessions are lightweight - create as needed
alice = honcho.peer("alice")
session = honcho.session("chat-1")

# Use descriptive IDs for better debugging
user_session = honcho.session(f"user-{user_id}-support-{ticket_id}")
support_agent = honcho.peer(f"agent-{agent_id}")

Performance Optimization

# Lazy creation - no API calls until needed
peers = [honcho.peer(f"user-{i}") for i in range(100)]  # Fast

# Batch operations when possible
session.add_messages([peer.message(f"Message {i}") for i, peer in enumerate(peers)])

# Use context limits to control token usage
context = session.get_context(tokens=1500)  # Limit context size