Set `environment="production"` and provide your `api_key`.
from honcho import Honcho

# Initialize client (using the default workspace)
honcho = Honcho()

# Create peers
alice = honcho.peer("alice")
assistant = honcho.peer("assistant")

# Create a session for conversation
session = honcho.session("conversation-1")

# Add messages to conversation
session.add_messages([
    alice.message("What's the weather like today?"),
    assistant.message("It's sunny and 75°F outside!"),
])

# Chat with Honcho about a peer
response = alice.chat("What did the assistant tell this user about the weather?")

# Get conversation context for LLM completions
context = session.context()
openai_messages = context.to_openai(assistant=assistant)
Representations are how Honcho models what peers know. Each peer has a global representation (everything they know across all sessions) and local representations (what other specific peers know about them, scoped by session or globally).
# Query alice's global knowledge
response = alice.chat("What does the user know about weather?")

# Query what alice knows about the assistant (local representation)
response = alice.chat("What does the user know about the assistant?", target=assistant)

# Query scoped to a specific session
response = alice.chat("What happened in our conversation?", session=session.id)
from honcho import Honcho

# Basic initialization (uses environment variables)
honcho = Honcho(workspace_id="my-app-name")

# Full configuration
honcho = Honcho(
    workspace_id="my-app-name",
    api_key="my-api-key",
    environment="production",  # or "local", "demo"
    base_url="https://api.honcho.dev",
    timeout=30.0,
    max_retries=3,
)
Environment Variables:
HONCHO_API_KEY - API key for authentication
HONCHO_BASE_URL - Base URL for the Honcho API
HONCHO_WORKSPACE_ID - Default workspace ID
Key Methods:
# Get or create a peer
peer = honcho.peer(id)

# Get or create a session
session = honcho.session(id)

# List all peers in workspace
peers = honcho.peers()

# List all sessions in workspace
sessions = honcho.sessions()

# Search across all content in workspace
results = honcho.search(query)

# Workspace metadata management
metadata = honcho.get_metadata()
honcho.set_metadata(dict)

# Get list of all workspace IDs
workspaces = honcho.workspaces()
peer() and session() always make a get-or-create API call, returning objects with cached metadata, configuration, and timestamps.
A peer represents an entity that can participate in conversations:
# Create peers (get-or-create API call)
alice = honcho.peer("alice")
assistant = honcho.peer("assistant")

# Create with immediate configuration
# This will make an API call to create the peer with the custom configuration and/or metadata
# NOTE: bound to `bob` (original doc bound peer "bob" to the variable `alice`, which was misleading)
bob = honcho.peer(
    "bob",
    config={"role": "user", "active": True},
    metadata={"location": "NYC", "role": "developer"},
)

# Peer properties
print(f"Peer ID: {alice.id}")
print(f"Workspace: {alice.workspace_id}")
print(f"Created: {alice.created_at}")  # Available after API fetch

# Chat with peer's representations (supports streaming)
response = alice.chat("What did I have for breakfast?")
response = alice.chat("What do I know about Bob?", target="bob")
response = alice.chat("What happened in session-1?", session="session-1")
response = alice.chat("Summarize what matters most to me.", reasoning_level="high")

# Add content to a session with a peer
session = honcho.session("session-1")
session.add_messages([
    alice.message("I love Python programming"),
    alice.message("Today I learned about async programming"),
    alice.message("I prefer functional programming patterns"),
])

# Get peer's sessions
sessions = alice.sessions()

# Search peer's messages
results = alice.search("programming")

# Metadata management
metadata = alice.get_metadata()
metadata["location"] = "Paris"
alice.set_metadata(metadata)

# Peer card management
card = alice.get_card()  # Get peer card
card = alice.get_card(target="bob")  # Get card about another peer
updated = alice.set_card(["Likes Python", "Lives in NYC"])  # Set peer card
updated = alice.set_card(["Works at Acme"], target="bob")  # Set card about another peer

# Get peer context (representation + peer card in one call)
context = alice.context()
context = alice.context(target="bob")  # What alice knows about bob

# Get working representation with semantic search
rep = alice.representation(search_query="preferences", search_top_k=10)

# Access conclusions
self_conclusions = alice.conclusions.list()  # Self-conclusions
bob_conclusions = alice.conclusions_of("bob").list()  # Conclusions of bob
The context() method on peers retrieves both the working representation and peer card in a single API call:
# Get peer's own context
context = alice.context()
print(context.representation)  # Working representation
print(context.peer_card)  # Peer card as list of strings

# Get context about another peer (what alice knows about bob)
bob_context = alice.context(target="bob")

# Get context with semantic search
context = alice.context(
    target="bob",
    search_query="work preferences",
    search_top_k=10,
    search_max_distance=0.8,
    include_most_frequent=True,
    max_conclusions=50,
)
The peer card contains stable biographical facts about a peer (name, preferences, background). Use get_card() / getCard() to retrieve it and set_card() / setCard() to overwrite it:
# Get peer's own card
card = alice.get_card()
print(card)  # ["Likes Python", "Lives in NYC", ...]

# Get card about another peer (local representation)
bob_card = alice.get_card(target="bob")

# Set peer's own card
updated = alice.set_card(["Likes Python", "Lives in NYC"])

# Set card about another peer
updated = alice.set_card(["Works at Acme", "Enjoys hiking"], target="bob")
Peer cards are automatically maintained by the dreaming agent during message processing. Use set_card() / setCard() when you need to manually override or seed the card — the peer will be created automatically if it doesn’t already exist.
You can also create conclusions directly, which is useful for importing data or adding explicit facts:
# Create conclusions for what alice knows about bob
bob_conclusions = alice.conclusions_of("bob")

# Create a single conclusion
created = bob_conclusions.create([
    {"content": "User prefers dark mode", "session_id": "session-1"}
])

# Create multiple conclusions in batch
created = bob_conclusions.create([
    {"content": "User prefers dark mode", "session_id": "session-1"},
    {"content": "User works late at night", "session_id": "session-1"},
    {"content": "User enjoys programming", "session_id": "session-1"},
])

# Returns list of created Conclusion objects with IDs
for conclusion in created:
    print(f"Created conclusion: {conclusion.id} - {conclusion.content}")
Manually created conclusions are marked as “explicit” and are treated the same as system-derived conclusions. Each conclusion must be tied to a session and the content length is validated against the embedding token limit.
# Create session (get-or-create API call)
session = honcho.session("conversation-1")

# Create with immediate configuration
# This will make an API call to create the session with the custom configuration and/or metadata
session = honcho.session("meeting-1", config={"type": "meeting", "max_peers": 10})

# Session properties
print(f"Session ID: {session.id}")
print(f"Workspace: {session.workspace_id}")
print(f"Created: {session.created_at}")  # Available after API fetch
print(f"Active: {session.is_active}")  # Available after API fetch

# Peer management
session.add_peers([alice, assistant])
session.add_peers([(alice, SessionPeerConfig(observe_others=True))])
session.set_peers([alice, bob, charlie])  # Replace all peers
session.remove_peers([alice])

# Get session peers and their configurations
peers = session.peers()
peer_config = session.get_peer_configuration(alice)
session.set_peer_configuration(alice, SessionPeerConfig(observe_me=False))

# Message management
session.add_messages([
    alice.message("Hello everyone!"),
    assistant.message("Hi Alice! How can I help today?"),
])

# Get messages (with optional pagination)
messages = session.messages()
messages = session.messages(page=1, size=100, reverse=True)

# Get a single message by ID
message = session.get_message("message-id")

# Get conversation context
context = session.context(summary=True, tokens=2000)

# Get context with peer representation included
context = session.context(
    tokens=2000,
    peer_target="user",
    peer_perspective="assistant",
    search_query="What are my preferences?",
    limit_to_session=True,
    search_top_k=10,
    search_max_distance=0.8,
    include_most_frequent=True,
    max_conclusions=25,
)

# Search session content
results = session.search("help")

# Working representation queries with semantic search
global_rep = session.representation("alice")
targeted_rep = session.representation(alice, target=bob)
searched_rep = session.representation(
    "alice",
    search_query="preferences",
    search_top_k=10,
    include_most_frequent=True,
)

# Upload a file to create messages
messages = session.upload_file(
    file=open("document.pdf", "rb"),
    peer="user",
    metadata={"source": "upload"},
    created_at="2024-01-15T10:30:00Z",
)

# Clone a session (creates a copy with all data)
# Copies: messages, metadata, configuration, peers, and peer configurations
cloned = session.clone()

# Clone up to a specific message (inclusive)
# Only messages up to and including the specified message are copied
cloned_partial = session.clone(message_id="msg-123")

# Delete session (async - returns 202)
session.delete()

# Metadata management
session.set_metadata({"topic": "product planning", "status": "active"})
metadata = session.get_metadata()
Session-Level Theory of Mind Configuration:
Theory of Mind controls whether peers can form models of what other peers think. Use observe_others=False to prevent a peer from modeling others within a session, and observe_me=False to prevent others from modeling this peer within a session.
from honcho.api_types import SessionPeerConfig

# Configure peer observation settings.
# (Original inline comments were inverted relative to the values; each flag is
# documented here by what True means, with its default noted.)
config = SessionPeerConfig(
    observe_others=False,  # Whether this peer forms theory-of-mind of other peers -- False by default
    observe_me=True,       # Whether other peers may form theory-of-mind of this peer -- True by default
)
session.add_peers([(alice, config)])
# Create multiple peers
users = [honcho.peer(f"user-{i}") for i in range(5)]
moderator = honcho.peer("moderator")

# Create group session
group_chat = honcho.session("group-discussion")
group_chat.add_peers(users + [moderator])

# Add messages from different peers
group_chat.add_messages([
    users[0].message("What's our agenda for today?"),
    moderator.message("We'll discuss the new feature roadmap"),
    users[1].message("I have some concerns about the timeline"),
])

# Query different perspectives
user_perspective = users[0].chat("What are people's concerns?")
moderator_view = moderator.chat("What feedback am I getting?", session=group_chat.id)
# Peers and sessions are lightweight - create as needed
alice = honcho.peer("alice")
session = honcho.session("chat-1")

# Use descriptive IDs for better debugging
user_session = honcho.session(f"user-{user_id}-support-{ticket_id}")
support_agent = honcho.peer(f"agent-{agent_id}")
# Create peers (each makes a get-or-create call)
peers = [honcho.peer(f"user-{i}") for i in range(100)]

# Batch operations when possible
session.add_messages([peer.message(f"Message {i}") for i, peer in enumerate(peers)])

# Use context limits to control token usage
context = session.context(tokens=1500)  # Limit context size