from honcho import Honcho

# Initialize client (using the default workspace)
honcho = Honcho()

# Create peers
alice = honcho.peer("alice")
assistant = honcho.peer("assistant")

# Create a session for conversation
session = honcho.session("conversation-1")

# Add messages to conversation
session.add_messages([
    alice.message("What's the weather like today?"),
    assistant.message("It's sunny and 75°F outside!"),
])

# Query peer representations in natural language
response = alice.chat("What did the assistant tell this user about the weather?")

# Get conversation context for LLM completions
context = session.get_context()
openai_messages = context.to_openai(assistant=assistant)
Representations are how Honcho models what peers know. Each peer has a global representation (everything they know across all sessions) and local representations (what they know about other specific peers, scoped by session or globally).
Copy
# Query alice's global knowledge
response = alice.chat("What does the user know about weather?")

# Query what alice knows about the assistant (local representation)
response = alice.chat("What does the user know about the assistant?", target=assistant)

# Query scoped to a specific session
response = alice.chat("What happened in our conversation?", session_id=session.id)
from honcho import Honcho

# Basic initialization (uses environment variables)
honcho = Honcho(workspace_id="my-app-name")

# Full configuration
honcho = Honcho(
    workspace_id="my-app-name",
    api_key="my-api-key",
    environment="production",  # or "local", "demo"
    base_url="https://api.honcho.dev",
    timeout=30.0,
    max_retries=3,
)
Environment Variables:
HONCHO_API_KEY - API key for authentication
HONCHO_BASE_URL - Base URL for the Honcho API
HONCHO_WORKSPACE_ID - Default workspace ID
Key Methods:
Copy
# Get or create a peer
peer = honcho.peer(id)

# Get or create a session
session = honcho.session(id)

# List all peers in workspace
peers = honcho.get_peers()

# List all sessions in workspace
sessions = honcho.get_sessions()

# Search across all content in workspace
results = honcho.search(query)

# Workspace metadata management
metadata = honcho.get_metadata()
honcho.set_metadata(dict)

# Get list of all workspace IDs
workspaces = honcho.get_workspaces()
Peer and session creation is lazy - no API calls are made until you actually use the peer or session.
Represents an entity that can participate in conversations:
Copy
# Create peers (lazy creation - no API call yet)
alice = honcho.peer("alice")
assistant = honcho.peer("assistant")

# Create with immediate configuration.
# This will make an API call to create the peer with the custom
# configuration and/or metadata.
# NOTE: assign the "bob" peer to its own variable (the original example
# rebound `alice` here, which broke the alice-based examples below).
bob = honcho.peer(
    "bob",
    config={"role": "user", "active": True},
    metadata={"location": "NYC", "role": "developer"},
)

# Peer properties
print(f"Peer ID: {alice.id}")
print(f"Workspace: {alice.workspace_id}")

# Chat with peer's representations (supports streaming)
response = alice.chat("What did I have for breakfast?")
response = alice.chat("What do I know about Bob?", target="bob")
response = alice.chat("What happened in session-1?", session_id="session-1")

# Add content to peer's global representation
alice.add_messages("I love Python programming")
alice.add_messages([
    alice.message("Today I learned about async programming"),
    alice.message("I prefer functional programming patterns"),
])

# Get peer's sessions and messages
sessions = alice.get_sessions()
messages = alice.get_messages()

# Search peer's content
results = alice.search("programming")

# Metadata management
metadata = alice.get_metadata()
metadata["location"] = "Paris"
alice.set_metadata(metadata)
# SessionPeerConfig is needed for the per-peer configuration calls below
# (the original snippet used it without importing it).
from honcho import SessionPeerConfig

# Create session (like peers, lazy creation)
session = honcho.session("conversation-1")

# Create with immediate configuration.
# This will make an API call to create the session with the custom
# configuration and/or metadata.
session = honcho.session("meeting-1", config={"type": "meeting", "max_peers": 10})

# Session properties
print(f"Session ID: {session.id}")
print(f"Workspace: {session.workspace_id}")

# Peer management
# (alice, assistant, bob, charlie are Peer objects created via honcho.peer(...))
session.add_peers([alice, assistant])
session.add_peers([(alice, SessionPeerConfig(observe_others=True))])
session.set_peers([alice, bob, charlie])  # Replace all peers
session.remove_peers([alice])

# Get session peers and their configurations
peers = session.get_peers()
peer_config = session.get_peer_config(alice)
session.set_peer_config(alice, SessionPeerConfig(observe_me=False))

# Message management
session.add_messages([
    alice.message("Hello everyone!"),
    assistant.message("Hi Alice! How can I help today?"),
])

# Get messages
messages = session.get_messages()

# Get conversation context
context = session.get_context(summary=True, tokens=2000)

# Search session content
results = session.search("help")

# Working representation queries
global_rep = session.working_rep("alice")
targeted_rep = session.working_rep(alice, bob)

# Metadata management
session.set_metadata({"topic": "product planning", "status": "active"})
metadata = session.get_metadata()
Session-Level Theory of Mind Configuration:
Theory of Mind controls whether peers can form models of what other peers think. Use observe_others=False to prevent a peer from modeling others within a session, and observe_me=False to prevent others from modeling this peer within a session.
Copy
from honcho import SessionPeerConfig

# Configure peer observation settings.
# (The original comments contradicted the values they annotated; fixed here.)
config = SessionPeerConfig(
    # Do NOT form a theory-of-mind of other peers (False is the default)
    observe_others=False,
    # Allow others to form a theory-of-mind of me (True is the default)
    observe_me=True,
)

session.add_peers([(alice, config)])
# Create multiple peers
users = [honcho.peer(f"user-{i}") for i in range(5)]
moderator = honcho.peer("moderator")

# Create group session
group_chat = honcho.session("group-discussion")
group_chat.add_peers(users + [moderator])

# Add messages from different peers
group_chat.add_messages([
    users[0].message("What's our agenda for today?"),
    moderator.message("We'll discuss the new feature roadmap"),
    users[1].message("I have some concerns about the timeline"),
])

# Query different perspectives
user_perspective = users[0].chat("What are people's concerns?")
moderator_view = moderator.chat("What feedback am I getting?", session_id=group_chat.id)
# Iterate through all sessions
for session in honcho.get_sessions():
    print(f"Session: {session.id}")
    # Iterate through session messages
    for message in session.get_messages():
        print(f" {message.peer_id}: {message.content}")
# Peers and sessions are lightweight - create as needed
alice = honcho.peer("alice")
session = honcho.session("chat-1")

# Use descriptive IDs for better debugging
# (user_id, ticket_id, agent_id are supplied by your application)
user_session = honcho.session(f"user-{user_id}-support-{ticket_id}")
support_agent = honcho.peer(f"agent-{agent_id}")
# Lazy creation - no API calls until needed
peers = [honcho.peer(f"user-{i}") for i in range(100)]  # Fast

# Batch operations when possible
session.add_messages([peer.message(f"Message {i}") for i, peer in enumerate(peers)])

# Use context limits to control token usage
context = session.get_context(tokens=1500)  # Limit context size