#!/usr/bin/env python3 """ FlowTether MCP Server Model Context Protocol server for Claude CLI integration. Allows AI agents to search, save, and organize knowledge in FlowTether. Usage: 1. Set environment variables: - FLOWTETHER_API_KEY: Your API key (ft_live_xxx) - FLOWTETHER_API_URL: API URL (default: https://flowtether.ai) - FLOWTETHER_HARBOR_ID: Default harbor ID (optional) 2. Add to Claude CLI config (~/.claude/claude_desktop_config.json): { "mcpServers": { "flowtether": { "command": "python", "args": ["/path/to/flowtether_mcp.py"], "env": { "FLOWTETHER_API_KEY": "ft_live_xxx", "FLOWTETHER_HARBOR_ID": "optional-harbor-uuid" } } } } 3. Restart Claude CLI Available Tools: - search: Semantic search across pearls - save_pearl: Save a new pearl (knowledge item) - list_harbors: List available harbors - list_buoys: List topic clusters in a harbor - get_pearl: Get full pearl content by ID - bulk_save: Save multiple pearls at once """ import logging import os import sys from typing import Any import httpx from mcp.server import Server from mcp.server.stdio import stdio_server from mcp.types import TextContent, Tool, Resource, TextResourceContents # Configure logging logging.basicConfig( level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", stream=sys.stderr, ) logger = logging.getLogger("flowtether_mcp") # Configuration from environment API_KEY = os.getenv("FLOWTETHER_API_KEY", "") API_URL = os.getenv("FLOWTETHER_API_URL", "https://flowtether.ai") DEFAULT_HARBOR_ID = os.getenv("FLOWTETHER_HARBOR_ID", "") # HTTP client with timeout http_client = httpx.AsyncClient(timeout=30.0) def get_headers(harbor_id: str | None = None) -> dict[str, str]: """Get HTTP headers for API requests.""" headers = { "Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json", } # Use provided harbor_id, fall back to default effective_harbor = harbor_id or DEFAULT_HARBOR_ID if effective_harbor: headers["X-Harbor-ID"] = effective_harbor return headers 
async def api_request( method: str, endpoint: str, json: dict[str, Any] | None = None, params: dict[str, Any] | None = None, harbor_id: str | None = None, ) -> dict[str, Any]: """Make an API request to FlowTether. Args: method: HTTP method (GET, POST, etc.) endpoint: API endpoint (e.g., /api/mcp/harbors) json: JSON body for POST/PUT requests params: Query parameters harbor_id: Override harbor ID for this request Returns: JSON response from API Raises: Exception: If API request fails """ url = f"{API_URL}{endpoint}" headers = get_headers(harbor_id) try: response = await http_client.request( method=method, url=url, headers=headers, json=json, params=params, ) response.raise_for_status() return response.json() except httpx.HTTPStatusError as e: error_detail = "Unknown error" try: error_detail = e.response.json().get("detail", str(e)) except Exception: error_detail = e.response.text or str(e) logger.error(f"API error: {e.response.status_code} - {error_detail}") raise Exception(f"API error ({e.response.status_code}): {error_detail}") from e except httpx.RequestError as e: logger.error(f"Request error: {e}") raise Exception(f"Connection error: {e}") from e # Create MCP server server = Server("flowtether") @server.list_tools() async def list_tools() -> list[Tool]: """List available MCP tools.""" return [ Tool( name="get_instructions", description="Full FlowTether reference guide — title writing, tagging strategy, pearl types, organization best practices. Read this when you need to understand HOW to write good content. You do NOT need to call this every session — begin_session() is your session entry point.", inputSchema={ "type": "object", "properties": {}, }, ), Tool( name="search", description="Search the USER'S knowledge base (harbor). Returns matching pearls with titles, summaries, and relevance scores. 
Does NOT search your workspace — use search_workspace() for your own notes.", inputSchema={ "type": "object", "properties": { "query": { "type": "string", "description": "Search query (natural language)", }, "limit": { "type": "integer", "description": "Maximum results to return (default: 10, max: 50)", "default": 10, }, "harbor_id": { "type": "string", "description": "Optional: Search in specific harbor (uses default if not provided)", }, }, "required": ["query"], }, ), Tool( name="save_pearl", description="Save a new pearl (knowledge item) to FlowTether. IMPORTANT: Always provide a specific, searchable title (not 'Notes' or 'Ideas'). Include a summary explaining WHY this matters. Add 3-5 relevant tags. Set pearl_type when applicable. Think: 'How will the user find this in 6 months?'", inputSchema={ "type": "object", "properties": { "content": { "type": "string", "description": "Main content of the pearl", }, "title": { "type": "string", "description": "IMPORTANT: Specific, searchable title. Use format: '[Topic] - [Specific aspect]'. Example: 'Docker networking - bridge vs host mode'. NEVER use generic titles like 'Notes' or 'Meeting'.", }, "summary": { "type": "string", "description": "Brief explanation of why this pearl matters and when to reference it. Example: 'Explains our rate limiting strategy - reference when debugging 429 errors.'", }, "tags": { "type": "array", "items": {"type": "string"}, "description": "3-5 tags for findability. Include: project name, content type (how-to, decision, reference), key topics. Example: ['docker', 'networking', 'devops', 'reference']", }, "pearl_type": { "type": "string", "enum": ["insight", "decision", "question", "idea", "reference", "note"], "description": "Type of knowledge. 
Use 'decision' for choices made (include WHY), 'insight' for realizations, 'question' for unresolved items, 'reference' for external docs.", }, "suggested_buoy_id": { "type": "string", "description": "Optional: ID of an existing buoy where this pearl should be placed. Get buoy IDs from list_buoys(). If unsure, omit — the user will place it during review.", }, "harbor_id": { "type": "string", "description": "Optional: Save to specific harbor", }, }, "required": ["content"], }, ), Tool( name="list_harbors", description="List all harbors (knowledge spaces) accessible to the API key.", inputSchema={ "type": "object", "properties": {}, }, ), Tool( name="list_buoys", description="List buoys (topic clusters) in a harbor. Buoys organize related pearls.", inputSchema={ "type": "object", "properties": { "harbor_id": { "type": "string", "description": "Harbor to list buoys from (uses default if not provided)", }, "limit": { "type": "integer", "description": "Maximum buoys to return (default: 20)", "default": 20, }, }, }, ), Tool( name="get_pearl", description="Get full content of a pearl by its ID.", inputSchema={ "type": "object", "properties": { "pearl_id": { "type": "string", "description": "UUID of the pearl to retrieve", }, "harbor_id": { "type": "string", "description": "Harbor containing the pearl (uses default if not provided)", }, }, "required": ["pearl_id"], }, ), Tool( name="bulk_save", description="Save multiple pearls at once. IMPORTANT: Don't just dump content - organize as you save! Each pearl needs a specific title, summary, and tags. 
Split content into separate pearls when topics are distinct.", inputSchema={ "type": "object", "properties": { "pearls": { "type": "array", "items": { "type": "object", "properties": { "content": {"type": "string", "description": "Main content of this pearl"}, "title": {"type": "string", "description": "REQUIRED for findability: Specific title like 'Docker networking - bridge mode setup'"}, "summary": {"type": "string", "description": "Why this pearl matters and when to reference it"}, "tags": {"type": "array", "items": {"type": "string"}, "description": "3-5 tags: project, type, topics"}, "pearl_type": {"type": "string", "description": "insight, decision, question, idea, reference, or note"}, "suggested_buoy_id": {"type": "string", "description": "Optional: ID of an existing buoy for suggested placement. Get IDs from list_buoys()."}, }, "required": ["content"], }, "description": "Array of pearls - each should be independently findable with good title/summary/tags", }, "harbor_id": { "type": "string", "description": "Harbor to save to (uses default if not provided)", }, }, "required": ["pearls"], }, ), Tool( name="explore_connections", description="Explore what's connected to a pearl or buoy via the knowledge graph. Returns related items with relationship types (builds_on, contradicts, references, etc.) and connection depth. 
Use this to understand how knowledge is interconnected — not just what exists, but how it relates.", inputSchema={ "type": "object", "properties": { "container_id": { "type": "string", "description": "UUID of the pearl or buoy to explore from", }, "max_depth": { "type": "integer", "description": "How many hops to traverse (default: 2, max: 3)", "default": 2, }, "max_results": { "type": "integer", "description": "Maximum connected items to return (default: 20)", "default": 20, }, "harbor_id": { "type": "string", "description": "Optional: harbor containing the item", }, }, "required": ["container_id"], }, ), Tool( name="explore_buoy", description="Get full context for a buoy (topic cluster). Returns all pearls inside it, connected buoys, and topic summary. Use after list_buoys() to understand a topic area before saving new content.", inputSchema={ "type": "object", "properties": { "buoy_id": { "type": "string", "description": "UUID of the buoy to explore", }, "include_content": { "type": "boolean", "description": "Include pearl content snippets (default: false for speed)", "default": False, }, "harbor_id": { "type": "string", "description": "Optional: harbor containing the buoy", }, }, "required": ["buoy_id"], }, ), Tool( name="deep_search", description="Search the user's harbor with graph context. Like search(), but also returns items connected to search results via the knowledge graph. Use when you need to understand a topic area, not just find one pearl. 
Searches user's harbor only — not your workspace.", inputSchema={ "type": "object", "properties": { "query": { "type": "string", "description": "Search query (natural language)", }, "limit": { "type": "integer", "description": "Max direct search results (default: 5, max: 10)", "default": 5, }, "graph_depth": { "type": "integer", "description": "How many hops of connections to include (default: 2, max: 3)", "default": 2, }, "harbor_id": { "type": "string", "description": "Optional: search in specific harbor", }, }, "required": ["query"], }, ), Tool( name="read_memory", description="Read your persistent session notes from previous sessions. Note: begin_session() already includes your memory — use this only if you need to re-read mid-session or if you're not using begin_session().", inputSchema={ "type": "object", "properties": {}, }, ), Tool( name="write_memory", description="Save persistent session notes for your next session. Call this at session end to record: user preferences you learned, naming conventions that worked, active topics, session breadcrumbs. 10KB max. Overwrites previous notes.", inputSchema={ "type": "object", "properties": { "content": { "type": "string", "description": "Your session notes in markdown. Include: user preferences, domain vocabulary, active topics, what you did this session.", }, }, "required": ["content"], }, ), Tool( name="begin_session", description="YOUR SESSION ENTRY POINT. Call this first, every session. Returns: your memory from last time, what changed in the harbor since your last visit, workspace status, and pending items. First-session agents get a quickstart guide. Call list_harbors()/list_buoys() only when you need to place or explore content — not every session.", inputSchema={ "type": "object", "properties": { "harbor_id": { "type": "string", "description": "Optional: which harbor to get activity for", }, }, }, ), Tool( name="end_session", description="End the current session. 
Saves a session summary to your workspace and optionally updates your memory. Call this before disconnecting to ensure continuity next time.", inputSchema={ "type": "object", "properties": { "summary": { "type": "string", "description": "Summary of what happened this session. Include: key actions taken, decisions made, topics discussed, pending items.", }, "memory_update": { "type": "string", "description": "Updated session notes to persist for next session (replaces existing memory). Include: user preferences learned, active topics, naming conventions, and session breadcrumbs. 10KB max.", }, }, "required": ["summary"], }, ), Tool( name="timeline", description="View activity over a time period, grouped by day. Shows BOTH harbor pearls AND your workspace items (tagged with 'harbor' or 'workspace' so you can tell them apart). Use to answer 'what happened last week?'", inputSchema={ "type": "object", "properties": { "days": { "type": "integer", "description": "Number of days to look back (default: 7, max: 90)", "default": 7, }, "harbor_id": { "type": "string", "description": "Optional: specific harbor (default: current)", }, "source_filter": { "type": "string", "enum": ["all", "human", "agent"], "description": "Filter by who created the content (default: all)", "default": "all", }, }, }, ), Tool( name="write_workspace", description="Write to agent workspace — session logs, analysis, working documents. Separate from the user's harbor. Invisible to user unless you promote_to_user(). 
NOTE: workspace may be shared with sibling agents on the same identity — prefix titles with your tool name (e.g., 'Claude Code — Session Notes').", inputSchema={ "type": "object", "properties": { "content": {"type": "string", "description": "Content of the workspace entry"}, "title": {"type": "string", "description": "Title for the workspace entry"}, "tags": {"type": "array", "items": {"type": "string"}, "description": "Tags for organization"}, "pearl_type": {"type": "string", "description": "Type: note, reference, insight, etc."}, }, "required": ["content", "title"], }, ), Tool( name="search_workspace", description="Search YOUR workspace only — session logs, analysis, working documents. This is a SEPARATE search space from the user's harbor. Use search() for user pearls, this for your own notes.", inputSchema={ "type": "object", "properties": { "query": {"type": "string", "description": "Search query"}, "limit": {"type": "integer", "description": "Max results (default 10)", "default": 10}, }, "required": ["query"], }, ), Tool( name="update_workspace", description="Update an existing workspace entry. Can only update your own workspace items.", inputSchema={ "type": "object", "properties": { "pearl_id": {"type": "string", "description": "ID of the workspace item to update"}, "content": {"type": "string", "description": "New content (optional)"}, "title": {"type": "string", "description": "New title (optional)"}, }, "required": ["pearl_id"], }, ), Tool( name="consolidate_workspace", description="Analyze your workspace health. Finds exact duplicates (same content), near-duplicates (similar titles), stale entries (>30 days), and items worth promoting to the user's knowledge base. Returns a report — does NOT auto-delete. 
You decide what to act on.", inputSchema={ "type": "object", "properties": { "auto_merge_threshold": { "type": "number", "description": "Similarity threshold for duplicate detection (default: 0.85, range: 0.7-0.95)", "default": 0.85, }, }, }, ), Tool( name="promote_to_user", description="Promote a workspace item to the user's Dry Dock for review. The original stays in your workspace. Use this when you've created something the user should see.", inputSchema={ "type": "object", "properties": { "pearl_id": {"type": "string", "description": "ID of the workspace item to promote"}, }, "required": ["pearl_id"], }, ), Tool( name="delete_workspace", description="Delete a workspace item. Use after consolidate_workspace() to clean up duplicates or stale entries. Only deletes YOUR workspace items. Soft-delete (recoverable).", inputSchema={ "type": "object", "properties": { "pearl_id": { "type": "string", "description": "ID of the workspace item to delete", }, }, "required": ["pearl_id"], }, ), Tool( name="list_shared_context", description="List items shared between agents in this harbor. See what other agents have shared for coordination. Each item shows who shared it and when.", inputSchema={ "type": "object", "properties": { "limit": { "type": "integer", "description": "Max items to return (default: 20)", "default": 20, }, "harbor_id": { "type": "string", "description": "Optional: specific harbor", }, }, }, ), Tool( name="share_with_agents", description="Share a pearl or workspace item with other agents in this harbor. Other agents will see it via list_shared_context(). 
Use to pass context between agents working on related tasks.", inputSchema={ "type": "object", "properties": { "pearl_id": { "type": "string", "description": "ID of the item to share", }, "note": { "type": "string", "description": "Optional: context for other agents about why this is shared", }, }, "required": ["pearl_id"], }, ), ] @server.call_tool() async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]: """Handle tool calls from Claude CLI.""" if not API_KEY: return [TextContent(type="text", text="Error: FLOWTETHER_API_KEY environment variable not set")] try: if name == "get_instructions": return await handle_get_instructions() elif name == "search": return await handle_search(arguments) elif name == "save_pearl": return await handle_save_pearl(arguments) elif name == "list_harbors": return await handle_list_harbors() elif name == "list_buoys": return await handle_list_buoys(arguments) elif name == "get_pearl": return await handle_get_pearl(arguments) elif name == "bulk_save": return await handle_bulk_save(arguments) elif name == "explore_connections": return await handle_explore_connections(arguments) elif name == "explore_buoy": return await handle_explore_buoy(arguments) elif name == "deep_search": return await handle_deep_search(arguments) elif name == "read_memory": return await handle_read_memory() elif name == "write_memory": return await handle_write_memory(arguments) elif name == "begin_session": return await handle_begin_session(arguments) elif name == "end_session": return await handle_end_session(arguments) elif name == "timeline": return await handle_timeline(arguments) elif name == "write_workspace": return await handle_write_workspace(arguments) elif name == "search_workspace": return await handle_search_workspace(arguments) elif name == "update_workspace": return await handle_update_workspace(arguments) elif name == "consolidate_workspace": return await handle_consolidate_workspace(arguments) elif name == 
"delete_workspace": return await handle_delete_workspace(arguments) elif name == "promote_to_user": return await handle_promote_to_user(arguments) elif name == "list_shared_context": return await handle_list_shared_context(arguments) elif name == "share_with_agents": return await handle_share_with_agents(arguments) else: return [TextContent(type="text", text=f"Unknown tool: {name}")] except Exception as e: logger.exception(f"Tool error: {name}") return [TextContent(type="text", text=f"Error: {e}")] async def handle_get_instructions() -> list[TextContent]: """Handle get_instructions tool - returns the static usage guide only. Harbor and buoy information is NOT pre-loaded here. The agent should call list_harbors() and list_buoys() when it needs that data (steps 2-3 in the guide). This keeps instructions lightweight and avoids stale context. """ return [TextContent(type="text", text=FLOWTETHER_INSTRUCTIONS)] async def handle_search(args: dict[str, Any]) -> list[TextContent]: """Handle search tool.""" query = args.get("query", "") limit = min(args.get("limit", 10), 50) harbor_id = args.get("harbor_id") result = await api_request( "POST", "/api/search/", json={"query": query, "limit": limit}, harbor_id=harbor_id, ) if not result.get("results"): return [TextContent(type="text", text="No results found.")] # Format results lines = [f"Found {len(result['results'])} results:\n"] for i, item in enumerate(result["results"], 1): title = item.get("container_title", "Untitled") summary = item.get("container_desc") or item.get("content_preview", "")[:100] pearl_id = item.get("container_id", "") score = item.get("score", 0) lines.append(f"{i}. 
**{title}** (score: {score:.2f})") lines.append(f" ID: {pearl_id}") if summary: lines.append(f" {summary}...") lines.append("") return [TextContent(type="text", text="\n".join(lines))] async def handle_save_pearl(args: dict[str, Any]) -> list[TextContent]: """Handle save_pearl tool.""" content = args.get("content", "") if not content: return [TextContent(type="text", text="Error: content is required")] payload = { "content": content, "title": args.get("title"), "summary": args.get("summary"), "tags": args.get("tags", []), "pearl_type": args.get("pearl_type"), "suggested_buoy_id": args.get("suggested_buoy_id"), } harbor_id = args.get("harbor_id") result = await api_request( "POST", "/api/mcp/create_pearl", json=payload, harbor_id=harbor_id, ) if result.get("duplicate"): existing = result.get("existing_pearl", {}) return [ TextContent( type="text", text=f"Duplicate detected. Existing pearl: {existing.get('container_title', 'Untitled')} (ID: {existing.get('container_id', 'unknown')})", ) ] return [ TextContent( type="text", text=f"Pearl saved successfully!\nTitle: {result.get('title', 'Untitled')}\nID: {result.get('pearl_id', 'unknown')}", ) ] async def handle_list_harbors() -> list[TextContent]: """Handle list_harbors tool.""" result = await api_request("GET", "/api/mcp/harbors") if not result: return [TextContent(type="text", text="No harbors found.")] lines = ["Available harbors:\n"] for harbor in result: lines.append(f"- **{harbor.get('harbor_label', 'Unnamed')}**") lines.append(f" ID: {harbor.get('harbor_id', 'unknown')}") lines.append("") return [TextContent(type="text", text="\n".join(lines))] async def handle_list_buoys(args: dict[str, Any]) -> list[TextContent]: """Handle list_buoys tool.""" harbor_id = args.get("harbor_id") limit = args.get("limit", 20) # Use dedicated MCP endpoint for buoys (API key safe) result = await api_request( "GET", "/api/mcp/buoys", params={"limit": limit}, harbor_id=harbor_id, ) if not result: return [TextContent(type="text", 
text="No buoys found in this harbor.")] lines = ["Buoys (topic clusters):\n"] for buoy in result: lines.append(f"- **{buoy.get('container_title', 'Unnamed')}**") lines.append(f" ID: {buoy.get('container_id', 'unknown')}") pearl_count = buoy.get("pearl_count", 0) lines.append(f" Pearls: {pearl_count}") if buoy.get("container_desc"): lines.append(f" {buoy['container_desc'][:100]}...") lines.append("") return [TextContent(type="text", text="\n".join(lines))] async def handle_get_pearl(args: dict[str, Any]) -> list[TextContent]: """Handle get_pearl tool.""" pearl_id = args.get("pearl_id") if not pearl_id: return [TextContent(type="text", text="Error: pearl_id is required")] harbor_id = args.get("harbor_id") # Use dedicated MCP endpoint for pearl retrieval (API key safe) result = await api_request( "GET", f"/api/mcp/pearl/{pearl_id}", harbor_id=harbor_id, ) title = result.get("container_title", "Untitled") content = result.get("content", "") summary = result.get("container_desc", "") tags = result.get("tags", []) created = result.get("container_createdt", "") category = result.get("container_category", "") lines = [ f"# {title}", "", f"**ID:** {pearl_id}", f"**Created:** {created}", ] if category: lines.append(f"**Type:** {category}") if tags: lines.append(f"**Tags:** {', '.join(tags)}") if summary: lines.append(f"\n**Summary:** {summary}") lines.append(f"\n## Content\n\n{content}") return [TextContent(type="text", text="\n".join(lines))] async def handle_bulk_save(args: dict[str, Any]) -> list[TextContent]: """Handle bulk_save tool.""" pearls = args.get("pearls", []) if not pearls: return [TextContent(type="text", text="Error: pearls array is required")] harbor_id = args.get("harbor_id") # Convert to bulk_ingest format payload = { "source_description": "Claude CLI bulk save", "processor": "mcp-server", "pearls": [ { "temp_id": f"pearl_{i}", "title": p.get("title") or p.get("content", "")[:60], "content": p.get("content", ""), "summary": p.get("summary"), "tags": 
p.get("tags", []), "pearl_type": p.get("pearl_type"), "suggested_buoy_id": p.get("suggested_buoy_id"), } for i, p in enumerate(pearls) ], "buoys": [], "connections": [], } result = await api_request( "POST", "/api/mcp/bulk_ingest", json=payload, harbor_id=harbor_id, ) stats = result.get("stats", {}) created = result.get("created", {}) skipped = result.get("skipped", []) lines = [ "Bulk save completed:", f"- Created: {stats.get('total_pearls', 0)} pearls", f"- Skipped: {len(skipped)} (duplicates)", f"- Processing time: {stats.get('processing_time_ms', 0)}ms", ] if skipped: lines.append("\nSkipped items:") for item in skipped[:5]: # Show max 5 lines.append(f" - {item.get('temp_id')}: {item.get('reason')}") return [TextContent(type="text", text="\n".join(lines))] async def handle_explore_connections(args: dict[str, Any]) -> list[TextContent]: """Handle explore_connections tool — graph traversal from a container.""" container_id = args.get("container_id") if not container_id: return [TextContent(type="text", text="Error: container_id is required")] harbor_id = args.get("harbor_id") params = {} if args.get("max_depth"): params["max_depth"] = min(args["max_depth"], 3) if args.get("max_results"): params["max_results"] = min(args["max_results"], 50) result = await api_request( "GET", f"/api/mcp/connections/{container_id}", params=params, harbor_id=harbor_id, ) source = result.get("source", {}) connections = result.get("connections", []) if not connections: return [TextContent(type="text", text=f"No connections found for '{source.get('title', container_id)}'.")] lines = [ f"## Connections for: {source.get('title', 'Unknown')} ({source.get('type', '')})", f"Found {len(connections)} connected items:\n", ] for i, conn_item in enumerate(connections, 1): rel = conn_item.get("relationship", "related_to") depth = conn_item.get("depth", 1) indent = " " * (depth - 1) lines.append(f"{indent}{i}. 
**{conn_item.get('title', 'Untitled')}** ({conn_item.get('type', '')})") lines.append(f"{indent} Relationship: {rel} | Depth: {depth}") lines.append(f"{indent} ID: {conn_item.get('container_id', '')}") lines.append("") return [TextContent(type="text", text="\n".join(lines))] async def handle_explore_buoy(args: dict[str, Any]) -> list[TextContent]: """Handle explore_buoy tool — get full buoy context.""" buoy_id = args.get("buoy_id") if not buoy_id: return [TextContent(type="text", text="Error: buoy_id is required")] harbor_id = args.get("harbor_id") params = {} if args.get("include_content"): params["include_content"] = "true" result = await api_request( "GET", f"/api/mcp/buoy/{buoy_id}", params=params, harbor_id=harbor_id, ) buoy = result.get("buoy", {}) pearls = result.get("pearls", []) connected_buoys = result.get("connected_buoys", []) lines = [ f"## Buoy: {buoy.get('title', 'Unknown')}", ] if buoy.get("description"): lines.append(f"*{buoy['description']}*") lines.append(f"\n**Pearls ({len(pearls)}):**\n") for i, pearl in enumerate(pearls, 1): lines.append(f"{i}. 
**{pearl.get('title', 'Untitled')}**") lines.append(f" ID: {pearl.get('container_id', '')}") if pearl.get("category"): lines.append(f" Type: {pearl['category']}") if pearl.get("created"): lines.append(f" Created: {pearl['created']}") if pearl.get("snippet"): lines.append(f" {pearl['snippet'][:150]}...") lines.append("") if connected_buoys: lines.append(f"\n**Connected Buoys ({len(connected_buoys)}):**\n") for cb in connected_buoys: lines.append(f"- **{cb.get('title', 'Untitled')}** (ID: {cb.get('container_id', '')})") return [TextContent(type="text", text="\n".join(lines))] async def handle_deep_search(args: dict[str, Any]) -> list[TextContent]: """Handle deep_search tool — search with graph context.""" query = args.get("query", "") if not query: return [TextContent(type="text", text="Error: query is required")] harbor_id = args.get("harbor_id") payload = { "query": query, "limit": min(args.get("limit", 5), 10), "graph_depth": min(args.get("graph_depth", 2), 3), } result = await api_request( "POST", "/api/mcp/deep_search", json=payload, harbor_id=harbor_id, ) direct_hits = result.get("direct_hits", []) graph_context = result.get("graph_context", []) stats = result.get("stats", {}) if not direct_hits and not graph_context: return [TextContent(type="text", text="No results found.")] lines = [ f"## Deep Search: \"{query}\"", f"Stats: {stats.get('vector_count', 0)} direct hits, {stats.get('graph_count', 0)} via graph, {stats.get('total_unique', 0)} total unique\n", ] if direct_hits: lines.append("### Direct Matches\n") for i, hit in enumerate(direct_hits, 1): lines.append(f"{i}. 
**{hit.get('title', 'Untitled')}** (score: {hit.get('score', 0):.3f})")
        lines.append(f" ID: {hit.get('container_id', '')}")
        if hit.get("source_type"):
            lines.append(f" Source: {hit['source_type']}")
        lines.append("")
    # Graph-expansion results: items connected to the direct search hits,
    # present only when the API returned a graph_context section.
    if graph_context:
        lines.append("### Graph Context (connected to search results)\n")
        for i, item in enumerate(graph_context, 1):
            # "depth" (when present) is how far the item sits from a direct hit.
            depth_str = f", depth {item['depth']}" if "depth" in item else ""
            lines.append(f"{i}. **{item.get('title', 'Untitled')}** (score: {item.get('score', 0):.3f}{depth_str})")
            lines.append(f" ID: {item.get('container_id', '')}")
            lines.append("")
    return [TextContent(type="text", text="\n".join(lines))]


async def handle_read_memory() -> list[TextContent]:
    """Handle read_memory tool — load agent's persistent notes.

    Returns the stored notes as markdown, or a placeholder message when no
    notes exist yet.  Errors from the API surface as exceptions raised by
    api_request (defined earlier in this file).
    """
    result = await api_request("GET", "/api/mcp/memory")
    memory = result.get("memory", "")
    if not memory:
        return [TextContent(type="text", text="No session notes from previous sessions.")]
    return [TextContent(type="text", text=f"## Your Session Notes\n\n{memory}")]


async def handle_write_memory(args: dict[str, Any]) -> list[TextContent]:
    """Handle write_memory tool — save agent's persistent notes.

    Args:
        args: Tool arguments; requires "content" (the full notes text).

    NOTE(review): this issues a PUT with the complete content, which presumably
    replaces the stored notes wholesale rather than appending — confirm against
    the server's /api/mcp/memory semantics.
    """
    content = args.get("content", "")
    if not content:
        return [TextContent(type="text", text="Error: content is required")]
    result = await api_request(
        "PUT",
        "/api/mcp/memory",
        json={"content": content},
    )
    size = result.get("size_bytes", 0)
    return [TextContent(type="text", text=f"Session notes saved ({size} bytes). These will be available at the start of your next session.")]


async def handle_begin_session(args: dict[str, Any]) -> list[TextContent]:
    """Handle begin_session tool — get context-aware session briefing.

    Builds a single markdown briefing from the API response: persistent memory,
    recent harbor activity, workspace status, and pending duplicates.  For a
    first-ever session it appends an onboarding guide so the agent can operate
    without calling get_instructions() first.

    Args:
        args: Tool arguments; optional "harbor_id" overrides the default harbor.
    """
    harbor_id = args.get("harbor_id")
    result = await api_request(
        "POST",
        "/api/mcp/session/begin",
        json={},
        harbor_id=harbor_id,
    )
    session_type = result.get("session_type", "first")
    memory = result.get("memory", "")
    activity = result.get("harbor_activity", {})
    workspace = result.get("workspace", {})
    pending_dupes = result.get("pending_duplicates", 0)
    lines = [f"## Session Started ({session_type})\n"]
    # Memory
    if memory:
        lines.append("### Your Session Notes from Last Time\n")
        lines.append(memory)
        lines.append("")
    else:
        lines.append("*No session notes from previous sessions.*\n")
    # Harbor activity — capped at 10 shown items to keep the briefing short.
    activity_items = activity.get("items", [])
    if activity_items:
        # [:10] truncates "since" to the date portion of an ISO timestamp.
        lines.append(f"### Harbor Activity (since {activity.get('since', 'unknown')[:10]})\n")
        lines.append(f"{len(activity_items)} new items:\n")
        for item in activity_items[:10]:
            source = item.get("source", "")
            agent = f" ({item['source_agent']})" if item.get("source_agent") else ""
            cat = f" [{item['category']}]" if item.get("category") else ""
            lines.append(f"- **{item.get('title', 'Untitled')}**{cat} — {source}{agent}")
        if len(activity_items) > 10:
            lines.append(f" ... and {len(activity_items) - 10} more")
        lines.append("")
    # Workspace
    ws_total = workspace.get("total_items", 0)
    if ws_total > 0:
        lines.append(f"### Your Workspace ({ws_total} items)\n")
        for item in workspace.get("recent", []):
            lines.append(f"- {item.get('title', 'Untitled')} (ID: {item.get('container_id', '')})")
        lines.append("")
    # Pending duplicates
    if pending_dupes > 0:
        lines.append(f"### Pending: {pending_dupes} duplicate(s) to review\n")
    # First-session agents get enough guidance to work correctly
    if session_type == "first":
        lines.append("### Welcome — First Session Guide\n")
        lines.append("**FlowTether** is a knowledge management system. You help the user")
        lines.append("build a searchable, interconnected knowledge base.\n")
        lines.append("**Core concepts:**")
        lines.append(" Harbor = knowledge space (like a vault)")
        lines.append(" Pearl = a piece of knowledge (document, insight, decision)")
        lines.append(" Buoy = topic cluster grouping related pearls")
        lines.append(" Strand = connection between items\n")
        lines.append("**Rules that matter:**")
        lines.append(" 1. ALWAYS `search()` before `save_pearl()` — no blind saves")
        lines.append(" 2. Write specific titles: 'Docker networking - bridge vs host'")
        lines.append(" NOT 'Notes' or 'Ideas'")
        lines.append(" 3. Include a summary explaining WHY this pearl matters")
        lines.append(" 4. Add 3-5 tags for findability")
        lines.append(" 5. Set pearl_type: insight, decision, question, idea, reference, note\n")
        lines.append("**Your tools:**")
        lines.append(" - `save_pearl()` — for knowledge the USER should see (staged for review)")
        lines.append(" - `write_workspace()` — for YOUR working notes (private to you)")
        lines.append(" - `explore_connections(id)` — see what's connected to a pearl")
        lines.append(" - `deep_search(query)` — search + graph context in one call")
        lines.append(" - `list_harbors()` / `list_buoys()` — explore structure (only when needed)")
        lines.append(" - `end_session()` — ALWAYS call before disconnecting\n")
        lines.append("**For the full reference guide** (title writing, tag strategy, organization")
        lines.append("best practices), call `get_instructions()`.\n")
    return [TextContent(type="text", text="\n".join(lines))]


async def handle_end_session(args: dict[str, Any]) -> list[TextContent]:
    """Handle end_session tool — save summary and update memory.

    Args:
        args: Tool arguments; requires "summary", optional "memory_update"
            to persist notes for the next session.
    """
    summary = args.get("summary", "")
    if not summary:
        return [TextContent(type="text", text="Error: summary is required")]
    payload: dict[str, Any] = {"summary": summary}
    if args.get("memory_update"):
        payload["memory_update"] = args["memory_update"]
    result = await api_request(
        "POST",
        "/api/mcp/session/end",
        json=payload,
    )
    # The response tells us whether the summary landed in a workspace and
    # whether persistent memory was updated; report both to the agent.
    lines = ["Session ended."]
    if result.get("summary_pearl_id"):
        lines.append(f"Summary saved to workspace (ID: {result['summary_pearl_id']}).")
    else:
        lines.append("Note: Session summary was NOT saved (no agent workspace configured for this key).")
    if result.get("memory_updated"):
        lines.append(f"Memory updated ({result.get('memory_size_bytes', 0)} bytes).")
        lines.append("These will be available when you start your next session via begin_session().")
    return [TextContent(type="text", text=" ".join(lines))]


async def handle_timeline(args: dict[str, Any]) -> list[TextContent]:
    """Handle timeline tool — activity grouped by day.

    Args:
        args: Tool arguments; optional "days" (window, capped at 90),
            "source_filter", and "harbor_id".
    """
    harbor_id = args.get("harbor_id")
    params: dict[str, Any] = {}
    if args.get("days"):
        # Cap the window at 90 days client-side (presumably mirroring a
        # server-side limit — TODO confirm).  Note: days=0 is falsy and is
        # treated the same as "days not given".
        params["days"] = min(args["days"], 90)
    if args.get("source_filter"):
        params["source_filter"] = args["source_filter"]
    result = await api_request(
        "GET",
        "/api/mcp/session/timeline",
        params=params,
        harbor_id=harbor_id,
    )
    period = result.get("period", {})
    days_data = result.get("days", [])
    total = result.get("total_items", 0)
    if not days_data:
        return [TextContent(type="text", text=f"No activity found in the last {args.get('days', 7)} days.")]
    lines = [
        f"## Timeline: {period.get('from', '?')} to {period.get('to', '?')}",
        f"Total: {total} items across {len(days_data)} days\n",
    ]
    for day in days_data:
        lines.append(f"### {day.get('date', '?')} ({day.get('count', 0)} items)\n")
        for item in day.get("items", []):
            source = item.get("source", "")
            cat = f" [{item['category']}]" if item.get("category") else ""
            time_str = item.get("created", "")
            # Reduce an ISO timestamp ("...THH:MM:SS...") to just HH:MM.
            if time_str and "T" in time_str:
                time_str = time_str.split("T")[1][:5]
            space = f" [{item['space']}]" if item.get("space") else ""
            lines.append(f"- {time_str} **{item.get('title', 'Untitled')}**{cat}{space} ({source})")
        lines.append("")
    return [TextContent(type="text", text="\n".join(lines))]


async def handle_write_workspace(args: dict[str, Any]) -> list[TextContent]:
    """Handle write_workspace tool — create workspace entry.

    Args:
        args: Tool arguments; requires "content" and "title", optional
            "tags" (list) and "pearl_type".
    """
    content = args.get("content", "")
    title = args.get("title", "")
    if not content or not title:
        return [TextContent(type="text", text="Error: content and title are required")]
    result = await api_request(
        "POST",
        "/api/mcp/workspace/create",
        json={
            "content": content,
            "title": title,
            "tags": args.get("tags", []),
            "pearl_type": args.get("pearl_type"),
        },
    )
    return [TextContent(type="text", text=f"Workspace entry created: {result.get('title', title)} (ID: {result.get('pearl_id', 'unknown')})")]


async def handle_search_workspace(args: dict[str, Any]) -> list[TextContent]:
    """Handle search_workspace tool — search agent's own workspace.

    Args:
        args: Tool arguments; requires "query", optional "limit"
            (default 10, capped at 50).
    """
    query = args.get("query", "")
    if not query:
        return [TextContent(type="text", text="Error: query is required")]
    result = await api_request(
        "POST",
        "/api/mcp/workspace/search",
        json={"query": query, "limit": min(args.get("limit", 10), 50)},
    )
    items = result.get("results", [])
    if not items:
        return [TextContent(type="text", text="No workspace items found.")]
    lines = [f"Found {len(items)} workspace items:\n"]
    for i, item in enumerate(items, 1):
        lines.append(f"{i}. **{item.get('title', 'Untitled')}**")
        lines.append(f" ID: {item.get('container_id', '')}")
        if item.get("summary"):
            # Truncate long summaries to keep the listing scannable.
            lines.append(f" {item['summary'][:100]}")
        if item.get("created"):
            lines.append(f" Created: {item['created']}")
        lines.append("")
    return [TextContent(type="text", text="\n".join(lines))]


async def handle_update_workspace(args: dict[str, Any]) -> list[TextContent]:
    """Handle update_workspace tool — update existing workspace entry.

    Args:
        args: Tool arguments; requires "pearl_id" plus at least one of
            "content" or "title".
    """
    pearl_id = args.get("pearl_id")
    if not pearl_id:
        return [TextContent(type="text", text="Error: pearl_id is required")]
    payload: dict[str, Any] = {}
    if args.get("content"):
        payload["content"] = args["content"]
    if args.get("title"):
        payload["title"] = args["title"]
    if not payload:
        return [TextContent(type="text", text="Error: provide content or title to update")]
    # Response body is not used beyond confirming success; api_request raises
    # on any HTTP error, so reaching the return implies the update succeeded.
    result = await api_request("PUT", f"/api/mcp/workspace/{pearl_id}", json=payload)
    return [TextContent(type="text", text=f"Workspace entry updated: {pearl_id}")]


async def handle_consolidate_workspace(args: dict[str, Any]) -> list[TextContent]:
    """Handle consolidate_workspace tool — analyze workspace health.

    Formats the server's analysis into a markdown report with four sections:
    exact duplicates, near-duplicates, stale items, and promotion candidates.

    Args:
        args: Tool arguments; optional "auto_merge_threshold" passed through
            to the server.
    """
    payload: dict[str, Any] = {}
    if args.get("auto_merge_threshold"):
        payload["auto_merge_threshold"] = args["auto_merge_threshold"]
    result = await api_request(
        "POST",
        "/api/mcp/workspace/consolidate",
        json=payload,
    )
    total = result.get("total_items", 0)
    exact_dupes = result.get("exact_duplicates", [])
    near_dupes = result.get("near_duplicates", [])
    stale = result.get("stale_items", [])
    promotions = result.get("promotion_candidates", [])
    lines = [
        "## Workspace Health Report",
        f"Total items: {total}\n",
    ]
    # Exact duplicates: groups of identical entries.
    if exact_dupes:
        lines.append(f"### Exact Duplicates ({len(exact_dupes)} groups)\n")
        for group in exact_dupes:
            lines.append(f"- **{group.get('title', '?')}** ({group.get('count', 0)} copies)")
            lines.append(f" IDs: {', '.join(group.get('ids', []))}")
            lines.append(f" Suggested: {group.get('action', 'keep_newest')}")
            lines.append("")
    else:
        lines.append("### No exact duplicates found.\n")
    # Near-duplicates: similar pairs reported with a similarity score (0..1).
    if near_dupes:
        lines.append(f"### Near-Duplicates ({len(near_dupes)} pairs)\n")
        for pair in near_dupes:
            titles = pair.get("titles", ["?", "?"])
            sim = pair.get("similarity", 0)
            ids = pair.get("ids", [])
            lines.append(f"- **{titles[0]}** ↔ **{titles[1]}** ({sim:.0%} similar)")
            lines.append(f" IDs: {', '.join(ids)}")
            lines.append(f" Suggested: {pair.get('action', 'review_and_merge')}")
            lines.append("")
    # Stale items: only the first 10 are listed in full.
    if stale:
        lines.append(f"### Stale Items ({len(stale)} older than 30 days)\n")
        for item in stale[:10]:
            age = item.get("age_days", "?")
            lines.append(f"- **{item.get('title', '?')}** ({age} days old)")
            lines.append(f" ID: {item.get('container_id', '')}")
        if len(stale) > 10:
            lines.append(f" ... and {len(stale) - 10} more")
        lines.append("")
    # Items the server considers worth promoting to the user's space.
    if promotions:
        lines.append(f"### Promotion Candidates ({len(promotions)} items worth sharing)\n")
        for item in promotions:
            reasons = ", ".join(item.get("reasons", []))
            lines.append(f"- **{item.get('title', '?')}** (score: {item.get('score', 0)})")
            lines.append(f" ID: {item.get('container_id', '')} — {reasons}")
        lines.append("")
    has_findings = exact_dupes or near_dupes or stale or promotions
    if not has_findings:
        lines.append("Workspace is clean — no action needed.")
    return [TextContent(type="text", text="\n".join(lines))]


async def handle_delete_workspace(args: dict[str, Any]) -> list[TextContent]:
    """Handle delete_workspace tool — soft-delete a workspace item.

    Args:
        args: Tool arguments; requires "pearl_id".
    """
    pearl_id = args.get("pearl_id")
    if not pearl_id:
        return [TextContent(type="text", text="Error: pearl_id is required")]
    # Response unused; api_request raises on HTTP error, so success is implied.
    result = await api_request("DELETE", f"/api/mcp/workspace/{pearl_id}")
    return [TextContent(type="text", text=f"Workspace item deleted: {pearl_id}")]


async def handle_promote_to_user(args: dict[str, Any]) -> list[TextContent]:
    """Handle promote_to_user tool — copy workspace item to Dry Dock.

    The server copies (not moves) the item into the user's review queue and
    returns the new pearl's ID; the original remains in the agent workspace.

    Args:
        args: Tool arguments; requires "pearl_id".
    """
    pearl_id = args.get("pearl_id")
    if not pearl_id:
        return [TextContent(type="text", text="Error: pearl_id is required")]
    result = await api_request("POST", f"/api/mcp/workspace/{pearl_id}/promote")
    new_id = result.get("promoted_pearl_id", "unknown")
    return [TextContent(type="text", text=f"Promoted to user's Dry Dock for review (new ID: {new_id}). Original stays in your workspace.")]


async def handle_list_shared_context(args: dict[str, Any]) -> list[TextContent]:
    """Handle list_shared_context tool — items shared between agents.

    Args:
        args: Tool arguments; optional "harbor_id" and "limit" (capped at 50).
    """
    harbor_id = args.get("harbor_id")
    params = {}
    if args.get("limit"):
        params["limit"] = min(args["limit"], 50)
    result = await api_request(
        "GET",
        "/api/mcp/shared",
        params=params,
        harbor_id=harbor_id,
    )
    items = result.get("items", [])
    if not items:
        return [TextContent(type="text", text="No shared context found in this harbor. Use share_with_agents() to share items.")]
    lines = [f"## Shared Context ({len(items)} items)\n"]
    for i, item in enumerate(items, 1):
        source = item.get("source", "")
        agent = f" by {item['source_agent']}" if item.get("source_agent") else ""
        cat = f" [{item['category']}]" if item.get("category") else ""
        lines.append(f"{i}. **{item.get('title', 'Untitled')}**{cat}")
        lines.append(f" ID: {item.get('container_id', '')} | Shared: {source}{agent}")
        if item.get("created"):
            # [:10] keeps only the date portion of an ISO timestamp.
            lines.append(f" Created: {item['created'][:10]}")
        lines.append("")
    return [TextContent(type="text", text="\n".join(lines))]


async def handle_share_with_agents(args: dict[str, Any]) -> list[TextContent]:
    """Handle share_with_agents tool — mark item as agent-visible.

    Args:
        args: Tool arguments; requires "pearl_id", optional "note" attached
            to the share.
    """
    pearl_id = args.get("pearl_id")
    if not pearl_id:
        return [TextContent(type="text", text="Error: pearl_id is required")]
    payload: dict[str, Any] = {}
    if args.get("note"):
        payload["note"] = args["note"]
    result = await api_request(
        "POST",
        f"/api/mcp/share/{pearl_id}",
        # Send no body at all (rather than an empty object) when there is no note.
        json=payload if payload else None,
    )
    title = result.get("title", pearl_id)
    return [TextContent(type="text", text=f"Shared '{title}' with other agents in this harbor. They can see it via list_shared_context().")]


# =============================================================================
# MCP Resources - Instructions for Claude
# =============================================================================

# Long-form reference guide served both as an MCP resource and via the
# get_instructions tool.  This is runtime content, not a docstring.
FLOWTETHER_INSTRUCTIONS = """
# FlowTether Knowledge Organization Guide

FlowTether is a personal knowledge management system. Your job as an AI
assistant is to help the user BUILD A SEARCHABLE, INTERCONNECTED KNOWLEDGE
BASE - not just dump content.

## Core Concepts

Harbor = A knowledge space (like a vault or database)
Pearl = A piece of content (document, note, insight, decision)
Buoy = A topic cluster (emerges automatically from related pearls)
Strand = An ordered sequence linking pearls together

-------------------------------------------------------------------------------
## TWO SEARCH SPACES (Important!)
-------------------------------------------------------------------------------

You operate across TWO isolated spaces. They are SEPARATE — content in one
does NOT appear when you search the other.

USER'S HARBOR (the knowledge base):
- Contains: the user's pearls, buoys, strands
- Searched by: search(), deep_search(), explore_connections(), timeline()
- Write to: save_pearl(), bulk_save() (lands in Dry Dock for review)

YOUR WORKSPACE (agent notes — may be SHARED):
- Contains: session summaries, working notes, analysis
- Searched by: search_workspace()
- Write to: write_workspace(), end_session()
- Only agents can see this. The user sees it only if you promote_to_user().

⚠ WORKSPACE SHARING: Your workspace is tied to your AGENT identity, not your
API key. If the user gave the same API key (or same agent) to multiple tools
(e.g., Claude Code AND Cursor), you SHARE this workspace with them.

When sharing a workspace:
- FINGERPRINT your entries: prefix titles with your tool name.
Example: "Claude Code — Session Summary 2026-04-11" Not just: "Session Summary 2026-04-11" - RESPECT other agents' work: if you see entries you didn't create, don't update or overwrite them. They belong to a sibling agent. - READ before writing: search_workspace() first to see what's already there. Another agent may have context you need. - Your MEMORY (read_memory/write_memory) is per-API-key, so it IS private even if the workspace is shared. Use memory for notes only you need; use workspace for notes any agent on this identity could use. COMMON MISTAKES: - Searching for your own session notes with search() — WRONG, use search_workspace() - Searching for user's pearls with search_workspace() — WRONG, use search() NOTE: timeline() shows BOTH spaces (harbor + workspace items tagged separately). If you're looking for something and get zero results, ask: "Am I searching the right space?" If you wrote it → search_workspace(). If the user wrote it or you saved it with save_pearl() → search(). begin_session() gives you BOTH: recent harbor activity AND workspace status in one call. Use that as your starting context each session. ------------------------------------------------------------------------------- ## SESSION FLOW ------------------------------------------------------------------------------- EVERY SESSION: 1. begin_session() -- ALWAYS call first. Gets your memory, recent activity, workspace status, and pending items. 2. [Do your work] -- Search, save, explore, etc. 3. end_session(summary, memory_update) -- Save session summary and notes. WHEN YOU NEED STRUCTURE INFO (not every session — only when placing content): - list_harbors() -- Which harbors exist (call once, cache the answer) - list_buoys() -- What topic clusters exist in a harbor BEFORE CREATING CONTENT (always): - search(query) -- Check for duplicates before saving - save_pearl(...) 
-- Only after confirming no duplicate exists NAVIGATING KNOWLEDGE (on demand): - explore_connections(id) -- What's connected to a pearl/buoy - explore_buoy(id) -- Full topic cluster: pearls + connected buoys - deep_search(query) -- Search + graph context in one call - timeline(days) -- What happened recently, grouped by day MULTI-AGENT COORDINATION: - list_shared_context() -- Items other agents have shared - share_with_agents(id) -- Share an item for other agents to see WORKSPACE HEALTH (at most once per session): - consolidate_workspace() -- Find duplicates, stale items, promotion candidates REFERENCE (read once, not every session): - get_instructions() -- Full guide on writing titles, tags, summaries. You're reading it now. No need to call it again. ------------------------------------------------------------------------------- ## ERROR HANDLING CONTRACT ------------------------------------------------------------------------------- Handle API errors as follows: 401 Unauthorized - API key is invalid, expired, or missing. - STOP. Tell the user: "FlowTether API key is invalid or expired." - Do NOT retry. User must generate a new key. 403 Forbidden - Key lacks permission for this harbor or action. - STOP. Tell user: "API key does not have access to this harbor/action." - Do NOT retry with same request. 404 Not Found - Pearl or harbor does not exist. - Inform user. Do NOT retry. 429 Too Many Requests - Rate limited. - Wait 60 seconds, then retry ONCE. - If still 429, STOP and tell user: "Rate limited. Try again later." 500/502/503 Server Error - Transient server issue. - Retry up to 2 times with 5-second delay. - If still failing, STOP and tell user: "FlowTether server error." Connection Error / Timeout - Network issue. - Retry up to 2 times with 5-second delay. - If still failing, STOP and tell user: "Cannot connect to FlowTether." NEVER retry indefinitely. Max 2 retries for transient errors, 0 for auth errors. 
------------------------------------------------------------------------------- ## RATE LIMITS (Per API Key) ------------------------------------------------------------------------------- Every tool call is rate-limited per API key. Exceeding a limit returns 429. Plan your tool usage to stay within these bounds. WRITES (save_pearl, bulk_save, end_session, share_with_agents, write_workspace, update_workspace, delete_workspace, promote_to_user): 10 calls/minute | 100 calls/hour | 500 calls/day BULK INGEST (bulk_save with multiple pearls): 5 calls/minute (each call can contain up to 100 pearls) SEARCH (search, deep_search, search_workspace): 20 calls/minute | 200 calls/hour READS (get_pearl, explore_connections, explore_buoy): 30 calls/minute | 300 calls/hour LISTINGS (list_harbors, list_buoys, begin_session, timeline, list_shared_context): 60 calls/hour COMPUTE-HEAVY (consolidate_workspace): 3 calls/minute (this runs expensive similarity analysis) PRACTICAL GUIDANCE: - begin_session: Call ONCE at session start. Not in a loop. - deep_search: Prefer regular search() for quick lookups. Use deep_search only when you need graph context (it costs more). - consolidate_workspace: Call at most once per session. The report doesn't change unless you've added/removed items since last check. - bulk_save vs save_pearl: For 3+ pearls, use bulk_save (1 call) instead of 3 separate save_pearl calls (3 calls against the write limit). - When you get a 429: Wait 60 seconds, retry ONCE. If still 429, STOP. Do NOT loop retries — the limits are per-window, not per-request. ------------------------------------------------------------------------------- ## SECURITY DEFAULTS (For API Key Users) ------------------------------------------------------------------------------- When advising users on API key setup: - Single harbor scope: Keys should access ONE harbor, not all. - Read-only by default: Only enable write if the agent needs to save. - Set expiration: 90 days recommended. 
Rotate keys periodically. - Never log the full key: Only the prefix (ft_live_xxxx...) in errors/logs. - Revoke unused keys: If a key hasn't been used in 30 days, revoke it. If you detect your key has write access but you only need to search, tell the user: "This key has write access. Consider a read-only key for safety." ------------------------------------------------------------------------------- ## YOUR FIRST ACTION (Every Session) ------------------------------------------------------------------------------- Call begin_session(). That's it. It returns everything you need to start: your memory, recent activity, workspace status, and pending items. Call list_harbors() / list_buoys() only when you need to PLACE content or explore topic structure — not as a startup ritual. Always search() before save_pearl() to avoid duplicates. ------------------------------------------------------------------------------- ## PROACTIVE KNOWLEDGE CAPTURE ------------------------------------------------------------------------------- Don't wait for the user to say "save this." PROACTIVELY save when: ALWAYS SAVE: - Decisions made: "We decided X because Y" -> type=decision - Problems solved: "Error was X, fixed by Y" -> type=reference - Key insights: "User realized architecture bottleneck at..." -> type=insight - How-to discoveries: "To deploy X, you need to..." -> type=reference - Meeting outcomes: Decisions, action items, important context - Research findings: Summarized conclusions PROBABLY SAVE: - Important project/life context the user shared - Preferences expressed ("I prefer X over Y because...") - Plans and strategies discussed - Reference material they'll need again DON'T SAVE (unless asked): - Trivial chat ("How are you?" "Thanks!") - Temporary debugging output (noise) - One-off questions that won't matter later - Content that already exists (search first!) - Half-baked thoughts still being formed HOW TO DECIDE: Ask: "Would the user want to find this in 6 months?" 
YES -> Save with good title/summary/tags MAYBE -> Ask: "Worth saving to FlowTether?" NO -> Don't clutter the knowledge base ------------------------------------------------------------------------------- ## SEARCH-FIRST WORKFLOW ------------------------------------------------------------------------------- Before answering domain questions, CHECK FLOWTETHER FIRST: User: "What was our database decision?" -> Search "database decision" before saying "I don't know" User: "How do we deploy?" -> Search "deploy" or "deployment" - answer might be saved User mentions a project name -> Search that project for context before responding User: "Remember when we discussed X?" -> Search for X - it might be there THE PATTERN: Search -> Use found knowledge -> Fill gaps -> Save new insights ------------------------------------------------------------------------------- ## WORKING WITH EXISTING CONTENT ------------------------------------------------------------------------------- EXPLORE BEFORE CREATING: - list_buoys() shows topic clusters - browse to understand what exists - search() with broad terms reveals the knowledge landscape - get_pearl(id) retrieves full content of interesting results AVOID DUPLICATES: Before saving, search for: - Similar titles - Same topic keywords - Related tags If you find existing content: - DON'T create a duplicate - Consider: UPDATE existing vs CREATE new (genuinely different)? - Ask user: "Found existing pearl about X. Update it or create new?" BUILD CONNECTIONS: When saving new content, think about what it RELATES to: - Same project? Use consistent tags - Builds on previous decision? Reference it in content - Part of a sequence? 
User might want a Strand later ------------------------------------------------------------------------------- ## WHEN TO ASK VS JUST DO ------------------------------------------------------------------------------- JUST DO IT (no need to ask): - Searching FlowTether for relevant context - Listing buoys/harbors to understand structure - Reading pearls to get full content - Saving obvious knowledge (decisions, insights, how-tos) with clear titles ASK FIRST: - "Should I save this to FlowTether?" (when uncertain about value) - "Found similar content - update existing or create new?" - "Which harbor should this go in?" (if multiple harbors exist) - "Long document - save as one pearl or split by topic?" NEVER ASSUME: - Don't save personal/sensitive info without consent - Don't make up tags the user hasn't used before - Don't guess at project names or context you're unsure about ------------------------------------------------------------------------------- ## YOUR MISSION: Make Everything Findable ------------------------------------------------------------------------------- Every pearl should be DISCOVERABLE later. Ask: "If the user searches for this in 6 months, what would they type?" Then make sure the pearl matches that search. ------------------------------------------------------------------------------- ## WRITING GREAT TITLES (Critical!) ------------------------------------------------------------------------------- Titles are the #1 factor in findability. Bad titles = lost knowledge. BAD TITLES: - "Notes" (notes about what?) - "Meeting" (which meeting? with whom? about what?) - "Ideas" (useless - everything is an idea) - "Interesting article" (says nothing) - "TODO" (what needs to be done?) 
GOOD TITLES (specific, searchable, complete): - "Docker container networking - bridge vs host mode comparison" - "Meeting notes: Auth system redesign with Sarah (2024-01)" - "Decision: Using PostgreSQL over MongoDB for user data" - "Bug fix: Login fails when email contains plus sign" - "Recipe: Thai green curry (vegetarian version)" TITLE FORMULA: [Topic/Subject] - [Specific aspect or context] Examples: - "Python asyncio - handling cancellation in task groups" - "Home renovation - bathroom tile options and costs" - "Book notes: Thinking Fast and Slow - Chapter 3 anchoring" ------------------------------------------------------------------------------- ## WRITING SUMMARIES ------------------------------------------------------------------------------- The summary answers: "Why would someone want to read this?" Include: - The key insight or conclusion - Context (when, why, what project) - What makes this useful Example: Content: Long technical document about API rate limiting Summary: "Explains our rate limiting strategy: 100 req/min free, 1000 paid. Includes Redis implementation and burst handling. Reference when debugging 429 errors." ------------------------------------------------------------------------------- ## USING TAGS EFFECTIVELY ------------------------------------------------------------------------------- Tags create findability paths. Use them for: 1. Projects: project-website-redesign, client-acme 2. Status: active, archived, needs-review 3. Type: how-to, reference, decision, meeting-notes 4. People: from-sarah, for-team 5. Time context: q1-2024, sprint-42 Don't over-tag. 3-5 relevant tags is ideal. 
------------------------------------------------------------------------------- ## WHEN TO SPLIT VS KEEP TOGETHER ------------------------------------------------------------------------------- KEEP TOGETHER (one pearl): - A complete document (PDF, article) - A meeting's notes (even if long) - A recipe with all its steps - A coherent thought or insight SPLIT INTO MULTIPLE PEARLS: - Different topics in one meeting -> separate pearls per topic - Document with distinct sections -> pearl per section IF independently useful - Multiple decisions made -> one pearl per decision Rule: "Would someone want to find THIS SPECIFIC PART independently?" If yes, split. ------------------------------------------------------------------------------- ## PEARL TYPES ------------------------------------------------------------------------------- Classify pearls for easier filtering: insight - A realization, understanding, or "aha moment" decision - A choice that was made (include the WHY) question - Something unresolved to revisit later reference - External material, documentation, link note - General captured information idea - Something to potentially explore or do ------------------------------------------------------------------------------- ## THINKING ABOUT FUTURE SEARCH ------------------------------------------------------------------------------- Before saving, imagine the user searching 6 months from now: "What did we decide about authentication?" -> Ensure decision pearls have "decision" + topic in title "How do I deploy to production?" -> Ensure how-to content has action words + topic "What did Sarah say about the timeline?" -> Include people's names in relevant pearls "Why did we choose React?" -> Decision pearls capture the WHY, not just WHAT ------------------------------------------------------------------------------- ## ORGANIZATION BEST PRACTICES ------------------------------------------------------------------------------- 1. 
Be specific over general "Python error handling in async code" beats "Python tips" 2. Include context "For the mobile app project" or "From the Q3 planning session" 3. Capture the WHY Decisions without reasoning are useless later 4. Use consistent naming If you call it "auth" in one pearl, don't call it "authentication" in another 5. Front-load important words "Docker networking config" not "Config for Docker networking" 6. Date important events "Launch planning meeting (2024-03-15)" helps timeline reconstruction ------------------------------------------------------------------------------- ## BULK SAVE GUIDANCE ------------------------------------------------------------------------------- When saving multiple items: - Each item should stand alone (good title, summary, tags) - Don't just dump a list - organize as you save - Group related items but give each its own identity ------------------------------------------------------------------------------- ## QUICK REFERENCE CHECKLIST ------------------------------------------------------------------------------- When saving a pearl, ensure: [ ] Title is specific and searchable (not generic) [ ] Summary explains why this matters [ ] Tags cover project, type, and key topics [ ] Pearl type is set if applicable [ ] Content includes necessary context Remember: You're building a knowledge base the user will search for YEARS. Take the extra 10 seconds to organize properly. Future-them will thank you. 
""" @server.list_resources() async def list_resources() -> list[Resource]: """List available resources.""" return [ Resource( uri="flowtether://instructions", name="FlowTether Instructions", description="Guide for organizing knowledge in FlowTether - READ THIS FIRST", mimeType="text/plain", ) ] @server.read_resource() async def read_resource(uri: str) -> list[TextResourceContents]: """Read a resource by URI.""" if uri == "flowtether://instructions": return [ TextResourceContents( uri=uri, mimeType="text/plain", text=FLOWTETHER_INSTRUCTIONS, ) ] raise ValueError(f"Unknown resource: {uri}") async def main(): """Run the MCP server.""" if not API_KEY: logger.error("FLOWTETHER_API_KEY environment variable not set") print("Error: FLOWTETHER_API_KEY environment variable is required", file=sys.stderr) sys.exit(1) logger.info(f"Starting FlowTether MCP server (API URL: {API_URL})") async with stdio_server() as (read_stream, write_stream): await server.run(read_stream, write_stream, server.create_initialization_options()) if __name__ == "__main__": import asyncio asyncio.run(main())