Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 28 additions & 0 deletions docs/analysis_windows_hangs.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
### Analysis & Recommendation for Maintainers

Following up on the great context provided by @L0rdS474n, I've completed a full audit of all `hooks.json` files in the repository.

**Findings:**

The following plugins are confirmed to be missing `"async": true` for their command-based hooks, which is the direct cause of the Windows startup hang:

1. `plugins/hookify/hooks/hooks.json`
2. `plugins/security-guidance/hooks/hooks.json`
3. `plugins/explanatory-output-style/hooks/hooks.json`
4. `plugins/learning-output-style/hooks/hooks.json`
5. `plugins/ralph-loop/hooks/hooks.json`

**Technical Root Cause:**

On Windows, synchronous subprocess calls (especially during the `SessionStart` or `PreToolUse` phases) can block the Node.js event loop before it has established its internal polling for subprocess handles. This results in a deadlocked state where Claude Code is waiting for a process to finish, but the signal that it has finished cannot be processed by the blocked loop.

**Next Steps for Maintainers:**

Since PR #354 was auto-closed due to policy, I recommend that a team member cherry-pick the following changes to restore Windows stability:

- **Add `"async": true`** to every hook entry of type `"command"` or `"shell"` in the 5 files listed above.
- **Investigate the Trivago connector**: As noted by the community, this connector might be inheriting or triggering similar blocking behavior when proxied to Claude Code. If it uses hooks, it likely also needs `async: true`.

**Long-term Fix:**

Consider adding a validation check in the `claude-plugins` system to warn or enforce `async: true` for hooks that don't need to return a block/allow decision (i.e., side-effect hooks).
3 changes: 2 additions & 1 deletion examples/mcpserver/logging_and_progress.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""MCPServer Echo Server that sends log messages and progress updates to the client"""

import asyncio
from typing import Any

from mcp.server.mcpserver import Context, MCPServer

Expand All @@ -9,7 +10,7 @@


@mcp.tool()
async def echo(text: str, ctx: Context) -> str:
async def echo(text: str, ctx: Context[Any, Any]) -> str:
"""Echo the input text sending log messages and progress updates during processing."""
await ctx.report_progress(progress=0, total=100)
await ctx.info("Starting to process echo for input: " + text)
Expand Down
148 changes: 99 additions & 49 deletions examples/mcpserver/memory.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,17 +15,52 @@
from dataclasses import dataclass
from datetime import datetime, timezone
from pathlib import Path
from typing import Annotated, Self, TypeVar

import asyncpg
import numpy as np
from openai import AsyncOpenAI
from pgvector.asyncpg import register_vector # Import register_vector
from typing import TYPE_CHECKING, Any, Protocol, TypeVar, cast, runtime_checkable

# External dependencies - these may not be installed
# pyright: reportUnknownMemberType=false
# pyright: reportUnknownVariableType=false
import asyncpg # type: ignore[import-untyped]
from openai import AsyncOpenAI # type: ignore[import-untyped]
from pgvector.asyncpg import register_vector # type: ignore[import-untyped]
from pydantic import BaseModel, Field
from pydantic_ai import Agent
from pydantic_ai import Agent # type: ignore[import-untyped]

from mcp.server.mcpserver import MCPServer


@runtime_checkable
class PGRecord(Protocol):
    """Structural stand-in for an asyncpg Record: column access by name."""

    def __getitem__(self, key: str) -> Any: ...


@runtime_checkable
class PGConnection(Protocol):
    """Structural stand-in for an asyncpg connection (only execute/fetch are used here)."""

    async def execute(self, query: str, *args: Any) -> str: ...
    async def fetch(self, query: str, *args: Any) -> list[PGRecord]: ...


@runtime_checkable
class OpenAIEmbeddings(Protocol):
    """Structural stand-in for the OpenAI embeddings resource."""

    async def create(self, **kwargs: Any) -> Any: ...


@runtime_checkable
class OpenAIClient(Protocol):
    """Structural stand-in for AsyncOpenAI; only the embeddings attribute is used."""

    # Embeddings sub-client; see OpenAIEmbeddings for the subset of its API used.
    embeddings: OpenAIEmbeddings


@runtime_checkable
class PGPool(Protocol):
    """Structural stand-in for an asyncpg pool (only acquire/close are used here)."""

    def acquire(self) -> Any: ...  # Returns an async context manager
    async def close(self) -> None: ...


# NOTE: AsyncOpenAI is already imported unconditionally at the top of this
# module, so no TYPE_CHECKING shim is needed here.  The previous
# ``if TYPE_CHECKING: ... else: AsyncOpenAI = Any`` block rebound the name to
# ``typing.Any`` at runtime, which made the ``AsyncOpenAI()`` constructor
# calls below raise ``TypeError: Any cannot be instantiated``.

MAX_DEPTH = 5
SIMILARITY_THRESHOLD = 0.7
DECAY_FACTOR = 0.99
Expand All @@ -45,39 +80,43 @@


def cosine_similarity(a: list[float], b: list[float]) -> float:
    """Return the cosine similarity of two equal-length vectors.

    Pure-stdlib implementation (no numpy dependency).  Returns 0.0 when either
    vector has zero magnitude, where the similarity is mathematically
    undefined; 0.0 is a safe neutral value for ranking purposes.
    """
    dot_product = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(y * y for y in b))
    if norm_a == 0 or norm_b == 0:
        return 0.0
    return dot_product / (norm_a * norm_b)


@dataclass
class Deps:
    """Per-request dependencies shared by the memory helper functions."""

    # Embedding client; a structural protocol satisfied by AsyncOpenAI.
    openai: OpenAIClient
    # Database connection pool; a structural protocol satisfied by asyncpg's pool.
    pool: PGPool


async def get_db_pool() -> PGPool:
    """Create an asyncpg connection pool with pgvector support enabled.

    The setup callback runs once per new connection: it ensures the
    ``vector`` extension exists and registers pgvector's codecs so vector
    columns round-trip as Python sequences.
    """

    async def _setup(conn: PGConnection) -> None:
        await conn.execute("CREATE EXTENSION IF NOT EXISTS vector;")
        await register_vector(conn)  # type: ignore

    return cast(PGPool, await asyncpg.create_pool(DB_DSN, init=_setup))  # type: ignore


async def do_ai(
    user_prompt: str,
    system_prompt: str,
    result_type: type[T],
    deps: Deps | None = None,
) -> T:
    """Run a one-shot pydantic-ai agent and return its typed result.

    Args:
        user_prompt: The user message to send to the model.
        system_prompt: The system instruction for this call.
        result_type: The expected structured result type; the agent is
            parameterized so the return value is typed as ``T``.
        deps: Optional dependencies forwarded to the agent run.

    Returns:
        The agent's result data, cast to ``T``.
    """
    agent: Agent[Deps, T] = Agent(
        DEFAULT_LLM_MODEL,
        system_prompt=system_prompt,
        result_type=result_type,
    )
    result = await agent.run(user_prompt, deps=deps)
    return cast(T, result.data)


class MemoryNode(BaseModel):
Expand Down Expand Up @@ -129,7 +168,7 @@ async def save(self, deps: Deps):
self.id,
)

async def merge_with(self, other: Self, deps: Deps):
async def merge_with(self, other: "MemoryNode", deps: Deps) -> None:
self.content = await do_ai(
f"{self.content}\n\n{other.content}",
"Combine the following two texts into a single, coherent text.",
Expand Down Expand Up @@ -191,13 +230,13 @@ async def find_similar_memories(embedding: list[float], deps: Deps) -> list[Memo
)
memories = [
MemoryNode(
id=row["id"],
content=row["content"],
summary=row["summary"],
importance=row["importance"],
access_count=row["access_count"],
timestamp=row["timestamp"],
embedding=row["embedding"],
id=cast(int, row["id"]),
content=cast(str, row["content"]),
summary=cast(str, row["summary"]),
importance=cast(float, row["importance"]),
access_count=cast(int, row["access_count"]),
timestamp=cast(float, row["timestamp"]),
embedding=cast(list[float], row["embedding"]),
)
for row in rows
]
Expand All @@ -206,16 +245,17 @@ async def find_similar_memories(embedding: list[float], deps: Deps) -> list[Memo

async def update_importance(user_embedding: list[float], deps: Deps):
async with deps.pool.acquire() as conn:
rows = await conn.fetch("SELECT id, importance, access_count, embedding FROM memories")
conn = cast(PGConnection, conn)
rows: list[PGRecord] = await conn.fetch("SELECT id, importance, access_count, embedding FROM memories")
for row in rows:
memory_embedding = row["embedding"]
memory_embedding = cast(list[float], row["embedding"])
similarity = cosine_similarity(user_embedding, memory_embedding)
if similarity > SIMILARITY_THRESHOLD:
new_importance = row["importance"] * REINFORCEMENT_FACTOR
new_access_count = row["access_count"] + 1
new_importance = cast(float, row["importance"]) * REINFORCEMENT_FACTOR
new_access_count = cast(int, row["access_count"]) + 1
else:
new_importance = row["importance"] * DECAY_FACTOR
new_access_count = row["access_count"]
new_importance = cast(float, row["importance"]) * DECAY_FACTOR
new_access_count = cast(int, row["access_count"])
await conn.execute(
"""
UPDATE memories
Expand All @@ -230,7 +270,8 @@ async def update_importance(user_embedding: list[float], deps: Deps):

async def prune_memories(deps: Deps):
async with deps.pool.acquire() as conn:
rows = await conn.fetch(
conn = cast(PGConnection, conn)
rows: list[PGRecord] = await conn.fetch(
"""
SELECT id, importance, access_count
FROM memories
Expand All @@ -245,7 +286,8 @@ async def prune_memories(deps: Deps):

async def display_memory_tree(deps: Deps) -> str:
async with deps.pool.acquire() as conn:
rows = await conn.fetch(
conn = cast(PGConnection, conn)
rows: list[PGRecord] = await conn.fetch(
"""
SELECT content, summary, importance, access_count
FROM memories
Expand All @@ -256,8 +298,10 @@ async def display_memory_tree(deps: Deps) -> str:
)
result = ""
for row in rows:
effective_importance = row["importance"] * (1 + math.log(row["access_count"] + 1))
summary = row["summary"] or row["content"]
importance = cast(float, row["importance"])
access_count = cast(int, row["access_count"])
effective_importance = importance * (1 + math.log(access_count + 1))
summary = cast(str, row["summary"] or row["content"])
result += f"- {summary} (Importance: {effective_importance:.2f})\n"
return result

Expand All @@ -266,7 +310,10 @@ async def display_memory_tree(deps: Deps) -> str:
async def remember(
contents: list[str] = Field(description="List of observations or memories to store"),
):
deps = Deps(openai=AsyncOpenAI(), pool=await get_db_pool())
deps = Deps(
openai=cast(OpenAIClient, AsyncOpenAI()),
pool=await get_db_pool(),
)
try:
return "\n".join(await asyncio.gather(*[add_memory(content, deps) for content in contents]))
finally:
Expand All @@ -275,7 +322,10 @@ async def remember(

@mcp.tool()
async def read_profile() -> str:
    """Render the stored memory tree as a human-readable profile string.

    Acquires fresh dependencies per call and always closes the pool —
    including when rendering raises — matching the try/finally pattern
    used by ``remember`` (the previous version leaked the pool on error).
    """
    deps = Deps(
        openai=cast(OpenAIClient, AsyncOpenAI()),
        pool=await get_db_pool(),
    )
    try:
        return await display_memory_tree(deps)
    finally:
        await deps.pool.close()
Expand Down
2 changes: 1 addition & 1 deletion examples/mcpserver/text_me.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@


class SurgeSettings(BaseSettings):
model_config: SettingsConfigDict = SettingsConfigDict(env_prefix="SURGE_", env_file=".env")
model_config = SettingsConfigDict(env_prefix="SURGE_", env_file=".env")

api_key: str
account_id: str
Expand Down
8 changes: 5 additions & 3 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -26,13 +26,13 @@ classifiers = [
]
dependencies = [
"anyio>=4.5",
"httpx>=0.27.1",
"httpx>=0.28.1",
"httpx-sse>=0.4",
"pydantic>=2.12.0",
"starlette>=0.48.0; python_version >= '3.14'",
"starlette>=0.27; python_version < '3.14'",
"starlette>=0.40.0; python_version < '3.14'",
"python-multipart>=0.0.9",
"sse-starlette>=1.6.1",
"sse-starlette>=2.1.3",
"pydantic-settings>=2.5.2",
"uvicorn>=0.31.1; sys_platform != 'emscripten'",
"jsonschema>=4.20.0",
Expand Down Expand Up @@ -183,6 +183,8 @@ filterwarnings = [
"ignore:Returning str or bytes.*:DeprecationWarning:mcp.server.lowlevel",
# pywin32 internal deprecation warning
"ignore:getargs.*The 'u' format is deprecated:DeprecationWarning",
# sse-starlette uses utcnow() in 1.x-2.1.2
"ignore:datetime.datetime.utcnow.*:DeprecationWarning",
]

[tool.markdown.lint]
Expand Down
5 changes: 3 additions & 2 deletions scripts/update_readme_snippets.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
#!/usr/bin/env python3
"""Update README.md with live code snippets from example files.

This script finds specially marked code blocks in README.md and updates them
Expand All @@ -9,6 +8,8 @@
python scripts/update_readme_snippets.py --check # Check mode for CI
"""

from __future__ import annotations

import argparse
import re
import sys
Expand Down Expand Up @@ -69,7 +70,7 @@ def process_snippet_block(match: re.Match[str], check_mode: bool = False) -> str
if existing_content is not None:
existing_lines = existing_content.strip().split("\n")
# Find code between ```python and ```
code_lines = []
code_lines: list[str] = []
in_code = False
for line in existing_lines:
if line.strip() == "```python":
Expand Down
Loading
Loading