diff --git a/.claude/skills/mcp-sdk-tier-audit/README.md b/.claude/skills/mcp-sdk-tier-audit/README.md new file mode 100644 index 0000000..77fc8cf --- /dev/null +++ b/.claude/skills/mcp-sdk-tier-audit/README.md @@ -0,0 +1,258 @@ +# MCP SDK Tier Audit + +Assess any MCP SDK repository against [SEP-1730](https://github.com/modelcontextprotocol/modelcontextprotocol/issues/1730) (the SDK Tiering System). Produces a tier classification (1/2/3) with an evidence-backed scorecard. + +Two components work together: + +- **`tier-check` CLI** — runs deterministic checks (server + client conformance pass rate, issue triage speed, P0 resolution, labels, releases, policy signals). Works standalone, no AI needed. +- **AI-assisted assessment** — an agent uses the CLI scorecard plus judgment-based evaluation (documentation coverage, dependency policy, roadmap) to produce a full tier report with remediation guide. + +## Quick Start: CLI + +The CLI is a subcommand of the [MCP Conformance](https://github.com/modelcontextprotocol/conformance) tool. + +```bash +# Clone and build +git clone https://github.com/modelcontextprotocol/conformance.git +cd conformance +npm install +npm run build + +# Authenticate with GitHub (needed for API access) +gh auth login + +# Run against any MCP SDK repo (without conformance tests) +npm run --silent tier-check -- --repo modelcontextprotocol/typescript-sdk --skip-conformance +``` + +The CLI uses the GitHub API (read-only) for issue metrics, labels, and release checks. Authenticate via one of: + +- **GitHub CLI** (recommended): `gh auth login` — the CLI picks up your token automatically +- **Environment variable**: `export GITHUB_TOKEN=ghp_...` +- **Flag**: `--token ghp_...` + +For public repos, any authenticated token works (no special scopes needed — authentication just avoids rate limits). 
For a [fine-grained personal access token](https://github.com/settings/personal-access-tokens/new), select **Public Repositories (read-only)** with no additional permissions. + +### CLI Options + +``` +--repo <owner/repo> GitHub repository (required) +--branch <branch> Branch to check +--skip-conformance Skip conformance tests +--conformance-server-url <url> URL of the already-running conformance server +--client-cmd <command> Command to run the SDK conformance client (for client conformance tests) +--days <n> Limit triage analysis to last N days +--output json | markdown | terminal (default: terminal) +--token <token> GitHub token (defaults to GITHUB_TOKEN or gh auth token) +``` + +### What the CLI Checks + +| Check | What it measures | +| ------------------ | ------------------------------------------------------------------------------ | +| Server Conformance | Pass rate of server implementation against the conformance test suite | +| Client Conformance | Pass rate of client implementation against the conformance test suite | +| Labels | Whether SEP-1730 label taxonomy is set up (supports GitHub native issue types) | +| Triage | How quickly issues get labeled after creation | +| P0 Resolution | Whether critical bugs are resolved within SLA | +| Stable Release | Whether a stable release >= 1.0.0 exists | +| Policy Signals | Presence of CHANGELOG, SECURITY, CONTRIBUTING, dependabot, ROADMAP | +| Spec Tracking | Gap between latest spec release and SDK release | + +### Example Output + +``` +Tier Assessment: Tier 2 + +Repo: modelcontextprotocol/typescript-sdk +Timestamp: 2026-02-10T12:00:00Z + +Check Results: + + ✓ Server Conformance 45/45 (100%) + ✓ Client Conformance 4/4 (100%) + ✗ Labels 9/12 required labels + Missing: needs confirmation, needs repro, ready for work + ✓ Triage 92% within 2BD (150 issues, median 8h) + ✓ P0 Resolution 0 open, 3/3 closed within 7d + ✓ Stable Release 2.3.1 + ~ Policy Signals ✓ CHANGELOG.md, ✗ SECURITY.md, ✓ CONTRIBUTING.md, ✓ .github/dependabot.yml, ✗ ROADMAP.md + ✓ Spec Tracking 2d 
gap +``` + +Use `--output json` to get machine-readable results, or `--output markdown` for a report you can paste into an issue. + +## Full AI-Assisted Assessment + +The CLI produces a deterministic scorecard, but some SEP-1730 requirements need judgment: documentation quality, dependency policy, roadmap substance. An AI agent can evaluate these by reading the repo. + +### Claude Code + +The skill lives in `.claude/skills/` in this repo, so if you open [Claude Code](https://docs.anthropic.com/en/docs/claude-code) in the conformance repo it's already available. + +1. Make sure `gh auth login` is done (the skill checks this upfront) +2. Start the SDK's everything server in a separate terminal +3. Run the skill: + +``` +/mcp-sdk-tier-audit <local-path> <conformance-server-url> [client-cmd] +``` + +Pass the client command as the third argument to include client conformance testing. If omitted, client conformance is skipped and noted as a gap in the report. + +**TypeScript SDK example:** + +```bash +# Terminal 1: start the everything server (build first: npm run build) +cd ~/src/mcp/typescript-sdk && npm run test:conformance:server:run + +# Terminal 2: run the audit (from the conformance repo) +/mcp-sdk-tier-audit ~/src/mcp/typescript-sdk http://localhost:3000/mcp "npx tsx ~/src/mcp/typescript-sdk/test/conformance/src/everythingClient.ts" +``` + +**Python SDK example:** + +```bash +# Terminal 1: install and start the everything server +cd ~/src/mcp/python-sdk && uv sync --frozen --all-extras --package mcp-everything-server +uv run mcp-everything-server --port 3001 + +# Terminal 2: run the audit (from the conformance repo) +/mcp-sdk-tier-audit ~/src/mcp/python-sdk http://localhost:3001/mcp "uv run python ~/src/mcp/python-sdk/.github/actions/conformance/client.py" +``` + +**Go SDK example:** + +```bash +# Terminal 1: build and start the everything server +cd ~/src/mcp/go-sdk && go build -o /tmp/go-conformance-server ./conformance/everything-server +go build -o /tmp/go-conformance-client 
./conformance/everything-client +/tmp/go-conformance-server -http="localhost:3002" + +# Terminal 2: run the audit (from the conformance repo) +/mcp-sdk-tier-audit ~/src/mcp/go-sdk http://localhost:3002 "/tmp/go-conformance-client" +``` + +**C# SDK example:** + +```bash +# Terminal 1: start the everything server (requires .NET SDK) +cd ~/src/mcp/csharp-sdk +dotnet run --project tests/ModelContextProtocol.ConformanceServer --framework net9.0 -- --urls http://localhost:3003 + +# Terminal 2: run the audit (from the conformance repo) +/mcp-sdk-tier-audit ~/src/mcp/csharp-sdk http://localhost:3003 "dotnet run --project ~/src/mcp/csharp-sdk/tests/ModelContextProtocol.ConformanceClient" +``` + +The skill derives `owner/repo` from git remote, runs the CLI, launches parallel evaluations for docs and policy, and writes detailed reports to `results/`. + +### Any Other AI Coding Agent + +If you use a different agent (Codex, Cursor, Aider, OpenCode, etc.), give it these instructions: + +1. **Run the CLI** to get the deterministic scorecard: + + ```bash + node dist/index.js tier-check --repo <owner/repo> --conformance-server-url <conformance-server-url> --output json + ``` + +2. **Evaluate documentation coverage** — check whether MCP features (tools, resources, prompts, sampling, transports, etc.) are documented with examples. See [`references/docs-coverage-prompt.md`](references/docs-coverage-prompt.md) for the full checklist. + +3. **Evaluate policies** — check for dependency update policy, roadmap, and versioning/breaking-change policy. See [`references/policy-evaluation-prompt.md`](references/policy-evaluation-prompt.md) for criteria. + +4. **Apply tier logic** — combine scorecard + evaluations against the thresholds in [`references/tier-requirements.md`](references/tier-requirements.md). + +5. **Generate report** — use [`references/report-template.md`](references/report-template.md) for the output format. 
+ +### Manual Review + +Run the CLI for the scorecard, then review docs and policies yourself using the tier requirements as a checklist: + +| Requirement | Tier 1 | Tier 2 | +| ------------------ | ------------------------------ | ------------------------ | +| Server Conformance | 100% pass | >= 80% pass | +| Client Conformance | 100% pass | >= 80% pass | +| Issue triage | Within 2 business days | Within 1 month | +| P0 resolution | Within 7 days | Within 2 weeks | +| Stable release | >= 1.0.0 with clear versioning | At least one >= 1.0.0 | +| Documentation | All features with examples | Core features documented | +| Dependency policy | Published | Published | +| Roadmap | Published with spec tracking | Plan toward Tier 1 | + +## Running Conformance Tests + +To include conformance test results, start the SDK's everything server first, then pass the URL to the CLI. To also run client conformance tests, pass `--client-cmd` with the command to launch the SDK's conformance client. + +**TypeScript SDK**: + +```bash +# Terminal 1: start the server (SDK must be built first) +cd ~/src/mcp/typescript-sdk && npm run build +npm run test:conformance:server:run # starts on port 3000 + +# Terminal 2: run tier-check (server + client conformance) +npm run --silent tier-check -- \ + --repo modelcontextprotocol/typescript-sdk \ + --conformance-server-url http://localhost:3000/mcp \ + --client-cmd 'npx tsx ~/src/mcp/typescript-sdk/test/conformance/src/everythingClient.ts' +``` + +**Python SDK**: + +```bash +# Terminal 1: install and start the server +cd ~/src/mcp/python-sdk +uv sync --frozen --all-extras --package mcp-everything-server +uv run mcp-everything-server --port 3001 # specify port to avoid conflicts + +# Terminal 2: run tier-check (server + client conformance) +npm run --silent tier-check -- \ + --repo modelcontextprotocol/python-sdk \ + --conformance-server-url http://localhost:3001/mcp \ + --client-cmd 'uv run python 
~/src/mcp/python-sdk/.github/actions/conformance/client.py' +``` + +**Go SDK**: + +```bash +# Terminal 1: build and start the server +cd ~/src/mcp/go-sdk +go build -o /tmp/go-conformance-server ./conformance/everything-server +go build -o /tmp/go-conformance-client ./conformance/everything-client +/tmp/go-conformance-server -http="localhost:3002" + +# Terminal 2: run tier-check (server + client conformance) +npm run --silent tier-check -- \ + --repo modelcontextprotocol/go-sdk \ + --conformance-server-url http://localhost:3002 \ + --client-cmd '/tmp/go-conformance-client' +``` + +**C# SDK**: + +```bash +# Terminal 1: start the server (requires .NET SDK) +cd ~/src/mcp/csharp-sdk +dotnet run --project tests/ModelContextProtocol.ConformanceServer --framework net9.0 -- --urls http://localhost:3003 + +# Terminal 2: run tier-check (server + client conformance) +npm run --silent tier-check -- \ + --repo modelcontextprotocol/csharp-sdk \ + --conformance-server-url http://localhost:3003 \ + --client-cmd 'dotnet run --project ~/src/mcp/csharp-sdk/tests/ModelContextProtocol.ConformanceClient' +``` + +**Other SDKs:** Your SDK needs an "everything server" — an HTTP server implementing the [Streamable HTTP transport](https://modelcontextprotocol.io/specification/draft/basic/transports.md) with all MCP features (tools, resources, prompts, etc.). See the implementations above as reference. + +Start your everything server, then pass `--conformance-server-url`. Pass `--client-cmd` if your SDK has a conformance client. If neither exists yet, use `--skip-conformance` — the scorecard will note this as a gap. 
 + +## Reference Files + +These files in [`references/`](references/) contain the detailed criteria and prompts: + +| File | Purpose | +| ----------------------------- | ------------------------------------------------------- | +| `tier-requirements.md` | Full SEP-1730 requirements with exact thresholds | +| `docs-coverage-prompt.md` | Feature checklist for documentation evaluation | +| `policy-evaluation-prompt.md` | Criteria for dependency, roadmap, and versioning policy | +| `report-template.md` | Output format for the full audit report | diff --git a/.claude/skills/mcp-sdk-tier-audit/SKILL.md b/.claude/skills/mcp-sdk-tier-audit/SKILL.md new file mode 100644 index 0000000..5da0fbd --- /dev/null +++ b/.claude/skills/mcp-sdk-tier-audit/SKILL.md @@ -0,0 +1,307 @@ +--- +name: mcp-sdk-tier-audit +description: >- + Comprehensive tier assessment for an MCP SDK repository against SEP-1730. + Produces tier classification (1/2/3) with evidence table, gap list, and + remediation guide. Works for any official MCP SDK (TypeScript, Python, Go, + C#, Java, Kotlin, PHP, Swift, Rust, Ruby). +argument-hint: '<local-path> <conformance-server-url> [client-cmd] [--branch <branch>]' +--- + +# MCP SDK Tier Audit + +You are performing a comprehensive tier assessment for an MCP SDK repository against SEP-1730 (the SDK Tiering System). Your goal is to produce a definitive tier classification (Tier 1, 2, or 3) backed by evidence. + +## Step 0: Pre-flight Checks + +Before doing anything else, verify GitHub CLI authentication: + +```bash +gh auth status 2>&1 +``` + +If this fails (exit code non-zero or shows "not logged in"), stop immediately and tell the user: + +> GitHub authentication is required for this skill. Please run `gh auth login` first, then re-run the skill. + +Do NOT proceed to any other step if this check fails. 
 + +After parsing arguments (Step 1), also verify the conformance server is reachable: + +```bash +curl -sf -o /dev/null -w '%{http_code}' <conformance-server-url> 2>&1 || true +``` + +If the server is not reachable, stop and tell the user: + +> Conformance server at `<conformance-server-url>` is not reachable. Make sure the everything server is running before invoking this skill. + +## Step 1: Parse Arguments + +Extract from the user's input: + +- **local-path**: absolute path to the SDK checkout (e.g. `~/src/mcp/typescript-sdk`) +- **conformance-server-url**: URL where the SDK's everything server is already running (e.g. `http://localhost:3000/mcp`) +- **client-cmd** (optional): command to run the SDK's conformance client (e.g. `npx tsx test/conformance/src/everythingClient.ts`). If not provided, client conformance tests are skipped and noted as a gap in the report. +- **branch** (optional): Git branch to check on GitHub (e.g. `--branch fweinberger/v1x-governance-docs`). If not provided, derive from the local checkout's current branch: `cd <local-path> && git rev-parse --abbrev-ref HEAD`. This is passed to the tier-check CLI so that policy signal file checks use the correct branch instead of the repo's default branch. + +The first two arguments are required. If either is missing, ask the user to provide it. + +Derive the GitHub `owner/repo` from the local checkout: + +```bash +cd <local-path> && git remote get-url origin | sed 's#.*github.com[:/]##; s#\.git$##' +``` + +## Step 2: Run the Deterministic Scorecard + +The `tier-check` CLI handles all deterministic checks — server conformance, client conformance, labels, triage, P0 resolution, releases, policy signals, and spec tracking. You are already in the conformance repo, so run it directly. + +```bash +npm run --silent tier-check -- \ + --repo <owner/repo> \ + --branch <branch> \ + --conformance-server-url <conformance-server-url> \ + --client-cmd '<client-cmd>' \ + --output json +``` + +If no client-cmd was detected, omit the `--client-cmd` flag (client conformance will be skipped). 
The `--branch` flag should always be included (derived from the local checkout if not explicitly provided). + +The CLI output includes server conformance pass rate, client conformance pass rate (with per-spec-version breakdown), issue triage compliance, P0 resolution times, label taxonomy, stable release status, policy signal files, and spec tracking gap. Parse the JSON output to feed into Step 4. + +The conformance results now include a `specVersions` field on each detail entry, enabling per-version pass rate analysis. The `list` command also shows spec version tags: `node dist/index.js list` shows `[2025-06-18]`, `[2025-11-25]`, `[draft]`, or `[extension]` next to each scenario. + +### Conformance Baseline Check + +After running the CLI, check for an expected-failures baseline file in the SDK repo: + +```bash +find <local-path> -name "baseline.yml" -o -name "expected-failures.yml" 2>/dev/null | head -5 +``` + +If found, read the file. It lists known/expected conformance failures. This context is essential for interpreting raw pass rates — a 20% client pass rate due entirely to unimplemented OAuth scenarios is very different from 20% due to broken core functionality. + +## Step 3: Launch Parallel Evaluations + +Launch 2 evaluations in parallel. Each reads the SDK from the local checkout path. + +**IMPORTANT**: Launch both evaluations at the same time (in the same response) so they run in parallel. + +### Evaluation 1: Documentation Coverage + +Use the prompt from `references/docs-coverage-prompt.md`. Pass the local path. + +This evaluation checks: + +- Whether all non-experimental features are documented with examples (Tier 1 requirement) +- Whether core features are documented (Tier 2 requirement) +- Produces an evidence table with file:line references + +### Evaluation 2: Policy Evaluation + +Use the prompt from `references/policy-evaluation-prompt.md`. Pass the local path, the derived `owner/repo`, and the `policy_signals` section from the CLI JSON output. 
+ +The CLI has already checked which policy files exist (ROADMAP.md, DEPENDENCY_POLICY.md, dependabot.yml, VERSIONING.md, etc.). The AI evaluation reads only the files the CLI found to judge whether the content is substantive — it does NOT search for files in other locations. + +This evaluation checks: + +- Dependency update policy (required for Tier 1 and Tier 2) +- Published roadmap (required for Tier 1; plan-toward-Tier-1 for Tier 2) +- Clear versioning with documented breaking change policy (required for Tier 1) +- Produces evidence tables for each policy area + +## Step 4: Compute Final Tier + +Combine the deterministic scorecard (from the CLI) with the evaluation results (docs, policies). Apply the tier logic: + +### Tier 1 requires ALL of: + +- Server conformance test pass rate == 100% (date-versioned scenarios only; `draft` and `extension` are informational and not scored) +- Client conformance test pass rate == 100% (date-versioned scenarios only; `draft` and `extension` are informational and not scored) +- Issue triage compliance >= 90% within 2 business days +- All P0 bugs resolved within 7 days +- Stable release >= 1.0.0 with no pre-release suffix +- Clear versioning with documented breaking change policy (evaluation) +- All non-experimental features documented with examples (evaluation) +- Published dependency update policy (evaluation) +- Published roadmap with concrete steps tracking spec components (evaluation) + +### Tier 2 requires ALL of: + +- Server conformance test pass rate >= 80% (date-versioned scenarios only) +- Client conformance test pass rate >= 80% (date-versioned scenarios only) +- Issue triage compliance >= 80% within 1 month +- P0 bugs resolved within 2 weeks +- At least one stable release >= 1.0.0 +- Basic docs covering core features (evaluation) +- Published dependency update policy (evaluation) +- Published plan toward Tier 1 or explanation for remaining Tier 2 (evaluation) + +### Otherwise: Tier 3 + +If any Tier 2 requirement is 
not met, the SDK is Tier 3. + +**Important edge cases:** + +- If GitHub issue labels are not set up per SEP-1730, triage metrics cannot be computed. Note this as a gap. However, repos may use GitHub's native issue types instead of type labels — the CLI checks for both. +- If client conformance was skipped (no client command found), note this as a gap but do not block tier advancement based on it alone. + +**Conformance Breakdown:** + +The **full suite** pass rates (server total, client total) are used for tier threshold checks. To interpret them, present a single conformance matrix combining server and client results. Each detail entry in the tier-check JSON has a `specVersions` field; client category is derived from the scenario name (`auth/` prefix = Auth, everything else = Core). Server scenarios are all Core. + +Example: + +| | 2025-03-26 | 2025-06-18 | 2025-11-25 | All\* | +| ------------ | ---------- | ---------- | ---------- | ------------ | +| Server | — | 26/26 | 4/4 | 30/30 (100%) | +| Client: Core | — | 2/2 | 2/2 | 4/4 (100%) | +| Client: Auth | 2/2 | 3/3 | 6/11 | 8/16 (50%) | + +Informational (not scored for tier): + +| | draft | extension | +| ------------ | ----- | --------- | +| Client: Auth | 0/1 | 0/2 | + +The tier-scoring table only includes date-versioned scenarios. `draft` and `extension` scenarios are shown separately as informational — they do not affect tier advancement. + +This immediately shows where failures concentrate. Failures clustered in Client: Auth / `2025-11-25` means "new auth features not yet implemented" — a scope gap, not a quality problem. Failures in Server or Client: Core are more concerning. + +If the SDK has a `baseline.yml` or expected-failures file, cross-reference with the matrix to identify whether baselined failures cluster in a specific cell (e.g. all in `2025-11-25` / Client: Auth = scope gap). 
 + +**P0 Label Audit Guidance:** + +When evaluating P0 metrics, flag potentially mislabeled P0 issues: + +- If P0 count is high (>2) but other Tier 2 metrics (conformance, triage compliance, docs) are strong, this may indicate P0 labels are being used for enhancements, lower-priority work, or feature requests rather than actual critical bugs. +- In such cases, recommend a P0 label audit as a remediation action. Review open P0 issues to verify they represent genuine blocking defects vs. misclassified work. +- Document this finding in the remediation output with specific issue numbers and suggested re-triage actions. +- Do not treat high P0 count as an automatic hard blocker if the audit reveals mislabeling; instead, note it as a process improvement opportunity. + +## Step 5: Generate Output + +Write detailed reports to files using subagents, then show a concise summary to the user. + +### Output files (write via subagents) + +**IMPORTANT**: Write both report files using parallel subagents (Task tool) so the file-writing work does not pollute the main conversation thread. Launch both subagents at the same time. + +Write two files to `results/` in the conformance repo: + +- `results/<date>-<sdk-name>-assessment.md` +- `results/<date>-<sdk-name>-remediation.md` + +For example: `results/2026-02-11-typescript-sdk-assessment.md` + +#### Assessment subagent + +Pass all the gathered data (CLI scorecard JSON, docs evaluation results, policy evaluation results) to a subagent and instruct it to write the assessment file using the template from `references/report-template.md`. This file contains the full requirements table, conformance test details (both server and client), triage metrics, documentation coverage table, and policy evaluation evidence. + +#### Remediation subagent + +Pass all the gathered data to a subagent and instruct it to write the remediation file using the template from `references/report-template.md`. 
This file always includes both: + +- **Path to Tier 2** (if current tier is 3) -- what's needed to reach Tier 2 +- **Path to Tier 1** (always) -- what's needed to reach Tier 1 + +### Console output (shown to the user) + +After the subagents finish, output a short executive summary directly to the user: + +``` +## — Tier + +Conformance: + +| | 2025-03-26 | 2025-06-18 | 2025-11-25 | All* | T2 | T1 | +|--------------|------------|------------|------------|------|----|----| +| Server | — | pass/total | pass/total | pass/total (rate%) | ✓/✗ | ✓/✗ | +| Client: Core | — | pass/total | pass/total | pass/total (rate%) | — | — | +| Client: Auth | pass/total | pass/total | pass/total | pass/total (rate%) | — | — | +| **Client Total** | | | | **pass/total (rate%)** | **✓/✗** | **✓/✗** | + +\* unique scenarios — a scenario may apply to multiple spec versions + +Informational (not scored for tier): + +| | draft | extension | +|--------------|-------|-----------| +| Client: Auth | pass/total | pass/total | + +If a baseline file was found, add a note below the conformance table: +> **Baseline**: {N} failures in `baseline.yml` ({list by cell, e.g. "6 in Client: Auth/2025-11-25, 2 in Client: Auth/extension"}). + +Repository Health: + +| Check | Value | T2 | T1 | +|-------|-------|----|----| +| Issue Triage | % (/) | ✓/✗ | ✓/✗ | +| Labels | / | ✓/✗ | ✓/✗ | +| P0 Resolution | open | ✓/✗ | ✓/✗ | +| Spec Tracking | d gap | ✓/✗ | ✓/✗ | +| Documentation | / features | ✓/✗ | ✓/✗ | +| Dependency Policy | | ✓/✗ | ✓/✗ | +| Roadmap | | ✓/✗ | ✓/✗ | +| Versioning Policy | | N/A | ✓/✗ | +| Stable Release | | ✓/✗ | ✓/✗ | + +--- + +**High-Priority Fixes:** +1. + +**For Tier 2:** +1. +2. + +**For Tier 1:** +1. +2. + +Reports: +- results/--assessment.md +- results/--remediation.md +``` + +Use ✓ for pass and ✗ for fail. + +**High-Priority Fixes**: List any issues that need urgent attention (e.g., P0 label audit if P0 count is >2 but other metrics are strong, suggesting mislabeled issues). 
If none, omit this section. + +**For Tier 2 / For Tier 1**: List each gap as a separate numbered item. Use "All requirements met" if there are no gaps for that tier. Each item should be a concise action (e.g., "Re-triage mislabeled P0s", "Document 16 undocumented core features"). + +## Reference Files + +The following reference files are available in the `references/` directory alongside this skill: + +- `references/feature-list.md` -- Canonical list of 48 non-experimental + 5 experimental features (single source of truth) +- `references/tier-requirements.md` -- Full SEP-1730 requirements table with exact thresholds +- `references/report-template.md` -- Output format template for the audit report +- `references/docs-coverage-prompt.md` -- Evaluation prompt for documentation coverage +- `references/policy-evaluation-prompt.md` -- Evaluation prompt for policy review + +Read these reference files when you need the detailed content for evaluation prompts or report formatting. + +## Usage Examples + +``` +# TypeScript SDK — server + client conformance +/mcp-sdk-tier-audit ~/src/mcp/typescript-sdk http://localhost:3000/mcp "npx tsx ~/src/mcp/typescript-sdk/test/conformance/src/everythingClient.ts" + +# Python SDK — server + client conformance +/mcp-sdk-tier-audit ~/src/mcp/python-sdk http://localhost:3001/mcp "uv run python ~/src/mcp/python-sdk/.github/actions/conformance/client.py" + +# Go SDK — server + client conformance +/mcp-sdk-tier-audit ~/src/mcp/go-sdk http://localhost:3002 "/tmp/go-conformance-client" + +# C# SDK — server + client conformance +# Two C#-specific requirements in the client-cmd: +# --framework net9.0 : required because the project targets net8.0/net9.0/net10.0 +# -- $MCP_CONFORMANCE_SCENARIO : the runner sets this env var and uses shell:true, so the +# shell expands it; dotnet passes [scenario, url] to the program +/mcp-sdk-tier-audit ~/src/mcp/csharp-sdk http://localhost:3003 "dotnet run --project 
~/src/mcp/csharp-sdk/tests/ModelContextProtocol.ConformanceClient --framework net9.0 -- $MCP_CONFORMANCE_SCENARIO" + +# Any SDK — server conformance only (no client) +/mcp-sdk-tier-audit ~/src/mcp/some-sdk http://localhost:3004 +``` diff --git a/.claude/skills/mcp-sdk-tier-audit/references/docs-coverage-prompt.md b/.claude/skills/mcp-sdk-tier-audit/references/docs-coverage-prompt.md new file mode 100644 index 0000000..86bbcc5 --- /dev/null +++ b/.claude/skills/mcp-sdk-tier-audit/references/docs-coverage-prompt.md @@ -0,0 +1,139 @@ +# Documentation Coverage Subagent Prompt + +You are evaluating the documentation coverage of an MCP SDK repository for the SEP-1730 tier assessment. + +## Input + +- **SDK path**: {local-path} (absolute path to local SDK checkout) + +## Your Task + +Evaluate the documentation quality and coverage of this MCP SDK against the canonical feature list. You need to determine: + +1. **Tier 1 compliance**: Are ALL non-experimental features documented with examples? +2. **Tier 2 compliance**: Are core features documented (basic docs)? + +## Steps + +### 1. Read the canonical feature list + +Read `references/feature-list.md` for the definitive list of 48 non-experimental features (plus 5 experimental) to evaluate. That file is the single source of truth — use every feature listed there, in order. + +### 2. Find all documentation sources + +The SDK is available at `{local-path}`. 
Search for documentation in these locations: + +- `README.md` (root and any subdirectory READMEs) +- `docs/` directory +- `documentation/` directory +- `examples/` directory +- API documentation (generated or hand-written) +- `CONTRIBUTING.md` +- Inline code comments and docstrings on public API surfaces +- Any `*.md` files in the repo + +```bash +# Find all markdown files +find {local-path} -name "*.md" -not -path "*/node_modules/*" -not -path "*/.git/*" + +# Find example files +find {local-path} -path "*/examples/*" -not -path "*/node_modules/*" + +# Find API docs +find {local-path} -path "*/docs/*" -not -path "*/node_modules/*" +``` + +### 3. Evaluate each feature + +For each of the 48 non-experimental features in the canonical list, determine: + +- **Documented?**: Is there documentation explaining this feature? (Yes/No) +- **Where**: File path and line numbers where documentation exists +- **Has Examples?**: Are there code examples showing how to use this feature? (Yes/No/N/A) +- **Verdict**: PASS (documented with examples), PARTIAL (documented but no examples), or FAIL (not documented) + +## Required Output Format + +Produce your assessment in this exact format: + +```markdown +### Documentation Coverage Assessment + +**SDK path**: {local-path} +**Documentation locations found**: + +- {path1}: {description} +- {path2}: {description} +- ... + +#### Feature Documentation Table + +One row per feature from `references/feature-list.md`. Use the exact feature numbers and names from that file. + +| # | Feature | Documented? | Where | Has Examples? | Verdict | +| --- | -------------------------- | ----------- | -------------- | ----------------------- | ----------------- | +| 1 | Tools - listing | Yes/No | {file}:{lines} | Yes ({N} examples) / No | PASS/PARTIAL/FAIL | +| 2 | Tools - calling | Yes/No | {file}:{lines} | Yes ({N} examples) / No | PASS/PARTIAL/FAIL | +| ... | ... | ... | ... | ... | ... 
| +| 48 | JSON Schema 2020-12 | Yes/No | {file}:{lines} | Yes ({N} examples) / No | PASS/PARTIAL/FAIL | +| — | Tasks - get (experimental) | Yes/No | {file}:{lines} | Yes ({N} examples) / No | INFO | +| ... | ... | ... | ... | ... | ... | + +All 48 non-experimental features MUST appear in the table. Do not skip or merge rows. + +#### Summary + +**Total non-experimental features**: 48 +**PASS (documented with examples)**: {N}/48 +**PARTIAL (documented, no examples)**: {N}/48 +**FAIL (not documented)**: {N}/48 + +**Core features documented**: {N}/{total core} ({percentage}%) +**All features documented with examples**: {N}/48 ({percentage}%) + +#### Tier Verdicts + +**Tier 1** (all non-experimental features documented with examples): **PASS/FAIL** + +- {If FAIL: list the features missing documentation or examples} + +**Tier 2** (basic docs covering core features): **PASS/FAIL** + +- {If FAIL: list the core features missing documentation} +``` + +## What Counts as "Documented" + +A feature is "documented" only if there is **prose documentation** (in README, docs/, or similar) explaining what the feature does, when to use it, and how it works. The following do **not** count as documentation on their own: + +- Example code without accompanying prose explanation +- Conformance test servers or test fixtures +- Source code, even with comments or docstrings +- Mere existence of an API (e.g., a function existing in the SDK) + +**Examples supplement documentation but do not replace it.** A feature with a working example in `examples/` but no prose explaining the feature is PARTIAL, not PASS. A feature with only a conformance server implementation and no user-facing docs is FAIL. + +### Verdict criteria + +- **PASS**: Prose documentation exists explaining the feature AND at least one runnable or near-runnable code example +- **PARTIAL**: Either prose docs exist but no examples, OR examples exist but no prose docs +- **FAIL**: No prose documentation and no examples. 
Also use FAIL if the feature is only demonstrated in test/conformance code with no user-facing docs or examples + +### What counts as an "example" + +- Runnable code in an `examples/` directory +- Code snippets embedded in prose documentation (README, docs/\*.md) +- Go `Example*` test functions (these render on pkg.go.dev and are a language convention) +- Examples in test files count only if they are clearly labeled as examples or referenced from documentation + +### What does NOT count as an example + +- Conformance test server implementations +- Internal test fixtures +- Source code of the SDK itself + +## Other Important Notes + +- If the SDK does not implement a feature at all, mark it as "FAIL" for documentation but note "Not implemented" in the Where column. +- Be thorough: check README, docs/, examples/, API references, and inline docstrings. +- Apply these criteria consistently across all features. Do not give credit for documentation that doesn't exist. diff --git a/.claude/skills/mcp-sdk-tier-audit/references/feature-list.md b/.claude/skills/mcp-sdk-tier-audit/references/feature-list.md new file mode 100644 index 0000000..b0e019a --- /dev/null +++ b/.claude/skills/mcp-sdk-tier-audit/references/feature-list.md @@ -0,0 +1,80 @@ +# MCP SDK Canonical Feature List + +Single source of truth for all MCP features evaluated in the tier audit. **48 non-experimental features** plus 5 experimental (informational only). + +When updating this list, also update the total count referenced in `docs-coverage-prompt.md`. 
+ +## Non-Experimental Features (48 total) + +### Core Features (36) + +| # | Feature | Protocol Method | +| --- | ----------------------------------- | -------------------------------------- | +| 1 | Tools - listing | `tools/list` | +| 2 | Tools - calling | `tools/call` | +| 3 | Tools - text results | | +| 4 | Tools - image results | | +| 5 | Tools - audio results | | +| 6 | Tools - embedded resources | | +| 7 | Tools - error handling | | +| 8 | Tools - change notifications | `notifications/tools/list_changed` | +| 9 | Resources - listing | `resources/list` | +| 10 | Resources - reading text | `resources/read` | +| 11 | Resources - reading binary | `resources/read` | +| 12 | Resources - templates | `resources/templates/list` | +| 13 | Resources - template reading | | +| 14 | Resources - subscribing | `resources/subscribe` | +| 15 | Resources - unsubscribing | `resources/unsubscribe` | +| 16 | Resources - change notifications | `notifications/resources/list_changed` | +| 17 | Prompts - listing | `prompts/list` | +| 18 | Prompts - getting simple | `prompts/get` | +| 19 | Prompts - getting with arguments | `prompts/get` | +| 20 | Prompts - embedded resources | | +| 21 | Prompts - image content | | +| 22 | Prompts - change notifications | `notifications/prompts/list_changed` | +| 23 | Sampling - creating messages | `sampling/createMessage` | +| 24 | Elicitation - form mode | `elicitation/create` | +| 25 | Elicitation - URL mode | `elicitation/create` (mode: "url") | +| 26 | Elicitation - schema validation | | +| 27 | Elicitation - default values | | +| 28 | Elicitation - enum values | | +| 29 | Elicitation - complete notification | `notifications/elicitation/complete` | +| 30 | Roots - listing | `roots/list` | +| 31 | Roots - change notifications | `notifications/roots/list_changed` | +| 32 | Logging - sending log messages | `notifications/message` | +| 33 | Logging - setting level | `logging/setLevel` | +| 34 | Completions - resource argument | `completion/complete` 
| +| 35 | Completions - prompt argument | `completion/complete` | +| 36 | Ping | `ping` | + +### Transport Features (6) + +| # | Feature | +| --- | ---------------------------------- | +| 37 | Streamable HTTP transport (client) | +| 38 | Streamable HTTP transport (server) | +| 39 | SSE transport - legacy (client) | +| 40 | SSE transport - legacy (server) | +| 41 | stdio transport (client) | +| 42 | stdio transport (server) | + +### Protocol Features (6) + +| # | Feature | +| --- | ---------------------------- | +| 43 | Progress notifications | +| 44 | Cancellation | +| 45 | Pagination | +| 46 | Capability negotiation | +| 47 | Protocol version negotiation | +| 48 | JSON Schema 2020-12 support | + +## Experimental Features (5, informational only) + +| # | Feature | Protocol Method | +| --- | ---------------------------- | ---------------------------- | +| — | Tasks - get | `tasks/get` | +| — | Tasks - result | `tasks/result` | +| — | Tasks - cancel | `tasks/cancel` | +| — | Tasks - list | `tasks/list` | +| — | Tasks - status notifications | `notifications/tasks/status` | diff --git a/.claude/skills/mcp-sdk-tier-audit/references/policy-evaluation-prompt.md b/.claude/skills/mcp-sdk-tier-audit/references/policy-evaluation-prompt.md new file mode 100644 index 0000000..b3960e6 --- /dev/null +++ b/.claude/skills/mcp-sdk-tier-audit/references/policy-evaluation-prompt.md @@ -0,0 +1,145 @@ +# Policy Evaluation Prompt + +You are evaluating the governance and policy documentation of an MCP SDK repository for the SEP-1730 tier assessment. + +## Input + +- **SDK path**: {local-path} (absolute path to local SDK checkout) +- **Repository**: {repo} (GitHub `owner/repo`, derived from git remote) +- **CLI policy_signals**: {policy_signals_json} (from the tier-check CLI output — shows which files exist) + +## Your Task + +The CLI has already determined which policy files exist in the repository. Your job is to **read and evaluate the content** of the files that were found. 
Do NOT search for files in other locations — only evaluate what the CLI reported as present. + +Three policy areas to evaluate: + +1. **Dependency update policy** (required for Tier 1 and Tier 2) +2. **Roadmap** (Tier 1: published roadmap; Tier 2: published plan toward Tier 1) +3. **Versioning policy** (Tier 1 only: documented breaking change policy) + +## Steps + +### 1. Identify which files exist from CLI output + +From the `policy_signals.files` object in the CLI JSON output, note which files have `true` (exist) vs `false` (missing). + +The CLI checks these files: + +**Dependency policy**: `DEPENDENCY_POLICY.md`, `docs/dependency-policy.md`, `.github/dependabot.yml`, `.github/renovate.json`, `renovate.json` + +**Roadmap**: `ROADMAP.md`, `docs/roadmap.md` + +**Versioning**: `VERSIONING.md`, `docs/versioning.md`, `BREAKING_CHANGES.md` + +**General** (may contain relevant sections): `CONTRIBUTING.md` + +### 2. Read and evaluate files that exist + +For each file that the CLI reported as present, read its content at `{local-path}/{file}` and evaluate: + +- Is the content substantive (not just a placeholder title)? +- Does it meet the criteria below? + +**Do NOT** search the repo for policy information in other files. If the dedicated file doesn't exist, the policy is not published. + +## Evaluation Criteria + +### Dependency Update Policy + +**PASS** if any of these exist with substantive content: + +- `DEPENDENCY_POLICY.md` or `docs/dependency-policy.md` — must describe how and when dependencies are updated +- `.github/dependabot.yml` or `.github/renovate.json` or `renovate.json` — automated tooling counts as a published policy in practice + +**FAIL** if none of the above exist (per CLI output). + +### Roadmap + +**PASS for Tier 1**: `ROADMAP.md` or `docs/roadmap.md` exists with concrete work items tracking MCP spec components. + +**PASS for Tier 2**: Same file exists with at least a plan toward Tier 1, or explanation for remaining at Tier 2. 
+ +**FAIL** if no roadmap file exists (per CLI output). + +### Versioning Policy + +**PASS for Tier 1** if any of these exist with substantive content: + +- `VERSIONING.md` or `docs/versioning.md` or `BREAKING_CHANGES.md` +- A clearly labeled "Versioning" or "Breaking Changes" section in `CONTRIBUTING.md` (only check if CONTRIBUTING.md exists per CLI output) + +The content must describe: what constitutes a breaking change, how breaking changes are communicated, and the versioning scheme. + +**Not required for Tier 2.** + +**FAIL** if no versioning documentation found in the above files. + +## Required Output Format + +```markdown +### Policy Evaluation Assessment + +**SDK path**: {local-path} +**Repository**: {repo} + +--- + +#### 1. Dependency Update Policy: {PASS/FAIL} + +| File | Exists (CLI) | Content Verdict | +| ------------------------- | ------------ | ------------------------------- | +| DEPENDENCY_POLICY.md | Yes/No | Substantive / Placeholder / N/A | +| docs/dependency-policy.md | Yes/No | Substantive / Placeholder / N/A | +| .github/dependabot.yml | Yes/No | Configured / N/A | +| .github/renovate.json | Yes/No | Configured / N/A | + +**Verdict**: **PASS/FAIL** — {one-line explanation} + +--- + +#### 2. Roadmap: {PASS/FAIL} + +| File | Exists (CLI) | Content Verdict | +| --------------- | ------------ | ------------------------------- | +| ROADMAP.md | Yes/No | Substantive / Placeholder / N/A | +| docs/roadmap.md | Yes/No | Substantive / Placeholder / N/A | + +**Verdict**: + +- **Tier 1**: **PASS/FAIL** — {one-line explanation} +- **Tier 2**: **PASS/FAIL** — {one-line explanation} + +--- + +#### 3. 
Versioning Policy: {PASS/FAIL} + +| File | Exists (CLI) | Content Verdict | +| ------------------------------------ | ------------ | ------------------------------- | +| VERSIONING.md | Yes/No | Substantive / Placeholder / N/A | +| docs/versioning.md | Yes/No | Substantive / Placeholder / N/A | +| BREAKING_CHANGES.md | Yes/No | Substantive / Placeholder / N/A | +| CONTRIBUTING.md (versioning section) | Yes/No | Found / Not found / N/A | + +**Verdict**: + +- **Tier 1**: **PASS/FAIL** — {one-line explanation} +- **Tier 2**: **N/A** — only requires stable release + +--- + +#### Overall Policy Summary + +| Policy Area | Tier 1 | Tier 2 | +| ------------------------ | --------- | --------- | +| Dependency Update Policy | PASS/FAIL | PASS/FAIL | +| Roadmap | PASS/FAIL | PASS/FAIL | +| Versioning Policy | PASS/FAIL | N/A | +``` + +## Important Notes + +- Only evaluate files the CLI reported as existing. Do not search the repo for alternatives. +- If a file exists but is just a placeholder (e.g., only has a title with no content), mark it as "Placeholder" and FAIL. +- Dependabot/Renovate config files pass automatically if they exist and are properly configured. +- CHANGELOG.md showing past releases does NOT count as a roadmap. 
diff --git a/.claude/skills/mcp-sdk-tier-audit/references/report-template.md b/.claude/skills/mcp-sdk-tier-audit/references/report-template.md
new file mode 100644
index 0000000..d77e199
--- /dev/null
+++ b/.claude/skills/mcp-sdk-tier-audit/references/report-template.md
@@ -0,0 +1,144 @@
+# Report Templates
+
+Write two files to `results/` in the conformance repo:
+
+- `results/{owner}-{repo}-assessment.md`
+- `results/{owner}-{repo}-remediation.md`
+
+## assessment.md
+
+```markdown
+# MCP SDK Tier Audit: {repo}
+
+**Date**: {date}
+**Branch**: {branch}
+**Auditor**: mcp-sdk-tier-audit skill (automated + subagent evaluation)
+
+## Tier Assessment: Tier {X}
+
+{Brief 1-2 sentence summary of the overall assessment and key factors.}
+
+### Requirements Summary
+
+| #   | Requirement             | Tier 1 Standard                   | Tier 2 Standard              | Current Value                     | T1?         | T2?         | Gap                |
+| --- | ----------------------- | --------------------------------- | ---------------------------- | --------------------------------- | ----------- | ----------- | ------------------ |
+| 1a  | Server Conformance      | 100% pass rate                    | >= 80% pass rate             | {X}% ({passed}/{total})           | {PASS/FAIL} | {PASS/FAIL} | {detail or "None"} |
+| 1b  | Client Conformance      | 100% pass rate                    | >= 80% pass rate             | {X}% ({passed}/{total})           | {PASS/FAIL} | {PASS/FAIL} | {detail or "None"} |
+| 2   | Issue Triage            | >= 90% within 2 biz days          | >= 80% within 1 month        | {compliance}% ({triaged}/{total}) | {PASS/FAIL} | {PASS/FAIL} | {detail or "None"} |
+| 2b  | Labels                  | 12 required labels                | 12 required labels           | {present}/{required}              | {PASS/FAIL} | {PASS/FAIL} | {detail or "None"} |
+| 3   | Critical Bug Resolution | All P0s within 7 days             | All P0s within 2 weeks       | {open P0 count} open              | {PASS/FAIL} | {PASS/FAIL} | {detail or "None"} |
+| 4   | Stable Release          | Required + clear versioning       | At least one stable release  | {version}                         | {PASS/FAIL} | {PASS/FAIL} | {detail or "None"} |
+| 4b  | Spec Tracking           | Timeline agreed per release       | Within 6 months              | {days_gap}d gap ({PASS/FAIL})     | 
{PASS/FAIL} | {PASS/FAIL} | {detail or "None"} | +| 5 | Documentation | Comprehensive w/ examples | Basic docs for core features | {pass}/{total} features | {PASS/FAIL} | {PASS/FAIL} | {detail or "None"} | +| 6 | Dependency Policy | Published update policy | Published update policy | {Found/Not found} | {PASS/FAIL} | {PASS/FAIL} | {detail or "None"} | +| 7 | Roadmap | Published roadmap | Plan toward Tier 1 | {Found/Not found} | {PASS/FAIL} | {PASS/FAIL} | {detail or "None"} | +| 8 | Versioning Policy | Documented breaking change policy | N/A | {Found/Not found} | {PASS/FAIL} | N/A | {detail or "None"} | + +### Tier Determination + +- Tier 1: {PASS/FAIL} -- {count}/8 requirements met (failing: {list}) +- Tier 2: {PASS/FAIL} -- {count}/7 requirements met (failing: {list}) +- **Final Tier: {X}** + +--- + +## Server Conformance Details + +Pass rate: {X}% ({passed}/{total}) + +| Scenario | Status | Checks | +| -------- | ----------- | ---------------- | +| {name} | {PASS/FAIL} | {passed}/{total} | +| ... | ... | ... | + +--- + +## Client Conformance Details + +Full suite pass rate: {X}% ({passed}/{total}) + +> **Suite breakdown**: Core: {core_pass}/{core_total} ({core_rate}%), Auth: {auth_pass}/{auth_total} ({auth_rate}%) +> **Baseline**: {N} known expected failures documented in `{baseline_file}` ({categories}) + +### Core Scenarios + +| Scenario | Status | Checks | +| -------- | ----------- | ---------------- | +| {name} | {PASS/FAIL} | {passed}/{total} | +| ... | ... | ... | + +### Auth Scenarios + +| Scenario | Status | Checks | Notes | +| -------- | ----------- | ---------------- | --------------------------- | +| {name} | {PASS/FAIL} | {passed}/{total} | {in baseline? / unexpected} | +| ... | ... | ... | ... 
| + +--- + +## Issue Triage Details + +Analysis period: Last {N} issues +Labels: {present/missing list} + +| Metric | Value | T1 Req | T2 Req | Verdict | +| --------------- | ----- | ------ | ------ | --------- | +| Compliance rate | {X}% | >= 90% | >= 80% | {verdict} | +| Exceeding SLA | {N} | -- | -- | -- | +| Open P0s | {N} | 0 | 0 | {verdict} | + +{If open P0s, list them with issue number, title, age} + +--- + +## Documentation Coverage + +{Paste subagent 1 output: feature table with Documented/Where/Examples/Verdict columns} + +--- + +## Policy Evaluation + +{Paste subagent 2 output: dependency policy, roadmap, versioning policy sections with evidence tables} +``` + +## remediation.md + +```markdown +# Remediation Guide: {repo} + +**Date**: {date} +**Current Tier**: {X} + +## Path to Tier 2 + +{Only include this section if current tier is 3. List every requirement not met for Tier 2.} + +| # | Action | Requirement | Effort | Where | +| --- | ------------- | ------------- | -------------------- | ------------ | +| 1 | {description} | {requirement} | {Small/Medium/Large} | {file paths} | +| ... | ... | ... | ... | ... | + +## Path to Tier 1 + +{Always include this section. List every requirement not met for Tier 1 (including any Tier 2 gaps).} + +| # | Action | Requirement | Effort | Where | +| --- | ------------- | ------------- | -------------------- | ------------ | +| 1 | {description} | {requirement} | {Small/Medium/Large} | {file paths} | +| ... | ... | ... | ... | ... | + +## Recommended Next Steps + +1. {First priority action with brief rationale} +2. {Second priority action} +3. {Third priority action} +``` + +## Formatting Rules + +1. Every PASS/FAIL must be based on evidence, not assumption. +2. If data is unavailable, mark as "N/A - {reason}" and note in remediation. +3. All file references must include file path and line numbers where possible. +4. Remediation items ordered by impact: tier-advancing items first. +5. 
Effort estimates: Small (< 1 day), Medium (1-3 days), Large (> 3 days). diff --git a/.claude/skills/mcp-sdk-tier-audit/references/tier-requirements.md b/.claude/skills/mcp-sdk-tier-audit/references/tier-requirements.md new file mode 100644 index 0000000..077917b --- /dev/null +++ b/.claude/skills/mcp-sdk-tier-audit/references/tier-requirements.md @@ -0,0 +1,104 @@ +# SEP-1730: SDK Tier Requirements Reference + +This is the authoritative reference table for MCP SDK tiering requirements, extracted from SEP-1730. + +Source: `modelcontextprotocol/docs/community/sdk-tiers.mdx` in the spec repository + +## Full Requirements Table + +| Requirement | Tier 1: Fully Supported | Tier 2: Commitment to Full Support | Tier 3: Experimental | +| --------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------- | +| **Conformance Tests** | 100% pass rate | >= 80% pass rate | No minimum | +| **New Protocol Features** | Before new spec version release, timeline agreed per release based on feature complexity | Within 6 months | No timeline commitment | +| **Issue Triage** | Within 2 business days | Within a month | No requirement | +| **Critical Bug Resolution** | Within 7 days | Within two weeks | No requirement | +| **Stable Release** | Required with clear versioning | At least one stable release | Not required | +| **Documentation** | Comprehensive with examples for all features | Basic documentation covering core features | No minimum | +| **Dependency Policy** | Published update policy | Published update policy | Not required | +| **Roadmap** | Published roadmap | Published plan toward Tier 1 or explanation for remaining Tier 2 | Not required | + +## Exact Thresholds for Automated Checking + +| Metric | Tier 1 Threshold | Tier 2 Threshold | How to Measure | +| ---------------------- | 
------------------------------------------------------ | ------------------------------- | ---------------------------------------------------------------------------------------- | +| Conformance pass rate | == 100% | >= 80% | `passed / (passed + failed) * 100` from conformance suite | +| Issue triage time | <= 2 business days | <= 1 month (30 calendar days) | Time from issue creation to first label application | +| P0 resolution time | <= 7 calendar days | <= 14 calendar days | Time from P0 label application to issue close | +| Stable release version | >= 1.0.0, no pre-release suffix | >= 1.0.0 (at least one) | Check `gh release list` for version matching `^[0-9]+\.[0-9]+\.[0-9]+$` where major >= 1 | +| Documentation coverage | All non-experimental features documented with examples | Core features documented | Subagent evaluation | +| Dependency policy | Published and findable in repo | Published and findable in repo | Subagent evaluation | +| Roadmap | Published with concrete steps tracking spec components | Published plan toward Tier 1 | Subagent evaluation | +| Versioning policy | Documented breaking change policy | N/A (just needs stable release) | Subagent evaluation | + +## Conformance Score Calculation + +Every scenario in the conformance suite has a `specVersions` field indicating which spec version it targets. The valid values are defined as the `SpecVersion` type (as a list) in `src/types.ts` — run `node dist/index.js list` to see the current mapping of scenarios to spec versions. + +Date-versioned scenarios (e.g. `2025-06-18`, `2025-11-25`) count toward tier scoring. `draft` and `extension` scenarios are listed separately as informational. + +The `--spec-version` CLI flag filters scenarios cumulatively for date versions (e.g. `--spec-version 2025-06-18` includes `2025-03-26` + `2025-06-18`). For `draft`/`extension`, it returns exact matches only. + +The tier-check output includes a per-version pass rate breakdown alongside the aggregate. 
+ +## Tier Relegation Rules + +- **Tier 1 to Tier 2**: Any conformance test fails continuously for 4 weeks +- **Tier 2 to Tier 3**: More than 20% of conformance tests fail continuously for 4 weeks + +## Issue Triage Label Taxonomy + +SDK repositories must use these consistent labels to enable automated reporting. + +### Type Labels (pick one) + +| Label | Description | +| ------------- | ----------------------------- | +| `bug` | Something isn't working | +| `enhancement` | Request for new feature | +| `question` | Further information requested | + +Note: Repositories using GitHub's native issue types satisfy this requirement without needing type labels. + +### Status Labels (pick one) + +| Label | Description | +| -------------------- | ------------------------------------------------------- | +| `needs confirmation` | Unclear if still relevant | +| `needs repro` | Insufficient information to reproduce | +| `ready for work` | Has enough information to start | +| `good first issue` | Good for newcomers | +| `help wanted` | Contributions welcome from those familiar with codebase | + +### Priority Labels (only if actionable) + +| Label | Description | +| ----- | --------------------------------------------------------------- | +| `P0` | Critical: core functionality failures or high-severity security | +| `P1` | Significant bug affecting many users | +| `P2` | Moderate issues, valuable feature requests | +| `P3` | Nice to haves, rare edge cases | + +**Total: 12 labels** (3 type + 5 status + 4 priority) + +## Key Definitions + +### Issue Triage + +Labeling and determining whether an issue is valid. This is NOT the same as resolving the issue. An issue is considered triaged when it receives its first label. 
+ +### Critical Bug (P0) + +- **Security vulnerabilities** with CVSS score >= 7.0 (High or Critical severity) +- **Core functionality failures** that prevent basic MCP operations: connection establishment, message exchange, or use of core primitives (tools, resources, prompts) + +### Stable Release + +A published version explicitly marked as production-ready. Specifically: version `1.0.0` or higher without pre-release identifiers like `-alpha`, `-beta`, or `-rc`. + +### Clear Versioning + +Following idiomatic versioning patterns with documented breaking change policies, so users can understand compatibility expectations when upgrading. + +### Roadmap + +Outlines concrete steps and work items that track implementation of required MCP specification components (non-experimental features and optional capabilities), giving users visibility into upcoming feature support. diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e8f4711..ea43b51 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -5,6 +5,12 @@ on: pull_request: release: types: [published] + workflow_dispatch: + inputs: + publish_alpha: + description: 'Publish to npm as alpha prerelease. If unchecked, only tests run (no publish). Version must already be bumped in package.json on the selected branch.' 
+ type: boolean + default: false permissions: contents: read @@ -31,7 +37,7 @@ jobs: publish: runs-on: ubuntu-latest - if: github.event_name == 'release' + if: github.event_name == 'release' || (github.event_name == 'workflow_dispatch' && github.event.inputs.publish_alpha == 'true') environment: release needs: [test] @@ -48,6 +54,11 @@ jobs: registry-url: 'https://registry.npmjs.org' - run: npm ci - - run: npm publish --provenance --access public - env: - NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} + + - name: Publish to npm + run: | + if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then + npm publish --access public --tag alpha + else + npm publish --access public + fi diff --git a/.gitignore b/.gitignore index 5c745fc..bd9f0f5 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,4 @@ results/ lefthook-local.yml dist/ .vscode/ +.idea/ diff --git a/LICENSE b/LICENSE index 136059a..4a93985 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,193 @@ +The MCP project is undergoing a licensing transition from the MIT License to the Apache License, Version 2.0 ("Apache-2.0"). All new code and specification contributions to the project are licensed under Apache-2.0. Documentation contributions (excluding specifications) are licensed under CC-BY-4.0. + +Contributions for which relicensing consent has been obtained are licensed under Apache-2.0. Contributions made by authors who originally licensed their work under the MIT License and who have not yet granted explicit permission to relicense remain licensed under the MIT License. + +No rights beyond those granted by the applicable original license are conveyed for such contributions. + +--- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright + owner or by an individual or Legal Entity authorized to submit on behalf + of the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + +--- + MIT License -Copyright (c) 2024 Anthropic, PBC +Copyright (c) 2024-2025 Model Context Protocol a Series of LF Projects, LLC. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -18,4 +205,12 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file +SOFTWARE. + +--- + +Creative Commons Attribution 4.0 International (CC-BY-4.0) + +Documentation in this project (excluding specifications) is licensed under +CC-BY-4.0. See https://creativecommons.org/licenses/by/4.0/legalcode for +the full license text. diff --git a/README.md b/README.md index f1e7ddf..8096214 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,10 @@ A framework for testing MCP (Model Context Protocol) client and server implementations against the specification. -> [!WARNING] This repository is a work in progress and is unstable. Join the conversation in the #conformance-testing-wg in the MCP Contributors discord. +> [!WARNING] +> This repository is a work in progress and is unstable. Join the conversation in the #conformance-testing-wg in the MCP Contributors discord. + +**For SDK maintainers:** See [SDK Integration Guide](./SDK_INTEGRATION.md) for a streamlined guide on integrating conformance tests into your SDK repository. 
## Quick Start @@ -64,6 +67,7 @@ npx @modelcontextprotocol/conformance client --command "" --scen - `--command` - The command to run your MCP client (can include flags) - `--scenario` - The test scenario to run (e.g., "initialize") - `--suite` - Run a suite of tests in parallel (e.g., "auth") +- `--expected-failures ` - Path to YAML baseline file of known failures (see [Expected Failures](#expected-failures)) - `--timeout` - Timeout in milliseconds (default: 30000) - `--verbose` - Show verbose output @@ -78,7 +82,10 @@ npx @modelcontextprotocol/conformance server --url [--scenario ] **Options:** - `--url` - URL of the server to test -- `--scenario ` - Test scenario to run (e.g., "server-initialize". Runs all available scenarios by default +- `--scenario ` - Test scenario to run (e.g., "server-initialize"). Runs all available scenarios by default +- `--suite ` - Suite to run: "active" (default), "all", or "pending" +- `--expected-failures ` - Path to YAML baseline file of known failures (see [Expected Failures](#expected-failures)) +- `--verbose` - Show verbose output ## Test Results @@ -92,6 +99,91 @@ npx @modelcontextprotocol/conformance server --url [--scenario ] - `checks.json` - Array of conformance check results with pass/fail status +## Expected Failures + +SDKs that don't yet pass all conformance tests can specify a baseline of known failures. This allows running conformance tests in CI without failing, while still catching regressions. + +Create a YAML file listing expected failures by mode: + +```yaml +# conformance-baseline.yml +server: + - tools-call-with-progress + - resources-subscribe +client: + - sse-retry +``` + +Then pass it to the CLI: + +```bash +npx @modelcontextprotocol/conformance server --url http://localhost:3000/mcp --expected-failures ./conformance-baseline.yml +``` + +**Exit code behavior:** + +| Scenario Result | In Baseline? 
| Outcome | +| --------------- | ------------ | ----------------------------------------- | +| Fails | Yes | Exit 0 — expected failure | +| Fails | No | Exit 1 — unexpected regression | +| Passes | Yes | Exit 1 — stale baseline, remove the entry | +| Passes | No | Exit 0 — normal pass | + +This ensures: + +- CI passes when only known failures occur +- CI fails on new regressions (unexpected failures) +- CI fails when a fix lands but the baseline isn't updated (stale entries) + +## GitHub Action + +This repo provides a composite GitHub Action so SDK repos don't need to write their own conformance scripts. + +### Server Testing + +```yaml +steps: + - uses: actions/checkout@v4 + + # Start your server (SDK-specific) + - run: | + my-server --port 3001 & + timeout 15 bash -c 'until curl -s http://localhost:3001/mcp; do sleep 0.5; done' + + - uses: modelcontextprotocol/conformance@v0.1.11 + with: + mode: server + url: http://localhost:3001/mcp + expected-failures: ./conformance-baseline.yml # optional +``` + +### Client Testing + +```yaml +steps: + - uses: actions/checkout@v4 + + - uses: modelcontextprotocol/conformance@v0.1.11 + with: + mode: client + command: 'python tests/conformance/client.py' + expected-failures: ./conformance-baseline.yml # optional +``` + +### Action Inputs + +| Input | Required | Description | +| ------------------- | ----------- | ----------------------------------------------- | +| `mode` | Yes | `server` or `client` | +| `url` | Server mode | URL of the server to test | +| `command` | Client mode | Command to run the client under test | +| `expected-failures` | No | Path to YAML baseline file | +| `suite` | No | Test suite to run | +| `scenario` | No | Run a single scenario by name | +| `timeout` | No | Timeout in ms for client tests (default: 30000) | +| `verbose` | No | Show verbose output (default: false) | +| `node-version` | No | Node.js version (default: 20) | + ## Example Clients - `examples/clients/typescript/everything-client.ts` - 
Single client that handles all scenarios based on scenario name (recommended) @@ -120,6 +212,29 @@ Run `npx @modelcontextprotocol/conformance list --server` to see all available s - **resources-\*** - Resource management scenarios - **prompts-\*** - Prompt management scenarios +## SDK Tier Assessment + +The `tier-check` subcommand evaluates an MCP SDK repository against [SEP-1730](https://github.com/modelcontextprotocol/modelcontextprotocol/issues/1730) (the SDK Tiering System): + +```bash +# Without conformance tests (fastest) +gh auth login +npm run --silent tier-check -- --repo modelcontextprotocol/typescript-sdk --skip-conformance + +# With conformance tests (start the everything server first) +npm run --silent tier-check -- \ + --repo modelcontextprotocol/typescript-sdk \ + --conformance-server-url http://localhost:3000/mcp +``` + +For a full AI-assisted assessment with remediation guide, use Claude Code: + +``` +/mcp-sdk-tier-audit +``` + +See [`.claude/skills/mcp-sdk-tier-audit/README.md`](.claude/skills/mcp-sdk-tier-audit/README.md) for full documentation. + ## Architecture See `src/runner/DESIGN.md` for detailed architecture documentation. diff --git a/SDK_INTEGRATION.md b/SDK_INTEGRATION.md new file mode 100644 index 0000000..a092115 --- /dev/null +++ b/SDK_INTEGRATION.md @@ -0,0 +1,208 @@ +# Using MCP Conformance Tests in SDK Repositories + +This guide explains how to integrate the MCP conformance test suite into your language SDK repository. The conformance framework tests your MCP implementation against the protocol specification to ensure compatibility. 
+ +## Quick Start + +Install and run conformance tests: + +```bash +# Client testing (framework starts a test server, runs your client against it) +npx @modelcontextprotocol/conformance client --command "your-client-command" --scenario initialize + +# Server testing (your server must already be running) +npx @modelcontextprotocol/conformance server --url http://localhost:3000/mcp --scenario server-initialize +``` + +## Two Testing Modes + +### Client Testing + +The framework **starts a test server** and spawns your client against it. Your client receives the server URL as its final command-line argument. + +```bash +# Run a single scenario +npx @modelcontextprotocol/conformance client \ + --command "python tests/conformance/client.py" \ + --scenario initialize + +# Run a suite of tests +npx @modelcontextprotocol/conformance client \ + --command "python tests/conformance/client.py" \ + --suite auth +``` + +**Available client suites:** `all`, `core`, `extensions`, `auth`, `metadata`, `sep-835` + +Your client should: + +1. Accept the server URL as its last argument +2. Read `MCP_CONFORMANCE_SCENARIO` env var to determine which scenario is being tested +3. Read `MCP_CONFORMANCE_CONTEXT` env var for scenario-specific data (e.g., OAuth credentials) + +### Server Testing + +Your server must be **running before** invoking the conformance tool. The framework connects to it as an MCP client. + +```bash +# Start your server first +your-server --port 3001 & + +# Then run conformance tests +npx @modelcontextprotocol/conformance server \ + --url http://localhost:3001/mcp \ + --suite active +``` + +**Available server suites:** `active` (default), `all`, `pending` + +**Note:** Server testing requires you to manage server lifecycle (start, health-check, cleanup) yourself. + +--- + +## Expected Failures (Baseline) File + +The expected-failures feature lets your CI pass while you work on fixing known issues. 
It catches regressions by failing when: + +- A previously passing test starts failing (regression) +- A previously failing test starts passing (stale baseline - remove the entry) + +### File Format + +Create a YAML file (e.g., `conformance-baseline.yml`): + +```yaml +server: + - tools-call-with-progress + - resources-subscribe +client: + - auth/client-credentials-jwt +``` + +### Usage + +```bash +npx @modelcontextprotocol/conformance server \ + --url http://localhost:3000/mcp \ + --expected-failures ./conformance-baseline.yml +``` + +### Exit Code Behavior + +| Scenario Result | In Baseline? | Exit Code | Meaning | +| --------------- | ------------ | --------- | ----------------------------- | +| Fails | Yes | 0 | Expected failure | +| Fails | No | 1 | Unexpected regression | +| Passes | Yes | 1 | Stale baseline - remove entry | +| Passes | No | 0 | Normal pass | + +--- + +## GitHub Action + +The conformance repo provides a reusable GitHub Action that handles Node.js setup and conformance execution. + +### Client Testing Example + +```yaml +name: Conformance Tests +on: [push, pull_request] + +jobs: + conformance: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up your SDK + run: | + # Your SDK setup (pip install, npm install, etc.) + pip install -e . + + - uses: modelcontextprotocol/conformance@v0.1.10 + with: + mode: client + command: 'python tests/conformance/client.py' + suite: auth + expected-failures: ./conformance-baseline.yml +``` + +### Server Testing Example + +```yaml +name: Conformance Tests +on: [push, pull_request] + +jobs: + conformance: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up and start server + run: | + pip install -e . 
+ python -m myserver --port 3001 & + # Wait for server to be ready + timeout 15 bash -c 'until curl -s http://localhost:3001/mcp; do sleep 0.5; done' + + - uses: modelcontextprotocol/conformance@v0.1.10 + with: + mode: server + url: http://localhost:3001/mcp + suite: active + expected-failures: ./conformance-baseline.yml +``` + +### Action Inputs + +| Input | Required | Description | +| ------------------- | ----------- | ----------------------------------------------- | +| `mode` | Yes | `server` or `client` | +| `url` | Server mode | URL of the server to test | +| `command` | Client mode | Command to run the client | +| `expected-failures` | No | Path to YAML baseline file | +| `suite` | No | Test suite to run | +| `scenario` | No | Run a single scenario by name | +| `timeout` | No | Timeout in ms for client tests (default: 30000) | +| `verbose` | No | Show verbose output (default: false) | +| `node-version` | No | Node.js version (default: 20) | + +--- + +## Writing Conformance Clients/Servers + +### Example Client Pattern + +See [`src/conformance/everything-client.ts`](https://github.com/modelcontextprotocol/typescript-sdk/blob/main/src/conformance/everything-client.ts) in the TypeScript SDK for a reference implementation. 
The recommended pattern is a single client that routes behavior based on the scenario: + +```python +import os +import sys +import json + +def main(): + server_url = sys.argv[-1] # URL passed as last argument + scenario = os.environ.get("MCP_CONFORMANCE_SCENARIO", "") + context = json.loads(os.environ.get("MCP_CONFORMANCE_CONTEXT", "{}")) + + if scenario.startswith("auth/"): + run_auth_scenario(server_url, scenario, context) + else: + run_default_scenario(server_url) + +if __name__ == "__main__": + main() +``` + +### Example Server Pattern + +See [`src/conformance/everything-server.ts`](https://github.com/modelcontextprotocol/typescript-sdk/blob/main/src/conformance/everything-server.ts) in the TypeScript SDK for a reference implementation that handles all server scenarios. + +--- + +## Additional Resources + +- [Conformance README](./README.md) +- [Design documentation](./src/runner/DESIGN.md) +- [TypeScript SDK conformance examples](https://github.com/modelcontextprotocol/typescript-sdk/tree/main/src/conformance) diff --git a/action.yml b/action.yml new file mode 100644 index 0000000..e5fe549 --- /dev/null +++ b/action.yml @@ -0,0 +1,97 @@ +name: 'MCP Conformance Tests' +description: 'Run MCP conformance tests against a server or client implementation' +inputs: + mode: + description: 'Test mode: "server" or "client"' + required: true + url: + description: 'Server URL to test against (required for server mode)' + required: false + command: + description: 'Command to run the client under test (required for client mode)' + required: false + expected-failures: + description: 'Path to YAML file listing expected failures (baseline)' + required: false + suite: + description: 'Test suite to run (server: "active"|"all"|"pending", client: "all"|"auth"|"metadata"|"sep-835")' + required: false + scenario: + description: 'Run a single scenario by name' + required: false + timeout: + description: 'Timeout in milliseconds for client tests (default: 30000)' + required: false + 
default: '30000' + verbose: + description: 'Show verbose output (default: false)' + required: false + default: 'false' + node-version: + description: 'Node.js version to use (default: 20)' + required: false + default: '20' +runs: + using: 'composite' + steps: + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: ${{ inputs.node-version }} + + - name: Build conformance tests + shell: bash + run: | + cd "${{ github.action_path }}" + npm ci + npm run build + + - name: Run conformance tests + shell: bash + run: | + CONFORMANCE="${{ github.action_path }}/dist/index.js" + + # Build the command arguments + ARGS="${{ inputs.mode }}" + + # Mode-specific required options + if [ "${{ inputs.mode }}" = "server" ]; then + if [ -z "${{ inputs.url }}" ]; then + echo "::error::The 'url' input is required for server mode" + exit 1 + fi + ARGS="${ARGS} --url ${{ inputs.url }}" + elif [ "${{ inputs.mode }}" = "client" ]; then + if [ -z "${{ inputs.command }}" ]; then + echo "::error::The 'command' input is required for client mode" + exit 1 + fi + ARGS="${ARGS} --command '${{ inputs.command }}'" + else + echo "::error::Invalid mode '${{ inputs.mode }}'. Must be 'server' or 'client'." 
+ exit 1 + fi + + # Optional arguments + if [ -n "${{ inputs.expected-failures }}" ]; then + ARGS="${ARGS} --expected-failures ${{ inputs.expected-failures }}" + fi + + if [ -n "${{ inputs.suite }}" ]; then + ARGS="${ARGS} --suite ${{ inputs.suite }}" + fi + + if [ -n "${{ inputs.scenario }}" ]; then + ARGS="${ARGS} --scenario ${{ inputs.scenario }}" + fi + + if [ "${{ inputs.mode }}" = "client" ]; then + ARGS="${ARGS} --timeout ${{ inputs.timeout }}" + fi + + if [ "${{ inputs.verbose }}" = "true" ]; then + ARGS="${ARGS} --verbose" + fi + + echo "Running: node ${CONFORMANCE} ${ARGS}" + eval "node ${CONFORMANCE} ${ARGS}" diff --git a/examples/clients/typescript/auth-test-attempts-dcr.ts b/examples/clients/typescript/auth-test-attempts-dcr.ts new file mode 100644 index 0000000..f9a6510 --- /dev/null +++ b/examples/clients/typescript/auth-test-attempts-dcr.ts @@ -0,0 +1,50 @@ +#!/usr/bin/env node + +import { Client } from '@modelcontextprotocol/sdk/client/index.js'; +import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'; +import { withOAuthRetry } from './helpers/withOAuthRetry'; +import { runAsCli } from './helpers/cliRunner'; +import { logger } from './helpers/logger'; + +/** + * Non-compliant client that ignores pre-registered credentials and attempts DCR. + * + * This client intentionally ignores the client_id and client_secret passed via + * MCP_CONFORMANCE_CONTEXT and instead attempts to do Dynamic Client Registration. + * When run against a server that does not support DCR (no registration_endpoint), + * this client will fail. + * + * Used to test that conformance checks detect clients that don't properly + * use pre-registered credentials when server doesn't support DCR. 
+ */ +export async function runClient(serverUrl: string): Promise { + const client = new Client( + { name: 'test-auth-client-attempts-dcr', version: '1.0.0' }, + { capabilities: {} } + ); + + // Non-compliant: ignores pre-registered credentials from context + // and creates a fresh provider that will attempt DCR + const oauthFetch = withOAuthRetry( + 'test-auth-client-attempts-dcr', + new URL(serverUrl) + )(fetch); + + const transport = new StreamableHTTPClientTransport(new URL(serverUrl), { + fetch: oauthFetch + }); + + await client.connect(transport); + logger.debug('Connected to MCP server (attempted DCR instead of pre-reg)'); + + await client.listTools(); + logger.debug('Successfully listed tools'); + + await client.callTool({ name: 'test-tool', arguments: {} }); + logger.debug('Successfully called tool'); + + await transport.close(); + logger.debug('Connection closed successfully'); +} + +runAsCli(runClient, import.meta.url, 'auth-test-attempts-dcr '); diff --git a/examples/clients/typescript/auth-test-no-pkce.ts b/examples/clients/typescript/auth-test-no-pkce.ts new file mode 100644 index 0000000..308351d --- /dev/null +++ b/examples/clients/typescript/auth-test-no-pkce.ts @@ -0,0 +1,173 @@ +#!/usr/bin/env node + +/** + * Broken client that doesn't use PKCE. + * + * BUG: Skips PKCE entirely - doesn't send code_challenge in authorization + * request and doesn't send code_verifier in token request. 
+ * + * Per MCP spec: "MCP clients MUST implement PKCE according to OAuth 2.1" + */ + +import { Client } from '@modelcontextprotocol/sdk/client/index.js'; +import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'; +import { extractWWWAuthenticateParams } from '@modelcontextprotocol/sdk/client/auth.js'; +import type { FetchLike } from '@modelcontextprotocol/sdk/shared/transport.js'; +import type { Middleware } from '@modelcontextprotocol/sdk/client/middleware.js'; +import { runAsCli } from './helpers/cliRunner'; +import { logger } from './helpers/logger'; + +interface OAuthTokens { + access_token: string; + token_type: string; + expires_in?: number; + refresh_token?: string; + scope?: string; +} + +/** + * Custom OAuth flow that deliberately skips PKCE. + * This is intentionally broken behavior for conformance testing. + */ +async function oauthFlowWithoutPkce( + _serverUrl: string | URL, + resourceMetadataUrl: string | URL, + fetchFn: FetchLike +): Promise { + // 1. Fetch Protected Resource Metadata + const prmResponse = await fetchFn(resourceMetadataUrl); + if (!prmResponse.ok) { + throw new Error(`Failed to fetch PRM: ${prmResponse.status}`); + } + const prm = await prmResponse.json(); + const authServerUrl = prm.authorization_servers?.[0]; + if (!authServerUrl) { + throw new Error('No authorization server in PRM'); + } + + // 2. Fetch Authorization Server Metadata + const asMetadataUrl = new URL( + '/.well-known/oauth-authorization-server', + authServerUrl + ); + const asResponse = await fetchFn(asMetadataUrl.toString()); + if (!asResponse.ok) { + throw new Error(`Failed to fetch AS metadata: ${asResponse.status}`); + } + const asMetadata = await asResponse.json(); + + // 3. 
Register client (DCR) + const dcrResponse = await fetchFn(asMetadata.registration_endpoint, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + client_name: 'test-auth-client-no-pkce', + redirect_uris: ['http://localhost:3000/callback'] + }) + }); + if (!dcrResponse.ok) { + throw new Error(`DCR failed: ${dcrResponse.status}`); + } + const clientInfo = await dcrResponse.json(); + + // 4. Build authorization URL WITHOUT PKCE (BUG!) + const authUrl = new URL(asMetadata.authorization_endpoint); + authUrl.searchParams.set('response_type', 'code'); + authUrl.searchParams.set('client_id', clientInfo.client_id); + authUrl.searchParams.set('redirect_uri', 'http://localhost:3000/callback'); + authUrl.searchParams.set('state', 'test-state'); + // BUG: NOT setting code_challenge or code_challenge_method + + // 5. Fetch authorization endpoint (simulates redirect) + const authResponse = await fetchFn(authUrl.toString(), { + redirect: 'manual' + }); + const location = authResponse.headers.get('location'); + if (!location) { + throw new Error('No redirect from authorization endpoint'); + } + const redirectUrl = new URL(location); + const authCode = redirectUrl.searchParams.get('code'); + if (!authCode) { + throw new Error('No auth code in redirect'); + } + + // 6. Exchange code for token WITHOUT code_verifier (BUG!) 
+ const tokenResponse = await fetchFn(asMetadata.token_endpoint, { + method: 'POST', + headers: { 'Content-Type': 'application/x-www-form-urlencoded' }, + body: new URLSearchParams({ + grant_type: 'authorization_code', + code: authCode, + redirect_uri: 'http://localhost:3000/callback', + client_id: clientInfo.client_id + // BUG: NOT sending code_verifier + }).toString() + }); + + if (!tokenResponse.ok) { + const error = await tokenResponse.text(); + throw new Error(`Token request failed: ${tokenResponse.status} - ${error}`); + } + + return tokenResponse.json(); +} + +/** + * Creates a fetch wrapper that uses OAuth without PKCE. + */ +function withOAuthNoPkce(baseUrl: string | URL): Middleware { + let tokens: OAuthTokens | undefined; + + return (next: FetchLike) => { + return async ( + input: string | URL, + init?: RequestInit + ): Promise => { + const makeRequest = async (): Promise => { + const headers = new Headers(init?.headers); + if (tokens) { + headers.set('Authorization', `Bearer ${tokens.access_token}`); + } + return next(input, { ...init, headers }); + }; + + let response = await makeRequest(); + + if (response.status === 401) { + const { resourceMetadataUrl } = extractWWWAuthenticateParams(response); + if (!resourceMetadataUrl) { + throw new Error('No resource_metadata in WWW-Authenticate'); + } + tokens = await oauthFlowWithoutPkce(baseUrl, resourceMetadataUrl, next); + response = await makeRequest(); + } + + return response; + }; + }; +} + +export async function runClient(serverUrl: string): Promise { + const client = new Client( + { name: 'test-auth-client-no-pkce', version: '1.0.0' }, + { capabilities: {} } + ); + + const oauthFetch = withOAuthNoPkce(new URL(serverUrl))(fetch); + + const transport = new StreamableHTTPClientTransport(new URL(serverUrl), { + fetch: oauthFetch + }); + + await client.connect(transport); + logger.debug('Successfully connected to MCP server'); + + await client.listTools(); + logger.debug('Successfully listed tools'); + + 
await transport.close(); + logger.debug('Connection closed successfully'); +} + +runAsCli(runClient, import.meta.url, 'auth-test-no-pkce '); diff --git a/examples/clients/typescript/everything-client.ts b/examples/clients/typescript/everything-client.ts index 4491fe6..21804a8 100644 --- a/examples/clients/typescript/everything-client.ts +++ b/examples/clients/typescript/everything-client.ts @@ -21,7 +21,12 @@ import { } from '@modelcontextprotocol/sdk/client/auth-extensions.js'; import { ElicitRequestSchema } from '@modelcontextprotocol/sdk/types.js'; import { ClientConformanceContextSchema } from '../../../src/schemas/context.js'; -import { withOAuthRetry, handle401 } from './helpers/withOAuthRetry.js'; +import { + withOAuthRetry, + withOAuthRetryWithProvider, + handle401 +} from './helpers/withOAuthRetry.js'; +import { ConformanceOAuthProvider } from './helpers/ConformanceOAuthProvider.js'; import { logger } from './helpers/logger.js'; /** @@ -139,7 +144,9 @@ registerScenarios( // Token endpoint auth method scenarios 'auth/token-endpoint-auth-basic', 'auth/token-endpoint-auth-post', - 'auth/token-endpoint-auth-none' + 'auth/token-endpoint-auth-none', + // Resource mismatch (client should error when PRM resource doesn't match) + 'auth/resource-mismatch' ], runAuthClient ); @@ -300,6 +307,220 @@ export async function runClientCredentialsBasic( registerScenario('auth/client-credentials-basic', runClientCredentialsBasic); +// ============================================================================ +// Pre-registration scenario +// ============================================================================ + +/** + * Pre-registration: client uses pre-registered credentials (no DCR). + * + * Server does not advertise registration_endpoint, so client must use + * pre-configured client_id and client_secret passed via context. 
+ */ +export async function runPreRegistration(serverUrl: string): Promise { + const ctx = parseContext(); + if (ctx.name !== 'auth/pre-registration') { + throw new Error(`Expected pre-registration context, got ${ctx.name}`); + } + + const client = new Client( + { name: 'conformance-pre-registration', version: '1.0.0' }, + { capabilities: {} } + ); + + // Create provider with pre-registered credentials + const provider = new ConformanceOAuthProvider( + 'http://localhost:3000/callback', + { + client_name: 'conformance-pre-registration', + redirect_uris: ['http://localhost:3000/callback'] + } + ); + + // Pre-set the client information so the SDK won't attempt DCR + provider.saveClientInformation({ + client_id: ctx.client_id, + client_secret: ctx.client_secret, + redirect_uris: ['http://localhost:3000/callback'] + }); + + // Use the provider-based middleware + const oauthFetch = withOAuthRetryWithProvider( + provider, + new URL(serverUrl), + handle401 + )(fetch); + + const transport = new StreamableHTTPClientTransport(new URL(serverUrl), { + fetch: oauthFetch + }); + + await client.connect(transport); + logger.debug('Successfully connected with pre-registered credentials'); + + await client.listTools(); + logger.debug('Successfully listed tools'); + + await transport.close(); + logger.debug('Connection closed successfully'); +} + +registerScenario('auth/pre-registration', runPreRegistration); + +// ============================================================================ +// Cross-App Access (SEP-990) scenarios +// ============================================================================ + +/** + * Cross-app access: Complete Flow (SEP-990) + * Tests the complete flow: IDP ID token -> authorization grant -> access token -> MCP access. 
+ */ +export async function runCrossAppAccessCompleteFlow( + serverUrl: string +): Promise { + const ctx = parseContext(); + if (ctx.name !== 'auth/cross-app-access-complete-flow') { + throw new Error( + `Expected cross-app-access-complete-flow context, got ${ctx.name}` + ); + } + + logger.debug('Starting complete cross-app access flow...'); + logger.debug('IDP Issuer:', ctx.idp_issuer); + logger.debug('IDP Token Endpoint:', ctx.idp_token_endpoint); + + // Step 0: Discover resource and auth server from PRM metadata + logger.debug('Step 0: Discovering resource and auth server via PRM...'); + const prmUrl = new URL( + '/.well-known/oauth-protected-resource/mcp', + serverUrl + ); + const prmResponse = await fetch(prmUrl.toString()); + if (!prmResponse.ok) { + throw new Error(`PRM discovery failed: ${prmResponse.status}`); + } + const prm = await prmResponse.json(); + const resource = prm.resource; + const authServerUrl = prm.authorization_servers[0]; + logger.debug('Discovered resource:', resource); + logger.debug('Discovered auth server:', authServerUrl); + + // Discover auth server metadata to find token endpoint + const asMetadataUrl = new URL( + '/.well-known/oauth-authorization-server', + authServerUrl + ); + const asMetadataResponse = await fetch(asMetadataUrl.toString()); + if (!asMetadataResponse.ok) { + throw new Error( + `Auth server metadata discovery failed: ${asMetadataResponse.status}` + ); + } + const asMetadata = await asMetadataResponse.json(); + const asTokenEndpoint = asMetadata.token_endpoint; + const asIssuer = asMetadata.issuer; + logger.debug('Auth server issuer:', asIssuer); + logger.debug('Auth server token endpoint:', asTokenEndpoint); + + // Verify AS supports jwt-bearer grant type + const grantTypes: string[] = asMetadata.grant_types_supported || []; + if (!grantTypes.includes('urn:ietf:params:oauth:grant-type:jwt-bearer')) { + throw new Error( + `Auth server does not support jwt-bearer grant type. 
Supported: ${grantTypes.join(', ')}` + ); + } + logger.debug('Auth server supports jwt-bearer grant type'); + + // Step 1: Token Exchange at IdP (IDP ID token -> ID-JAG) + logger.debug('Step 1: Exchanging IDP ID token for ID-JAG at IdP...'); + const tokenExchangeParams = new URLSearchParams({ + grant_type: 'urn:ietf:params:oauth:grant-type:token-exchange', + requested_token_type: 'urn:ietf:params:oauth:token-type:id-jag', + audience: asIssuer, + resource: resource, + subject_token: ctx.idp_id_token, + subject_token_type: 'urn:ietf:params:oauth:token-type:id_token', + client_id: ctx.idp_client_id + }); + + const tokenExchangeResponse = await fetch(ctx.idp_token_endpoint, { + method: 'POST', + headers: { 'Content-Type': 'application/x-www-form-urlencoded' }, + body: tokenExchangeParams + }); + + if (!tokenExchangeResponse.ok) { + const error = await tokenExchangeResponse.text(); + throw new Error(`Token exchange failed: ${error}`); + } + + const tokenExchangeResult = await tokenExchangeResponse.json(); + const idJag = tokenExchangeResult.access_token; // ID-JAG (ID-bound JSON Assertion Grant) + logger.debug('Token exchange successful, ID-JAG obtained'); + logger.debug('Issued token type:', tokenExchangeResult.issued_token_type); + + // Step 2: JWT Bearer Grant at AS (ID-JAG -> access token) + // Client authenticates via client_secret_basic (RFC 7523 Section 5) + logger.debug('Step 2: Exchanging ID-JAG for access token at Auth Server...'); + const jwtBearerParams = new URLSearchParams({ + grant_type: 'urn:ietf:params:oauth:grant-type:jwt-bearer', + assertion: idJag + }); + + const basicAuth = Buffer.from( + `${encodeURIComponent(ctx.client_id)}:${encodeURIComponent(ctx.client_secret)}` + ).toString('base64'); + + const tokenResponse = await fetch(asTokenEndpoint, { + method: 'POST', + headers: { + 'Content-Type': 'application/x-www-form-urlencoded', + Authorization: `Basic ${basicAuth}` + }, + body: jwtBearerParams + }); + + if (!tokenResponse.ok) { + const error = 
await tokenResponse.text(); + throw new Error(`JWT bearer grant failed: ${error}`); + } + + const tokenResult = await tokenResponse.json(); + logger.debug('JWT bearer grant successful, access token obtained'); + + // Step 3: Use access token to access MCP server + logger.debug('Step 3: Accessing MCP server with access token...'); + const client = new Client( + { name: 'conformance-cross-app-access', version: '1.0.0' }, + { capabilities: {} } + ); + + const transport = new StreamableHTTPClientTransport(new URL(serverUrl), { + requestInit: { + headers: { + Authorization: `Bearer ${tokenResult.access_token}` + } + } + }); + + await client.connect(transport); + logger.debug('Successfully connected to MCP server'); + + await client.listTools(); + logger.debug('Successfully listed tools'); + + await client.callTool({ name: 'test-tool', arguments: {} }); + logger.debug('Successfully called tool'); + + await transport.close(); + logger.debug('Complete cross-app access flow completed successfully'); +} + +registerScenario( + 'auth/cross-app-access-complete-flow', + runCrossAppAccessCompleteFlow +); + // ============================================================================ // Main entry point // ============================================================================ diff --git a/examples/clients/typescript/helpers/withOAuthRetry.ts b/examples/clients/typescript/helpers/withOAuthRetry.ts index 497f4a4..429ca53 100644 --- a/examples/clients/typescript/helpers/withOAuthRetry.ts +++ b/examples/clients/typescript/helpers/withOAuthRetry.ts @@ -45,6 +45,7 @@ export const handle401 = async ( } } }; + /** * Creates a fetch wrapper that handles OAuth authentication with retry logic. 
* @@ -53,8 +54,10 @@ export const handle401 = async ( * - Does not throw UnauthorizedError on redirect, but instead retries * - Calls next() instead of throwing for redirect-based auth * - * @param provider - OAuth client provider for authentication + * @param clientName - Client name for OAuth registration * @param baseUrl - Base URL for OAuth server discovery (defaults to request URL domain) + * @param handle401Fn - Custom 401 handler function + * @param clientMetadataUrl - Optional CIMD URL for URL-based client IDs * @returns A fetch middleware function */ export const withOAuthRetry = ( @@ -71,6 +74,18 @@ export const withOAuthRetry = ( }, clientMetadataUrl ); + return withOAuthRetryWithProvider(provider, baseUrl, handle401Fn); +}; + +/** + * Creates a fetch wrapper using a pre-configured OAuth provider. + * Use this when you need to pre-set client credentials (e.g., for pre-registration tests). + */ +export const withOAuthRetryWithProvider = ( + provider: ConformanceOAuthProvider, + baseUrl?: string | URL, + handle401Fn: typeof handle401 = handle401 +): Middleware => { return (next: FetchLike) => { return async ( input: string | URL, diff --git a/examples/servers/typescript/README.md b/examples/servers/typescript/README.md index 5beacf1..53c28fd 100644 --- a/examples/servers/typescript/README.md +++ b/examples/servers/typescript/README.md @@ -170,3 +170,21 @@ If you're implementing MCP in another language/SDK: 5. **Handle Notifications Carefully**: Catch/ignore errors when no client is connected **Goal**: All SDK example servers provide the same interface, enabling a single test suite to verify conformance across all implementations. + +## Negative Test Cases + +### no-dns-rebinding-protection.ts + +A minimal MCP server that intentionally omits DNS rebinding protection. This is a **negative test case** that demonstrates what a vulnerable server looks like and is expected to **FAIL** the `dns-rebinding-protection` conformance scenario. 
+ +```bash +# Run the vulnerable server +npx tsx no-dns-rebinding-protection.ts + +# This should FAIL the dns-rebinding-protection checks +npx @modelcontextprotocol/conformance server \ + --url http://localhost:3003/mcp \ + --scenario dns-rebinding-protection +``` + +**DO NOT** use this pattern in production servers. Always use `createMcpExpressApp()` or the `localhostHostValidation()` middleware for localhost servers. diff --git a/examples/servers/typescript/everything-server.ts b/examples/servers/typescript/everything-server.ts index 9dd382a..374e48c 100644 --- a/examples/servers/typescript/everything-server.ts +++ b/examples/servers/typescript/everything-server.ts @@ -18,6 +18,7 @@ import { EventId, StreamId } from '@modelcontextprotocol/sdk/server/streamableHttp.js'; +import { createMcpExpressApp } from '@modelcontextprotocol/sdk/server/express.js'; import { ElicitResultSchema, ListToolsRequestSchema, @@ -26,7 +27,6 @@ import { } from '@modelcontextprotocol/sdk/types.js'; import { zodToJsonSchema } from 'zod-to-json-schema'; import { z } from 'zod'; -import express from 'express'; import cors from 'cors'; import { randomUUID } from 'crypto'; @@ -1055,8 +1055,8 @@ function isInitializeRequest(body: any): boolean { // ===== EXPRESS APP ===== -const app = express(); -app.use(express.json()); +// Use createMcpExpressApp for DNS rebinding protection on localhost +const app = createMcpExpressApp(); // Configure CORS to expose Mcp-Session-Id header for browser-based clients app.use( diff --git a/examples/servers/typescript/no-dns-rebinding-protection.ts b/examples/servers/typescript/no-dns-rebinding-protection.ts new file mode 100644 index 0000000..1e0fc3c --- /dev/null +++ b/examples/servers/typescript/no-dns-rebinding-protection.ts @@ -0,0 +1,71 @@ +#!/usr/bin/env node + +/** + * MCP Server WITHOUT DNS Rebinding Protection - Negative Test Case + * + * This is the simplest possible vulnerable server to demonstrate what happens + * when DNS rebinding protection is 
omitted. DO NOT use this pattern in production. + * + * This server should FAIL the dns-rebinding-protection conformance scenario. + */ + +import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js'; +import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js'; +import express from 'express'; + +// Create a fresh MCP server per request to avoid "Already connected" errors +// after the v1.26.0 security fix (GHSA-345p-7cg4-v4c7) +function createMcpServer() { + const server = new McpServer({ + name: 'no-dns-rebinding-protection-server', + version: '1.0.0' + }); + + server.registerTool( + 'echo', + { + description: 'Echo the input back', + inputSchema: { message: { type: 'string' } } + }, + async ({ message }) => ({ + content: [{ type: 'text', text: `Echo: ${message}` }] + }) + ); + + return server; +} + +// === VULNERABLE EXPRESS APP === +// This intentionally does NOT use createMcpExpressApp() or localhostHostValidation() +const app = express(); +app.use(express.json()); +// NO DNS rebinding protection middleware here! + +app.post('/mcp', async (req, res) => { + try { + const server = createMcpServer(); + // Stateless: no session ID + const transport = new StreamableHTTPServerTransport({ + sessionIdGenerator: undefined + }); + await server.connect(transport); + await transport.handleRequest(req, res, req.body); + } catch (error) { + if (!res.headersSent) { + res.status(500).json({ + jsonrpc: '2.0', + error: { + code: -32603, + message: `Internal error: ${error instanceof Error ? 
error.message : String(error)}` + }, + id: null + }); + } + } +}); + +const PORT = parseInt(process.env.PORT || '3003', 10); +app.listen(PORT, '127.0.0.1', () => { + console.log(`Vulnerable server running on http://localhost:${PORT}/mcp`); + console.log(`WARNING: No DNS rebinding protection enabled!`); +}); diff --git a/examples/servers/typescript/package.json b/examples/servers/typescript/package.json index 5819d2a..eab4eb7 100644 --- a/examples/servers/typescript/package.json +++ b/examples/servers/typescript/package.json @@ -15,7 +15,7 @@ "testing" ], "dependencies": { - "@modelcontextprotocol/sdk": "^1.24.0", + "@modelcontextprotocol/sdk": "^1.25.2", "@types/cors": "^2.8.19", "cors": "^2.8.5", "express": "^5.2.1", diff --git a/package-lock.json b/package-lock.json index 5335f3f..2b9252e 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,23 +1,27 @@ { "name": "@modelcontextprotocol/conformance", - "version": "0.1.8", + "version": "0.1.14", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@modelcontextprotocol/conformance", - "version": "0.1.8", + "version": "0.1.14", "license": "MIT", "dependencies": { - "@modelcontextprotocol/sdk": "^1.25.1", + "@modelcontextprotocol/sdk": "^1.26.0", + "@octokit/rest": "^22.0.0", "commander": "^14.0.2", "eventsource-parser": "^3.0.6", "express": "^5.1.0", "jose": "^6.1.2", + "undici": "^7.19.0", + "yaml": "^2.8.2", "zod": "^3.25.76" }, "bin": { - "conformance": "dist/index.js" + "conformance": "dist/index.js", + "fake-auth-server": "dist/fake-auth-server.js" }, "devDependencies": { "@eslint/js": "^9.39.1", @@ -37,14 +41,14 @@ } }, "node_modules/@babel/generator": { - "version": "7.28.5", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz", - "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==", + "version": "7.29.1", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.29.1.tgz", + 
"integrity": "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==", "dev": true, "license": "MIT", "dependencies": { - "@babel/parser": "^7.28.5", - "@babel/types": "^7.28.5", + "@babel/parser": "^7.29.0", + "@babel/types": "^7.29.0", "@jridgewell/gen-mapping": "^0.3.12", "@jridgewell/trace-mapping": "^0.3.28", "jsesc": "^3.0.2" @@ -74,13 +78,13 @@ } }, "node_modules/@babel/parser": { - "version": "7.28.5", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz", - "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==", + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.0.tgz", + "integrity": "sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==", "dev": true, "license": "MIT", "dependencies": { - "@babel/types": "^7.28.5" + "@babel/types": "^7.29.0" }, "bin": { "parser": "bin/babel-parser.js" @@ -90,9 +94,9 @@ } }, "node_modules/@babel/types": { - "version": "7.28.5", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz", - "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==", + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", "dev": true, "license": "MIT", "dependencies": { @@ -104,9 +108,9 @@ } }, "node_modules/@emnapi/core": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.6.0.tgz", - "integrity": "sha512-zq/ay+9fNIJJtJiZxdTnXS20PllcYMX3OE23ESc4HK/bdYu3cOWYVhsOhVnXALfU/uqJIxn5NBPd9z4v+SfoSg==", + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.8.1.tgz", + "integrity": 
"sha512-AvT9QFpxK0Zd8J0jopedNm+w/2fIzvtPKPjqyw9jwvBaReTTqPBk9Hixaz7KbjimP+QNz605/XnjFcDAL2pqBg==", "dev": true, "license": "MIT", "optional": true, @@ -116,9 +120,9 @@ } }, "node_modules/@emnapi/runtime": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.6.0.tgz", - "integrity": "sha512-obtUmAHTMjll499P+D9A3axeJFlhdjOWdKUNs/U6QIGT7V5RjcUW1xToAzjvmgTSQhDbYn/NwfTRoJcQ2rNBxA==", + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.8.1.tgz", + "integrity": "sha512-mehfKSMWjjNol8659Z8KxEMrdSJDDot5SXMq00dM8BN4o+CLNXQ0xH2V7EchNHV4RmbZLmmPdEaXZc5H2FXmDg==", "dev": true, "license": "MIT", "optional": true, @@ -138,9 +142,9 @@ } }, "node_modules/@esbuild/aix-ppc64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.11.tgz", - "integrity": "sha512-Xt1dOL13m8u0WE8iplx9Ibbm+hFAO0GsU2P34UNoDGvZYkY8ifSiy6Zuc1lYxfG7svWE2fzqCUmFp5HCn51gJg==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.3.tgz", + "integrity": "sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==", "cpu": [ "ppc64" ], @@ -155,9 +159,9 @@ } }, "node_modules/@esbuild/android-arm": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.11.tgz", - "integrity": "sha512-uoa7dU+Dt3HYsethkJ1k6Z9YdcHjTrSb5NUy66ZfZaSV8hEYGD5ZHbEMXnqLFlbBflLsl89Zke7CAdDJ4JI+Gg==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.3.tgz", + "integrity": "sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==", "cpu": [ "arm" ], @@ -172,9 +176,9 @@ } }, "node_modules/@esbuild/android-arm64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.11.tgz", - "integrity": 
"sha512-9slpyFBc4FPPz48+f6jyiXOx/Y4v34TUeDDXJpZqAWQn/08lKGeD8aDp9TMn9jDz2CiEuHwfhRmGBvpnd/PWIQ==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.3.tgz", + "integrity": "sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==", "cpu": [ "arm64" ], @@ -189,9 +193,9 @@ } }, "node_modules/@esbuild/android-x64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.11.tgz", - "integrity": "sha512-Sgiab4xBjPU1QoPEIqS3Xx+R2lezu0LKIEcYe6pftr56PqPygbB7+szVnzoShbx64MUupqoE0KyRlN7gezbl8g==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.3.tgz", + "integrity": "sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==", "cpu": [ "x64" ], @@ -206,9 +210,9 @@ } }, "node_modules/@esbuild/darwin-arm64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.11.tgz", - "integrity": "sha512-VekY0PBCukppoQrycFxUqkCojnTQhdec0vevUL/EDOCnXd9LKWqD/bHwMPzigIJXPhC59Vd1WFIL57SKs2mg4w==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.3.tgz", + "integrity": "sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==", "cpu": [ "arm64" ], @@ -223,9 +227,9 @@ } }, "node_modules/@esbuild/darwin-x64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.11.tgz", - "integrity": "sha512-+hfp3yfBalNEpTGp9loYgbknjR695HkqtY3d3/JjSRUyPg/xd6q+mQqIb5qdywnDxRZykIHs3axEqU6l1+oWEQ==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.3.tgz", + "integrity": "sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==", "cpu": [ "x64" ], @@ -240,9 +244,9 @@ } }, 
"node_modules/@esbuild/freebsd-arm64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.11.tgz", - "integrity": "sha512-CmKjrnayyTJF2eVuO//uSjl/K3KsMIeYeyN7FyDBjsR3lnSJHaXlVoAK8DZa7lXWChbuOk7NjAc7ygAwrnPBhA==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.3.tgz", + "integrity": "sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==", "cpu": [ "arm64" ], @@ -257,9 +261,9 @@ } }, "node_modules/@esbuild/freebsd-x64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.11.tgz", - "integrity": "sha512-Dyq+5oscTJvMaYPvW3x3FLpi2+gSZTCE/1ffdwuM6G1ARang/mb3jvjxs0mw6n3Lsw84ocfo9CrNMqc5lTfGOw==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.3.tgz", + "integrity": "sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==", "cpu": [ "x64" ], @@ -274,9 +278,9 @@ } }, "node_modules/@esbuild/linux-arm": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.11.tgz", - "integrity": "sha512-TBMv6B4kCfrGJ8cUPo7vd6NECZH/8hPpBHHlYI3qzoYFvWu2AdTvZNuU/7hsbKWqu/COU7NIK12dHAAqBLLXgw==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.3.tgz", + "integrity": "sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==", "cpu": [ "arm" ], @@ -291,9 +295,9 @@ } }, "node_modules/@esbuild/linux-arm64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.11.tgz", - "integrity": "sha512-Qr8AzcplUhGvdyUF08A1kHU3Vr2O88xxP0Tm8GcdVOUm25XYcMPp2YqSVHbLuXzYQMf9Bh/iKx7YPqECs6ffLA==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.3.tgz", + 
"integrity": "sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==", "cpu": [ "arm64" ], @@ -308,9 +312,9 @@ } }, "node_modules/@esbuild/linux-ia32": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.11.tgz", - "integrity": "sha512-TmnJg8BMGPehs5JKrCLqyWTVAvielc615jbkOirATQvWWB1NMXY77oLMzsUjRLa0+ngecEmDGqt5jiDC6bfvOw==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.3.tgz", + "integrity": "sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==", "cpu": [ "ia32" ], @@ -325,9 +329,9 @@ } }, "node_modules/@esbuild/linux-loong64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.11.tgz", - "integrity": "sha512-DIGXL2+gvDaXlaq8xruNXUJdT5tF+SBbJQKbWy/0J7OhU8gOHOzKmGIlfTTl6nHaCOoipxQbuJi7O++ldrxgMw==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.3.tgz", + "integrity": "sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==", "cpu": [ "loong64" ], @@ -342,9 +346,9 @@ } }, "node_modules/@esbuild/linux-mips64el": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.11.tgz", - "integrity": "sha512-Osx1nALUJu4pU43o9OyjSCXokFkFbyzjXb6VhGIJZQ5JZi8ylCQ9/LFagolPsHtgw6himDSyb5ETSfmp4rpiKQ==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.3.tgz", + "integrity": "sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==", "cpu": [ "mips64el" ], @@ -359,9 +363,9 @@ } }, "node_modules/@esbuild/linux-ppc64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.11.tgz", - "integrity": 
"sha512-nbLFgsQQEsBa8XSgSTSlrnBSrpoWh7ioFDUmwo158gIm5NNP+17IYmNWzaIzWmgCxq56vfr34xGkOcZ7jX6CPw==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.3.tgz", + "integrity": "sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==", "cpu": [ "ppc64" ], @@ -376,9 +380,9 @@ } }, "node_modules/@esbuild/linux-riscv64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.11.tgz", - "integrity": "sha512-HfyAmqZi9uBAbgKYP1yGuI7tSREXwIb438q0nqvlpxAOs3XnZ8RsisRfmVsgV486NdjD7Mw2UrFSw51lzUk1ww==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.3.tgz", + "integrity": "sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==", "cpu": [ "riscv64" ], @@ -393,9 +397,9 @@ } }, "node_modules/@esbuild/linux-s390x": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.11.tgz", - "integrity": "sha512-HjLqVgSSYnVXRisyfmzsH6mXqyvj0SA7pG5g+9W7ESgwA70AXYNpfKBqh1KbTxmQVaYxpzA/SvlB9oclGPbApw==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.3.tgz", + "integrity": "sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==", "cpu": [ "s390x" ], @@ -410,9 +414,9 @@ } }, "node_modules/@esbuild/linux-x64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.11.tgz", - "integrity": "sha512-HSFAT4+WYjIhrHxKBwGmOOSpphjYkcswF449j6EjsjbinTZbp8PJtjsVK1XFJStdzXdy/jaddAep2FGY+wyFAQ==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.3.tgz", + "integrity": "sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==", "cpu": [ "x64" ], @@ -427,9 +431,9 @@ } }, 
"node_modules/@esbuild/netbsd-arm64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.11.tgz", - "integrity": "sha512-hr9Oxj1Fa4r04dNpWr3P8QKVVsjQhqrMSUzZzf+LZcYjZNqhA3IAfPQdEh1FLVUJSiu6sgAwp3OmwBfbFgG2Xg==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.3.tgz", + "integrity": "sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==", "cpu": [ "arm64" ], @@ -444,9 +448,9 @@ } }, "node_modules/@esbuild/netbsd-x64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.11.tgz", - "integrity": "sha512-u7tKA+qbzBydyj0vgpu+5h5AeudxOAGncb8N6C9Kh1N4n7wU1Xw1JDApsRjpShRpXRQlJLb9wY28ELpwdPcZ7A==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.3.tgz", + "integrity": "sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==", "cpu": [ "x64" ], @@ -461,9 +465,9 @@ } }, "node_modules/@esbuild/openbsd-arm64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.11.tgz", - "integrity": "sha512-Qq6YHhayieor3DxFOoYM1q0q1uMFYb7cSpLD2qzDSvK1NAvqFi8Xgivv0cFC6J+hWVw2teCYltyy9/m/14ryHg==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.3.tgz", + "integrity": "sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==", "cpu": [ "arm64" ], @@ -478,9 +482,9 @@ } }, "node_modules/@esbuild/openbsd-x64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.11.tgz", - "integrity": "sha512-CN+7c++kkbrckTOz5hrehxWN7uIhFFlmS/hqziSFVWpAzpWrQoAG4chH+nN3Be+Kzv/uuo7zhX716x3Sn2Jduw==", + "version": "0.27.3", + "resolved": 
"https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.3.tgz", + "integrity": "sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==", "cpu": [ "x64" ], @@ -495,9 +499,9 @@ } }, "node_modules/@esbuild/openharmony-arm64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.11.tgz", - "integrity": "sha512-rOREuNIQgaiR+9QuNkbkxubbp8MSO9rONmwP5nKncnWJ9v5jQ4JxFnLu4zDSRPf3x4u+2VN4pM4RdyIzDty/wQ==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.3.tgz", + "integrity": "sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==", "cpu": [ "arm64" ], @@ -512,9 +516,9 @@ } }, "node_modules/@esbuild/sunos-x64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.11.tgz", - "integrity": "sha512-nq2xdYaWxyg9DcIyXkZhcYulC6pQ2FuCgem3LI92IwMgIZ69KHeY8T4Y88pcwoLIjbed8n36CyKoYRDygNSGhA==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.3.tgz", + "integrity": "sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==", "cpu": [ "x64" ], @@ -529,9 +533,9 @@ } }, "node_modules/@esbuild/win32-arm64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.11.tgz", - "integrity": "sha512-3XxECOWJq1qMZ3MN8srCJ/QfoLpL+VaxD/WfNRm1O3B4+AZ/BnLVgFbUV3eiRYDMXetciH16dwPbbHqwe1uU0Q==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.3.tgz", + "integrity": "sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==", "cpu": [ "arm64" ], @@ -546,9 +550,9 @@ } }, "node_modules/@esbuild/win32-ia32": { - "version": "0.25.11", - "resolved": 
"https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.11.tgz", - "integrity": "sha512-3ukss6gb9XZ8TlRyJlgLn17ecsK4NSQTmdIXRASVsiS2sQ6zPPZklNJT5GR5tE/MUarymmy8kCEf5xPCNCqVOA==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.3.tgz", + "integrity": "sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==", "cpu": [ "ia32" ], @@ -563,9 +567,9 @@ } }, "node_modules/@esbuild/win32-x64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.11.tgz", - "integrity": "sha512-D7Hpz6A2L4hzsRpPaCYkQnGOotdUpDzSGRIv9I+1ITdHROSFUWW95ZPZWQmGka1Fg7W3zFJowyn9WGwMJ0+KPA==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.3.tgz", + "integrity": "sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==", "cpu": [ "x64" ], @@ -580,9 +584,9 @@ } }, "node_modules/@eslint-community/eslint-utils": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz", - "integrity": "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", + "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", "dev": true, "license": "MIT", "dependencies": { @@ -649,7 +653,7 @@ "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, - "node_modules/@eslint/config-helpers/node_modules/@eslint/core": { + "node_modules/@eslint/core": { "version": "0.17.0", "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.17.0.tgz", "integrity": "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==", @@ -662,23 +666,10 @@ "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, 
- "node_modules/@eslint/core": { - "version": "0.16.0", - "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.16.0.tgz", - "integrity": "sha512-nmC8/totwobIiFcGkDza3GIKfAw1+hLiYVrh3I1nIomQ8PEr5cxg34jnkmGawul/ep52wGRAcyeDCNtWKSOj4Q==", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "@types/json-schema": "^7.0.15" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - } - }, "node_modules/@eslint/eslintrc": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.1.tgz", - "integrity": "sha512-gtF186CXhIl1p4pJNGZw8Yc6RlshoePRvE0X91oPGb3vZ8pM3qOS9W9NGPat9LziaBV7XrJWGylNQXkGcnM3IQ==", + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.3.tgz", + "integrity": "sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==", "dev": true, "license": "MIT", "dependencies": { @@ -688,7 +679,7 @@ "globals": "^14.0.0", "ignore": "^5.2.0", "import-fresh": "^3.2.1", - "js-yaml": "^4.1.0", + "js-yaml": "^4.1.1", "minimatch": "^3.1.2", "strip-json-comments": "^3.1.1" }, @@ -699,10 +690,34 @@ "url": "https://opencollective.com/eslint" } }, + "node_modules/@eslint/eslintrc/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/@eslint/eslintrc/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": 
"sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, "node_modules/@eslint/js": { - "version": "9.39.1", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.1.tgz", - "integrity": "sha512-S26Stp4zCy88tH94QbBv3XCuzRQiZ9yXofEILmglYTh/Ug/a9/umqvgFtYBAo3Lp0nsI/5/qH1CCrbdK3AP1Tw==", + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.2.tgz", + "integrity": "sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==", "dev": true, "license": "MIT", "engines": { @@ -736,23 +751,10 @@ "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, - "node_modules/@eslint/plugin-kit/node_modules/@eslint/core": { - "version": "0.17.0", - "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.17.0.tgz", - "integrity": "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "@types/json-schema": "^7.0.15" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - } - }, "node_modules/@hono/node-server": { - "version": "1.19.7", - "resolved": "https://registry.npmjs.org/@hono/node-server/-/node-server-1.19.7.tgz", - "integrity": "sha512-vUcD0uauS7EU2caukW8z5lJKtoGMokxNbJtBiwHgpqxEXokaHCBkQUmCHhjFB1VUTWdqj25QoMkMKzgjq+uhrw==", + "version": "1.19.9", + "resolved": "https://registry.npmjs.org/@hono/node-server/-/node-server-1.19.9.tgz", + "integrity": "sha512-vHL6w3ecZsky+8P5MD+eFfaGTyCeOHUIFYMGpQGbrBTSmNNoxv0if69rEZ5giu36weC5saFuznL411gRX7bJDw==", "license": "MIT", "engines": { "node": ">=18.14.1" @@ -853,12 +855,12 @@ } }, "node_modules/@modelcontextprotocol/sdk": { - "version": "1.25.1", - "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.25.1.tgz", - "integrity": "sha512-yO28oVFFC7EBoiKdAn+VqRm+plcfv4v0xp6osG/VsCB0NlPZWi87ajbCZZ8f/RvOFLEu7//rSRmuZZ7lMoe3gQ==", + "version": "1.26.0", + 
"resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.26.0.tgz", + "integrity": "sha512-Y5RmPncpiDtTXDbLKswIJzTqu2hyBKxTNsgKqKclDbhIgg1wgtf1fRuvxgTnRfcnxtvvgbIEcqUOzZrJ6iSReg==", "license": "MIT", "dependencies": { - "@hono/node-server": "^1.19.7", + "@hono/node-server": "^1.19.9", "ajv": "^8.17.1", "ajv-formats": "^3.0.1", "content-type": "^1.0.5", @@ -866,14 +868,15 @@ "cross-spawn": "^7.0.5", "eventsource": "^3.0.2", "eventsource-parser": "^3.0.0", - "express": "^5.0.1", - "express-rate-limit": "^7.5.0", - "jose": "^6.1.1", + "express": "^5.2.1", + "express-rate-limit": "^8.2.1", + "hono": "^4.11.4", + "jose": "^6.1.3", "json-schema-typed": "^8.0.2", "pkce-challenge": "^5.0.0", "raw-body": "^3.0.0", "zod": "^3.25 || ^4.0", - "zod-to-json-schema": "^3.25.0" + "zod-to-json-schema": "^3.25.1" }, "engines": { "node": ">=18" @@ -891,39 +894,175 @@ } } }, - "node_modules/@modelcontextprotocol/sdk/node_modules/ajv": { - "version": "8.17.1", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", - "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + "node_modules/@napi-rs/wasm-runtime": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-1.1.1.tgz", + "integrity": "sha512-p64ah1M1ld8xjWv3qbvFwHiFVWrq1yFvV4f7w+mzaqiR4IlSgkqhcRdHwsGgomwzBH51sRY4NEowLxnaBjcW/A==", + "dev": true, "license": "MIT", + "optional": true, "dependencies": { - "fast-deep-equal": "^3.1.3", - "fast-uri": "^3.0.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2" + "@emnapi/core": "^1.7.1", + "@emnapi/runtime": "^1.7.1", + "@tybys/wasm-util": "^0.10.1" }, "funding": { "type": "github", - "url": "https://github.com/sponsors/epoberezkin" + "url": "https://github.com/sponsors/Brooooooklyn" } }, - "node_modules/@modelcontextprotocol/sdk/node_modules/json-schema-traverse": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "node_modules/@octokit/auth-token": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-6.0.0.tgz", + "integrity": "sha512-P4YJBPdPSpWTQ1NU4XYdvHvXJJDxM6YwpS0FZHRgP7YFkdVxsWcpWGy/NVqlAA7PcPCnMacXlRm1y2PFZRWL/w==", + "license": "MIT", + "engines": { + "node": ">= 20" + } + }, + "node_modules/@octokit/core": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/@octokit/core/-/core-7.0.6.tgz", + "integrity": "sha512-DhGl4xMVFGVIyMwswXeyzdL4uXD5OGILGX5N8Y+f6W7LhC1Ze2poSNrkF/fedpVDHEEZ+PHFW0vL14I+mm8K3Q==", + "license": "MIT", + "dependencies": { + "@octokit/auth-token": "^6.0.0", + "@octokit/graphql": "^9.0.3", + "@octokit/request": "^10.0.6", + "@octokit/request-error": "^7.0.2", + "@octokit/types": "^16.0.0", + "before-after-hook": "^4.0.0", + "universal-user-agent": "^7.0.0" + }, + "engines": { + "node": ">= 20" + } + }, + "node_modules/@octokit/endpoint": { + "version": "11.0.2", + "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-11.0.2.tgz", + "integrity": "sha512-4zCpzP1fWc7QlqunZ5bSEjxc6yLAlRTnDwKtgXfcI/FxxGoqedDG8V2+xJ60bV2kODqcGB+nATdtap/XYq2NZQ==", + "license": "MIT", + "dependencies": { + "@octokit/types": "^16.0.0", + "universal-user-agent": "^7.0.2" + }, + "engines": { + "node": ">= 20" + } + }, + "node_modules/@octokit/graphql": { + "version": "9.0.3", + "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-9.0.3.tgz", + "integrity": "sha512-grAEuupr/C1rALFnXTv6ZQhFuL1D8G5y8CN04RgrO4FIPMrtm+mcZzFG7dcBm+nq+1ppNixu+Jd78aeJOYxlGA==", + "license": "MIT", + "dependencies": { + "@octokit/request": "^10.0.6", + "@octokit/types": "^16.0.0", + "universal-user-agent": "^7.0.0" + }, + "engines": { + "node": ">= 20" + } + }, + "node_modules/@octokit/openapi-types": { + "version": 
"27.0.0", + "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-27.0.0.tgz", + "integrity": "sha512-whrdktVs1h6gtR+09+QsNk2+FO+49j6ga1c55YZudfEG+oKJVvJLQi3zkOm5JjiUXAagWK2tI2kTGKJ2Ys7MGA==", "license": "MIT" }, - "node_modules/@napi-rs/wasm-runtime": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-1.0.7.tgz", - "integrity": "sha512-SeDnOO0Tk7Okiq6DbXmmBODgOAb9dp9gjlphokTUxmt8U3liIP1ZsozBahH69j/RJv+Rfs6IwUKHTgQYJ/HBAw==", - "dev": true, + "node_modules/@octokit/plugin-paginate-rest": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-14.0.0.tgz", + "integrity": "sha512-fNVRE7ufJiAA3XUrha2omTA39M6IXIc6GIZLvlbsm8QOQCYvpq/LkMNGyFlB1d8hTDzsAXa3OKtybdMAYsV/fw==", "license": "MIT", - "optional": true, "dependencies": { - "@emnapi/core": "^1.5.0", - "@emnapi/runtime": "^1.5.0", - "@tybys/wasm-util": "^0.10.1" + "@octokit/types": "^16.0.0" + }, + "engines": { + "node": ">= 20" + }, + "peerDependencies": { + "@octokit/core": ">=6" + } + }, + "node_modules/@octokit/plugin-request-log": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/@octokit/plugin-request-log/-/plugin-request-log-6.0.0.tgz", + "integrity": "sha512-UkOzeEN3W91/eBq9sPZNQ7sUBvYCqYbrrD8gTbBuGtHEuycE4/awMXcYvx6sVYo7LypPhmQwwpUe4Yyu4QZN5Q==", + "license": "MIT", + "engines": { + "node": ">= 20" + }, + "peerDependencies": { + "@octokit/core": ">=6" + } + }, + "node_modules/@octokit/plugin-rest-endpoint-methods": { + "version": "17.0.0", + "resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-17.0.0.tgz", + "integrity": "sha512-B5yCyIlOJFPqUUeiD0cnBJwWJO8lkJs5d8+ze9QDP6SvfiXSz1BF+91+0MeI1d2yxgOhU/O+CvtiZ9jSkHhFAw==", + "license": "MIT", + "dependencies": { + "@octokit/types": "^16.0.0" + }, + "engines": { + "node": ">= 20" + }, + "peerDependencies": { + "@octokit/core": ">=6" + } + }, + 
"node_modules/@octokit/request": { + "version": "10.0.7", + "resolved": "https://registry.npmjs.org/@octokit/request/-/request-10.0.7.tgz", + "integrity": "sha512-v93h0i1yu4idj8qFPZwjehoJx4j3Ntn+JhXsdJrG9pYaX6j/XRz2RmasMUHtNgQD39nrv/VwTWSqK0RNXR8upA==", + "license": "MIT", + "dependencies": { + "@octokit/endpoint": "^11.0.2", + "@octokit/request-error": "^7.0.2", + "@octokit/types": "^16.0.0", + "fast-content-type-parse": "^3.0.0", + "universal-user-agent": "^7.0.2" + }, + "engines": { + "node": ">= 20" + } + }, + "node_modules/@octokit/request-error": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-7.1.0.tgz", + "integrity": "sha512-KMQIfq5sOPpkQYajXHwnhjCC0slzCNScLHs9JafXc4RAJI+9f+jNDlBNaIMTvazOPLgb4BnlhGJOTbnN0wIjPw==", + "license": "MIT", + "dependencies": { + "@octokit/types": "^16.0.0" + }, + "engines": { + "node": ">= 20" + } + }, + "node_modules/@octokit/rest": { + "version": "22.0.1", + "resolved": "https://registry.npmjs.org/@octokit/rest/-/rest-22.0.1.tgz", + "integrity": "sha512-Jzbhzl3CEexhnivb1iQ0KJ7s5vvjMWcmRtq5aUsKmKDrRW6z3r84ngmiFKFvpZjpiU/9/S6ITPFRpn5s/3uQJw==", + "license": "MIT", + "dependencies": { + "@octokit/core": "^7.0.6", + "@octokit/plugin-paginate-rest": "^14.0.0", + "@octokit/plugin-request-log": "^6.0.0", + "@octokit/plugin-rest-endpoint-methods": "^17.0.0" + }, + "engines": { + "node": ">= 20" + } + }, + "node_modules/@octokit/types": { + "version": "16.0.0", + "resolved": "https://registry.npmjs.org/@octokit/types/-/types-16.0.0.tgz", + "integrity": "sha512-sKq+9r1Mm4efXW1FCk7hFSeJo4QKreL/tTbR0rz/qx/r1Oa2VV83LTA/H/MuCOX7uCIJmQVRKBcbmWoySjAnSg==", + "license": "MIT", + "dependencies": { + "@octokit/openapi-types": "^27.0.0" } }, "node_modules/@oxc-project/types": { @@ -937,13 +1076,13 @@ } }, "node_modules/@quansync/fs": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/@quansync/fs/-/fs-0.1.5.tgz", - "integrity": 
"sha512-lNS9hL2aS2NZgNW7BBj+6EBl4rOf8l+tQ0eRY6JWCI8jI2kc53gSoqbjojU0OnAWhzoXiOjFyGsHcDGePB3lhA==", + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@quansync/fs/-/fs-1.0.0.tgz", + "integrity": "sha512-4TJ3DFtlf1L5LDMaM6CanJ/0lckGNtJcMjQ1NAV6zDmA0tEHKZtxNKin8EgPaVX1YzljbxckyT2tJrpQKAtngQ==", "dev": true, "license": "MIT", "dependencies": { - "quansync": "^0.2.11" + "quansync": "^1.0.0" }, "funding": { "url": "https://github.com/sponsors/sxzz" @@ -1195,9 +1334,9 @@ "license": "MIT" }, "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.55.1.tgz", - "integrity": "sha512-9R0DM/ykwfGIlNu6+2U09ga0WXeZ9MRC2Ter8jnz8415VbuIykVuc6bhdrbORFZANDmTDvq26mJrEVTl8TdnDg==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.57.1.tgz", + "integrity": "sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg==", "cpu": [ "arm" ], @@ -1209,9 +1348,9 @@ ] }, "node_modules/@rollup/rollup-android-arm64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.55.1.tgz", - "integrity": "sha512-eFZCb1YUqhTysgW3sj/55du5cG57S7UTNtdMjCW7LwVcj3dTTcowCsC8p7uBdzKsZYa8J7IDE8lhMI+HX1vQvg==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.57.1.tgz", + "integrity": "sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w==", "cpu": [ "arm64" ], @@ -1223,9 +1362,9 @@ ] }, "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.55.1.tgz", - "integrity": "sha512-p3grE2PHcQm2e8PSGZdzIhCKbMCw/xi9XvMPErPhwO17vxtvCN5FEA2mSLgmKlCjHGMQTP6phuQTYWUnKewwGg==", + "version": "4.57.1", + 
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.57.1.tgz", + "integrity": "sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg==", "cpu": [ "arm64" ], @@ -1237,9 +1376,9 @@ ] }, "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.55.1.tgz", - "integrity": "sha512-rDUjG25C9qoTm+e02Esi+aqTKSBYwVTaoS1wxcN47/Luqef57Vgp96xNANwt5npq9GDxsH7kXxNkJVEsWEOEaQ==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.57.1.tgz", + "integrity": "sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w==", "cpu": [ "x64" ], @@ -1251,9 +1390,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-arm64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.55.1.tgz", - "integrity": "sha512-+JiU7Jbp5cdxekIgdte0jfcu5oqw4GCKr6i3PJTlXTCU5H5Fvtkpbs4XJHRmWNXF+hKmn4v7ogI5OQPaupJgOg==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.57.1.tgz", + "integrity": "sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug==", "cpu": [ "arm64" ], @@ -1265,9 +1404,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-x64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.55.1.tgz", - "integrity": "sha512-V5xC1tOVWtLLmr3YUk2f6EJK4qksksOYiz/TCsFHu/R+woubcLWdC9nZQmwjOAbmExBIVKsm1/wKmEy4z4u4Bw==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.57.1.tgz", + "integrity": "sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q==", "cpu": [ "x64" ], @@ -1279,9 +1418,9 @@ ] }, 
"node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.55.1.tgz", - "integrity": "sha512-Rn3n+FUk2J5VWx+ywrG/HGPTD9jXNbicRtTM11e/uorplArnXZYsVifnPPqNNP5BsO3roI4n8332ukpY/zN7rQ==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.57.1.tgz", + "integrity": "sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==", "cpu": [ "arm" ], @@ -1293,9 +1432,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.55.1.tgz", - "integrity": "sha512-grPNWydeKtc1aEdrJDWk4opD7nFtQbMmV7769hiAaYyUKCT1faPRm2av8CX1YJsZ4TLAZcg9gTR1KvEzoLjXkg==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.57.1.tgz", + "integrity": "sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==", "cpu": [ "arm" ], @@ -1307,9 +1446,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.55.1.tgz", - "integrity": "sha512-a59mwd1k6x8tXKcUxSyISiquLwB5pX+fJW9TkWU46lCqD/GRDe9uDN31jrMmVP3feI3mhAdvcCClhV8V5MhJFQ==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.57.1.tgz", + "integrity": "sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==", "cpu": [ "arm64" ], @@ -1321,9 +1460,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.55.1", - "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.55.1.tgz", - "integrity": "sha512-puS1MEgWX5GsHSoiAsF0TYrpomdvkaXm0CofIMG5uVkP6IBV+ZO9xhC5YEN49nsgYo1DuuMquF9+7EDBVYu4uA==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.57.1.tgz", + "integrity": "sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==", "cpu": [ "arm64" ], @@ -1335,9 +1474,9 @@ ] }, "node_modules/@rollup/rollup-linux-loong64-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.55.1.tgz", - "integrity": "sha512-r3Wv40in+lTsULSb6nnoudVbARdOwb2u5fpeoOAZjFLznp6tDU8kd+GTHmJoqZ9lt6/Sys33KdIHUaQihFcu7g==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.57.1.tgz", + "integrity": "sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==", "cpu": [ "loong64" ], @@ -1349,9 +1488,9 @@ ] }, "node_modules/@rollup/rollup-linux-loong64-musl": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.55.1.tgz", - "integrity": "sha512-MR8c0+UxAlB22Fq4R+aQSPBayvYa3+9DrwG/i1TKQXFYEaoW3B5b/rkSRIypcZDdWjWnpcvxbNaAJDcSbJU3Lw==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.57.1.tgz", + "integrity": "sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==", "cpu": [ "loong64" ], @@ -1363,9 +1502,9 @@ ] }, "node_modules/@rollup/rollup-linux-ppc64-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.55.1.tgz", - "integrity": 
"sha512-3KhoECe1BRlSYpMTeVrD4sh2Pw2xgt4jzNSZIIPLFEsnQn9gAnZagW9+VqDqAHgm1Xc77LzJOo2LdigS5qZ+gw==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.57.1.tgz", + "integrity": "sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==", "cpu": [ "ppc64" ], @@ -1377,9 +1516,9 @@ ] }, "node_modules/@rollup/rollup-linux-ppc64-musl": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.55.1.tgz", - "integrity": "sha512-ziR1OuZx0vdYZZ30vueNZTg73alF59DicYrPViG0NEgDVN8/Jl87zkAPu4u6VjZST2llgEUjaiNl9JM6HH1Vdw==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.57.1.tgz", + "integrity": "sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==", "cpu": [ "ppc64" ], @@ -1391,9 +1530,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.55.1.tgz", - "integrity": "sha512-uW0Y12ih2XJRERZ4jAfKamTyIHVMPQnTZcQjme2HMVDAHY4amf5u414OqNYC+x+LzRdRcnIG1YodLrrtA8xsxw==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.57.1.tgz", + "integrity": "sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==", "cpu": [ "riscv64" ], @@ -1405,9 +1544,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-musl": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.55.1.tgz", - "integrity": "sha512-u9yZ0jUkOED1BFrqu3BwMQoixvGHGZ+JhJNkNKY/hyoEgOwlqKb62qu+7UjbPSHYjiVy8kKJHvXKv5coH4wDeg==", + "version": "4.57.1", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.57.1.tgz", + "integrity": "sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==", "cpu": [ "riscv64" ], @@ -1419,9 +1558,9 @@ ] }, "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.55.1.tgz", - "integrity": "sha512-/0PenBCmqM4ZUd0190j7J0UsQ/1nsi735iPRakO8iPciE7BQ495Y6msPzaOmvx0/pn+eJVVlZrNrSh4WSYLxNg==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.57.1.tgz", + "integrity": "sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==", "cpu": [ "s390x" ], @@ -1433,9 +1572,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.55.1.tgz", - "integrity": "sha512-a8G4wiQxQG2BAvo+gU6XrReRRqj+pLS2NGXKm8io19goR+K8lw269eTrPkSdDTALwMmJp4th2Uh0D8J9bEV1vg==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.57.1.tgz", + "integrity": "sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==", "cpu": [ "x64" ], @@ -1447,9 +1586,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.55.1.tgz", - "integrity": "sha512-bD+zjpFrMpP/hqkfEcnjXWHMw5BIghGisOKPj+2NaNDuVT+8Ds4mPf3XcPHuat1tz89WRL+1wbcxKY3WSbiT7w==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.57.1.tgz", + "integrity": "sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==", "cpu": [ "x64" ], @@ 
-1461,9 +1600,9 @@ ] }, "node_modules/@rollup/rollup-openbsd-x64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.55.1.tgz", - "integrity": "sha512-eLXw0dOiqE4QmvikfQ6yjgkg/xDM+MdU9YJuP4ySTibXU0oAvnEWXt7UDJmD4UkYialMfOGFPJnIHSe/kdzPxg==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.57.1.tgz", + "integrity": "sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==", "cpu": [ "x64" ], @@ -1475,9 +1614,9 @@ ] }, "node_modules/@rollup/rollup-openharmony-arm64": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.55.1.tgz", - "integrity": "sha512-xzm44KgEP11te3S2HCSyYf5zIzWmx3n8HDCc7EE59+lTcswEWNpvMLfd9uJvVX8LCg9QWG67Xt75AuHn4vgsXw==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.57.1.tgz", + "integrity": "sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ==", "cpu": [ "arm64" ], @@ -1489,9 +1628,9 @@ ] }, "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.55.1.tgz", - "integrity": "sha512-yR6Bl3tMC/gBok5cz/Qi0xYnVbIxGx5Fcf/ca0eB6/6JwOY+SRUcJfI0OpeTpPls7f194as62thCt/2BjxYN8g==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.57.1.tgz", + "integrity": "sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ==", "cpu": [ "arm64" ], @@ -1503,9 +1642,9 @@ ] }, "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.55.1.tgz", - "integrity": 
"sha512-3fZBidchE0eY0oFZBnekYCfg+5wAB0mbpCBuofh5mZuzIU/4jIVkbESmd2dOsFNS78b53CYv3OAtwqkZZmU5nA==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.57.1.tgz", + "integrity": "sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew==", "cpu": [ "ia32" ], @@ -1517,9 +1656,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-gnu": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.55.1.tgz", - "integrity": "sha512-xGGY5pXj69IxKb4yv/POoocPy/qmEGhimy/FoTpTSVju3FYXUQQMFCaZZXJVidsmGxRioZAwpThl/4zX41gRKg==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.57.1.tgz", + "integrity": "sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ==", "cpu": [ "x64" ], @@ -1531,9 +1670,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.55.1.tgz", - "integrity": "sha512-SPEpaL6DX4rmcXtnhdrQYgzQ5W2uW3SCJch88lB2zImhJRhIIK44fkUrgIV/Q8yUNfw5oyZ5vkeQsZLhCb06lw==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.57.1.tgz", + "integrity": "sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA==", "cpu": [ "x64" ], @@ -1609,21 +1748,21 @@ "license": "MIT" }, "node_modules/@types/express": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/@types/express/-/express-5.0.3.tgz", - "integrity": "sha512-wGA0NX93b19/dZC1J18tKWVIYWyyF2ZjT9vin/NRu0qzzvfVzWjs04iq2rQ3H65vCTQYlRqs3YHfY7zjdV+9Kw==", + "version": "5.0.6", + "resolved": "https://registry.npmjs.org/@types/express/-/express-5.0.6.tgz", + "integrity": 
"sha512-sKYVuV7Sv9fbPIt/442koC7+IIwK5olP1KWeD88e/idgoJqDm3JV/YUiPwkoKK92ylff2MGxSz1CSjsXelx0YA==", "dev": true, "license": "MIT", "dependencies": { "@types/body-parser": "*", "@types/express-serve-static-core": "^5.0.0", - "@types/serve-static": "*" + "@types/serve-static": "^2" } }, "node_modules/@types/express-serve-static-core": { - "version": "5.0.7", - "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-5.0.7.tgz", - "integrity": "sha512-R+33OsgWw7rOhD1emjU7dzCDHucJrgJXMA5PYCzJxVil0dsyx5iBEPHqpPfiKNJQb7lZ1vxwoLR4Z87bBUpeGQ==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-5.1.1.tgz", + "integrity": "sha512-v4zIMr/cX7/d2BpAEX3KNKL/JrT1s43s96lLvvdTmza1oEvDudCqK9aF/djc/SWgy8Yh0h30TZx5VpzqFCxk5A==", "dev": true, "license": "MIT", "dependencies": { @@ -1644,13 +1783,15 @@ "version": "7.0.15", "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@types/node": { - "version": "22.18.12", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.18.12.tgz", - "integrity": "sha512-BICHQ67iqxQGFSzfCFTT7MRQ5XcBjG5aeKh5Ok38UBbPe5fxTyE+aHFxwVrGyr8GNlqFMLKD1D3P2K/1ks8tog==", + "version": "22.19.11", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.11.tgz", + "integrity": "sha512-BH7YwL6rA93ReqeQS1c4bsPpcfOmJasG+Fkr6Y59q83f9M1WcBRHR2vM+P9eOisYRcN3ujQoiZY8uk5W+1WL8w==", "dev": true, + "license": "MIT", "dependencies": { "undici-types": "~6.21.0" } @@ -1670,9 +1811,9 @@ "license": "MIT" }, "node_modules/@types/send": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@types/send/-/send-1.2.0.tgz", - "integrity": 
"sha512-zBF6vZJn1IaMpg3xUF25VK3gd3l8zwE0ZLRX7dsQyQi+jp4E8mMDJNGDYnYse+bQhYwWERTxVwHpi3dMOq7RKQ==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@types/send/-/send-1.2.1.tgz", + "integrity": "sha512-arsCikDvlU99zl1g69TcAB3mzZPpxgw0UQnaHeC1Nwb015xp8bknZv5rIfri9xTOcMuaVgvabfIRA7PSZVuZIQ==", "dev": true, "license": "MIT", "dependencies": { @@ -1736,7 +1877,6 @@ "integrity": "sha512-jCzKdm/QK0Kg4V4IK/oMlRZlY+QOcdjv89U2NgKHZk1CYTj82/RVSx1mV/0gqCVMJ/DA+Zf/S4NBWNF8GQ+eqQ==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@typescript-eslint/scope-manager": "8.48.0", "@typescript-eslint/types": "8.48.0", @@ -1949,29 +2089,28 @@ } }, "node_modules/@typescript/native-preview": { - "version": "7.0.0-dev.20251201.1", - "resolved": "https://registry.npmjs.org/@typescript/native-preview/-/native-preview-7.0.0-dev.20251201.1.tgz", - "integrity": "sha512-EiPEgGwNa2uHyyKgeoWTL6wWHKUBmF3xsfZ3OHofk7TxUuxb2mpLG5igEuaBe8iUwkCUl9uZgJvOu6o0wE5NSA==", + "version": "7.0.0-dev.20260210.1", + "resolved": "https://registry.npmjs.org/@typescript/native-preview/-/native-preview-7.0.0-dev.20260210.1.tgz", + "integrity": "sha512-vy52DLNMYVTizp02/Uu8TrHQrt3BU0b7foE7qqxPAZF63zXpwvGg1g4EAgFtu7ZDJlYrAlUqSdZg6INb/3iY6w==", "dev": true, "license": "Apache-2.0", - "peer": true, "bin": { "tsgo": "bin/tsgo.js" }, "optionalDependencies": { - "@typescript/native-preview-darwin-arm64": "7.0.0-dev.20251201.1", - "@typescript/native-preview-darwin-x64": "7.0.0-dev.20251201.1", - "@typescript/native-preview-linux-arm": "7.0.0-dev.20251201.1", - "@typescript/native-preview-linux-arm64": "7.0.0-dev.20251201.1", - "@typescript/native-preview-linux-x64": "7.0.0-dev.20251201.1", - "@typescript/native-preview-win32-arm64": "7.0.0-dev.20251201.1", - "@typescript/native-preview-win32-x64": "7.0.0-dev.20251201.1" + "@typescript/native-preview-darwin-arm64": "7.0.0-dev.20260210.1", + "@typescript/native-preview-darwin-x64": "7.0.0-dev.20260210.1", + 
"@typescript/native-preview-linux-arm": "7.0.0-dev.20260210.1", + "@typescript/native-preview-linux-arm64": "7.0.0-dev.20260210.1", + "@typescript/native-preview-linux-x64": "7.0.0-dev.20260210.1", + "@typescript/native-preview-win32-arm64": "7.0.0-dev.20260210.1", + "@typescript/native-preview-win32-x64": "7.0.0-dev.20260210.1" } }, "node_modules/@typescript/native-preview-darwin-arm64": { - "version": "7.0.0-dev.20251201.1", - "resolved": "https://registry.npmjs.org/@typescript/native-preview-darwin-arm64/-/native-preview-darwin-arm64-7.0.0-dev.20251201.1.tgz", - "integrity": "sha512-PY0BrlRF3YCZEMxzuk79IFSgpGqUErkdrW7Aq+/mF8DEET5uaDypTMb8Vz4CLYJ7Xvvxz8eZsLimPbv6hYDIvA==", + "version": "7.0.0-dev.20260210.1", + "resolved": "https://registry.npmjs.org/@typescript/native-preview-darwin-arm64/-/native-preview-darwin-arm64-7.0.0-dev.20260210.1.tgz", + "integrity": "sha512-taEYpsrCbdcyHkqNMBiVcqKR7ZHMC1jwTBM9kn3eUgOjXn68ASRrmyzYBdrujluBJMO7rl+Gm5QRT68onYt53A==", "cpu": [ "arm64" ], @@ -1983,9 +2122,9 @@ ] }, "node_modules/@typescript/native-preview-darwin-x64": { - "version": "7.0.0-dev.20251201.1", - "resolved": "https://registry.npmjs.org/@typescript/native-preview-darwin-x64/-/native-preview-darwin-x64-7.0.0-dev.20251201.1.tgz", - "integrity": "sha512-YeDrjnsvXwm/MNG8aURT3J+cmHQIhpiElBKOVOy/H6ky4S2Ro9ufG+Bj9CqS3etbTCLhV5btk+QNh86DZ4VDkQ==", + "version": "7.0.0-dev.20260210.1", + "resolved": "https://registry.npmjs.org/@typescript/native-preview-darwin-x64/-/native-preview-darwin-x64-7.0.0-dev.20260210.1.tgz", + "integrity": "sha512-TSgIk2osa3UpivKybsyglBx7KBL+vTNayagmpzYvxBXbPvBnbgGOgzE/5iHkzFJYVUFxqmuj1gopmDT9X/obaQ==", "cpu": [ "x64" ], @@ -1997,9 +2136,9 @@ ] }, "node_modules/@typescript/native-preview-linux-arm": { - "version": "7.0.0-dev.20251201.1", - "resolved": "https://registry.npmjs.org/@typescript/native-preview-linux-arm/-/native-preview-linux-arm-7.0.0-dev.20251201.1.tgz", - "integrity": 
"sha512-gr2EQYK888YdGROMc7l3N3MeKY1V3QVImKIQZNgqprV+N2rXaFnxGAZ+gql3LqZgRGel4a12vCUJeP7Pjl2gww==", + "version": "7.0.0-dev.20260210.1", + "resolved": "https://registry.npmjs.org/@typescript/native-preview-linux-arm/-/native-preview-linux-arm-7.0.0-dev.20260210.1.tgz", + "integrity": "sha512-2matUA2ZU/1Zdv/pWLsdNwdzkOxBPeLa1581wgnaANrzZD3IJm4eCMfidRFTh9fVPN/eMsthYOeSnuVJa/mPmg==", "cpu": [ "arm" ], @@ -2011,9 +2150,9 @@ ] }, "node_modules/@typescript/native-preview-linux-arm64": { - "version": "7.0.0-dev.20251201.1", - "resolved": "https://registry.npmjs.org/@typescript/native-preview-linux-arm64/-/native-preview-linux-arm64-7.0.0-dev.20251201.1.tgz", - "integrity": "sha512-HbEn+SBTDZEtwN/VUxA2To+6vEr7x++SCRc6yGp5y4onpBL2xnH17UoxWiqN9J4Bu1DbQ9jZv3D5CzwBlofPQA==", + "version": "7.0.0-dev.20260210.1", + "resolved": "https://registry.npmjs.org/@typescript/native-preview-linux-arm64/-/native-preview-linux-arm64-7.0.0-dev.20260210.1.tgz", + "integrity": "sha512-aSdY/1Uh+4hOpQT1jHvM16cNqXv6lihe3oZmGTV6DmgkeH9soGXRumbu+oA73E3w0Hm6PjD/aIzbvK53yjvN1Q==", "cpu": [ "arm64" ], @@ -2025,9 +2164,9 @@ ] }, "node_modules/@typescript/native-preview-linux-x64": { - "version": "7.0.0-dev.20251201.1", - "resolved": "https://registry.npmjs.org/@typescript/native-preview-linux-x64/-/native-preview-linux-x64-7.0.0-dev.20251201.1.tgz", - "integrity": "sha512-q94K/LZ3Ab/SbUBMBsf37VdsumeZ1dZmymJYlhGBqk/fdXBayL0diLR3RdzyeQWbCXAxWL5KFKLIiIc3cI/fcA==", + "version": "7.0.0-dev.20260210.1", + "resolved": "https://registry.npmjs.org/@typescript/native-preview-linux-x64/-/native-preview-linux-x64-7.0.0-dev.20260210.1.tgz", + "integrity": "sha512-7C5mhiOFzWB+hdoCuog9roQuNFFHALw1jz0zrA9ikH18DOgnnGJpGLuekQJdXG1yQSdrALZROXLidTmVxFYSgg==", "cpu": [ "x64" ], @@ -2039,9 +2178,9 @@ ] }, "node_modules/@typescript/native-preview-win32-arm64": { - "version": "7.0.0-dev.20251201.1", - "resolved": 
"https://registry.npmjs.org/@typescript/native-preview-win32-arm64/-/native-preview-win32-arm64-7.0.0-dev.20251201.1.tgz", - "integrity": "sha512-/AFwpsX/G05bBsfVURfg4+/JC6gfvqj9jfFe/7oe1Y1J42koN5C8TH+eSmMOOEcPYpFjR1e+NWckqBJKaCXJ4A==", + "version": "7.0.0-dev.20260210.1", + "resolved": "https://registry.npmjs.org/@typescript/native-preview-win32-arm64/-/native-preview-win32-arm64-7.0.0-dev.20260210.1.tgz", + "integrity": "sha512-n8/tI1rOrqy+kFqrNc4xBYaVc1eGn5SYS9HHDZOPZ8E2b3Oq7RAPSZdNi+YYwMcOx3MFon0Iu6mZ1N6lqer9Dw==", "cpu": [ "arm64" ], @@ -2053,9 +2192,9 @@ ] }, "node_modules/@typescript/native-preview-win32-x64": { - "version": "7.0.0-dev.20251201.1", - "resolved": "https://registry.npmjs.org/@typescript/native-preview-win32-x64/-/native-preview-win32-x64-7.0.0-dev.20251201.1.tgz", - "integrity": "sha512-vTUCDEuSP4ifLHqb8aljuj44v6+M1HDKo1WLnboTDpwU7IIrTux/0jzkPfEHd9xd5FU4EhSA8ZrYDwKI0BcRcg==", + "version": "7.0.0-dev.20260210.1", + "resolved": "https://registry.npmjs.org/@typescript/native-preview-win32-x64/-/native-preview-win32-x64-7.0.0-dev.20260210.1.tgz", + "integrity": "sha512-wC/Aoxf/5/m/7alzb7RxLivGuYwZw3/Iq7RO73egG70LL2RLUuP306MDg1sj2TyeAe+S3zZX3rU1L6qMOW439A==", "cpu": [ "x64" ], @@ -2067,16 +2206,16 @@ ] }, "node_modules/@vitest/expect": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.16.tgz", - "integrity": "sha512-eshqULT2It7McaJkQGLkPjPjNph+uevROGuIMJdG3V+0BSR2w9u6J9Lwu+E8cK5TETlfou8GRijhafIMhXsimA==", + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.18.tgz", + "integrity": "sha512-8sCWUyckXXYvx4opfzVY03EOiYVxyNrHS5QxX3DAIi5dpJAAkyJezHCP77VMX4HKA2LDT/Jpfo8i2r5BE3GnQQ==", "dev": true, "license": "MIT", "dependencies": { "@standard-schema/spec": "^1.0.0", "@types/chai": "^5.2.2", - "@vitest/spy": "4.0.16", - "@vitest/utils": "4.0.16", + "@vitest/spy": "4.0.18", + "@vitest/utils": "4.0.18", "chai": "^6.2.1", "tinyrainbow": "^3.0.3" }, @@ -2085,13 +2224,13 @@ } 
}, "node_modules/@vitest/mocker": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.16.tgz", - "integrity": "sha512-yb6k4AZxJTB+q9ycAvsoxGn+j/po0UaPgajllBgt1PzoMAAmJGYFdDk0uCcRcxb3BrME34I6u8gHZTQlkqSZpg==", + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.18.tgz", + "integrity": "sha512-HhVd0MDnzzsgevnOWCBj5Otnzobjy5wLBe4EdeeFGv8luMsGcYqDuFRMcttKWZA5vVO8RFjexVovXvAM4JoJDQ==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/spy": "4.0.16", + "@vitest/spy": "4.0.18", "estree-walker": "^3.0.3", "magic-string": "^0.30.21" }, @@ -2112,9 +2251,9 @@ } }, "node_modules/@vitest/pretty-format": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.16.tgz", - "integrity": "sha512-eNCYNsSty9xJKi/UdVD8Ou16alu7AYiS2fCPRs0b1OdhJiV89buAXQLpTbe+X8V9L6qrs9CqyvU7OaAopJYPsA==", + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.18.tgz", + "integrity": "sha512-P24GK3GulZWC5tz87ux0m8OADrQIUVDPIjjj65vBXYG17ZeU3qD7r+MNZ1RNv4l8CGU2vtTRqixrOi9fYk/yKw==", "dev": true, "license": "MIT", "dependencies": { @@ -2125,13 +2264,13 @@ } }, "node_modules/@vitest/runner": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.16.tgz", - "integrity": "sha512-VWEDm5Wv9xEo80ctjORcTQRJ539EGPB3Pb9ApvVRAY1U/WkHXmmYISqU5E79uCwcW7xYUV38gwZD+RV755fu3Q==", + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.18.tgz", + "integrity": "sha512-rpk9y12PGa22Jg6g5M3UVVnTS7+zycIGk9ZNGN+m6tZHKQb7jrP7/77WfZy13Y/EUDd52NDsLRQhYKtv7XfPQw==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/utils": "4.0.16", + "@vitest/utils": "4.0.18", "pathe": "^2.0.3" }, "funding": { @@ -2139,13 +2278,13 @@ } }, "node_modules/@vitest/snapshot": { - "version": "4.0.16", - "resolved": 
"https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.16.tgz", - "integrity": "sha512-sf6NcrYhYBsSYefxnry+DR8n3UV4xWZwWxYbCJUt2YdvtqzSPR7VfGrY0zsv090DAbjFZsi7ZaMi1KnSRyK1XA==", + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.18.tgz", + "integrity": "sha512-PCiV0rcl7jKQjbgYqjtakly6T1uwv/5BQ9SwBLekVg/EaYeQFPiXcgrC2Y7vDMA8dM1SUEAEV82kgSQIlXNMvA==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/pretty-format": "4.0.16", + "@vitest/pretty-format": "4.0.18", "magic-string": "^0.30.21", "pathe": "^2.0.3" }, @@ -2154,9 +2293,9 @@ } }, "node_modules/@vitest/spy": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.16.tgz", - "integrity": "sha512-4jIOWjKP0ZUaEmJm00E0cOBLU+5WE0BpeNr3XN6TEF05ltro6NJqHWxXD0kA8/Zc8Nh23AT8WQxwNG+WeROupw==", + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.18.tgz", + "integrity": "sha512-cbQt3PTSD7P2OARdVW3qWER5EGq7PHlvE+QfzSC0lbwO+xnt7+XH06ZzFjFRgzUX//JmpxrCu92VdwvEPlWSNw==", "dev": true, "license": "MIT", "funding": { @@ -2164,13 +2303,13 @@ } }, "node_modules/@vitest/utils": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.16.tgz", - "integrity": "sha512-h8z9yYhV3e1LEfaQ3zdypIrnAg/9hguReGZoS7Gl0aBG5xgA410zBqECqmaF/+RkTggRsfnzc1XaAHA6bmUufA==", + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.18.tgz", + "integrity": "sha512-msMRKLMVLWygpK3u2Hybgi4MNjcYJvwTb0Ru09+fOyCXIgT5raYP041DRRdiJiI3k/2U6SEbAETB3YtBrUkCFA==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/pretty-format": "4.0.16", + "@vitest/pretty-format": "4.0.18", "tinyrainbow": "^3.0.3" }, "funding": { @@ -2196,7 +2335,6 @@ "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "dev": true, "license": "MIT", - "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -2209,21 +2347,21 @@ "resolved": 
"https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", "dev": true, + "license": "MIT", "peerDependencies": { "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dev": true, + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", + "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", "license": "MIT", "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" }, "funding": { "type": "github", @@ -2247,28 +2385,6 @@ } } }, - "node_modules/ajv-formats/node_modules/ajv": { - "version": "8.17.1", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", - "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.3", - "fast-uri": "^3.0.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/ajv-formats/node_modules/json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", - "license": "MIT" - }, "node_modules/ansi-styles": { "version": "4.3.0", "resolved": 
"https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", @@ -2313,13 +2429,13 @@ } }, "node_modules/ast-kit": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ast-kit/-/ast-kit-2.1.3.tgz", - "integrity": "sha512-TH+b3Lv6pUjy/Nu0m6A2JULtdzLpmqF9x1Dhj00ZoEiML8qvVA9j1flkzTKNYgdEhWrjDwtWNpyyCUbfQe514g==", + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/ast-kit/-/ast-kit-2.2.0.tgz", + "integrity": "sha512-m1Q/RaVOnTp9JxPX+F+Zn7IcLYMzM8kZofDImfsKZd8MbR+ikdOzTeztStWqfrqIxZnYWryyI9ePm3NGjnZgGw==", "dev": true, "license": "MIT", "dependencies": { - "@babel/parser": "^7.28.4", + "@babel/parser": "^7.28.5", "pathe": "^2.0.3" }, "engines": { @@ -2329,24 +2445,6 @@ "url": "https://github.com/sponsors/sxzz" } }, - "node_modules/async-function": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/async-function/-/async-function-1.0.0.tgz", - "integrity": "sha512-hsU18Ae8CDTR6Kgu9DYf0EbCr/a5iGL0rytQDobUcdpYOKokk8LEjVphnXkDkgpi0wYVsqrXuP0bZxJaTqdgoA==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/async-generator-function": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/async-generator-function/-/async-generator-function-1.0.0.tgz", - "integrity": "sha512-+NAXNqgCrB95ya4Sr66i1CL2hqLVckAk7xwRYWdcm39/ELQ6YNn1aw5r0bdQtqNZgQpEWzc5yc/igXc7aL5SLA==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", @@ -2354,10 +2452,16 @@ "dev": true, "license": "MIT" }, + "node_modules/before-after-hook": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-4.0.0.tgz", + "integrity": "sha512-q6tR3RPqIB1pMiTRMFcZwuG5T8vwp+vUvEG0vuI6B+Rikh5BfPp2fQ82c925FOs+b0lcFQ8CFrL+KbilfZFhOQ==", + "license": "Apache-2.0" + }, "node_modules/birpc": { - "version": "2.6.1", - "resolved": 
"https://registry.npmjs.org/birpc/-/birpc-2.6.1.tgz", - "integrity": "sha512-LPnFhlDpdSH6FJhJyn4M0kFO7vtQ5iPw24FnG0y21q09xC7e8+1LeR31S1MAIrDAHp4m7aas4bEkTDTvMAtebQ==", + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/birpc/-/birpc-2.9.0.tgz", + "integrity": "sha512-KrayHS5pBi69Xi9JmvoqrIgYGDkD6mcSe/i6YKi3w5kekCLzrX4+nawcXqrj2tIp50Kw/mT/s3p+GVK0A0sKxw==", "dev": true, "license": "MIT", "funding": { @@ -2365,9 +2469,9 @@ } }, "node_modules/body-parser": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-2.2.1.tgz", - "integrity": "sha512-nfDwkulwiZYQIGwxdy0RUmowMhKcFVcYXUU7m4QlKYim1rUtg83xm2yjZ40QjDuc291AJjjeSc9b++AWHSgSHw==", + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-2.2.2.tgz", + "integrity": "sha512-oP5VkATKlNwcgvxi0vM0p/D3n2C3EReYVX+DNYs5TjZFn/oQt2j+4sVJtSMr18pdRr8wjTcBl6LoV+FUwzPmNA==", "license": "MIT", "dependencies": { "bytes": "^3.1.2", @@ -2376,7 +2480,7 @@ "http-errors": "^2.0.0", "iconv-lite": "^0.7.0", "on-finished": "^2.4.1", - "qs": "^6.14.0", + "qs": "^6.14.1", "raw-body": "^3.0.1", "type-is": "^2.0.1" }, @@ -2521,9 +2625,9 @@ "license": "MIT" }, "node_modules/commander": { - "version": "14.0.2", - "resolved": "https://registry.npmjs.org/commander/-/commander-14.0.2.tgz", - "integrity": "sha512-TywoWNNRbhoD0BXs1P3ZEScW8W5iKrnbithIl0YH+uCmBd0QpPOA8yc82DS3BIE5Ma6FnBVUsJ7wVUDz4dvOWQ==", + "version": "14.0.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-14.0.3.tgz", + "integrity": "sha512-H+y0Jo/T1RZ9qPP4Eh1pkcQcLRglraJaSLoyOtHxu6AapkjWVCy2Sit1QQ4x3Dng8qDlSsZEet7g5Pq06MvTgw==", "license": "MIT", "engines": { "node": ">=20" @@ -2537,15 +2641,16 @@ "license": "MIT" }, "node_modules/content-disposition": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-1.0.0.tgz", - "integrity": "sha512-Au9nRL8VNUut/XSzbQA38+M78dzP4D+eqg3gfJHMIHHYa3bg067xj1KxMUWj+VULbiZMowKngFFbKczUrNJ1mg==", + 
"version": "1.0.1", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-1.0.1.tgz", + "integrity": "sha512-oIXISMynqSqm241k6kcQ5UwttDILMK4BiurCfGEREw6+X9jkkpEe5T9FZaApyLGGOnFuyMWZpdolTXMtvEJ08Q==", "license": "MIT", - "dependencies": { - "safe-buffer": "5.2.1" - }, "engines": { - "node": ">= 0.6" + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" } }, "node_modules/content-type": { @@ -2576,9 +2681,9 @@ } }, "node_modules/cors": { - "version": "2.8.5", - "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz", - "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==", + "version": "2.8.6", + "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.6.tgz", + "integrity": "sha512-tJtZBBHA6vjIAaF6EnIaq6laBBP9aq/Y3ouVJjEfoHbRBcHBAHYcMh/w8LDrk2PvIMMq8gmopa5D4V8RmbrxGw==", "license": "MIT", "dependencies": { "object-assign": "^4", @@ -2586,6 +2691,10 @@ }, "engines": { "node": ">= 0.10" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" } }, "node_modules/cross-spawn": { @@ -2643,9 +2752,9 @@ } }, "node_modules/diff": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/diff/-/diff-8.0.2.tgz", - "integrity": "sha512-sSuxWU5j5SR9QQji/o2qMvqRNYRDOcBTgsJ/DeCf4iSN4gW+gNMXM7wFIP+fdXZxoNiAnHUTGjCr+TSWXdRDKg==", + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/diff/-/diff-8.0.3.tgz", + "integrity": "sha512-qejHi7bcSD4hQAZE0tNAawRK1ZtafHDmMTMkrrIGgSLl7hTnQHmKCeB45xAcbfTqK2zowkM3j3bHt/4b/ARbYQ==", "dev": true, "license": "BSD-3-Clause", "engines": { @@ -2653,13 +2762,13 @@ } }, "node_modules/dts-resolver": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/dts-resolver/-/dts-resolver-2.1.2.tgz", - "integrity": "sha512-xeXHBQkn2ISSXxbJWD828PFjtyg+/UrMDo7W4Ffcs7+YWCquxU8YjV1KoxuiL+eJ5pg3ll+bC6flVv61L3LKZg==", + "version": "2.1.3", + "resolved": 
"https://registry.npmjs.org/dts-resolver/-/dts-resolver-2.1.3.tgz", + "integrity": "sha512-bihc7jPC90VrosXNzK0LTE2cuLP6jr0Ro8jk+kMugHReJVLIpHz/xadeq3MhuwyO4TD4OA3L1Q8pBBFRc08Tsw==", "dev": true, "license": "MIT", "engines": { - "node": ">=20.18.0" + "node": ">=20.19.0" }, "funding": { "url": "https://github.com/sponsors/sxzz" @@ -2750,9 +2859,9 @@ } }, "node_modules/esbuild": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.11.tgz", - "integrity": "sha512-KohQwyzrKTQmhXDW1PjCv3Tyspn9n5GcY2RTDqeORIdIJY8yKIF7sTSopFmn/wpMPW4rdPXI0UE5LJLuq3bx0Q==", + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.3.tgz", + "integrity": "sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==", "dev": true, "hasInstallScript": true, "license": "MIT", @@ -2763,32 +2872,32 @@ "node": ">=18" }, "optionalDependencies": { - "@esbuild/aix-ppc64": "0.25.11", - "@esbuild/android-arm": "0.25.11", - "@esbuild/android-arm64": "0.25.11", - "@esbuild/android-x64": "0.25.11", - "@esbuild/darwin-arm64": "0.25.11", - "@esbuild/darwin-x64": "0.25.11", - "@esbuild/freebsd-arm64": "0.25.11", - "@esbuild/freebsd-x64": "0.25.11", - "@esbuild/linux-arm": "0.25.11", - "@esbuild/linux-arm64": "0.25.11", - "@esbuild/linux-ia32": "0.25.11", - "@esbuild/linux-loong64": "0.25.11", - "@esbuild/linux-mips64el": "0.25.11", - "@esbuild/linux-ppc64": "0.25.11", - "@esbuild/linux-riscv64": "0.25.11", - "@esbuild/linux-s390x": "0.25.11", - "@esbuild/linux-x64": "0.25.11", - "@esbuild/netbsd-arm64": "0.25.11", - "@esbuild/netbsd-x64": "0.25.11", - "@esbuild/openbsd-arm64": "0.25.11", - "@esbuild/openbsd-x64": "0.25.11", - "@esbuild/openharmony-arm64": "0.25.11", - "@esbuild/sunos-x64": "0.25.11", - "@esbuild/win32-arm64": "0.25.11", - "@esbuild/win32-ia32": "0.25.11", - "@esbuild/win32-x64": "0.25.11" + "@esbuild/aix-ppc64": "0.27.3", + "@esbuild/android-arm": "0.27.3", + "@esbuild/android-arm64": 
"0.27.3", + "@esbuild/android-x64": "0.27.3", + "@esbuild/darwin-arm64": "0.27.3", + "@esbuild/darwin-x64": "0.27.3", + "@esbuild/freebsd-arm64": "0.27.3", + "@esbuild/freebsd-x64": "0.27.3", + "@esbuild/linux-arm": "0.27.3", + "@esbuild/linux-arm64": "0.27.3", + "@esbuild/linux-ia32": "0.27.3", + "@esbuild/linux-loong64": "0.27.3", + "@esbuild/linux-mips64el": "0.27.3", + "@esbuild/linux-ppc64": "0.27.3", + "@esbuild/linux-riscv64": "0.27.3", + "@esbuild/linux-s390x": "0.27.3", + "@esbuild/linux-x64": "0.27.3", + "@esbuild/netbsd-arm64": "0.27.3", + "@esbuild/netbsd-x64": "0.27.3", + "@esbuild/openbsd-arm64": "0.27.3", + "@esbuild/openbsd-x64": "0.27.3", + "@esbuild/openharmony-arm64": "0.27.3", + "@esbuild/sunos-x64": "0.27.3", + "@esbuild/win32-arm64": "0.27.3", + "@esbuild/win32-ia32": "0.27.3", + "@esbuild/win32-x64": "0.27.3" } }, "node_modules/escape-html": { @@ -2811,20 +2920,20 @@ } }, "node_modules/eslint": { - "version": "9.38.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.38.0.tgz", - "integrity": "sha512-t5aPOpmtJcZcz5UJyY2GbvpDlsK5E8JqRqoKtfiKE3cNh437KIqfJr3A3AKf5k64NPx6d0G3dno6XDY05PqPtw==", + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.39.2.tgz", + "integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==", "dev": true, - "peer": true, + "license": "MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.8.0", "@eslint-community/regexpp": "^4.12.1", "@eslint/config-array": "^0.21.1", - "@eslint/config-helpers": "^0.4.1", - "@eslint/core": "^0.16.0", + "@eslint/config-helpers": "^0.4.2", + "@eslint/core": "^0.17.0", "@eslint/eslintrc": "^3.3.1", - "@eslint/js": "9.38.0", - "@eslint/plugin-kit": "^0.4.0", + "@eslint/js": "9.39.2", + "@eslint/plugin-kit": "^0.4.1", "@humanfs/node": "^0.16.6", "@humanwhocodes/module-importer": "^1.0.1", "@humanwhocodes/retry": "^0.4.2", @@ -2916,19 +3025,30 @@ "url": "https://opencollective.com/eslint" } }, - 
"node_modules/eslint/node_modules/@eslint/js": { - "version": "9.38.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.38.0.tgz", - "integrity": "sha512-UZ1VpFvXf9J06YG9xQBdnzU+kthors6KjhMAl6f4gH4usHyh31rUf2DLGInT8RFYIReYXNSydgPY0V2LuWgl7A==", + "node_modules/eslint/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "dev": true, "license": "MIT", - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" }, "funding": { - "url": "https://eslint.org/donate" + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" } }, + "node_modules/eslint/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, "node_modules/espree": { "version": "10.4.0", "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", @@ -2948,9 +3068,9 @@ } }, "node_modules/esquery": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", - "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==", + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", + "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", "dev": true, "license": "BSD-3-Clause", "dependencies": { @@ -3034,9 +3154,9 @@ } }, "node_modules/expect-type": { - "version": "1.2.2", - "resolved": 
"https://registry.npmjs.org/expect-type/-/expect-type-1.2.2.tgz", - "integrity": "sha512-JhFGDVJ7tmDJItKhYgJCGLOWjuK9vPxiXoUFLwLDc99NlmklilbiQJwoctZtt13+xMw91MCk/REan6MWHqDjyA==", + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", + "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", "dev": true, "license": "Apache-2.0", "engines": { @@ -3044,18 +3164,19 @@ } }, "node_modules/express": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/express/-/express-5.1.0.tgz", - "integrity": "sha512-DT9ck5YIRU+8GYzzU5kT3eHGA5iL+1Zd0EutOmTE9Dtk+Tvuzd23VBU+ec7HPNSTxXYO55gPV/hq4pSBJDjFpA==", - "peer": true, + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/express/-/express-5.2.1.tgz", + "integrity": "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw==", + "license": "MIT", "dependencies": { "accepts": "^2.0.0", - "body-parser": "^2.2.0", + "body-parser": "^2.2.1", "content-disposition": "^1.0.0", "content-type": "^1.0.5", "cookie": "^0.7.1", "cookie-signature": "^1.2.1", "debug": "^4.4.0", + "depd": "^2.0.0", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "etag": "^1.8.1", @@ -3086,10 +3207,13 @@ } }, "node_modules/express-rate-limit": { - "version": "7.5.1", - "resolved": "https://registry.npmjs.org/express-rate-limit/-/express-rate-limit-7.5.1.tgz", - "integrity": "sha512-7iN8iPMDzOMHPUYllBEsQdWVB6fPDMPqwjBaFrgr4Jgr/+okjvzAy+UHlYYL/Vs0OsOrMkwS6PJDkFlJwoxUnw==", + "version": "8.2.1", + "resolved": "https://registry.npmjs.org/express-rate-limit/-/express-rate-limit-8.2.1.tgz", + "integrity": "sha512-PCZEIEIxqwhzw4KF0n7QF4QqruVTcF73O5kFKUnGOyjbCCgizBBiFaYpd/fnBLUMPw/BWw9OsiN7GgrNYr7j6g==", "license": "MIT", + "dependencies": { + "ip-address": "10.0.1" + }, "engines": { "node": ">= 16" }, @@ -3100,12 +3224,28 @@ "express": ">= 4.11" } }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - 
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "license": "MIT" - }, + "node_modules/fast-content-type-parse": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/fast-content-type-parse/-/fast-content-type-parse-3.0.0.tgz", + "integrity": "sha512-ZvLdcY8P+N8mGQJahJV5G4U88CSvT1rP8ApL6uETe88MBXrBHAkZlSEySdUlyztF7ccb+Znos3TFqaepHxdhBg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ], + "license": "MIT" + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "license": "MIT" + }, "node_modules/fast-json-stable-stringify": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", @@ -3159,6 +3299,7 @@ "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", "dev": true, + "license": "MIT", "dependencies": { "flat-cache": "^4.0.0" }, @@ -3167,9 +3308,9 @@ } }, "node_modules/finalhandler": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-2.1.0.tgz", - "integrity": "sha512-/t88Ty3d5JWQbWYgaOGCCYfXRwV1+be02WqYYlL6h0lEiUAMPM8o8qKGO01YIkOHzka2up08wvgYD0mDiI+q3Q==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-2.1.1.tgz", + "integrity": "sha512-S8KoZgRZN+a5rNwqTxlZZePjT/4cnm0ROV70LedRHZ0p8u9fRID0hJUZQpkKLzro8LfmC8sx23bY6tVNxv8pQA==", "license": "MIT", "dependencies": { 
"debug": "^4.4.0", @@ -3180,7 +3321,11 @@ "statuses": "^2.0.1" }, "engines": { - "node": ">= 0.8" + "node": ">= 18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" } }, "node_modules/find-up": { @@ -3244,6 +3389,7 @@ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", "dev": true, + "hasInstallScript": true, "license": "MIT", "optional": true, "os": [ @@ -3262,29 +3408,17 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/generator-function": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/generator-function/-/generator-function-2.0.1.tgz", - "integrity": "sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, "node_modules/get-intrinsic": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.1.tgz", - "integrity": "sha512-fk1ZVEeOX9hVZ6QzoBNEC55+Ucqg4sTVwrVuigZhuRPESVFpMyXnd3sbXvPOwp7Y9riVyANiqhEuRF0G1aVSeQ==", + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", "license": "MIT", "dependencies": { - "async-function": "^1.0.0", - "async-generator-function": "^1.0.0", "call-bind-apply-helpers": "^1.0.2", "es-define-property": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.1.1", "function-bind": "^1.1.2", - "generator-function": "^2.0.0", "get-proto": "^1.0.1", "gopd": "^1.2.0", "has-symbols": "^1.1.0", @@ -3312,9 +3446,9 @@ } }, "node_modules/get-tsconfig": { - "version": "4.13.0", - "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.13.0.tgz", - "integrity": 
"sha512-1VKTZJCwBrvbd+Wn3AOgQP/2Av+TfTCOlE4AcRJE72W1ksZXbAx8PPBR9RzgTeSPzlPMHrbANMH3LbltH73wxQ==", + "version": "4.13.6", + "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.13.6.tgz", + "integrity": "sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw==", "dev": true, "license": "MIT", "dependencies": { @@ -3404,11 +3538,10 @@ } }, "node_modules/hono": { - "version": "4.11.3", - "resolved": "https://registry.npmjs.org/hono/-/hono-4.11.3.tgz", - "integrity": "sha512-PmQi306+M/ct/m5s66Hrg+adPnkD5jiO6IjA7WhWw0gSBSo1EcRegwuI1deZ+wd5pzCGynCcn2DprnE4/yEV4w==", + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/hono/-/hono-4.11.9.tgz", + "integrity": "sha512-Eaw2YTGM6WOxA6CXbckaEvslr2Ne4NFsKrvc0v97JD5awbmeBLO5w9Ho9L9kmKonrwF9RJlW6BxT1PVv/agBHQ==", "license": "MIT", - "peer": true, "engines": { "node": ">=16.9.0" } @@ -3421,34 +3554,29 @@ "license": "MIT" }, "node_modules/http-errors": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", - "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz", + "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==", "license": "MIT", "dependencies": { - "depd": "2.0.0", - "inherits": "2.0.4", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "toidentifier": "1.0.1" + "depd": "~2.0.0", + "inherits": "~2.0.4", + "setprototypeof": "~1.2.0", + "statuses": "~2.0.2", + "toidentifier": "~1.0.1" }, "engines": { "node": ">= 0.8" - } - }, - "node_modules/http-errors/node_modules/statuses": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", - "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", - 
"license": "MIT", - "engines": { - "node": ">= 0.8" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" } }, "node_modules/iconv-lite": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.7.0.tgz", - "integrity": "sha512-cf6L2Ds3h57VVmkZe+Pn+5APsT7FpqJtEhhieDCvrE2MK5Qk9MyffgQyuxQTm6BChfeZNtcOLHp9IcWRVcIcBQ==", + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.7.2.tgz", + "integrity": "sha512-im9DjEDQ55s9fL4EYzOAv0yMqmMBSZp6G0VvFyTMPKWxiSBHUj9NW/qqLmXUwXrrM7AvqSlTCfvqRb0cM8yYqw==", "license": "MIT", "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" @@ -3504,6 +3632,15 @@ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", "license": "ISC" }, + "node_modules/ip-address": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-10.0.1.tgz", + "integrity": "sha512-NWv9YLW4PoW2B7xtzaS3NCot75m6nK7Icdv0o3lfMceJVRfSoQwqD4wEH5rLwoKJwUiZ/rfpiVBhnaF0FK4HoA==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, "node_modules/ipaddr.js": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", @@ -3559,9 +3696,9 @@ } }, "node_modules/jose": { - "version": "6.1.2", - "resolved": "https://registry.npmjs.org/jose/-/jose-6.1.2.tgz", - "integrity": "sha512-MpcPtHLE5EmztuFIqB0vzHAWJPpmN1E6L4oo+kze56LIs3MyXIj9ZHMDxqOvkP38gBR7K1v3jqd4WU2+nrfONQ==", + "version": "6.1.3", + "resolved": "https://registry.npmjs.org/jose/-/jose-6.1.3.tgz", + "integrity": "sha512-0TpaTfihd4QMNwrz/ob2Bp7X04yuxJkjRGi4aKmOqwhov54i6u79oCv7T+C7lo70MKH6BesI3vscD1yb/yzKXQ==", "license": "MIT", "funding": { "url": "https://github.com/sponsors/panva" @@ -3601,10 +3738,9 @@ "license": "MIT" }, "node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - 
"integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true, + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", "license": "MIT" }, "node_modules/json-schema-typed": { @@ -3631,31 +3767,32 @@ } }, "node_modules/lefthook": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lefthook/-/lefthook-2.0.2.tgz", - "integrity": "sha512-2lrSva53G604ZWjK5kHYvDdwb5GzbhciIPWhebv0A8ceveqSsnG2JgVEt+DnhOPZ4VfNcXvt3/ohFBPNpuAlVw==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lefthook/-/lefthook-2.1.0.tgz", + "integrity": "sha512-+vS+yywGQW6CN1J1hbGkez//6ixGHIQqfxDN/d3JDm531w9GfGt2lAWTDfZTw/CEl80XsN0raFcnEraR3ldw9g==", "dev": true, "hasInstallScript": true, + "license": "MIT", "bin": { "lefthook": "bin/index.js" }, "optionalDependencies": { - "lefthook-darwin-arm64": "2.0.2", - "lefthook-darwin-x64": "2.0.2", - "lefthook-freebsd-arm64": "2.0.2", - "lefthook-freebsd-x64": "2.0.2", - "lefthook-linux-arm64": "2.0.2", - "lefthook-linux-x64": "2.0.2", - "lefthook-openbsd-arm64": "2.0.2", - "lefthook-openbsd-x64": "2.0.2", - "lefthook-windows-arm64": "2.0.2", - "lefthook-windows-x64": "2.0.2" + "lefthook-darwin-arm64": "2.1.0", + "lefthook-darwin-x64": "2.1.0", + "lefthook-freebsd-arm64": "2.1.0", + "lefthook-freebsd-x64": "2.1.0", + "lefthook-linux-arm64": "2.1.0", + "lefthook-linux-x64": "2.1.0", + "lefthook-openbsd-arm64": "2.1.0", + "lefthook-openbsd-x64": "2.1.0", + "lefthook-windows-arm64": "2.1.0", + "lefthook-windows-x64": "2.1.0" } }, "node_modules/lefthook-darwin-arm64": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lefthook-darwin-arm64/-/lefthook-darwin-arm64-2.0.2.tgz", - "integrity": 
"sha512-x/4AOinpMS2abZyA/krDd50cRPZit/6P670Z1mJjfS0+fPZkFw7AXpjxroiN0rgglg78vD7BwcA5331z4YZa5g==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lefthook-darwin-arm64/-/lefthook-darwin-arm64-2.1.0.tgz", + "integrity": "sha512-u2hjHLQXWSFfzO7ln2n/uEydSzfC9sc5cDC7tvKSuOdhvBwaJ0AQ7ZeuqqCQ4YfVIJfYOom1SVE9CBd10FVyig==", "cpu": [ "arm64" ], @@ -3667,9 +3804,9 @@ ] }, "node_modules/lefthook-darwin-x64": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lefthook-darwin-x64/-/lefthook-darwin-x64-2.0.2.tgz", - "integrity": "sha512-MSb8XZBfmlNvCpuLiQqrJS+sPiSEAyuoHOMZOHjlceYqO0leVVw9YfePVcb4Vi/PqOYngTdJk83MmYvqhsSNTQ==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lefthook-darwin-x64/-/lefthook-darwin-x64-2.1.0.tgz", + "integrity": "sha512-zz5rcyrtOZpxon7uE+c0KC/o2ypJeLZql5CL0Y9oaTuECbmhfokm8glsGnyWstW/++PuMpZYYr/qsCJA5elxkQ==", "cpu": [ "x64" ], @@ -3681,9 +3818,9 @@ ] }, "node_modules/lefthook-freebsd-arm64": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lefthook-freebsd-arm64/-/lefthook-freebsd-arm64-2.0.2.tgz", - "integrity": "sha512-gewPsUPc3J/n2/RrhHLS9jtL3qK4HcTED25vfExhvFRW3eT1SDYaBbXnUUmB8SE0zE8Bl6AfEdT2zzZcPbOFuA==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lefthook-freebsd-arm64/-/lefthook-freebsd-arm64-2.1.0.tgz", + "integrity": "sha512-+mXNCNuFHNGYLrDqYWDeHH7kWCLCJFPpspx5PAAm+PD37PRMZJrTqDbaNK9qCghC1tdmT4/Lvilf/ewXHPlaKw==", "cpu": [ "arm64" ], @@ -3695,9 +3832,9 @@ ] }, "node_modules/lefthook-freebsd-x64": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lefthook-freebsd-x64/-/lefthook-freebsd-x64-2.0.2.tgz", - "integrity": "sha512-fsLlaChiKAWiSavQO2LXPR8Z9OcBnyMDvmkIlXC0lG3SjBb9xbVdBdDVlcrsUyDCs5YstmGYHuzw6DfJYpAE1g==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lefthook-freebsd-x64/-/lefthook-freebsd-x64-2.1.0.tgz", + "integrity": "sha512-+AU2HD7szuDsUdHue/E3OnF84B2ae/h7CGKpuIUHJntgoJ4kxf89oDvq2/xl8kDCn9cT76UUjgeZUgFYLRj+6Q==", 
"cpu": [ "x64" ], @@ -3709,9 +3846,9 @@ ] }, "node_modules/lefthook-linux-arm64": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lefthook-linux-arm64/-/lefthook-linux-arm64-2.0.2.tgz", - "integrity": "sha512-vNl3HiZud9T2nGHMngvLw3hSJgutjlN/Lzf5/5jKt/2IIuyd9L3UYktWC9HLUb03Zukr7jeaxG3+VxdAohQwAw==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lefthook-linux-arm64/-/lefthook-linux-arm64-2.1.0.tgz", + "integrity": "sha512-KM70eV1tsEib1/tk+3TFxIdH84EaYlIg5KTQWAg+LB1N23nTQ7lL4Dnh1je6f6KW4tf21nmoMUqsh0xvMkQk8Q==", "cpu": [ "arm64" ], @@ -3723,9 +3860,9 @@ ] }, "node_modules/lefthook-linux-x64": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lefthook-linux-x64/-/lefthook-linux-x64-2.0.2.tgz", - "integrity": "sha512-0ghHMPu4fixIieS8V2k2yZHvcFd9pP0q+sIAIaWo8x7ce/AOQIXFCPHGPAOc8/wi5uVtfyEvCnhxIDKf+lHA2A==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lefthook-linux-x64/-/lefthook-linux-x64-2.1.0.tgz", + "integrity": "sha512-6Bxmv+l7LiYq9W0IE6v2lmlRtBp6pisnlzhcouMGvH3rDwEGw11NAyRJZA3IPGEMAkIuhnlnVTUwAUzKomfJLg==", "cpu": [ "x64" ], @@ -3737,9 +3874,9 @@ ] }, "node_modules/lefthook-openbsd-arm64": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lefthook-openbsd-arm64/-/lefthook-openbsd-arm64-2.0.2.tgz", - "integrity": "sha512-qfXnDM8jffut9rylvi3T+HOqlNRkFYqIDUXeVXlY7dmwCW4u2K46p0W4M3BmAVUeL/MRxBRnjze//Yy6aCbGQw==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lefthook-openbsd-arm64/-/lefthook-openbsd-arm64-2.1.0.tgz", + "integrity": "sha512-ppJNK0bBSPLC8gqksRw5zI/0uLeMA5cK+hmZ4ofcuGNmdrN1dfl2Tx84fdeef0NcQY0ii9Y3j3icIKngIoid/g==", "cpu": [ "arm64" ], @@ -3751,9 +3888,9 @@ ] }, "node_modules/lefthook-openbsd-x64": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lefthook-openbsd-x64/-/lefthook-openbsd-x64-2.0.2.tgz", - "integrity": "sha512-RXqR0FiDTwsQv1X3QVsuBFneWeNXS+tmPFIX8F6Wz9yDPHF8+vBnkWCju6HdkTVTY71Ba5HbYGKEVDvscJkU7Q==", + "version": "2.1.0", 
+ "resolved": "https://registry.npmjs.org/lefthook-openbsd-x64/-/lefthook-openbsd-x64-2.1.0.tgz", + "integrity": "sha512-8k9lQsMYqQGu4spaQ8RNSOJidxIcOyfaoF2FPZhthtBfRV3cgVFGrsQ0hbIi5pvQRGUlCqYuCN79qauXHmnL3Q==", "cpu": [ "x64" ], @@ -3765,9 +3902,9 @@ ] }, "node_modules/lefthook-windows-arm64": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lefthook-windows-arm64/-/lefthook-windows-arm64-2.0.2.tgz", - "integrity": "sha512-KfLKhiUPHP9Aea+9D7or2hgL9wtKEV+GHpx7LBg82ZhCXkAml6rop7mWsBgL80xPYLqMahKolZGO+8z5H6W4HQ==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lefthook-windows-arm64/-/lefthook-windows-arm64-2.1.0.tgz", + "integrity": "sha512-0WN+grrxt9zP9NGRcztoPXcz25tteem91rfLWgQFab+50csJ47zldlsB7/eOS/eHG5mUg5g5NPR4XefnXtjOcQ==", "cpu": [ "arm64" ], @@ -3779,9 +3916,9 @@ ] }, "node_modules/lefthook-windows-x64": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lefthook-windows-x64/-/lefthook-windows-x64-2.0.2.tgz", - "integrity": "sha512-TdysWxGRNtuRg5bN6Uj00tZJIsHTrF/7FavoR5rp1sq21QJhJi36M4I3UVlmOKAUCKhibAIAauZWmX7yaW3eHA==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lefthook-windows-x64/-/lefthook-windows-x64-2.1.0.tgz", + "integrity": "sha512-XbO/5nAZQLpUn0tPpgCYfFBFJHnymSglQ73jD6wymNrR1j8I5EcXGlP6YcLhnZ83yzsdLC+gup+N6IqUeiyRdw==", "cpu": [ "x64" ], @@ -3826,7 +3963,8 @@ "version": "4.6.2", "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/magic-string": { "version": "0.30.21", @@ -3878,15 +4016,19 @@ } }, "node_modules/mime-types": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.1.tgz", - "integrity": "sha512-xRc4oEhT6eaBpU1XF7AjpOFD+xQmXNB5OVKwp4tqCuBpHLS/ZbBDrc07mYTDqVMg6PfxUjjNp85O6Cd2Z/5HWA==", + "version": "3.0.2", + "resolved": 
"https://registry.npmjs.org/mime-types/-/mime-types-3.0.2.tgz", + "integrity": "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==", "license": "MIT", "dependencies": { "mime-db": "^1.54.0" }, "engines": { - "node": ">= 0.6" + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" } }, "node_modules/minimatch": { @@ -4117,7 +4259,6 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=12" }, @@ -4178,6 +4319,7 @@ "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", "dev": true, + "license": "MIT", "bin": { "prettier": "bin/prettier.cjs" }, @@ -4212,9 +4354,9 @@ } }, "node_modules/qs": { - "version": "6.14.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.0.tgz", - "integrity": "sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w==", + "version": "6.14.1", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.1.tgz", + "integrity": "sha512-4EK3+xJl8Ts67nLYNwqw/dsFVnCf+qR7RgXSK9jEEm9unao3njwMDdmsdvoKBKHzxd7tCYz5e5M+SnMjdtXGQQ==", "license": "BSD-3-Clause", "dependencies": { "side-channel": "^1.1.0" @@ -4227,9 +4369,9 @@ } }, "node_modules/quansync": { - "version": "0.2.11", - "resolved": "https://registry.npmjs.org/quansync/-/quansync-0.2.11.tgz", - "integrity": "sha512-AifT7QEbW9Nri4tAwR5M/uzpBuqfZf+zwaEM/QkzEjj7NBuFD2rBuy0K3dE+8wltbezDV7JMA0WfnCPYRSYbXA==", + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/quansync/-/quansync-1.0.0.tgz", + "integrity": "sha512-5xZacEEufv3HSTPQuchrvV6soaiACMFnq1H8wkVioctoH3TRha9Sz66lOxRwPK/qZj7HPiSveih9yAyh98gvqA==", "dev": true, "funding": [ { @@ -4253,15 +4395,15 @@ } }, "node_modules/raw-body": { - "version": 
"3.0.1", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-3.0.1.tgz", - "integrity": "sha512-9G8cA+tuMS75+6G/TzW8OtLzmBDMo8p1JRxN5AZ+LAp8uxGA8V8GZm4GQ4/N5QNQEnLmg6SS7wyuSmbKepiKqA==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-3.0.2.tgz", + "integrity": "sha512-K5zQjDllxWkf7Z5xJdV0/B0WTNqx6vxG70zJE4N0kBs4LovmEYWJzQGxC9bS9RAKu3bgM40lrd5zoLJ12MQ5BA==", "license": "MIT", "dependencies": { - "bytes": "3.1.2", - "http-errors": "2.0.0", - "iconv-lite": "0.7.0", - "unpipe": "1.0.0" + "bytes": "~3.1.2", + "http-errors": "~2.0.1", + "iconv-lite": "~0.7.0", + "unpipe": "~1.0.0" }, "engines": { "node": ">= 0.10" @@ -4315,7 +4457,7 @@ "resolved": "https://registry.npmjs.org/rolldown/-/rolldown-1.0.0-beta.45.tgz", "integrity": "sha512-iMmuD72XXLf26Tqrv1cryNYLX6NNPLhZ3AmNkSf8+xda0H+yijjGJ+wVT9UdBUHOpKzq9RjKtQKRCWoEKQQBZQ==", "dev": true, - "peer": true, + "license": "MIT", "dependencies": { "@oxc-project/types": "=0.95.0", "@rolldown/pluginutils": "1.0.0-beta.45" @@ -4344,24 +4486,24 @@ } }, "node_modules/rolldown-plugin-dts": { - "version": "0.17.3", - "resolved": "https://registry.npmjs.org/rolldown-plugin-dts/-/rolldown-plugin-dts-0.17.3.tgz", - "integrity": "sha512-8mGnNUVNrqEdTnrlcaDxs4sAZg0No6njO+FuhQd4L56nUbJO1tHxOoKDH3mmMJg7f/BhEj/1KjU5W9kZ9zM/kQ==", + "version": "0.17.8", + "resolved": "https://registry.npmjs.org/rolldown-plugin-dts/-/rolldown-plugin-dts-0.17.8.tgz", + "integrity": "sha512-76EEBlhF00yeY6M7VpMkWKI4r9WjuoMiOGey7j4D6zf3m0BR+ZrrY9hvSXdueJ3ljxSLq4DJBKFpX/X9+L7EKw==", "dev": true, "license": "MIT", "dependencies": { "@babel/generator": "^7.28.5", "@babel/parser": "^7.28.5", "@babel/types": "^7.28.5", - "ast-kit": "^2.1.3", - "birpc": "^2.6.1", - "debug": "^4.4.3", - "dts-resolver": "^2.1.2", + "ast-kit": "^2.2.0", + "birpc": "^2.8.0", + "dts-resolver": "^2.1.3", "get-tsconfig": "^4.13.0", - "magic-string": "^0.30.21" + "magic-string": "^0.30.21", + "obug": "^2.0.0" }, "engines": { - "node": ">=20.18.0" + 
"node": ">=20.19.0" }, "funding": { "url": "https://github.com/sponsors/sxzz" @@ -4389,9 +4531,9 @@ } }, "node_modules/rollup": { - "version": "4.55.1", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.55.1.tgz", - "integrity": "sha512-wDv/Ht1BNHB4upNbK74s9usvl7hObDnvVzknxqY/E/O3X6rW1U1rV1aENEfJ54eFZDTNo7zv1f5N4edCluH7+A==", + "version": "4.57.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.57.1.tgz", + "integrity": "sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A==", "dev": true, "license": "MIT", "dependencies": { @@ -4405,31 +4547,31 @@ "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.55.1", - "@rollup/rollup-android-arm64": "4.55.1", - "@rollup/rollup-darwin-arm64": "4.55.1", - "@rollup/rollup-darwin-x64": "4.55.1", - "@rollup/rollup-freebsd-arm64": "4.55.1", - "@rollup/rollup-freebsd-x64": "4.55.1", - "@rollup/rollup-linux-arm-gnueabihf": "4.55.1", - "@rollup/rollup-linux-arm-musleabihf": "4.55.1", - "@rollup/rollup-linux-arm64-gnu": "4.55.1", - "@rollup/rollup-linux-arm64-musl": "4.55.1", - "@rollup/rollup-linux-loong64-gnu": "4.55.1", - "@rollup/rollup-linux-loong64-musl": "4.55.1", - "@rollup/rollup-linux-ppc64-gnu": "4.55.1", - "@rollup/rollup-linux-ppc64-musl": "4.55.1", - "@rollup/rollup-linux-riscv64-gnu": "4.55.1", - "@rollup/rollup-linux-riscv64-musl": "4.55.1", - "@rollup/rollup-linux-s390x-gnu": "4.55.1", - "@rollup/rollup-linux-x64-gnu": "4.55.1", - "@rollup/rollup-linux-x64-musl": "4.55.1", - "@rollup/rollup-openbsd-x64": "4.55.1", - "@rollup/rollup-openharmony-arm64": "4.55.1", - "@rollup/rollup-win32-arm64-msvc": "4.55.1", - "@rollup/rollup-win32-ia32-msvc": "4.55.1", - "@rollup/rollup-win32-x64-gnu": "4.55.1", - "@rollup/rollup-win32-x64-msvc": "4.55.1", + "@rollup/rollup-android-arm-eabi": "4.57.1", + "@rollup/rollup-android-arm64": "4.57.1", + "@rollup/rollup-darwin-arm64": "4.57.1", + "@rollup/rollup-darwin-x64": "4.57.1", + 
"@rollup/rollup-freebsd-arm64": "4.57.1", + "@rollup/rollup-freebsd-x64": "4.57.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.57.1", + "@rollup/rollup-linux-arm-musleabihf": "4.57.1", + "@rollup/rollup-linux-arm64-gnu": "4.57.1", + "@rollup/rollup-linux-arm64-musl": "4.57.1", + "@rollup/rollup-linux-loong64-gnu": "4.57.1", + "@rollup/rollup-linux-loong64-musl": "4.57.1", + "@rollup/rollup-linux-ppc64-gnu": "4.57.1", + "@rollup/rollup-linux-ppc64-musl": "4.57.1", + "@rollup/rollup-linux-riscv64-gnu": "4.57.1", + "@rollup/rollup-linux-riscv64-musl": "4.57.1", + "@rollup/rollup-linux-s390x-gnu": "4.57.1", + "@rollup/rollup-linux-x64-gnu": "4.57.1", + "@rollup/rollup-linux-x64-musl": "4.57.1", + "@rollup/rollup-openbsd-x64": "4.57.1", + "@rollup/rollup-openharmony-arm64": "4.57.1", + "@rollup/rollup-win32-arm64-msvc": "4.57.1", + "@rollup/rollup-win32-ia32-msvc": "4.57.1", + "@rollup/rollup-win32-x64-gnu": "4.57.1", + "@rollup/rollup-win32-x64-msvc": "4.57.1", "fsevents": "~2.3.2" } }, @@ -4449,26 +4591,6 @@ "node": ">= 18" } }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, "node_modules/safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", @@ -4476,9 +4598,9 @@ "license": "MIT" }, "node_modules/semver": { - "version": "7.7.3", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", - "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "version": "7.7.4", + "resolved": 
"https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", "dev": true, "license": "ISC", "bin": { @@ -4489,31 +4611,35 @@ } }, "node_modules/send": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/send/-/send-1.2.0.tgz", - "integrity": "sha512-uaW0WwXKpL9blXE2o0bRhoL2EGXIrZxQ2ZQ4mgcfoBxdFmQold+qWsD2jLrfZ0trjKL6vOw0j//eAwcALFjKSw==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/send/-/send-1.2.1.tgz", + "integrity": "sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ==", "license": "MIT", "dependencies": { - "debug": "^4.3.5", + "debug": "^4.4.3", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "etag": "^1.8.1", "fresh": "^2.0.0", - "http-errors": "^2.0.0", - "mime-types": "^3.0.1", + "http-errors": "^2.0.1", + "mime-types": "^3.0.2", "ms": "^2.1.3", "on-finished": "^2.4.1", "range-parser": "^1.2.1", - "statuses": "^2.0.1" + "statuses": "^2.0.2" }, "engines": { "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" } }, "node_modules/serve-static": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-2.2.0.tgz", - "integrity": "sha512-61g9pCh0Vnh7IutZjtLGGpTA355+OPn2TyDv/6ivP2h/AdAVX9azsoxmg2/M6nZeQZNYBEwIcsne1mJd9oQItQ==", + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-2.2.1.tgz", + "integrity": "sha512-xRXBn0pPqQTVQiC8wyQrKs2MOlX24zQ0POGaj0kultvoOCstBQM5yvOhAVSUwOMjQtTvsPWoNCHfPGwaaQJhTw==", "license": "MIT", "dependencies": { "encodeurl": "^2.0.0", @@ -4523,6 +4649,10 @@ }, "engines": { "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" } }, "node_modules/setprototypeof": { @@ -4535,6 +4665,7 @@ "version": "2.0.0", "resolved": 
"https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "license": "MIT", "dependencies": { "shebang-regex": "^3.0.0" }, @@ -4753,9 +4884,9 @@ } }, "node_modules/ts-api-utils": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz", - "integrity": "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==", + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.4.0.tgz", + "integrity": "sha512-3TaVTaAv2gTiMB35i3FiGJaRfwb3Pyn/j3m/bfAvGe8FB7CF6u+LMYqYlDh7reQf7UNvoTvdfAqHGmPGOSsPmA==", "dev": true, "license": "MIT", "engines": { @@ -4834,14 +4965,13 @@ "optional": true }, "node_modules/tsx": { - "version": "4.20.6", - "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.20.6.tgz", - "integrity": "sha512-ytQKuwgmrrkDTFP4LjR0ToE2nqgy886GpvRSpU0JAnrdBYppuY5rLkRUYPU1yCryb24SsKBTL/hlDQAEFVwtZg==", + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.21.0.tgz", + "integrity": "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { - "esbuild": "~0.25.0", + "esbuild": "~0.27.0", "get-tsconfig": "^4.7.5" }, "bin": { @@ -4886,7 +5016,7 @@ "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", "dev": true, - "peer": true, + "license": "Apache-2.0", "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -4920,20 +5050,45 @@ } }, "node_modules/unconfig": { - "version": "7.3.3", - "resolved": "https://registry.npmjs.org/unconfig/-/unconfig-7.3.3.tgz", - "integrity": "sha512-QCkQoOnJF8L107gxfHL0uavn7WD9b3dpBcFX6HtfQYmjw2YzWxGuFQ0N0J6tE9oguCBJn9KOvfqYDCMPHIZrBA==", + 
"version": "7.4.2", + "resolved": "https://registry.npmjs.org/unconfig/-/unconfig-7.4.2.tgz", + "integrity": "sha512-nrMlWRQ1xdTjSnSUqvYqJzbTBFugoqHobQj58B2bc8qxHKBBHMNNsWQFP3Cd3/JZK907voM2geYPWqD4VK3MPQ==", "dev": true, + "license": "MIT", "dependencies": { - "@quansync/fs": "^0.1.5", + "@quansync/fs": "^1.0.0", "defu": "^6.1.4", - "jiti": "^2.5.1", - "quansync": "^0.2.11" + "jiti": "^2.6.1", + "quansync": "^1.0.0", + "unconfig-core": "7.4.2" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/unconfig-core": { + "version": "7.4.2", + "resolved": "https://registry.npmjs.org/unconfig-core/-/unconfig-core-7.4.2.tgz", + "integrity": "sha512-VgPCvLWugINbXvMQDf8Jh0mlbvNjNC6eSUziHsBCMpxR05OPrNrvDnyatdMjRgcHaaNsCqz+wjNXxNw1kRLHUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@quansync/fs": "^1.0.0", + "quansync": "^1.0.0" }, "funding": { "url": "https://github.com/sponsors/antfu" } }, + "node_modules/undici": { + "version": "7.21.0", + "resolved": "https://registry.npmjs.org/undici/-/undici-7.21.0.tgz", + "integrity": "sha512-Hn2tCQpoDt1wv23a68Ctc8Cr/BHpUSfaPYrkajTXOS9IKpxVRx/X5m1K2YkbK2ipgZgxXSgsUinl3x+2YdSSfg==", + "license": "MIT", + "engines": { + "node": ">=20.18.1" + } + }, "node_modules/undici-types": { "version": "6.21.0", "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", @@ -4941,6 +5096,12 @@ "dev": true, "license": "MIT" }, + "node_modules/universal-user-agent": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-7.0.3.tgz", + "integrity": "sha512-TmnEAEAsBJVZM/AADELsK76llnwcf9vMKuPz8JflO1frO8Lchitr0fNaN9d+Ap0BjKtqWqd/J17qeDnXh8CL2A==", + "license": "ISC" + }, "node_modules/unpipe": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", @@ -4970,9 +5131,9 @@ } }, "node_modules/vite": { - "version": "7.3.0", - "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.0.tgz", - 
"integrity": "sha512-dZwN5L1VlUBewiP6H9s2+B3e3Jg96D0vzN+Ry73sOefebhYr9f94wwkMNN/9ouoU8pV1BqA1d1zGk8928cx0rg==", + "version": "7.3.1", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz", + "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==", "dev": true, "license": "MIT", "dependencies": { @@ -5044,504 +5205,20 @@ } } }, - "node_modules/vite/node_modules/@esbuild/aix-ppc64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.2.tgz", - "integrity": "sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "aix" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/android-arm": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.2.tgz", - "integrity": "sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/android-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.2.tgz", - "integrity": "sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/android-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.2.tgz", - "integrity": "sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==", - "cpu": [ - "x64" 
- ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/darwin-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.2.tgz", - "integrity": "sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/darwin-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.2.tgz", - "integrity": "sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/freebsd-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.2.tgz", - "integrity": "sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/freebsd-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.2.tgz", - "integrity": "sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-arm": { - "version": "0.27.2", - "resolved": 
"https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.2.tgz", - "integrity": "sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.2.tgz", - "integrity": "sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-ia32": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.2.tgz", - "integrity": "sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-loong64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.2.tgz", - "integrity": "sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==", - "cpu": [ - "loong64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-mips64el": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.2.tgz", - "integrity": "sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==", - "cpu": [ - "mips64el" - ], - "dev": true, - "license": "MIT", - 
"optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-ppc64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.2.tgz", - "integrity": "sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-riscv64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.2.tgz", - "integrity": "sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-s390x": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.2.tgz", - "integrity": "sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==", - "cpu": [ - "s390x" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/linux-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.2.tgz", - "integrity": "sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/netbsd-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.2.tgz", - 
"integrity": "sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "netbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/netbsd-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.2.tgz", - "integrity": "sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "netbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/openbsd-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.2.tgz", - "integrity": "sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/openbsd-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.2.tgz", - "integrity": "sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/openharmony-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.2.tgz", - "integrity": "sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openharmony" - ], - "engines": { - 
"node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/sunos-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.2.tgz", - "integrity": "sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "sunos" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/win32-arm64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.2.tgz", - "integrity": "sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/win32-ia32": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.2.tgz", - "integrity": "sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/@esbuild/win32-x64": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.2.tgz", - "integrity": "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/vite/node_modules/esbuild": { - "version": "0.27.2", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.2.tgz", - "integrity": "sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==", - 
"dev": true, - "hasInstallScript": true, - "license": "MIT", - "bin": { - "esbuild": "bin/esbuild" - }, - "engines": { - "node": ">=18" - }, - "optionalDependencies": { - "@esbuild/aix-ppc64": "0.27.2", - "@esbuild/android-arm": "0.27.2", - "@esbuild/android-arm64": "0.27.2", - "@esbuild/android-x64": "0.27.2", - "@esbuild/darwin-arm64": "0.27.2", - "@esbuild/darwin-x64": "0.27.2", - "@esbuild/freebsd-arm64": "0.27.2", - "@esbuild/freebsd-x64": "0.27.2", - "@esbuild/linux-arm": "0.27.2", - "@esbuild/linux-arm64": "0.27.2", - "@esbuild/linux-ia32": "0.27.2", - "@esbuild/linux-loong64": "0.27.2", - "@esbuild/linux-mips64el": "0.27.2", - "@esbuild/linux-ppc64": "0.27.2", - "@esbuild/linux-riscv64": "0.27.2", - "@esbuild/linux-s390x": "0.27.2", - "@esbuild/linux-x64": "0.27.2", - "@esbuild/netbsd-arm64": "0.27.2", - "@esbuild/netbsd-x64": "0.27.2", - "@esbuild/openbsd-arm64": "0.27.2", - "@esbuild/openbsd-x64": "0.27.2", - "@esbuild/openharmony-arm64": "0.27.2", - "@esbuild/sunos-x64": "0.27.2", - "@esbuild/win32-arm64": "0.27.2", - "@esbuild/win32-ia32": "0.27.2", - "@esbuild/win32-x64": "0.27.2" - } - }, "node_modules/vitest": { - "version": "4.0.16", - "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.16.tgz", - "integrity": "sha512-E4t7DJ9pESL6E3I8nFjPa4xGUd3PmiWDLsDztS2qXSJWfHtbQnwAWylaBvSNY48I3vr8PTqIZlyK8TE3V3CA4Q==", + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.18.tgz", + "integrity": "sha512-hOQuK7h0FGKgBAas7v0mSAsnvrIgAvWmRFjmzpJ7SwFHH3g1k2u37JtYwOwmEKhK6ZO3v9ggDBBm0La1LCK4uQ==", "dev": true, "license": "MIT", "dependencies": { - "@vitest/expect": "4.0.16", - "@vitest/mocker": "4.0.16", - "@vitest/pretty-format": "4.0.16", - "@vitest/runner": "4.0.16", - "@vitest/snapshot": "4.0.16", - "@vitest/spy": "4.0.16", - "@vitest/utils": "4.0.16", + "@vitest/expect": "4.0.18", + "@vitest/mocker": "4.0.18", + "@vitest/pretty-format": "4.0.18", + "@vitest/runner": "4.0.18", + "@vitest/snapshot": "4.0.18", + 
"@vitest/spy": "4.0.18", + "@vitest/utils": "4.0.18", "es-module-lexer": "^1.7.0", "expect-type": "^1.2.2", "magic-string": "^0.30.21", @@ -5569,10 +5246,10 @@ "@edge-runtime/vm": "*", "@opentelemetry/api": "^1.9.0", "@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0", - "@vitest/browser-playwright": "4.0.16", - "@vitest/browser-preview": "4.0.16", - "@vitest/browser-webdriverio": "4.0.16", - "@vitest/ui": "4.0.16", + "@vitest/browser-playwright": "4.0.18", + "@vitest/browser-preview": "4.0.18", + "@vitest/browser-webdriverio": "4.0.18", + "@vitest/ui": "4.0.18", "happy-dom": "*", "jsdom": "*" }, @@ -5651,9 +5328,24 @@ "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", "license": "ISC" }, + "node_modules/yaml": { + "version": "2.8.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.2.tgz", + "integrity": "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==", + "license": "ISC", + "bin": { + "yaml": "bin.mjs" + }, + "engines": { + "node": ">= 14.6" + }, + "funding": { + "url": "https://github.com/sponsors/eemeli" + } + }, "node_modules/yocto-queue": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", @@ -5672,7 +5364,6 @@ "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", "license": "MIT", - "peer": true, "funding": { "url": "https://github.com/sponsors/colinhacks" } diff --git a/package.json b/package.json index cb755df..bc6f8be 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@modelcontextprotocol/conformance", - "version": "0.1.9", + "version": "0.1.14", "type": "module", "license": "MIT", 
"author": "Anthropic, PBC (https://anthropic.com)", @@ -18,6 +18,7 @@ "lint": "eslint src/ examples/ && prettier --check .", "lint:fix": "eslint src/ examples/ --fix && prettier --write .", "lint:fix_check": "npm run lint:fix && git diff --exit-code --quiet", + "tier-check": "node dist/index.js tier-check", "check": "npm run typecheck && npm run lint", "typecheck": "tsgo --noEmit", "prepack": "npm run build" @@ -46,11 +47,14 @@ "vitest": "^4.0.16" }, "dependencies": { - "@modelcontextprotocol/sdk": "^1.25.1", + "@modelcontextprotocol/sdk": "^1.26.0", "commander": "^14.0.2", "eventsource-parser": "^3.0.6", "express": "^5.1.0", "jose": "^6.1.2", - "zod": "^3.25.76" + "undici": "^7.19.0", + "yaml": "^2.8.2", + "zod": "^3.25.76", + "@octokit/rest": "^22.0.0" } } diff --git a/src/expected-failures.test.ts b/src/expected-failures.test.ts new file mode 100644 index 0000000..05cfc9c --- /dev/null +++ b/src/expected-failures.test.ts @@ -0,0 +1,261 @@ +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { promises as fs } from 'fs'; +import path from 'path'; +import os from 'os'; +import { loadExpectedFailures, evaluateBaseline } from './expected-failures'; +import { ConformanceCheck } from './types'; + +function makeCheck( + id: string, + status: 'SUCCESS' | 'FAILURE' | 'WARNING' | 'SKIPPED' | 'INFO' +): ConformanceCheck { + return { + id, + name: id, + description: `Check ${id}`, + status, + timestamp: new Date().toISOString() + }; +} + +describe('loadExpectedFailures', () => { + let tmpDir: string; + + beforeEach(async () => { + tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), 'conformance-test-')); + }); + + afterEach(async () => { + await fs.rm(tmpDir, { recursive: true }); + }); + + it('loads a valid YAML file with both server and client entries', async () => { + const filePath = path.join(tmpDir, 'baseline.yml'); + await fs.writeFile( + filePath, + `server: + - tools-call-with-progress + - resources-subscribe +client: + - sse-retry + - 
auth/basic-dcr +` + ); + + const result = await loadExpectedFailures(filePath); + expect(result.server).toEqual([ + 'tools-call-with-progress', + 'resources-subscribe' + ]); + expect(result.client).toEqual(['sse-retry', 'auth/basic-dcr']); + }); + + it('loads a file with only server entries', async () => { + const filePath = path.join(tmpDir, 'baseline.yml'); + await fs.writeFile( + filePath, + `server: + - tools-call-with-progress +` + ); + + const result = await loadExpectedFailures(filePath); + expect(result.server).toEqual(['tools-call-with-progress']); + expect(result.client).toBeUndefined(); + }); + + it('handles an empty file', async () => { + const filePath = path.join(tmpDir, 'baseline.yml'); + await fs.writeFile(filePath, ''); + + const result = await loadExpectedFailures(filePath); + expect(result).toEqual({}); + }); + + it('throws on invalid structure (array at top level)', async () => { + const filePath = path.join(tmpDir, 'baseline.yml'); + await fs.writeFile(filePath, '- foo\n- bar\n'); + + await expect(loadExpectedFailures(filePath)).rejects.toThrow( + 'expected an object' + ); + }); + + it('throws if server is not an array', async () => { + const filePath = path.join(tmpDir, 'baseline.yml'); + await fs.writeFile(filePath, 'server: not-an-array\n'); + + await expect(loadExpectedFailures(filePath)).rejects.toThrow( + "'server' must be an array" + ); + }); + + it('throws if client is not an array', async () => { + const filePath = path.join(tmpDir, 'baseline.yml'); + await fs.writeFile(filePath, 'client: 123\n'); + + await expect(loadExpectedFailures(filePath)).rejects.toThrow( + "'client' must be an array" + ); + }); + + it('throws on missing file', async () => { + await expect( + loadExpectedFailures('/nonexistent/path.yml') + ).rejects.toThrow(); + }); +}); + +describe('evaluateBaseline', () => { + it('returns exit 0 when all failures are expected', () => { + const results = [ + { + scenario: 'scenario-a', + checks: [makeCheck('check1', 'SUCCESS'), 
makeCheck('check2', 'FAILURE')] + }, + { + scenario: 'scenario-b', + checks: [makeCheck('check3', 'FAILURE')] + } + ]; + + const result = evaluateBaseline(results, ['scenario-a', 'scenario-b']); + expect(result.exitCode).toBe(0); + expect(result.expectedFailures).toEqual(['scenario-a', 'scenario-b']); + expect(result.unexpectedFailures).toEqual([]); + expect(result.staleEntries).toEqual([]); + }); + + it('returns exit 0 when no failures at all and no baseline', () => { + const results = [ + { + scenario: 'scenario-a', + checks: [makeCheck('check1', 'SUCCESS')] + } + ]; + + const result = evaluateBaseline(results, []); + expect(result.exitCode).toBe(0); + expect(result.expectedFailures).toEqual([]); + expect(result.unexpectedFailures).toEqual([]); + expect(result.staleEntries).toEqual([]); + }); + + it('returns exit 1 for unexpected failures', () => { + const results = [ + { + scenario: 'scenario-a', + checks: [makeCheck('check1', 'FAILURE')] + } + ]; + + const result = evaluateBaseline(results, []); + expect(result.exitCode).toBe(1); + expect(result.unexpectedFailures).toEqual(['scenario-a']); + }); + + it('returns exit 1 for stale baseline entries (scenario now passes)', () => { + const results = [ + { + scenario: 'scenario-a', + checks: [makeCheck('check1', 'SUCCESS')] + } + ]; + + const result = evaluateBaseline(results, ['scenario-a']); + expect(result.exitCode).toBe(1); + expect(result.staleEntries).toEqual(['scenario-a']); + }); + + it('returns exit 1 when both stale and unexpected failures exist', () => { + const results = [ + { + scenario: 'scenario-a', + checks: [makeCheck('check1', 'SUCCESS')] // was expected to fail but passes + }, + { + scenario: 'scenario-b', + checks: [makeCheck('check2', 'FAILURE')] // unexpected failure + } + ]; + + const result = evaluateBaseline(results, ['scenario-a']); + expect(result.exitCode).toBe(1); + expect(result.staleEntries).toEqual(['scenario-a']); + expect(result.unexpectedFailures).toEqual(['scenario-b']); + }); + + 
it('handles warnings as failures', () => { + const results = [ + { + scenario: 'scenario-a', + checks: [makeCheck('check1', 'WARNING')] + } + ]; + + // Not in baseline → unexpected + const result1 = evaluateBaseline(results, []); + expect(result1.exitCode).toBe(1); + expect(result1.unexpectedFailures).toEqual(['scenario-a']); + + // In baseline → expected + const result2 = evaluateBaseline(results, ['scenario-a']); + expect(result2.exitCode).toBe(0); + expect(result2.expectedFailures).toEqual(['scenario-a']); + }); + + it('ignores baseline entries for scenarios not in the run', () => { + const results = [ + { + scenario: 'scenario-a', + checks: [makeCheck('check1', 'SUCCESS')] + } + ]; + + // scenario-z is in baseline but not in the results - should not be stale + const result = evaluateBaseline(results, ['scenario-z']); + expect(result.exitCode).toBe(0); + expect(result.staleEntries).toEqual([]); + }); + + it('handles mixed expected/unexpected/passing scenarios', () => { + const results = [ + { + scenario: 'expected-fail', + checks: [makeCheck('c1', 'FAILURE')] + }, + { + scenario: 'unexpected-fail', + checks: [makeCheck('c2', 'FAILURE')] + }, + { + scenario: 'normal-pass', + checks: [makeCheck('c3', 'SUCCESS')] + } + ]; + + const result = evaluateBaseline(results, ['expected-fail']); + expect(result.exitCode).toBe(1); + expect(result.expectedFailures).toEqual(['expected-fail']); + expect(result.unexpectedFailures).toEqual(['unexpected-fail']); + expect(result.staleEntries).toEqual([]); + }); + + it('skipped and info checks do not count as failures', () => { + const results = [ + { + scenario: 'scenario-a', + checks: [ + makeCheck('c1', 'SUCCESS'), + makeCheck('c2', 'SKIPPED'), + makeCheck('c3', 'INFO') + ] + } + ]; + + // In baseline but passes (only SUCCESS/SKIPPED/INFO) → stale + const result = evaluateBaseline(results, ['scenario-a']); + expect(result.exitCode).toBe(1); + expect(result.staleEntries).toEqual(['scenario-a']); + }); +}); diff --git 
a/src/expected-failures.ts b/src/expected-failures.ts new file mode 100644 index 0000000..8d0655f --- /dev/null +++ b/src/expected-failures.ts @@ -0,0 +1,166 @@ +import { promises as fs } from 'fs'; +import { parse as parseYaml } from 'yaml'; +import { ConformanceCheck } from './types'; +import { COLORS } from './runner/utils'; + +export interface ExpectedFailures { + server?: string[]; + client?: string[]; +} + +export interface BaselineResult { + /** Exit code: 0 if only expected failures, 1 if unexpected failures or stale baseline */ + exitCode: number; + /** Scenarios that failed unexpectedly (not in baseline) */ + unexpectedFailures: string[]; + /** Scenarios in baseline that now pass (stale entries) */ + staleEntries: string[]; + /** Scenarios that failed as expected */ + expectedFailures: string[]; +} + +/** + * Load and parse an expected-failures YAML file. + * + * Expected format: + * ```yaml + * server: + * - scenario-name-1 + * - scenario-name-2 + * client: + * - scenario-name-3 + * ``` + */ +export async function loadExpectedFailures( + filePath: string +): Promise { + const content = await fs.readFile(filePath, 'utf-8'); + const parsed = parseYaml(content); + + if (parsed === null || parsed === undefined) { + return {}; + } + + if (typeof parsed !== 'object' || Array.isArray(parsed)) { + throw new Error( + `Invalid expected-failures file: expected an object with 'server' and/or 'client' keys` + ); + } + + const result: ExpectedFailures = {}; + + if (parsed.server !== undefined) { + if (!Array.isArray(parsed.server)) { + throw new Error( + `Invalid expected-failures file: 'server' must be an array of scenario names` + ); + } + result.server = parsed.server.map(String); + } + + if (parsed.client !== undefined) { + if (!Array.isArray(parsed.client)) { + throw new Error( + `Invalid expected-failures file: 'client' must be an array of scenario names` + ); + } + result.client = parsed.client.map(String); + } + + return result; +} + +/** + * Evaluate scenario 
results against an expected-failures baseline. + * + * Rules: + * - Scenario fails and IS in baseline → expected (ok) + * - Scenario fails and is NOT in baseline → unexpected failure (exit 1) + * - Scenario passes and IS in baseline → stale entry (exit 1, must update baseline) + * - Scenario passes and is NOT in baseline → normal pass (ok) + */ +export function evaluateBaseline( + results: { scenario: string; checks: ConformanceCheck[] }[], + expectedScenarios: string[] +): BaselineResult { + const expectedSet = new Set(expectedScenarios); + const unexpectedFailures: string[] = []; + const staleEntries: string[] = []; + const expectedFailures: string[] = []; + + const seenScenarios = new Set(); + + for (const result of results) { + seenScenarios.add(result.scenario); + const hasFailed = + result.checks.some((c) => c.status === 'FAILURE') || + result.checks.some((c) => c.status === 'WARNING'); + const isExpected = expectedSet.has(result.scenario); + + if (hasFailed && isExpected) { + expectedFailures.push(result.scenario); + } else if (hasFailed && !isExpected) { + unexpectedFailures.push(result.scenario); + } else if (!hasFailed && isExpected) { + staleEntries.push(result.scenario); + } + // !hasFailed && !isExpected → normal pass, nothing to do + } + + // Baseline entries that reference scenarios not present in this run are + // intentionally ignored: they are not stale — the scenario may simply not be in this suite + + const exitCode = + unexpectedFailures.length > 0 || staleEntries.length > 0 ? 1 : 0; + + return { exitCode, unexpectedFailures, staleEntries, expectedFailures }; +} + +/** + * Print baseline evaluation results. 
+ */ +export function printBaselineResults(result: BaselineResult): void { + if (result.expectedFailures.length > 0) { + console.log( + `\n${COLORS.YELLOW}Expected failures (in baseline):${COLORS.RESET}` + ); + for (const scenario of result.expectedFailures) { + console.log(` ~ ${scenario}`); + } + } + + if (result.staleEntries.length > 0) { + console.log( + `\n${COLORS.RED}Stale baseline entries (now passing - remove from baseline):${COLORS.RESET}` + ); + for (const scenario of result.staleEntries) { + console.log(` ✓ ${scenario}`); + } + } + + if (result.unexpectedFailures.length > 0) { + console.log( + `\n${COLORS.RED}Unexpected failures (not in baseline):${COLORS.RESET}` + ); + for (const scenario of result.unexpectedFailures) { + console.log(` ✗ ${scenario}`); + } + } + + if (result.exitCode === 0) { + console.log( + `\n${COLORS.GREEN}Baseline check passed: all failures are expected.${COLORS.RESET}` + ); + } else { + if (result.staleEntries.length > 0) { + console.log( + `\n${COLORS.RED}Baseline is stale: update your expected-failures file to remove passing scenarios.${COLORS.RESET}` + ); + } + if (result.unexpectedFailures.length > 0) { + console.log( + `\n${COLORS.RED}Unexpected failures detected: these scenarios are not in your expected-failures baseline.${COLORS.RESET}` + ); + } + } +} diff --git a/src/index.ts b/src/index.ts index afa7fd9..f2bf103 100644 --- a/src/index.ts +++ b/src/index.ts @@ -19,12 +19,52 @@ import { listPendingClientScenarios, listAuthScenarios, listMetadataScenarios, - listServerAuthScenarios + listServerAuthScenarios, + listCoreScenarios, + listExtensionScenarios, + listBackcompatScenarios, + listScenariosForSpec, + listClientScenariosForSpec, + getScenarioSpecVersions, + ALL_SPEC_VERSIONS } from './scenarios'; +import type { SpecVersion } from './scenarios'; import { ConformanceCheck } from './types'; import { ClientOptionsSchema, ServerOptionsSchema } from './schemas'; +import { + loadExpectedFailures, + evaluateBaseline, + 
printBaselineResults +} from './expected-failures'; +import { createTierCheckCommand } from './tier-check'; import packageJson from '../package.json'; +function resolveSpecVersion(value: string): SpecVersion { + if (ALL_SPEC_VERSIONS.includes(value as SpecVersion)) { + return value as SpecVersion; + } + console.error(`Unknown spec version: ${value}`); + console.error(`Valid versions: ${ALL_SPEC_VERSIONS.join(', ')}`); + process.exit(1); +} + +// Note on naming: `command` refers to which CLI command is calling this. +// The `client` command tests Scenario objects (which test clients), +// and the `server` command tests ClientScenario objects (which test servers). +// This matches the inverted naming in scenarios/index.ts. +function filterScenariosBySpecVersion( + allScenarios: string[], + version: SpecVersion, + command: 'client' | 'server' +): string[] { + const versionScenarios = + command === 'client' + ? listScenariosForSpec(version) + : listClientScenariosForSpec(version); + const allowed = new Set(versionScenarios); + return allScenarios.filter((s) => allowed.has(s)); +} + const program = new Command(); program @@ -42,11 +82,24 @@ program .option('--scenario ', 'Scenario to test') .option('--suite ', 'Run a suite of tests in parallel (e.g., "auth")') .option('--timeout ', 'Timeout in milliseconds', '30000') + .option( + '--expected-failures ', + 'Path to YAML file listing expected failures (baseline)' + ) + .option('-o, --output-dir ', 'Save results to this directory') + .option( + '--spec-version ', + 'Filter scenarios by spec version (cumulative for date versions)' + ) .option('--verbose', 'Show verbose output') .action(async (options) => { try { const timeout = parseInt(options.timeout, 10); const verbose = options.verbose ?? false; + const outputDir = options.outputDir; + const specVersionFilter = options.specVersion + ? 
resolveSpecVersion(options.specVersion) + : undefined; // Handle suite mode if (options.suite) { @@ -57,6 +110,9 @@ program const suites: Record string[]> = { all: listScenarios, + core: listCoreScenarios, + extensions: listExtensionScenarios, + backcompat: listBackcompatScenarios, auth: listAuthScenarios, metadata: listMetadataScenarios, 'sep-835': () => @@ -70,7 +126,14 @@ program process.exit(1); } - const scenarios = suites[suiteName](); + let scenarios = suites[suiteName](); + if (specVersionFilter) { + scenarios = filterScenariosBySpecVersion( + scenarios, + specVersionFilter, + 'client' + ); + } console.log( `Running ${suiteName} suite (${scenarios.length} scenarios) in parallel...\n` ); @@ -81,7 +144,8 @@ program const result = await runConformanceTest( options.command, scenarioName, - timeout + timeout, + outputDir ); return { scenario: scenarioName, @@ -149,6 +213,17 @@ program console.log( `\nTotal: ${totalPassed} passed, ${totalFailed} failed, ${totalWarnings} warnings` ); + + if (options.expectedFailures) { + const expectedFailuresConfig = await loadExpectedFailures( + options.expectedFailures + ); + const baselineScenarios = expectedFailuresConfig.client ?? []; + const baselineResult = evaluateBaseline(results, baselineScenarios); + printBaselineResults(baselineResult); + process.exit(baselineResult.exitCode); + } + process.exit(totalFailed > 0 || totalWarnings > 0 ? 
1 : 0); } @@ -157,7 +232,9 @@ program console.error('Either --scenario or --suite is required'); console.error('\nAvailable client scenarios:'); listScenarios().forEach((s) => console.error(` - ${s}`)); - console.error('\nAvailable suites: all, auth, metadata, sep-835'); + console.error( + '\nAvailable suites: all, core, extensions, backcompat, auth, metadata, sep-835' + ); process.exit(1); } @@ -166,7 +243,7 @@ program // If no command provided, run in interactive mode if (!validated.command) { - await runInteractiveMode(validated.scenario, verbose); + await runInteractiveMode(validated.scenario, verbose, outputDir); process.exit(0); } @@ -174,14 +251,30 @@ program const result = await runConformanceTest( validated.command, validated.scenario, - timeout + timeout, + outputDir ); const { overallFailure } = printClientResults( result.checks, verbose, - result.clientOutput + result.clientOutput, + result.allowClientError ); + + if (options.expectedFailures) { + const expectedFailuresConfig = await loadExpectedFailures( + options.expectedFailures + ); + const baselineScenarios = expectedFailuresConfig.client ?? []; + const baselineResult = evaluateBaseline( + [{ scenario: validated.scenario, checks: result.checks }], + baselineScenarios + ); + printBaselineResults(baselineResult); + process.exit(baselineResult.exitCode); + } + process.exit(overallFailure ? 
1 : 0); } catch (error) { if (error instanceof ZodError) { @@ -220,23 +313,42 @@ program 'active' ) .option('--timeout ', 'Timeout in milliseconds', '30000') + .option( + '--expected-failures ', + 'Path to YAML file listing expected failures (baseline)' + ) + .option('-o, --output-dir ', 'Save results to this directory') + .option( + '--spec-version ', + 'Filter scenarios by spec version (cumulative for date versions)' + ) .option('--verbose', 'Show verbose output (JSON instead of pretty print)') .option( '--interactive', 'Interactive auth mode: opens browser for login instead of auto-redirect' ) + .option( + '--client-id ', + 'Pre-registered OAuth client ID (skips CIMD/DCR registration)' + ) + .option( + '--client-secret ', + 'Pre-registered OAuth client secret (used with --client-id)' + ) .action(async (options) => { try { const verbose = options.verbose ?? false; const timeout = parseInt(options.timeout, 10); + const outputDir = options.outputDir; const suite = options.suite?.toLowerCase() || 'active'; - - // Check if this is an auth test + const specVersionFilter = options.specVersion + ? 
resolveSpecVersion(options.specVersion) + : undefined; const isAuthTest = suite === 'auth' || options.scenario?.startsWith('server-auth/'); + // Input validation if (isAuthTest) { - // Auth testing mode - requires --url or --command if (!options.url && !options.command) { console.error( 'For auth testing, either --url or --command is required' @@ -247,140 +359,158 @@ program ); process.exit(1); } - - // Get scenarios to run - let scenarios: string[]; - if (options.scenario) { - scenarios = [options.scenario]; - } else { - scenarios = listServerAuthScenarios(); - } - - console.log(`Running auth suite (${scenarios.length} scenarios)...\n`); - - const allResults: { scenario: string; checks: ConformanceCheck[] }[] = - []; - - for (const scenarioName of scenarios) { - console.log(`\n=== Running scenario: ${scenarioName} ===`); - try { - const result = await runServerAuthConformanceTest({ - url: options.url, - command: options.command, - scenarioName, - timeout, - interactive: options.interactive - }); - allResults.push({ scenario: scenarioName, checks: result.checks }); - - if (verbose) { - printServerResults( - result.checks, - result.scenarioDescription, - verbose - ); - } - } catch (error) { - console.error(`Failed to run scenario ${scenarioName}:`, error); - allResults.push({ - scenario: scenarioName, - checks: [ - { - id: scenarioName, - name: scenarioName, - description: 'Failed to run scenario', - status: 'FAILURE', - timestamp: new Date().toISOString(), - errorMessage: - error instanceof Error ? error.message : String(error) - } - ] - }); - } - } - - const { totalFailed } = printServerSummary(allResults); - process.exit(totalFailed > 0 ? 
1 : 0); } else { - // Standard server testing mode - requires --url if (!options.url) { console.error('--url is required for non-auth server testing'); process.exit(1); } + } - // Validate options with Zod - const validated = ServerOptionsSchema.parse(options); - - // If a single scenario is specified, run just that one - if (validated.scenario) { + // Single-scenario fast path (detailed per-check output) + if (options.scenario) { + let checks: ConformanceCheck[]; + let scenarioDescription: string; + + if (isAuthTest) { + const result = await runServerAuthConformanceTest({ + url: options.url, + command: options.command, + scenarioName: options.scenario, + timeout, + interactive: options.interactive, + clientId: options.clientId, + clientSecret: options.clientSecret + }); + checks = result.checks; + scenarioDescription = result.scenarioDescription; + } else { + const validated = ServerOptionsSchema.parse(options); const result = await runServerConformanceTest( validated.url, - validated.scenario + validated.scenario!, + outputDir ); + checks = result.checks; + scenarioDescription = result.scenarioDescription; + } + + const { failed } = printServerResults( + checks, + scenarioDescription, + verbose + ); - const { failed } = printServerResults( - result.checks, - result.scenarioDescription, - verbose + if (options.expectedFailures) { + const expectedFailuresConfig = await loadExpectedFailures( + options.expectedFailures ); - process.exit(failed > 0 ? 1 : 0); - } else { - // Run scenarios based on suite - let scenarios: string[]; - - if (suite === 'all') { - scenarios = listClientScenarios(); - } else if (suite === 'active') { - scenarios = listActiveClientScenarios(); - } else if (suite === 'pending') { - scenarios = listPendingClientScenarios(); - } else { - console.error(`Unknown suite: ${suite}`); - console.error('Available suites: active, all, pending, auth'); - process.exit(1); - } + const baselineScenarios = expectedFailuresConfig.server ?? 
[]; + const baselineResult = evaluateBaseline( + [{ scenario: options.scenario, checks }], + baselineScenarios + ); + printBaselineResults(baselineResult); + process.exit(baselineResult.exitCode); + } - console.log( - `Running ${suite} suite (${scenarios.length} scenarios) against ${validated.url}\n` + process.exit(failed > 0 ? 1 : 0); + } + + // Suite resolution + let scenarios: string[]; + if (isAuthTest) { + scenarios = listServerAuthScenarios(); + } else { + if (suite === 'all') { + scenarios = listClientScenarios(); + } else if (suite === 'active' || suite === 'core') { + scenarios = listActiveClientScenarios(); + } else if (suite === 'pending') { + scenarios = listPendingClientScenarios(); + } else { + console.error(`Unknown suite: ${suite}`); + console.error('Available suites: active, all, core, pending, auth'); + process.exit(1); + } + if (specVersionFilter) { + scenarios = filterScenariosBySpecVersion( + scenarios, + specVersionFilter, + 'server' ); + } + } - const allResults: { scenario: string; checks: ConformanceCheck[] }[] = - []; + console.log( + `Running ${suite} suite (${scenarios.length} scenarios)${options.url ? ` against ${options.url}` : ''}...\n` + ); - for (const scenarioName of scenarios) { - console.log(`\n=== Running scenario: ${scenarioName} ===`); - try { - const result = await runServerConformanceTest( - validated.url, - scenarioName - ); - allResults.push({ - scenario: scenarioName, - checks: result.checks - }); - } catch (error) { - console.error(`Failed to run scenario ${scenarioName}:`, error); - allResults.push({ - scenario: scenarioName, - checks: [ - { - id: scenarioName, - name: scenarioName, - description: 'Failed to run scenario', - status: 'FAILURE', - timestamp: new Date().toISOString(), - errorMessage: - error instanceof Error ? 
error.message : String(error) - } - ] - }); - } - } + // Run loop + const allResults: { scenario: string; checks: ConformanceCheck[] }[] = []; - const { totalFailed } = printServerSummary(allResults); - process.exit(totalFailed > 0 ? 1 : 0); + for (const scenarioName of scenarios) { + console.log(`\n=== Running scenario: ${scenarioName} ===`); + try { + let checks: ConformanceCheck[]; + let scenarioDescription: string; + if (isAuthTest) { + const result = await runServerAuthConformanceTest({ + url: options.url, + command: options.command, + scenarioName, + timeout, + interactive: options.interactive, + clientId: options.clientId, + clientSecret: options.clientSecret + }); + checks = result.checks; + scenarioDescription = result.scenarioDescription; + } else { + const result = await runServerConformanceTest( + options.url, + scenarioName, + outputDir + ); + checks = result.checks; + scenarioDescription = result.scenarioDescription; + } + allResults.push({ scenario: scenarioName, checks }); + if (verbose) { + printServerResults(checks, scenarioDescription, verbose); + } + } catch (error) { + console.error(`Failed to run scenario ${scenarioName}:`, error); + allResults.push({ + scenario: scenarioName, + checks: [ + { + id: scenarioName, + name: scenarioName, + description: 'Failed to run scenario', + status: 'FAILURE', + timestamp: new Date().toISOString(), + errorMessage: + error instanceof Error ? error.message : String(error) + } + ] + }); } } + + // Summary + baseline + const { totalFailed } = printServerSummary(allResults); + + if (options.expectedFailures) { + const expectedFailuresConfig = await loadExpectedFailures( + options.expectedFailures + ); + const baselineScenarios = expectedFailuresConfig.server ?? []; + const baselineResult = evaluateBaseline(allResults, baselineScenarios); + printBaselineResults(baselineResult); + process.exit(baselineResult.exitCode); + } + + process.exit(totalFailed > 0 ? 
1 : 0); } catch (error) { if (error instanceof ZodError) { console.error('Validation error:'); @@ -398,6 +528,9 @@ program } }); +// Tier check command +program.addCommand(createTierCheckCommand()); + // List scenarios command program .command('list') @@ -405,13 +538,30 @@ program .option('--client', 'List client scenarios') .option('--server', 'List server scenarios') .option('--server-auth', 'List server auth scenarios') + .option( + '--spec-version ', + 'Filter scenarios by spec version (cumulative for date versions)' + ) .action((options) => { const showAll = !options.client && !options.server && !options.serverAuth; + const specVersionFilter = options.specVersion + ? resolveSpecVersion(options.specVersion) + : undefined; if (options.server || showAll) { console.log('Server scenarios (test against a server):'); - const serverScenarios = listClientScenarios(); - serverScenarios.forEach((s) => console.log(` - ${s}`)); + let serverScenarios = listClientScenarios(); + if (specVersionFilter) { + serverScenarios = filterScenariosBySpecVersion( + serverScenarios, + specVersionFilter, + 'server' + ); + } + serverScenarios.forEach((s) => { + const v = getScenarioSpecVersions(s); + console.log(` - ${s}${v ? ` [${v}]` : ''}`); + }); } if (options.serverAuth || showAll) { @@ -428,8 +578,18 @@ program console.log(''); } console.log('Client scenarios (test against a client):'); - const clientScenarios = listScenarios(); - clientScenarios.forEach((s) => console.log(` - ${s}`)); + let clientScenarioNames = listScenarios(); + if (specVersionFilter) { + clientScenarioNames = filterScenariosBySpecVersion( + clientScenarioNames, + specVersionFilter, + 'client' + ); + } + clientScenarioNames.forEach((s) => { + const v = getScenarioSpecVersions(s); + console.log(` - ${s}${v ? 
` [${v}]` : ''}`); + }); } }); diff --git a/src/runner/client.ts b/src/runner/client.ts index 7db5461..4525416 100644 --- a/src/runner/client.ts +++ b/src/runner/client.ts @@ -3,7 +3,7 @@ import { promises as fs } from 'fs'; import path from 'path'; import { ConformanceCheck } from '../types'; import { getScenario } from '../scenarios'; -import { ensureResultsDir, createResultDir, formatPrettyChecks } from './utils'; +import { createResultDir, formatPrettyChecks } from './utils'; export interface ClientExecutionResult { exitCode: number; @@ -91,15 +91,20 @@ async function executeClient( export async function runConformanceTest( clientCommand: string, scenarioName: string, - timeout: number = 30000 + timeout: number = 30000, + outputDir?: string ): Promise<{ checks: ConformanceCheck[]; clientOutput: ClientExecutionResult; - resultDir: string; + resultDir?: string; + allowClientError?: boolean; }> { - await ensureResultsDir(); - const resultDir = createResultDir(scenarioName); - await fs.mkdir(resultDir, { recursive: true }); + let resultDir: string | undefined; + + if (outputDir) { + resultDir = createResultDir(outputDir, scenarioName); + await fs.mkdir(resultDir, { recursive: true }); + } // Scenario is guaranteed to exist by CLI validation const scenario = getScenario(scenarioName)!; @@ -138,21 +143,30 @@ export async function runConformanceTest( const checks = scenario.getChecks(); - await fs.writeFile( - path.join(resultDir, 'checks.json'), - JSON.stringify(checks, null, 2) - ); + if (resultDir) { + await fs.writeFile( + path.join(resultDir, 'checks.json'), + JSON.stringify(checks, null, 2) + ); - await fs.writeFile(path.join(resultDir, 'stdout.txt'), clientOutput.stdout); + await fs.writeFile( + path.join(resultDir, 'stdout.txt'), + clientOutput.stdout + ); - await fs.writeFile(path.join(resultDir, 'stderr.txt'), clientOutput.stderr); + await fs.writeFile( + path.join(resultDir, 'stderr.txt'), + clientOutput.stderr + ); - console.error(`Results saved to 
${resultDir}`); + console.error(`Results saved to ${resultDir}`); + } return { checks, clientOutput, - resultDir + resultDir, + allowClientError: scenario.allowClientError }; } finally { await scenario.stop(); @@ -162,7 +176,8 @@ export async function runConformanceTest( export function printClientResults( checks: ConformanceCheck[], verbose: boolean = false, - clientOutput?: ClientExecutionResult + clientOutput?: ClientExecutionResult, + allowClientError: boolean = false ): { passed: number; failed: number; @@ -183,7 +198,10 @@ export function printClientResults( ? clientOutput.exitCode !== 0 : false; const overallFailure = - failed > 0 || warnings > 0 || clientTimedOut || clientExitedWithError; + failed > 0 || + warnings > 0 || + clientTimedOut || + (clientExitedWithError && !allowClientError); if (verbose) { // Verbose mode: JSON goes to stdout for piping to jq/jless @@ -203,7 +221,7 @@ export function printClientResults( console.error(`\n⚠️ CLIENT TIMED OUT - Test incomplete`); } - if (clientExitedWithError && !clientTimedOut) { + if (clientExitedWithError && !clientTimedOut && !allowClientError) { console.error( `\n⚠️ CLIENT EXITED WITH ERROR (code ${clientOutput?.exitCode}) - Test may be incomplete` ); @@ -244,11 +262,15 @@ export function printClientResults( export async function runInteractiveMode( scenarioName: string, - verbose: boolean = false + verbose: boolean = false, + outputDir?: string ): Promise { - await ensureResultsDir(); - const resultDir = createResultDir(scenarioName); - await fs.mkdir(resultDir, { recursive: true }); + let resultDir: string | undefined; + + if (outputDir) { + resultDir = createResultDir(outputDir, scenarioName); + await fs.mkdir(resultDir, { recursive: true }); + } // Scenario is guaranteed to exist by CLI validation const scenario = getScenario(scenarioName)!; @@ -257,23 +279,29 @@ export async function runInteractiveMode( const urls = await scenario.start(); console.log(`Server URL: ${urls.serverUrl}`); - 
console.log('Press Ctrl+C to stop and save checks...'); + console.log('Press Ctrl+C to stop...'); const handleShutdown = async () => { console.log('\nShutting down...'); const checks = scenario.getChecks(); - await fs.writeFile( - path.join(resultDir, 'checks.json'), - JSON.stringify(checks, null, 2) - ); + + if (resultDir) { + await fs.writeFile( + path.join(resultDir, 'checks.json'), + JSON.stringify(checks, null, 2) + ); + } if (verbose) { console.log(`\nChecks:\n${JSON.stringify(checks, null, 2)}`); } else { console.log(`\nChecks:\n${formatPrettyChecks(checks)}`); } - console.log(`\nChecks saved to ${resultDir}/checks.json`); + + if (resultDir) { + console.log(`\nChecks saved to ${resultDir}/checks.json`); + } await scenario.stop(); process.exit(0); diff --git a/src/runner/index.ts b/src/runner/index.ts index 7f642cb..be9a33b 100644 --- a/src/runner/index.ts +++ b/src/runner/index.ts @@ -17,7 +17,6 @@ export { // Export utilities export { - ensureResultsDir, createResultDir, formatPrettyChecks, getStatusColor, diff --git a/src/runner/server.ts b/src/runner/server.ts index 21fa139..8fa4e20 100644 --- a/src/runner/server.ts +++ b/src/runner/server.ts @@ -3,7 +3,7 @@ import path from 'path'; import { spawn, ChildProcess } from 'child_process'; import { ConformanceCheck } from '../types'; import { getClientScenario, getServerAuthScenario } from '../scenarios'; -import { ensureResultsDir, createResultDir, formatPrettyChecks } from './utils'; +import { createResultDir, formatPrettyChecks } from './utils'; import { createAuthServer } from '../scenarios/client/auth/helpers/createAuthServer'; import { ServerLifecycle } from '../scenarios/client/auth/helpers/serverLifecycle'; @@ -22,15 +22,19 @@ function formatMarkdown(text: string): string { export async function runServerConformanceTest( serverUrl: string, - scenarioName: string + scenarioName: string, + outputDir?: string ): Promise<{ checks: ConformanceCheck[]; - resultDir: string; + resultDir?: string; 
scenarioDescription: string; }> { - await ensureResultsDir(); - const resultDir = createResultDir(scenarioName, 'server'); - await fs.mkdir(resultDir, { recursive: true }); + let resultDir: string | undefined; + + if (outputDir) { + resultDir = createResultDir(outputDir, scenarioName, 'server'); + await fs.mkdir(resultDir, { recursive: true }); + } // Scenario is guaranteed to exist by CLI validation const scenario = getClientScenario(scenarioName)!; @@ -41,12 +45,14 @@ export async function runServerConformanceTest( const checks = await scenario.run(serverUrl); - await fs.writeFile( - path.join(resultDir, 'checks.json'), - JSON.stringify(checks, null, 2) - ); + if (resultDir) { + await fs.writeFile( + path.join(resultDir, 'checks.json'), + JSON.stringify(checks, null, 2) + ); - console.log(`Results saved to ${resultDir}`); + console.log(`Results saved to ${resultDir}`); + } return { checks, @@ -163,6 +169,8 @@ export async function runServerAuthConformanceTest(options: { scenarioName: string; timeout?: number; interactive?: boolean; + clientId?: string; + clientSecret?: string; }): Promise<{ checks: ConformanceCheck[]; resultDir: string; @@ -173,11 +181,12 @@ export async function runServerAuthConformanceTest(options: { command, scenarioName, timeout = 30000, - interactive = false + interactive = false, + clientId, + clientSecret } = options; - await ensureResultsDir(); - const resultDir = createResultDir(scenarioName, 'server-auth'); + const resultDir = createResultDir('results', scenarioName, 'server-auth'); await fs.mkdir(resultDir, { recursive: true }); // Get the scenario @@ -268,14 +277,18 @@ export async function runServerAuthConformanceTest(options: { console.log( `Running server auth scenario '${scenarioName}' against server: ${serverUrl}` ); - const scenarioChecks = await scenario.run(serverUrl, { interactive }); + const scenarioChecks = await scenario.run(serverUrl, { + interactive, + clientId, + clientSecret + }); checks.push(...scenarioChecks); } else 
if (url) { // --url mode: Just run the scenario against the provided URL console.log( `Running server auth scenario '${scenarioName}' against: ${url}` ); - checks = await scenario.run(url, { interactive }); + checks = await scenario.run(url, { interactive, clientId, clientSecret }); } else { throw new Error( 'Either --url or --command must be provided for auth scenarios' diff --git a/src/runner/utils.ts b/src/runner/utils.ts index 6841bc6..14e9432 100644 --- a/src/runner/utils.ts +++ b/src/runner/utils.ts @@ -1,4 +1,3 @@ -import { promises as fs } from 'fs'; import path from 'path'; import { ConformanceCheck } from '../types'; @@ -51,14 +50,12 @@ export function formatPrettyChecks(checks: ConformanceCheck[]): string { .join('\n'); } -export async function ensureResultsDir(): Promise<string> { - const resultsDir = path.join(process.cwd(), 'results'); - await fs.mkdir(resultsDir, { recursive: true }); - return resultsDir; -} - -export function createResultDir(scenario: string, prefix = ''): string { +export function createResultDir( + baseDir: string, + scenario: string, + prefix = '' +): string { const timestamp = new Date().toISOString().replace(/[:.]/g, '-'); const dirName = prefix ?
`${prefix}-${scenario}` : scenario; - return path.join('results', `${dirName}-${timestamp}`); + return path.join(baseDir, `${dirName}-${timestamp}`); } diff --git a/src/scenarios/client/auth/basic-cimd.ts b/src/scenarios/client/auth/basic-cimd.ts index 64c87d5..9e5fe67 100644 --- a/src/scenarios/client/auth/basic-cimd.ts +++ b/src/scenarios/client/auth/basic-cimd.ts @@ -1,5 +1,5 @@ import type { Scenario, ConformanceCheck } from '../../../types'; -import { ScenarioUrls } from '../../../types'; +import { ScenarioUrls, SpecVersion } from '../../../types'; import { createAuthServer } from './helpers/createAuthServer'; import { createServer } from './helpers/createServer'; import { ServerLifecycle } from './helpers/serverLifecycle'; @@ -22,6 +22,7 @@ export const CIMD_CLIENT_METADATA_URL = */ export class AuthBasicCIMDScenario implements Scenario { name = 'auth/basic-cimd'; + specVersions: SpecVersion[] = ['2025-11-25']; description = 'Tests OAuth flow with Client ID Metadata Documents (SEP-991/URL-based client IDs). 
Server advertises client_id_metadata_document_supported=true and client should use URL as client_id instead of DCR.'; private authServer = new ServerLifecycle(); diff --git a/src/scenarios/client/auth/client-credentials.ts b/src/scenarios/client/auth/client-credentials.ts index b82b5e2..79ab1b2 100644 --- a/src/scenarios/client/auth/client-credentials.ts +++ b/src/scenarios/client/auth/client-credentials.ts @@ -1,6 +1,11 @@ import * as jose from 'jose'; import type { CryptoKey } from 'jose'; -import type { Scenario, ConformanceCheck, ScenarioUrls } from '../../../types'; +import type { + Scenario, + ConformanceCheck, + ScenarioUrls, + SpecVersion +} from '../../../types'; import { createAuthServer } from './helpers/createAuthServer'; import { createServer } from './helpers/createServer'; import { ServerLifecycle } from './helpers/serverLifecycle'; @@ -32,6 +37,7 @@ async function generateTestKeypair(): Promise<{ */ export class ClientCredentialsJwtScenario implements Scenario { name = 'auth/client-credentials-jwt'; + specVersions: SpecVersion[] = ['extension']; description = 'Tests OAuth client_credentials flow with private_key_jwt authentication (SEP-1046)'; @@ -250,6 +256,7 @@ export class ClientCredentialsJwtScenario implements Scenario { */ export class ClientCredentialsBasicScenario implements Scenario { name = 'auth/client-credentials-basic'; + specVersions: SpecVersion[] = ['extension']; description = 'Tests OAuth client_credentials flow with client_secret_basic authentication'; diff --git a/src/scenarios/client/auth/cross-app-access.ts b/src/scenarios/client/auth/cross-app-access.ts new file mode 100644 index 0000000..05a351b --- /dev/null +++ b/src/scenarios/client/auth/cross-app-access.ts @@ -0,0 +1,556 @@ +import * as jose from 'jose'; +import type { CryptoKey } from 'jose'; +import express, { type Request, type Response } from 'express'; +import type { + Scenario, + ConformanceCheck, + ScenarioUrls, + SpecVersion +} from '../../../types'; +import { 
createAuthServer } from './helpers/createAuthServer'; +import { createServer } from './helpers/createServer'; +import { MockTokenVerifier } from './helpers/mockTokenVerifier'; +import { ServerLifecycle } from './helpers/serverLifecycle'; +import { SpecReferences } from './spec-references'; + +const CONFORMANCE_TEST_CLIENT_ID = 'conformance-test-xaa-client'; +const CONFORMANCE_TEST_CLIENT_SECRET = 'conformance-test-xaa-secret'; +const IDP_CLIENT_ID = 'conformance-test-idp-client'; +const DEMO_USER_ID = 'demo-user@example.com'; + +/** + * Generate an EC P-256 keypair for IDP ID token signing. + */ +async function generateIdpKeypair(): Promise<{ + publicKey: CryptoKey; + privateKey: CryptoKey; +}> { + const { publicKey, privateKey } = await jose.generateKeyPair('ES256', { + extractable: true + }); + return { publicKey, privateKey }; +} + +/** + * Create a signed ID token from the IDP + */ +async function createIdpIdToken( + privateKey: CryptoKey, + idpIssuer: string, + audience: string, + userId: string = DEMO_USER_ID +): Promise<string> { + return await new jose.SignJWT({ + sub: userId, + email: userId, + aud: audience + }) + .setProtectedHeader({ alg: 'ES256' }) + .setIssuer(idpIssuer) + .setIssuedAt() + .setExpirationTime('1h') + .sign(privateKey); +} + +/** + * Scenario: Complete Cross-App Access Flow + * + * Tests the complete SEP-990 flow: IDP ID token -> authorization grant -> access token + * This scenario combines both RFC 8693 token exchange and RFC 7523 JWT bearer grant.
+ */ +export class CrossAppAccessCompleteFlowScenario implements Scenario { + name = 'auth/cross-app-access-complete-flow'; + specVersions: SpecVersion[] = ['extension']; + description = + 'Tests complete SEP-990 flow: token exchange + JWT bearer grant (Enterprise Managed OAuth)'; + + private idpServer = new ServerLifecycle(); + private authServer = new ServerLifecycle(); + private mcpServer = new ServerLifecycle(); + private checks: ConformanceCheck[] = []; + private idpPublicKey?: CryptoKey; + private idpPrivateKey?: CryptoKey; + private grantKeypairs: Map<string, CryptoKey> = new Map(); + + async start(): Promise<ScenarioUrls> { + this.checks = []; + + // Generate IDP keypair + const { publicKey, privateKey } = await generateIdpKeypair(); + this.idpPublicKey = publicKey; + this.idpPrivateKey = privateKey; + + // Shared token verifier ensures MCP server only accepts tokens + // actually issued by the auth server + const tokenVerifier = new MockTokenVerifier(this.checks, []); + + // Start IDP server + await this.startIdpServer(); + + // Start auth server with JWT bearer grant support only + // Token exchange is handled by IdP + const authApp = createAuthServer(this.checks, this.authServer.getUrl, { + grantTypesSupported: ['urn:ietf:params:oauth:grant-type:jwt-bearer'], + tokenEndpointAuthMethodsSupported: ['client_secret_basic'], + tokenVerifier, + onTokenRequest: async ({ + grantType, + body, + timestamp, + authBaseUrl, + authorizationHeader + }) => { + // Auth server only handles JWT bearer grant (ID-JAG -> access token) + if (grantType === 'urn:ietf:params:oauth:grant-type:jwt-bearer') { + const mcpResourceUrl = `${this.mcpServer.getUrl()}/mcp`; + return await this.handleJwtBearerGrant( + body, + timestamp, + authBaseUrl, + authorizationHeader, + mcpResourceUrl + ); + } + + return { + error: 'unsupported_grant_type', + errorDescription: `Auth server only supports jwt-bearer grant, got ${grantType}` + }; + } + }); + + await this.authServer.start(authApp); + + // Start MCP server with shared
token verifier + const mcpApp = createServer( + this.checks, + this.mcpServer.getUrl, + this.authServer.getUrl, + { tokenVerifier } + ); + + await this.mcpServer.start(mcpApp); + + // Generate IDP ID token for client + const idpIdToken = await createIdpIdToken( + this.idpPrivateKey!, + this.idpServer.getUrl(), + IDP_CLIENT_ID + ); + + return { + serverUrl: `${this.mcpServer.getUrl()}/mcp`, + context: { + client_id: CONFORMANCE_TEST_CLIENT_ID, + client_secret: CONFORMANCE_TEST_CLIENT_SECRET, + idp_client_id: IDP_CLIENT_ID, + idp_id_token: idpIdToken, + idp_issuer: this.idpServer.getUrl(), + idp_token_endpoint: `${this.idpServer.getUrl()}/token` + } + }; + } + + private async startIdpServer(): Promise<void> { + const app = express(); + app.use(express.json()); + app.use(express.urlencoded({ extended: true })); + + // IDP metadata endpoint + app.get( + '/.well-known/openid-configuration', + (req: Request, res: Response) => { + res.json({ + issuer: this.idpServer.getUrl(), + authorization_endpoint: `${this.idpServer.getUrl()}/authorize`, + token_endpoint: `${this.idpServer.getUrl()}/token`, + jwks_uri: `${this.idpServer.getUrl()}/.well-known/jwks.json`, + grant_types_supported: [ + 'urn:ietf:params:oauth:grant-type:token-exchange' + ] + }); + } + ); + + // IDP token endpoint - handles token exchange (IDP ID token -> ID-JAG) + app.post('/token', async (req: Request, res: Response) => { + const timestamp = new Date().toISOString(); + const grantType = req.body.grant_type; + const subjectToken = req.body.subject_token; + const subjectTokenType = req.body.subject_token_type; + const requestedTokenType = req.body.requested_token_type; + const audience = req.body.audience; + const resource = req.body.resource; + + // Only handle token exchange at IdP + if (grantType !== 'urn:ietf:params:oauth:grant-type:token-exchange') { + this.checks.push({ + id: 'complete-flow-token-exchange', + name: 'CompleteFlowTokenExchange', + description: `IdP expected token-exchange grant, got
${grantType}`, + status: 'FAILURE', + timestamp, + specReferences: [SpecReferences.RFC_8693_TOKEN_EXCHANGE] + }); + res.status(400).json({ + error: 'unsupported_grant_type', + error_description: 'IdP only supports token-exchange' + }); + return; + } + + // Verify all required token exchange parameters per SEP-990 + const missingParams: string[] = []; + if (!subjectToken) missingParams.push('subject_token'); + if (subjectTokenType !== 'urn:ietf:params:oauth:token-type:id_token') { + missingParams.push( + `subject_token_type (expected urn:ietf:params:oauth:token-type:id_token, got ${subjectTokenType || 'missing'})` + ); + } + if (requestedTokenType !== 'urn:ietf:params:oauth:token-type:id-jag') { + missingParams.push( + `requested_token_type (expected urn:ietf:params:oauth:token-type:id-jag, got ${requestedTokenType || 'missing'})` + ); + } + if (!audience) missingParams.push('audience'); + if (!resource) missingParams.push('resource'); + + if (missingParams.length > 0) { + this.checks.push({ + id: 'complete-flow-token-exchange', + name: 'CompleteFlowTokenExchange', + description: `Token exchange missing or invalid required parameters: ${missingParams.join(', ')}`, + status: 'FAILURE', + timestamp, + specReferences: [ + SpecReferences.RFC_8693_TOKEN_EXCHANGE, + SpecReferences.SEP_990_ENTERPRISE_OAUTH + ] + }); + res.status(400).json({ + error: 'invalid_request', + error_description: `Missing or invalid required parameters: ${missingParams.join(', ')}` + }); + return; + } + + try { + // Verify the IDP ID token + const { payload } = await jose.jwtVerify( + subjectToken, + this.idpPublicKey!, + { + audience: IDP_CLIENT_ID, + issuer: this.idpServer.getUrl() + } + ); + + this.checks.push({ + id: 'complete-flow-token-exchange', + name: 'CompleteFlowTokenExchange', + description: + 'Successfully exchanged IDP ID token for ID-JAG at IdP with all required parameters', + status: 'SUCCESS', + timestamp, + specReferences: [ + SpecReferences.RFC_8693_TOKEN_EXCHANGE, + 
SpecReferences.SEP_990_ENTERPRISE_OAUTH + ] + }); + + // Create ID-JAG (ID-bound JSON Assertion Grant) + // Include resource and client_id claims per SEP-990 + const userId = payload.sub as string; + const { publicKey, privateKey } = await jose.generateKeyPair('ES256'); + this.grantKeypairs.set(userId, publicKey); + + // The IdP uses CONFORMANCE_TEST_CLIENT_ID (the MCP Client's client_id + // at the AS), not the IdP client_id from the request body. + // Per Section 6.1: "the IdP will need to be aware of the MCP Client's + // client_id that it normally uses with the MCP Server." + const idJag = await new jose.SignJWT({ + sub: userId, + resource: resource, + client_id: CONFORMANCE_TEST_CLIENT_ID + }) + .setProtectedHeader({ alg: 'ES256', typ: 'oauth-id-jag+jwt' }) + .setIssuer(this.idpServer.getUrl()) + .setAudience(audience) + .setIssuedAt() + .setExpirationTime('5m') + .setJti(crypto.randomUUID()) + .sign(privateKey); + + res.json({ + access_token: idJag, + issued_token_type: 'urn:ietf:params:oauth:token-type:id-jag', + token_type: 'N_A' + }); + } catch (e) { + const errorMessage = e instanceof Error ? e.message : String(e); + this.checks.push({ + id: 'complete-flow-token-exchange', + name: 'CompleteFlowTokenExchange', + description: `Token exchange failed: ${errorMessage}`, + status: 'FAILURE', + timestamp, + specReferences: [SpecReferences.RFC_8693_TOKEN_EXCHANGE] + }); + res.status(400).json({ + error: 'invalid_grant', + error_description: 'Invalid ID token' + }); + } + }); + + await this.idpServer.start(app); + } + + private async handleJwtBearerGrant( + body: Record, + timestamp: string, + authBaseUrl: string, + authorizationHeader?: string, + mcpResourceUrl?: string + ): Promise { + // 1. 
Verify client authentication (client_secret_basic) + if (!authorizationHeader || !authorizationHeader.startsWith('Basic ')) { + this.checks.push({ + id: 'complete-flow-jwt-bearer', + name: 'CompleteFlowJwtBearer', + description: + 'Missing or invalid Authorization header for client_secret_basic authentication', + status: 'FAILURE', + timestamp, + specReferences: [SpecReferences.SEP_990_ENTERPRISE_OAUTH], + details: { + expected: 'Authorization: Basic ', + received: authorizationHeader || 'missing' + } + }); + return { + error: 'invalid_client', + errorDescription: + 'Client authentication required (client_secret_basic)', + statusCode: 401 + }; + } + + const base64Credentials = authorizationHeader.slice('Basic '.length); + const decoded = Buffer.from(base64Credentials, 'base64').toString('utf-8'); + const separatorIndex = decoded.indexOf(':'); + if (separatorIndex === -1) { + this.checks.push({ + id: 'complete-flow-jwt-bearer', + name: 'CompleteFlowJwtBearer', + description: 'Malformed Basic auth header (no colon separator)', + status: 'FAILURE', + timestamp, + specReferences: [SpecReferences.SEP_990_ENTERPRISE_OAUTH] + }); + return { + error: 'invalid_client', + errorDescription: 'Malformed Basic auth', + statusCode: 401 + }; + } + + const authClientId = decodeURIComponent(decoded.slice(0, separatorIndex)); + const authClientSecret = decodeURIComponent( + decoded.slice(separatorIndex + 1) + ); + + if ( + authClientId !== CONFORMANCE_TEST_CLIENT_ID || + authClientSecret !== CONFORMANCE_TEST_CLIENT_SECRET + ) { + this.checks.push({ + id: 'complete-flow-jwt-bearer', + name: 'CompleteFlowJwtBearer', + description: `Client authentication failed: invalid credentials (client_id: ${authClientId})`, + status: 'FAILURE', + timestamp, + specReferences: [SpecReferences.SEP_990_ENTERPRISE_OAUTH] + }); + return { + error: 'invalid_client', + errorDescription: 'Invalid client credentials', + statusCode: 401 + }; + } + + // 2. 
Verify assertion is present + const assertion = body.assertion; + if (!assertion) { + this.checks.push({ + id: 'complete-flow-jwt-bearer', + name: 'CompleteFlowJwtBearer', + description: 'Missing assertion in JWT bearer grant', + status: 'FAILURE', + timestamp, + specReferences: [SpecReferences.RFC_7523_JWT_BEARER] + }); + return { + error: 'invalid_request', + errorDescription: 'Missing assertion' + }; + } + + try { + // 3. Verify the ID-JAG header has the correct typ + const header = jose.decodeProtectedHeader(assertion); + if (header.typ !== 'oauth-id-jag+jwt') { + this.checks.push({ + id: 'complete-flow-jwt-bearer', + name: 'CompleteFlowJwtBearer', + description: `ID-JAG has wrong typ header: expected oauth-id-jag+jwt, got ${header.typ}`, + status: 'FAILURE', + timestamp, + specReferences: [SpecReferences.SEP_990_ENTERPRISE_OAUTH] + }); + return { + error: 'invalid_grant', + errorDescription: 'Invalid ID-JAG typ header' + }; + } + + // 4. Decode and verify the ID-JAG + const decoded = jose.decodeJwt(assertion); + const userId = decoded.sub as string; + const publicKey = this.grantKeypairs.get(userId); + + if (!publicKey) { + throw new Error('Unknown authorization grant'); + } + + // Verify signature and audience + const withoutSlash = authBaseUrl.replace(/\/+$/, ''); + const withSlash = `${withoutSlash}/`; + + await jose.jwtVerify(assertion, publicKey, { + audience: [withoutSlash, withSlash], + clockTolerance: 30 + }); + + // 5. 
Verify client_id in ID-JAG matches the authenticating client (Section 5.1) + const jagClientId = decoded.client_id as string | undefined; + if (jagClientId !== authClientId) { + this.checks.push({ + id: 'complete-flow-jwt-bearer', + name: 'CompleteFlowJwtBearer', + description: `ID-JAG client_id (${jagClientId}) does not match authenticating client (${authClientId})`, + status: 'FAILURE', + timestamp, + specReferences: [SpecReferences.SEP_990_ENTERPRISE_OAUTH], + details: { + jagClientId, + authClientId + } + }); + return { + error: 'invalid_grant', + errorDescription: + 'ID-JAG client_id does not match authenticating client' + }; + } + + // 6. Verify resource claim in ID-JAG matches the MCP server resource + const jagResource = decoded.resource as string | undefined; + if (mcpResourceUrl && jagResource !== mcpResourceUrl) { + this.checks.push({ + id: 'complete-flow-jwt-bearer', + name: 'CompleteFlowJwtBearer', + description: `ID-JAG resource (${jagResource}) does not match MCP server resource (${mcpResourceUrl})`, + status: 'FAILURE', + timestamp, + specReferences: [SpecReferences.SEP_990_ENTERPRISE_OAUTH], + details: { + jagResource, + expectedResource: mcpResourceUrl + } + }); + return { + error: 'invalid_grant', + errorDescription: 'ID-JAG resource does not match MCP server resource' + }; + } + + this.checks.push({ + id: 'complete-flow-jwt-bearer', + name: 'CompleteFlowJwtBearer', + description: + 'Successfully verified client auth, ID-JAG claims, and exchanged for access token', + status: 'SUCCESS', + timestamp, + specReferences: [ + SpecReferences.RFC_7523_JWT_BEARER, + SpecReferences.SEP_990_ENTERPRISE_OAUTH + ] + }); + + const scopes = body.scope ? 
body.scope.split(' ') : []; + return { + token: `test-token-${Date.now()}`, + scopes + }; + } catch (e) { + this.checks.push({ + id: 'complete-flow-jwt-bearer', + name: 'CompleteFlowJwtBearer', + description: `JWT bearer grant failed: ${e}`, + status: 'FAILURE', + timestamp, + specReferences: [SpecReferences.RFC_7523_JWT_BEARER] + }); + return { + error: 'invalid_grant', + errorDescription: 'Invalid authorization grant' + }; + } + } + + async stop() { + await this.idpServer.stop(); + await this.authServer.stop(); + await this.mcpServer.stop(); + } + + getChecks(): ConformanceCheck[] { + const hasTokenExchangeCheck = this.checks.some( + (c) => c.id === 'complete-flow-token-exchange' + ); + const hasJwtBearerCheck = this.checks.some( + (c) => c.id === 'complete-flow-jwt-bearer' + ); + + if (!hasTokenExchangeCheck) { + this.checks.push({ + id: 'complete-flow-token-exchange', + name: 'CompleteFlowTokenExchange', + description: 'Client did not perform token exchange', + status: 'FAILURE', + timestamp: new Date().toISOString(), + specReferences: [ + SpecReferences.RFC_8693_TOKEN_EXCHANGE, + SpecReferences.SEP_990_ENTERPRISE_OAUTH + ] + }); + } + + if (!hasJwtBearerCheck) { + this.checks.push({ + id: 'complete-flow-jwt-bearer', + name: 'CompleteFlowJwtBearer', + description: 'Client did not perform JWT bearer grant exchange', + status: 'FAILURE', + timestamp: new Date().toISOString(), + specReferences: [ + SpecReferences.RFC_7523_JWT_BEARER, + SpecReferences.SEP_990_ENTERPRISE_OAUTH + ] + }); + } + + return this.checks; + } +} diff --git a/src/scenarios/client/auth/discovery-metadata.ts b/src/scenarios/client/auth/discovery-metadata.ts index db7e81b..be79a15 100644 --- a/src/scenarios/client/auth/discovery-metadata.ts +++ b/src/scenarios/client/auth/discovery-metadata.ts @@ -87,6 +87,7 @@ function createMetadataScenario(config: MetadataScenarioConfig): Scenario { return { name: `auth/${config.name}`, + specVersions: ['2025-11-25'], description: `Tests Basic OAuth metadata 
discovery flow. **PRM:** ${config.prmLocation}${config.inWwwAuth ? '' : ' (not in WWW-Authenticate)'} diff --git a/src/scenarios/client/auth/helpers/createAuthServer.ts b/src/scenarios/client/auth/helpers/createAuthServer.ts index c9b37ae..361b87d 100644 --- a/src/scenarios/client/auth/helpers/createAuthServer.ts +++ b/src/scenarios/client/auth/helpers/createAuthServer.ts @@ -1,9 +1,23 @@ import express, { Request, Response } from 'express'; +import { createHash } from 'crypto'; import type { ConformanceCheck } from '../../../../types'; import { createRequestLogger } from '../../../request-logger'; import { SpecReferences } from '../spec-references'; import { MockTokenVerifier } from './mockTokenVerifier'; +/** + * Compute S256 code challenge from a code verifier. + * BASE64URL(SHA256(code_verifier)) + */ +function computeS256Challenge(codeVerifier: string): string { + const hash = createHash('sha256').update(codeVerifier).digest(); + return hash + .toString('base64') + .replace(/\+/g, '-') + .replace(/\//g, '_') + .replace(/=+$/, ''); +} + export interface TokenRequestResult { token: string; scopes: string[]; @@ -29,6 +43,10 @@ export interface AuthServerOptions { * Defaults to true - CIMD is preferred over DCR when available. */ clientIdMetadataDocumentSupported?: boolean; + /** Set to true to NOT advertise registration_endpoint (for pre-registration tests) */ + disableDynamicRegistration?: boolean; + /** PKCE code_challenge_methods_supported. Set to null to omit from metadata. 
Default: ['S256'] */ + codeChallengeMethodsSupported?: string[] | null; tokenVerifier?: MockTokenVerifier; onTokenRequest?: (requestData: { scope?: string; @@ -45,6 +63,7 @@ export interface AuthServerOptions { onAuthorizationRequest?: (requestData: { clientId?: string; scope?: string; + resource?: string; timestamp: string; }) => void; onRegistrationRequest?: (req: Request) => { @@ -70,6 +89,8 @@ export function createAuthServer( tokenEndpointAuthSigningAlgValuesSupported, // Default to true - CIMD is preferred over DCR clientIdMetadataDocumentSupported = true, + disableDynamicRegistration = false, + codeChallengeMethodsSupported = ['S256'], tokenVerifier, onTokenRequest, onAuthorizationRequest, @@ -78,6 +99,8 @@ export function createAuthServer( // Track scopes from the most recent authorization request let lastAuthorizationScopes: string[] = []; + // Track PKCE code_challenge for verification in token request + let storedCodeChallenge: string | undefined; const authRoutes = { authorization_endpoint: `${routePrefix}/authorize`, @@ -120,11 +143,16 @@ export function createAuthServer( issuer: getAuthBaseUrl(), authorization_endpoint: `${getAuthBaseUrl()}${authRoutes.authorization_endpoint}`, token_endpoint: `${getAuthBaseUrl()}${authRoutes.token_endpoint}`, - registration_endpoint: `${getAuthBaseUrl()}${authRoutes.registration_endpoint}`, + ...(!disableDynamicRegistration && { + registration_endpoint: `${getAuthBaseUrl()}${authRoutes.registration_endpoint}` + }), introspection_endpoint: `${getAuthBaseUrl()}${authRoutes.introspection_endpoint}`, response_types_supported: ['code'], grant_types_supported: grantTypesSupported, - code_challenge_methods_supported: ['S256'], + // PKCE support - null means omit from metadata (for negative testing) + ...(codeChallengeMethodsSupported !== null && { + code_challenge_methods_supported: codeChallengeMethodsSupported + }), token_endpoint_auth_methods_supported: tokenEndpointAuthMethodsSupported, 
...(tokenEndpointAuthSigningAlgValuesSupported && { token_endpoint_auth_signing_alg_values_supported: @@ -189,6 +217,41 @@ export function createAuthServer( } }); + // PKCE: Store code_challenge for later verification + const codeChallenge = req.query.code_challenge as string | undefined; + const codeChallengeMethod = req.query.code_challenge_method as + | string + | undefined; + storedCodeChallenge = codeChallenge; + + // PKCE: Check code_challenge is present + checks.push({ + id: 'pkce-code-challenge-sent', + name: 'PKCE Code Challenge', + description: codeChallenge + ? 'Client sent code_challenge in authorization request' + : 'Client MUST send code_challenge in authorization request', + status: codeChallenge ? 'SUCCESS' : 'FAILURE', + timestamp, + specReferences: [SpecReferences.MCP_PKCE] + }); + + // PKCE: Check S256 method is used + checks.push({ + id: 'pkce-s256-method-used', + name: 'PKCE S256 Method', + description: + codeChallengeMethod === 'S256' + ? 'Client used S256 code challenge method' + : 'Client MUST use S256 code challenge method when technically capable', + status: codeChallengeMethod === 'S256' ? 'SUCCESS' : 'FAILURE', + timestamp, + specReferences: [SpecReferences.MCP_PKCE], + details: { + method: codeChallengeMethod || 'not specified' + } + }); + // Track scopes from authorization request for token issuance const scopeParam = req.query.scope as string | undefined; lastAuthorizationScopes = scopeParam ? 
scopeParam.split(' ') : []; @@ -197,6 +260,7 @@ export function createAuthServer( onAuthorizationRequest({ clientId, scope: scopeParam, + resource: req.query.resource as string | undefined, timestamp }); } @@ -230,6 +294,61 @@ export function createAuthServer( } }); + // PKCE: Check code_verifier is present (only for authorization_code grant) + const codeVerifier = req.body.code_verifier as string | undefined; + if (grantType === 'authorization_code') { + checks.push({ + id: 'pkce-code-verifier-sent', + name: 'PKCE Code Verifier', + description: codeVerifier + ? 'Client sent code_verifier in token request' + : 'Client MUST send code_verifier in token request', + status: codeVerifier ? 'SUCCESS' : 'FAILURE', + timestamp, + specReferences: [SpecReferences.MCP_PKCE] + }); + + // PKCE: Validate code_verifier matches code_challenge (S256) + // Fail if either is missing + const computedChallenge = + codeVerifier && storedCodeChallenge + ? computeS256Challenge(codeVerifier) + : undefined; + const matches = + computedChallenge !== undefined && + computedChallenge === storedCodeChallenge; + + let description: string; + if (!storedCodeChallenge && !codeVerifier) { + description = + 'Neither code_challenge nor code_verifier were sent - PKCE is required'; + } else if (!storedCodeChallenge) { + description = + 'code_challenge was not sent in authorization request - PKCE is required'; + } else if (!codeVerifier) { + description = + 'code_verifier was not sent in token request - PKCE is required'; + } else if (matches) { + description = 'code_verifier correctly matches code_challenge (S256)'; + } else { + description = 'code_verifier does not match code_challenge'; + } + + checks.push({ + id: 'pkce-verifier-matches-challenge', + name: 'PKCE Verifier Validation', + description, + status: matches ? 
'SUCCESS' : 'FAILURE', + timestamp, + specReferences: [SpecReferences.MCP_PKCE], + details: { + matches, + storedChallenge: storedCodeChallenge || 'not sent', + computedChallenge: computedChallenge || 'not computed' + } + }); + } + let token = `test-token-${Date.now()}`; let scopes: string[] = lastAuthorizationScopes; diff --git a/src/scenarios/client/auth/helpers/createServer.ts b/src/scenarios/client/auth/helpers/createServer.ts index de230c8..c836630 100644 --- a/src/scenarios/client/auth/helpers/createServer.ts +++ b/src/scenarios/client/auth/helpers/createServer.ts @@ -22,6 +22,8 @@ export interface ServerOptions { includeScopeInWwwAuth?: boolean; authMiddleware?: express.RequestHandler; tokenVerifier?: MockTokenVerifier; + /** Override the resource field in PRM response (for testing resource mismatch) */ + prmResourceOverride?: string; } export function createServer( @@ -36,45 +38,52 @@ export function createServer( scopesSupported, includePrmInWwwAuth = true, includeScopeInWwwAuth = false, - tokenVerifier + tokenVerifier, + prmResourceOverride } = options; - const server = new Server( - { - name: 'auth-prm-pathbased-server', - version: '1.0.0' - }, - { - capabilities: { - tools: {} + // Factory: create a fresh Server per request to avoid "Already connected" errors + // after the v1.26.0 security fix (GHSA-345p-7cg4-v4c7) + function createMcpServer() { + const server = new Server( + { + name: 'auth-prm-pathbased-server', + version: '1.0.0' + }, + { + capabilities: { + tools: {} + } } - } - ); + ); + + server.setRequestHandler(ListToolsRequestSchema, async () => { + return { + tools: [ + { + name: 'test-tool', + inputSchema: { type: 'object' } + } + ] + }; + }); - server.setRequestHandler(ListToolsRequestSchema, async () => { - return { - tools: [ - { - name: 'test-tool', - inputSchema: { type: 'object' } + server.setRequestHandler( + CallToolRequestSchema, + async (request): Promise => { + if (request.params.name === 'test-tool') { + return { + content: [{ 
type: 'text', text: 'test' }] + }; } - ] - }; - }); - - server.setRequestHandler( - CallToolRequestSchema, - async (request): Promise => { - if (request.params.name === 'test-tool') { - return { - content: [{ type: 'text', text: 'test' }] - }; + throw new McpError( + ErrorCode.InvalidParams, + `Tool ${request.params.name} not found` + ); } - throw new McpError( - ErrorCode.InvalidParams, - `Tool ${request.params.name} not found` - ); - } - ); + ); + + return server; + } const app = express(); app.use(express.json()); @@ -107,10 +116,12 @@ export function createServer( // Resource is usually $baseUrl/mcp, but if PRM is at the root, // the resource identifier is the root. + // Can be overridden via prmResourceOverride for testing resource mismatch. const resource = - prmPath === '/.well-known/oauth-protected-resource' + prmResourceOverride ?? + (prmPath === '/.well-known/oauth-protected-resource' ? getBaseUrl() - : `${getBaseUrl()}/mcp`; + : `${getBaseUrl()}/mcp`); const prmResponse: any = { resource, @@ -146,6 +157,7 @@ export function createServer( authMiddleware(req, res, async (err?: any) => { if (err) return next(err); + const server = createMcpServer(); const transport = new StreamableHTTPServerTransport({ sessionIdGenerator: undefined }); diff --git a/src/scenarios/client/auth/helpers/mockTokenVerifier.ts b/src/scenarios/client/auth/helpers/mockTokenVerifier.ts index 8cbfae1..022fa4d 100644 --- a/src/scenarios/client/auth/helpers/mockTokenVerifier.ts +++ b/src/scenarios/client/auth/helpers/mockTokenVerifier.ts @@ -1,5 +1,6 @@ import { OAuthTokenVerifier } from '@modelcontextprotocol/sdk/server/auth/provider.js'; import { AuthInfo } from '@modelcontextprotocol/sdk/server/auth/types.js'; +import { InvalidTokenError } from '@modelcontextprotocol/sdk/server/auth/errors.js'; import type { ConformanceCheck } from '../../../../types'; import { SpecReferences } from '../spec-references'; @@ -53,6 +54,6 @@ export class MockTokenVerifier implements OAuthTokenVerifier { 
token: token ? token.substring(0, 10) + '...' : 'missing' } }); - throw new Error('Invalid token'); + throw new InvalidTokenError('Invalid token'); } } diff --git a/src/scenarios/client/auth/index.test.ts b/src/scenarios/client/auth/index.test.ts index 6dcc020..e43f0d3 100644 --- a/src/scenarios/client/auth/index.test.ts +++ b/src/scenarios/client/auth/index.test.ts @@ -1,4 +1,4 @@ -import { authScenariosList } from './index'; +import { authScenariosList, backcompatScenariosList } from './index'; import { runClientAgainstScenario, InlineClientRunner @@ -9,6 +9,7 @@ import { runClient as ignoreScopeClient } from '../../../../examples/clients/typ import { runClient as partialScopesClient } from '../../../../examples/clients/typescript/auth-test-partial-scopes'; import { runClient as ignore403Client } from '../../../../examples/clients/typescript/auth-test-ignore-403'; import { runClient as noRetryLimitClient } from '../../../../examples/clients/typescript/auth-test-no-retry-limit'; +import { runClient as noPkceClient } from '../../../../examples/clients/typescript/auth-test-no-pkce'; import { getHandler } from '../../../../examples/clients/typescript/everything-client'; import { setLogLevel } from '../../../../examples/clients/typescript/helpers/logger'; @@ -22,7 +23,9 @@ const skipScenarios = new Set([ const allowClientErrorScenarios = new Set([ // Client is expected to give up (error) after limited retries, but check should pass - 'auth/scope-retry-limit' + 'auth/scope-retry-limit', + // Client is expected to error when PRM resource doesn't match server URL + 'auth/resource-mismatch' ]); describe('Client Auth Scenarios', () => { @@ -45,6 +48,19 @@ describe('Client Auth Scenarios', () => { } }); +describe('Client Back-compat Scenarios', () => { + for (const scenario of backcompatScenariosList) { + test(`${scenario.name} passes`, async () => { + const clientFn = getHandler(scenario.name); + if (!clientFn) { + throw new Error(`No handler registered for scenario: 
${scenario.name}`); + } + const runner = new InlineClientRunner(clientFn); + await runClientAgainstScenario(runner, scenario.name); + }); + } +}); + describe('Negative tests', () => { test('bad client requests root PRM location', async () => { const runner = new InlineClientRunner(badPrmClient); @@ -99,4 +115,16 @@ describe('Negative tests', () => { allowClientError: true }); }); + + test('client does not use PKCE', async () => { + const runner = new InlineClientRunner(noPkceClient); + await runClientAgainstScenario(runner, 'auth/metadata-default', { + expectedFailureSlugs: [ + 'pkce-code-challenge-sent', + 'pkce-s256-method-used', + 'pkce-code-verifier-sent', + 'pkce-verifier-matches-challenge' + ] + }); + }); }); diff --git a/src/scenarios/client/auth/index.ts b/src/scenarios/client/auth/index.ts index 6a3ad1c..73c9ddb 100644 --- a/src/scenarios/client/auth/index.ts +++ b/src/scenarios/client/auth/index.ts @@ -21,12 +21,14 @@ import { ClientCredentialsJwtScenario, ClientCredentialsBasicScenario } from './client-credentials'; +import { ResourceMismatchScenario } from './resource-mismatch'; +import { PreRegistrationScenario } from './pre-registration'; +import { CrossAppAccessCompleteFlowScenario } from './cross-app-access'; +// Auth scenarios (required for tier 1) export const authScenariosList: Scenario[] = [ ...metadataScenarios, new AuthBasicCIMDScenario(), - new Auth20250326OAuthMetadataBackcompatScenario(), - new Auth20250326OEndpointFallbackScenario(), new ScopeFromWwwAuthenticateScenario(), new ScopeFromScopesSupportedScenario(), new ScopeOmittedWhenUndefinedScenario(), @@ -35,6 +37,19 @@ export const authScenariosList: Scenario[] = [ new ClientSecretBasicAuthScenario(), new ClientSecretPostAuthScenario(), new PublicClientAuthScenario(), + new ResourceMismatchScenario(), + new PreRegistrationScenario() +]; + +// Back-compat scenarios (optional - backward compatibility with older spec versions) +export const backcompatScenariosList: Scenario[] = [ + new 
Auth20250326OAuthMetadataBackcompatScenario(), + new Auth20250326OEndpointFallbackScenario() +]; + +// Extension scenarios (optional for tier 1 - protocol extensions) +export const extensionScenariosList: Scenario[] = [ new ClientCredentialsJwtScenario(), - new ClientCredentialsBasicScenario() + new ClientCredentialsBasicScenario(), + new CrossAppAccessCompleteFlowScenario() ]; diff --git a/src/scenarios/client/auth/march-spec-backcompat.ts b/src/scenarios/client/auth/march-spec-backcompat.ts index e9f8184..61262d8 100644 --- a/src/scenarios/client/auth/march-spec-backcompat.ts +++ b/src/scenarios/client/auth/march-spec-backcompat.ts @@ -1,5 +1,5 @@ import type { Scenario, ConformanceCheck } from '../../../types'; -import { ScenarioUrls } from '../../../types'; +import { ScenarioUrls, SpecVersion } from '../../../types'; import { createAuthServer } from './helpers/createAuthServer'; import { createServer } from './helpers/createServer'; import { ServerLifecycle } from './helpers/serverLifecycle'; @@ -8,6 +8,7 @@ import { SpecReferences } from './spec-references'; export class Auth20250326OAuthMetadataBackcompatScenario implements Scenario { name = 'auth/2025-03-26-oauth-metadata-backcompat'; + specVersions: SpecVersion[] = ['2025-03-26']; description = 'Tests 2025-03-26 spec OAuth flow: no PRM (Protected Resource Metadata), OAuth metadata at root location'; private server = new ServerLifecycle(); @@ -72,6 +73,7 @@ export class Auth20250326OAuthMetadataBackcompatScenario implements Scenario { export class Auth20250326OEndpointFallbackScenario implements Scenario { name = 'auth/2025-03-26-oauth-endpoint-fallback'; + specVersions: SpecVersion[] = ['2025-03-26']; description = 'Tests OAuth flow with no metadata endpoints, relying on fallback to standard OAuth endpoints at server root (2025-03-26 spec behavior)'; private server = new ServerLifecycle(); diff --git a/src/scenarios/client/auth/pre-registration.ts b/src/scenarios/client/auth/pre-registration.ts new file 
mode 100644 index 0000000..00673df --- /dev/null +++ b/src/scenarios/client/auth/pre-registration.ts @@ -0,0 +1,157 @@ +import type { + Scenario, + ConformanceCheck, + ScenarioUrls, + SpecVersion +} from '../../../types'; +import { createAuthServer } from './helpers/createAuthServer'; +import { createServer } from './helpers/createServer'; +import { ServerLifecycle } from './helpers/serverLifecycle'; +import { SpecReferences } from './spec-references'; +import { MockTokenVerifier } from './helpers/mockTokenVerifier'; + +const PRE_REGISTERED_CLIENT_ID = 'pre-registered-client'; +const PRE_REGISTERED_CLIENT_SECRET = 'pre-registered-secret'; + +/** + * Scenario: Pre-registration (static client credentials) + * + * Tests OAuth flow where the server does NOT support Dynamic Client Registration. + * Clients must use pre-registered credentials passed via context. + * + * This tests the pre-registration approach described in the MCP spec: + * https://modelcontextprotocol.io/specification/draft/basic/authorization#preregistration + */ +export class PreRegistrationScenario implements Scenario { + name = 'auth/pre-registration'; + specVersions: SpecVersion[] = ['2025-11-25']; + description = + 'Tests OAuth flow with pre-registered client credentials. 
Server does not support DCR.'; + + private authServer = new ServerLifecycle(); + private server = new ServerLifecycle(); + private checks: ConformanceCheck[] = []; + + async start(): Promise { + this.checks = []; + const tokenVerifier = new MockTokenVerifier(this.checks, []); + + const authApp = createAuthServer(this.checks, this.authServer.getUrl, { + tokenVerifier, + disableDynamicRegistration: true, + tokenEndpointAuthMethodsSupported: ['client_secret_basic'], + onTokenRequest: ({ authorizationHeader, timestamp }) => { + // Verify client used pre-registered credentials via Basic auth + if (!authorizationHeader?.startsWith('Basic ')) { + this.checks.push({ + id: 'pre-registration-auth', + name: 'Pre-registration authentication', + description: + 'Client did not use Basic authentication with pre-registered credentials', + status: 'FAILURE', + timestamp, + specReferences: [SpecReferences.MCP_PREREGISTRATION] + }); + return { + error: 'invalid_client', + errorDescription: 'Missing or invalid Authorization header', + statusCode: 401 + }; + } + + const base64Credentials = authorizationHeader.slice(6); + const credentials = Buffer.from(base64Credentials, 'base64').toString( + 'utf-8' + ); + const [clientId, clientSecret] = credentials.split(':'); + + if ( + clientId !== PRE_REGISTERED_CLIENT_ID || + clientSecret !== PRE_REGISTERED_CLIENT_SECRET + ) { + this.checks.push({ + id: 'pre-registration-auth', + name: 'Pre-registration authentication', + description: `Client used incorrect pre-registered credentials. 
Expected client_id '${PRE_REGISTERED_CLIENT_ID}', got '${clientId}'`, + status: 'FAILURE', + timestamp, + specReferences: [SpecReferences.MCP_PREREGISTRATION], + details: { + expectedClientId: PRE_REGISTERED_CLIENT_ID, + actualClientId: clientId + } + }); + return { + error: 'invalid_client', + errorDescription: 'Invalid pre-registered credentials', + statusCode: 401 + }; + } + + // Success - client used correct pre-registered credentials + this.checks.push({ + id: 'pre-registration-auth', + name: 'Pre-registration authentication', + description: + 'Client correctly used pre-registered credentials when server does not support DCR', + status: 'SUCCESS', + timestamp, + specReferences: [SpecReferences.MCP_PREREGISTRATION], + details: { clientId } + }); + + return { + token: `test-token-prereg-${Date.now()}`, + scopes: [] + }; + } + }); + + await this.authServer.start(authApp); + + const app = createServer( + this.checks, + this.server.getUrl, + this.authServer.getUrl, + { + prmPath: '/.well-known/oauth-protected-resource/mcp', + requiredScopes: [], + tokenVerifier + } + ); + + await this.server.start(app); + + return { + serverUrl: `${this.server.getUrl()}/mcp`, + context: { + client_id: PRE_REGISTERED_CLIENT_ID, + client_secret: PRE_REGISTERED_CLIENT_SECRET + } + }; + } + + async stop() { + await this.authServer.stop(); + await this.server.stop(); + } + + getChecks(): ConformanceCheck[] { + // Ensure we have the pre-registration check + const hasPreRegCheck = this.checks.some( + (c) => c.id === 'pre-registration-auth' + ); + if (!hasPreRegCheck) { + this.checks.push({ + id: 'pre-registration-auth', + name: 'Pre-registration authentication', + description: 'Client did not make a token request', + status: 'FAILURE', + timestamp: new Date().toISOString(), + specReferences: [SpecReferences.MCP_PREREGISTRATION] + }); + } + + return this.checks; + } +} diff --git a/src/scenarios/client/auth/resource-mismatch.ts b/src/scenarios/client/auth/resource-mismatch.ts new file mode 
100644 index 0000000..dd76c68 --- /dev/null +++ b/src/scenarios/client/auth/resource-mismatch.ts @@ -0,0 +1,114 @@ +import type { Scenario, ConformanceCheck } from '../../../types.js'; +import { ScenarioUrls, SpecVersion } from '../../../types.js'; +import { createAuthServer } from './helpers/createAuthServer.js'; +import { createServer } from './helpers/createServer.js'; +import { ServerLifecycle } from './helpers/serverLifecycle.js'; +import { SpecReferences } from './spec-references.js'; +import { MockTokenVerifier } from './helpers/mockTokenVerifier.js'; + +/** + * Scenario: Resource Mismatch Detection + * + * Tests that clients correctly detect and reject when the Protected Resource + * Metadata returns a `resource` field that doesn't match the server URL + * the client is trying to access. + * + * Per RFC 8707 and MCP spec, clients MUST validate that the resource from + * PRM matches the expected server before proceeding with authorization. + * + * Setup: + * - Server returns PRM with resource: "https://evil.example.com/mcp" (different origin) + * - Client is trying to access the actual server at localhost:/mcp + * + * Expected behavior: + * - Client should NOT proceed with authorization + * - Client should abort due to resource mismatch + * - Test passes if client does NOT complete the auth flow (no authorization request) + */ +export class ResourceMismatchScenario implements Scenario { + name = 'auth/resource-mismatch'; + specVersions: SpecVersion[] = ['draft']; + description = + 'Tests that client rejects when PRM resource does not match server URL'; + allowClientError = true; + + private authServer = new ServerLifecycle(); + private server = new ServerLifecycle(); + private checks: ConformanceCheck[] = []; + private authorizationRequestMade = false; + + async start(): Promise { + this.checks = []; + this.authorizationRequestMade = false; + + const tokenVerifier = new MockTokenVerifier(this.checks, []); + + const authApp = createAuthServer(this.checks, 
this.authServer.getUrl, { + tokenVerifier, + tokenEndpointAuthMethodsSupported: ['none'], + onAuthorizationRequest: () => { + // If we get here, the client incorrectly proceeded with auth + this.authorizationRequestMade = true; + }, + onRegistrationRequest: () => ({ + clientId: `test-client-${Date.now()}`, + clientSecret: undefined, + tokenEndpointAuthMethod: 'none' + }) + }); + await this.authServer.start(authApp); + + // Create server that returns a mismatched resource in PRM + const app = createServer( + this.checks, + this.server.getUrl, + this.authServer.getUrl, + { + prmPath: '/.well-known/oauth-protected-resource/mcp', + requiredScopes: [], + tokenVerifier, + // Return a different origin in PRM - this should be rejected by the client + prmResourceOverride: 'https://evil.example.com/mcp' + } + ); + await this.server.start(app); + + return { serverUrl: `${this.server.getUrl()}/mcp` }; + } + + async stop() { + await this.authServer.stop(); + await this.server.stop(); + } + + getChecks(): ConformanceCheck[] { + const timestamp = new Date().toISOString(); + const specRefs = [ + SpecReferences.RFC_8707_RESOURCE_INDICATORS, + SpecReferences.MCP_RESOURCE_PARAMETER + ]; + + // The test passes if the client did NOT make an authorization request + // (meaning it correctly rejected the mismatched resource) + if (!this.checks.some((c) => c.id === 'resource-mismatch-rejected')) { + const correctlyRejected = !this.authorizationRequestMade; + this.checks.push({ + id: 'resource-mismatch-rejected', + name: 'Client rejects mismatched resource', + description: correctlyRejected + ? 'Client correctly rejected authorization when PRM resource does not match server URL' + : 'Client MUST validate that PRM resource matches the server URL before proceeding with authorization', + status: correctlyRejected ? 
'SUCCESS' : 'FAILURE', + timestamp, + specReferences: specRefs, + details: { + prmResource: 'https://evil.example.com/mcp', + expectedBehavior: 'Client should NOT proceed with authorization', + authorizationRequestMade: this.authorizationRequestMade + } + }); + } + + return this.checks; + } +} diff --git a/src/scenarios/client/auth/scope-handling.ts b/src/scenarios/client/auth/scope-handling.ts index d94760b..166a9c3 100644 --- a/src/scenarios/client/auth/scope-handling.ts +++ b/src/scenarios/client/auth/scope-handling.ts @@ -1,5 +1,5 @@ import type { Scenario, ConformanceCheck } from '../../../types'; -import { ScenarioUrls } from '../../../types'; +import { ScenarioUrls, SpecVersion } from '../../../types'; import { createAuthServer } from './helpers/createAuthServer'; import { createServer } from './helpers/createServer'; import { ServerLifecycle } from './helpers/serverLifecycle'; @@ -15,6 +15,7 @@ import type { Request, Response, NextFunction } from 'express'; */ export class ScopeFromWwwAuthenticateScenario implements Scenario { name = 'auth/scope-from-www-authenticate'; + specVersions: SpecVersion[] = ['2025-11-25']; description = 'Tests that client uses scope parameter from WWW-Authenticate header when provided'; private authServer = new ServerLifecycle(); @@ -100,6 +101,7 @@ export class ScopeFromWwwAuthenticateScenario implements Scenario { */ export class ScopeFromScopesSupportedScenario implements Scenario { name = 'auth/scope-from-scopes-supported'; + specVersions: SpecVersion[] = ['2025-11-25']; description = 'Tests that client uses all scopes from scopes_supported when scope not in WWW-Authenticate header'; private authServer = new ServerLifecycle(); @@ -195,6 +197,7 @@ export class ScopeFromScopesSupportedScenario implements Scenario { */ export class ScopeOmittedWhenUndefinedScenario implements Scenario { name = 'auth/scope-omitted-when-undefined'; + specVersions: SpecVersion[] = ['2025-11-25']; description = 'Tests that client omits scope 
parameter when scopes_supported is undefined'; private authServer = new ServerLifecycle(); @@ -281,6 +284,7 @@ export class ScopeOmittedWhenUndefinedScenario implements Scenario { */ export class ScopeStepUpAuthScenario implements Scenario { name = 'auth/scope-step-up'; + specVersions: SpecVersion[] = ['2025-11-25']; description = 'Tests that client handles step-up authentication with different scope requirements per operation'; private authServer = new ServerLifecycle(); @@ -477,8 +481,10 @@ export class ScopeStepUpAuthScenario implements Scenario { */ export class ScopeRetryLimitScenario implements Scenario { name = 'auth/scope-retry-limit'; + specVersions: SpecVersion[] = ['2025-11-25']; description = 'Tests that client implements retry limits to prevent infinite authorization loops on repeated 403 responses'; + allowClientError = true; private authServer = new ServerLifecycle(); private server = new ServerLifecycle(); private checks: ConformanceCheck[] = []; diff --git a/src/scenarios/client/auth/spec-references.ts b/src/scenarios/client/auth/spec-references.ts index 52a08ca..4020bfc 100644 --- a/src/scenarios/client/auth/spec-references.ts +++ b/src/scenarios/client/auth/spec-references.ts @@ -19,15 +19,15 @@ export const SpecReferences: { [key: string]: SpecReference } = { }, MCP_PRM_DISCOVERY: { id: 'MCP-2025-06-18-PRM-discovery', - url: 'https://modelcontextprotocol.io/specification/draft/basic/authorization#protected-resource-metadata-discovery-requirements' + url: 'https://modelcontextprotocol.io/specification/2025-11-25/basic/authorization#protected-resource-metadata-discovery-requirements' }, MCP_AUTH_DISCOVERY: { id: 'MCP-Authorization-metadata-discovery', - url: 'https://modelcontextprotocol.io/specification/draft/basic/authorization#authorization-server-metadata-discovery' + url: 'https://modelcontextprotocol.io/specification/2025-11-25/basic/authorization#authorization-server-metadata-discovery' }, MCP_DCR: { id: 'MCP-Dynamic-client-registration', - 
url: 'https://modelcontextprotocol.io/specification/draft/basic/client#dynamic-client-registration' + url: 'https://modelcontextprotocol.io/specification/2025-11-25/basic/client#dynamic-client-registration' }, OAUTH_2_1_AUTHORIZATION_ENDPOINT: { id: 'OAUTH-2.1-authorization-endpoint', @@ -39,23 +39,23 @@ export const SpecReferences: { [key: string]: SpecReference } = { }, MCP_ACCESS_TOKEN_USAGE: { id: 'MCP-Access-token-usage', - url: 'https://modelcontextprotocol.io/specification/draft/basic/authorization#access-token-usage' + url: 'https://modelcontextprotocol.io/specification/2025-11-25/basic/authorization#access-token-usage' }, MCP_SCOPE_SELECTION_STRATEGY: { id: 'MCP-Scope-selection-strategy', - url: 'https://modelcontextprotocol.io/specification/draft/basic/authorization#scope-selection-strategy' + url: 'https://modelcontextprotocol.io/specification/2025-11-25/basic/authorization#scope-selection-strategy' }, MCP_SCOPE_CHALLENGE_HANDLING: { id: 'MCP-Scope-challenge-handling', - url: 'https://modelcontextprotocol.io/specification/draft/basic/authorization#scope-challenge-handling' + url: 'https://modelcontextprotocol.io/specification/2025-11-25/basic/authorization#scope-challenge-handling' }, MCP_AUTH_ERROR_HANDLING: { id: 'MCP-Auth-error-handling', - url: 'https://modelcontextprotocol.io/specification/draft/basic/authorization#error-handling' + url: 'https://modelcontextprotocol.io/specification/2025-11-25/basic/authorization#error-handling' }, MCP_CLIENT_ID_METADATA_DOCUMENTS: { id: 'MCP-Client-ID-Metadata-Documents', - url: 'https://modelcontextprotocol.io/specification/draft/basic/authorization#client-id-metadata-documents' + url: 'https://modelcontextprotocol.io/specification/2025-11-25/basic/authorization#client-id-metadata-documents' }, IETF_CIMD: { id: 'IETF-OAuth-Client-ID-Metadata-Document', @@ -72,5 +72,33 @@ export const SpecReferences: { [key: string]: SpecReference } = { SEP_1046_CLIENT_CREDENTIALS: { id: 'SEP-1046-Client-Credentials', url: 
'https://github.com/modelcontextprotocol/ext-auth/blob/main/specification/draft/oauth-client-credentials.mdx' + }, + RFC_8707_RESOURCE_INDICATORS: { + id: 'RFC-8707-Resource-Indicators', + url: 'https://www.rfc-editor.org/rfc/rfc8707.html' + }, + MCP_RESOURCE_PARAMETER: { + id: 'MCP-Resource-Parameter-Implementation', + url: 'https://modelcontextprotocol.io/specification/draft/basic/authorization#resource-parameter-implementation' + }, + MCP_PREREGISTRATION: { + id: 'MCP-Preregistration', + url: 'https://modelcontextprotocol.io/specification/2025-11-25/basic/authorization#preregistration' + }, + MCP_PKCE: { + id: 'MCP-PKCE-requirement', + url: 'https://modelcontextprotocol.io/specification/2025-11-25/basic/authorization#authorization-code-protection' + }, + RFC_8693_TOKEN_EXCHANGE: { + id: 'RFC-8693-Token-Exchange', + url: 'https://datatracker.ietf.org/doc/html/rfc8693' + }, + RFC_7523_JWT_BEARER: { + id: 'RFC-7523-JWT-Bearer-Grant', + url: 'https://datatracker.ietf.org/doc/html/rfc7523' + }, + SEP_990_ENTERPRISE_OAUTH: { + id: 'SEP-990-Enterprise-Managed-OAuth', + url: 'https://github.com/modelcontextprotocol/ext-auth/blob/main/specification/draft/enterprise-oauth.mdx' } }; diff --git a/src/scenarios/client/auth/token-endpoint-auth.ts b/src/scenarios/client/auth/token-endpoint-auth.ts index 979987e..419fb6b 100644 --- a/src/scenarios/client/auth/token-endpoint-auth.ts +++ b/src/scenarios/client/auth/token-endpoint-auth.ts @@ -1,5 +1,5 @@ import type { Scenario, ConformanceCheck } from '../../../types.js'; -import { ScenarioUrls } from '../../../types.js'; +import { ScenarioUrls, SpecVersion } from '../../../types.js'; import { createAuthServer } from './helpers/createAuthServer.js'; import { createServer } from './helpers/createServer.js'; import { ServerLifecycle } from './helpers/serverLifecycle.js'; @@ -45,12 +45,17 @@ const AUTH_METHOD_NAMES: Record = { class TokenEndpointAuthScenario implements Scenario { name: string; + specVersions: SpecVersion[] = 
['2025-06-18', '2025-11-25']; description: string; private expectedAuthMethod: AuthMethod; private authServer = new ServerLifecycle(); private server = new ServerLifecycle(); private checks: ConformanceCheck[] = []; + // Track resource parameters for RFC 8707 validation + private authorizationResource?: string; + private tokenResource?: string; + constructor(expectedAuthMethod: AuthMethod) { this.expectedAuthMethod = expectedAuthMethod; this.name = `auth/token-endpoint-auth-${expectedAuthMethod === 'client_secret_basic' ? 'basic' : expectedAuthMethod === 'client_secret_post' ? 'post' : 'none'}`; @@ -59,6 +64,8 @@ class TokenEndpointAuthScenario implements Scenario { async start(): Promise { this.checks = []; + this.authorizationResource = undefined; + this.tokenResource = undefined; const tokenVerifier = new MockTokenVerifier(this.checks, []); const authApp = createAuthServer(this.checks, this.authServer.getUrl, { @@ -66,7 +73,12 @@ class TokenEndpointAuthScenario implements Scenario { tokenEndpointAuthMethodsSupported: [this.expectedAuthMethod], // Disable CIMD to force DCR - we need client_secret for auth method testing clientIdMetadataDocumentSupported: false, + onAuthorizationRequest: ({ resource }) => { + this.authorizationResource = resource; + }, onTokenRequest: ({ authorizationHeader, body, timestamp }) => { + // Track resource from token request for RFC 8707 validation + this.tokenResource = body.resource; const bodyClientSecret = body.client_secret; const actualMethod = detectAuthMethod( authorizationHeader, @@ -147,18 +159,132 @@ class TokenEndpointAuthScenario implements Scenario { } getChecks(): ConformanceCheck[] { + const timestamp = new Date().toISOString(); + if (!this.checks.some((c) => c.id === 'token-endpoint-auth-method')) { this.checks.push({ id: 'token-endpoint-auth-method', name: 'Token endpoint authentication method', description: 'Client did not make a token request', status: 'FAILURE', - timestamp: new Date().toISOString(), + timestamp, 
specReferences: [SpecReferences.OAUTH_2_1_TOKEN] }); } + + // RFC 8707 Resource Parameter Validation Checks + this.addResourceParameterChecks(timestamp); + return this.checks; } + + private addResourceParameterChecks(timestamp: string): void { + const specRefs = [ + SpecReferences.RFC_8707_RESOURCE_INDICATORS, + SpecReferences.MCP_RESOURCE_PARAMETER + ]; + + // Check 1: Resource parameter in authorization request + if ( + !this.checks.some((c) => c.id === 'resource-parameter-in-authorization') + ) { + const hasResource = !!this.authorizationResource; + this.checks.push({ + id: 'resource-parameter-in-authorization', + name: 'Resource parameter in authorization request', + description: hasResource + ? 'Client included resource parameter in authorization request' + : 'Client MUST include resource parameter in authorization request per RFC 8707', + status: hasResource ? 'SUCCESS' : 'FAILURE', + timestamp, + specReferences: specRefs, + details: { + resource: this.authorizationResource || 'not provided' + } + }); + } + + // Check 2: Resource parameter in token request + if (!this.checks.some((c) => c.id === 'resource-parameter-in-token')) { + const hasResource = !!this.tokenResource; + this.checks.push({ + id: 'resource-parameter-in-token', + name: 'Resource parameter in token request', + description: hasResource + ? 'Client included resource parameter in token request' + : 'Client MUST include resource parameter in token request per RFC 8707', + status: hasResource ? 
'SUCCESS' : 'FAILURE', + timestamp, + specReferences: specRefs, + details: { + resource: this.tokenResource || 'not provided' + } + }); + } + + // Check 3: Resource parameter is valid canonical URI + if (!this.checks.some((c) => c.id === 'resource-parameter-valid-uri')) { + const resourceToValidate = + this.authorizationResource || this.tokenResource; + if (resourceToValidate) { + const validation = this.validateCanonicalUri(resourceToValidate); + this.checks.push({ + id: 'resource-parameter-valid-uri', + name: 'Resource parameter is valid canonical URI', + description: validation.valid + ? 'Resource parameter is a valid canonical URI (has scheme, no fragment)' + : `Resource parameter is invalid: ${validation.error}`, + status: validation.valid ? 'SUCCESS' : 'FAILURE', + timestamp, + specReferences: specRefs, + details: { + resource: resourceToValidate, + ...(validation.error && { error: validation.error }) + } + }); + } + } + + // Check 4: Resource parameter consistency between requests + if (!this.checks.some((c) => c.id === 'resource-parameter-consistency')) { + if (this.authorizationResource && this.tokenResource) { + const consistent = this.authorizationResource === this.tokenResource; + this.checks.push({ + id: 'resource-parameter-consistency', + name: 'Resource parameter consistency', + description: consistent + ? 'Resource parameter is consistent between authorization and token requests' + : 'Resource parameter MUST be consistent between authorization and token requests', + status: consistent ? 
'SUCCESS' : 'FAILURE', + timestamp, + specReferences: specRefs, + details: { + authorizationResource: this.authorizationResource, + tokenResource: this.tokenResource + } + }); + } + } + } + + private validateCanonicalUri(uri: string): { + valid: boolean; + error?: string; + } { + try { + const parsed = new URL(uri); + // Check for fragment (RFC 8707: MUST NOT include fragment) + if (parsed.hash) { + return { + valid: false, + error: 'contains fragment (not allowed per RFC 8707)' + }; + } + return { valid: true }; + } catch { + return { valid: false, error: 'invalid URI format' }; + } + } } export class ClientSecretBasicAuthScenario extends TokenEndpointAuthScenario { diff --git a/src/scenarios/client/elicitation-defaults.ts b/src/scenarios/client/elicitation-defaults.ts index 88e6bf0..73bc07c 100644 --- a/src/scenarios/client/elicitation-defaults.ts +++ b/src/scenarios/client/elicitation-defaults.ts @@ -11,7 +11,7 @@ import { ListToolsRequestSchema, ElicitResultSchema } from '@modelcontextprotocol/sdk/types.js'; -import type { Scenario, ConformanceCheck } from '../../types'; +import type { Scenario, ConformanceCheck, SpecVersion } from '../../types'; import express, { Request, Response } from 'express'; import { ScenarioUrls } from '../../types'; import { createRequestLogger } from '../request-logger'; @@ -474,6 +474,7 @@ function createServer(checks: ConformanceCheck[]): { export class ElicitationClientDefaultsScenario implements Scenario { name = 'elicitation-sep1034-client-defaults'; + specVersions: SpecVersion[] = ['2025-11-25']; description = 'Tests client applies default values for omitted elicitation fields (SEP-1034)'; private app: express.Application | null = null; diff --git a/src/scenarios/client/initialize.ts b/src/scenarios/client/initialize.ts index a351700..70fb0d1 100644 --- a/src/scenarios/client/initialize.ts +++ b/src/scenarios/client/initialize.ts @@ -1,9 +1,15 @@ import http from 'http'; -import { Scenario, ScenarioUrls, ConformanceCheck } from 
'../../types'; +import { + Scenario, + ScenarioUrls, + ConformanceCheck, + SpecVersion +} from '../../types'; import { clientChecks } from '../../checks/index'; export class InitializeScenario implements Scenario { name = 'initialize'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = 'Tests MCP client initialization handshake'; private server: http.Server | null = null; diff --git a/src/scenarios/client/sse-retry.ts b/src/scenarios/client/sse-retry.ts index 6e44a06..c8f0792 100644 --- a/src/scenarios/client/sse-retry.ts +++ b/src/scenarios/client/sse-retry.ts @@ -8,10 +8,16 @@ */ import http from 'http'; -import { Scenario, ScenarioUrls, ConformanceCheck } from '../../types.js'; +import { + Scenario, + ScenarioUrls, + ConformanceCheck, + SpecVersion +} from '../../types.js'; export class SSERetryScenario implements Scenario { name = 'sse-retry'; + specVersions: SpecVersion[] = ['2025-11-25']; description = 'Tests that client respects SSE retry field timing and reconnects properly (SEP-1699)'; diff --git a/src/scenarios/client/tools_call.ts b/src/scenarios/client/tools_call.ts index ab7e312..807ad55 100644 --- a/src/scenarios/client/tools_call.ts +++ b/src/scenarios/client/tools_call.ts @@ -4,12 +4,12 @@ import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'; -import type { Scenario, ConformanceCheck } from '../../types'; +import type { Scenario, ConformanceCheck, SpecVersion } from '../../types'; import express, { Request, Response } from 'express'; import { ScenarioUrls } from '../../types'; import { createRequestLogger } from '../request-logger'; -function createServer(checks: ConformanceCheck[]): express.Application { +function createMcpServer(checks: ConformanceCheck[]): Server { const server = new Server( { name: 'add-numbers-server', @@ -84,6 +84,10 @@ function createServer(checks: ConformanceCheck[]): express.Application { throw new Error(`Unknown tool: ${request.params.name}`); }); + 
return server; +} + +function createServerApp(checks: ConformanceCheck[]): express.Application { const app = express(); app.use(express.json()); @@ -96,6 +100,8 @@ function createServer(checks: ConformanceCheck[]): express.Application { ); app.post('/mcp', async (req: Request, res: Response) => { + // Stateless: create a fresh server and transport per request + const server = createMcpServer(checks); const transport = new StreamableHTTPServerTransport({ sessionIdGenerator: undefined }); @@ -109,6 +115,7 @@ function createServer(checks: ConformanceCheck[]): express.Application { export class ToolsCallScenario implements Scenario { name = 'tools_call'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = 'Tests calling tools with various parameter types'; private app: express.Application | null = null; private httpServer: any = null; @@ -116,7 +123,7 @@ export class ToolsCallScenario implements Scenario { async start(): Promise { this.checks = []; - this.app = createServer(this.checks); + this.app = createServerApp(this.checks); this.httpServer = this.app.listen(0); const port = this.httpServer.address().port; return { serverUrl: `http://localhost:${port}/mcp` }; diff --git a/src/scenarios/index.ts b/src/scenarios/index.ts index d4ac2a3..df47f5e 100644 --- a/src/scenarios/index.ts +++ b/src/scenarios/index.ts @@ -1,4 +1,4 @@ -import { Scenario, ClientScenario } from '../types'; +import { Scenario, ClientScenario, SpecVersion } from '../types'; import { InitializeScenario } from './client/initialize'; import { ToolsCallScenario } from './client/tools_call'; import { ElicitationClientDefaultsScenario } from './client/elicitation-defaults'; @@ -51,7 +51,13 @@ import { PromptsGetWithImageScenario } from './server/prompts'; -import { authScenariosList } from './client/auth/index'; +import { DNSRebindingProtectionScenario } from './server/dns-rebinding'; + +import { + authScenariosList, + backcompatScenariosList, + extensionScenariosList +} from 
'./client/auth/index'; import { listMetadataScenarios } from './client/auth/discovery-metadata'; import { serverAuthScenarios as serverAuthScenariosList, @@ -60,19 +66,11 @@ import { // Pending client scenarios (not yet fully tested/implemented) const pendingClientScenariosList: ClientScenario[] = [ - // Elicitation scenarios (SEP-1330) - new ElicitationEnumsScenario(), - // JSON Schema 2020-12 (SEP-1613) // This test is pending until the SDK includes PR #1135 which preserves // $schema, $defs, and additionalProperties fields in tool schemas. new JsonSchema2020_12Scenario(), - // On hold until elicitation schema types are fixed - // https://github.com/modelcontextprotocol/modelcontextprotocol/pull/1863 - new ToolsCallElicitationScenario(), - new ElicitationDefaultsScenario(), - // On hold until server-side SSE improvements are made // https://github.com/modelcontextprotocol/typescript-sdk/pull/1129 new ServerSSEPollingScenario() @@ -127,7 +125,10 @@ const allClientScenariosList: ClientScenario[] = [ new PromptsGetSimpleScenario(), new PromptsGetWithArgsScenario(), new PromptsGetEmbeddedResourceScenario(), - new PromptsGetWithImageScenario() + new PromptsGetWithImageScenario(), + + // Security scenarios + new DNSRebindingProtectionScenario() ]; // Active client scenarios (excludes pending) @@ -144,8 +145,19 @@ export const clientScenarios = new Map( allClientScenariosList.map((scenario) => [scenario.name, scenario]) ); -// Scenario scenarios +// All client test scenarios (core + backcompat + extensions) const scenariosList: Scenario[] = [ + new InitializeScenario(), + new ToolsCallScenario(), + new ElicitationClientDefaultsScenario(), + new SSERetryScenario(), + ...authScenariosList, + ...backcompatScenariosList, + ...extensionScenariosList +]; + +// Core scenarios (tier 1 requirements) +const coreScenariosList: Scenario[] = [ new InitializeScenario(), new ToolsCallScenario(), new ElicitationClientDefaultsScenario(), @@ -190,6 +202,18 @@ export function 
listAuthScenarios(): string[] { return authScenariosList.map((scenario) => scenario.name); } +export function listCoreScenarios(): string[] { + return coreScenariosList.map((scenario) => scenario.name); +} + +export function listExtensionScenarios(): string[] { + return extensionScenariosList.map((scenario) => scenario.name); +} + +export function listBackcompatScenarios(): string[] { + return backcompatScenariosList.map((scenario) => scenario.name); +} + export { listMetadataScenarios }; // Server auth scenario helpers @@ -207,3 +231,34 @@ export function getServerAuthScenario( export function listServerAuthScenarios(): string[] { return serverAuthScenariosList.map((s) => s.name); } + +// All valid spec versions, used by the CLI to validate --spec-version input. +export const ALL_SPEC_VERSIONS: SpecVersion[] = [ + '2025-03-26', + '2025-06-18', + '2025-11-25', + 'draft', + 'extension' +]; + +export function listScenariosForSpec(version: SpecVersion): string[] { + return scenariosList + .filter((s) => s.specVersions.includes(version)) + .map((s) => s.name); +} + +export function listClientScenariosForSpec(version: SpecVersion): string[] { + return allClientScenariosList + .filter((s) => s.specVersions.includes(version)) + .map((s) => s.name); +} + +export function getScenarioSpecVersions( + name: string +): SpecVersion[] | undefined { + return ( + scenarios.get(name)?.specVersions ?? clientScenarios.get(name)?.specVersions + ); +} + +export type { SpecVersion }; diff --git a/src/scenarios/server-auth/basic-dcr-flow.ts b/src/scenarios/server-auth/basic-dcr-flow.ts deleted file mode 100644 index 994edd6..0000000 --- a/src/scenarios/server-auth/basic-dcr-flow.ts +++ /dev/null @@ -1,583 +0,0 @@ -/** - * Basic DCR Flow Scenario - * - * Tests the complete OAuth authentication flow using Dynamic Client Registration: - * 1. Unauthenticated MCP request triggers 401 + WWW-Authenticate header - * 2. Protected Resource Metadata (PRM) discovery - * 3. 
Authorization Server (AS) metadata discovery - * 4. Dynamic Client Registration (DCR) - * 5. Token acquisition via authorization_code flow - * 6. Authenticated MCP tool call with Bearer token - * - * This scenario uses the MCP SDK's real client with observation middleware - * to verify server conformance. - */ - -import type { - ClientScenario, - ClientScenarioOptions, - ConformanceCheck -} from '../../types'; -import { Client } from '@modelcontextprotocol/sdk/client/index.js'; -import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'; -import { applyMiddlewares } from '@modelcontextprotocol/sdk/client/middleware.js'; -import { - auth, - extractWWWAuthenticateParams, - UnauthorizedError -} from '@modelcontextprotocol/sdk/client/auth.js'; -import type { FetchLike } from '@modelcontextprotocol/sdk/shared/transport.js'; -import { - ConformanceOAuthProvider, - createObservationMiddleware, - type ObservedRequest -} from './helpers/oauth-client'; -import { ServerAuthSpecReferences } from './spec-references'; - -/** - * Basic DCR Flow - Tests complete OAuth flow with Dynamic Client Registration. - */ -export class BasicDcrFlowScenario implements ClientScenario { - name = 'server-auth/basic-dcr-flow'; - description = `Tests the complete OAuth authentication flow using Dynamic Client Registration. - -**Flow tested:** -1. Unauthenticated MCP request -> 401 + WWW-Authenticate -2. PRM Discovery -> authorization_servers -3. AS Metadata Discovery -> registration_endpoint, token_endpoint -4. DCR Registration -> client_id, client_secret -5. Token Acquisition -> access_token -6. 
Authenticated MCP Call -> success - -**Spec References:** -- RFC 9728 (Protected Resource Metadata) -- RFC 8414 (Authorization Server Metadata) -- RFC 7591 (Dynamic Client Registration) -- RFC 6750 (Bearer Token Usage) -- MCP Authorization Specification`; - - async run( - serverUrl: string, - options?: ClientScenarioOptions - ): Promise { - const checks: ConformanceCheck[] = []; - const observedRequests: ObservedRequest[] = []; - const timestamp = () => new Date().toISOString(); - const interactive = options?.interactive ?? false; - - // Create observation middleware to record all requests - const observationMiddleware = createObservationMiddleware((req) => { - observedRequests.push(req); - }); - - // Create OAuth provider for conformance testing - const provider = new ConformanceOAuthProvider( - { - client_name: 'MCP Conformance Test Client', - grant_types: ['authorization_code', 'refresh_token'], - response_types: ['code'], - token_endpoint_auth_method: 'client_secret_post' - }, - { interactive } - ); - - // Handle 401 with OAuth flow - const handle401 = async ( - response: Response, - next: FetchLike, - url: string - ): Promise => { - const { resourceMetadataUrl, scope } = - extractWWWAuthenticateParams(response); - - let result = await auth(provider, { - serverUrl: url, - resourceMetadataUrl, - scope, - fetchFn: next - }); - - if (result === 'REDIRECT') { - // Get auth code from the redirect (auto-login) - const authorizationCode = await provider.getAuthCode(); - - result = await auth(provider, { - serverUrl: url, - resourceMetadataUrl, - scope, - authorizationCode, - fetchFn: next - }); - - if (result !== 'AUTHORIZED') { - throw new UnauthorizedError( - `Authentication failed with result: ${result}` - ); - } - } - }; - - // Create middleware that handles OAuth with observation - const oauthMiddleware = (next: FetchLike): FetchLike => { - return async (input, init) => { - const headers = new Headers(init?.headers); - const tokens = await provider.tokens(); - if 
(tokens) { - headers.set('Authorization', `Bearer ${tokens.access_token}`); - } - - const response = await next(input, { ...init, headers }); - - if (response.status === 401) { - const url = typeof input === 'string' ? input : input.toString(); - await handle401(response.clone(), next, url); - // Retry with fresh tokens - const newTokens = await provider.tokens(); - if (newTokens) { - headers.set('Authorization', `Bearer ${newTokens.access_token}`); - } - return await next(input, { ...init, headers }); - } - - return response; - }; - }; - - // Compose middlewares: observation wraps oauth handling - const enhancedFetch = applyMiddlewares( - observationMiddleware, - oauthMiddleware - )(fetch); - - try { - // Create MCP client - const client = new Client( - { name: 'conformance-test-client', version: '1.0.0' }, - { capabilities: {} } - ); - - const transport = new StreamableHTTPClientTransport(new URL(serverUrl), { - fetch: enhancedFetch - }); - - // Connect triggers the OAuth flow - await client.connect(transport); - - // Make an authenticated call - try { - await client.listTools(); - } catch { - // Tool listing may fail if server doesn't have tools, but that's ok - } - - await transport.close(); - - // Analyze observed requests to generate conformance checks - this.analyzeRequests(observedRequests, checks, timestamp); - } catch (error) { - // Still analyze what we observed before the error - this.analyzeRequests(observedRequests, checks, timestamp); - - checks.push({ - id: 'auth-flow-completion', - name: 'OAuth Flow Completion', - description: 'Complete OAuth authentication flow', - status: 'FAILURE', - timestamp: timestamp(), - errorMessage: error instanceof Error ? error.message : String(error), - specReferences: [ServerAuthSpecReferences.MCP_AUTH_ACCESS_TOKEN] - }); - } - - return checks; - } - - /** - * Analyze observed requests and generate conformance checks. 
- */ - private analyzeRequests( - requests: ObservedRequest[], - checks: ConformanceCheck[], - timestamp: () => string - ): void { - // Phase 1: Check for 401 response with WWW-Authenticate - const unauthorizedRequest = requests.find( - (r) => r.responseStatus === 401 && r.requestType === 'mcp-request' - ); - - if (unauthorizedRequest) { - checks.push({ - id: 'auth-401-response', - name: 'Unauthenticated Request Returns 401', - description: - 'Server returns 401 Unauthorized for unauthenticated MCP requests', - status: 'SUCCESS', - timestamp: timestamp(), - specReferences: [ - ServerAuthSpecReferences.RFC_7235_401_RESPONSE, - ServerAuthSpecReferences.MCP_AUTH_ACCESS_TOKEN - ], - details: { - url: unauthorizedRequest.url, - status: unauthorizedRequest.responseStatus - } - }); - - // Check WWW-Authenticate header - if (unauthorizedRequest.wwwAuthenticate) { - const wwwAuth = unauthorizedRequest.wwwAuthenticate; - - checks.push({ - id: 'auth-www-authenticate-header', - name: 'WWW-Authenticate Header Present', - description: - 'Server includes WWW-Authenticate header in 401 response', - status: - wwwAuth.scheme.toLowerCase() === 'bearer' ? 
'SUCCESS' : 'WARNING', - timestamp: timestamp(), - specReferences: [ - ServerAuthSpecReferences.RFC_6750_WWW_AUTHENTICATE, - ServerAuthSpecReferences.RFC_7235_WWW_AUTHENTICATE - ], - details: { - scheme: wwwAuth.scheme, - params: wwwAuth.params - } - }); - - // Check for resource_metadata parameter - if (wwwAuth.params.resource_metadata) { - checks.push({ - id: 'auth-resource-metadata-param', - name: 'Resource Metadata URL in WWW-Authenticate', - description: - 'WWW-Authenticate header includes resource_metadata parameter', - status: 'INFO', - timestamp: timestamp(), - specReferences: [ - ServerAuthSpecReferences.RFC_9728_WWW_AUTHENTICATE - ], - details: { - resourceMetadata: wwwAuth.params.resource_metadata - } - }); - } - } else { - checks.push({ - id: 'auth-www-authenticate-header', - name: 'WWW-Authenticate Header Present', - description: - 'Server should include WWW-Authenticate header in 401 response', - status: 'INFO', - timestamp: timestamp(), - specReferences: [ServerAuthSpecReferences.RFC_6750_WWW_AUTHENTICATE] - }); - } - } else { - checks.push({ - id: 'auth-401-response', - name: 'Unauthenticated Request Returns 401', - description: - 'No 401 response observed - server may not require authentication', - status: 'FAILURE', - timestamp: timestamp(), - specReferences: [ServerAuthSpecReferences.RFC_7235_401_RESPONSE] - }); - } - - // Phase 2: PRM Discovery - const prmRequest = requests.find((r) => r.requestType === 'prm-discovery'); - if (prmRequest) { - checks.push({ - id: 'auth-prm-discovery', - name: 'Protected Resource Metadata Discovery', - description: 'Client discovered Protected Resource Metadata endpoint', - status: prmRequest.responseStatus === 200 ? 
'SUCCESS' : 'FAILURE', - timestamp: timestamp(), - specReferences: [ - ServerAuthSpecReferences.RFC_9728_PRM_DISCOVERY, - ServerAuthSpecReferences.MCP_AUTH_PRM_DISCOVERY - ], - details: { - url: prmRequest.url, - status: prmRequest.responseStatus, - body: prmRequest.responseBody - } - }); - - // Check PRM response content - if ( - prmRequest.responseStatus === 200 && - typeof prmRequest.responseBody === 'object' - ) { - const prm = prmRequest.responseBody as Record; - - if ( - prm.authorization_servers && - Array.isArray(prm.authorization_servers) - ) { - checks.push({ - id: 'auth-prm-authorization-servers', - name: 'PRM Contains Authorization Servers', - description: - 'Protected Resource Metadata includes authorization_servers array', - status: 'SUCCESS', - timestamp: timestamp(), - specReferences: [ServerAuthSpecReferences.RFC_9728_PRM_RESPONSE], - details: { - authorizationServers: prm.authorization_servers - } - }); - } else { - checks.push({ - id: 'auth-prm-authorization-servers', - name: 'PRM Contains Authorization Servers', - description: - 'Protected Resource Metadata must include authorization_servers array', - status: 'FAILURE', - timestamp: timestamp(), - specReferences: [ServerAuthSpecReferences.RFC_9728_PRM_RESPONSE] - }); - } - } - } else { - checks.push({ - id: 'auth-prm-discovery', - name: 'Protected Resource Metadata Discovery', - description: - 'No PRM discovery request observed - required for OAuth flow', - status: 'FAILURE', - timestamp: timestamp(), - specReferences: [ - ServerAuthSpecReferences.RFC_9728_PRM_DISCOVERY, - ServerAuthSpecReferences.MCP_AUTH_PRM_DISCOVERY - ] - }); - } - - // Phase 3: AS Metadata Discovery - const asMetadataRequest = requests.find( - (r) => r.requestType === 'as-metadata' - ); - if (asMetadataRequest) { - checks.push({ - id: 'auth-as-metadata-discovery', - name: 'Authorization Server Metadata Discovery', - description: 'Client discovered Authorization Server metadata', - status: - asMetadataRequest.responseStatus 
=== 200 ? 'SUCCESS' : 'FAILURE', - timestamp: timestamp(), - specReferences: [ - ServerAuthSpecReferences.RFC_8414_AS_DISCOVERY, - ServerAuthSpecReferences.MCP_AUTH_SERVER_METADATA - ], - details: { - url: asMetadataRequest.url, - status: asMetadataRequest.responseStatus - } - }); - - // Check AS metadata required fields - if ( - asMetadataRequest.responseStatus === 200 && - typeof asMetadataRequest.responseBody === 'object' - ) { - const metadata = asMetadataRequest.responseBody as Record< - string, - unknown - >; - - // Required fields per RFC 8414 and MCP auth spec - const hasIssuer = !!metadata.issuer; - const hasAuthorizationEndpoint = !!metadata.authorization_endpoint; - const hasTokenEndpoint = !!metadata.token_endpoint; - const codeChallengeMethodsSupported = - metadata.code_challenge_methods_supported; - const supportsPkceS256 = - Array.isArray(codeChallengeMethodsSupported) && - codeChallengeMethodsSupported.includes('S256'); - - // Build list of missing/invalid fields - const issues = []; - if (!hasIssuer) issues.push('missing issuer'); - if (!hasAuthorizationEndpoint) - issues.push('missing authorization_endpoint'); - if (!hasTokenEndpoint) issues.push('missing token_endpoint'); - if (!supportsPkceS256) - issues.push('code_challenge_methods_supported must include S256'); - - const allValid = issues.length === 0; - - checks.push({ - id: 'auth-as-metadata-fields', - name: 'AS Metadata Required Fields', - description: allValid - ? 'Authorization Server metadata includes all required fields' - : `Authorization Server metadata issues: ${issues.join(', ')}`, - status: allValid ? 
'SUCCESS' : 'FAILURE', - timestamp: timestamp(), - specReferences: [ - ServerAuthSpecReferences.RFC_8414_AS_FIELDS, - ServerAuthSpecReferences.MCP_AUTH_SERVER_METADATA - ], - details: { - issuer: metadata.issuer, - authorizationEndpoint: metadata.authorization_endpoint, - tokenEndpoint: metadata.token_endpoint, - codeChallengeMethodsSupported, - registrationEndpoint: metadata.registration_endpoint - } - }); - } - } else { - checks.push({ - id: 'auth-as-metadata-discovery', - name: 'Authorization Server Metadata Discovery', - description: - 'No AS metadata discovery request observed - required for OAuth flow', - status: 'FAILURE', - timestamp: timestamp(), - specReferences: [ - ServerAuthSpecReferences.RFC_8414_AS_DISCOVERY, - ServerAuthSpecReferences.MCP_AUTH_SERVER_METADATA - ] - }); - } - - // Phase 4: DCR Registration - const dcrRequest = requests.find( - (r) => r.requestType === 'dcr-registration' - ); - if (dcrRequest) { - checks.push({ - id: 'auth-dcr-registration', - name: 'Dynamic Client Registration', - description: 'Client registered via Dynamic Client Registration', - status: dcrRequest.responseStatus === 201 ? 'SUCCESS' : 'FAILURE', - timestamp: timestamp(), - specReferences: [ - ServerAuthSpecReferences.RFC_7591_DCR_ENDPOINT, - ServerAuthSpecReferences.MCP_AUTH_DCR - ], - details: { - url: dcrRequest.url, - status: dcrRequest.responseStatus - } - }); - - // Check DCR response - if ( - dcrRequest.responseStatus === 201 && - typeof dcrRequest.responseBody === 'object' - ) { - const client = dcrRequest.responseBody as Record; - - checks.push({ - id: 'auth-dcr-response', - name: 'DCR Response Contains Client Credentials', - description: 'DCR response includes client_id', - status: client.client_id ? 
'SUCCESS' : 'FAILURE', - timestamp: timestamp(), - specReferences: [ServerAuthSpecReferences.RFC_7591_DCR_RESPONSE], - details: { - hasClientId: !!client.client_id, - hasClientSecret: !!client.client_secret - } - }); - } - } - - // Phase 5: Token Request - const tokenRequest = requests.find( - (r) => r.requestType === 'token-request' - ); - if (tokenRequest) { - checks.push({ - id: 'auth-token-request', - name: 'Token Acquisition', - description: 'Client obtained access token from token endpoint', - status: tokenRequest.responseStatus === 200 ? 'SUCCESS' : 'FAILURE', - timestamp: timestamp(), - specReferences: [ - ServerAuthSpecReferences.OAUTH_2_1_TOKEN_REQUEST, - ServerAuthSpecReferences.MCP_AUTH_ACCESS_TOKEN - ], - details: { - url: tokenRequest.url, - status: tokenRequest.responseStatus - } - }); - - // Check token response - if ( - tokenRequest.responseStatus === 200 && - typeof tokenRequest.responseBody === 'object' - ) { - const tokens = tokenRequest.responseBody as Record; - - checks.push({ - id: 'auth-token-response', - name: 'Token Response Contains Access Token', - description: 'Token response includes access_token', - status: tokens.access_token ? 
'SUCCESS' : 'FAILURE', - timestamp: timestamp(), - specReferences: [ServerAuthSpecReferences.OAUTH_2_1_TOKEN_REQUEST], - details: { - hasAccessToken: !!tokens.access_token, - hasRefreshToken: !!tokens.refresh_token, - tokenType: tokens.token_type - } - }); - } - } else { - checks.push({ - id: 'auth-token-request', - name: 'Token Acquisition', - description: - 'No token request observed - required to complete OAuth flow', - status: 'FAILURE', - timestamp: timestamp(), - specReferences: [ - ServerAuthSpecReferences.OAUTH_2_1_TOKEN_REQUEST, - ServerAuthSpecReferences.MCP_AUTH_ACCESS_TOKEN - ] - }); - } - - // Phase 6: Authenticated MCP Request - const authenticatedRequest = requests.find( - (r) => - r.requestType === 'mcp-request' && - r.requestHeaders['authorization']?.startsWith('Bearer ') && - r.responseStatus === 200 - ); - - if (authenticatedRequest) { - checks.push({ - id: 'auth-authenticated-request', - name: 'Authenticated MCP Request Succeeds', - description: 'MCP request with Bearer token succeeds', - status: 'SUCCESS', - timestamp: timestamp(), - specReferences: [ - ServerAuthSpecReferences.RFC_6750_BEARER_TOKEN, - ServerAuthSpecReferences.MCP_AUTH_ACCESS_TOKEN - ], - details: { - url: authenticatedRequest.url, - status: authenticatedRequest.responseStatus - } - }); - - // Overall flow success - checks.push({ - id: 'auth-flow-completion', - name: 'OAuth Flow Completion', - description: 'Complete OAuth authentication flow succeeded', - status: 'SUCCESS', - timestamp: timestamp(), - specReferences: [ServerAuthSpecReferences.MCP_AUTH_ACCESS_TOKEN] - }); - } - } -} diff --git a/src/scenarios/server-auth/helpers/client-metadata.json b/src/scenarios/server-auth/helpers/client-metadata.json new file mode 100644 index 0000000..b9b999d --- /dev/null +++ b/src/scenarios/server-auth/helpers/client-metadata.json @@ -0,0 +1,9 @@ +{ + "client_id": 
"https://raw.githubusercontent.com/modelcontextprotocol/conformance/main/src/scenarios/server-auth/helpers/client-metadata.json", + "client_name": "MCP Conformance Test Suite", + "client_uri": "https://github.com/modelcontextprotocol/conformance", + "redirect_uris": ["http://localhost:3333/callback"], + "grant_types": ["authorization_code", "refresh_token"], + "response_types": ["code"], + "token_endpoint_auth_method": "none" +} diff --git a/src/scenarios/server-auth/helpers/oauth-client.ts b/src/scenarios/server-auth/helpers/oauth-client.ts index 1155392..f6b5f0b 100644 --- a/src/scenarios/server-auth/helpers/oauth-client.ts +++ b/src/scenarios/server-auth/helpers/oauth-client.ts @@ -192,12 +192,10 @@ export function createObservationMiddleware( } /** - * Fixed client metadata URL for CIMD conformance tests. - * When server supports client_id_metadata_document_supported, this URL - * will be used as the client_id instead of doing dynamic registration. + * Default CIMD URL hosted in the conformance repo for these server tests. 
*/ const DEFAULT_CIMD_CLIENT_METADATA_URL = - 'https://conformance-test.local/client-metadata.json'; + 'https://raw.githubusercontent.com/modelcontextprotocol/conformance/main/src/scenarios/server-auth/helpers/client-metadata.json'; /** Callback URL for OAuth redirects */ const CALLBACK_URL = 'http://localhost:3333/callback'; @@ -346,7 +344,7 @@ export class ConformanceOAuthProvider implements OAuthClientProvider { res.writeHead(400, { 'Content-Type': 'text/plain' }); res.end('Missing authorization code'); } else { - res.writeHead(404); + res.writeHead(404, { 'Content-Type': 'text/plain' }); res.end('Not found'); } }); diff --git a/src/scenarios/server-auth/index.ts b/src/scenarios/server-auth/index.ts index 6e3fcb9..a5ba26f 100644 --- a/src/scenarios/server-auth/index.ts +++ b/src/scenarios/server-auth/index.ts @@ -7,18 +7,18 @@ */ import type { ClientScenario } from '../../types'; -import { BasicDcrFlowScenario } from './basic-dcr-flow'; +import { BasicAuthFlowScenario } from './run-auth-flow'; // Re-export helpers and spec references export * from './helpers/oauth-client'; export * from './spec-references'; -export { BasicDcrFlowScenario } from './basic-dcr-flow'; +export { BasicAuthFlowScenario } from './run-auth-flow'; /** * All server authentication scenarios. */ export const serverAuthScenarios: ClientScenario[] = [ - new BasicDcrFlowScenario() + new BasicAuthFlowScenario() ]; /** diff --git a/src/scenarios/server-auth/run-auth-flow.ts b/src/scenarios/server-auth/run-auth-flow.ts new file mode 100644 index 0000000..f8b38af --- /dev/null +++ b/src/scenarios/server-auth/run-auth-flow.ts @@ -0,0 +1,931 @@ +/** + * Server Auth Flow + * + * Runs the complete OAuth authentication flow against an MCP server and + * generates conformance checks based on observed behavior. Supports all + * client registration approaches (CIMD, DCR, pre-registration). + * + * This module uses the MCP SDK's real client with observation middleware + * to verify server conformance. 
+ */ + +import type { + ClientScenario, + ClientScenarioOptions, + ConformanceCheck, + SpecVersion +} from '../../types'; +import { Client } from '@modelcontextprotocol/sdk/client/index.js'; +import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'; +import { applyMiddlewares } from '@modelcontextprotocol/sdk/client/middleware.js'; +import { + auth, + extractWWWAuthenticateParams, + UnauthorizedError +} from '@modelcontextprotocol/sdk/client/auth.js'; +import type { FetchLike } from '@modelcontextprotocol/sdk/shared/transport.js'; +import { + ConformanceOAuthProvider, + createObservationMiddleware, + type ObservedRequest +} from './helpers/oauth-client'; +import { ServerAuthSpecReferences } from './spec-references'; + +/** + * Basic Auth Flow Scenario - Tests complete OAuth flow. + */ +export class BasicAuthFlowScenario implements ClientScenario { + name = 'server-auth/basic-auth-flow'; + specVersions: SpecVersion[] = ['2025-11-25']; + description = `Tests the complete OAuth authentication flow. + +**Flow tested:** +1. Invalid token rejection -> 401 +2. Unauthenticated MCP request -> 401 + WWW-Authenticate +3. PRM Discovery -> resource, authorization_servers +4. AS Metadata Discovery -> endpoints, PKCE support +5. Client Registration (CIMD or DCR, as supported) +6. Token Acquisition -> access_token +7. Authenticated MCP Call -> success + +**Spec References:** +- RFC 9728 (Protected Resource Metadata) +- RFC 8414 (Authorization Server Metadata) +- RFC 7591 (Dynamic Client Registration) +- RFC 6750 (Bearer Token Usage) +- MCP Authorization Specification`; + + async run( + serverUrl: string, + options?: ClientScenarioOptions + ): Promise { + const checks: ConformanceCheck[] = []; + const observedRequests: ObservedRequest[] = []; + const timestamp = () => new Date().toISOString(); + const interactive = options?.interactive ?? 
false; + + // Verify server rejects invalid tokens with 401 + // Per MCP spec: "Invalid or expired tokens MUST receive a HTTP 401 response" + try { + const invalidTokenResponse = await fetch(serverUrl, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Accept: 'application/json, text/event-stream', + Authorization: 'Bearer invalid' + }, + body: JSON.stringify({ + jsonrpc: '2.0', + id: 1, + method: 'initialize', + params: { + protocolVersion: '2025-11-25', + capabilities: {}, + clientInfo: { name: 'conformance-test', version: '1.0.0' } + } + }) + }); + + checks.push({ + id: 'auth-invalid-token-rejected', + name: 'Invalid Token Rejected', + description: + 'Server returns 401 for requests with invalid Bearer token', + status: invalidTokenResponse.status === 401 ? 'SUCCESS' : 'FAILURE', + timestamp: timestamp(), + errorMessage: + invalidTokenResponse.status !== 401 + ? `Expected 401 but received ${invalidTokenResponse.status}` + : undefined, + specReferences: [ + ServerAuthSpecReferences.MCP_AUTH_ACCESS_TOKEN, + ServerAuthSpecReferences.RFC_6750_BEARER_TOKEN + ], + details: { + status: invalidTokenResponse.status + } + }); + } catch (error) { + checks.push({ + id: 'auth-invalid-token-rejected', + name: 'Invalid Token Rejected', + description: + 'Server returns 401 for requests with invalid Bearer token', + status: 'FAILURE', + timestamp: timestamp(), + errorMessage: error instanceof Error ? 
error.message : String(error), + specReferences: [ServerAuthSpecReferences.MCP_AUTH_ACCESS_TOKEN] + }); + } + + // Create observation middleware to record all requests + const observationMiddleware = createObservationMiddleware((req) => { + observedRequests.push(req); + }); + + // Create OAuth provider for conformance testing with minimal client metadata for the broadest compatibility + const provider = new ConformanceOAuthProvider( + { + client_name: 'MCP Conformance Test Client' + }, + { interactive } + ); + + // Pre-populate client credentials for pre-registration flow + if (options?.clientId) { + provider.saveClientInformation({ + client_id: options.clientId, + redirect_uris: [provider.redirectUrl as string], + ...(options.clientSecret && { + client_secret: options.clientSecret + }) + }); + } + + // Handle 401 with OAuth flow + const handle401 = async ( + response: Response, + next: FetchLike, + url: string + ): Promise => { + const { resourceMetadataUrl, scope } = + extractWWWAuthenticateParams(response); + + let result = await auth(provider, { + serverUrl: url, + resourceMetadataUrl, + scope, + fetchFn: next + }); + + if (result === 'REDIRECT') { + // Get auth code from the redirect (auto-login) + const authorizationCode = await provider.getAuthCode(); + + result = await auth(provider, { + serverUrl: url, + resourceMetadataUrl, + scope, + authorizationCode, + fetchFn: next + }); + + if (result !== 'AUTHORIZED') { + throw new UnauthorizedError( + `Authentication failed with result: ${result}` + ); + } + } + }; + + // Create middleware that handles OAuth with observation + const oauthMiddleware = (next: FetchLike): FetchLike => { + return async (input, init) => { + const headers = new Headers(init?.headers); + const tokens = await provider.tokens(); + if (tokens) { + headers.set('Authorization', `Bearer ${tokens.access_token}`); + } + + const response = await next(input, { ...init, headers }); + + if (response.status === 401) { + const url = typeof input === 
'string' ? input : input.toString(); + await handle401(response.clone(), next, url); + // Retry with fresh tokens + const newTokens = await provider.tokens(); + if (newTokens) { + headers.set('Authorization', `Bearer ${newTokens.access_token}`); + } + return await next(input, { ...init, headers }); + } + + return response; + }; + }; + + // Compose middlewares: observation wraps oauth handling + const enhancedFetch = applyMiddlewares( + observationMiddleware, + oauthMiddleware + )(fetch); + + try { + // Create MCP client + const client = new Client( + { name: 'conformance-test-client', version: '1.0.0' }, + { capabilities: {} } + ); + + const transport = new StreamableHTTPClientTransport(new URL(serverUrl), { + fetch: enhancedFetch + }); + + // Connect triggers the OAuth flow + await client.connect(transport); + + // Make an authenticated call + try { + await client.listTools(); + } catch { + // Tool listing may fail if server doesn't have tools, but that's ok + } + + await transport.close(); + + // Analyze observed requests to generate conformance checks + this.analyzeRequests(observedRequests, checks, timestamp, serverUrl); + } catch (error) { + // Still analyze what we observed before the error + this.analyzeRequests(observedRequests, checks, timestamp, serverUrl); + + checks.push({ + id: 'auth-flow-completion', + name: 'OAuth Flow Completion', + description: 'Complete OAuth authentication flow succeeded', + status: 'FAILURE', + timestamp: timestamp(), + errorMessage: error instanceof Error ? error.message : String(error), + specReferences: [ServerAuthSpecReferences.MCP_AUTH_ACCESS_TOKEN] + }); + } + + // If DCR is supported but wasn't exercised in the flow (e.g. CIMD was preferred), + // do a standalone DCR registration test + await this.testStandaloneDcr(observedRequests, checks, timestamp); + + // If CIMD is supported but wasn't exercised in the flow (e.g. 
pre-registered creds + // were used or DCR was preferred), do a standalone CIMD auth flow test + await this.testStandaloneCimd( + observedRequests, + checks, + timestamp, + serverUrl, + options?.interactive ?? false + ); + + return checks; + } + + /** + * If DCR is supported but wasn't exercised in the flow, do a standalone + * DCR registration to verify the server accepts it. + */ + private async testStandaloneDcr( + observedRequests: ObservedRequest[], + checks: ConformanceCheck[], + timestamp: () => string + ): Promise { + const asMetadataRequest = observedRequests.find( + (r) => r.requestType === 'as-metadata' + ); + const asMetadata = + asMetadataRequest?.responseStatus === 200 && + typeof asMetadataRequest.responseBody === 'object' + ? (asMetadataRequest.responseBody as Record) + : null; + + // Skip if DCR is not supported or already tested + // Client prefers CIMD over DCR, so skip if there's already a DCR request from the original flow + const dcrSupported = !!asMetadata?.registration_endpoint; + const dcrAlreadyTested = observedRequests.some( + (r) => r.requestType === 'dcr-registration' + ); + + if (!dcrSupported || dcrAlreadyTested) { + return; + } + + const registrationEndpoint = asMetadata!.registration_endpoint as string; + + try { + const response = await fetch(registrationEndpoint, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + client_name: 'MCP Conformance DCR Test', + redirect_uris: ['http://localhost:3333/callback'] + }) + }); + + let responseBody: unknown; + try { + responseBody = await response.json(); + } catch { + // Not valid JSON + } + + performDcrChecks(checks, timestamp, { + url: registrationEndpoint, + status: response.status, + body: responseBody + }); + } catch (error) { + checks.push({ + id: 'auth-dcr-registration', + name: 'Dynamic Client Registration', + description: 'Server accepted Dynamic Client Registration', + status: 'FAILURE', + timestamp: timestamp(), + errorMessage: error 
instanceof Error ? error.message : String(error), + specReferences: [ServerAuthSpecReferences.RFC_7591_DCR_ENDPOINT] + }); + } + } + + /** + * If CIMD is supported but wasn't exercised in the flow, do a standalone + * CIMD auth flow to verify the AS accepts a URL-based client_id. + */ + private async testStandaloneCimd( + observedRequests: ObservedRequest[], + checks: ConformanceCheck[], + timestamp: () => string, + serverUrl: string, + interactive: boolean + ): Promise { + const asMetadataRequest = observedRequests.find( + (r) => r.requestType === 'as-metadata' + ); + const asMetadata = + asMetadataRequest?.responseStatus === 200 && + typeof asMetadataRequest.responseBody === 'object' + ? (asMetadataRequest.responseBody as Record) + : null; + + const cimdSupported = + asMetadata?.client_id_metadata_document_supported === true; + + // Check if the main flow already used CIMD (URL-based client_id in auth request) + const authorizationRequest = observedRequests.find( + (r) => r.requestType === 'authorization' + ); + const cimdAlreadyTested = + authorizationRequest && + typeof authorizationRequest.url === 'string' && + /client_id=https?%3A/.test(authorizationRequest.url); + + if (!cimdSupported || cimdAlreadyTested) { + return; + } + + // Reuse WWW-Authenticate params from the main flow's observed 401 + const unauthorizedRequest = observedRequests.find( + (r) => r.responseStatus === 401 && r.requestType === 'mcp-request' + ); + const resourceMetadataUrlStr = + unauthorizedRequest?.wwwAuthenticate?.params.resource_metadata; + const resourceMetadataUrl = resourceMetadataUrlStr + ? 
new URL(resourceMetadataUrlStr) + : undefined; + const scope = unauthorizedRequest?.wwwAuthenticate?.params.scope; + + try { + const cimdProvider = new ConformanceOAuthProvider( + { client_name: 'MCP Conformance CIMD Test' }, + { interactive } + ); + + // Run auth flow with CIMD provider + let result = await auth(cimdProvider, { + serverUrl, + resourceMetadataUrl, + scope, + fetchFn: fetch + }); + + if (result === 'REDIRECT') { + const authorizationCode = await cimdProvider.getAuthCode(); + result = await auth(cimdProvider, { + serverUrl, + resourceMetadataUrl, + scope, + authorizationCode, + fetchFn: fetch + }); + } + + const tokens = await cimdProvider.tokens(); + + checks.push({ + id: 'auth-cimd-flow', + name: 'CIMD Authentication Flow', + description: + 'AS accepts URL-based client_id via CIMD authentication flow', + status: tokens?.access_token ? 'SUCCESS' : 'FAILURE', + timestamp: timestamp(), + errorMessage: !tokens?.access_token + ? `Auth flow completed with result "${result}" but no access token obtained` + : undefined, + specReferences: [ServerAuthSpecReferences.MCP_AUTH_DCR], + details: { + hasAccessToken: !!tokens?.access_token, + tokenType: tokens?.token_type + } + }); + } catch (error) { + checks.push({ + id: 'auth-cimd-flow', + name: 'CIMD Authentication Flow', + description: + 'AS accepts URL-based client_id via CIMD authentication flow', + status: 'FAILURE', + timestamp: timestamp(), + errorMessage: error instanceof Error ? error.message : String(error), + specReferences: [ServerAuthSpecReferences.MCP_AUTH_DCR] + }); + } + } + + /** + * Analyze observed requests and generate conformance checks. 
+ */ + private analyzeRequests( + requests: ObservedRequest[], + checks: ConformanceCheck[], + timestamp: () => string, + serverUrl: string + ): void { + // Phase 1: Check for 401 response with WWW-Authenticate + const unauthorizedRequest = requests.find( + (r) => r.responseStatus === 401 && r.requestType === 'mcp-request' + ); + + if (unauthorizedRequest) { + checks.push({ + id: 'auth-401-response', + name: 'Unauthenticated Request Returns 401', + description: + 'Server returns 401 Unauthorized for unauthenticated MCP requests', + status: 'SUCCESS', + timestamp: timestamp(), + specReferences: [ + ServerAuthSpecReferences.RFC_7235_401_RESPONSE, + ServerAuthSpecReferences.MCP_AUTH_ACCESS_TOKEN + ], + details: { + url: unauthorizedRequest.url, + status: unauthorizedRequest.responseStatus + } + }); + + // Check WWW-Authenticate header + if (unauthorizedRequest.wwwAuthenticate) { + const wwwAuth = unauthorizedRequest.wwwAuthenticate; + + const isBearer = wwwAuth.scheme.toLowerCase() === 'bearer'; + checks.push({ + id: 'auth-www-authenticate-header', + name: 'WWW-Authenticate Header Present', + description: + 'Server includes WWW-Authenticate header with Bearer scheme in 401 response', + status: isBearer ? 'SUCCESS' : 'FAILURE', + timestamp: timestamp(), + errorMessage: !isBearer + ? 
`Expected Bearer scheme but received "${wwwAuth.scheme}"` + : undefined, + specReferences: [ + ServerAuthSpecReferences.RFC_6750_WWW_AUTHENTICATE, + ServerAuthSpecReferences.RFC_7235_WWW_AUTHENTICATE + ], + details: { + scheme: wwwAuth.scheme, + params: wwwAuth.params + } + }); + + // Check for resource_metadata parameter + if (wwwAuth.params.resource_metadata) { + checks.push({ + id: 'auth-resource-metadata-param', + name: 'Resource Metadata URL in WWW-Authenticate', + description: + 'WWW-Authenticate header includes resource_metadata parameter', + status: 'INFO', + timestamp: timestamp(), + specReferences: [ + ServerAuthSpecReferences.RFC_9728_WWW_AUTHENTICATE + ], + details: { + resourceMetadata: wwwAuth.params.resource_metadata + } + }); + } + + // Check for scope parameter (MCP spec: servers SHOULD include scope) + checks.push({ + id: 'auth-www-authenticate-scope', + name: 'Scope in WWW-Authenticate', + description: + 'Server includes scope parameter in WWW-Authenticate header', + status: wwwAuth.params.scope ? 'SUCCESS' : 'WARNING', + timestamp: timestamp(), + specReferences: [ServerAuthSpecReferences.RFC_6750_WWW_AUTHENTICATE], + details: wwwAuth.params.scope + ? 
{ scope: wwwAuth.params.scope } + : undefined + }); + } else { + checks.push({ + id: 'auth-www-authenticate-header', + name: 'WWW-Authenticate Header Present', + description: + 'Server includes WWW-Authenticate header with Bearer scheme in 401 response', + status: 'FAILURE', + timestamp: timestamp(), + errorMessage: + 'WWW-Authenticate header missing from 401 response (required by RFC 7235 Section 3.1)', + specReferences: [ + ServerAuthSpecReferences.RFC_6750_WWW_AUTHENTICATE, + ServerAuthSpecReferences.RFC_7235_WWW_AUTHENTICATE + ] + }); + } + } else { + checks.push({ + id: 'auth-401-response', + name: 'Unauthenticated Request Returns 401', + description: + 'Server returns 401 Unauthorized for unauthenticated MCP requests', + status: 'FAILURE', + timestamp: timestamp(), + errorMessage: 'No 401 response observed', + specReferences: [ServerAuthSpecReferences.RFC_7235_401_RESPONSE] + }); + } + + // Phase 2: PRM Discovery + const prmRequest = requests.find((r) => r.requestType === 'prm-discovery'); + if (prmRequest) { + checks.push({ + id: 'auth-prm-discovery', + name: 'Protected Resource Metadata Discovery', + description: 'Client discovered Protected Resource Metadata endpoint', + status: prmRequest.responseStatus === 200 ? 
'SUCCESS' : 'FAILURE', + timestamp: timestamp(), + specReferences: [ + ServerAuthSpecReferences.RFC_9728_PRM_DISCOVERY, + ServerAuthSpecReferences.MCP_AUTH_PRM_DISCOVERY + ], + details: { + url: prmRequest.url, + status: prmRequest.responseStatus, + body: prmRequest.responseBody + } + }); + + // Check PRM response content + if ( + prmRequest.responseStatus === 200 && + typeof prmRequest.responseBody === 'object' + ) { + const prm = prmRequest.responseBody as Record; + + // Check PRM resource field (RFC 9728 Section 3.2) + if (prm.resource) { + const resource = prm.resource as string; + const resourceMatches = + resource === serverUrl || serverUrl.startsWith(resource); + + checks.push({ + id: 'auth-prm-resource', + name: 'PRM Resource Field', + description: + 'Protected Resource Metadata includes resource field matching server URL', + status: resourceMatches ? 'SUCCESS' : 'FAILURE', + timestamp: timestamp(), + errorMessage: !resourceMatches + ? `PRM resource "${resource}" does not match server URL "${serverUrl}"` + : undefined, + specReferences: [ServerAuthSpecReferences.RFC_9728_PRM_RESPONSE], + details: { + resource, + serverUrl + } + }); + } else { + checks.push({ + id: 'auth-prm-resource', + name: 'PRM Resource Field', + description: + 'Protected Resource Metadata includes resource field matching server URL', + status: 'FAILURE', + timestamp: timestamp(), + errorMessage: 'PRM response missing required resource field', + specReferences: [ServerAuthSpecReferences.RFC_9728_PRM_RESPONSE] + }); + } + + const hasAuthServers = + prm.authorization_servers && Array.isArray(prm.authorization_servers); + checks.push({ + id: 'auth-prm-authorization-servers', + name: 'PRM Contains Authorization Servers', + description: + 'Protected Resource Metadata includes authorization_servers array', + status: hasAuthServers ? 'SUCCESS' : 'FAILURE', + timestamp: timestamp(), + errorMessage: !hasAuthServers + ? 
'PRM response missing required authorization_servers array' + : undefined, + specReferences: [ServerAuthSpecReferences.RFC_9728_PRM_RESPONSE], + details: hasAuthServers + ? { authorizationServers: prm.authorization_servers } + : undefined + }); + } + } else { + checks.push({ + id: 'auth-prm-discovery', + name: 'Protected Resource Metadata Discovery', + description: 'Client discovered Protected Resource Metadata endpoint', + status: 'FAILURE', + errorMessage: 'No PRM discovery request observed', + timestamp: timestamp(), + specReferences: [ + ServerAuthSpecReferences.RFC_9728_PRM_DISCOVERY, + ServerAuthSpecReferences.MCP_AUTH_PRM_DISCOVERY + ] + }); + } + + // Phase 3: AS Metadata Discovery + const asMetadataRequest = requests.find( + (r) => r.requestType === 'as-metadata' + ); + if (asMetadataRequest) { + checks.push({ + id: 'auth-as-metadata-discovery', + name: 'Authorization Server Metadata Discovery', + description: 'Client discovered Authorization Server metadata', + status: + asMetadataRequest.responseStatus === 200 ? 
'SUCCESS' : 'FAILURE', + timestamp: timestamp(), + specReferences: [ + ServerAuthSpecReferences.RFC_8414_AS_DISCOVERY, + ServerAuthSpecReferences.MCP_AUTH_SERVER_METADATA + ], + details: { + url: asMetadataRequest.url, + status: asMetadataRequest.responseStatus + } + }); + + // Check AS metadata required fields + if ( + asMetadataRequest.responseStatus === 200 && + typeof asMetadataRequest.responseBody === 'object' + ) { + const metadata = asMetadataRequest.responseBody as Record< + string, + unknown + >; + + // Required fields per RFC 8414 and MCP auth spec + const hasIssuer = !!metadata.issuer; + const hasAuthorizationEndpoint = !!metadata.authorization_endpoint; + const hasTokenEndpoint = !!metadata.token_endpoint; + const codeChallengeMethodsSupported = + metadata.code_challenge_methods_supported; + const supportsPkceS256 = + Array.isArray(codeChallengeMethodsSupported) && + codeChallengeMethodsSupported.includes('S256'); + + // Build list of missing/invalid fields + const issues = []; + if (!hasIssuer) issues.push('missing issuer'); + if (!hasAuthorizationEndpoint) + issues.push('missing authorization_endpoint'); + if (!hasTokenEndpoint) issues.push('missing token_endpoint'); + if (!supportsPkceS256) + issues.push('code_challenge_methods_supported must include S256'); + + const allValid = issues.length === 0; + + checks.push({ + id: 'auth-as-metadata-fields', + name: 'AS Metadata Required Fields', + description: + 'Authorization Server metadata includes all required fields', + status: allValid ? 'SUCCESS' : 'FAILURE', + errorMessage: !allValid ? 
issues.join('; ') : undefined, + timestamp: timestamp(), + specReferences: [ + ServerAuthSpecReferences.RFC_8414_AS_FIELDS, + ServerAuthSpecReferences.MCP_AUTH_SERVER_METADATA + ], + details: { + issuer: metadata.issuer, + authorizationEndpoint: metadata.authorization_endpoint, + tokenEndpoint: metadata.token_endpoint, + codeChallengeMethodsSupported, + registrationEndpoint: metadata.registration_endpoint + } + }); + } + } else { + checks.push({ + id: 'auth-as-metadata-discovery', + name: 'Authorization Server Metadata Discovery', + description: 'Client discovered Authorization Server metadata', + status: 'FAILURE', + errorMessage: 'No AS metadata discovery request observed', + timestamp: timestamp(), + specReferences: [ + ServerAuthSpecReferences.RFC_8414_AS_DISCOVERY, + ServerAuthSpecReferences.MCP_AUTH_SERVER_METADATA + ] + }); + } + + // Phase 4: Client Registration + // Determine AS capabilities from observed metadata + const asMetadata = + asMetadataRequest?.responseStatus === 200 && + typeof asMetadataRequest.responseBody === 'object' + ? (asMetadataRequest.responseBody as Record) + : null; + + const cimdSupported = + asMetadata?.client_id_metadata_document_supported === true; + const dcrSupported = !!asMetadata?.registration_endpoint; + + const dcrRequest = requests.find( + (r) => r.requestType === 'dcr-registration' + ); + + // Report AS registration capabilities + checks.push({ + id: 'auth-as-cimd-supported', + name: 'AS Supports CIMD', + description: + 'Authorization server advertises client_id_metadata_document_supported', + status: cimdSupported ? 'SUCCESS' : 'INFO', + timestamp: timestamp(), + specReferences: [ServerAuthSpecReferences.MCP_AUTH_DCR], + details: { cimdSupported } + }); + + checks.push({ + id: 'auth-as-dcr-supported', + name: 'AS Supports DCR', + description: 'Authorization server advertises registration_endpoint', + status: dcrSupported ? 
'SUCCESS' : 'INFO', + timestamp: timestamp(), + specReferences: [ + ServerAuthSpecReferences.RFC_7591_DCR_ENDPOINT, + ServerAuthSpecReferences.MCP_AUTH_DCR + ], + details: { + registrationEndpoint: asMetadata?.registration_endpoint + } + }); + + // Validate server accepted DCR registration if it occurred + if (dcrRequest) { + performDcrChecks(checks, timestamp, { + url: dcrRequest.url, + status: dcrRequest.responseStatus, + body: dcrRequest.responseBody + }); + } + + // Phase 5: Token Request + const tokenRequest = requests.find( + (r) => r.requestType === 'token-request' + ); + if (tokenRequest) { + checks.push({ + id: 'auth-token-request', + name: 'Token Acquisition', + description: 'Client obtained access token from token endpoint', + status: tokenRequest.responseStatus === 200 ? 'SUCCESS' : 'FAILURE', + timestamp: timestamp(), + specReferences: [ + ServerAuthSpecReferences.OAUTH_2_1_TOKEN_REQUEST, + ServerAuthSpecReferences.MCP_AUTH_ACCESS_TOKEN + ], + details: { + url: tokenRequest.url, + status: tokenRequest.responseStatus + } + }); + + // Check token response + if ( + tokenRequest.responseStatus === 200 && + typeof tokenRequest.responseBody === 'object' + ) { + const tokens = tokenRequest.responseBody as Record; + + checks.push({ + id: 'auth-token-response', + name: 'Token Response Contains Access Token', + description: 'Token response includes access_token', + status: tokens.access_token ? 
'SUCCESS' : 'FAILURE', + timestamp: timestamp(), + specReferences: [ServerAuthSpecReferences.OAUTH_2_1_TOKEN_REQUEST], + details: { + hasAccessToken: !!tokens.access_token, + hasRefreshToken: !!tokens.refresh_token, + tokenType: tokens.token_type + } + }); + } + } else { + checks.push({ + id: 'auth-token-request', + name: 'Token Acquisition', + description: 'Client obtained access token from token endpoint', + status: 'FAILURE', + errorMessage: 'No token request observed', + timestamp: timestamp(), + specReferences: [ + ServerAuthSpecReferences.OAUTH_2_1_TOKEN_REQUEST, + ServerAuthSpecReferences.MCP_AUTH_ACCESS_TOKEN + ] + }); + } + + // Phase 6: Authenticated MCP Request + const authenticatedRequest = requests.find( + (r) => + r.requestType === 'mcp-request' && + r.requestHeaders['authorization']?.startsWith('Bearer ') && + r.responseStatus === 200 + ); + + if (authenticatedRequest) { + checks.push({ + id: 'auth-authenticated-request', + name: 'Authenticated MCP Request Succeeds', + description: 'MCP request with Bearer token succeeds', + status: 'SUCCESS', + timestamp: timestamp(), + specReferences: [ + ServerAuthSpecReferences.RFC_6750_BEARER_TOKEN, + ServerAuthSpecReferences.MCP_AUTH_ACCESS_TOKEN + ], + details: { + url: authenticatedRequest.url, + status: authenticatedRequest.responseStatus + } + }); + + // Overall flow success + checks.push({ + id: 'auth-flow-completion', + name: 'OAuth Flow Completion', + description: 'Complete OAuth authentication flow succeeded', + status: 'SUCCESS', + timestamp: timestamp(), + specReferences: [ServerAuthSpecReferences.MCP_AUTH_ACCESS_TOKEN] + }); + } + } +} + +function performDcrChecks( + checks: ConformanceCheck[], + timestamp: () => string, + response: { url: string; status: number; body?: unknown } +): void { + const success = response.status === 201; + checks.push({ + id: 'auth-dcr-registration', + name: 'Dynamic Client Registration', + description: 'Server accepted Dynamic Client Registration', + status: success ? 
'SUCCESS' : 'FAILURE', + timestamp: timestamp(), + errorMessage: !success + ? `Registration endpoint returned ${response.status}` + : undefined, + specReferences: [ + ServerAuthSpecReferences.RFC_7591_DCR_ENDPOINT, + ServerAuthSpecReferences.MCP_AUTH_DCR + ], + details: { + url: response.url, + status: response.status + } + }); + + if (success && typeof response.body === 'object' && response.body !== null) { + const client = response.body as Record; + checks.push({ + id: 'auth-dcr-response', + name: 'DCR Response Contains Client Credentials', + description: 'DCR response includes client_id', + status: client.client_id ? 'SUCCESS' : 'FAILURE', + timestamp: timestamp(), + errorMessage: !client.client_id + ? 'DCR response missing client_id' + : undefined, + specReferences: [ServerAuthSpecReferences.RFC_7591_DCR_RESPONSE], + details: { + hasClientId: !!client.client_id, + hasClientSecret: !!client.client_secret + } + }); + } +} diff --git a/src/scenarios/server/dns-rebinding.ts b/src/scenarios/server/dns-rebinding.ts new file mode 100644 index 0000000..cd6c5f4 --- /dev/null +++ b/src/scenarios/server/dns-rebinding.ts @@ -0,0 +1,239 @@ +/** + * DNS Rebinding Protection test scenarios for MCP servers + * + * Tests that localhost MCP servers properly validate Host or Origin headers + * to prevent DNS rebinding attacks. See GHSA-w48q-cv73-mx4w for details. 
+ */ + +import { ClientScenario, ConformanceCheck, SpecVersion } from '../../types'; +import { request } from 'undici'; + +const SPEC_REFERENCES = [ + { + id: 'MCP-DNS-Rebinding-Protection', + url: 'https://modelcontextprotocol.io/specification/2025-11-25/basic/security_best_practices#local-mcp-server-compromise' + }, + { + id: 'MCP-Transport-Security', + url: 'https://modelcontextprotocol.io/specification/2025-11-25/basic/transports#security-warning' + } +]; + +/** + * Check if URL is a localhost URL + */ +function isLocalhostUrl(serverUrl: string): boolean { + const url = new URL(serverUrl); + const hostname = url.hostname.toLowerCase(); + return ( + hostname === 'localhost' || + hostname === '127.0.0.1' || + hostname === '[::1]' || + hostname === '::1' + ); +} + +/** + * Get the host header value from a URL (hostname:port) + */ +function getHostFromUrl(serverUrl: string): string { + const url = new URL(serverUrl); + return url.host; // includes port if present +} + +/** + * Send an MCP initialize request with custom Host and Origin headers. + * Both headers are set to the same value so that servers checking either + * Host or Origin will properly detect the rebinding attempt. 
+ */ +async function sendRequestWithHostAndOrigin( + serverUrl: string, + hostOrOrigin: string +): Promise<{ statusCode: number; body: unknown }> { + const response = await request(serverUrl, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Host: hostOrOrigin, + Origin: `http://${hostOrOrigin}`, + Accept: 'application/json, text/event-stream' + }, + body: JSON.stringify({ + jsonrpc: '2.0', + id: 1, + method: 'initialize', + params: { + protocolVersion: '2025-11-25', + capabilities: {}, + clientInfo: { name: 'conformance-dns-rebinding-test', version: '1.0.0' } + } + }) + }); + + let body: unknown; + try { + body = await response.body.json(); + } catch { + body = null; + } + + return { + statusCode: response.statusCode, + body + }; +} + +export class DNSRebindingProtectionScenario implements ClientScenario { + name = 'dns-rebinding-protection'; + specVersions: SpecVersion[] = ['2025-11-25']; + description = `Test DNS rebinding protection for localhost servers. + +**Scope:** This test applies to localhost MCP servers running without HTTPS and without +authentication. These servers are vulnerable to DNS rebinding attacks where a malicious +website tricks a user's browser into making requests to the local server. + +**Attack scenario:** +1. User visits malicious website (e.g., evil.com) +2. evil.com's DNS is configured to resolve to 127.0.0.1 +3. Browser makes request to evil.com which actually goes to localhost +4. Without Host/Origin header validation, the local MCP server processes the request + +**Requirements:** +- Server **MUST** validate the Host or Origin header on incoming requests +- Server **MUST** reject requests with non-localhost Host/Origin headers (HTTP 4xx) +- Server **MUST** accept requests with valid localhost Host/Origin headers + +**Valid localhost values:** \`localhost\`, \`127.0.0.1\`, \`[::1]\` (with optional port) + +**Note:** This test requires a localhost server URL. Non-localhost URLs will fail. 
+ +See: https://github.com/modelcontextprotocol/typescript-sdk/security/advisories/GHSA-w48q-cv73-mx4w`; + + async run(serverUrl: string): Promise { + const checks: ConformanceCheck[] = []; + const timestamp = new Date().toISOString(); + + // Common check properties + const rejectedCheckBase = { + id: 'localhost-host-rebinding-rejected', + name: 'DNSRebindingRejected', + description: + 'Server rejects requests with non-localhost Host/Origin headers', + timestamp, + specReferences: SPEC_REFERENCES + }; + + const acceptedCheckBase = { + id: 'localhost-host-valid-accepted', + name: 'LocalhostHostAccepted', + description: + 'Server accepts requests with valid localhost Host/Origin headers', + timestamp, + specReferences: SPEC_REFERENCES + }; + + // First check: Is this a localhost URL? + if (!isLocalhostUrl(serverUrl)) { + const errorMessage = + 'DNS rebinding tests require a localhost server URL (localhost, 127.0.0.1, or [::1])'; + const details = { serverUrl, reason: 'non-localhost-url' }; + + checks.push({ + ...rejectedCheckBase, + status: 'FAILURE', + errorMessage, + details + }); + checks.push({ + ...acceptedCheckBase, + status: 'FAILURE', + errorMessage, + details + }); + return checks; + } + + const validHost = getHostFromUrl(serverUrl); + const attackerHost = 'evil.example.com'; + + // Check 1: Invalid Host/Origin headers should be rejected with a 4xx error + try { + const response = await sendRequestWithHostAndOrigin( + serverUrl, + attackerHost + ); + const isRejected = + response.statusCode >= 400 && response.statusCode < 500; + + const details = { + hostHeader: attackerHost, + originHeader: `http://${attackerHost}`, + statusCode: response.statusCode, + body: response.body + }; + + if (isRejected) { + checks.push({ + ...rejectedCheckBase, + status: 'SUCCESS', + details + }); + } else { + checks.push({ + ...rejectedCheckBase, + status: 'FAILURE', + errorMessage: `Expected HTTP 4xx for invalid Host/Origin headers, got ${response.statusCode}`, + details + }); + 
} + } catch (error) { + checks.push({ + ...rejectedCheckBase, + status: 'FAILURE', + errorMessage: `Request failed: ${error instanceof Error ? error.message : String(error)}`, + details: { + hostHeader: attackerHost, + originHeader: `http://${attackerHost}` + } + }); + } + + // Check 2: Valid localhost Host/Origin headers should be accepted (2xx response) + try { + const response = await sendRequestWithHostAndOrigin(serverUrl, validHost); + const isAccepted = + response.statusCode >= 200 && response.statusCode < 300; + + const details = { + hostHeader: validHost, + originHeader: `http://${validHost}`, + statusCode: response.statusCode, + body: response.body + }; + + if (isAccepted) { + checks.push({ + ...acceptedCheckBase, + status: 'SUCCESS', + details + }); + } else { + checks.push({ + ...acceptedCheckBase, + status: 'FAILURE', + errorMessage: `Expected HTTP 2xx for valid localhost Host/Origin headers, got ${response.statusCode}`, + details + }); + } + } catch (error) { + checks.push({ + ...acceptedCheckBase, + status: 'FAILURE', + errorMessage: `Request failed: ${error instanceof Error ? 
error.message : String(error)}`, + details: { hostHeader: validHost, originHeader: `http://${validHost}` } + }); + } + + return checks; + } +} diff --git a/src/scenarios/server/elicitation-defaults.ts b/src/scenarios/server/elicitation-defaults.ts index 2be114c..a458fec 100644 --- a/src/scenarios/server/elicitation-defaults.ts +++ b/src/scenarios/server/elicitation-defaults.ts @@ -2,12 +2,13 @@ * SEP-1034: Elicitation default values test scenarios for MCP servers */ -import { ClientScenario, ConformanceCheck } from '../../types'; +import { ClientScenario, ConformanceCheck, SpecVersion } from '../../types'; import { connectToServer } from './client-helper'; import { ElicitRequestSchema } from '@modelcontextprotocol/sdk/types.js'; export class ElicitationDefaultsScenario implements ClientScenario { name = 'elicitation-sep1034-defaults'; + specVersions: SpecVersion[] = ['2025-11-25']; description = `Test elicitation with default values for all primitive types (SEP-1034). **Server Implementation Requirements:** diff --git a/src/scenarios/server/elicitation-enums.ts b/src/scenarios/server/elicitation-enums.ts index e5c1fa3..c9eb598 100644 --- a/src/scenarios/server/elicitation-enums.ts +++ b/src/scenarios/server/elicitation-enums.ts @@ -2,12 +2,13 @@ * SEP-1330: Elicitation enum schema improvements test scenarios for MCP servers */ -import { ClientScenario, ConformanceCheck } from '../../types'; +import { ClientScenario, ConformanceCheck, SpecVersion } from '../../types'; import { connectToServer } from './client-helper'; import { ElicitRequestSchema } from '@modelcontextprotocol/sdk/types.js'; export class ElicitationEnumsScenario implements ClientScenario { name = 'elicitation-sep1330-enums'; + specVersions: SpecVersion[] = ['2025-11-25']; description = `Test elicitation with enum schema improvements (SEP-1330). 
**Server Implementation Requirements:** diff --git a/src/scenarios/server/json-schema-2020-12.ts b/src/scenarios/server/json-schema-2020-12.ts index 2cfd08b..be20f2a 100644 --- a/src/scenarios/server/json-schema-2020-12.ts +++ b/src/scenarios/server/json-schema-2020-12.ts @@ -6,7 +6,7 @@ * or additionalProperties fields. */ -import { ClientScenario, ConformanceCheck } from '../../types.js'; +import { ClientScenario, ConformanceCheck, SpecVersion } from '../../types.js'; import { connectToServer } from './client-helper.js'; const EXPECTED_TOOL_NAME = 'json_schema_2020_12_tool'; @@ -14,6 +14,7 @@ const EXPECTED_SCHEMA_DIALECT = 'https://json-schema.org/draft/2020-12/schema'; export class JsonSchema2020_12Scenario implements ClientScenario { name = 'json-schema-2020-12'; + specVersions: SpecVersion[] = ['2025-11-25']; description = `Validates JSON Schema 2020-12 keyword preservation (SEP-1613). **Server Implementation Requirements:** diff --git a/src/scenarios/server/lifecycle.ts b/src/scenarios/server/lifecycle.ts index d9b341e..392a932 100644 --- a/src/scenarios/server/lifecycle.ts +++ b/src/scenarios/server/lifecycle.ts @@ -2,11 +2,12 @@ * Lifecycle test scenarios for MCP servers */ -import { ClientScenario, ConformanceCheck } from '../../types'; +import { ClientScenario, ConformanceCheck, SpecVersion } from '../../types'; import { connectToServer } from './client-helper'; export class ServerInitializeScenario implements ClientScenario { name = 'server-initialize'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test basic server initialization handshake. 
**Server Implementation Requirements:** diff --git a/src/scenarios/server/prompts.ts b/src/scenarios/server/prompts.ts index 436564b..f62faac 100644 --- a/src/scenarios/server/prompts.ts +++ b/src/scenarios/server/prompts.ts @@ -2,11 +2,12 @@ * Prompts test scenarios for MCP servers */ -import { ClientScenario, ConformanceCheck } from '../../types'; +import { ClientScenario, ConformanceCheck, SpecVersion } from '../../types'; import { connectToServer } from './client-helper'; export class PromptsListScenario implements ClientScenario { name = 'prompts-list'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test listing available prompts. **Server Implementation Requirements:** @@ -87,6 +88,7 @@ export class PromptsListScenario implements ClientScenario { export class PromptsGetSimpleScenario implements ClientScenario { name = 'prompts-get-simple'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test getting a simple prompt without arguments. **Server Implementation Requirements:** @@ -171,6 +173,7 @@ Implement a prompt named \`test_simple_prompt\` with no arguments that returns: export class PromptsGetWithArgsScenario implements ClientScenario { name = 'prompts-get-with-args'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test parameterized prompt. **Server Implementation Requirements:** @@ -266,6 +269,7 @@ Returns (with args \`{arg1: "hello", arg2: "world"}\`): export class PromptsGetEmbeddedResourceScenario implements ClientScenario { name = 'prompts-get-embedded-resource'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test prompt with embedded resource content. **Server Implementation Requirements:** @@ -371,6 +375,7 @@ Returns: export class PromptsGetWithImageScenario implements ClientScenario { name = 'prompts-get-with-image'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test prompt with image content. 
**Server Implementation Requirements:** diff --git a/src/scenarios/server/resources.ts b/src/scenarios/server/resources.ts index a4ed241..ec3c5fc 100644 --- a/src/scenarios/server/resources.ts +++ b/src/scenarios/server/resources.ts @@ -2,7 +2,7 @@ * Resources test scenarios for MCP servers */ -import { ClientScenario, ConformanceCheck } from '../../types'; +import { ClientScenario, ConformanceCheck, SpecVersion } from '../../types'; import { connectToServer } from './client-helper'; import { TextResourceContents, @@ -11,6 +11,7 @@ import { export class ResourcesListScenario implements ClientScenario { name = 'resources-list'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test listing available resources. **Server Implementation Requirements:** @@ -91,6 +92,7 @@ export class ResourcesListScenario implements ClientScenario { export class ResourcesReadTextScenario implements ClientScenario { name = 'resources-read-text'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test reading text resource. **Server Implementation Requirements:** @@ -177,6 +179,7 @@ Implement resource \`test://static-text\` that returns: export class ResourcesReadBinaryScenario implements ClientScenario { name = 'resources-read-binary'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test reading binary resource. **Server Implementation Requirements:** @@ -261,6 +264,7 @@ Implement resource \`test://static-binary\` that returns: export class ResourcesTemplateReadScenario implements ClientScenario { name = 'resources-templates-read'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test reading resource from template. 
**Server Implementation Requirements:** @@ -362,6 +366,7 @@ Returns (for \`uri: "test://template/123/data"\`): export class ResourcesSubscribeScenario implements ClientScenario { name = 'resources-subscribe'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test subscribing to resource updates. **Server Implementation Requirements:** @@ -432,6 +437,7 @@ Example request: export class ResourcesUnsubscribeScenario implements ClientScenario { name = 'resources-unsubscribe'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test unsubscribing from resource. **Server Implementation Requirements:** diff --git a/src/scenarios/server/sse-multiple-streams.ts b/src/scenarios/server/sse-multiple-streams.ts index 7cda3f2..ea025de 100644 --- a/src/scenarios/server/sse-multiple-streams.ts +++ b/src/scenarios/server/sse-multiple-streams.ts @@ -9,13 +9,14 @@ * Multiple concurrent streams are achieved via POST requests, each getting their own stream. 
*/ -import { ClientScenario, ConformanceCheck } from '../../types.js'; +import { ClientScenario, ConformanceCheck, SpecVersion } from '../../types.js'; import { EventSourceParserStream } from 'eventsource-parser/stream'; import { Client } from '@modelcontextprotocol/sdk/client/index.js'; import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'; export class ServerSSEMultipleStreamsScenario implements ClientScenario { name = 'server-sse-multiple-streams'; + specVersions: SpecVersion[] = ['2025-11-25']; description = 'Test server supports multiple concurrent POST SSE streams (SEP-1699)'; diff --git a/src/scenarios/server/sse-polling.ts b/src/scenarios/server/sse-polling.ts index 5a3a240..30deee7 100644 --- a/src/scenarios/server/sse-polling.ts +++ b/src/scenarios/server/sse-polling.ts @@ -8,7 +8,7 @@ * - Replaying events when client reconnects with Last-Event-ID */ -import { ClientScenario, ConformanceCheck } from '../../types.js'; +import { ClientScenario, ConformanceCheck, SpecVersion } from '../../types.js'; import { EventSourceParserStream } from 'eventsource-parser/stream'; import { Client } from '@modelcontextprotocol/sdk/client/index.js'; import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'; @@ -67,6 +67,7 @@ function createLoggingFetch(checks: ConformanceCheck[]) { export class ServerSSEPollingScenario implements ClientScenario { name = 'server-sse-polling'; + specVersions: SpecVersion[] = ['2025-11-25']; description = 'Test server SSE polling via test_reconnection tool that closes stream mid-call (SEP-1699)'; diff --git a/src/scenarios/server/tools.ts b/src/scenarios/server/tools.ts index e445a5c..7ecbfdd 100644 --- a/src/scenarios/server/tools.ts +++ b/src/scenarios/server/tools.ts @@ -2,7 +2,7 @@ * Tools test scenarios for MCP servers */ -import { ClientScenario, ConformanceCheck } from '../../types'; +import { ClientScenario, ConformanceCheck, SpecVersion } from 
'../../types'; import { connectToServer, NotificationCollector } from './client-helper'; import { CallToolResultSchema, @@ -13,6 +13,7 @@ import { export class ToolsListScenario implements ClientScenario { name = 'tools-list'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test listing available tools. **Server Implementation Requirements:** @@ -95,6 +96,7 @@ export class ToolsListScenario implements ClientScenario { export class ToolsCallSimpleTextScenario implements ClientScenario { name = 'tools-call-simple-text'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test calling a tool that returns simple text. **Server Implementation Requirements:** @@ -179,6 +181,7 @@ Implement tool \`test_simple_text\` with no arguments that returns: export class ToolsCallImageScenario implements ClientScenario { name = 'tools-call-image'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test calling a tool that returns image content. **Server Implementation Requirements:** @@ -266,6 +269,7 @@ Implement tool \`test_image_content\` with no arguments that returns: export class ToolsCallMultipleContentTypesScenario implements ClientScenario { name = 'tools-call-mixed-content'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test tool returning multiple content types. **Server Implementation Requirements:** @@ -366,6 +370,7 @@ Implement tool \`test_multiple_content_types\` with no arguments that returns: export class ToolsCallWithLoggingScenario implements ClientScenario { name = 'tools-call-with-logging'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test tool that sends log messages during execution. **Server Implementation Requirements:** @@ -454,6 +459,7 @@ Implement tool \`test_tool_with_logging\` with no arguments. 
export class ToolsCallErrorScenario implements ClientScenario { name = 'tools-call-error'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test tool error reporting. **Server Implementation Requirements:** @@ -538,6 +544,7 @@ Implement tool \`test_error_handling\` with no arguments. export class ToolsCallWithProgressScenario implements ClientScenario { name = 'tools-call-with-progress'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test tool that reports progress notifications. **Server Implementation Requirements:** @@ -657,6 +664,7 @@ If no progress token provided, just execute with delays. export class ToolsCallSamplingScenario implements ClientScenario { name = 'tools-call-sampling'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test tool that requests LLM sampling from client. **Server Implementation Requirements:** @@ -784,6 +792,7 @@ Implement tool \`test_sampling\` with argument: export class ToolsCallElicitationScenario implements ClientScenario { name = 'tools-call-elicitation'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test tool that requests user input (elicitation) from client. **Server Implementation Requirements:** @@ -914,6 +923,7 @@ Implement tool \`test_elicitation\` with argument: export class ToolsCallAudioScenario implements ClientScenario { name = 'tools-call-audio'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test calling a tool that returns audio content. **Server Implementation Requirements:** @@ -1008,6 +1018,7 @@ Implement tool \`test_audio_content\` with no arguments that returns: export class ToolsCallEmbeddedResourceScenario implements ClientScenario { name = 'tools-call-embedded-resource'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test calling a tool that returns embedded resource content. 
**Server Implementation Requirements:** diff --git a/src/scenarios/server/utils.ts b/src/scenarios/server/utils.ts index 07b2ca6..0a4a391 100644 --- a/src/scenarios/server/utils.ts +++ b/src/scenarios/server/utils.ts @@ -2,11 +2,12 @@ * Utilities test scenarios for MCP servers */ -import { ClientScenario, ConformanceCheck } from '../../types'; +import { ClientScenario, ConformanceCheck, SpecVersion } from '../../types'; import { connectToServer } from './client-helper'; export class LoggingSetLevelScenario implements ClientScenario { name = 'logging-set-level'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test setting logging level. **Server Implementation Requirements:** @@ -85,6 +86,7 @@ export class LoggingSetLevelScenario implements ClientScenario { export class PingScenario implements ClientScenario { name = 'ping'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test ping utility for connection health check. **Server Implementation Requirements:** @@ -174,6 +176,7 @@ export class PingScenario implements ClientScenario { export class CompletionCompleteScenario implements ClientScenario { name = 'completion-complete'; + specVersions: SpecVersion[] = ['2025-06-18', '2025-11-25']; description = `Test completion endpoint. 
**Server Implementation Requirements:** diff --git a/src/scenarios/spec-version.test.ts b/src/scenarios/spec-version.test.ts new file mode 100644 index 0000000..0b8e652 --- /dev/null +++ b/src/scenarios/spec-version.test.ts @@ -0,0 +1,94 @@ +import { describe, it, expect } from 'vitest'; +import { + listScenarios, + listClientScenarios, + listScenariosForSpec, + getScenarioSpecVersions, + ALL_SPEC_VERSIONS +} from './index'; + +describe('specVersions helpers', () => { + it('every Scenario has specVersions', () => { + for (const name of listScenarios()) { + const versions = getScenarioSpecVersions(name); + expect( + versions, + `scenario "${name}" is missing specVersions` + ).toBeDefined(); + expect(versions!.length).toBeGreaterThan(0); + for (const v of versions!) { + expect(ALL_SPEC_VERSIONS).toContain(v); + } + } + }); + + it('every ClientScenario has specVersions', () => { + for (const name of listClientScenarios()) { + const versions = getScenarioSpecVersions(name); + expect( + versions, + `client scenario "${name}" is missing specVersions` + ).toBeDefined(); + expect(versions!.length).toBeGreaterThan(0); + for (const v of versions!) 
{ + expect(ALL_SPEC_VERSIONS).toContain(v); + } + } + }); + + it('listScenariosForSpec returns scenarios that include that version', () => { + const scenarios = listScenariosForSpec('2025-06-18'); + expect(scenarios.length).toBeGreaterThan(0); + for (const name of scenarios) { + expect(getScenarioSpecVersions(name)).toContain('2025-06-18'); + } + }); + + it('2025-11-25 includes scenarios carried forward from 2025-06-18', () => { + const base = listScenariosForSpec('2025-06-18'); + const current = listScenariosForSpec('2025-11-25'); + // scenarios tagged with both versions should appear in both lists + const currentSet = new Set(current); + // at least some overlap (carried-forward scenarios) + const overlap = base.filter((s) => currentSet.has(s)); + expect(overlap.length).toBeGreaterThan(0); + // current should have more total (new 2025-11-25-only scenarios) + expect(current.length).toBeGreaterThan(overlap.length); + }); + + it('2025-11-25 does not include 2025-03-26-only scenarios', () => { + const backcompat = listScenariosForSpec('2025-03-26'); + const current = listScenariosForSpec('2025-11-25'); + const currentSet = new Set(current); + // backcompat-only scenarios should not appear in 2025-11-25 + for (const name of backcompat) { + const versions = getScenarioSpecVersions(name)!; + if (!versions.includes('2025-11-25')) { + expect(currentSet.has(name)).toBe(false); + } + } + }); + + it('draft and extension scenarios are isolated', () => { + const draft = listScenariosForSpec('draft'); + for (const name of draft) { + expect(getScenarioSpecVersions(name)).toContain('draft'); + } + const ext = listScenariosForSpec('extension'); + for (const name of ext) { + expect(getScenarioSpecVersions(name)).toContain('extension'); + } + }); + + it('draft scenarios are not in dated versions', () => { + const draft = listScenariosForSpec('draft'); + const dated = new Set([ + ...listScenariosForSpec('2025-03-26'), + ...listScenariosForSpec('2025-06-18'), + 
...listScenariosForSpec('2025-11-25') + ]); + for (const name of draft) { + expect(dated.has(name)).toBe(false); + } + }); +}); diff --git a/src/schemas/context.ts b/src/schemas/context.ts index d7ea2a2..9a5e249 100644 --- a/src/schemas/context.ts +++ b/src/schemas/context.ts @@ -17,6 +17,20 @@ export const ClientConformanceContextSchema = z.discriminatedUnion('name', [ name: z.literal('auth/client-credentials-basic'), client_id: z.string(), client_secret: z.string() + }), + z.object({ + name: z.literal('auth/pre-registration'), + client_id: z.string(), + client_secret: z.string() + }), + z.object({ + name: z.literal('auth/cross-app-access-complete-flow'), + client_id: z.string(), + client_secret: z.string(), + idp_client_id: z.string(), + idp_id_token: z.string(), + idp_issuer: z.string(), + idp_token_endpoint: z.string() }) ]); diff --git a/src/tier-check/checks/files.ts b/src/tier-check/checks/files.ts new file mode 100644 index 0000000..1fd768d --- /dev/null +++ b/src/tier-check/checks/files.ts @@ -0,0 +1,58 @@ +import { Octokit } from '@octokit/rest'; +import { PolicySignalsResult } from '../types'; + +// Policy files checked deterministically by the CLI. +// The AI policy evaluation then reads ONLY files that exist here +// to judge whether content is substantive — it does not search for +// files in other locations. 
+const POLICY_SIGNAL_FILES = [ + // General project health + 'CHANGELOG.md', + 'SECURITY.md', + 'CONTRIBUTING.md', + // Dependency update policy + 'DEPENDENCY_POLICY.md', + 'docs/dependency-policy.md', + '.github/dependabot.yml', + '.github/renovate.json', + 'renovate.json', + // Roadmap + 'ROADMAP.md', + 'docs/roadmap.md', + // Versioning / breaking change policy + 'VERSIONING.md', + 'docs/versioning.md', + 'BREAKING_CHANGES.md' +]; + +export async function checkPolicySignals( + octokit: Octokit, + owner: string, + repo: string, + branch?: string +): Promise { + const files: Record = {}; + + for (const filePath of POLICY_SIGNAL_FILES) { + try { + await octokit.repos.getContent({ + owner, + repo, + path: filePath, + ...(branch ? { ref: branch } : {}) + }); + files[filePath] = true; + } catch { + files[filePath] = false; + } + } + + return { + status: Object.values(files).every((v) => v) + ? 'pass' + : Object.values(files).some((v) => v) + ? 'partial' + : 'fail', + files + }; +} diff --git a/src/tier-check/checks/labels.ts b/src/tier-check/checks/labels.ts new file mode 100644 index 0000000..a2b37b7 --- /dev/null +++ b/src/tier-check/checks/labels.ts @@ -0,0 +1,72 @@ +import { Octokit } from '@octokit/rest'; +import { LabelsResult } from '../types'; + +// Type labels can be satisfied by GitHub's native issue types (Bug, Enhancement, Question) +const TYPE_LABELS = ['bug', 'enhancement', 'question']; + +const STATUS_LABELS = [ + 'needs confirmation', + 'needs repro', + 'ready for work', + 'good first issue', + 'help wanted' +]; + +const PRIORITY_LABELS = ['P0', 'P1', 'P2', 'P3']; + +export async function checkLabels( + octokit: Octokit, + owner: string, + repo: string +): Promise { + const labels: string[] = []; + let page = 1; + while (true) { + const { data } = await octokit.issues.listLabelsForRepo({ + owner, + repo, + per_page: 100, + page + }); + labels.push(...data.map((l) => l.name)); + if (data.length < 100) break; + page++; + } + + // Check if the repo uses 
GitHub's native issue types + // If so, type labels (bug/enhancement/question) are satisfied + let usesIssueTypes = false; + try { + const { data: repoData } = await octokit.request( + 'GET /repos/{owner}/{repo}', + { owner, repo } + ); + // Repos with issue types enabled have them configured at the org or repo level. + // We detect this by checking for the presence of issue type configuration. + // As a heuristic: if the repo has no type labels but has issues, it likely uses types. + usesIssueTypes = !!(repoData as Record).issue_types; + } catch { + // If we can't determine, assume labels are needed + } + + const labelSet = new Set(labels.map((l) => l.toLowerCase())); + + // Build required labels list, excluding type labels if issue types are used + const requiredLabels = [ + ...(usesIssueTypes ? [] : TYPE_LABELS), + ...STATUS_LABELS, + ...PRIORITY_LABELS + ]; + + const missing = requiredLabels.filter((l) => !labelSet.has(l.toLowerCase())); + const found = requiredLabels.filter((l) => labelSet.has(l.toLowerCase())); + + return { + status: missing.length === 0 ? 
'pass' : 'fail', + present: found.length, + required: requiredLabels.length, + missing, + found, + uses_issue_types: usesIssueTypes + }; +} diff --git a/src/tier-check/checks/p0.ts b/src/tier-check/checks/p0.ts new file mode 100644 index 0000000..25fad15 --- /dev/null +++ b/src/tier-check/checks/p0.ts @@ -0,0 +1,87 @@ +import { Octokit } from '@octokit/rest'; +import { P0Result } from '../types'; + +export async function checkP0Resolution( + octokit: Octokit, + owner: string, + repo: string +): Promise { + // Fetch all issues with P0 label + const p0Issues: Array<{ + number: number; + title: string; + state: string; + created_at: string; + closed_at: string | null; + labels: Array<{ name: string }>; + }> = []; + + let page = 1; + while (true) { + const { data } = await octokit.issues.listForRepo({ + owner, + repo, + labels: 'P0', + state: 'all', + per_page: 100, + page + }); + for (const issue of data) { + if (issue.pull_request) continue; + p0Issues.push({ + number: issue.number, + title: issue.title, + state: issue.state, + created_at: issue.created_at, + closed_at: issue.closed_at ?? 
null, + labels: issue.labels.filter( + (l): l is { name: string } => + typeof l === 'object' && l !== null && 'name' in l + ) + }); + } + if (data.length < 100) break; + page++; + } + + const openP0s = p0Issues.filter((i) => i.state === 'open'); + const closedP0s = p0Issues.filter((i) => i.state === 'closed' && i.closed_at); + + let closedWithin7d = 0; + let closedWithin14d = 0; + + for (const issue of closedP0s) { + const daysToClose = + (new Date(issue.closed_at!).getTime() - + new Date(issue.created_at).getTime()) / + (1000 * 60 * 60 * 24); + if (daysToClose <= 7) closedWithin7d++; + if (daysToClose <= 14) closedWithin14d++; + } + + const openP0Details = openP0s.map((i) => ({ + number: i.number, + title: i.title, + age_days: Math.round( + (Date.now() - new Date(i.created_at).getTime()) / (1000 * 60 * 60 * 24) + ) + })); + + const allResolved7d = + openP0s.length === 0 && + (closedP0s.length === 0 || closedWithin7d === closedP0s.length); + const allResolved14d = + openP0s.length === 0 && + (closedP0s.length === 0 || closedWithin14d === closedP0s.length); + + return { + status: allResolved7d ? 'pass' : allResolved14d ? 
'partial' : 'fail', + open_p0s: openP0s.length, + open_p0_details: openP0Details, + closed_within_7d: closedWithin7d, + closed_within_14d: closedWithin14d, + closed_total: closedP0s.length, + all_p0s_resolved_within_7d: allResolved7d, + all_p0s_resolved_within_14d: allResolved14d + }; +} diff --git a/src/tier-check/checks/release.ts b/src/tier-check/checks/release.ts new file mode 100644 index 0000000..3eb53f1 --- /dev/null +++ b/src/tier-check/checks/release.ts @@ -0,0 +1,59 @@ +import { Octokit } from '@octokit/rest'; +import { ReleaseResult } from '../types'; + +export async function checkStableRelease( + octokit: Octokit, + owner: string, + repo: string +): Promise { + try { + const { data: releases } = await octokit.repos.listReleases({ + owner, + repo, + per_page: 20 + }); + + if (releases.length === 0) { + return { + status: 'fail', + version: null, + is_stable: false, + is_prerelease: false + }; + } + + // Find latest non-draft release + const latest = releases.find((r) => !r.draft); + if (!latest) { + return { + status: 'fail', + version: null, + is_stable: false, + is_prerelease: false + }; + } + + const version = latest.tag_name.replace(/^v/, ''); + const isPrerelease = + latest.prerelease || + /-(alpha|beta|rc|dev|preview|snapshot)/i.test(version); + + // Check if version is >= 1.0.0 + const parts = version.split('.').map((p) => parseInt(p, 10)); + const isStable = !isPrerelease && parts.length >= 2 && parts[0] >= 1; + + return { + status: isStable ? 
'pass' : 'fail', + version, + is_stable: isStable, + is_prerelease: isPrerelease + }; + } catch { + return { + status: 'fail', + version: null, + is_stable: false, + is_prerelease: false + }; + } +} diff --git a/src/tier-check/checks/spec-tracking.ts b/src/tier-check/checks/spec-tracking.ts new file mode 100644 index 0000000..ce4c101 --- /dev/null +++ b/src/tier-check/checks/spec-tracking.ts @@ -0,0 +1,79 @@ +import { Octokit } from '@octokit/rest'; +import { SpecTrackingResult } from '../types'; + +export async function checkSpecTracking( + octokit: Octokit, + owner: string, + repo: string +): Promise { + try { + // Get latest spec release from modelcontextprotocol/modelcontextprotocol + const { data: specReleases } = await octokit.repos.listReleases({ + owner: 'modelcontextprotocol', + repo: 'modelcontextprotocol', + per_page: 5 + }); + const latestSpec = specReleases.find((r) => !r.draft && !r.prerelease); + + // Get SDK releases (API returns newest-first) + const { data: sdkReleases } = await octokit.repos.listReleases({ + owner, + repo, + per_page: 50 + }); + const nonDraftSdkReleases = sdkReleases.filter((r) => !r.draft); + + if (!latestSpec || nonDraftSdkReleases.length === 0) { + return { + status: 'skipped', + latest_spec_release: latestSpec?.published_at || null, + latest_sdk_release: nonDraftSdkReleases[0]?.published_at || null, + sdk_release_within_30d: null, + days_gap: null + }; + } + + const specDate = new Date(latestSpec.published_at!); + + // Reverse so oldest-first, then find the FIRST SDK release after the spec + const oldestFirst = [...nonDraftSdkReleases].reverse(); + const firstSdkAfterSpec = oldestFirst.find( + (r) => new Date(r.published_at!) >= specDate + ); + + if (!firstSdkAfterSpec) { + // No SDK release after the latest spec release + const daysSinceSpec = Math.round( + (Date.now() - specDate.getTime()) / (1000 * 60 * 60 * 24) + ); + return { + status: daysSinceSpec <= 30 ? 
'pass' : 'fail', + latest_spec_release: latestSpec.published_at, + latest_sdk_release: nonDraftSdkReleases[0]?.published_at || null, + sdk_release_within_30d: daysSinceSpec <= 30, + days_gap: daysSinceSpec + }; + } + + const sdkDate = new Date(firstSdkAfterSpec.published_at!); + const daysGap = Math.round( + (sdkDate.getTime() - specDate.getTime()) / (1000 * 60 * 60 * 24) + ); + + return { + status: daysGap <= 30 ? 'pass' : 'fail', + latest_spec_release: latestSpec.published_at, + latest_sdk_release: firstSdkAfterSpec.published_at, + sdk_release_within_30d: daysGap <= 30, + days_gap: daysGap + }; + } catch { + return { + status: 'skipped', + latest_spec_release: null, + latest_sdk_release: null, + sdk_release_within_30d: null, + days_gap: null + }; + } +} diff --git a/src/tier-check/checks/test-conformance-results.ts b/src/tier-check/checks/test-conformance-results.ts new file mode 100644 index 0000000..5ba9849 --- /dev/null +++ b/src/tier-check/checks/test-conformance-results.ts @@ -0,0 +1,237 @@ +import { execSync } from 'child_process'; +import { mkdtempSync, readFileSync, existsSync, globSync } from 'fs'; +import { join, dirname } from 'path'; +import { tmpdir } from 'os'; +import { ConformanceResult } from '../types'; +import { + listScenarios, + listActiveClientScenarios, + getScenarioSpecVersions +} from '../../scenarios'; +import { ConformanceCheck, SpecVersion } from '../../types'; + +const NON_SCORING_VERSIONS: SpecVersion[] = ['draft', 'extension']; + +/** Whether a scenario counts toward tier scoring (has at least one date-versioned spec). */ +function isTierScoring(specVersions?: SpecVersion[]): boolean { + if (!specVersions || specVersions.length === 0) return true; // unknown = count it + return specVersions.some((v) => !NON_SCORING_VERSIONS.includes(v)); +} + +/** + * Parse conformance results from an output directory. + * The conformance CLI saves checks.json per scenario under outputDir//server/ or client/. 
+ */ +function parseOutputDir(outputDir: string): ConformanceResult { + if (!existsSync(outputDir)) { + return { + status: 'fail', + pass_rate: 0, + passed: 0, + failed: 0, + total: 0, + details: [] + }; + } + + const details: ConformanceResult['details'] = []; + let totalPassed = 0; + let totalFailed = 0; + + // Find all checks.json files recursively to handle scenarios with '/' in + // their name (e.g. auth/metadata-default) which create nested subdirectories. + const checksFiles = globSync('**/checks.json', { cwd: outputDir }); + + for (const checksFile of checksFiles) { + const scenarioName = dirname(checksFile); + const checksPath = join(outputDir, checksFile); + + try { + const checks: ConformanceCheck[] = JSON.parse( + readFileSync(checksPath, 'utf-8') + ); + const passed = checks.filter((c) => c.status === 'SUCCESS').length; + const failed = checks.filter((c) => c.status === 'FAILURE').length; + const scenarioPassed = failed === 0 && passed > 0; + + totalPassed += scenarioPassed ? 1 : 0; + totalFailed += scenarioPassed ? 0 : 1; + details.push({ + scenario: scenarioName, + passed: scenarioPassed, + checks_passed: passed, + checks_failed: failed + }); + } catch { + totalFailed++; + details.push({ + scenario: scenarioName, + passed: false, + checks_passed: 0, + checks_failed: 1 + }); + } + } + + const total = totalPassed + totalFailed; + const pass_rate = total > 0 ? totalPassed / total : 0; + + return { + status: pass_rate >= 1.0 ? 'pass' : pass_rate >= 0.8 ? 'partial' : 'fail', + pass_rate, + passed: totalPassed, + failed: totalFailed, + total, + details + }; +} + +/** + * Strip the timestamp suffix from a result directory name. + * Result dirs are named `{scenario}-{ISO timestamp}` where the timestamp + * has colons/dots replaced with dashes (e.g., `initialize-2026-02-12T16-08-37-806Z`). + * Server scenarios also have a `server-` prefix (e.g., `server-ping-2026-02-12T16-08-37-806Z`). 
+ */ +function stripTimestamp(dirName: string): string { + return dirName.replace(/-\d{4}-\d{2}-\d{2}T[\d-]+Z$/, ''); +} + +/** + * Reconcile parsed results against the full list of expected scenarios. + * Any expected scenario that didn't produce results is counted as a failure. + * This ensures the denominator reflects the full test suite, not just + * scenarios that ran successfully enough to write checks.json. + */ +function reconcileWithExpected( + result: ConformanceResult, + expectedScenarios: string[], + resultPrefix?: string +): ConformanceResult { + const reportedNames = new Set( + result.details.map((d) => { + let name = stripTimestamp(d.scenario); + if (resultPrefix) { + name = name.replace(new RegExp(`^${resultPrefix}-`), ''); + } + return name; + }) + ); + + // Attach specVersion to existing detail entries + for (const detail of result.details) { + let name = stripTimestamp(detail.scenario); + if (resultPrefix) { + name = name.replace(new RegExp(`^${resultPrefix}-`), ''); + } + detail.specVersions = getScenarioSpecVersions(name); + } + + for (const expected of expectedScenarios) { + if (!reportedNames.has(expected)) { + result.failed++; + result.total++; + result.details.push({ + scenario: expected, + passed: false, + checks_passed: 0, + checks_failed: 0, + specVersions: getScenarioSpecVersions(expected) + }); + } + } + + // pass_rate only counts tier-scoring scenarios (date-versioned, not draft/extension). + // passed/failed/total reflect ALL scenarios for full reporting; pass_rate and status + // reflect only tier-scoring scenarios for tier logic. + const tierDetails = result.details.filter((d) => + isTierScoring(d.specVersions) + ); + const tierPassed = tierDetails.filter((d) => d.passed).length; + const tierTotal = tierDetails.length; + + result.pass_rate = tierTotal > 0 ? tierPassed / tierTotal : 0; + result.status = + result.pass_rate >= 1.0 + ? 'pass' + : result.pass_rate >= 0.8 + ? 
'partial' + : 'fail'; + + return result; +} + +/** + * Run server conformance tests by shelling out to the conformance CLI. + */ +export async function checkConformance(options: { + serverUrl?: string; + skip?: boolean; +}): Promise { + if (options.skip || !options.serverUrl) { + return { + status: 'skipped', + pass_rate: 0, + passed: 0, + failed: 0, + total: 0, + details: [] + }; + } + + const outputDir = mkdtempSync(join(tmpdir(), 'tier-check-server-')); + + try { + execSync( + `node dist/index.js server --url ${options.serverUrl} -o ${outputDir}`, + { + cwd: process.cwd(), + stdio: ['pipe', 'pipe', 'pipe'], + timeout: 120_000 + } + ); + } catch { + // Non-zero exit is expected when tests fail — results are still in outputDir + } + + return reconcileWithExpected( + parseOutputDir(outputDir), + listActiveClientScenarios(), + 'server' + ); +} + +/** + * Run client conformance tests by shelling out to the conformance CLI. + */ +export async function checkClientConformance(options: { + clientCmd?: string; + skip?: boolean; +}): Promise { + if (options.skip || !options.clientCmd) { + return { + status: 'skipped', + pass_rate: 0, + passed: 0, + failed: 0, + total: 0, + details: [] + }; + } + + const outputDir = mkdtempSync(join(tmpdir(), 'tier-check-client-')); + + try { + execSync( + `node dist/index.js client --command '${options.clientCmd}' --suite all -o ${outputDir}`, + { + cwd: process.cwd(), + stdio: ['pipe', 'pipe', 'pipe'], + timeout: 120_000 + } + ); + } catch { + // Non-zero exit is expected when tests fail — results are still in outputDir + } + + return reconcileWithExpected(parseOutputDir(outputDir), listScenarios()); +} diff --git a/src/tier-check/checks/triage.ts b/src/tier-check/checks/triage.ts new file mode 100644 index 0000000..523f4ec --- /dev/null +++ b/src/tier-check/checks/triage.ts @@ -0,0 +1,109 @@ +import { Octokit } from '@octokit/rest'; +import { TriageResult } from '../types'; + +export async function checkTriage( + octokit: Octokit, + 
owner: string, + repo: string, + days?: number +): Promise { + const since = days + ? new Date(Date.now() - days * 24 * 60 * 60 * 1000).toISOString() + : undefined; + + // Fetch issues (not PRs) — the API returns labels inline + const issues: Array<{ + number: number; + created_at: string; + labels: string[]; + }> = []; + + let page = 1; + while (true) { + const { data } = await octokit.issues.listForRepo({ + owner, + repo, + state: 'open', + ...(since ? { since } : {}), + per_page: 100, + page, + sort: 'created', + direction: 'desc' + }); + if (data.length === 0) break; + for (const issue of data) { + if (issue.pull_request) continue; + if (since && new Date(issue.created_at) < new Date(since)) continue; + issues.push({ + number: issue.number, + created_at: issue.created_at, + labels: issue.labels + .filter( + (l): l is { name: string } => + typeof l === 'object' && l !== null && 'name' in l + ) + .map((l) => l.name) + }); + } + if (data.length < 100) break; + page++; + } + + if (issues.length === 0) { + return { + status: 'pass', + compliance_rate: 1, + total_issues: 0, + triaged_within_sla: 0, + exceeding_sla: 0, + median_hours: 0, + p95_hours: 0, + days_analyzed: days + }; + } + + // An issue is "triaged" if it has at least one label + const triaged = issues.filter((i) => i.labels.length > 0); + const untriaged = issues.filter((i) => i.labels.length === 0); + + // For untriaged issues, compute how long they've been open without a label + const untriagedAgeHours = untriaged.map( + (i) => (Date.now() - new Date(i.created_at).getTime()) / (1000 * 60 * 60) + ); + untriagedAgeHours.sort((a, b) => a - b); + + const total = issues.length; + const triagedRate = total > 0 ? 
triaged.length / total : 1; + + // For SLA: issues without labels that are older than 2BD are SLA violations + const TWO_BUSINESS_DAYS_HOURS = 2 * 24; + const exceeding = untriagedAgeHours.filter( + (h) => h > TWO_BUSINESS_DAYS_HOURS + ).length; + + // Median/p95 of untriaged issue ages (0 if all triaged) + const median = + untriagedAgeHours.length > 0 + ? untriagedAgeHours[Math.floor(untriagedAgeHours.length / 2)] + : 0; + const p95 = + untriagedAgeHours.length > 0 + ? untriagedAgeHours[Math.floor(untriagedAgeHours.length * 0.95)] + : 0; + + let status: 'pass' | 'partial' | 'fail'; + if (triagedRate >= 0.9) status = 'pass'; + else if (triagedRate >= 0.8) status = 'partial'; + else status = 'fail'; + + return { + status, + compliance_rate: triagedRate, + total_issues: total, + triaged_within_sla: triaged.length, + exceeding_sla: exceeding, + median_hours: Math.round(median * 10) / 10, + p95_hours: Math.round(p95 * 10) / 10, + days_analyzed: days + }; +} diff --git a/src/tier-check/index.ts b/src/tier-check/index.ts new file mode 100644 index 0000000..5416748 --- /dev/null +++ b/src/tier-check/index.ts @@ -0,0 +1,201 @@ +import { Command } from 'commander'; +import { Octokit } from '@octokit/rest'; +import { + checkConformance, + checkClientConformance +} from './checks/test-conformance-results'; +import { checkLabels } from './checks/labels'; +import { checkTriage } from './checks/triage'; +import { checkP0Resolution } from './checks/p0'; +import { checkStableRelease } from './checks/release'; +import { checkPolicySignals } from './checks/files'; +import { checkSpecTracking } from './checks/spec-tracking'; +import { computeTier } from './tier-logic'; +import { formatJson, formatMarkdown, formatTerminal } from './output'; +import { TierScorecard } from './types'; + +function parseRepo(repo: string): { owner: string; repo: string } { + const parts = repo.split('/'); + if (parts.length !== 2) + throw new Error(`Invalid repo format: ${repo}. 
Expected owner/repo`); + return { owner: parts[0], repo: parts[1] }; +} + +export function createTierCheckCommand(): Command { + const tierCheck = new Command('tier-check') + .description('Run SDK tier assessment checks against a GitHub repository') + .requiredOption( + '--repo ', + 'GitHub repository (e.g., modelcontextprotocol/typescript-sdk)' + ) + .option('--branch ', 'Branch to check') + .option( + '--conformance-server-url ', + 'URL of the already-running conformance server' + ) + .option( + '--client-cmd ', + 'Command to run the SDK conformance client (for client conformance tests)' + ) + .option('--skip-conformance', 'Skip conformance tests') + .option('--days ', 'Limit triage check to issues created in last N days') + .option( + '--output ', + 'Output format: json, markdown, terminal', + 'terminal' + ) + .option( + '--token ', + 'GitHub token (defaults to GITHUB_TOKEN env var)' + ) + .action(async (options) => { + const { owner, repo } = parseRepo(options.repo); + let token = options.token || process.env.GITHUB_TOKEN; + + if (!token) { + // Try to get token from GitHub CLI + try { + const { execSync } = await import('child_process'); + token = execSync('gh auth token', { encoding: 'utf-8' }).trim(); + } catch { + // gh not installed or not authenticated + } + } + + if (!token) { + console.error( + 'GitHub token required. Either:\n' + + ' gh auth login\n' + + ' export GITHUB_TOKEN=$(gh auth token)\n' + + ' or pass --token ' + ); + process.exit(1); + } + + const octokit = new Octokit({ auth: token }); + const days = options.days ? 
parseInt(options.days, 10) : undefined; + + console.error('Running tier assessment checks...\n'); + + // Run all checks + const [ + conformance, + clientConformance, + labels, + triage, + p0, + release, + files, + specTracking + ] = await Promise.all([ + checkConformance({ + serverUrl: options.conformanceServerUrl, + skip: options.skipConformance + }).then((r) => { + console.error(' ✓ Server Conformance'); + return r; + }), + checkClientConformance({ + clientCmd: options.clientCmd, + skip: options.skipConformance || !options.clientCmd + }).then((r) => { + console.error(' ✓ Client Conformance'); + return r; + }), + checkLabels(octokit, owner, repo).then((r) => { + console.error(' ✓ Labels'); + return r; + }), + checkTriage(octokit, owner, repo, days).then((r) => { + console.error(' \u2713 Triage'); + return r; + }), + checkP0Resolution(octokit, owner, repo).then((r) => { + console.error(' \u2713 P0 Resolution'); + return r; + }), + checkStableRelease(octokit, owner, repo).then((r) => { + console.error(' \u2713 Stable Release'); + return r; + }), + checkPolicySignals(octokit, owner, repo, options.branch).then((r) => { + console.error(' \u2713 Policy Signals'); + return r; + }), + checkSpecTracking(octokit, owner, repo).then((r) => { + console.error(' \u2713 Spec Tracking'); + return r; + }) + ]); + + const checks = { + conformance, + client_conformance: clientConformance, + labels, + triage, + p0_resolution: p0, + stable_release: release, + policy_signals: files, + spec_tracking: specTracking + }; + + const implied_tier = computeTier(checks); + + const scorecard: TierScorecard = { + repo: options.repo, + branch: options.branch || null, + timestamp: new Date().toISOString(), + version: release.version, + checks, + implied_tier + }; + + switch (options.output) { + case 'json': + console.log(formatJson(scorecard)); + break; + case 'markdown': + console.log(formatMarkdown(scorecard)); + break; + default: + formatTerminal(scorecard); + } + }); + + // Subcommands for 
individual checks + tierCheck + .command('labels') + .description('Check label taxonomy') + .requiredOption('--repo ', 'GitHub repository') + .option('--token ', 'GitHub token') + .action(async (options) => { + const { owner, repo } = parseRepo(options.repo); + const octokit = new Octokit({ + auth: options.token || process.env.GITHUB_TOKEN + }); + const result = await checkLabels(octokit, owner, repo); + console.log(JSON.stringify(result, null, 2)); + }); + + tierCheck + .command('triage') + .description('Check issue triage speed') + .requiredOption('--repo ', 'GitHub repository') + .option('--days ', 'Limit triage check to issues created in last N days') + .option('--token ', 'GitHub token') + .action(async (options) => { + const { owner, repo } = parseRepo(options.repo); + const octokit = new Octokit({ + auth: options.token || process.env.GITHUB_TOKEN + }); + const result = await checkTriage( + octokit, + owner, + repo, + options.days ? parseInt(options.days, 10) : undefined + ); + console.log(JSON.stringify(result, null, 2)); + }); + + return tierCheck; +} diff --git a/src/tier-check/output.ts b/src/tier-check/output.ts new file mode 100644 index 0000000..9e7aa57 --- /dev/null +++ b/src/tier-check/output.ts @@ -0,0 +1,347 @@ +import { TierScorecard, CheckStatus, ConformanceResult } from './types'; + +const COLORS = { + RESET: '\x1b[0m', + GREEN: '\x1b[32m', + YELLOW: '\x1b[33m', + RED: '\x1b[31m', + BLUE: '\x1b[36m', + BOLD: '\x1b[1m', + DIM: '\x1b[2m' +}; + +function statusIcon(status: CheckStatus): string { + switch (status) { + case 'pass': + return `${COLORS.GREEN}\u2713${COLORS.RESET}`; + case 'fail': + return `${COLORS.RED}\u2717${COLORS.RESET}`; + case 'partial': + return `${COLORS.YELLOW}~${COLORS.RESET}`; + case 'skipped': + return `${COLORS.DIM}-${COLORS.RESET}`; + } +} + +const TIER_SPEC_VERSIONS = ['2025-03-26', '2025-06-18', '2025-11-25'] as const; + +const INFO_SPEC_VERSIONS = ['draft', 'extension'] as const; + +type Cell = { passed: number; total: 
number }; + +interface MatrixRow { + cells: Map; + /** Unique scenario counts for tier-scoring versions only. */ + tierUnique: Cell; + /** Unique scenario counts for informational versions only. */ + infoUnique: Cell; +} + +const INFO_SET = new Set(INFO_SPEC_VERSIONS); + +function newRow(): MatrixRow { + return { + cells: new Map(), + tierUnique: { passed: 0, total: 0 }, + infoUnique: { passed: 0, total: 0 } + }; +} + +interface ConformanceMatrix { + server: MatrixRow; + clientCore: MatrixRow; + clientAuth: MatrixRow; +} + +function buildConformanceMatrix( + server: ConformanceResult, + client: ConformanceResult +): ConformanceMatrix { + const matrix: ConformanceMatrix = { + server: newRow(), + clientCore: newRow(), + clientAuth: newRow() + }; + + function addToRow( + row: MatrixRow, + d: { passed: boolean; specVersions?: string[] } + ) { + const versions = d.specVersions ?? ['unknown']; + const isTierScoring = versions.some((v) => !INFO_SET.has(v)); + const bucket = isTierScoring ? row.tierUnique : row.infoUnique; + bucket.total++; + if (d.passed) bucket.passed++; + for (const v of versions) { + const cell = row.cells.get(v) ?? { passed: 0, total: 0 }; + cell.total++; + if (d.passed) cell.passed++; + row.cells.set(v, cell); + } + } + + for (const d of server.details) { + addToRow(matrix.server, d); + } + + for (const d of client.details) { + const row = d.scenario.startsWith('auth/') + ? 
matrix.clientAuth + : matrix.clientCore; + addToRow(row, d); + } + + return matrix; +} + +function formatCell(cell: Cell | undefined): string { + if (!cell || cell.total === 0) return '\u2014'; + return `${cell.passed}/${cell.total}`; +} + +function formatRate(cell: Cell): string { + if (cell.total === 0) return '0/0'; + return `${cell.passed}/${cell.total} (${Math.round((cell.passed / cell.total) * 100)}%)`; +} + +export function formatJson(scorecard: TierScorecard): string { + return JSON.stringify(scorecard, null, 2); +} + +export function formatMarkdown(scorecard: TierScorecard): string { + const lines: string[] = []; + const c = scorecard.checks; + + lines.push(`# Tier Assessment: Tier ${scorecard.implied_tier.tier}`); + lines.push(''); + lines.push(`**Repo**: ${scorecard.repo}`); + if (scorecard.branch) lines.push(`**Branch**: ${scorecard.branch}`); + if (scorecard.version) lines.push(`**Version**: ${scorecard.version}`); + lines.push(`**Timestamp**: ${scorecard.timestamp}`); + lines.push(''); + lines.push('## Check Results'); + lines.push(''); + lines.push('| Check | Status | Detail |'); + lines.push('|-------|--------|--------|'); + // Conformance matrix + const matrix = buildConformanceMatrix( + c.conformance as ConformanceResult, + c.client_conformance as ConformanceResult + ); + + // Tier-scoring matrix + lines.push(''); + lines.push(`| | ${TIER_SPEC_VERSIONS.join(' | ')} | All* |`); + lines.push(`|---|${TIER_SPEC_VERSIONS.map(() => '---|').join('')}---|`); + + const mdRows: [string, MatrixRow][] = [ + ['Server', matrix.server], + ['Client: Core', matrix.clientCore], + ['Client: Auth', matrix.clientAuth] + ]; + + for (const [label, row] of mdRows) { + lines.push( + `| ${label} | ${TIER_SPEC_VERSIONS.map((v) => formatCell(row.cells.get(v))).join(' | ')} | ${formatRate(row.tierUnique)} |` + ); + } + + lines.push(''); + lines.push( + '_* unique scenarios — a scenario may apply to multiple spec versions_' + ); + + // Informational matrix (draft/extension) + 
const hasInfoMd = mdRows.some(([, row]) => + INFO_SPEC_VERSIONS.some((v) => { + const cell = row.cells.get(v); + return cell && cell.total > 0; + }) + ); + if (hasInfoMd) { + lines.push(''); + lines.push('_Informational (not scored for tier):_'); + lines.push(''); + lines.push(`| | ${INFO_SPEC_VERSIONS.join(' | ')} |`); + lines.push(`|---|${INFO_SPEC_VERSIONS.map(() => '---|').join('')}`); + for (const [label, row] of mdRows) { + const hasData = INFO_SPEC_VERSIONS.some((v) => { + const cell = row.cells.get(v); + return cell && cell.total > 0; + }); + if (!hasData) continue; + lines.push( + `| ${label} | ${INFO_SPEC_VERSIONS.map((v) => formatCell(row.cells.get(v))).join(' | ')} |` + ); + } + } + lines.push(''); + lines.push( + `| Labels | ${c.labels.status} | ${c.labels.present}/${c.labels.required} required labels${c.labels.missing.length > 0 ? ` (missing: ${c.labels.missing.join(', ')})` : ''} |` + ); + lines.push( + `| Triage | ${c.triage.status} | ${Math.round(c.triage.compliance_rate * 100)}% within 2BD, median ${c.triage.median_hours}h, p95 ${c.triage.p95_hours}h |` + ); + lines.push( + `| P0 Resolution | ${c.p0_resolution.status} | ${c.p0_resolution.open_p0s} open, ${c.p0_resolution.closed_within_7d}/${c.p0_resolution.closed_total} closed within 7d |` + ); + lines.push( + `| Stable Release | ${c.stable_release.status} | ${c.stable_release.version || 'none'} (stable: ${c.stable_release.is_stable}) |` + ); + lines.push( + `| Policy Signals | ${c.policy_signals.status} | ${Object.entries( + c.policy_signals.files + ) + .map(([f, e]) => `${f}: ${e ? '\u2713' : '\u2717'}`) + .join(', ')} |` + ); + lines.push( + `| Spec Tracking | ${c.spec_tracking.status} | ${c.spec_tracking.days_gap !== null ? 
`${c.spec_tracking.days_gap}d gap` : 'N/A'} |` + ); + lines.push(''); + + if (scorecard.implied_tier.tier1_blockers.length > 0) { + lines.push('## Tier 1 Blockers'); + lines.push(''); + for (const blocker of scorecard.implied_tier.tier1_blockers) { + lines.push(`- ${blocker}`); + } + lines.push(''); + } + + lines.push(`> ${scorecard.implied_tier.note}`); + + return lines.join('\n'); +} + +export function formatTerminal(scorecard: TierScorecard): void { + const c = scorecard.checks; + const tier = scorecard.implied_tier.tier; + const tierColor = + tier === 1 ? COLORS.GREEN : tier === 2 ? COLORS.YELLOW : COLORS.RED; + + console.log( + `\n${COLORS.BOLD}Tier Assessment: ${tierColor}Tier ${tier}${COLORS.RESET}\n` + ); + console.log(`Repo: ${scorecard.repo}`); + if (scorecard.branch) console.log(`Branch: ${scorecard.branch}`); + if (scorecard.version) console.log(`Version: ${scorecard.version}`); + console.log(`Timestamp: ${scorecard.timestamp}\n`); + + console.log(`${COLORS.BOLD}Conformance:${COLORS.RESET}\n`); + + // Conformance matrix + const matrix = buildConformanceMatrix( + c.conformance as ConformanceResult, + c.client_conformance as ConformanceResult + ); + + const vw = 10; // column width for version cells + const lw = 14; // label column width + const tw = 16; // total column width + const rp = (s: string, w: number) => s.padStart(w); + const lp = (s: string, w: number) => s.padEnd(w); + + // Tier-scoring matrix (date-versioned specs only) + console.log( + ` ${COLORS.DIM}${lp('', lw + 2)} ${TIER_SPEC_VERSIONS.map((v) => rp(v, vw)).join(' ')} ${rp('All*', tw)}${COLORS.RESET}` + ); + + const rows: [string, MatrixRow, CheckStatus | null, boolean][] = [ + ['Server', matrix.server, c.conformance.status, true], + ['Client: Core', matrix.clientCore, null, false], + ['Client: Auth', matrix.clientAuth, null, false] + ]; + + for (const [label, row, status, bold] of rows) { + const icon = status ? statusIcon(status) + ' ' : ' '; + const b = bold ? 
COLORS.BOLD : ''; + const r = bold ? COLORS.RESET : ''; + console.log( + ` ${icon}${b}${lp(label, lw)}${r} ${TIER_SPEC_VERSIONS.map((v) => rp(formatCell(row.cells.get(v)), vw)).join(' ')} ${b}${rp(formatRate(row.tierUnique), tw)}${r}` + ); + } + + // Client total line (tier-scoring only) + const clientTierTotal: Cell = { + passed: + matrix.clientCore.tierUnique.passed + matrix.clientAuth.tierUnique.passed, + total: + matrix.clientCore.tierUnique.total + matrix.clientAuth.tierUnique.total + }; + console.log( + ` ${statusIcon(c.client_conformance.status)} ${COLORS.BOLD}${lp('Client Total', lw)}${COLORS.RESET} ${' '.repeat(TIER_SPEC_VERSIONS.length * (vw + 1) - 1)} ${COLORS.BOLD}${rp(formatRate(clientTierTotal), tw)}${COLORS.RESET}` + ); + console.log( + `\n ${COLORS.DIM}* unique scenarios — a scenario may apply to multiple spec versions${COLORS.RESET}` + ); + + // Informational matrix (draft/extension) — only if there are any + const hasInfo = rows.some(([, row]) => + INFO_SPEC_VERSIONS.some((v) => { + const cell = row.cells.get(v); + return cell && cell.total > 0; + }) + ); + if (hasInfo) { + console.log(`\n Informational (not scored for tier):\n`); + console.log( + ` ${COLORS.DIM}${lp('', lw + 2)} ${INFO_SPEC_VERSIONS.map((v) => rp(v, vw)).join(' ')}${COLORS.RESET}` + ); + for (const [label, row, , bold] of rows) { + const hasData = INFO_SPEC_VERSIONS.some((v) => { + const cell = row.cells.get(v); + return cell && cell.total > 0; + }); + if (!hasData) continue; + const b = bold ? COLORS.BOLD : ''; + const r = bold ? 
COLORS.RESET : ''; + console.log( + ` ${b}${lp(label, lw)}${r} ${INFO_SPEC_VERSIONS.map((v) => rp(formatCell(row.cells.get(v)), vw)).join(' ')}` + ); + } + } + console.log(`\n${COLORS.BOLD}Repository Health:${COLORS.RESET}\n`); + console.log( + ` ${statusIcon(c.labels.status)} Labels ${c.labels.present}/${c.labels.required} required labels` + ); + if (c.labels.missing.length > 0) + console.log( + ` ${COLORS.DIM}Missing: ${c.labels.missing.join(', ')}${COLORS.RESET}` + ); + console.log( + ` ${statusIcon(c.triage.status)} Triage ${Math.round(c.triage.compliance_rate * 100)}% within 2BD (${c.triage.total_issues} issues, median ${c.triage.median_hours}h)` + ); + console.log( + ` ${statusIcon(c.p0_resolution.status)} P0 Resolution ${c.p0_resolution.open_p0s} open, ${c.p0_resolution.closed_within_7d}/${c.p0_resolution.closed_total} closed within 7d` + ); + if (c.p0_resolution.open_p0_details.length > 0) { + for (const p0 of c.p0_resolution.open_p0_details) { + console.log( + ` ${COLORS.RED}#${p0.number} (${p0.age_days}d old): ${p0.title}${COLORS.RESET}` + ); + } + } + console.log( + ` ${statusIcon(c.stable_release.status)} Stable Release ${c.stable_release.version || 'none'}` + ); + console.log( + ` ${statusIcon(c.policy_signals.status)} Policy Signals ${Object.entries( + c.policy_signals.files + ) + .map(([f, e]) => `${e ? '\u2713' : '\u2717'} ${f}`) + .join(', ')}` + ); + console.log( + ` ${statusIcon(c.spec_tracking.status)} Spec Tracking ${c.spec_tracking.days_gap !== null ? 
`${c.spec_tracking.days_gap}d gap` : 'N/A'}` + ); + + if (scorecard.implied_tier.tier1_blockers.length > 0) { + console.log(`\n${COLORS.BOLD}Tier 1 Blockers:${COLORS.RESET}`); + for (const blocker of scorecard.implied_tier.tier1_blockers) { + console.log(` ${COLORS.RED}\u2022${COLORS.RESET} ${blocker}`); + } + } + + console.log(`\n${COLORS.DIM}${scorecard.implied_tier.note}${COLORS.RESET}\n`); +} diff --git a/src/tier-check/tier-logic.ts b/src/tier-check/tier-logic.ts new file mode 100644 index 0000000..cf096c9 --- /dev/null +++ b/src/tier-check/tier-logic.ts @@ -0,0 +1,66 @@ +import { TierScorecard } from './types'; + +export function computeTier( + checks: TierScorecard['checks'] +): TierScorecard['implied_tier'] { + const tier1Blockers: string[] = []; + + // Check Tier 1 requirements — server conformance + if (checks.conformance.status === 'skipped') { + tier1Blockers.push('server_conformance (skipped)'); + } else if (checks.conformance.pass_rate < 1.0) { + tier1Blockers.push('server_conformance'); + } + + // Check Tier 1 requirements — client conformance + if (checks.client_conformance.status === 'skipped') { + tier1Blockers.push('client_conformance (skipped)'); + } else if (checks.client_conformance.pass_rate < 1.0) { + tier1Blockers.push('client_conformance'); + } + + if (checks.triage.compliance_rate < 0.9) { + tier1Blockers.push('triage'); + } + + if (!checks.p0_resolution.all_p0s_resolved_within_7d) { + tier1Blockers.push('p0_resolution'); + } + + if (!checks.stable_release.is_stable) { + tier1Blockers.push('stable_release'); + } + + // Policy signals (CHANGELOG, SECURITY, etc.) are informational evidence — + // they feed into the skill's judgment-based evaluation but don't independently + // block tier advancement since SEP-1730 doesn't list specific files. 
+ + if (checks.spec_tracking.status === 'fail') { + tier1Blockers.push('spec_tracking'); + } + + if (checks.labels.missing.length > 0) { + tier1Blockers.push('labels'); + } + + // Check Tier 2 requirements + const tier2Met = + (checks.conformance.status === 'skipped' || + checks.conformance.pass_rate >= 0.8) && + (checks.client_conformance.status === 'skipped' || + checks.client_conformance.pass_rate >= 0.8) && + checks.p0_resolution.all_p0s_resolved_within_14d && + checks.stable_release.is_stable; + + const tier = tier1Blockers.length === 0 ? 1 : tier2Met ? 2 : 3; + + return { + tier, + tier1_blockers: tier1Blockers, + tier2_met: tier2Met, + note: + tier === 1 + ? 'All deterministic checks pass. Judgment-based checks (docs, policy, roadmap) require /mcp-sdk-tier-audit skill.' + : 'Partial assessment — judgment-based checks require /mcp-sdk-tier-audit skill' + }; +} diff --git a/src/tier-check/types.ts b/src/tier-check/types.ts new file mode 100644 index 0000000..a9830f4 --- /dev/null +++ b/src/tier-check/types.ts @@ -0,0 +1,90 @@ +import type { SpecVersion } from '../types'; + +export type CheckStatus = 'pass' | 'fail' | 'partial' | 'skipped'; + +export interface CheckResult { + status: CheckStatus; + [key: string]: unknown; +} + +export interface ConformanceResult extends CheckResult { + pass_rate: number; + passed: number; + failed: number; + total: number; + details: Array<{ + scenario: string; + passed: boolean; + checks_passed: number; + checks_failed: number; + specVersions?: SpecVersion[]; + }>; +} + +export interface LabelsResult extends CheckResult { + present: number; + required: number; + missing: string[]; + found: string[]; + uses_issue_types: boolean; +} + +export interface TriageResult extends CheckResult { + compliance_rate: number; + total_issues: number; + triaged_within_sla: number; + exceeding_sla: number; + median_hours: number; + p95_hours: number; + days_analyzed: number | undefined; +} + +export interface P0Result extends CheckResult { + 
open_p0s: number; + open_p0_details: Array<{ number: number; title: string; age_days: number }>; + closed_within_7d: number; + closed_within_14d: number; + closed_total: number; + all_p0s_resolved_within_7d: boolean; + all_p0s_resolved_within_14d: boolean; +} + +export interface ReleaseResult extends CheckResult { + version: string | null; + is_stable: boolean; + is_prerelease: boolean; +} + +export interface PolicySignalsResult extends CheckResult { + files: Record; +} + +export interface SpecTrackingResult extends CheckResult { + latest_spec_release: string | null; + latest_sdk_release: string | null; + sdk_release_within_30d: boolean | null; + days_gap: number | null; +} + +export interface TierScorecard { + repo: string; + branch: string | null; + timestamp: string; + version: string | null; + checks: { + conformance: ConformanceResult; + client_conformance: ConformanceResult; + labels: LabelsResult; + triage: TriageResult; + p0_resolution: P0Result; + stable_release: ReleaseResult; + policy_signals: PolicySignalsResult; + spec_tracking: SpecTrackingResult; + }; + implied_tier: { + tier: 1 | 2 | 3; + tier1_blockers: string[]; + tier2_met: boolean; + note: string; + }; +} diff --git a/src/types.ts b/src/types.ts index 1084e7e..2042c27 100644 --- a/src/types.ts +++ b/src/types.ts @@ -23,6 +23,13 @@ export interface ConformanceCheck { logs?: string[]; } +export type SpecVersion = + | '2025-03-26' + | '2025-06-18' + | '2025-11-25' + | 'draft' + | 'extension'; + export interface ScenarioUrls { serverUrl: string; authUrl?: string; @@ -36,6 +43,12 @@ export interface ScenarioUrls { export interface Scenario { name: string; description: string; + specVersions: SpecVersion[]; + /** + * If true, a non-zero client exit code is expected and will not cause the test to fail. + * Use this for scenarios where the client is expected to error (e.g., rejecting invalid auth). 
+ */ + allowClientError?: boolean; start(): Promise; stop(): Promise; getChecks(): ConformanceCheck[]; @@ -43,11 +56,14 @@ export interface Scenario { export interface ClientScenarioOptions { interactive?: boolean; + clientId?: string; + clientSecret?: string; } export interface ClientScenario { name: string; description: string; + specVersions: SpecVersion[]; run( serverUrl: string, options?: ClientScenarioOptions