diff --git a/.continues.example.yml b/.continues.example.yml new file mode 100644 index 0000000..566877f --- /dev/null +++ b/.continues.example.yml @@ -0,0 +1,107 @@ +# ───────────────────────────────────────────────────────────────────────────── +# .continues.yml — Verbosity configuration for the `continues` CLI +# ───────────────────────────────────────────────────────────────────────────── +# +# Copy this file to one of: +# .continues.yml (in your project root — per-project settings) +# ~/.continues/config.yml (in your home dir — global default) +# +# Or pass explicitly: continues resume --config path/to/config.yml +# +# You only need to include the fields you want to change. +# Everything else inherits from the selected preset. +# +# Available presets: +# minimal — ~2KB output, essentials only +# standard — ~8KB output, good default (this file shows standard values) +# verbose — ~30KB output, rich context for complex tasks +# full — ~unlimited, complete session data +# ───────────────────────────────────────────────────────────────────────────── + +# Base preset — all unspecified fields inherit from this preset +preset: standard + +# How many recent conversation messages to include in the handoff +recentMessages: 10 + +# Truncate individual message content after this many characters +maxMessageChars: 500 + +# ── Shell commands (bash, terminal, exec) ──────────────────────────────────── +shell: + maxSamples: 8 # Number of shell invocations to show in detail + stdoutLines: 5 # Lines of stdout to include per command + stderrLines: 5 # Lines of stderr to include per command + maxChars: 2000 # Max characters per shell result block + showCommand: true # Show the command that was run + showExitCode: true # Show the exit code + +# ── File reads ─────────────────────────────────────────────────────────────── +read: + maxSamples: 20 # Number of file reads to list + maxChars: 0 # Max chars of file content to show (0 = path only) + showLineRange: true # Show line range 
when available (e.g. "lines 10-50") + +# ── File writes (new files) ────────────────────────────────────────────────── +write: + maxSamples: 5 # Number of writes to show in detail + diffLines: 200 # Max diff lines per write + maxChars: 5000 # Max characters for the whole write block + +# ── File edits (modifications) ─────────────────────────────────────────────── +edit: + maxSamples: 5 # Number of edits to show in detail + diffLines: 200 # Max diff lines per edit + maxChars: 5000 # Max characters for the whole edit block + +# ── Grep / search results ─────────────────────────────────────────────────── +grep: + maxSamples: 10 # Number of grep/search invocations to show + maxChars: 500 # Max chars per grep result + showPattern: true # Show the search pattern + matchLines: 5 # Number of matching lines to include + +# ── MCP tool calls ────────────────────────────────────────────────────────── +mcp: + maxSamplesPerNamespace: 5 # Max samples per MCP namespace + paramChars: 100 # Truncate MCP call parameters after N chars + resultChars: 100 # Truncate MCP call results after N chars + thinkingTools: + extractReasoning: true # Extract reasoning from thinking-style MCP tools + maxReasoningChars: 500 # Max chars of extracted reasoning + +# ── Subagent / task dispatches ─────────────────────────────────────────────── +task: + maxSamples: 5 # Number of task dispatches to show + includeSubagentResults: true # Include results from subagent completions + subagentResultChars: 500 # Max chars per subagent result + recurseSubagents: false # Recursively expand nested subagent chains + +# ── Thinking / reasoning blocks ────────────────────────────────────────────── +thinking: + include: true # Include thinking/reasoning blocks + maxChars: 1000 # Max chars of thinking content + maxHighlights: 5 # Max number of reasoning highlights to extract + +# ── Compact summary ───────────────────────────────────────────────────────── +compactSummary: + maxChars: 500 # Max chars for the compact 
summary section + +# ── Pending tasks extraction ──────────────────────────────────────────────── +pendingTasks: + extractFromThinking: true # Extract pending tasks from thinking blocks + extractFromSubagents: true # Extract pending tasks from subagent outputs + maxTasks: 10 # Max number of pending tasks to include + +# ── Per-agent feature flags ───────────────────────────────────────────────── +agents: + claude: + filterProgressEvents: true # Filter out progress/heartbeat events + parseSubagents: true # Parse subagent dispatches + parseToolResultsDir: true # Parse tool_results directory data + separateHumanFromToolResults: true # Separate human messages from tool results + # Add flags for other agents as needed: + # codex: + # someFlag: true + # gemini: + # someFlag: true diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 0000000..d77cd94 --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,34 @@ +# continues — Code Review Standards + +## Read-Only Semantics + +- Session data is NEVER modified. Any PR that writes to `~/.claude/`, `~/.codex/`, `~/.copilot/`, `~/.gemini/`, `~/.factory/`, `~/.cursor/`, or `~/.local/share/opencode/` is a critical bug. +- Handoff files (`.continues-handoff.md`) write only to the project's own working directory — never to tool storage paths. 
+ +## ESM Conventions + +- All local imports must use `.js` extensions, even for `.ts` source files: `import { foo } from './bar.js'` +- Never use `require()` — this is an ESM-only codebase (`"type": "module"` in `package.json`) +- Use `process.exitCode = N` instead of `process.exit(N)` — allows cleanup handlers to run + +## Error Handling + +- Use typed errors from `src/errors.ts` (`ParseError`, `SessionNotFoundError`, `ToolNotAvailableError`) for user-facing error paths — not bare `throw new Error()` +- Parsers must silently skip malformed session data with `catch {}` — never propagate parse errors to the caller + +## Registry Completeness + +- Every `SessionSource` member in `src/types/tool-names.ts` must have a registered `ToolAdapter` in `src/parsers/registry.ts` +- The completeness assertion at the bottom of `registry.ts` throws at module load if any adapter is missing — a missing entry crashes the CLI at startup +- Adding a tool to `TOOL_NAMES` without a registry entry is always a bug; changes to `SessionSource` require coordinated updates in the registry, fixtures, and tests + +## Code Quality + +- Biome handles all formatting and style — do not introduce ESLint, Prettier, or TSLint configs +- No synchronous file I/O (`readFileSync`, `writeFileSync`) in parser code — blocks the event loop when scanning large session directories +- Shared helpers `cleanSummary`, `extractRepoFromCwd`, and `homeDir` live in `src/utils/parser-helpers.ts` — do not duplicate them in individual parsers + +## Performance + +- Parsers run in parallel via `Promise.allSettled` in the session index builder — a slow parser delays only its own results +- The session index uses a 5-minute TTL cache at `~/.continues/sessions.jsonl` — cache bypasses should be explicitly justified diff --git a/.github/instructions/ci.instructions.md b/.github/instructions/ci.instructions.md new file mode 100644 index 0000000..f575f0f --- /dev/null +++ b/.github/instructions/ci.instructions.md @@ -0,0 +1,36 
@@ +--- +applyTo: ".github/workflows/**/*.{yml,yaml}" +--- + +# CI Workflow Review Guidelines + +## Security + +- Pin action versions to at least a named tag (`actions/checkout@v4`); prefer full commit SHA for security-critical actions +- Set `permissions` explicitly on any job that needs elevated access (e.g., `pull-requests: write`) — do not rely on repository-wide defaults +- Never print secret values to logs — use GitHub's secret masking for dynamic secrets + +```yaml +# Prefer explicit permissions scoping +permissions: + pull-requests: write + contents: read +``` + +## Node.js Version Requirements + +- Node 22 is the minimum supported version (`engines.node >= 22.0.0` in `package.json`) +- The CI matrix must include at least Node 22 and the latest even-numbered LTS — do not drop below 22 +- `node:sqlite` (built-in, Node 22.5+) is used by OpenCode and Crush parsers — do not add third-party SQLite packages + +## Package Manager + +- Use `pnpm` exclusively — not `npm ci` or `yarn` — to stay consistent with `pnpm-lock.yaml` +- Always run `pnpm install --frozen-lockfile` in CI to prevent accidental lockfile mutations +- Use `pnpm/action-setup@v4` for pnpm setup + +## Build and Test Order + +- Run `pnpm run build` (TypeScript compile) before `pnpm test` — `tsc` validates type correctness; test failures may be caused by type errors caught at build time +- The `test-quality` job posts a PR comment summarizing test counts and flags source-file changes without corresponding test changes — do not remove this job without an equivalent replacement +- The `test-quality` job should only run on `pull_request` events (not push to `main`) diff --git a/.github/instructions/parsers.instructions.md b/.github/instructions/parsers.instructions.md new file mode 100644 index 0000000..6a20e6b --- /dev/null +++ b/.github/instructions/parsers.instructions.md @@ -0,0 +1,62 @@ +--- +applyTo: "src/parsers/**/*.ts" +--- + +# Parser Review Guidelines + +## Crash Safety (Critical) + +- Parsers 
MUST NOT throw to the caller — each runs inside `Promise.allSettled` and an uncaught error silently drops that tool's sessions from the index +- Wrap JSON.parse calls and file-read loops in try-catch with an empty catch block to silently skip malformed data + +```typescript +// Good — CLI continues if one line is malformed +for (const line of lines) { + try { + const data = JSON.parse(line); + // process... + } catch { + // Skip malformed line silently + } +} + +// Bad — one bad line crashes the entire parser +for (const line of lines) { + const data = JSON.parse(line); +} +``` + +## Required Exports + +Each parser file must export exactly two functions: + +- `parseSessions(): Promise<UnifiedSession[]>` — file discovery and metadata extraction +- `extractContext(session: UnifiedSession, config?: VerbosityConfig): Promise<SessionContext>` — full conversation and tool activity extraction + +Both must be registered in `src/parsers/registry.ts` with all `ToolAdapter` fields populated. + +## JSONL Streaming + +- Stream JSONL with `readline.createInterface` — never `fs.readFileSync` for session files +- Use helpers from `src/utils/jsonl.ts` (`readJsonlFile`, `scanJsonlHead`) when applicable +- Keep only the last ~10 messages in `recentMessages` — do not accumulate the entire conversation + +## Tool Summarizer + +- Always use `SummaryCollector` from `src/utils/tool-summarizer.ts` — never build `ToolUsageSummary[]` arrays manually +- Call `collector.add(category, summary, filePath?, isWrite?)` for each tool invocation + +## Shared Helpers + +- Import `cleanSummary`, `extractRepoFromCwd`, `homeDir` from `src/utils/parser-helpers.ts` +- Do not duplicate these utilities in individual parser files + +## New Tool Checklist + +Adding a new tool requires ALL five of: + +1. Parser file `src/parsers/<tool>.ts` exporting both required functions +2. Registry entry in `src/parsers/registry.ts` with all `ToolAdapter` fields +3. Type update in `src/types/tool-names.ts` — add to `SessionSource` union and `TOOL_NAMES` array +4.
Fixture factory in `src/__tests__/fixtures/index.ts` +5. Conversion tests in `src/__tests__/unit-conversions.test.ts` for all N-1 paths in each direction diff --git a/.github/instructions/security.instructions.md b/.github/instructions/security.instructions.md new file mode 100644 index 0000000..0e84ca6 --- /dev/null +++ b/.github/instructions/security.instructions.md @@ -0,0 +1,35 @@ +--- +applyTo: "src/utils/**/*.ts" +--- + +# Security Review Guidelines — Core Utilities + +## Command Injection Prevention (resume.ts) + +- External processes must be spawned with `spawn()` and arguments as an **array** — NEVER `exec()` with string interpolation +- Session IDs and file paths from parsed sessions are user-controlled and may contain shell metacharacters (`;`, `|`, `&`, `$`, backticks) — they must always be array elements, never embedded in a shell string + +```typescript +// Avoid — command injection if sessionId contains ; or | or $() +exec(`claude --resume ${sessionId}`); + +// Prefer — safe regardless of sessionId content +spawn('claude', ['--resume', sessionId], { stdio: 'inherit' }); +``` + +## Forward Flag Security (forward-flags.ts) + +- `--dangerously-skip-permissions` (Claude) and `--dangerously-bypass-approvals-and-sandbox` (Codex) must ONLY be set when the source session **explicitly** requested auto-approve behavior +- Flag precedence is security-critical: auto-approve > full-auto > sandbox > ask-for-approval — deviations from this order could grant unintended permissions in the target tool +- Never map a "plan mode" flag or any scheduling flag to auto-approve behavior + +## Handoff Output Safety (markdown.ts) + +- Do NOT embed secrets, API keys, tokens, or environment variable values in handoff markdown output +- Tool activity summaries (shell command output, file diffs) may contain sensitive data — the verbosity config caps limit exposure; do not bypass these caps +- Home directory paths in handoff output must be tildified (`~/`) using `safePath()` — 
never expose the full absolute home path + +## General + +- No hardcoded credentials, API keys, or tokens anywhere in source files +- User-supplied paths (session file paths, `cwd` values) must not be passed to shell execution without sanitization — use `spawn()` with array args only diff --git a/.github/instructions/testing.instructions.md b/.github/instructions/testing.instructions.md new file mode 100644 index 0000000..619c670 --- /dev/null +++ b/.github/instructions/testing.instructions.md @@ -0,0 +1,35 @@ +--- +applyTo: "src/__tests__/**/*.ts" +--- + +# Test Review Guidelines + +## Fixture-Based Testing + +- Tests must NOT require real session files on the local machine — use fixture factories from `src/__tests__/fixtures/index.ts` +- Each tool has a `createFixture()` factory that creates a temp directory with realistic session data +- Ground fixture schemas in real session file formats — verify field names against actual tool storage before creating fixtures; do not invent schemas + +## Conversion Coverage + +- `unit-conversions.test.ts` is the primary suite — it must cover all N tools × (N-1) target conversion paths +- Adding a new tool requires: a fixture factory AND conversion tests covering all N-1 directions (as both source and target) +- PRs that add a new parser but do not update `unit-conversions.test.ts` are incomplete + +## Test Quality + +- Each test asserts ONE behavior — not multiple unrelated assertions bundled into a single test case +- Test names should describe the scenario: `should extract session summary from Claude JSONL` +- Tests must be independent — no shared mutable state between test cases +- Use `beforeAll` / `afterAll` for fixture setup and cleanup (create temp dir → run tests → delete temp dir) + +## Regression Tests + +- Bug fixes must include a regression test that fails before the fix and passes after +- PRs that modify parser logic without touching any test file should be flagged — the CI `test-quality` job will also flag this 
+ +## Performance + +- Test timeout is 30 seconds (`vitest.config.ts`) — a test that times out indicates a parser with blocking or synchronous I/O +- Excluded by vitest config: `e2e*`, `real-e2e*`, `stress*`, `injection*`, `parsers.test*` — these require a real environment and are not run in CI +- Node.js 22+ is required; `node:sqlite` built-in is used by OpenCode/Crush fixtures — do not add third-party SQLite deps diff --git a/.github/instructions/typescript.instructions.md b/.github/instructions/typescript.instructions.md new file mode 100644 index 0000000..ea6bad4 --- /dev/null +++ b/.github/instructions/typescript.instructions.md @@ -0,0 +1,46 @@ +--- +applyTo: "**/*.ts" +--- + +# TypeScript Review Guidelines + +## Type Safety + +- Avoid `any` — use `unknown` for external data (JSON.parse results, JSONL lines), then narrow with type guards +- Define interfaces for all object shapes that cross module boundaries — not inline object literals +- Use `as const` for literal arrays like `TOOL_NAMES` to get narrower inferred types + +```typescript +// Avoid +const parsed = JSON.parse(line) as any; +return parsed.type; + +// Prefer +const parsed = JSON.parse(line) as unknown; +if (typeof parsed !== 'object' || parsed === null || !('type' in parsed)) return; +// Narrowed — safe to access (parsed as Record<string, unknown>) +``` + +## Discriminated Unions + +- Use `switch (d.category)` for narrowing `StructuredToolSample` — not `instanceof` checks +- New tool sample types must be added to the `StructuredToolSample` union in `src/types/index.ts` +- The `category` field is the discriminant — never use string-equality checks outside of switch + +## Async Patterns + +- All file I/O must be async: use `fs.promises.*` not `fs.readFileSync` / `fs.writeFileSync` +- JSONL files must be streamed with `readline.createInterface` — never loaded into memory wholesale +- Avoid blocking the event loop in parsers — they run in parallel via `Promise.allSettled` + +## Import Rules + +- Local imports must
end in `.js`: `import { foo } from './bar.js'` — required for Node.js ESM module resolution +- Never import from `dist/` in source files +- Prefer named exports over default exports for better refactoring support + +## Defensive Patterns + +- Use optional chaining (`?.`) and nullish coalescing (`??`) for optional fields from parsed session data +- Sessions returned from parsers must be sorted by `updatedAt` descending (newest first) +- Use `process.exitCode = N` over `process.exit(N)` to allow SIGTERM/SIGINT handlers to run diff --git a/.github/workflows/beta-publish.yml b/.github/workflows/beta-publish.yml new file mode 100644 index 0000000..587627c --- /dev/null +++ b/.github/workflows/beta-publish.yml @@ -0,0 +1,68 @@ +name: Beta Publish + +on: + push: + branches: [develop] + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: 22 + - uses: pnpm/action-setup@v4 + with: + version: 10 + - run: pnpm install --frozen-lockfile + - run: pnpm run build + - run: pnpm test + + publish-beta: + needs: test + runs-on: ubuntu-latest + permissions: + contents: read + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: 22 + registry-url: https://registry.npmjs.org + - uses: pnpm/action-setup@v4 + with: + version: 10 + - run: pnpm install --frozen-lockfile + - run: pnpm run build + + - name: Auto-increment beta version + run: | + # Get current beta version from npm and version from package.json + CURRENT_BETA=$(npm view continues@beta version 2>/dev/null || echo "") + PKG_VERSION=$(node -p "require('./package.json').version") + PKG_BASE=$(echo "$PKG_VERSION" | sed 's/-beta\..*//') + + if [ -z "$CURRENT_BETA" ]; then + echo "No existing beta on npm, using package.json version: $PKG_VERSION" + else + NPM_BASE=$(echo "$CURRENT_BETA" | sed 's/-beta\..*//') + + if [ "$PKG_BASE" != "$NPM_BASE" ]; then + # Major/minor version changed in package.json — use it as-is 
+ echo "Version base changed: npm=$NPM_BASE, package.json=$PKG_BASE" + echo "Using package.json version: $PKG_VERSION" + else + # Same base — increment beta number + BETA_NUM=$(echo "$CURRENT_BETA" | grep -o 'beta\.[0-9]*' | cut -d. -f2) + NEXT_NUM=$((BETA_NUM + 1)) + NEW_VERSION="${NPM_BASE}-beta.${NEXT_NUM}" + echo "Current beta: $CURRENT_BETA → Next: $NEW_VERSION" + npm version "$NEW_VERSION" --no-git-tag-version + fi + fi + + - name: Publish beta to npm + run: npm publish --tag beta --access public + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 12a9c1d..aeb7e4e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,7 +2,7 @@ name: CI on: push: - branches: [main] + branches: [main, develop] pull_request: branches: [main] diff --git a/.gitignore b/.gitignore index 89c6ae7..e837f54 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,15 @@ dist/ .continues-handoff.md .agent-workspace/ stress-test-output.log +# Generated / local tooling +.agents/ +.claude/skills/ +.greptile/ +.superset/ +.windsurf/ +AGENTS.md +REVIEW.md +skills-lock.json +.claude/CLAUDE.md +.claude/settings.local.json +.emdash.json diff --git a/.greptile/config.json b/.greptile/config.json new file mode 100644 index 0000000..5b4c5b0 --- /dev/null +++ b/.greptile/config.json @@ -0,0 +1,129 @@ +{ + "strictness": 2, + "commentTypes": ["logic", "syntax"], + "triggerOnUpdates": false, + "fixWithAI": true, + "statusCheck": true, + "includeConfidenceScore": true, + "includeSequenceDiagram": false, + "summarySection": { "included": true, "collapsible": false, "defaultOpen": true }, + "issuesTableSection": { "included": true, "collapsible": true, "defaultOpen": true }, + "confidenceScoreSection": { "included": true, "collapsible": true, "defaultOpen": false }, + "ignorePatterns": 
"dist/**\nnode_modules/**\n*.d.ts\npnpm-lock.yaml\npackage-lock.json\n*.tgz\ntest-fixtures/**\n**/__snapshots__/**\n*.generated.*\n.claude/**\n.windsurf/**\n.agents/**\n.github/**\nCHANGELOG.md\nskills-lock.json\ndemo.mp4", + "rules": [ + { + "id": "registry-single-source-of-truth", + "rule": "All tool-specific behavior (CLI commands, resume logic, session discovery, color coding) must be derived from the adapter registry in src/parsers/registry.ts. No switch statements, hardcoded tool arrays, or if-else chains that enumerate SessionSource values outside the registry. If you see a new tool name added anywhere other than the registry's register() call, flag it.", + "scope": ["src/commands/**/*.ts", "src/utils/**/*.ts", "src/cli.ts"], + "severity": "high" + }, + { + "id": "tool-names-source-of-truth", + "rule": "New tools must be added to the TOOL_NAMES array in src/types/tool-names.ts FIRST. This frozen array is the single source of truth from which the SessionSource union type is derived. The registry completeness assertion in registry.ts will throw at module load if a tool is in TOOL_NAMES but not registered. Never define SessionSource manually or add tools only to the registry without updating TOOL_NAMES.", + "scope": ["src/types/tool-names.ts", "src/types/index.ts", "src/parsers/registry.ts"], + "severity": "high" + }, + { + "id": "parser-isolation", + "rule": "Parser files (src/parsers/*.ts, excluding registry.ts and index.ts) must not import from other parser files. Each parser is independent. Cross-parser shared logic belongs in src/utils/parser-helpers.ts, src/utils/tool-summarizer.ts, or src/utils/tool-extraction.ts.", + "scope": ["src/parsers/**/*.ts"], + "severity": "high" + }, + { + "id": "esm-import-extensions", + "rule": "All local imports must use .js file extensions (e.g., import { foo } from './utils/bar.js'). This is an ESM-only project using NodeNext module resolution. 
Bare specifier imports without .js will fail at runtime even though TypeScript compilation succeeds.", + "scope": ["src/**/*.ts"], + "severity": "high" + }, + { + "id": "session-data-read-only", + "rule": "Parsers must never write to, modify, or delete source session files. The tool's storage directories (~/.claude/, ~/.codex/, ~/.copilot/, etc.) are read-only. Only ~/.continues/ is writable for index cache and handoff files. Any fs.writeFile, fs.unlink, or fs.rename targeting a tool's storage path is a critical bug.", + "scope": ["src/parsers/**/*.ts"], + "severity": "high" + }, + { + "id": "zod-schema-validation", + "rule": "All raw session data read from disk (JSONL lines, JSON files, SQLite rows, YAML) must be validated through a Zod schema from src/types/schemas.ts before use. Never cast raw JSON to a TypeScript type with 'as' — parse through the schema to catch malformed data early. All schemas must use .passthrough() to tolerate extra fields from future tool versions.", + "scope": ["src/parsers/**/*.ts", "src/types/schemas.ts"], + "severity": "high" + }, + { + "id": "summary-collector-usage", + "rule": "All parsers must use SummaryCollector from src/utils/tool-summarizer.ts to accumulate tool activity and track modified files. Do not build ad-hoc summary strings or manually track file lists. Call collector.add(category, summary, filePath?, isWrite?) for each tool invocation found in session data.", + "scope": ["src/parsers/**/*.ts"], + "severity": "medium" + }, + { + "id": "promise-allsettled-for-parsers", + "rule": "When calling multiple parsers in parallel (e.g., during session index building), use Promise.allSettled() — never Promise.all(). A single broken or missing parser must not crash the entire CLI. 
Each parser result should be checked for status === 'fulfilled' before use.", + "scope": ["src/utils/index.ts"], + "severity": "high" + }, + { + "id": "typed-error-classes", + "rule": "Throw ContinuesError subclasses (ParseError, SessionNotFoundError, ToolNotAvailableError, UnknownSourceError, IndexError, StorageError) from src/errors.ts instead of raw Error objects. Each error class provides a machine-readable .name property for consistent error handling.", + "scope": ["src/commands/**/*.ts", "src/utils/**/*.ts", "src/parsers/**/*.ts"], + "severity": "medium" + }, + { + "id": "no-process-exit", + "rule": "Do not call process.exit() directly. Set process.exitCode instead and let the event loop drain naturally. This ensures cleanup handlers and pending I/O complete before the process terminates.", + "scope": ["src/**/*.ts"], + "severity": "medium" + }, + { + "id": "silent-parser-skip", + "rule": "When a parser encounters an unparseable or corrupted session file, it must catch the error silently (catch {}) and skip to the next file. Individual file failures must never propagate and crash the CLI or abort discovery of remaining sessions.", + "scope": ["src/parsers/**/*.ts"], + "severity": "high" + }, + { + "id": "recent-messages-user-included", + "rule": "When trimming recentMessages for the handoff document, use the trimMessages() helper from src/utils/parser-helpers.ts which guarantees at least one user message is included. 
A handoff with only assistant messages loses critical context about what was requested.", + "scope": ["src/parsers/**/*.ts", "src/utils/markdown.ts"], + "severity": "medium" + }, + { + "id": "new-parser-requires-registration", + "rule": "Adding a new AI tool parser requires exactly 4 changes: (1) add to TOOL_NAMES in src/types/tool-names.ts, (2) create src/parsers/<tool>.ts with parse and extract functions, (3) register in src/parsers/registry.ts with all ToolAdapter fields, (4) add tool name classification to the canonical sets in tool-names.ts if the tool introduces new tool call names. If a PR adds a parser file but is missing any of these, flag it as incomplete.", + "scope": ["src/parsers/**/*.ts", "src/types/tool-names.ts", "src/types/index.ts"], + "severity": "high" + }, + { + "id": "new-parser-requires-tests", + "rule": "Every new parser must include: (1) a fixture factory in src/__tests__/fixtures/index.ts, (2) conversion test cases in src/__tests__/unit-conversions.test.ts covering all N-1 conversion paths in each direction. PRs adding parsers without test coverage must not be merged.", + "scope": ["src/parsers/**/*.ts", "src/__tests__/**/*.ts"], + "severity": "high" + }, + { + "id": "no-experimental-warning-suppression-elsewhere", + "rule": "The node:sqlite ExperimentalWarning suppression must only exist at the top of src/cli.ts (the entry point). Do not add process.removeAllListeners or warning suppressions in parser files or utilities — it affects the entire process and belongs in exactly one place.", + "scope": ["src/**/*.ts"], + "severity": "low" + }, + { + "id": "streaming-large-files", + "rule": "For JSONL and large session files, use readline.createInterface with a read stream — do not read entire files into memory with fs.readFile. Session files can be several MB.
Look at claude.ts or codex.ts for the established streaming pattern.", + "scope": ["src/parsers/**/*.ts"], + "severity": "medium" + }, + { + "id": "verbosity-config-passthrough", + "rule": "Parser extractContext functions that accept a VerbosityConfig parameter must forward it to generateHandoffMarkdown(). Do not call generateHandoffMarkdown without the config parameter — this silently falls back to the 'standard' preset and ignores user-configured verbosity from .continues.yml.", + "scope": ["src/parsers/**/*.ts"], + "severity": "medium" + }, + { + "id": "forward-flag-canonical-keys", + "rule": "Forward-flag mappers in registry.ts must use the CanonicalFlagKey union type from src/utils/forward-flags.ts. Adding ad-hoc string keys to context.all() or context.latestString() without updating the CanonicalFlagKey type will cause TypeScript errors. New flag names must first be added to the union.", + "scope": ["src/parsers/registry.ts", "src/utils/forward-flags.ts"], + "severity": "medium" + }, + { + "id": "tool-classification-sets", + "rule": "When a new tool alias appears (e.g., a new CLI names its bash tool 'run_command'), add it to the corresponding canonical set in src/types/tool-names.ts (SHELL_TOOLS, READ_TOOLS, WRITE_TOOLS, etc.). The classifyToolName() function and markdown renderer derive behavior from these sets — unlisted tool names silently fall through to the 'mcp' category, causing incorrect rendering in the handoff document.", + "scope": ["src/types/tool-names.ts", "src/parsers/**/*.ts"], + "severity": "medium" + } + ] +} diff --git a/.greptile/files.json b/.greptile/files.json new file mode 100644 index 0000000..42e95b5 --- /dev/null +++ b/.greptile/files.json @@ -0,0 +1,43 @@ +{ + "files": [ + { + "path": "CLAUDE.md", + "description": "Primary architecture guide. Contains the core flow diagram, module boundaries, adapter registry pattern, parser contract, and the 4-step process for adding new platforms. Read before reviewing any PR." 
+ }, + { + "path": "src/types/index.ts", + "description": "Core type definitions — UnifiedSession, ConversationMessage, ToolCall, ToolUsageSummary, SessionContext, HandoffOptions, and the StructuredToolSample discriminated union. Validate that PRs keep types consistent with these definitions.", + "scope": ["src/**/*.ts"] + }, + { + "path": "src/types/tool-names.ts", + "description": "TOOL_NAMES frozen array (single source of truth for supported tools), SessionSource type derivation, and canonical tool name sets (SHELL_TOOLS, READ_TOOLS, etc.) used by classifyToolName(). New tools must be added here first.", + "scope": ["src/types/**/*.ts", "src/parsers/**/*.ts"] + }, + { + "path": "src/types/schemas.ts", + "description": "Zod schemas for all parser raw data formats — Claude, Codex, Copilot, Gemini, OpenCode, Droid, Cursor, and the serialized session index. All schemas use .passthrough() for forward compatibility.", + "scope": ["src/parsers/**/*.ts", "src/types/**/*.ts"] + }, + { + "path": "src/parsers/registry.ts", + "description": "Central adapter registry, ToolAdapter interface, and forward-flag mappers. All tool-specific behavior derives from this file. Has a completeness assertion that throws if TOOL_NAMES and registry are out of sync.", + "scope": ["src/parsers/**/*.ts", "src/commands/**/*.ts", "src/utils/**/*.ts"] + }, + { + "path": "src/errors.ts", + "description": "Typed error hierarchy (ContinuesError base + 6 subclasses: ParseError, SessionNotFoundError, ToolNotAvailableError, UnknownSourceError, IndexError, StorageError). Verify PRs use these instead of raw Error objects.", + "scope": ["src/**/*.ts"] + }, + { + "path": "src/__tests__/fixtures/index.ts", + "description": "Fixture factories for all parsers. 
When reviewing new parser PRs, verify a matching fixture factory exists here with realistic session data.", + "scope": ["src/__tests__/**/*.ts", "src/parsers/**/*.ts"] + }, + { + "path": ".continues.example.yml", + "description": "Configuration reference showing all verbosity presets and per-tool settings (shell, read, write, edit, grep, mcp, task, thinking). Useful context when reviewing config-related changes.", + "scope": ["src/config/**/*.ts"] + } + ] +} diff --git a/.greptile/rules.md b/.greptile/rules.md new file mode 100644 index 0000000..42c42ea --- /dev/null +++ b/.greptile/rules.md @@ -0,0 +1,159 @@ +## Adapter Registry Architecture + +This codebase uses a **registry-driven architecture** where all tool-specific behavior flows from a single `adapters` record in `src/parsers/registry.ts`. The CLI, session index, resume logic, and help text are all generated from the registry — no manual switch statements or hardcoded tool lists. + +When reviewing PRs that touch tool-specific behavior, verify the change originates from or integrates with the registry rather than introducing a parallel lookup mechanism. + +### Good — Derive behavior from registry +```ts +const adapter = adapters[session.source]; +const args = adapter.nativeResumeArgs(session); +``` + +### Bad — Hardcoded tool enumeration +```ts +if (source === 'claude') { args = ['--resume', id]; } +else if (source === 'codex') { args = ['--resume', id]; } +// ...fragile, will break when new tools are added +``` + +## Adding a New Tool — The 4-Step Checklist + +Adding a new AI coding tool requires changes in exactly 4 places, done in this order: + +1. **`src/types/tool-names.ts`** — Add to `TOOL_NAMES` array (drives `SessionSource` type) +2. **`src/parsers/<tool>.ts`** — Create parser with `parseSessions()` + `extractContext()` +3. **`src/parsers/registry.ts`** — Register with all `ToolAdapter` fields +4.
**`src/__tests__/fixtures/index.ts`** — Add fixture factory + conversion tests + +The registry has a completeness assertion at module load — if a name is in `TOOL_NAMES` but not registered, the CLI throws immediately. This is intentional: it ensures no tool is partially added. + +### Good — Complete registration +```ts +// 1. tool-names.ts: add 'newtool' to TOOL_NAMES +// 2. parsers/newtool.ts: export parseNewtoolSessions + extractNewtoolContext +// 3. registry.ts: +register({ + name: 'newtool', + label: 'NewTool', + color: chalk.hex('#AABBCC'), + storagePath: '~/.newtool/sessions/', + binaryName: 'newtool', + parseSessions: parseNewtoolSessions, + extractContext: extractNewtoolContext, + nativeResumeArgs: (s) => ['--resume', s.id], + crossToolArgs: (prompt) => [prompt], + resumeCommandDisplay: (s) => `newtool --resume ${s.id}`, +}); +``` + +### Bad — Incomplete registration +```ts +// Added parser file + registry entry, but forgot TOOL_NAMES +// Result: TypeScript error on SessionSource, or registry assertion throws at startup +``` + +## Parser Contract + +Every parser must export two functions following the `ToolAdapter` interface: + +1. `parseSessions()` — Discovers session files, returns `UnifiedSession[]` sorted by `updatedAt` descending +2. `extractContext()` — Full conversation extraction, returns `SessionContext` with handoff markdown + +Parsers must be **self-contained** (no cross-parser imports) and **fault-tolerant** (silently skip corrupted files). 
+ +### Good — Silent skip on parse failure +```ts +for (const file of sessionFiles) { + try { + const data = await parseSessionFile(file); + sessions.push(data); + } catch { + // Skip corrupted/incompatible files silently + } +} +``` + +### Bad — Letting parse errors propagate +```ts +for (const file of sessionFiles) { + const data = await parseSessionFile(file); // Crashes CLI if any file is bad + sessions.push(data); +} +``` + +## Zod Schema Validation + +All raw data from disk must pass through Zod schemas defined in `src/types/schemas.ts`. Schemas use `.passthrough()` to tolerate unknown fields from future tool versions — never remove this. + +### Good — Validate through schema +```ts +import { ClaudeMessageSchema } from '../types/schemas.js'; + +const parsed = ClaudeMessageSchema.safeParse(JSON.parse(line)); +if (!parsed.success) continue; // Skip malformed line +const msg = parsed.data; +``` + +### Bad — Unsafe cast +```ts +const msg = JSON.parse(line) as ClaudeMessage; // No validation, crashes on malformed data +``` + +## ESM Module Discipline + +This project is ESM-only (`"type": "module"` in package.json) with `NodeNext` module resolution. All local imports require `.js` extensions — TypeScript compiles `.ts` to `.js`, so import paths must reference the output extension. + +### Good +```ts +import { cleanSummary } from '../utils/parser-helpers.js'; +import { SummaryCollector } from '../utils/tool-summarizer.js'; +``` + +### Bad +```ts +import { cleanSummary } from '../utils/parser-helpers'; // Missing .js — runtime crash +import { cleanSummary } from '../utils/parser-helpers.ts'; // .ts extension — wrong +``` + +## Error Hierarchy + +Use the typed error classes from `src/errors.ts` instead of bare `Error`. Each class has a `.name` property for programmatic error handling. 
+ +| Error Class | When to Use | +|---|---| +| `ParseError` | Session file parsing failures | +| `SessionNotFoundError` | Session ID lookup misses | +| `ToolNotAvailableError` | CLI binary not found on PATH | +| `UnknownSourceError` | Invalid `SessionSource` value | +| `IndexError` | Cache read/write failures | +| `StorageError` | Handoff file I/O errors | + +### Good +```ts +throw new ParseError('claude', filePath, `Malformed JSONL at line ${i}: ${e.message}`); +``` + +### Bad +```ts +throw new Error(`Parse failed`); // No type, no context, not catchable by class +``` + +## Verbosity Config Passthrough + +Parsers receive a `VerbosityConfig` parameter — this must be forwarded to `generateHandoffMarkdown()`. Dropping it silently falls back to the `'standard'` preset, ignoring user settings from `.continues.yml`. + +### Good +```ts +async function extractToolContext(session: UnifiedSession, config?: VerbosityConfig): Promise<SessionContext> { + // ... parse messages, collect tool activity ... + const markdown = generateHandoffMarkdown(session, messages, files, tasks, summaries, notes, config); + return { session, recentMessages, filesModified, pendingTasks, toolSummaries, sessionNotes, markdown }; +} +``` + +### Bad +```ts +// Forgot to pass config — user's verbosity settings are silently ignored +const markdown = generateHandoffMarkdown(session, messages, files, tasks, summaries, notes); +``` diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 0000000..276b9b9 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,52 @@ +# AGENTS.md + +Agent behavior instructions for `continues` — the cross-tool AI session handoff CLI. Read `CLAUDE.md` for architecture, types, and the step-by-step guide for adding a new parser. This file covers workflow, coding standards, and anti-patterns. + +## Workflow + +- **Before any commit**: run `pnpm run check` (`pnpm lint && pnpm build`). Fix all Biome errors; warnings are advisory. 
+- **After changing a parser or fixture**: run `pnpm test` and confirm all tests pass. Do not commit with failing tests. +- **After adding a new tool**: run `pnpm run link` to test the global `continues` / `cont` binary locally. +- **Never run** `pnpm run test:watch`, `e2e-conversions.test.ts`, `real-e2e-full.ts`, or `stress-test.ts` in CI — these require live session files on the developer's machine. + +## Coding Standards + +- **ESM-only**: all local imports must end in `.js`, even for `.ts` source files (e.g., `import { foo } from './foo.js'`). +- **No `any`**: Biome reports `noExplicitAny` as a warning. Avoid it; document the reason if unavoidable. +- **Exit codes**: set `process.exitCode = N` rather than calling `process.exit(N)`. +- **Logging**: use `logger` from `src/logger.ts` for all diagnostic output. Never use bare `console.log`/`console.warn`/`console.error` in library code. TUI display goes through `@clack/prompts` or `chalk` via the display layer. +- **Error types**: throw typed errors from `src/errors.ts` on user-facing paths, not bare `new Error()`. +- **Tool activity**: use `SummaryCollector` from `src/utils/tool-summarizer.ts` in every parser. Do not build `ToolUsageSummary[]` arrays manually. +- **JSONL**: stream with `readline.createInterface`, never `fs.readFileSync` + `split('\n')`. +- **SQLite** (OpenCode, Crush parsers): use built-in `node:sqlite` — do not add third-party SQLite dependencies. +- **Biome rules in force**: `noEmptyBlockStatements` (error), `noUnusedImports` (error), `useConst` (error). Empty `catch {}` blocks fail the linter; use `catch (err) { logger.debug(...) }` instead. + +## Adding a New Tool — Checklist + +All five steps are required. Missing any one is a bug. See `CLAUDE.md` for detailed implementation guidance. + +1. Add tool name to `TOOL_NAMES` in `src/types/tool-names.ts` +2. Create `src/parsers/<tool>.ts` exporting `parseSessions()` and `extractContext()` +3. 
Register in `src/parsers/registry.ts` (the completeness assertion throws at module load if missing) +4. Add `createFixture()` in `src/__tests__/fixtures/index.ts` +5. Add conversion test cases in `src/__tests__/unit-conversions.test.ts` + +## Dependencies + +- **`@clack/prompts`** — all interactive TUI prompts and spinners. Do not use `readline` or `inquirer`. +- **`chalk`** — terminal color. Chalk v4 is installed (CommonJS compat import); do not upgrade to v5+ (ESM-only). +- **`commander`** — CLI argument parsing. +- **`ora`** — non-interactive spinners. +- **`yaml`** — YAML parsing for Copilot sessions. +- **`zod`** — runtime schema validation. Use `z.safeParse()` where failures are recoverable. +- Do not add new runtime dependencies without strong justification. The install footprint is intentionally small. + +## Anti-Patterns to Avoid + +- **Writing to tool storage directories** — the tool is read-only. Any write to `~/.claude/`, `~/.codex/`, etc. is a severe bug. +- **`exec()` with string interpolation** — always use `spawn()` with an argument array in `resume.ts`. Session IDs and paths can contain shell metacharacters. +- **`fs.readFileSync`/`fs.writeFileSync` in parsers** — these block the event loop. Use async fs APIs or `readline`. +- **Duplicating parser-helpers** — `cleanSummary`, `extractRepoFromCwd`, `homeDir` live in `src/utils/parser-helpers.ts`. Import them; do not reimplement. +- **Hardcoding tool names** — derive from `TOOL_NAMES` or `SessionSource`. Never write `if (tool === 'claude' || tool === 'codex' || ...)`. +- **Importing `node:sqlite` outside OpenCode/Crush parsers** — SQLite is only needed for those two tools. Do not spread this dependency. +- **Embedding secrets in handoff markdown** — `.continues-handoff.md` is written to project directories and may be committed or read by other AI tools. 
diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..fa1f4f1 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,332 @@ +# Changelog + +All notable changes to `continues` will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + + +## [4.1.0] - 2026-03-02 + +### Session Origin Tracking + +Handoff output now includes the full file path of the original session, so the receiving tool (or you) can trace back to exactly where the data came from. + +### Added + +- **Session file path in handoff markdown** — the Session Overview table now has a `Session File` row pointing to the original session file on disk +- **Session Origin section** — new section at the bottom of every handoff document with source tool, file path, session ID, and project directory +- **Session path in resume prompts** — both inline and reference prompts now reference the original session file path + +### Fixed + +- **`dump` spinner leak** — spinner now stops properly when session loading throws an error, instead of spinning forever +- **`dump` --limit NaN guard** — passing invalid values like `--limit abc` now shows a clear error instead of silently doing nothing + +--- + +## [3.1.0] - 2026-02-22 + +### Smart Context Display + +The handoff markdown that `continues` generates has been completely redesigned. Instead of flat one-liner tool summaries, each tool category now gets **type-aware data capture** at extraction time and **type-aware rendering** at display time. + +The previous system discarded ~80-90% of available structured data from tool calls (diffs, stdout, exit codes, line ranges, match counts) and reduced everything to a single summary string. v3.1 captures what matters per tool type and renders it in the format most useful for the receiving AI. 
+ +### Added + +- **Structured tool data pipeline** — new `StructuredToolSample` discriminated union type with 11 per-category data shapes: + - `ShellSampleData` — command, exit code, stdout tail, error flag + - `ReadSampleData` — file path, line start/end + - `WriteSampleData` — file path, new-file flag, unified diff, diff stats + - `EditSampleData` — file path, unified diff, diff stats + - `GrepSampleData` — pattern, target path, match count + - `GlobSampleData` — pattern, result count + - `SearchSampleData` — query + - `FetchSampleData` — URL, result preview + - `TaskSampleData` — description, agent type, result summary + - `AskSampleData` — question + - `McpSampleData` — tool name, truncated params, result preview + +- **Minimal diff utility** (`src/utils/diff.ts`) — zero external dependencies: + - `formatNewFileDiff()` — all-`+` lines for newly created files, capped at 200 lines + - `formatEditDiff()` — `-`/`+` blocks for search-and-replace edits, capped at 200 lines + - `extractStdoutTail()` — last N non-empty lines from command output + - `countDiffStats()` — count added/removed lines from a diff string + +- **Category-aware markdown renderer** with per-type templates: + - **Shell** — blockquote with `$ command`, exit code, and stdout tail in `` ```console `` blocks + - **Write** — file path with `(new file)` and `(+N lines)` tags, fenced `diff` code block + - **Edit** — file path with `(+N -M lines)` stats, fenced `diff` code block + - **Read** — bullet list with optional `(lines 50-120)` range annotations + - **Grep** — bullet list with `"pattern" in path — N matches` + - **Glob** — bullet list with `pattern — N files` + - **Search/Fetch/Task/Ask/MCP** — compact format with extracted key fields + +- **Display mode caps** — inline mode (piped into CLI) uses tighter limits; reference mode (`.continues-handoff.md` on disk) gets fuller output: + + | Cap | Inline | Reference | + |----------------------|--------|-----------| + | Shell detailed | 5 | 8 | + | 
Shell stdout lines | 3 | 5 | + | Write/Edit detailed | 3 | 5 | + | Write/Edit diff cap | 50 | 200 | + | Read entries | 15 | 20 | + | Grep/Glob/Search | 8 | 10 | + | MCP/Task/Ask | 3 | 5 | + +- **Per-category sample limits** in `SummaryCollector` — prevents any single tool type from dominating the handoff (Shell: 8, Write/Edit: 5, Read: 20, Grep/Glob: 10, MCP/Task/Ask: 5) + +- **Error tracking** — `errorCount` on `ToolUsageSummary`, `[ERROR]` tags on failed shell commands + +- **20 new tests** covering `classifyToolName`, diff utilities, structured data extraction, and category-aware rendering (258 total, up from 238) + +### Changed + +- **`SummaryCollector.add()`** — migrated from positional arguments `(category, summary, filePath?, isWrite?)` to options object `(category, summary, opts?: { data?, filePath?, isWrite?, isError? })` + +- **`extractAnthropicToolData()`** — full rewrite; first pass stores tool results up to 4000 chars (was 100) with error flags; second pass constructs `StructuredToolSample` per category with rich extracted fields + +- **Tool Activity section** in handoff markdown — replaced flat bullet list with category-grouped subsections (`### Shell`, `### Write`, `### Edit`, `### Read`, etc.) 
in fixed priority order + +### Parser updates + +- **Codex** — shell commands now capture exit code + stdout tail; `apply_patch` captures the patch as an edit diff; `web_search` and task events captured with structured data +- **Gemini** — `write_file` captures diff from `resultDisplay`, `read_file` captures file path with `ReadSampleData` +- **Copilot** — tool extraction added (was returning empty `toolSummaries[]`); now processes `toolRequests` arrays using `classifyToolName` +- **Claude/Droid/Cursor** — inherit rich structured data automatically via shared `extractAnthropicToolData()` +- **OpenCode** — unchanged (data format lacks structured tool call information) + +### Visual: Before vs After + +Here is exactly what a receiving AI editor sees when it gets a handoff document. + +#### BEFORE (v3.0 — flat summaries) + +```markdown +## Tool Activity + +- **Bash** (12 calls): `pnpm test`, `pnpm run build`, `git status`, `git diff --stat`... (+8 more) +- **Read** (8 calls): `src/utils/markdown.ts`, `src/types/index.ts`, `src/parsers/codex.ts`... (+5 more) +- **Edit** (6 calls): `src/utils/markdown.ts`, `src/types/index.ts`... (+4 more) +- **Write** (2 calls): `src/utils/diff.ts`, `src/__tests__/shared-utils.test.ts` +- **Grep** (4 calls): `ToolSample`, `extractAnthropicToolData`, `SummaryCollector`... (+1 more) +- **Glob** (3 calls): `**/*.ts`, `**/CHANGELOG*`, `src/__tests__/**` +``` + +> The receiving AI knows WHAT tools ran, but not WHAT HAPPENED. It cannot see +> diffs, exit codes, stdout, match counts, or line ranges. Every tool call +> is reduced to a one-line label — losing the context that actually matters +> for continuation. 
+ +#### AFTER (v3.1 — smart context display) + +```markdown +## Tool Activity + +### Shell (12 calls, 1 errors) + +> `$ pnpm test` +> Exit: 0 +> ``` +> Test Files 5 passed (5) +> Tests 258 passed (258) +> Duration 241ms +> ``` + +> `$ pnpm run build` +> Exit: 0 + +> `$ git diff --stat` +> Exit: 0 +> ``` +> 11 files changed, 1390 insertions(+), 92 deletions(-) +> ``` + +> `$ tsc --noEmit` +> Exit: 2 **[ERROR]** +> ``` +> src/types/index.ts(45,3): error TS2304: Cannot find name 'StructuredToolSample'. +> ``` + +*...and 8 more shell calls (all exit 0)* + +### Write (2 calls) + +> **`src/utils/diff.ts`** (new file) (+87 lines) +> ```diff +> +export interface DiffResult { +> + diff: string; +> + truncated: number; +> +} +> + +> +export function formatNewFileDiff(content: string, filePath: string, maxLines = 200): DiffResult { +> + const lines = content.split('\n'); +> + const header = `--- /dev/null\n+++ b/${filePath}`; +> + const capped = lines.slice(0, maxLines); +> + const diffLines = capped.map((l) => `+${l}`); +> ``` +> *+77 lines truncated* + +> **`src/__tests__/shared-utils.test.ts`** (+267 lines) +> ```diff +> +describe('classifyToolName', () => { +> + it('classifies shell tools', () => { +> + expect(classifyToolName('Bash')).toBe('shell'); +> + }); +> ``` +> *+260 lines truncated* + +### Edit (6 calls) + +> **`src/utils/markdown.ts`** (+472 -36 lines) +> ```diff +> - if (toolSummaries.length > 0) { +> - lines.push('## Tool Activity'); +> - lines.push(''); +> - for (const tool of toolSummaries) { +> - lines.push(`- **${tool.name}** (${tool.count} calls): ${tool.samples.map(s => s.summary).join(', ')}`); +> - } +> + if (toolSummaries.length > 0) { +> + const caps = mode === 'reference' ? 
REFERENCE_CAPS : INLINE_CAPS; +> + lines.push('## Tool Activity'); +> + lines.push(''); +> + lines.push(...renderToolActivity(toolSummaries, caps)); +> ``` +> *+38 lines truncated* + +> **`src/types/index.ts`** (+117 -8 lines) +> ```diff +> +export interface ShellSampleData { +> + category: 'shell'; +> + command: string; +> + exitCode?: number; +> + stdoutTail?: string; +> + errored?: boolean; +> +} +> ``` +> *+105 lines truncated* + +*...and 4 more edits: `src/utils/tool-extraction.ts` (+257 -30), `src/utils/tool-summarizer.ts` (+70 -20)* + +### Read (8 calls) + +- `src/utils/markdown.ts` +- `src/types/index.ts` +- `src/parsers/codex.ts` +- `src/utils/tool-extraction.ts` +- `src/utils/tool-summarizer.ts` (lines 1-50) +- `src/parsers/copilot.ts` +- `src/parsers/gemini.ts` +- `src/__tests__/unit-conversions.test.ts` (lines 280-320) + +### Grep (4 calls) + +- `"ToolSample"` — 23 matches +- `"extractAnthropicToolData"` in `src/utils/` — 4 matches +- `"SummaryCollector"` — 12 matches +- `"classifyToolName"` — 8 matches + +### Glob (3 calls) + +- `**/*.ts` — 47 files +- `**/CHANGELOG*` — 1 files +- `src/__tests__/**` — 8 files +``` + +> The receiving AI now sees exactly what happened: which commands failed and why, +> what the diffs look like, which files were read at what line ranges, and how many +> grep matches were found. This is the context needed to pick up where the previous +> session left off — not just labels, but actual outcomes. 
+ + +--- + + +## [3.0.0] - 2026-02-21 + +### Breaking Changes + +- **Node.js 22+ required** — uses built-in `node:sqlite` for OpenCode parsing +- **Library exports added** — `continues` is now importable as an ESM package (`import { parseSessions, extractContext } from 'continues'`) +- **Type-safe schemas** — all parser inputs are validated through Zod-like runtime schemas; invalid session data is silently skipped instead of crashing + +### Added + +- **Adapter Registry** (`src/parsers/registry.ts`) — single source of truth for all supported tools. Every parser, CLI command, color, label, resume argument pattern, and storage path is registered in one place. No more switch statements or hardcoded tool lists. Adding a new platform = 3 files, 0 wiring. + +- **Registry-driven cross-tool flag forwarding** — when resuming a session in a different tool, `continues` now automatically translates compatible flags (e.g., `--model`, `--allowedTools`) between tool CLIs using registry-defined flag maps. + +- **Cursor AI support** — full parser for Cursor's agent transcripts under `~/.cursor/projects/*/agent-transcripts/`. Supports file operations, shell commands, codebase search, and MCP tools. + +- **Library entry point** (`src/index.ts`) — exports all types, all parsers, and all utilities for programmatic use. Build AI session analysis tooling on top of `continues` without going through the CLI. + +- **Typed runtime schemas** (`src/types/schemas.ts`) — per-tool schema validators for Claude JSONL, Codex JSONL, Gemini JSON, Copilot YAML+JSONL, Droid JSONL, and Cursor JSONL formats. Parsers validate before accessing fields. + +- **Content block utilities** (`src/types/content-blocks.ts`, `src/utils/content.ts`) — shared extractors for the Anthropic message format used by Claude, Droid, and Cursor (tool_use, tool_result, text, thinking blocks). 
+ +- **Tool name taxonomy** (`src/types/tool-names.ts`) — canonical sets (`SHELL_TOOLS`, `READ_TOOLS`, `WRITE_TOOLS`, `EDIT_TOOLS`, `GREP_TOOLS`, `SEARCH_TOOLS`, `SKIP_TOOLS`, `MCP_TOOL_PATTERN`) used by `SummaryCollector` and the new `classifyToolName()` function. + +- **Shared tool extraction** (`src/utils/tool-extraction.ts`) — `extractAnthropicToolData()` handles the two-pass extraction pattern (collect results by ID, then process tool_use blocks) shared by Claude, Droid, and Cursor parsers. + +- **JSONL streaming utility** (`src/utils/jsonl.ts`) — `streamJsonl()` and `readJsonl()` replace per-parser readline boilerplate. + +- **Filesystem helpers** (`src/utils/fs-helpers.ts`) — `safeReadFile()`, `safeReaddir()`, `safeGlob()` with built-in error suppression. + +- **Structured logging** (`src/logger.ts`) — `Logger` class with `DEBUG` and `VERBOSE` levels, replacing scattered `console.log` calls. + +- **Custom error types** (`src/errors.ts`) — `ParseError`, `SessionNotFoundError`, `ResumeError`, `IndexError` for better error messages. + +- **CLI command modules** — `src/commands/` directory splits the monolithic `cli.ts` (699→~200 LOC) into `list.ts`, `pick.ts`, `resume-cmd.ts`, `scan.ts`, `rebuild.ts`, `quick-resume.ts`, and `_shared.ts`. + +- **Display modules** — `src/display/` directory with `banner.ts` (gradient ASCII art), `format.ts` (session formatting), and `help.ts`. 
+ +- **Comprehensive test suite** — 238 tests across 5 test files: + - `schemas.test.ts` — 62 tests validating all runtime schema validators + - `shared-utils.test.ts` — 46 tests for tool summarizer, extraction, and utility functions + - `unit-conversions.test.ts` — 112 tests covering all 42 cross-tool conversion paths (7 tools x 6 targets) + - `forward-flags.test.ts` — 6 tests for cross-tool flag translation + - `cwd-matching.test.ts` — 12 tests for working directory matching logic + +### Changed + +- **All 7 parsers rewritten** — Claude, Codex, Copilot, Gemini, OpenCode, Droid, and Cursor parsers now use shared utilities (`streamJsonl`, `extractAnthropicToolData`, `SummaryCollector`, content block helpers) instead of duplicating logic. Average parser LOC reduced ~40%. + +- **`SummaryCollector`** (`src/utils/tool-summarizer.ts`) — upgraded from loose strings to typed `ToolSample` objects with deduplicated file tracking and configurable sample limits. + +- **Session index** (`src/utils/index.ts`) — uses `Promise.allSettled` for all parsers so one broken parser cannot crash the CLI. + +- **Resume logic** (`src/utils/resume.ts`) — consults the adapter registry for binary names, argument patterns, and display strings instead of hardcoded switch blocks. + +- **Markdown generator** (`src/utils/markdown.ts`) — now shared by all parsers (previously each parser had inline markdown generation). Produces consistent handoff documents with overview table, tool activity, key decisions, recent conversation, files modified, and pending tasks. 
+ +### Removed + +- `src/__tests__/conversions.test.ts` — replaced by `unit-conversions.test.ts` +- `src/__tests__/parsers.test.ts` — replaced by `schemas.test.ts` + `shared-utils.test.ts` +- Per-parser inline markdown generation — all parsers now call `generateHandoffMarkdown()` + + +--- + + +## [2.7.0] - 2026-02-19 + +### Added + +- **Factory Droid support** — `continues` now discovers and parses sessions from [Factory's Droid CLI](https://www.factory.ai/). Full support for cross-tool handoff to and from Droid, including: + - Session discovery from `~/.factory/sessions/` + - File operations: `Create`, `Read`, `Edit`, `ApplyPatch` + - Shell commands: `Execute`, `Bash` + - MCP tool calls (e.g. `context7___query-docs`) + - Thinking blocks extracted as reasoning highlights + - Token usage and model info from companion `.settings.json` + - Pending tasks from `todo_state` events +- Quick-resume: `continues droid` / `continues droid 3` +- `droid` added to interactive picker, `list --source droid`, `scan`, and cross-tool handoff targets +- Test coverage: 30 conversion paths (up from 20) covering all 6x5 source-target combinations + + +## [2.6.7] - 2026-02-19 + +Previous release. Supported Claude Code, GitHub Copilot CLI, Gemini CLI, Codex CLI, and OpenCode. diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..4b325e2 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,149 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## What This Project Is + +`continues` is a CLI tool that lets users resume AI coding sessions across Claude Code, GitHub Copilot CLI, Gemini CLI, Codex CLI, OpenCode, Factory Droid, and Cursor AI. It reads each tool's native session storage (read-only), extracts context (messages, file changes, tool activity, AI reasoning), and injects it into a different tool as a structured markdown handoff document. 
+ +## Build & Development Commands + +```bash +pnpm install # Install dependencies +pnpm run build # TypeScript compile (tsc) → dist/ +pnpm run dev # Run with tsx (no build step) +pnpm test # Run unit tests (vitest) +pnpm run test:watch # Watch mode +pnpm run link # Build + pnpm link --global (local testing as `continues` / `cont`) +``` + +Run a single test file: +```bash +npx vitest run src/__tests__/unit-conversions.test.ts +``` + +Requires **Node.js 22+** (uses built-in `node:sqlite` for OpenCode parsing). + +## Architecture + +### Core Flow + +``` +CLI (src/cli.ts) → Registry (src/parsers/registry.ts) → Index (src/utils/index.ts) → Parsers (src/parsers/*.ts) → Markdown (src/utils/markdown.ts) → Resume (src/utils/resume.ts) +``` + +1. **Adapter Registry** (`src/parsers/registry.ts`): Central `ToolAdapter` interface and `adapters` record. Every supported CLI tool is registered here with its parser functions, resume commands, color, label, and storage path. All other modules derive their behavior from the registry — no manual switch statements or hardcoded tool lists. + +2. **CLI** (`src/cli.ts`): Commander-based CLI with interactive TUI (@clack/prompts). Handles `list`, `resume`, `scan`, `rebuild`, `pick`, and per-tool quick-resume subcommands. Quick-resume commands and source colors are generated from the registry automatically. + +3. **Session Index** (`src/utils/index.ts`): Builds and caches a unified JSONL index at `~/.continues/sessions.jsonl` (5-min TTL). Calls all parsers in parallel via `Promise.allSettled` (one broken parser won't crash the CLI), merges and sorts by `updatedAt`. + +4. **Parsers** (`src/parsers/*.ts`): One file per tool. Each exports `parseSessions()` (discovery + metadata) and `extractContext()` (full conversation + tool activity extraction). 
Formats vary: + - `claude.ts` — JSONL files under `~/.claude/projects/`, streamed with `readline` + - `codex.ts` — JSONL files under `~/.codex/sessions/`, streamed with `readline` + - `copilot.ts` — YAML workspace + JSONL events under `~/.copilot/session-state/` + - `gemini.ts` — JSON files under `~/.gemini/tmp/*/chats/` + - `opencode.ts` — SQLite DB at `~/.local/share/opencode/opencode.db` (via `node:sqlite`), with JSON file fallback + - `droid.ts` — JSONL + companion `.settings.json` under `~/.factory/sessions/<session-id>/` + - `cursor.ts` — JSONL agent transcripts under `~/.cursor/projects/*/agent-transcripts/` + +5. **Shared Utilities** (`src/utils/parser-helpers.ts`): Common functions shared by parsers — `cleanSummary()`, `extractRepoFromCwd()`, `homeDir()`. + +6. **Tool Summarizer** (`src/utils/tool-summarizer.ts`): `SummaryCollector` class + formatting helpers (`shellSummary`, `fileSummary`, `grepSummary`, etc.) shared by all parsers to produce consistent one-line tool activity summaries. + +7. **Markdown Generator** (`src/utils/markdown.ts`): `generateHandoffMarkdown()` takes parsed session data and produces the structured handoff document with overview table, tool activity, key decisions, recent conversation, files modified, and pending tasks. + +8. **Resume** (`src/utils/resume.ts`): Handles both native resume (same tool) and cross-tool handoff. Uses the adapter registry for CLI binary names and argument patterns. For cross-tool: extracts context, saves `.continues-handoff.md` to project dir, then spawns the target CLI with the inline or reference prompt. + +### Types + +`src/types/index.ts` defines: `SessionSource` (union of 7 tool names), `UnifiedSession`, `ConversationMessage`, `ToolCall`, `ToolUsageSummary`, `SessionNotes`, `SessionContext`, `HandoffOptions`. + +### Adding a New Platform + +Adding support for a new AI coding CLI (e.g. "newtool") requires changes in **4 files**. Use `codex.ts` as the simplest reference parser. + +#### 1. 
Add to the `SessionSource` type — `src/types/index.ts` + +Add the new tool name to the union type: +```ts +export type SessionSource = 'codex' | 'claude' | 'copilot' | 'gemini' | 'opencode' | 'droid' | 'cursor' | 'newtool'; +``` + +#### 2. Create the parser — `src/parsers/newtool.ts` + +Export two functions following the established pattern: + +- `parseNewtoolSessions(): Promise<UnifiedSession[]>` — Discovers session files from the tool's storage directory, reads metadata (id, cwd, repo, branch, timestamps, summary), and returns `UnifiedSession[]` sorted by `updatedAt` descending. +- `extractNewtoolContext(session: UnifiedSession): Promise<SessionContext>` — Reads the full session, extracts `ConversationMessage[]`, uses `SummaryCollector` from `tool-summarizer.ts` to collect tool activity, and calls `generateHandoffMarkdown()` from `utils/markdown.ts` to produce the final markdown. Returns a `SessionContext`. + +Key patterns from existing parsers: +- Import shared utilities: `import { cleanSummary, extractRepoFromCwd, homeDir } from '../utils/parser-helpers.js';` +- Session discovery: walk the tool's storage directory, filter by file extension/naming pattern. +- For JSONL formats: stream with `readline.createInterface` to avoid loading entire files into memory. +- Use `SummaryCollector.add(category, summary, opts?: { data?, filePath?, isWrite?, isError? })` to accumulate tool usage and track modified files. +- Keep only the last ~10 messages in `recentMessages` for the handoff, but ensure at least one user message is included. +- Silently skip files/sessions that fail to parse (use `catch (err) { logger.debug(...) }` — empty `catch {}` blocks fail the Biome linter). + +#### 3. 
Register in the adapter registry — `src/parsers/registry.ts` + +Add an entry to the registry with all metadata, parser functions, and resume commands: +```ts +import { parseNewtoolSessions, extractNewtoolContext } from './newtool.js'; + +register({ + name: 'newtool', + label: 'NewTool', + color: chalk.hex('#FF6600'), + storagePath: '~/.newtool/sessions/', + binaryName: 'newtool', + parseSessions: parseNewtoolSessions, + extractContext: extractNewtoolContext, + nativeResumeArgs: (s) => ['--resume', s.id], + crossToolArgs: (prompt) => [prompt], + resumeCommandDisplay: (s) => `newtool --resume ${s.id}`, +}); +``` + +That's it — the registry automatically wires the new tool into the CLI (quick-resume commands, source colors, help text, session index, resume logic). No switch statements or hardcoded arrays to update. + +#### 4. Add test fixtures — `src/__tests__/fixtures/index.ts` + +Create a `createNewtoolFixture(): FixtureDir` function that: +- Creates a temp directory matching the tool's storage layout. +- Writes minimal but realistic session data (at least 2 user messages + 2 assistant messages). +- Returns `{ root, cleanup }`. + +Then add conversion test cases in `src/__tests__/unit-conversions.test.ts` covering the new tool as both source and target (N-1 new conversion paths for each direction). + +## Testing + +Tests live in `src/__tests__/`. The vitest config (`vitest.config.ts`) **excludes** several test files by pattern: `e2e*`, `real-e2e*`, `stress*`, `injection*`, `parsers.test*`, `conversions.test*` (legacy file; the active suite is `unit-conversions.test.ts`). The primary test suite is `unit-conversions.test.ts`, which uses fixture data from `src/__tests__/fixtures/index.ts` to test all 42 cross-tool conversion paths (7 tools × 6 targets each) without requiring real session files on the machine. + +## Test-Driven Development + +Every code change should follow TDD discipline: + +1. 
**Write the test first** — parser changes, new features, and bug fixes all start with a failing test. +2. **Ground fixtures in real schemas** — before creating fixture data, read a real session file to verify field names and data structure. Use the Read tool or MCP to inspect the actual storage paths (`~/.claude/projects/`, `~/.codex/sessions/`, `~/.copilot/session-state/`, `~/.gemini/tmp/*/chats/`, `~/.local/share/opencode/`, `~/.factory/sessions/`, `~/.cursor/projects/*/agent-transcripts/`). +3. **If real session data isn't available** — ask the user to provide a sample or point to the storage directory. Don't invent schemas from imagination. + +### Test file conventions + +- **Parser/conversion tests**: `src/__tests__/unit-conversions.test.ts` — the primary test suite (fixture-based, all conversion paths) +- **Utility tests**: dedicated files (e.g. `src/__tests__/cwd-matching.test.ts`) +- **Fixtures**: `src/__tests__/fixtures/index.ts` — one `createXxxFixture()` factory per tool + +### Minimum test coverage for PRs + +- **New parser**: fixture factory + low-level parsing tests + all N-1 conversion paths in each direction +- **New utility function**: dedicated test file with edge cases +- **Bug fix**: regression test that reproduces the bug before the fix is applied + +## Key Conventions + +- ESM-only (`"type": "module"` in package.json). All local imports use `.js` extensions. +- `process.exitCode` is set instead of calling `process.exit()` directly. +- The tool suppresses `ExperimentalWarning` from `node:sqlite` at the top of `cli.ts`. +- Session data is **read-only** — the tool never modifies source session files. +- The index cache and handoff contexts are stored under `~/.continues/`. diff --git a/README.md b/README.md index 8726a5d..b7acf4b 100644 --- a/README.md +++ b/README.md @@ -1,287 +1,249 @@ # continues -> Pick up where you left off — seamlessly continue AI coding sessions across Claude, Copilot, Gemini, Codex & OpenCode. 
+> You hit the rate limit mid-debug. 30 messages of context — file changes, architecture decisions, half-finished refactors — and now you either wait hours or start fresh in another tool. **`continues` grabs your session from whichever AI coding tool you were using and hands it off to another one.** Conversation history, file changes, working state — all of it comes along. ```bash npx continues ``` - +https://github.com/user-attachments/assets/6945f3a5-bd19-45ab-9702-6df8e165a734 + [![npm version](https://img.shields.io/npm/v/continues.svg)](https://www.npmjs.com/package/continues) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) -## Why? - -Have you ever hit your daily limit on Claude Code mid-debug? Or burned through your Gemini quota right when things were getting interesting? +## Supported tools -You've built up 30 messages of context — file changes, architecture decisions, debugging history. And now you either wait hours for the limit to reset, or start fresh in another tool and explain everything from scratch. +14 AI coding agents, any-to-any handoff: -**`continues` reads your session from any supported tool, extracts the context, and injects it into whichever tool you switch to.** Your conversation history, file changes, and working directory all come along. +**Claude Code** · **Codex** · **GitHub Copilot CLI** · **Gemini CLI** · **Cursor** · **Amp** · **Cline** · **Roo Code** · **Kilo Code** · **Kiro** · **Crush** · **OpenCode** · **Factory Droid** · **Antigravity** -## Features +That's 182 cross-tool handoff paths. Pick any source, pick any destination — it works. 
-- 🔄 **Cross-tool handoff** — Move sessions between Claude, Copilot, Gemini, Codex & OpenCode -- 🔍 **Auto-discovery** — Scans all 5 tools' session directories automatically -- 🛠️ **Tool activity extraction** — Parses shell commands, file edits, MCP tool calls, patches, and more from every session -- 🧠 **AI reasoning capture** — Extracts thinking blocks, agent reasoning, and model info for richer handoffs -- 📋 **Interactive picker** — Browse, filter, and select sessions with a beautiful TUI -- ⚡ **Quick resume** — `continues claude` / `continues codex 3` — one command, done -- 🖥️ **Scriptable** — JSON/JSONL output, TTY detection, non-interactive mode -- 📊 **Session stats** — `continues scan` to see everything at a glance +## Install -## Installation - -No install needed — just run: +No install needed — just run `npx continues`. Or install globally: ```bash -npx continues +npm install -g continues # gives you `continues` and `cont` ``` -Or install globally: - -```bash -npm install -g continues -``` +## How it works -Both `continues` and `cont` work as commands after global install. - -## Quick Start - -```bash -# Interactive session picker — browse, pick, switch tools -continues - -# List all sessions across every tool -continues list - -# Grab a Claude session and continue it in Gemini -continues resume abc123 --in gemini +1. **Discovery** — scans session directories for all 14 tools +2. **Parsing** — reads each tool's native format (JSONL, JSON, SQLite, YAML — they're all different) +3. **Extraction** — pulls recent messages, file changes, tool activity, AI reasoning +4. **Handoff** — generates a structured context doc and injects it into the target tool -# Quick-resume your latest Claude session (native resume) -continues claude -``` +The handoff document is designed so the receiving agent immediately understands what you were doing, what files were touched, what commands ran, and what's left to do. ## Usage -### Interactive Mode (default) - -Just run `continues`. 
It walks you through: +### Interactive (default) -1. Filter by directory, CLI tool, or browse all -2. Pick a session -3. Choose which CLI tool to continue in (only shows *other* tools — the whole point is switching) - -When you run `continues` from a project directory, it prioritizes sessions from that directory first: +Just run `continues`. It finds all your sessions, lets you pick one, and asks where to continue: ``` ┌ continues — pick up where you left off │ -│ ▸ 12 sessions found in current directory -│ Found 904 sessions across 5 CLI tools -│ claude: 723 codex: 72 copilot: 39 opencode: 38 gemini: 31 +│ Found 1842 sessions across 14 CLI tools +│ claude: 723 codex: 72 cursor: 68 copilot: 39 ... │ -◆ Filter sessions -│ ● This directory (12 sessions) -│ ○ All CLI tools (904 sessions) -│ ○ Claude (723) -│ ○ Codex (72) -│ ○ Copilot (39) -│ ○ Opencode (38) -│ ○ Gemini (31) -└ - -◆ Select a session (12 available) -│ [claude] 2026-02-19 05:28 my-project Debugging SSH tunnel config 84a36c5d -│ [copilot] 2026-02-19 04:41 my-project Migrate presets from Electron c2f5974c -│ [codex] 2026-02-18 23:12 my-project Fix OpenCode SQLite parser a1e90b3f +◆ Select a session +│ [claude] 2026-02-19 05:28 my-project Debugging SSH tunnel config 84a36c5d +│ [copilot] 2026-02-19 04:41 my-project Migrate presets from Electron c2f5974c +│ [codex] 2026-02-18 23:12 my-project Fix OpenCode SQLite parser a1e90b3f │ ... └ -◆ Continue claude session in: -│ ○ Gemini -│ ○ Copilot -│ ○ Codex -│ ○ OpenCode +◆ Continue in: +│ ○ Gemini ○ Codex ○ Amp ○ Kiro ... └ ``` -If no sessions are found for the current directory, all sessions are shown automatically. +When you run from a project directory, sessions from that directory are prioritized. 
+ +### Quick resume -### Non-interactive +Skip the picker entirely — resume the Nth most recent session from a tool: ```bash -continues list # List all sessions -continues list --source claude --json # JSON output, filtered -continues list --jsonl -n 10 # JSONL, limit to 10 -continues scan # Session discovery stats -continues rebuild # Force-rebuild the index +continues claude # latest Claude session +continues codex 3 # 3rd most recent Codex +continues amp # latest Amp +continues cline # latest Cline +continues kiro # latest Kiro +continues crush # latest Crush ``` -`list` output: +Works for all 14 tools. This uses **native resume** — same tool, full history, no context injection. + +### Cross-tool handoff +This is the main thing. Start in one tool, finish in another: + +```bash +# Hit the Claude rate limit? Hand it off to Gemini: +continues resume abc123 --in gemini + +# Or pass flags through to the destination tool: +continues resume abc123 --in codex --yolo --search --add-dir /tmp ``` -Found 894 sessions (showing 5): -[claude] 2026-02-19 05:28 dev-test/SuperCmd SSH tunnel config debugging 84a36c5d -[copilot] 2026-02-19 04:41 migrate-to-tauri Copy Presets From Electron c2f5974c -[codex] 2026-02-18 23:12 cli-continues Fix OpenCode SQLite parser a1e90b3f -[gemini] 2026-02-18 05:10 my-project Tauri window management 96315428 -[opencode] 2026-02-14 17:12 codex-session-picker Where does Codex save JSON files ses_3a2d +`continues` maps common flags (model, sandbox, auto-approve, extra dirs) to the target tool's equivalent. Anything it doesn't recognize gets passed through as-is. 
+ +### Scripting & CI + +```bash +continues list # table output +continues list --source claude --json # JSON, filtered +continues list --jsonl -n 10 # JSONL, last 10 +continues scan # discovery stats +continues scan --rebuild # force re-index ``` -### Quick Resume +### Inspect (for debugging) -Resume the Nth most recent session from a specific tool using native resume (no context injection — fastest, preserves full history): +See exactly what gets parsed and what ends up in the handoff: ```bash -continues claude # Latest Claude session -continues codex 3 # 3rd most recent Codex session -continues copilot # Latest Copilot session -continues gemini 2 # 2nd most recent Gemini session -continues opencode # Latest OpenCode session +continues inspect abc123 # diagnostic view +continues inspect abc123 --preset full --write-md handoff.md # dump full markdown +continues inspect abc123 --truncate 50 # compact one-liner view ``` -### Cross-tool Handoff +### Dump (bulk export) -This is the whole point. Start in one tool, finish in another: +Export all sessions to files for backup, analysis, or archival: ```bash -# You were debugging in Claude, but hit the rate limit. -# Grab the session ID from `continues list` and hand it off: -continues resume abc123 --in gemini +# Export all sessions to markdown (default) +continues dump all ./sessions + +# Export specific tool's sessions +continues dump claude ./sessions/claude +continues dump gemini ./sessions/gemini + +# Export as JSON instead of markdown +continues dump all ./sessions --json -# Or pick interactively — just run `continues`, select a session, -# and choose a different tool as the target. +# Control verbosity with presets +continues dump all ./sessions --preset full + +# Limit number of sessions +continues dump all ./sessions --limit 50 ``` -`continues` extracts your conversation context (messages, file changes, pending tasks) and injects it as a structured prompt into the target tool. 
The target picks up with full awareness of what you were working on. +File naming: `{source}_{id}.md` or `{source}_{id}.json` + +## Verbosity control + +Not every handoff needs to be a novel. Four presets control how much detail goes in: -## How It Works +| Preset | Messages | Tool samples | Subagent detail | When to use | +|:-------|:---------|:-------------|:----------------|:------------| +| `minimal` | 3 | 0 | None | Quick context, token-constrained targets | +| `standard` | 10 | 5 | 500 chars | Default — good balance | +| `verbose` | 20 | 10 | 2000 chars | Debugging, complex multi-file tasks | +| `full` | 50 | All | Everything | Complete session capture | +```bash +continues resume abc123 --preset full ``` -1. Discovery → Scans session directories for all 5 tools -2. Parsing → Reads each tool's native format (JSONL, JSON, SQLite, YAML) -3. Extraction → Pulls recent messages, file changes, tool activity, AI reasoning -4. Summarizing → Groups tool calls by type with concise one-line samples -5. Handoff → Generates a structured context document -6. Injection → Launches target tool with the context pre-loaded + +### YAML config + +For per-project defaults, drop a `.continues.yml` in your project root: + +```yaml +preset: verbose +recentMessages: 15 +shell: + maxSamples: 10 + stdoutLines: 20 ``` -### Tool Activity Extraction +Resolution order: `--config ` → `.continues.yml` in cwd → `~/.continues/config.yml` → `standard` preset. See `.continues.example.yml` for the full reference. + +## What gets extracted -Every tool call from the source session is parsed, categorized, and summarized. The handoff document includes a **Tool Activity** section so the target tool knows exactly what was done — not just what was said. +Every tool stores sessions differently — different formats, different schemas, different paths. 
Here's what `continues` reads: -Shared formatting helpers (`SummaryCollector` + per-tool formatters in `src/utils/tool-summarizer.ts`) keep summaries consistent across all 5 CLIs. Adding support for a new tool type is a one-liner. +| Tool | Format | Where it lives | +|:-----|:-------|:---------------| +| Claude Code | JSONL | `~/.claude/projects/` | +| Codex | JSONL | `~/.codex/sessions/` | +| Copilot | YAML + JSONL | `~/.copilot/session-state/` | +| Gemini CLI | JSON | `~/.gemini/tmp/*/chats/` | +| OpenCode | SQLite | `~/.local/share/opencode/storage/` | +| Factory Droid | JSONL + JSON | `~/.factory/sessions/` | +| Cursor | JSONL | `~/.cursor/projects/*/agent-transcripts/` | +| Amp | JSON | `~/.local/share/amp/threads/` | +| Kiro | JSON | `~/Library/Application Support/Kiro/workspace-sessions/` | +| Crush | SQLite | `~/.crush/crush.db` | +| Cline | JSON | VS Code `globalStorage/saoudrizwan.claude-dev/tasks/` | +| Roo Code | JSON | VS Code `globalStorage/rooveterinaryinc.roo-cline/tasks/` | +| Kilo Code | JSON | VS Code `globalStorage/kilocode.kilo-code/tasks/` | +| Antigravity | JSONL | `~/.gemini/antigravity/code_tracker/` | -**What gets extracted per CLI:** +All reads are **read-only** — `continues` never modifies your session files. Index cached at `~/.continues/sessions.jsonl` (5-min TTL, auto-refresh). 
-| Tool | Extracted | -|:-----|:----------| -| Claude Code | Bash commands (with exit codes), Read/Write/Edit (file paths), Grep/Glob, WebFetch/WebSearch, Task/subagent dispatches, MCP tools (`mcp__*`), thinking blocks → reasoning notes | -| Codex CLI | exec_command/shell_command (grouped by base command: `npm`, `git`, etc.), apply_patch (file paths from patch format), web_search, write_stdin, MCP resources, agent_reasoning → reasoning notes, token usage | -| Gemini CLI | read_file/write_file (with `diffStat`: +N -M lines), thoughts → reasoning notes, model info, token usage (accumulated) | -| Copilot CLI | Session metadata from workspace.yaml (tool calls not persisted by Copilot) | -| OpenCode | Messages from SQLite DB or JSON fallback (tool-specific parts TBD) | +### Tool activity in handoffs -**Example handoff output:** +The handoff document includes a **Tool Activity** section so the target agent knows what was *done*, not just what was *said*: ```markdown ## Tool Activity - **Bash** (×47): `$ npm test → exit 0` · `$ git status → exit 0` · `$ npm run build → exit 1` - **Edit** (×12): `edit src/auth.ts` · `edit src/api/routes.ts` · `edit tests/auth.test.ts` - **Grep** (×8): `grep "handleLogin" src/` · `grep "JWT_SECRET"` · `grep "middleware"` -- **apply_patch** (×5): `patch: src/utils/db.ts, src/models/user.ts` ## Session Notes - **Model**: claude-sonnet-4 -- **Tokens**: 45,230 input, 12,847 output +- **Tokens**: 45,230 in / 12,847 out - 💭 Need to handle the edge case where token refresh races with logout -- 💭 The middleware chain order matters — auth must come before rate limiting -``` - -### Session Storage - -`continues` reads session data from each tool's native storage. Read-only — it doesn't modify or copy anything. 
- -| Tool | Location | Format | -|:-----|:---------|:-------| -| Claude Code | `~/.claude/projects/` | JSONL | -| GitHub Copilot | `~/.copilot/session-state/` | YAML + JSONL | -| Google Gemini CLI | `~/.gemini/tmp/*/chats/` | JSON | -| OpenAI Codex | `~/.codex/sessions/` | JSONL | -| OpenCode | `~/.local/share/opencode/` | SQLite | - -Session index cached at `~/.continues/sessions.jsonl`. Auto-refreshes when stale (5 min TTL). - -## Commands - -``` -continues Interactive TUI picker (default) -continues list List all sessions -continues resume Resume by session ID -continues resume --in Cross-tool handoff -continues scan Session discovery statistics -continues rebuild Force-rebuild session index -continues [n] Quick-resume Nth session from tool ``` -### `continues` / `continues pick` +This works for all 14 tools — bash commands, file reads/writes/edits, grep/glob, MCP tool calls, thinking blocks, subagent dispatches, token usage, model info. The shared `SummaryCollector` keeps the format consistent regardless of source. -Interactive session picker. Requires a TTY. +Every handoff also includes the **full file path** of the original session, so the receiving tool can trace back to the raw data if needed. 
-| Flag | Description |
-|:-----|:------------|
-| `-s, --source <tool>` | Pre-filter to one tool |
-| `--no-tui` | Disable interactive mode |
-| `--rebuild` | Force-rebuild index first |
+## Commands reference

-### `continues list` (alias: `ls`)
+| Command | What it does |
+|:--------|:-------------|
+| `continues` | Interactive TUI picker |
+| `continues list` | List sessions (`--source`, `--json`, `--jsonl`, `-n`) |
+| `continues resume <id>` | Resume by ID (`--in <tool>`, `--preset`) |
+| `continues inspect <id>` | Diagnostic view (`--truncate`, `--write-md`, `--preset`) |
+| `continues dump <tool> <dir>` | Bulk export sessions (`--json`, `--preset`, `--limit`) |
+| `continues scan` | Discovery stats (`--rebuild`) |
+| `continues rebuild` | Force-rebuild session index |
+| `continues <tool> [n]` | Quick-resume Nth session from any of the 14 tools |

-| Flag | Description | Default |
-|:-----|:------------|:--------|
-| `-s, --source <tool>` | Filter by tool | all |
-| `-n, --limit <n>` | Max sessions to show | 50 |
-| `--json` | Output as JSON array | — |
-| `--jsonl` | Output as JSONL | — |
-| `--rebuild` | Force-rebuild index first | — |
+Global flags: `--config <path>`, `--preset <name>`, `--verbose`, `--debug`

-### `continues resume <id>` (alias: `r`)
+## Community contributions

-| Flag | Description | Default |
-|:-----|:------------|:--------|
-| `-i, --in <tool>` | Target tool for cross-tool handoff | — |
-| `--no-tui` | Skip interactive prompts | — |
+This started as a 7-tool project and grew fast thanks to contributors:

-### `continues scan`
+- **Factory Droid support** — [#1](https://github.com/yigitkonur/cli-continues/pull/1), first community parser
+- **Cursor AI support** — [#4](https://github.com/yigitkonur/cli-continues/pull/4) by [@Evrim267](https://github.com/Evrim267), with smart slug-to-path resolution
+- **Single-tool error handling** — [#3](https://github.com/yigitkonur/cli-continues/pull/3) by [@barisgirismen](https://github.com/barisgirismen), clear error when only one CLI is installed
+- **Env var 
overrides** — [#14](https://github.com/yigitkonur/cli-continues/pull/14) by [@yutakobayashidev](https://github.com/yutakobayashidev), respects `CLAUDE_CONFIG_DIR`, `CODEX_HOME`, `GEMINI_CLI_HOME`, `XDG_DATA_HOME` -| Flag | Description | -|:-----|:------------| -| `--rebuild` | Force-rebuild index first | +The latest batch — **Amp, Kiro, Crush, Cline, Roo Code, Kilo Code, and Antigravity** — was added by reverse-engineering [mnemo](https://github.com/Pilan-AI/mnemo)'s Go adapters and adapting the schemas for TypeScript. Along the way we also improved token/cache/model extraction for the existing Claude, Codex, Cursor, and Gemini parsers. -### `continues [n]` - -Quick-resume using native resume (same tool, no context injection). -Tools: `claude`, `copilot`, `gemini`, `codex`, `opencode`. Default `n` is 1. - -## Conversion Matrix - -All 20 cross-tool paths are supported and tested: - -| | → Claude | → Copilot | → Gemini | → Codex | → OpenCode | -|:--|:--------:|:---------:|:--------:|:-------:|:----------:| -| **Claude** | — | ✅ | ✅ | ✅ | ✅ | -| **Copilot** | ✅ | — | ✅ | ✅ | ✅ | -| **Gemini** | ✅ | ✅ | — | ✅ | ✅ | -| **Codex** | ✅ | ✅ | ✅ | — | ✅ | -| **OpenCode** | ✅ | ✅ | ✅ | ✅ | — | - -Same-tool resume is available via `continues ` shortcuts (native resume, not shown in matrix). 
+**Bugs fixed in this round:** +- Symlink traversal — `fs.Dirent.isDirectory()` returns `false` for symlinks; fixed with `isSymbolicLink() && statSync()` fallback +- Zero-token display — no longer shows "0 in / 0 out" when a session has no token data +- Key Decisions count — now respects the verbosity config instead of being hardcoded to 5 ## Requirements -- **Node.js 22+** (uses built-in `node:sqlite` for OpenCode parsing) -- At least one of: Claude Code, GitHub Copilot, Gemini CLI, Codex, or OpenCode +- **Node.js 22+** (uses built-in `node:sqlite` for OpenCode and Crush) +- At least one of the 14 supported tools installed +- `sqlite3` CLI binary (only needed for Crush — ships with macOS) ## Development @@ -290,12 +252,14 @@ git clone https://github.com/yigitkonur/cli-continues cd cli-continues pnpm install -pnpm run dev # Run with tsx (no build needed) -pnpm run build # Compile TypeScript -pnpm test # Run 122 tests -pnpm run test:watch # Watch mode +pnpm run dev # run with tsx, no build needed +pnpm run build # compile TypeScript +pnpm test # run tests +pnpm run test:watch # watch mode ``` +Adding a new tool? Create a parser in `src/parsers/`, add the tool name to `src/types/tool-names.ts`, register it in `src/parsers/registry.ts`. The registry has a compile-time completeness check — if you add a name but forget the parser, it throws at import. + ## License MIT © [Yigit Konur](https://github.com/yigitkonur) diff --git a/REVIEW.md b/REVIEW.md new file mode 100644 index 0000000..3420acf --- /dev/null +++ b/REVIEW.md @@ -0,0 +1,104 @@ +# REVIEW.md + +Review guidelines for the `continues` CLI tool — a read-only session parser and cross-tool handoff generator for AI coding CLIs. + +## Critical Areas + +- **Parser files** (`src/parsers/*.ts`): Each parser reads from a different tool's session storage. Changes must preserve read-only semantics — the tool must never write to or modify source session files. 
Verify that new parsers handle malformed/missing data gracefully (skip malformed data and log via `logger.debug` — never thrown errors that crash the CLI).
+- **Registry** (`src/parsers/registry.ts`): Single source of truth for all tool adapters. Any change here affects every tool. Verify the completeness assertion at the bottom still passes — every `SessionSource` must have a registered adapter.
+- **Resume logic** (`src/utils/resume.ts`): Spawns external CLI processes. Review for command injection — user-controlled session IDs and paths are passed as CLI arguments. Ensure arguments are passed as array elements to `spawn()`, never interpolated into a shell string.
+- **Forward flags** (`src/utils/forward-flags.ts`): Maps CLI flags across tools with different permission models. Flag precedence logic (auto-approve > full-auto > sandbox) is security-sensitive — incorrect mapping could grant unintended permissions in the target tool.
+- **Type definitions** (`src/types/index.ts`, `src/types/tool-names.ts`): Changes to `SessionSource` or `TOOL_NAMES` require corresponding updates in the registry, fixtures, and tests.
+
+## Conventions
+
+- ESM-only: all local imports must use `.js` extensions, even for `.ts` source files.
+- Use `process.exitCode = N` instead of `process.exit(N)`.
+- Biome handles linting and formatting — do not introduce ESLint or Prettier configs.
+- Parser functions must return `Promise<UnifiedSession[]>` and `Promise<SessionContext>` respectively. Both must be registered in `src/parsers/registry.ts`.
+- JSONL parsing must stream with `readline.createInterface` — do not load entire files into memory with `fs.readFileSync`.
+- Use the `SummaryCollector` class from `src/utils/tool-summarizer.ts` for tool activity summaries. Do not manually build summary arrays.
+- Shared helpers (`cleanSummary`, `extractRepoFromCwd`, `homeDir`) live in `src/utils/parser-helpers.ts`. Do not duplicate these in individual parsers. 
+- Error hierarchy: use typed errors from `src/errors.ts` (`ParseError`, `SessionNotFoundError`, `ToolNotAvailableError`, `UnknownSourceError`, `IndexError`, `StorageError`) rather than bare `throw new Error()` for user-facing error paths. +- Use `logger` from `src/logger.ts` for diagnostic output inside catch blocks — do not use raw `console.log`, `console.warn`, or `console.error`. Silent empty `catch {}` blocks violate the Biome `noEmptyBlockStatements` rule; use `logger.debug` or `logger.warn` instead. + +## Security + +- Session data is **read-only**. Any PR that writes to tool storage directories (`~/.claude/`, `~/.codex/`, `~/.copilot/`, etc.) is a severe bug. +- External process spawning in `resume.ts` must use `spawn()` with arguments as an array, never `exec()` with string interpolation. This prevents command injection via session IDs or file paths containing shell metacharacters. +- The `--dangerously-skip-permissions` / `--dangerously-bypass-approvals-and-sandbox` flags in forward-flag mapping are security-critical. Verify these are only set when the source session explicitly requested auto-approve behavior. +- Handoff markdown files (`.continues-handoff.md`) are written to project directories and may be read by other AI tools. Do not embed secrets, API keys, or environment variable values in handoff output. + +## Performance + +- Parsers run in parallel via `Promise.allSettled` in the session index builder. A slow parser blocks only its own results, not the entire index. However, flag any parser that performs synchronous I/O or blocks the event loop. +- The session index uses a 5-minute TTL cache (`~/.continues/sessions.jsonl`). Changes that bypass or invalidate the cache should be justified. +- Flag any use of `fs.readFileSync` or `fs.writeFileSync` in parser code — these block the event loop and can stall the CLI when scanning large session directories. +- SQLite access (OpenCode, Crush parsers) uses `node:sqlite`. 
Queries should be parameterized and avoid scanning entire tables when filtering by session ID. + +## Patterns + +### Adding a New Parser + +Every new parser must follow this three-file pattern. Missing any of these is a bug. + +1. **Parser file** — `src/parsers/.ts` exporting `parseSessions()` and `extractContext()` +2. **Registry entry** — `src/parsers/registry.ts` with all `ToolAdapter` fields populated +3. **Type update** — `src/types/tool-names.ts` adding the tool to `SessionSource` and `TOOL_NAMES` +4. **Test fixtures** — `src/__tests__/fixtures/index.ts` with a `createFixture()` factory +5. **Conversion tests** — `src/__tests__/unit-conversions.test.ts` covering all N-1 paths in each direction + +### Error Handling in Parsers + +Parsers must never crash the CLI. Malformed session files should be silently skipped. + +**Good:** +```typescript +try { + const data = JSON.parse(line); + // process data... +} catch (err) { + logger.debug('skipping malformed line', err); +} +``` + +**Bad:** +```typescript +const data = JSON.parse(line); // Throws on malformed input, crashes CLI +``` + +### Tool Summarizer Usage + +Always use `SummaryCollector` — do not build tool summaries manually. + +**Good:** +```typescript +const collector = new SummaryCollector(); +collector.add('shell', shellSummary(cmd, exitCode), undefined, false); +collector.add('write', fileSummary(filePath, 'write'), filePath, true); +return collector.finalize(); +``` + +**Bad:** +```typescript +const toolSummaries: ToolUsageSummary[] = []; +toolSummaries.push({ name: 'Bash', count: 1, samples: [{ summary: cmd }] }); +``` + +## Testing + +- The primary test suite is `src/__tests__/unit-conversions.test.ts`. All 14 tools x 13 targets = 182 conversion paths must pass. +- PRs that change parser logic must include or update fixture data in `src/__tests__/fixtures/index.ts`. 
+- PRs that add source files under `src/` but do not touch any test files should be flagged — the CI will also flag this via the test-quality job. +- Node.js 22+ is required. The `node:sqlite` built-in is used by OpenCode and Crush parsers. Do not add third-party SQLite dependencies. +- Test timeout is 30 seconds (`vitest.config.ts`). If a test needs more, the parser likely has a performance problem. + +## Ignore + +- `dist/` — compiled output, auto-generated by `tsc`. +- `node_modules/` — dependency tree. +- `pnpm-lock.yaml`, `package-lock.json` — lock files, unless dependency changes are part of the PR. +- `CHANGELOG.md` — auto-generated by release tooling. +- `demo.mp4` — demo video, not code. +- `test-fixtures/` — legacy fixture directory (active fixtures are in `src/__tests__/fixtures/`). +- `.DS_Store` — macOS metadata. diff --git a/biome.json b/biome.json new file mode 100644 index 0000000..428b4ef --- /dev/null +++ b/biome.json @@ -0,0 +1,38 @@ +{ + "$schema": "https://biomejs.dev/schemas/2.4.4/schema.json", + "assist": { "actions": { "source": { "organizeImports": "on" } } }, + "formatter": { + "enabled": true, + "indentStyle": "space", + "indentWidth": 2, + "lineWidth": 120 + }, + "javascript": { + "formatter": { + "quoteStyle": "single", + "trailingCommas": "all", + "semicolons": "always" + } + }, + "linter": { + "enabled": true, + "rules": { + "recommended": true, + "suspicious": { + "noEmptyBlockStatements": "error", + "noExplicitAny": "warn" + }, + "correctness": { + "noUnusedImports": "error", + "noUnusedVariables": "warn" + }, + "style": { + "useConst": "error", + "noNonNullAssertion": "off" + } + } + }, + "files": { + "includes": ["**/src/**/*.ts", "!**/dist/**", "!**/node_modules/**", "!**/*.json", "!**/*.md"] + } +} diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 0000000..b667372 --- /dev/null +++ b/package-lock.json @@ -0,0 +1,2160 @@ +{ + "name": "continues", + "version": "4.0.2", + "lockfileVersion": 3, + 
"requires": true, + "packages": { + "": { + "name": "continues", + "version": "4.0.2", + "hasInstallScript": true, + "license": "MIT", + "dependencies": { + "@clack/prompts": "^1.0.1", + "chalk": "^4.1.2", + "commander": "^14.0.3", + "ora": "^5.4.1", + "yaml": "^2.8.2", + "zod": "^4.3.6" + }, + "bin": { + "cont": "dist/cli.js", + "continues": "dist/cli.js" + }, + "devDependencies": { + "@biomejs/biome": "^2.4.4", + "@types/node": "^25.1.0", + "tsx": "^4.21.0", + "typescript": "^5.9.3", + "vitest": "^4.0.18" + }, + "engines": { + "node": ">=22.0.0" + } + }, + "node_modules/@biomejs/biome": { + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/@biomejs/biome/-/biome-2.4.4.tgz", + "integrity": "sha512-tigwWS5KfJf0cABVd52NVaXyAVv4qpUXOWJ1rxFL8xF1RVoeS2q/LK+FHgYoKMclJCuRoCWAPy1IXaN9/mS61Q==", + "dev": true, + "license": "MIT OR Apache-2.0", + "bin": { + "biome": "bin/biome" + }, + "engines": { + "node": ">=14.21.3" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/biome" + }, + "optionalDependencies": { + "@biomejs/cli-darwin-arm64": "2.4.4", + "@biomejs/cli-darwin-x64": "2.4.4", + "@biomejs/cli-linux-arm64": "2.4.4", + "@biomejs/cli-linux-arm64-musl": "2.4.4", + "@biomejs/cli-linux-x64": "2.4.4", + "@biomejs/cli-linux-x64-musl": "2.4.4", + "@biomejs/cli-win32-arm64": "2.4.4", + "@biomejs/cli-win32-x64": "2.4.4" + } + }, + "node_modules/@biomejs/cli-darwin-arm64": { + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/@biomejs/cli-darwin-arm64/-/cli-darwin-arm64-2.4.4.tgz", + "integrity": "sha512-jZ+Xc6qvD6tTH5jM6eKX44dcbyNqJHssfl2nnwT6vma6B1sj7ZLTGIk6N5QwVBs5xGN52r3trk5fgd3sQ9We9A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-darwin-x64": { + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/@biomejs/cli-darwin-x64/-/cli-darwin-x64-2.4.4.tgz", + 
"integrity": "sha512-Dh1a/+W+SUCXhEdL7TiX3ArPTFCQKJTI1mGncZNWfO+6suk+gYA4lNyJcBB+pwvF49uw0pEbUS49BgYOY4hzUg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-linux-arm64": { + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-arm64/-/cli-linux-arm64-2.4.4.tgz", + "integrity": "sha512-V/NFfbWhsUU6w+m5WYbBenlEAz8eYnSqRMDMAW3K+3v0tYVkNyZn8VU0XPxk/lOqNXLSCCrV7FmV/u3SjCBShg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-linux-arm64-musl": { + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-arm64-musl/-/cli-linux-arm64-musl-2.4.4.tgz", + "integrity": "sha512-+sPAXq3bxmFwhVFJnSwkSF5Rw2ZAJMH3MF6C9IveAEOdSpgajPhoQhbbAK12SehN9j2QrHpk4J/cHsa/HqWaYQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-linux-x64": { + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-x64/-/cli-linux-x64-2.4.4.tgz", + "integrity": "sha512-R4+ZCDtG9kHArasyBO+UBD6jr/FcFCTH8QkNTOCu0pRJzCWyWC4EtZa2AmUZB5h3e0jD7bRV2KvrENcf8rndBg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-linux-x64-musl": { + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-x64-musl/-/cli-linux-x64-musl-2.4.4.tgz", + "integrity": "sha512-gGvFTGpOIQDb5CQ2VC0n9Z2UEqlP46c4aNgHmAMytYieTGEcfqhfCFnhs6xjt0S3igE6q5GLuIXtdQt3Izok+g==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + 
"os": [ + "linux" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-win32-arm64": { + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/@biomejs/cli-win32-arm64/-/cli-win32-arm64-2.4.4.tgz", + "integrity": "sha512-trzCqM7x+Gn832zZHgr28JoYagQNX4CZkUZhMUac2YxvvyDRLJDrb5m9IA7CaZLlX6lTQmADVfLEKP1et1Ma4Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-win32-x64": { + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/@biomejs/cli-win32-x64/-/cli-win32-x64-2.4.4.tgz", + "integrity": "sha512-gnOHKVPFAAPrpoPt2t+Q6FZ7RPry/FDV3GcpU53P3PtLNnQjBmKyN2Vh/JtqXet+H4pme8CC76rScwdjDcT1/A==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@clack/core": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@clack/core/-/core-1.0.1.tgz", + "integrity": "sha512-WKeyK3NOBwDOzagPR5H08rFk9D/WuN705yEbuZvKqlkmoLM2woKtXb10OO2k1NoSU4SFG947i2/SCYh+2u5e4g==", + "license": "MIT", + "dependencies": { + "picocolors": "^1.0.0", + "sisteransi": "^1.0.5" + } + }, + "node_modules/@clack/prompts": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@clack/prompts/-/prompts-1.0.1.tgz", + "integrity": "sha512-/42G73JkuYdyWZ6m8d/CJtBrGl1Hegyc7Fy78m5Ob+jF85TOUmLR5XLce/U3LxYAw0kJ8CT5aI99RIvPHcGp/Q==", + "license": "MIT", + "dependencies": { + "@clack/core": "1.0.1", + "picocolors": "^1.0.0", + "sisteransi": "^1.0.5" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.3.tgz", + "integrity": "sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + 
"optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.3.tgz", + "integrity": "sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.3.tgz", + "integrity": "sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.3.tgz", + "integrity": "sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.3.tgz", + "integrity": "sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.3.tgz", + "integrity": 
"sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.3.tgz", + "integrity": "sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.3.tgz", + "integrity": "sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.3.tgz", + "integrity": "sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.3.tgz", + "integrity": "sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.27.3", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.3.tgz", + "integrity": "sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.3.tgz", + "integrity": "sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.3.tgz", + "integrity": "sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.3.tgz", + "integrity": "sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.3.tgz", + "integrity": "sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + 
"node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.3.tgz", + "integrity": "sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.3.tgz", + "integrity": "sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.3.tgz", + "integrity": "sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.3.tgz", + "integrity": "sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.3.tgz", + "integrity": "sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==", + "cpu": [ + "arm64" + ], + "dev": true, + 
"license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.3.tgz", + "integrity": "sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.3.tgz", + "integrity": "sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.3.tgz", + "integrity": "sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.3.tgz", + "integrity": "sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.3.tgz", + "integrity": 
"sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.3.tgz", + "integrity": "sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.59.0.tgz", + "integrity": "sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.59.0.tgz", + "integrity": "sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.59.0.tgz", + "integrity": 
"sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.59.0.tgz", + "integrity": "sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.59.0.tgz", + "integrity": "sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.59.0.tgz", + "integrity": "sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.59.0.tgz", + "integrity": "sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.59.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.59.0.tgz", + "integrity": "sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.59.0.tgz", + "integrity": "sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.59.0.tgz", + "integrity": "sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.59.0.tgz", + "integrity": "sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.59.0.tgz", + "integrity": "sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + 
"node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.59.0.tgz", + "integrity": "sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.59.0.tgz", + "integrity": "sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.59.0.tgz", + "integrity": "sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.59.0.tgz", + "integrity": "sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.59.0.tgz", + "integrity": "sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==", + "cpu": [ + "s390x" + ], + "dev": true, + 
"license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.59.0.tgz", + "integrity": "sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.59.0.tgz", + "integrity": "sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.59.0.tgz", + "integrity": "sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.59.0.tgz", + "integrity": "sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.59.0.tgz", + "integrity": "sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==", + 
"cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.59.0.tgz", + "integrity": "sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.59.0.tgz", + "integrity": "sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.59.0.tgz", + "integrity": "sha512-2HRCml6OztYXyJXAvdDXPKcawukWY2GpR5/nxKp4iBgiO3wcoEGkAaqctIbZcNB6KlUQBIqt8VYkNSj2397EfA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@standard-schema/spec": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz", + "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/chai": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz", + "integrity": "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*", + "assertion-error": "^2.0.1" + } + }, + 
"node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "25.3.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-25.3.0.tgz", + "integrity": "sha512-4K3bqJpXpqfg2XKGK9bpDTc6xO/xoUP/RBWS7AtRMug6zZFaRekiLzjVtAoZMquxoAbzBvy5nxQ7veS5eYzf8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~7.18.0" + } + }, + "node_modules/@vitest/expect": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.18.tgz", + "integrity": "sha512-8sCWUyckXXYvx4opfzVY03EOiYVxyNrHS5QxX3DAIi5dpJAAkyJezHCP77VMX4HKA2LDT/Jpfo8i2r5BE3GnQQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@standard-schema/spec": "^1.0.0", + "@types/chai": "^5.2.2", + "@vitest/spy": "4.0.18", + "@vitest/utils": "4.0.18", + "chai": "^6.2.1", + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/mocker": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.18.tgz", + "integrity": "sha512-HhVd0MDnzzsgevnOWCBj5Otnzobjy5wLBe4EdeeFGv8luMsGcYqDuFRMcttKWZA5vVO8RFjexVovXvAM4JoJDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "4.0.18", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.21" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^6.0.0 || ^7.0.0-0" + }, + 
"peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/pretty-format": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.18.tgz", + "integrity": "sha512-P24GK3GulZWC5tz87ux0m8OADrQIUVDPIjjj65vBXYG17ZeU3qD7r+MNZ1RNv4l8CGU2vtTRqixrOi9fYk/yKw==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.18.tgz", + "integrity": "sha512-rpk9y12PGa22Jg6g5M3UVVnTS7+zycIGk9ZNGN+m6tZHKQb7jrP7/77WfZy13Y/EUDd52NDsLRQhYKtv7XfPQw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/utils": "4.0.18", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.18.tgz", + "integrity": "sha512-PCiV0rcl7jKQjbgYqjtakly6T1uwv/5BQ9SwBLekVg/EaYeQFPiXcgrC2Y7vDMA8dM1SUEAEV82kgSQIlXNMvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "4.0.18", + "magic-string": "^0.30.21", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.18.tgz", + "integrity": "sha512-cbQt3PTSD7P2OARdVW3qWER5EGq7PHlvE+QfzSC0lbwO+xnt7+XH06ZzFjFRgzUX//JmpxrCu92VdwvEPlWSNw==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.18.tgz", + "integrity": 
"sha512-msMRKLMVLWygpK3u2Hybgi4MNjcYJvwTb0Ru09+fOyCXIgT5raYP041DRRdiJiI3k/2U6SEbAETB3YtBrUkCFA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "4.0.18", + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/bl": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", + "integrity": 
"sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "license": "MIT", + "dependencies": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, + "node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, + "node_modules/chai": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.2.tgz", + "integrity": "sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/cli-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", + "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", + "license": "MIT", + "dependencies": { + "restore-cursor": "^3.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cli-spinners": { + "version": "2.9.2", + "resolved": 
"https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz", + "integrity": "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==", + "license": "MIT", + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/clone": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", + "integrity": "sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==", + "license": "MIT", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "license": "MIT" + }, + "node_modules/commander": { + "version": "14.0.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-14.0.3.tgz", + "integrity": "sha512-H+y0Jo/T1RZ9qPP4Eh1pkcQcLRglraJaSLoyOtHxu6AapkjWVCy2Sit1QQ4x3Dng8qDlSsZEet7g5Pq06MvTgw==", + "license": "MIT", + "engines": { + "node": ">=20" + } + }, + "node_modules/defaults": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.4.tgz", + "integrity": "sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==", + "license": "MIT", + "dependencies": { + "clone": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": 
"https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "dev": true, + "license": "MIT" + }, + "node_modules/esbuild": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.3.tgz", + "integrity": "sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.27.3", + "@esbuild/android-arm": "0.27.3", + "@esbuild/android-arm64": "0.27.3", + "@esbuild/android-x64": "0.27.3", + "@esbuild/darwin-arm64": "0.27.3", + "@esbuild/darwin-x64": "0.27.3", + "@esbuild/freebsd-arm64": "0.27.3", + "@esbuild/freebsd-x64": "0.27.3", + "@esbuild/linux-arm": "0.27.3", + "@esbuild/linux-arm64": "0.27.3", + "@esbuild/linux-ia32": "0.27.3", + "@esbuild/linux-loong64": "0.27.3", + "@esbuild/linux-mips64el": "0.27.3", + "@esbuild/linux-ppc64": "0.27.3", + "@esbuild/linux-riscv64": "0.27.3", + "@esbuild/linux-s390x": "0.27.3", + "@esbuild/linux-x64": "0.27.3", + "@esbuild/netbsd-arm64": "0.27.3", + "@esbuild/netbsd-x64": "0.27.3", + "@esbuild/openbsd-arm64": "0.27.3", + "@esbuild/openbsd-x64": "0.27.3", + "@esbuild/openharmony-arm64": "0.27.3", + "@esbuild/sunos-x64": "0.27.3", + "@esbuild/win32-arm64": "0.27.3", + "@esbuild/win32-ia32": "0.27.3", + "@esbuild/win32-x64": "0.27.3" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/expect-type": { + "version": "1.3.0", + 
"resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", + "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/get-tsconfig": { + "version": "4.13.6", + "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.13.6.tgz", + "integrity": "sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw==", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve-pkg-maps": "^1.0.0" + }, + "funding": { + "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": 
"sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/is-interactive": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-1.0.0.tgz", + "integrity": "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "license": "MIT", + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": 
"sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/obug": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/obug/-/obug-2.1.1.tgz", + "integrity": "sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==", + "dev": true, + "funding": [ + "https://github.com/sponsors/sxzz", + "https://opencollective.com/debug" + ], + "license": "MIT" + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/ora/-/ora-5.4.1.tgz", + "integrity": "sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ==", + "license": 
"MIT", + "dependencies": { + "bl": "^4.1.0", + "chalk": "^4.1.0", + "cli-cursor": "^3.1.0", + "cli-spinners": "^2.5.0", + "is-interactive": "^1.0.0", + "is-unicode-supported": "^0.1.0", + "log-symbols": "^4.1.0", + "strip-ansi": "^6.0.0", + "wcwidth": "^1.0.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "dev": true, + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, 
+ "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/resolve-pkg-maps": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", + "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" + } + }, + "node_modules/restore-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", + "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", + "license": "MIT", + "dependencies": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/rollup": { + "version": "4.59.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.59.0.tgz", + "integrity": "sha512-2oMpl67a3zCH9H79LeMcbDhXW/UmWG/y2zuqnF2jQq5uq9TbM9TVyXvA4+t+ne2IIkBdrLpAaRQAvo7YI/Yyeg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.59.0", + "@rollup/rollup-android-arm64": "4.59.0", + "@rollup/rollup-darwin-arm64": "4.59.0", + "@rollup/rollup-darwin-x64": "4.59.0", + "@rollup/rollup-freebsd-arm64": "4.59.0", + "@rollup/rollup-freebsd-x64": "4.59.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.59.0", + 
"@rollup/rollup-linux-arm-musleabihf": "4.59.0", + "@rollup/rollup-linux-arm64-gnu": "4.59.0", + "@rollup/rollup-linux-arm64-musl": "4.59.0", + "@rollup/rollup-linux-loong64-gnu": "4.59.0", + "@rollup/rollup-linux-loong64-musl": "4.59.0", + "@rollup/rollup-linux-ppc64-gnu": "4.59.0", + "@rollup/rollup-linux-ppc64-musl": "4.59.0", + "@rollup/rollup-linux-riscv64-gnu": "4.59.0", + "@rollup/rollup-linux-riscv64-musl": "4.59.0", + "@rollup/rollup-linux-s390x-gnu": "4.59.0", + "@rollup/rollup-linux-x64-gnu": "4.59.0", + "@rollup/rollup-linux-x64-musl": "4.59.0", + "@rollup/rollup-openbsd-x64": "4.59.0", + "@rollup/rollup-openharmony-arm64": "4.59.0", + "@rollup/rollup-win32-arm64-msvc": "4.59.0", + "@rollup/rollup-win32-ia32-msvc": "4.59.0", + "@rollup/rollup-win32-x64-gnu": "4.59.0", + "@rollup/rollup-win32-x64-msvc": "4.59.0", + "fsevents": "~2.3.2" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", + "dev": true, + "license": "ISC" + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "license": "ISC" + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": 
"https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "license": "MIT" + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/std-env": { + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz", + "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==", + "dev": true, + "license": "MIT" + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": 
"sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/tinybench": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", + "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.2.tgz", + "integrity": "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyrainbow": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-3.0.3.tgz", + "integrity": "sha512-PSkbLUoxOFRzJYjjxHJt9xro7D+iilgMX/C9lawzVuYiIdcihh9DXmVibBe8lmcFrRi/VzlPjBxbN7rH24q8/Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tsx": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.21.0.tgz", + "integrity": "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "~0.27.0", + "get-tsconfig": "^4.7.5" + }, + "bin": { + "tsx": "dist/cli.mjs" + }, + "engines": { + 
"node": ">=18.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "7.18.2", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.18.2.tgz", + "integrity": "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==", + "dev": true, + "license": "MIT" + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT" + }, + "node_modules/vite": { + "version": "7.3.1", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz", + "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.27.0", + "fdir": "^6.5.0", + "picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rollup": "^4.43.0", + "tinyglobby": "^0.2.15" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "lightningcss": "^1.21.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" 
+ }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/vitest": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.18.tgz", + "integrity": "sha512-hOQuK7h0FGKgBAas7v0mSAsnvrIgAvWmRFjmzpJ7SwFHH3g1k2u37JtYwOwmEKhK6ZO3v9ggDBBm0La1LCK4uQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/expect": "4.0.18", + "@vitest/mocker": "4.0.18", + "@vitest/pretty-format": "4.0.18", + "@vitest/runner": "4.0.18", + "@vitest/snapshot": "4.0.18", + "@vitest/spy": "4.0.18", + "@vitest/utils": "4.0.18", + "es-module-lexer": "^1.7.0", + "expect-type": "^1.2.2", + "magic-string": "^0.30.21", + "obug": "^2.1.1", + "pathe": "^2.0.3", + "picomatch": "^4.0.3", + "std-env": "^3.10.0", + "tinybench": "^2.9.0", + "tinyexec": "^1.0.2", + "tinyglobby": "^0.2.15", + "tinyrainbow": "^3.0.3", + "vite": "^6.0.0 || ^7.0.0", + "why-is-node-running": "^2.3.0" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^20.0.0 || ^22.0.0 || >=24.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@opentelemetry/api": "^1.9.0", + "@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0", + "@vitest/browser-playwright": "4.0.18", + "@vitest/browser-preview": "4.0.18", + "@vitest/browser-webdriverio": "4.0.18", + "@vitest/ui": "4.0.18", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@opentelemetry/api": { + "optional": true + }, + "@types/node": { + "optional": true + }, + 
"@vitest/browser-playwright": { + "optional": true + }, + "@vitest/browser-preview": { + "optional": true + }, + "@vitest/browser-webdriverio": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/wcwidth": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.1.tgz", + "integrity": "sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==", + "license": "MIT", + "dependencies": { + "defaults": "^1.0.3" + } + }, + "node_modules/why-is-node-running": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", + "dev": true, + "license": "MIT", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yaml": { + "version": "2.8.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.2.tgz", + "integrity": "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==", + "license": "ISC", + "bin": { + "yaml": "bin.mjs" + }, + "engines": { + "node": ">= 14.6" + }, + "funding": { + "url": "https://github.com/sponsors/eemeli" + } + }, + "node_modules/zod": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz", + "integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + } + } +} diff --git a/package.json b/package.json index 172861c..8720cc7 100644 --- a/package.json +++ b/package.json @@ -1,8 +1,16 @@ { "name": "continues", - "version": "2.6.6", - "description": "Never lose context. 
Resume any AI coding session across Claude, Copilot, Gemini, Codex & OpenCode.", - "main": "dist/cli.js", + "version": "4.0.12", + "description": "Never lose context. Resume any AI coding session across Claude Code, Codex, Copilot, Gemini CLI, Cursor, Amp, Cline, Roo Code, Kilo Code, Kiro, Crush, OpenCode, Droid & Antigravity.", + "main": "dist/index.js", + "types": "dist/index.d.ts", + "exports": { + ".": { + "types": "./dist/index.d.ts", + "import": "./dist/index.js" + }, + "./cli": "./dist/cli.js" + }, "bin": { "continues": "dist/cli.js", "cont": "dist/cli.js" @@ -21,6 +29,11 @@ "start": "node dist/cli.js", "clean": "rm -rf dist", "prepublishOnly": "npm run build", + "postinstall": "echo '\n continues v3.0.0 — Major refactor with typed schemas, library exports & Cursor support!\n Run `continues list` to see all sessions.\n'", + "lint": "biome check src/", + "lint:fix": "biome check --write src/", + "format": "biome format --write src/", + "check": "pnpm lint && pnpm build", "link": "pnpm build && pnpm link --global", "version:patch": "npm version patch", "version:minor": "npm version minor", @@ -38,6 +51,8 @@ "copilot", "gemini", "opencode", + "droid", + "factory", "session", "resume", "context", @@ -47,7 +62,8 @@ "rate-limit", "session-handoff", "context-switch", - "cross-tool" + "cross-tool", + "cursor" ], "author": "Yigit Konur", "license": "MIT", @@ -63,9 +79,11 @@ "chalk": "^4.1.2", "commander": "^14.0.3", "ora": "^5.4.1", - "yaml": "^2.8.2" + "yaml": "^2.8.2", + "zod": "^4.3.6" }, "devDependencies": { + "@biomejs/biome": "^2.4.4", "@types/node": "^25.1.0", "tsx": "^4.21.0", "typescript": "^5.9.3", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index a39ca50..aa6c3dc 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -23,7 +23,13 @@ importers: yaml: specifier: ^2.8.2 version: 2.8.2 + zod: + specifier: ^4.3.6 + version: 4.3.6 devDependencies: + '@biomejs/biome': + specifier: ^2.4.4 + version: 2.4.4 '@types/node': specifier: ^25.1.0 version: 25.1.0 @@ -39,6 
+45,63 @@ importers: packages: + '@biomejs/biome@2.4.4': + resolution: {integrity: sha512-tigwWS5KfJf0cABVd52NVaXyAVv4qpUXOWJ1rxFL8xF1RVoeS2q/LK+FHgYoKMclJCuRoCWAPy1IXaN9/mS61Q==} + engines: {node: '>=14.21.3'} + hasBin: true + + '@biomejs/cli-darwin-arm64@2.4.4': + resolution: {integrity: sha512-jZ+Xc6qvD6tTH5jM6eKX44dcbyNqJHssfl2nnwT6vma6B1sj7ZLTGIk6N5QwVBs5xGN52r3trk5fgd3sQ9We9A==} + engines: {node: '>=14.21.3'} + cpu: [arm64] + os: [darwin] + + '@biomejs/cli-darwin-x64@2.4.4': + resolution: {integrity: sha512-Dh1a/+W+SUCXhEdL7TiX3ArPTFCQKJTI1mGncZNWfO+6suk+gYA4lNyJcBB+pwvF49uw0pEbUS49BgYOY4hzUg==} + engines: {node: '>=14.21.3'} + cpu: [x64] + os: [darwin] + + '@biomejs/cli-linux-arm64-musl@2.4.4': + resolution: {integrity: sha512-+sPAXq3bxmFwhVFJnSwkSF5Rw2ZAJMH3MF6C9IveAEOdSpgajPhoQhbbAK12SehN9j2QrHpk4J/cHsa/HqWaYQ==} + engines: {node: '>=14.21.3'} + cpu: [arm64] + os: [linux] + libc: [musl] + + '@biomejs/cli-linux-arm64@2.4.4': + resolution: {integrity: sha512-V/NFfbWhsUU6w+m5WYbBenlEAz8eYnSqRMDMAW3K+3v0tYVkNyZn8VU0XPxk/lOqNXLSCCrV7FmV/u3SjCBShg==} + engines: {node: '>=14.21.3'} + cpu: [arm64] + os: [linux] + libc: [glibc] + + '@biomejs/cli-linux-x64-musl@2.4.4': + resolution: {integrity: sha512-gGvFTGpOIQDb5CQ2VC0n9Z2UEqlP46c4aNgHmAMytYieTGEcfqhfCFnhs6xjt0S3igE6q5GLuIXtdQt3Izok+g==} + engines: {node: '>=14.21.3'} + cpu: [x64] + os: [linux] + libc: [musl] + + '@biomejs/cli-linux-x64@2.4.4': + resolution: {integrity: sha512-R4+ZCDtG9kHArasyBO+UBD6jr/FcFCTH8QkNTOCu0pRJzCWyWC4EtZa2AmUZB5h3e0jD7bRV2KvrENcf8rndBg==} + engines: {node: '>=14.21.3'} + cpu: [x64] + os: [linux] + libc: [glibc] + + '@biomejs/cli-win32-arm64@2.4.4': + resolution: {integrity: sha512-trzCqM7x+Gn832zZHgr28JoYagQNX4CZkUZhMUac2YxvvyDRLJDrb5m9IA7CaZLlX6lTQmADVfLEKP1et1Ma4Q==} + engines: {node: '>=14.21.3'} + cpu: [arm64] + os: [win32] + + '@biomejs/cli-win32-x64@2.4.4': + resolution: {integrity: sha512-gnOHKVPFAAPrpoPt2t+Q6FZ7RPry/FDV3GcpU53P3PtLNnQjBmKyN2Vh/JtqXet+H4pme8CC76rScwdjDcT1/A==} + 
engines: {node: '>=14.21.3'} + cpu: [x64] + os: [win32] + '@clack/core@1.0.1': resolution: {integrity: sha512-WKeyK3NOBwDOzagPR5H08rFk9D/WuN705yEbuZvKqlkmoLM2woKtXb10OO2k1NoSU4SFG947i2/SCYh+2u5e4g==} @@ -699,8 +762,46 @@ packages: engines: {node: '>= 14.6'} hasBin: true + zod@4.3.6: + resolution: {integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==} + snapshots: + '@biomejs/biome@2.4.4': + optionalDependencies: + '@biomejs/cli-darwin-arm64': 2.4.4 + '@biomejs/cli-darwin-x64': 2.4.4 + '@biomejs/cli-linux-arm64': 2.4.4 + '@biomejs/cli-linux-arm64-musl': 2.4.4 + '@biomejs/cli-linux-x64': 2.4.4 + '@biomejs/cli-linux-x64-musl': 2.4.4 + '@biomejs/cli-win32-arm64': 2.4.4 + '@biomejs/cli-win32-x64': 2.4.4 + + '@biomejs/cli-darwin-arm64@2.4.4': + optional: true + + '@biomejs/cli-darwin-x64@2.4.4': + optional: true + + '@biomejs/cli-linux-arm64-musl@2.4.4': + optional: true + + '@biomejs/cli-linux-arm64@2.4.4': + optional: true + + '@biomejs/cli-linux-x64-musl@2.4.4': + optional: true + + '@biomejs/cli-linux-x64@2.4.4': + optional: true + + '@biomejs/cli-win32-arm64@2.4.4': + optional: true + + '@biomejs/cli-win32-x64@2.4.4': + optional: true + '@clack/core@1.0.1': dependencies: picocolors: 1.1.1 @@ -1225,3 +1326,5 @@ snapshots: stackback: 0.0.2 yaml@2.8.2: {} + + zod@4.3.6: {} diff --git a/src/__tests__/claude-task-reconciliation.test.ts b/src/__tests__/claude-task-reconciliation.test.ts new file mode 100644 index 0000000..8de35d1 --- /dev/null +++ b/src/__tests__/claude-task-reconciliation.test.ts @@ -0,0 +1,144 @@ +import * as fs from 'fs'; +import * as os from 'os'; +import * as path from 'path'; +import { afterEach, describe, expect, it } from 'vitest'; +import { getPreset } from '../config/index.js'; +import { extractClaudeContext } from '../parsers/claude.js'; +import type { UnifiedSession } from '../types/index.js'; + +function makeSession(originalPath: string): UnifiedSession { + const now = new 
Date('2026-03-03T00:00:00.000Z'); + return { + id: 'test-session', + source: 'claude', + cwd: '/tmp', + lines: 0, + bytes: 0, + createdAt: now, + updatedAt: now, + originalPath, + }; +} + +function writeJsonl(filePath: string, records: Array>): void { + fs.mkdirSync(path.dirname(filePath), { recursive: true }); + fs.writeFileSync(filePath, records.map((r) => JSON.stringify(r)).join('\n') + '\n', 'utf8'); +} + +describe('Claude task reconciliation', () => { + const tempDirs: string[] = []; + + afterEach(() => { + for (const dir of tempDirs.splice(0)) { + fs.rmSync(dir, { recursive: true, force: true }); + } + }); + + it('does not mark local_bash queue tasks as pending subagents', async () => { + const tmp = fs.mkdtempSync(path.join(os.tmpdir(), 'continues-claude-bash-')); + tempDirs.push(tmp); + const filePath = path.join(tmp, 'session.jsonl'); + + writeJsonl(filePath, [ + { + type: 'queue-operation', + operation: 'enqueue', + content: '{"task_id":"b57b3c5","description":"Watch CI run until completion","task_type":"local_bash"}', + }, + { + type: 'assistant', + timestamp: '2026-03-03T00:00:01.000Z', + message: { role: 'assistant', content: [{ type: 'text', text: 'Done.' 
}] }, + }, + ]); + + const ctx = await extractClaudeContext(makeSession(filePath), getPreset('standard')); + expect(ctx.pendingTasks).not.toContain('Incomplete subagent: Watch CI run until completion'); + }); + + it('keeps local_agent without terminal evidence as pending when transcript is missing', async () => { + const tmp = fs.mkdtempSync(path.join(os.tmpdir(), 'continues-claude-agent-')); + tempDirs.push(tmp); + const filePath = path.join(tmp, 'session.jsonl'); + + writeJsonl(filePath, [ + { + type: 'queue-operation', + operation: 'enqueue', + content: '{"task_id":"a111111","description":"Explore formatting","task_type":"local_agent"}', + }, + ]); + + const ctx = await extractClaudeContext(makeSession(filePath), getPreset('standard')); + expect(ctx.pendingTasks).toContain('Incomplete subagent: Explore formatting'); + }); + + it('uses TaskOutput completion status as terminal evidence', async () => { + const tmp = fs.mkdtempSync(path.join(os.tmpdir(), 'continues-claude-taskoutput-')); + tempDirs.push(tmp); + const filePath = path.join(tmp, 'session.jsonl'); + + writeJsonl(filePath, [ + { + type: 'queue-operation', + operation: 'enqueue', + content: '{"task_id":"a222222","description":"Create docs","task_type":"local_agent"}', + }, + { + type: 'assistant', + timestamp: '2026-03-03T00:00:01.000Z', + message: { + role: 'assistant', + content: [{ type: 'tool_use', id: 'tu-taskoutput', name: 'TaskOutput', input: { task_id: 'a222222' } }], + }, + }, + { + type: 'user', + timestamp: '2026-03-03T00:00:02.000Z', + message: { + role: 'user', + content: [ + { + type: 'tool_result', + tool_use_id: 'tu-taskoutput', + content: 'successa222222completed', + }, + ], + }, + }, + ]); + + const ctx = await extractClaudeContext(makeSession(filePath), getPreset('standard')); + expect(ctx.pendingTasks).not.toContain('Incomplete subagent: Create docs'); + }); + + it('uses XML task-notification status as terminal evidence', async () => { + const tmp = 
fs.mkdtempSync(path.join(os.tmpdir(), 'continues-claude-tasknotif-')); + tempDirs.push(tmp); + const filePath = path.join(tmp, 'session.jsonl'); + + writeJsonl(filePath, [ + { + type: 'queue-operation', + operation: 'enqueue', + content: '{"task_id":"a333333","description":"Create architecture docs","task_type":"local_agent"}', + }, + { + type: 'user', + timestamp: '2026-03-03T00:00:03.000Z', + message: { + role: 'user', + content: [ + { + type: 'text', + text: 'a333333completeddone', + }, + ], + }, + }, + ]); + + const ctx = await extractClaudeContext(makeSession(filePath), getPreset('standard')); + expect(ctx.pendingTasks).not.toContain('Incomplete subagent: Create architecture docs'); + }); +}); diff --git a/src/__tests__/conversions.test.ts b/src/__tests__/conversions.test.ts deleted file mode 100644 index aae88fa..0000000 --- a/src/__tests__/conversions.test.ts +++ /dev/null @@ -1,214 +0,0 @@ -/** - * Tests for all 20 cross-tool conversion paths. - * Verifies that every source→target combination produces valid handoff markdown. 
- */ -import { describe, it, expect, beforeAll } from 'vitest'; -import type { UnifiedSession, SessionSource, SessionContext } from '../types/index.js'; -import { - parseClaudeSessions, - extractClaudeContext, - parseCopilotSessions, - extractCopilotContext, - parseGeminiSessions, - extractGeminiContext, - parseCodexSessions, - extractCodexContext, - parseOpenCodeSessions, - extractOpenCodeContext, -} from '../parsers/index.js'; - -const ALL_SOURCES: SessionSource[] = ['claude', 'copilot', 'gemini', 'codex', 'opencode']; - -// Cache parsed sessions and contexts so we only parse once -const sessionCache: Record = {}; -const contextCache: Record = {}; - -const parsers: Record Promise> = { - claude: parseClaudeSessions, - copilot: parseCopilotSessions, - gemini: parseGeminiSessions, - codex: parseCodexSessions, - opencode: parseOpenCodeSessions, -}; - -const extractors: Record Promise> = { - claude: extractClaudeContext, - copilot: extractCopilotContext, - gemini: extractGeminiContext, - codex: extractCodexContext, - opencode: extractOpenCodeContext, -}; - -const friendlyNames: Record = { - claude: 'Claude Code', - copilot: 'GitHub Copilot CLI', - gemini: 'Gemini CLI', - codex: 'Codex CLI', - opencode: 'OpenCode', -}; - -beforeAll(async () => { - // Pre-load all sessions - for (const source of ALL_SOURCES) { - sessionCache[source] = await parsers[source](); - } - // Pre-extract context for first session of each source - for (const source of ALL_SOURCES) { - const sessions = sessionCache[source]; - if (sessions.length > 0) { - contextCache[source] = await extractors[source](sessions[0]); - } - } -}, 60000); // 60s timeout for loading all sessions - -/** - * Simulate cross-tool conversion by: - * 1. Extracting context from source session - * 2. Validating the handoff markdown is well-formed - * 3. Verifying the markdown contains the right source attribution - * 4. 
Checking it would work as injection into the target tool - */ -function validateConversion(sourceCtx: SessionContext, target: SessionSource) { - const md = sourceCtx.markdown; - - // Basic markdown structure - expect(md).toContain('# Session Handoff Context'); - expect(md).toContain('## Original Session'); - expect(md).toContain('**Session ID**'); - expect(md).toContain('**Last Active**'); - expect(md).toContain('Continue this session'); - - // Source attribution - verify the source is correctly identified - expect(md).toContain('**Source**'); - - // Working directory should be present - expect(md).toContain('**Working Directory**'); - - // Recent conversation section should exist - expect(md).toContain('Recent Conversation'); - - // Markdown should be non-trivial - expect(md.length).toBeGreaterThan(100); - - // Should be valid markdown (no unclosed blocks, basic sanity) - const lines = md.split('\n'); - const headers = lines.filter(l => l.startsWith('#')); - expect(headers.length).toBeGreaterThan(0); - - // Should have at least some content between headers - expect(lines.length).toBeGreaterThan(5); -} - -// Generate all 20 conversion tests -describe('Cross-Tool Conversions (20 paths)', () => { - let conversionNumber = 0; - - for (const source of ALL_SOURCES) { - for (const target of ALL_SOURCES) { - if (source === target) continue; - - conversionNumber++; - const testName = `#${conversionNumber}: ${source} → ${target}`; - - it(testName, async () => { - const sessions = sessionCache[source]; - expect(sessions?.length, `No ${source} sessions found`).toBeGreaterThan(0); - - const ctx = contextCache[source]; - expect(ctx, `No context extracted for ${source}`).toBeDefined(); - - // Validate the conversion produces valid handoff - validateConversion(ctx, target); - - // Verify messages were extracted - expect(ctx.recentMessages.length, `No messages extracted from ${source}`).toBeGreaterThan(0); - - // Verify source is correctly identified in markdown - 
expect(ctx.session.source).toBe(source); - }); - } - } -}); - -describe('Conversion Content Quality', () => { - for (const source of ALL_SOURCES) { - it(`${source} context has meaningful content`, async () => { - const ctx = contextCache[source]; - if (!ctx) return; // skip if no sessions - - // Messages should have actual content, not empty strings - for (const msg of ctx.recentMessages) { - expect(msg.content.length).toBeGreaterThan(0); - expect(['user', 'assistant', 'system', 'tool']).toContain(msg.role); - } - - // Markdown should contain actual conversation snippets - if (ctx.recentMessages.length > 0) { - // At least one message's content should appear in markdown (truncated or not) - const firstMsg = ctx.recentMessages[0]; - const snippet = firstMsg.content.slice(0, 50); - // The markdown might truncate, so just check the conversation section exists - expect(ctx.markdown).toContain('###'); - } - }); - } - - it('all 5 sources produce different session IDs', () => { - const ids = new Set(); - for (const source of ALL_SOURCES) { - const ctx = contextCache[source]; - if (ctx) { - ids.add(ctx.session.id); - } - } - expect(ids.size).toBe(ALL_SOURCES.length); - }); - - it('all 5 sources produce markdown with correct source attribution', () => { - const sourceLabels: Record = { - claude: 'Claude Code', - copilot: 'GitHub Copilot CLI', - gemini: 'Gemini CLI', - codex: 'Codex CLI', - opencode: 'OpenCode', - }; - - for (const source of ALL_SOURCES) { - const ctx = contextCache[source]; - if (!ctx) continue; - - expect(ctx.markdown).toContain(sourceLabels[source]); - } - }); -}); - -describe('Handoff Markdown Injectability', () => { - // Test that the markdown produced would be valid for injection into each target - for (const source of ALL_SOURCES) { - for (const target of ALL_SOURCES) { - if (source === target) continue; - - it(`${source}→${target}: markdown is safe for injection`, () => { - const ctx = contextCache[source]; - if (!ctx) return; - - const md = 
ctx.markdown; - - // No null bytes - expect(md).not.toContain('\0'); - - // No very long lines that could break terminals (> 10K chars per line) - const lines = md.split('\n'); - for (const line of lines) { - expect(line.length).toBeLessThan(10000); - } - - // Should be valid UTF-8 (no mojibake) - expect(Buffer.from(md, 'utf8').toString('utf8')).toBe(md); - - // Total size should be reasonable (< 50KB) - expect(Buffer.byteLength(md, 'utf8')).toBeLessThan(50000); - }); - } - } -}); diff --git a/src/__tests__/cwd-from-slug.test.ts b/src/__tests__/cwd-from-slug.test.ts new file mode 100644 index 0000000..731e8f7 --- /dev/null +++ b/src/__tests__/cwd-from-slug.test.ts @@ -0,0 +1,36 @@ +import * as fs from 'fs'; +import * as os from 'os'; +import * as path from 'path'; +import { describe, expect, it } from 'vitest'; +import { cwdFromSlug } from '../utils/slug.js'; + +describe('cwdFromSlug', () => { + const itWindows = process.platform === 'win32' ? it : it.skip; + + itWindows('resolves Windows drive-letter slugs using existing path', () => { + const base = fs.mkdtempSync(path.join(os.tmpdir(), 'continues-slug-')); + const target = path.join(base, 'project-alpha'); + fs.mkdirSync(target, { recursive: true }); + + const normalized = target.replace(/\\/g, '/'); + const slug = normalized.replace(':', '').replace(/[/.]/g, '-'); + const resolved = cwdFromSlug(slug).replace(/\\/g, '/'); + + expect(resolved.toLowerCase()).toBe(normalized.toLowerCase()); + + fs.rmSync(base, { recursive: true, force: true }); + }); + + itWindows('falls back to drive-letter path format when no candidate exists', () => { + expect(cwdFromSlug('D-Workspace-project-alpha')).toBe('D:/Workspace/project/alpha'); + }); + + it('falls back to Unix path format for drive-letter-like slugs on non-Windows', () => { + if (process.platform === 'win32') return; + expect(cwdFromSlug('D-Workspace-project-alpha')).toBe('/D/Workspace/project/alpha'); + }); + + it('keeps Unix fallback behavior for non-drive slugs', () 
=> { + expect(cwdFromSlug('Users-alice-my-project')).toBe('/Users/alice/my/project'); + }); +}); diff --git a/src/__tests__/cwd-matching.test.ts b/src/__tests__/cwd-matching.test.ts new file mode 100644 index 0000000..37e015f --- /dev/null +++ b/src/__tests__/cwd-matching.test.ts @@ -0,0 +1,52 @@ +import { describe, expect, it } from 'vitest'; +import { matchesCwd } from '../utils/slug.js'; + +describe('matchesCwd', () => { + it('exact match', () => { + expect(matchesCwd('/Users/me/project', '/Users/me/project')).toBe(true); + }); + + it('subdirectory match', () => { + expect(matchesCwd('/Users/me/project/src', '/Users/me/project')).toBe(true); + }); + + it('deeply nested subdirectory match', () => { + expect(matchesCwd('/Users/me/project/src/utils/deep', '/Users/me/project')).toBe(true); + }); + + it('non-match: different directory', () => { + expect(matchesCwd('/Users/me/other', '/Users/me/project')).toBe(false); + }); + + it('non-match: partial name overlap', () => { + expect(matchesCwd('/Users/me/project-v2', '/Users/me/project')).toBe(false); + }); + + it('non-match: parent directory', () => { + expect(matchesCwd('/Users/me', '/Users/me/project')).toBe(false); + }); + + it('handles trailing slashes on session cwd', () => { + expect(matchesCwd('/Users/me/project/', '/Users/me/project')).toBe(true); + }); + + it('handles trailing slashes on target dir', () => { + expect(matchesCwd('/Users/me/project', '/Users/me/project/')).toBe(true); + }); + + it('handles trailing slashes on both', () => { + expect(matchesCwd('/Users/me/project/', '/Users/me/project/')).toBe(true); + }); + + it('empty session cwd returns false', () => { + expect(matchesCwd('', '/Users/me/project')).toBe(false); + }); + + it('empty target dir returns false', () => { + expect(matchesCwd('/Users/me/project', '')).toBe(false); + }); + + it('root target returns false', () => { + expect(matchesCwd('/Users/me/project', '/')).toBe(false); + }); +}); diff --git a/src/__tests__/e2e-conversions.test.ts 
b/src/__tests__/e2e-conversions.test.ts index b004b9b..ee88161 100644 --- a/src/__tests__/e2e-conversions.test.ts +++ b/src/__tests__/e2e-conversions.test.ts @@ -10,20 +10,49 @@ * opencode run "message" * copilot -i "prompt" (falls back to stdin) */ -import { describe, it, expect, beforeAll } from 'vitest'; + import { execSync } from 'child_process'; import * as fs from 'fs'; +import { WHICH_CMD } from '../utils/platform.js'; import * as path from 'path'; -import type { UnifiedSession, SessionSource, SessionContext } from '../types/index.js'; +import { beforeAll, describe, expect, it } from 'vitest'; import { - parseClaudeSessions, extractClaudeContext, - parseCopilotSessions, extractCopilotContext, - parseGeminiSessions, extractGeminiContext, - parseCodexSessions, extractCodexContext, - parseOpenCodeSessions, extractOpenCodeContext, + extractClaudeContext, + extractCodexContext, + extractCopilotContext, + extractCursorContext, + extractDroidContext, + extractGeminiContext, + extractOpenCodeContext, + extractAmpContext, + extractKiroContext, + extractCrushContext, + extractClineContext, + extractRooCodeContext, + extractKiloCodeContext, + extractAntigravityContext, + extractKimiContext, + extractQwenCodeContext, + parseClaudeSessions, + parseCodexSessions, + parseCopilotSessions, + parseCursorSessions, + parseDroidSessions, + parseGeminiSessions, + parseOpenCodeSessions, + parseAmpSessions, + parseKiroSessions, + parseCrushSessions, + parseClineSessions, + parseRooCodeSessions, + parseKiloCodeSessions, + parseAntigravitySessions, + parseKimiSessions, + parseQwenCodeSessions, } from '../parsers/index.js'; +import type { SessionContext, SessionSource, UnifiedSession } from '../types/index.js'; -const ALL_SOURCES: SessionSource[] = ['claude', 'copilot', 'gemini', 'codex', 'opencode']; +const ALL_SOURCES: SessionSource[] = ['claude', 'copilot', 'gemini', 'codex', 'opencode', 'droid', 'cursor', 'amp', 'kiro', 'crush', 'cline', 'roo-code', 'kilo-code', 'antigravity', 
'kimi', 'qwen-code']; const parsers: Record Promise> = { claude: parseClaudeSessions, @@ -31,6 +60,17 @@ const parsers: Record Promise> = { gemini: parseGeminiSessions, codex: parseCodexSessions, opencode: parseOpenCodeSessions, + droid: parseDroidSessions, + cursor: parseCursorSessions, + amp: parseAmpSessions, + kiro: parseKiroSessions, + crush: parseCrushSessions, + cline: parseClineSessions, + 'roo-code': parseRooCodeSessions, + 'kilo-code': parseKiloCodeSessions, + antigravity: parseAntigravitySessions, + kimi: parseKimiSessions, + 'qwen-code': parseQwenCodeSessions, }; const extractors: Record Promise> = { @@ -39,6 +79,17 @@ const extractors: Record Promise = {}; */ function toolExists(tool: string): boolean { try { - execSync(`which ${tool}`, { stdio: 'ignore' }); + execSync(`${WHICH_CMD} ${tool}`, { stdio: 'ignore' }); return true; } catch { return false; @@ -120,7 +171,11 @@ function runTool(tool: SessionSource, prompt: string, cwd: string): string { } catch (err: any) { return `ERROR: ${err.message?.slice(0, 500) || 'unknown error'}`; } finally { - try { fs.unlinkSync(tmpFile); } catch { /* ignore */ } + try { + fs.unlinkSync(tmpFile); + } catch { + /* ignore */ + } } } @@ -139,7 +194,7 @@ beforeAll(async () => { // Pick smallest session with actual content const sorted = [...sessions].sort((a, b) => a.bytes - b.bytes); - const session = sorted.find(s => s.bytes > 200) || sessions[sessions.length - 1]; + const session = sorted.find((s) => s.bytes > 200) || sessions[sessions.length - 1]; const ctx = await extractors[source](session); if (ctx.recentMessages.length === 0) { @@ -160,7 +215,9 @@ beforeAll(async () => { const mdPath = path.join(RESULTS_DIR, `handoff-from-${source}.md`); fs.writeFileSync(mdPath, contexts[source].markdown); handoffFiles[source] = mdPath; - console.log(`✓ ${source}: extracted ${contexts[source].recentMessages.length} messages from session ${session.id.slice(0, 12)}`); + console.log( + `✓ ${source}: extracted 
${contexts[source].recentMessages.length} messages from session ${session.id.slice(0, 12)}`, + ); } } catch (err) { console.log(`⚠ ${source}: extraction failed - ${err}`); @@ -207,10 +264,25 @@ describe('E2E: 20 Cross-Tool Conversion Paths', () => { return; } - const sourceLabel = { - claude: 'Claude Code', copilot: 'GitHub Copilot CLI', - gemini: 'Gemini CLI', codex: 'Codex CLI', opencode: 'OpenCode', - }[source]; + const sourceLabels: Record = { + claude: 'Claude Code', + copilot: 'GitHub Copilot CLI', + gemini: 'Gemini CLI', + codex: 'Codex CLI', + opencode: 'OpenCode', + droid: 'Factory Droid', + cursor: 'Cursor AI', + amp: 'Amp CLI', + kiro: 'Kiro IDE', + crush: 'Crush CLI', + cline: 'Cline', + 'roo-code': 'Roo Code', + 'kilo-code': 'Kilo Code', + antigravity: 'Antigravity', + kimi: 'Kimi CLI', + 'qwen-code': 'Qwen Code', + }; + const sourceLabel = sourceLabels[source]; const prompt = buildVerificationPrompt(contexts[source].markdown, sourceLabel); const cwd = contexts[source].session.cwd || process.cwd(); @@ -238,7 +310,10 @@ describe('E2E: 20 Cross-Tool Conversion Paths', () => { lowerOutput.includes('received') || output.includes('HANDOFF_RECEIVED'); - expect(acknowledged, `${target} did not acknowledge the handoff from ${source}. Output: ${output.slice(0, 300)}`).toBe(true); + expect( + acknowledged, + `${target} did not acknowledge the handoff from ${source}. Output: ${output.slice(0, 300)}`, + ).toBe(true); }, 180_000); // 3 min timeout per test } } diff --git a/src/__tests__/env-cache-invalidation.test.ts b/src/__tests__/env-cache-invalidation.test.ts new file mode 100644 index 0000000..926577d --- /dev/null +++ b/src/__tests__/env-cache-invalidation.test.ts @@ -0,0 +1,144 @@ +/** + * Regression test for GitHub issue #18: + * CLAUDE_CONFIG_DIR environment variable is ignored when running continues resume. 
+ * + * Root cause: the session index cache (~/.continues/sessions.jsonl) had a 5-min TTL + * but did not track which env vars were in effect when the cache was built. Changing + * CLAUDE_CONFIG_DIR (or any adapter envVar) would still serve the stale cache. + * + * Fix: store an env fingerprint as the first line of the index file and invalidate + * when the fingerprint changes. + */ + +import { createHash } from 'crypto'; +import * as fs from 'fs'; +import * as os from 'os'; +import * as path from 'path'; +import { afterAll, afterEach, beforeAll, describe, expect, it, vi } from 'vitest'; + +// Create the fake home eagerly so it's ready before any mock evaluates +const fakeHome = fs.mkdtempSync(path.join(os.tmpdir(), 'continues-env-test-')); + +afterAll(() => { + fs.rmSync(fakeHome, { recursive: true, force: true }); +}); + +// Mock homeDir() BEFORE importing the index module — the module evaluates +// CONTINUES_DIR = path.join(homeDir(), '.continues') at import time. +vi.mock('../utils/parser-helpers.js', async (importOriginal) => { + const orig = await importOriginal(); + return { + ...orig, + homeDir: () => fakeHome, + }; +}); + +// Now import the module under test — it will resolve INDEX_FILE under fakeHome. 
+const { indexNeedsRebuild, loadIndex, ensureDirectories } = await import('../utils/index.js'); +const { adapters } = await import('../parsers/registry.js'); + +// ── Helpers ────────────────────────────────────────────────────────────────── + +function indexFilePath(): string { + return path.join(fakeHome, '.continues', 'sessions.jsonl'); +} + +function writeIndex(fingerprint: string, sessions: Record[]): void { + ensureDirectories(); + const lines = sessions.map((s) => JSON.stringify(s)); + fs.writeFileSync(indexFilePath(), fingerprint + '\n' + lines.join('\n') + '\n'); +} + +function makeSession(id: string, source = 'claude'): Record { + return { + id, + source, + cwd: '/tmp/project', + repo: 'test/repo', + branch: 'main', + lines: 10, + bytes: 500, + createdAt: '2025-06-01T00:00:00.000Z', + updatedAt: '2025-06-01T00:00:00.000Z', + originalPath: `/tmp/${id}.jsonl`, + }; +} + +afterEach(() => { + // Clean the index file between tests + try { fs.unlinkSync(indexFilePath()); } catch (_) { /* file may not exist */ } + vi.unstubAllEnvs(); +}); + +// ── Tests ──────────────────────────────────────────────────────────────────── + +describe('env fingerprint cache invalidation (issue #18)', () => { + it('indexNeedsRebuild returns true when no index file exists', () => { + expect(indexNeedsRebuild()).toBe(true); + }); + + it('indexNeedsRebuild returns false when index is fresh and fingerprint matches', () => { + // Compute what the real module would write — import adapters to derive env vars + // The simplest way: write an index via the fingerprint the module itself expects. + // We write a fingerprint that matches the current env (all env vars unset in test). 
+ const seen = new Set(); + const parts: string[] = []; + for (const adapter of Object.values(adapters) as Array<{ envVar?: string }>) { + if (adapter.envVar && !seen.has(adapter.envVar)) { + seen.add(adapter.envVar); + const val = process.env[adapter.envVar] || ''; + parts.push(`${adapter.envVar}=${val}`); + } + } + const hash = createHash('sha256').update(parts.sort().join('|')).digest('hex'); + const fingerprint = `#env:${hash}`; + + writeIndex(fingerprint, [makeSession('sess-1')]); + + expect(indexNeedsRebuild()).toBe(false); + }); + + it('indexNeedsRebuild returns true when CLAUDE_CONFIG_DIR changes', () => { + // Write index with current fingerprint (CLAUDE_CONFIG_DIR unset) + const seen = new Set(); + const parts: string[] = []; + for (const adapter of Object.values(adapters) as Array<{ envVar?: string }>) { + if (adapter.envVar && !seen.has(adapter.envVar)) { + seen.add(adapter.envVar); + const val = process.env[adapter.envVar] || ''; + parts.push(`${adapter.envVar}=${val}`); + } + } + const hash = createHash('sha256').update(parts.sort().join('|')).digest('hex'); + const fingerprint = `#env:${hash}`; + writeIndex(fingerprint, [makeSession('sess-1')]); + + // Now change the env var — fingerprint should mismatch + vi.stubEnv('CLAUDE_CONFIG_DIR', '/home/user/.claude-work'); + + expect(indexNeedsRebuild()).toBe(true); + }); + + it('loadIndex skips the fingerprint line and returns only sessions', () => { + writeIndex('#env:CLAUDE_CONFIG_DIR=', [ + makeSession('sess-1', 'claude'), + makeSession('sess-2', 'codex'), + ]); + + const sessions = loadIndex(); + + expect(sessions).toHaveLength(2); + expect(sessions[0].id).toBe('sess-1'); + expect(sessions[1].id).toBe('sess-2'); + expect(sessions[0].createdAt).toBeInstanceOf(Date); + }); + + it('loadIndex returns empty array for non-existent file', () => { + expect(loadIndex()).toEqual([]); + }); + + it('fingerprint line is not parseable as JSON', () => { + const hash = createHash('sha256').update('test').digest('hex'); 
+ expect(() => JSON.parse(`#env:\${hash}`)).toThrow(); + }); +}); diff --git a/src/__tests__/extract-handoffs.ts b/src/__tests__/extract-handoffs.ts index 133f92e..e74faa9 100644 --- a/src/__tests__/extract-handoffs.ts +++ b/src/__tests__/extract-handoffs.ts @@ -2,21 +2,49 @@ * Extract handoff markdown from the smallest real session of each source. * Saves to ~/.continues/e2e-test-results/handoff-from-{source}.md */ -import { - parseClaudeSessions, extractClaudeContext, - parseCopilotSessions, extractCopilotContext, - parseGeminiSessions, extractGeminiContext, - parseCodexSessions, extractCodexContext, - parseOpenCodeSessions, extractOpenCodeContext, -} from '../parsers/index.js'; -import type { UnifiedSession, SessionSource, SessionContext } from '../types/index.js'; + import * as fs from 'fs'; import * as path from 'path'; +import { + extractClaudeContext, + extractCodexContext, + extractCopilotContext, + extractCursorContext, + extractDroidContext, + extractGeminiContext, + extractOpenCodeContext, + extractAmpContext, + extractKiroContext, + extractCrushContext, + extractClineContext, + extractRooCodeContext, + extractKiloCodeContext, + extractAntigravityContext, + extractKimiContext, + extractQwenCodeContext, + parseClaudeSessions, + parseCodexSessions, + parseCopilotSessions, + parseCursorSessions, + parseDroidSessions, + parseGeminiSessions, + parseOpenCodeSessions, + parseAmpSessions, + parseKiroSessions, + parseCrushSessions, + parseClineSessions, + parseRooCodeSessions, + parseKiloCodeSessions, + parseAntigravitySessions, + parseKimiSessions, + parseQwenCodeSessions, +} from '../parsers/index.js'; +import type { SessionContext, SessionSource, UnifiedSession } from '../types/index.js'; const RESULTS_DIR = path.join(process.env.HOME || '~', '.continues', 'e2e-test-results'); fs.mkdirSync(RESULTS_DIR, { recursive: true }); -const ALL_SOURCES: SessionSource[] = ['claude', 'copilot', 'gemini', 'codex', 'opencode']; +const ALL_SOURCES: SessionSource[] = 
['claude', 'copilot', 'gemini', 'codex', 'opencode', 'droid', 'cursor', 'amp', 'kiro', 'crush', 'cline', 'roo-code', 'kilo-code', 'antigravity', 'kimi', 'qwen-code']; const parsers: Record Promise> = { claude: parseClaudeSessions, @@ -24,6 +52,17 @@ const parsers: Record Promise> = { gemini: parseGeminiSessions, codex: parseCodexSessions, opencode: parseOpenCodeSessions, + droid: parseDroidSessions, + cursor: parseCursorSessions, + amp: parseAmpSessions, + kiro: parseKiroSessions, + crush: parseCrushSessions, + cline: parseClineSessions, + 'roo-code': parseRooCodeSessions, + 'kilo-code': parseKiloCodeSessions, + antigravity: parseAntigravitySessions, + kimi: parseKimiSessions, + 'qwen-code': parseQwenCodeSessions, }; const extractors: Record Promise> = { @@ -32,6 +71,17 @@ const extractors: Record Promise m.role === 'user')?.content.slice(0, 80) || '(no user msg)'; + const firstUserMsg = ctx.recentMessages.find((m) => m.role === 'user')?.content.slice(0, 80) || '(no user msg)'; summary[source] = { sessionId: usedSession.id.slice(0, 16), msgCount: ctx.recentMessages.length, diff --git a/src/__tests__/fixtures/index.ts b/src/__tests__/fixtures/index.ts index 540f013..7fda7c2 100644 --- a/src/__tests__/fixtures/index.ts +++ b/src/__tests__/fixtures/index.ts @@ -1,9 +1,10 @@ /** - * Test fixtures - sanitized session data for all 5 tools + * Test fixtures - sanitized session data for supported parsers */ +import { createHash } from 'crypto'; import * as fs from 'fs'; -import * as path from 'path'; import * as os from 'os'; +import * as path from 'path'; export interface FixtureDir { root: string; @@ -215,10 +216,7 @@ export function createGeminiFixture(): FixtureDir { ], }; - fs.writeFileSync( - path.join(chatsDir, 'session-2026-01-15T10-00-test1234.json'), - JSON.stringify(session, null, 2) - ); + fs.writeFileSync(path.join(chatsDir, 'session-2026-01-15T10-00-test1234.json'), JSON.stringify(session, null, 2)); return { root, @@ -363,53 +361,111 @@ export function 
createOpenCodeSqliteFixture(): FixtureDir { // Insert project db.prepare('INSERT INTO project VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)').run( - 'proj_test1', '/home/user/project', 'git', 'project', null, null, now - 10000, now, null, '[]', null + 'proj_test1', + '/home/user/project', + 'git', + 'project', + null, + null, + now - 10000, + now, + null, + '[]', + null, ); // Insert session db.prepare('INSERT INTO session VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)').run( - 'ses_test1', 'proj_test1', null, 'test-session', '/home/user/project', - 'Fix authentication bug', '1.2.0', null, 2, 0, 1, null, null, null, now - 5000, now, null, null + 'ses_test1', + 'proj_test1', + null, + 'test-session', + '/home/user/project', + 'Fix authentication bug', + '1.2.0', + null, + 2, + 0, + 1, + null, + null, + null, + now - 5000, + now, + null, + null, ); // Insert user message db.prepare('INSERT INTO message VALUES (?, ?, ?, ?, ?)').run( - 'msg_user1', 'ses_test1', now - 4000, now - 4000, - JSON.stringify({ role: 'user', time: { created: now - 4000 } }) + 'msg_user1', + 'ses_test1', + now - 4000, + now - 4000, + JSON.stringify({ role: 'user', time: { created: now - 4000 } }), ); db.prepare('INSERT INTO part VALUES (?, ?, ?, ?, ?, ?)').run( - 'prt_user1', 'msg_user1', 'ses_test1', now - 4000, now - 4000, - JSON.stringify({ type: 'text', text: 'Fix the authentication bug in login.ts' }) + 'prt_user1', + 'msg_user1', + 'ses_test1', + now - 4000, + now - 4000, + JSON.stringify({ type: 'text', text: 'Fix the authentication bug in login.ts' }), ); // Insert assistant message db.prepare('INSERT INTO message VALUES (?, ?, ?, ?, ?)').run( - 'msg_asst1', 'ses_test1', now - 3000, now - 3000, - JSON.stringify({ role: 'assistant', time: { created: now - 3000, completed: now - 2500 }, modelID: 'claude-opus-4.6' }) + 'msg_asst1', + 'ses_test1', + now - 3000, + now - 3000, + JSON.stringify({ + role: 'assistant', + time: { created: now - 3000, completed: now - 2500 }, + modelID: 
'claude-opus-4.6', + }), ); db.prepare('INSERT INTO part VALUES (?, ?, ?, ?, ?, ?)').run( - 'prt_asst1', 'msg_asst1', 'ses_test1', now - 3000, now - 3000, - JSON.stringify({ type: 'text', text: 'I found the issue in login.ts. The token validation was missing.' }) + 'prt_asst1', + 'msg_asst1', + 'ses_test1', + now - 3000, + now - 3000, + JSON.stringify({ type: 'text', text: 'I found the issue in login.ts. The token validation was missing.' }), ); // Insert another user message db.prepare('INSERT INTO message VALUES (?, ?, ?, ?, ?)').run( - 'msg_user2', 'ses_test1', now - 2000, now - 2000, - JSON.stringify({ role: 'user', time: { created: now - 2000 } }) + 'msg_user2', + 'ses_test1', + now - 2000, + now - 2000, + JSON.stringify({ role: 'user', time: { created: now - 2000 } }), ); db.prepare('INSERT INTO part VALUES (?, ?, ?, ?, ?, ?)').run( - 'prt_user2', 'msg_user2', 'ses_test1', now - 2000, now - 2000, - JSON.stringify({ type: 'text', text: 'Great, please also add error handling' }) + 'prt_user2', + 'msg_user2', + 'ses_test1', + now - 2000, + now - 2000, + JSON.stringify({ type: 'text', text: 'Great, please also add error handling' }), ); // Insert another assistant message db.prepare('INSERT INTO message VALUES (?, ?, ?, ?, ?)').run( - 'msg_asst2', 'ses_test1', now - 1000, now - 1000, - JSON.stringify({ role: 'assistant', time: { created: now - 1000, completed: now - 500 } }) + 'msg_asst2', + 'ses_test1', + now - 1000, + now - 1000, + JSON.stringify({ role: 'assistant', time: { created: now - 1000, completed: now - 500 } }), ); db.prepare('INSERT INTO part VALUES (?, ?, ?, ?, ?, ?)').run( - 'prt_asst2', 'msg_asst2', 'ses_test1', now - 1000, now - 1000, - JSON.stringify({ type: 'text', text: 'Done. I added try-catch blocks and proper error messages.' }) + 'prt_asst2', + 'msg_asst2', + 'ses_test1', + now - 1000, + now - 1000, + JSON.stringify({ type: 'text', text: 'Done. I added try-catch blocks and proper error messages.' 
}), ); db.close(); @@ -426,6 +482,615 @@ export function createOpenCodeSqliteFixture(): FixtureDir { }; } +/** + * Create a temporary directory with Droid session fixtures + */ +export function createDroidFixture(): FixtureDir { + const root = fs.mkdtempSync(path.join(os.tmpdir(), 'test-droid-')); + const workspaceDir = path.join(root, '-home-user-project'); + fs.mkdirSync(workspaceDir, { recursive: true }); + + const sessionId = 'dddddddd-1111-2222-3333-444444444444'; + + // Create .settings.json + const settings = { + assistantActiveTimeMs: 15000, + model: 'claude-opus-4-6', + reasoningEffort: 'max', + interactionMode: 'auto', + autonomyMode: 'auto-low', + tokenUsage: { + inputTokens: 5000, + outputTokens: 1200, + cacheCreationTokens: 0, + cacheReadTokens: 0, + thinkingTokens: 50, + }, + }; + fs.writeFileSync(path.join(workspaceDir, `${sessionId}.settings.json`), JSON.stringify(settings, null, 2)); + + // Create JSONL session + const lines = [ + JSON.stringify({ + type: 'session_start', + id: sessionId, + title: 'Fix authentication bug', + sessionTitle: 'Auth Bug Fix', + owner: 'testuser', + version: 2, + cwd: '/home/user/project', + }), + JSON.stringify({ + type: 'message', + id: 'msg-001', + timestamp: '2026-01-15T10:00:01.000Z', + message: { + role: 'user', + content: [{ type: 'text', text: 'Fix the authentication bug in login.ts' }], + }, + }), + JSON.stringify({ + type: 'message', + id: 'msg-002', + timestamp: '2026-01-15T10:00:05.000Z', + message: { + role: 'assistant', + content: [ + { type: 'tool_use', id: 'tc-001', name: 'Read', input: { file_path: '/home/user/project/login.ts' } }, + ], + }, + }), + JSON.stringify({ + type: 'message', + id: 'msg-003', + timestamp: '2026-01-15T10:00:06.000Z', + message: { + role: 'user', + content: [{ type: 'tool_result', tool_use_id: 'tc-001', content: 'export function login() { ... 
}' }], + }, + }), + JSON.stringify({ + type: 'message', + id: 'msg-004', + timestamp: '2026-01-15T10:00:08.000Z', + message: { + role: 'assistant', + content: [ + { + type: 'tool_use', + id: 'tc-002', + name: 'Edit', + input: { file_path: '/home/user/project/login.ts', old_str: 'old code', new_str: 'new code' }, + }, + ], + }, + }), + JSON.stringify({ + type: 'message', + id: 'msg-005', + timestamp: '2026-01-15T10:00:09.000Z', + message: { + role: 'user', + content: [{ type: 'tool_result', tool_use_id: 'tc-002', content: 'File edited successfully' }], + }, + }), + JSON.stringify({ + type: 'message', + id: 'msg-006', + timestamp: '2026-01-15T10:00:10.000Z', + message: { + role: 'assistant', + content: [{ type: 'text', text: 'I found the issue in login.ts. The token validation was missing.' }], + }, + }), + JSON.stringify({ + type: 'message', + id: 'msg-007', + timestamp: '2026-01-15T10:00:12.000Z', + message: { + role: 'user', + content: [{ type: 'text', text: 'Great, please also add error handling' }], + }, + }), + JSON.stringify({ + type: 'message', + id: 'msg-008', + timestamp: '2026-01-15T10:00:15.000Z', + message: { + role: 'assistant', + content: [{ type: 'text', text: 'Done. I added try-catch blocks and proper error messages.' }], + }, + }), + JSON.stringify({ + type: 'todo_state', + id: 'todo-001', + timestamp: '2026-01-15T10:00:15.000Z', + todos: { + todos: '1. [completed] Fix token validation\n2. [in_progress] Add error handling\n3. 
[pending] Write tests', + }, + messageIndex: 3, + }), + ]; + + fs.writeFileSync(path.join(workspaceDir, `${sessionId}.jsonl`), lines.join('\n') + '\n'); + + return { + root, + cleanup: () => fs.rmSync(root, { recursive: true, force: true }), + }; +} + +/** + * Create a temporary directory with Cursor agent-transcript fixtures + */ +export function createCursorFixture(): FixtureDir { + const root = fs.mkdtempSync(path.join(os.tmpdir(), 'test-cursor-')); + const projectDir = path.join(root, '-test-project'); + const transcriptsDir = path.join(projectDir, 'agent-transcripts'); + const sessionId = 'cccccccc-1111-2222-3333-444444444444'; + const sessionDir = path.join(transcriptsDir, sessionId); + fs.mkdirSync(sessionDir, { recursive: true }); + + const lines = [ + JSON.stringify({ + role: 'user', + message: { + content: [{ type: 'text', text: '\nFix the authentication bug in login.ts\n' }], + }, + }), + JSON.stringify({ + role: 'assistant', + message: { + content: [{ type: 'text', text: "I'll look into the login.ts file to find the authentication bug." }], + }, + }), + JSON.stringify({ + role: 'assistant', + message: { + content: [ + { type: 'tool_use', id: 'tc-001', name: 'read_file', input: { file_path: '/home/user/project/login.ts' } }, + ], + }, + }), + JSON.stringify({ + role: 'user', + message: { + content: [{ type: 'tool_result', tool_use_id: 'tc-001', content: 'export function login() { ... }' }], + }, + }), + JSON.stringify({ + role: 'assistant', + message: { + content: [ + { + type: 'tool_use', + id: 'tc-002', + name: 'edit_file', + input: { file_path: '/home/user/project/login.ts', old_str: 'old code', new_str: 'new code' }, + }, + ], + }, + }), + JSON.stringify({ + role: 'user', + message: { + content: [{ type: 'tool_result', tool_use_id: 'tc-002', content: 'File edited successfully' }], + }, + }), + JSON.stringify({ + role: 'assistant', + message: { + content: [{ type: 'text', text: 'I found the issue in login.ts. The token validation was missing.' 
}], + }, + }), + JSON.stringify({ + role: 'user', + message: { + content: [{ type: 'text', text: '\nGreat, please also add error handling\n' }], + }, + }), + JSON.stringify({ + role: 'assistant', + message: { + content: [{ type: 'text', text: 'Done. I added try-catch blocks and proper error messages.' }], + }, + }), + ]; + + fs.writeFileSync(path.join(sessionDir, `${sessionId}.jsonl`), lines.join('\n') + '\n'); + + return { + root, + cleanup: () => fs.rmSync(root, { recursive: true, force: true }), + }; +} + +/** + * Create a temporary directory with Amp session fixtures + */ +export function createAmpFixture(): FixtureDir { + const root = fs.mkdtempSync(path.join(os.tmpdir(), 'test-amp-')); + + const session = { + id: 'thread-amp-test-1', + created: 1705312800000, + messages: [ + { role: 'user', content: 'Fix the authentication bug in login.ts' }, + { + role: 'assistant', + content: 'I found the issue in login.ts. The token validation was missing.', + model: 'claude-sonnet-4', + usage: { input_tokens: 500, output_tokens: 450 }, + }, + { role: 'user', content: 'Great, please also add error handling' }, + { + role: 'assistant', + content: 'Done. I added try-catch blocks and proper error messages.', + model: 'claude-sonnet-4', + usage: { input_tokens: 600, output_tokens: 380 }, + }, + ], + }; + + fs.writeFileSync(path.join(root, 'thread-amp-test-1.json'), JSON.stringify(session, null, 2)); + + return { + root, + cleanup: () => fs.rmSync(root, { recursive: true, force: true }), + }; +} + +/** + * Create a temporary directory with Kiro session fixtures + */ +export function createKiroFixture(): FixtureDir { + const root = fs.mkdtempSync(path.join(os.tmpdir(), 'test-kiro-')); + + const session = { + workspacePath: '/home/user/project', + selectedModel: 'claude-sonnet-4', + history: [ + { role: 'human', content: 'Fix the authentication bug in login.ts' }, + { role: 'assistant', content: 'I found the issue in login.ts. The token validation was missing.' 
}, + { role: 'human', content: 'Great, please also add error handling' }, + { role: 'assistant', content: 'Done. I added try-catch blocks and proper error messages.' }, + ], + }; + + fs.writeFileSync(path.join(root, 'session-kiro-test-1.json'), JSON.stringify(session, null, 2)); + + return { + root, + cleanup: () => fs.rmSync(root, { recursive: true, force: true }), + }; +} + +/** + * Create a temporary directory with Cline session fixtures (ui_messages.json format). + * This format is shared by cline, roo-code, and kilo-code. + */ +export function createClineFixture(): FixtureDir { + const root = fs.mkdtempSync(path.join(os.tmpdir(), 'test-cline-')); + const taskDir = path.join(root, 'task-cline-test-1'); + fs.mkdirSync(taskDir, { recursive: true }); + + const messages = [ + { ts: 1705312800000, type: 'say', say: 'task', text: 'Fix the authentication bug in login.ts' }, + { + ts: 1705312801000, + type: 'say', + say: 'text', + text: 'I found the issue in login.ts. The token validation was missing.', + }, + { ts: 1705312810000, type: 'say', say: 'task', text: 'Great, please also add error handling' }, + { ts: 1705312811000, type: 'say', say: 'text', text: 'Done. I added try-catch blocks and proper error messages.' }, + ]; + + fs.writeFileSync(path.join(taskDir, 'ui_messages.json'), JSON.stringify(messages, null, 2)); + + return { + root, + cleanup: () => fs.rmSync(root, { recursive: true, force: true }), + }; +} + +/** + * Create a temporary directory with Roo Code session fixtures (same format as Cline) + */ +export function createRooCodeFixture(): FixtureDir { + const root = fs.mkdtempSync(path.join(os.tmpdir(), 'test-roo-code-')); + const taskDir = path.join(root, 'task-roo-code-test-1'); + fs.mkdirSync(taskDir, { recursive: true }); + + const messages = [ + { ts: 1705312900000, type: 'say', say: 'task', text: 'Fix the authentication bug in login.ts' }, + { + ts: 1705312901000, + type: 'say', + say: 'text', + text: 'I found the issue in login.ts. 
The token validation was missing.', + }, + { ts: 1705312910000, type: 'say', say: 'task', text: 'Great, please also add error handling' }, + { ts: 1705312911000, type: 'say', say: 'text', text: 'Done. I added try-catch blocks and proper error messages.' }, + ]; + + fs.writeFileSync(path.join(taskDir, 'ui_messages.json'), JSON.stringify(messages, null, 2)); + + return { + root, + cleanup: () => fs.rmSync(root, { recursive: true, force: true }), + }; +} + +/** + * Create a temporary directory with Kilo Code session fixtures (same format as Cline) + */ +export function createKiloCodeFixture(): FixtureDir { + const root = fs.mkdtempSync(path.join(os.tmpdir(), 'test-kilo-code-')); + const taskDir = path.join(root, 'task-kilo-code-test-1'); + fs.mkdirSync(taskDir, { recursive: true }); + + const messages = [ + { ts: 1705313000000, type: 'say', say: 'task', text: 'Fix the authentication bug in login.ts' }, + { + ts: 1705313001000, + type: 'say', + say: 'text', + text: 'I found the issue in login.ts. The token validation was missing.', + }, + { ts: 1705313010000, type: 'say', say: 'task', text: 'Great, please also add error handling' }, + { ts: 1705313011000, type: 'say', say: 'text', text: 'Done. I added try-catch blocks and proper error messages.' }, + ]; + + fs.writeFileSync(path.join(taskDir, 'ui_messages.json'), JSON.stringify(messages, null, 2)); + + return { + root, + cleanup: () => fs.rmSync(root, { recursive: true, force: true }), + }; +} + +/** + * Create a temporary directory with Antigravity session fixtures (JSONL with type/content/timestamp) + */ +export function createAntigravityFixture(): FixtureDir { + const root = fs.mkdtempSync(path.join(os.tmpdir(), 'test-antigravity-')); + + const lines = [ + JSON.stringify({ + type: 'user', + content: 'Fix the authentication bug in login.ts', + timestamp: '2025-02-25T10:00:00Z', + }), + JSON.stringify({ + type: 'assistant', + content: 'I found the issue in login.ts. 
The token validation was missing.', + timestamp: '2025-02-25T10:00:05Z', + }), + JSON.stringify({ + type: 'user', + content: 'Great, please also add error handling', + timestamp: '2025-02-25T10:01:00Z', + }), + JSON.stringify({ + type: 'assistant', + content: 'Done. I added try-catch blocks and proper error messages.', + timestamp: '2025-02-25T10:01:10Z', + }), + ]; + + fs.writeFileSync(path.join(root, 'session.jsonl'), lines.join('\n') + '\n'); + + return { + root, + cleanup: () => fs.rmSync(root, { recursive: true, force: true }), + }; +} + +/** + * Create a temporary directory with Kimi session fixtures + * + * Mirrors Kimi CLI's share-dir layout: + * ~/.kimi/kimi.json + * ~/.kimi/sessions///{context.jsonl, metadata.json, wire.jsonl, state.json} + */ +export function createKimiFixture(): FixtureDir { + const root = fs.mkdtempSync(path.join(os.tmpdir(), 'test-kimi-')); + const kimiDir = path.join(root, '.kimi'); + + const workDirPath = '/home/user/project'; + const sessionId = 'test-kimi-session-1'; + const workDirHash = createHash('md5').update(workDirPath, 'utf8').digest('hex'); + const sessionDir = path.join(kimiDir, 'sessions', workDirHash, sessionId); + fs.mkdirSync(sessionDir, { recursive: true }); + + // ~/.kimi/kimi.json — work dir index used for resolving cwd from workdir hash + fs.writeFileSync( + path.join(kimiDir, 'kimi.json'), + JSON.stringify( + { + work_dirs: [{ path: workDirPath, kaos: 'local', last_session_id: sessionId }], + }, + null, + 2, + ), + ); + + // context.jsonl — includes string + block-array content plus special markers + const contextLines = [ + JSON.stringify({ + role: 'user', + content: [{ type: 'text', text: 'Fix the authentication bug in login.ts' }], + }), + JSON.stringify({ + role: 'assistant', + content: [ + { type: 'think', think: 'Need to inspect login.ts and validate token flow first.' }, + { type: 'text', text: 'I found the issue in login.ts. The token validation was missing.' 
}, + ], + tool_calls: [ + { + type: 'function', + id: 'tc-001', + function: { + name: 'ReadFile', + arguments: JSON.stringify({ file_path: '/home/user/project/login.ts' }), + }, + }, + ], + }), + JSON.stringify({ role: '_usage', token_count: 256 }), + JSON.stringify({ role: '_checkpoint', id: 0 }), + JSON.stringify({ + role: 'user', + content: 'Great, please also add error handling', + }), + JSON.stringify({ + role: 'assistant', + content: 'Done. I added try-catch blocks and proper error messages.', + }), + ]; + fs.writeFileSync(path.join(sessionDir, 'context.jsonl'), contextLines.join('\n') + '\n'); + + // metadata.json — optional in Kimi, but included here for schema/compat coverage + fs.writeFileSync( + path.join(sessionDir, 'metadata.json'), + JSON.stringify( + { + session_id: sessionId, + title: 'Fix auth bug', + title_generated: false, + archived: false, + archived_at: null, + wire_mtime: null, + }, + null, + 2, + ), + ); + + // wire.jsonl/state.json — present in real Kimi CLI session directories + fs.writeFileSync( + path.join(sessionDir, 'wire.jsonl'), + `${JSON.stringify({ timestamp: 1736935200, message: { type: 'TurnBegin', payload: { user_input: 'Fix the authentication bug in login.ts' } } })}\n`, + ); + fs.writeFileSync( + path.join(sessionDir, 'state.json'), + JSON.stringify( + { + version: 1, + approval: { yolo: false, auto_approve_actions: [] }, + dynamic_subagents: [], + additional_dirs: [], + }, + null, + 2, + ), + ); + + return { + root, + cleanup: () => fs.rmSync(root, { recursive: true, force: true }), + }; +} + +/** + * Create a temporary directory with Qwen Code session fixtures + * Storage: ~/.qwen/projects//chats/.jsonl + * sanitizeCwd replaces [^a-zA-Z0-9] with '-' + */ +export function createQwenCodeFixture(): FixtureDir { + const root = fs.mkdtempSync(path.join(os.tmpdir(), 'test-qwen-code-')); + // sanitizeCwd('/home/user/project') → '-home-user-project' + const sanitizedCwd = '/home/user/project'.replace(/[^a-zA-Z0-9]/g, '-'); + const 
chatsDir = path.join(root, sanitizedCwd, 'chats'); + fs.mkdirSync(chatsDir, { recursive: true }); + + const sessionId = 'test-qwen-code-session-1'; + const lines = [ + JSON.stringify({ + uuid: '00000000-0000-0000-0000-000000000001', + parentUuid: null, + sessionId, + timestamp: '2026-01-15T10:00:01.000Z', + type: 'user', + cwd: '/home/user/project', + version: '1.0.0', + gitBranch: 'main', + message: { role: 'user', parts: [{ text: 'Fix the authentication bug in login.ts' }] }, + }), + JSON.stringify({ + uuid: '00000000-0000-0000-0000-000000000002', + parentUuid: '00000000-0000-0000-0000-000000000001', + sessionId, + timestamp: '2026-01-15T10:00:05.000Z', + type: 'assistant', + cwd: '/home/user/project', + version: '1.0.0', + model: 'qwen3-coder', + message: { + role: 'model', + parts: [ + { text: 'Let me think about this...', thought: true }, + { text: 'I found the issue in login.ts. The token validation was missing.' }, + { functionCall: { name: 'read_file', args: { file_path: 'login.ts' } } }, + ], + }, + usageMetadata: { promptTokenCount: 100, candidatesTokenCount: 200, thoughtsTokenCount: 50 }, + }), + JSON.stringify({ + uuid: '00000000-0000-0000-0000-000000000005', + parentUuid: '00000000-0000-0000-0000-000000000002', + sessionId, + timestamp: '2026-01-15T10:00:07.000Z', + type: 'tool_result', + cwd: '/home/user/project', + version: '1.0.0', + toolCallResult: { + displayName: 'Edit', + status: 'ok', + resultDisplay: { + fileName: 'login.ts', + fileDiff: '+ validateToken(token);\n- // missing validation', + originalContent: '// missing validation', + diffStat: { model_added_lines: 1, model_removed_lines: 1 }, + }, + }, + }), + JSON.stringify({ + uuid: '00000000-0000-0000-0000-000000000003', + parentUuid: '00000000-0000-0000-0000-000000000005', + sessionId, + timestamp: '2026-01-15T10:00:10.000Z', + type: 'user', + cwd: '/home/user/project', + version: '1.0.0', + message: { role: 'user', parts: [{ text: 'Great, please also add error handling' }] }, + }), + 
JSON.stringify({ + uuid: '00000000-0000-0000-0000-000000000004', + parentUuid: '00000000-0000-0000-0000-000000000003', + sessionId, + timestamp: '2026-01-15T10:00:15.000Z', + type: 'assistant', + cwd: '/home/user/project', + version: '1.0.0', + model: 'qwen3-coder', + message: { + role: 'model', + parts: [{ text: 'Done. I added try-catch blocks and proper error messages.' }], + }, + }), + ]; + + fs.writeFileSync(path.join(chatsDir, `${sessionId}.jsonl`), lines.join('\n') + '\n'); + + return { + root, + cleanup: () => fs.rmSync(root, { recursive: true, force: true }), + }; +} + /** * Create OpenCode JSON-only fixture (legacy format) */ @@ -449,7 +1114,7 @@ export function createOpenCodeJsonFixture(): FixtureDir { directory: '/home/user/project', title: 'Fix authentication bug (JSON)', time: { created: now - 5000, updated: now }, - }) + }), ); // Create project file @@ -457,7 +1122,7 @@ export function createOpenCodeJsonFixture(): FixtureDir { fs.mkdirSync(projectDir, { recursive: true }); fs.writeFileSync( path.join(projectDir, `${projectId}.json`), - JSON.stringify({ id: projectId, worktree: '/home/user/project' }) + JSON.stringify({ id: projectId, worktree: '/home/user/project' }), ); // Create message files @@ -470,7 +1135,7 @@ export function createOpenCodeJsonFixture(): FixtureDir { sessionID: sessionId, role: 'user', time: { created: now - 4000 }, - }) + }), ); fs.writeFileSync( path.join(msgDir, 'msg_a1.json'), @@ -479,7 +1144,7 @@ export function createOpenCodeJsonFixture(): FixtureDir { sessionID: sessionId, role: 'assistant', time: { created: now - 3000, completed: now - 2500 }, - }) + }), ); // Create part files @@ -487,14 +1152,26 @@ export function createOpenCodeJsonFixture(): FixtureDir { fs.mkdirSync(partDirU1, { recursive: true }); fs.writeFileSync( path.join(partDirU1, 'prt_u1.json'), - JSON.stringify({ id: 'prt_u1', sessionID: sessionId, messageID: 'msg_u1', type: 'text', text: 'Fix the authentication bug in login.ts' }) + JSON.stringify({ + id: 
'prt_u1', + sessionID: sessionId, + messageID: 'msg_u1', + type: 'text', + text: 'Fix the authentication bug in login.ts', + }), ); const partDirA1 = path.join(storageDir, 'part', 'msg_a1'); fs.mkdirSync(partDirA1, { recursive: true }); fs.writeFileSync( path.join(partDirA1, 'prt_a1.json'), - JSON.stringify({ id: 'prt_a1', sessionID: sessionId, messageID: 'msg_a1', type: 'text', text: 'I found the issue. The token validation was missing.' }) + JSON.stringify({ + id: 'prt_a1', + sessionID: sessionId, + messageID: 'msg_a1', + type: 'text', + text: 'I found the issue. The token validation was missing.', + }), ); return { diff --git a/src/__tests__/forward-flags.test.ts b/src/__tests__/forward-flags.test.ts new file mode 100644 index 0000000..571339a --- /dev/null +++ b/src/__tests__/forward-flags.test.ts @@ -0,0 +1,283 @@ +import { describe, expect, it } from 'vitest'; +import { adapters } from '../parsers/registry.js'; +import type { UnifiedSession } from '../types/index.js'; +import { getDefaultHandoffInitArgs, getResumeCommand, resolveCrossToolForwarding } from '../utils/resume.js'; + +describe('cross-tool forwarding', () => { + it('enforces codex precedence yolo > full-auto > sandbox', () => { + const resolved = resolveCrossToolForwarding('codex', { + rawArgs: ['--yolo', '--full-auto', '--sandbox', 'workspace-write', '--ask-for-approval', 'never'], + }); + + expect(resolved.mappedArgs).toEqual(['--dangerously-bypass-approvals-and-sandbox']); + expect(resolved.passthroughArgs).toEqual([]); + expect(resolved.warnings.length).toBeGreaterThan(0); + }); + + it('passes unmapped flags through unchanged', () => { + const resolved = resolveCrossToolForwarding('claude', { + rawArgs: ['--search', '--unknown-flag', 'value'], + }); + + expect(resolved.mappedArgs).toEqual([]); + expect(resolved.passthroughArgs).toEqual(['--search', '--unknown-flag', 'value']); + expect(resolved.extraArgs).toEqual(['--search', '--unknown-flag', 'value']); + }); + + it('keeps unsupported known 
flags as passthrough when target has no mapping', () => { + const resolved = resolveCrossToolForwarding('claude', { + rawArgs: ['--full-auto'], + }); + + expect(resolved.mappedArgs).toEqual([]); + expect(resolved.passthroughArgs).toEqual(['--full-auto']); + }); + + it('maps add-dir category into gemini include-directories', () => { + const resolved = resolveCrossToolForwarding('gemini', { + rawArgs: ['--add-dir', '/tmp/workspace'], + }); + + expect(resolved.mappedArgs).toEqual(['--include-directories', '/tmp/workspace']); + expect(resolved.passthroughArgs).toEqual([]); + }); + + it('maps cursor target using agent semantics', () => { + expect(adapters.cursor.binaryName).toBe('cursor-agent'); + expect(adapters.cursor.binaryFallbacks).toEqual(['agent']); + + const resolved = resolveCrossToolForwarding('cursor', { + rawArgs: ['--sandbox', 'workspace-write', '--model', 'gpt-5', '--approve-mcps'], + }); + + expect(resolved.mappedArgs).toEqual(['--model', 'gpt-5', '--sandbox', 'enabled', '--approve-mcps']); + expect(resolved.passthroughArgs).toEqual([]); + }); + + it('maps droid yolo-like forwarding into skip-permissions-unsafe', () => { + const resolved = resolveCrossToolForwarding('droid', { + rawArgs: ['--yolo', '--model', 'gpt-5.3-codex'], + }); + + expect(resolved.mappedArgs).toEqual(['--skip-permissions-unsafe', '--model', 'gpt-5.3-codex']); + expect(resolved.passthroughArgs).toEqual([]); + }); + + it('maps droid approval-mode yolo into skip-permissions-unsafe', () => { + const resolved = resolveCrossToolForwarding('droid', { + rawArgs: ['--approval-mode', 'yolo'], + }); + + expect(resolved.mappedArgs).toEqual(['--skip-permissions-unsafe']); + expect(resolved.passthroughArgs).toEqual([]); + }); + + it('maps kimi allow-all forwarding into --yolo and preserves mapped args', () => { + const resolved = resolveCrossToolForwarding('kimi', { + rawArgs: ['--allow-all', '--add-dir', '/tmp/workspace', '--model', 'kimi-k2.5'], + }); + + 
expect(resolved.mappedArgs).toEqual(['--yolo', '--model', 'kimi-k2.5', '--add-dir', '/tmp/workspace']); + expect(resolved.passthroughArgs).toEqual([]); + }); + + it('maps amp yolo-like forwarding into --dangerously-allow-all', () => { + const resolved = resolveCrossToolForwarding('amp', { + rawArgs: ['--yolo'], + }); + + expect(resolved.mappedArgs).toEqual(['--dangerously-allow-all']); + expect(resolved.passthroughArgs).toEqual([]); + }); + + it('maps amp approval-mode yolo into --dangerously-allow-all', () => { + const resolved = resolveCrossToolForwarding('amp', { + rawArgs: ['--approval-mode', 'yolo'], + }); + + expect(resolved.mappedArgs).toEqual(['--dangerously-allow-all']); + expect(resolved.passthroughArgs).toEqual([]); + }); + it('maps kiro yolo-like forwarding into --trust-all-tools', () => { + const resolved = resolveCrossToolForwarding('kiro', { + rawArgs: ['--yolo', '--agent', 'reviewer'], + }); + + expect(resolved.mappedArgs).toEqual(['--trust-all-tools', '--agent', 'reviewer']); + expect(resolved.passthroughArgs).toEqual([]); + }); + + it('maps crush ask-for-approval never into --yolo', () => { + const resolved = resolveCrossToolForwarding('crush', { + rawArgs: ['--ask-for-approval', 'never'], + }); + + expect(resolved.mappedArgs).toEqual(['--yolo']); + expect(resolved.passthroughArgs).toEqual([]); + }); + + it('maps qwen-code allow-all forwarding into approval-mode yolo', () => { + const resolved = resolveCrossToolForwarding('qwen-code', { + rawArgs: ['--allow-all'], + }); + + expect(resolved.mappedArgs).toEqual(['--approval-mode', 'yolo']); + expect(resolved.passthroughArgs).toEqual([]); + }); + + it('consumes unsupported approval and permission forwarding for opencode', () => { + const resolved = resolveCrossToolForwarding('opencode', { + rawArgs: ['--approval-mode', 'plan', '--permission-mode', 'plan'], + }); + + expect(resolved.mappedArgs).toEqual([]); + expect(resolved.passthroughArgs).toEqual([]); + expect(resolved.warnings).toContain( + 
'OpenCode: auto-approval, permission, and sandbox forwarding flags are not supported and were ignored.', + ); + }); + + it('does not apply dangerous default init flags for handoff targets', () => { + expect(getDefaultHandoffInitArgs('claude')).toEqual([]); + expect(getDefaultHandoffInitArgs('copilot')).toEqual([]); + expect(getDefaultHandoffInitArgs('gemini')).toEqual([]); + expect(getDefaultHandoffInitArgs('cursor')).toEqual([]); + expect(getDefaultHandoffInitArgs('droid')).toEqual([]); + expect(getDefaultHandoffInitArgs('kimi')).toEqual([]); + expect(getDefaultHandoffInitArgs('amp')).toEqual([]); + expect(getDefaultHandoffInitArgs('kiro')).toEqual([]); + expect(getDefaultHandoffInitArgs('crush')).toEqual([]); + expect(getDefaultHandoffInitArgs('qwen-code')).toEqual([]); + + expect(getDefaultHandoffInitArgs('codex')).toEqual([ + '-c', + 'model_reasoning_effort="high"', + '-c', + 'model_reasoning_summary="detailed"', + '-c', + 'model_supports_reasoning_summaries=true', + ]); + }); + + it('keeps codex reasoning defaults when explicit forwarding args are present', () => { + const args = getDefaultHandoffInitArgs('codex', [ + '--dangerously-bypass-approvals-and-sandbox', + '--sandbox', + 'workspace-write', + ]); + + expect(args).toEqual([ + '-c', + 'model_reasoning_effort="high"', + '-c', + 'model_reasoning_summary="detailed"', + '-c', + 'model_supports_reasoning_summaries=true', + ]); + }); + + it('keeps codex reasoning defaults when approval-mode is explicitly set', () => { + const args = getDefaultHandoffInitArgs('codex', ['--approval-mode', 'yolo']); + + expect(args).toEqual([ + '-c', + 'model_reasoning_effort="high"', + '-c', + 'model_reasoning_summary="detailed"', + '-c', + 'model_supports_reasoning_summaries=true', + ]); + }); + + it('shows mapped forward args in cross-tool command preview', () => { + const session: UnifiedSession = { + id: 'abc123456789', + source: 'claude', + cwd: '/tmp/project', + lines: 10, + bytes: 120, + createdAt: new 
Date('2026-02-20T00:00:00.000Z'), + updatedAt: new Date('2026-02-20T00:00:00.000Z'), + originalPath: '/tmp/session.jsonl', + }; + + const command = getResumeCommand(session, 'codex', { + rawArgs: ['--yolo', '--search'], + }); + + expect(command).toContain('continues resume abc123456789 --in codex'); + expect(command).toContain('--dangerously-bypass-approvals-and-sandbox'); + expect(command).toContain('--search'); + }); + + it('shows both preferred and fallback binaries in cursor native command preview', () => { + const session: UnifiedSession = { + id: 'abc123456789', + source: 'cursor', + cwd: '/tmp/project', + lines: 10, + bytes: 120, + createdAt: new Date('2026-02-20T00:00:00.000Z'), + updatedAt: new Date('2026-02-20T00:00:00.000Z'), + originalPath: '/tmp/session.jsonl', + }; + + expect(getResumeCommand(session)).toBe('cursor-agent --resume abc123456789 (or: agent --resume abc123456789)'); + }); + + it('does not add default approval flags to gemini command preview', () => { + const session: UnifiedSession = { + id: 'abc123456789', + source: 'claude', + cwd: '/tmp/project', + lines: 10, + bytes: 120, + createdAt: new Date('2026-02-20T00:00:00.000Z'), + updatedAt: new Date('2026-02-20T00:00:00.000Z'), + originalPath: '/tmp/session.jsonl', + }; + + const command = getResumeCommand(session, 'gemini'); + + expect(command).toContain('continues resume abc123456789 --in gemini'); + expect(command).not.toContain('--yolo'); + expect(command).not.toContain('--approval-mode yolo'); + }); + + it('does not add default approval flags to droid command preview', () => { + const session: UnifiedSession = { + id: 'abc123456789', + source: 'claude', + cwd: '/tmp/project', + lines: 10, + bytes: 120, + createdAt: new Date('2026-02-20T00:00:00.000Z'), + updatedAt: new Date('2026-02-20T00:00:00.000Z'), + originalPath: '/tmp/session.jsonl', + }; + + const command = getResumeCommand(session, 'droid'); + + expect(command).toContain('continues resume abc123456789 --in droid'); + 
expect(command).not.toContain('--skip-permissions-unsafe'); + }); + + it('does not add default approval flags to qwen command preview', () => { + const session: UnifiedSession = { + id: 'abc123456789', + source: 'claude', + cwd: '/tmp/project', + lines: 10, + bytes: 120, + createdAt: new Date('2026-02-20T00:00:00.000Z'), + updatedAt: new Date('2026-02-20T00:00:00.000Z'), + originalPath: '/tmp/session.jsonl', + }; + + const command = getResumeCommand(session, 'qwen-code'); + + expect(command).toContain('continues resume abc123456789 --in qwen-code'); + expect(command).not.toContain('--yolo'); + expect(command).not.toContain('--approval-mode yolo'); + }); +}); diff --git a/src/__tests__/injection-test.ts b/src/__tests__/injection-test.ts index 729eff7..aae62c6 100644 --- a/src/__tests__/injection-test.ts +++ b/src/__tests__/injection-test.ts @@ -11,15 +11,14 @@ */ import * as fs from 'fs'; -import * as path from 'path'; import * as os from 'os'; +import * as path from 'path'; import { extractClaudeContext } from '../parsers/claude.js'; +import { extractCodexContext } from '../parsers/codex.js'; import { extractCopilotContext } from '../parsers/copilot.js'; import { extractGeminiContext } from '../parsers/gemini.js'; -import { extractCodexContext } from '../parsers/codex.js'; -import { extractOpenCodeContext } from '../parsers/opencode.js'; +import type { ConversationMessage, SessionContext, UnifiedSession } from '../types/index.js'; import { generateHandoffMarkdown } from '../utils/markdown.js'; -import type { UnifiedSession, SessionContext, ConversationMessage } from '../types/index.js'; // --------------------------------------------------------------------------- // Constants @@ -397,12 +396,7 @@ async function readBackFromTarget(target: Source, targetPath: string): Promise JSON.stringify(row)).join('\n'); + fs.writeFileSync(filePath, `${content}\n`, 'utf8'); +} + +function createKimiSession(opts: { + homeDir: string; + workDirPath: string; + sessionId: string; + 
messages: unknown[]; + metadata?: Record; + rawMetadata?: string; +}): string { + const hashDir = md5(opts.workDirPath); + const sessionDir = path.join(opts.homeDir, '.kimi', 'sessions', hashDir, opts.sessionId); + fs.mkdirSync(sessionDir, { recursive: true }); + writeJsonl(path.join(sessionDir, 'context.jsonl'), opts.messages); + + if (opts.rawMetadata !== undefined) { + fs.writeFileSync(path.join(sessionDir, 'metadata.json'), opts.rawMetadata, 'utf8'); + } else if (opts.metadata) { + fs.writeFileSync(path.join(sessionDir, 'metadata.json'), JSON.stringify(opts.metadata), 'utf8'); + } + + return sessionDir; +} + +function writeKimiConfig(homeDir: string, workDirs: Array<{ path: string; kaos?: string }>): void { + const kimiDir = path.join(homeDir, '.kimi'); + fs.mkdirSync(kimiDir, { recursive: true }); + fs.writeFileSync(path.join(kimiDir, 'kimi.json'), JSON.stringify({ work_dirs: workDirs }, null, 2), 'utf8'); +} + +async function loadKimiParserWithHome(homeDir: string): Promise { + vi.resetModules(); + vi.doMock('os', async () => { + const actual = await vi.importActual('os'); + return { + ...actual, + homedir: () => homeDir, + }; + }); + return import('../parsers/kimi.js'); +} + +afterEach(() => { + vi.doUnmock('os'); + vi.resetModules(); + for (const tmpHome of tmpHomes) { + fs.rmSync(tmpHome, { recursive: true, force: true }); + } + tmpHomes.length = 0; +}); + +describe('kimi parser hardening', () => { + it('discovers sessions even when metadata.json is missing', async () => { + const home = fs.mkdtempSync(path.join(os.tmpdir(), 'kimi-parser-')); + tmpHomes.push(home); + const workDirPath = '/tmp/project-no-metadata'; + const sessionId = 'missing-metadata-session'; + + writeKimiConfig(home, [{ path: workDirPath }]); + createKimiSession({ + homeDir: home, + workDirPath, + sessionId, + messages: [ + { role: 'user', content: 'Fix parser discovery' }, + { role: 'assistant', content: 'Done.' 
}, + ], + }); + + const { parseKimiSessions } = await loadKimiParserWithHome(home); + const sessions = await parseKimiSessions(); + + expect(sessions).toHaveLength(1); + expect(sessions[0].id).toBe(sessionId); + expect(sessions[0].cwd).toBe(workDirPath); + expect(sessions[0].summary).toBe('Fix parser discovery'); + }); + + it('accepts nullable wire_mtime and numeric archived_at metadata values', async () => { + const home = fs.mkdtempSync(path.join(os.tmpdir(), 'kimi-parser-')); + tmpHomes.push(home); + const workDirPath = '/tmp/project-schema-compat'; + const sessionId = 'schema-compat-session'; + + writeKimiConfig(home, [{ path: workDirPath }]); + createKimiSession({ + homeDir: home, + workDirPath, + sessionId, + messages: [ + { role: 'user', content: 'Schema compatibility check' }, + { role: 'assistant', content: 'Looks good.' }, + ], + metadata: { + session_id: sessionId, + archived: false, + archived_at: 1735086302.21, + wire_mtime: null, + }, + }); + + const { parseKimiSessions } = await loadKimiParserWithHome(home); + const sessions = await parseKimiSessions(); + + expect(sessions).toHaveLength(1); + expect(sessions[0].id).toBe(sessionId); + }); + + it('matches cwd deterministically when multiple work_dirs exist', async () => { + const home = fs.mkdtempSync(path.join(os.tmpdir(), 'kimi-parser-')); + tmpHomes.push(home); + const workDirA = '/tmp/workdir-alpha'; + const workDirB = '/tmp/workdir-beta'; + const sessionId = 'hash-match-session'; + + // Put A first to ensure buggy "first entry wins" behavior would fail this test. + writeKimiConfig(home, [{ path: workDirA }, { path: workDirB }]); + createKimiSession({ + homeDir: home, + workDirPath: workDirB, + sessionId, + messages: [ + { role: 'user', content: 'Use the correct repository cwd' }, + { role: 'assistant', content: 'Acknowledged.' 
}, + ], + metadata: { + session_id: sessionId, + }, + }); + + const { parseKimiSessions } = await loadKimiParserWithHome(home); + const sessions = await parseKimiSessions(); + + expect(sessions).toHaveLength(1); + expect(sessions[0].cwd).toBe(workDirB); + expect(sessions[0].cwd).not.toBe(workDirA); + }); + + it('uses latest _usage snapshot but does not fabricate input/output token split', async () => { + const home = fs.mkdtempSync(path.join(os.tmpdir(), 'kimi-parser-')); + tmpHomes.push(home); + const workDirPath = '/tmp/project-token-usage'; + const sessionId = 'token-usage-session'; + + writeKimiConfig(home, [{ path: workDirPath }]); + const sessionDir = createKimiSession({ + homeDir: home, + workDirPath, + sessionId, + messages: [ + { role: 'user', content: 'Track token count correctly' }, + { role: 'assistant', content: [{ type: 'text', text: 'processing' }] }, + { role: '_usage', token_count: 100 }, + { role: '_usage', token_count: 250 }, + ], + metadata: { + session_id: sessionId, + }, + }); + + const { extractKimiContext } = await loadKimiParserWithHome(home); + const session: UnifiedSession = { + id: sessionId, + source: 'kimi', + cwd: workDirPath, + repo: '', + lines: 4, + bytes: fs.statSync(path.join(sessionDir, 'context.jsonl')).size, + createdAt: new Date(), + updatedAt: new Date(), + originalPath: sessionDir, + summary: 'Token test', + }; + + const context = await extractKimiContext(session); + expect(context.sessionNotes?.tokenUsage).toBeUndefined(); + }); + + it('falls back safely when metadata is malformed and when work_dir hash has no match', async () => { + const home = fs.mkdtempSync(path.join(os.tmpdir(), 'kimi-parser-')); + tmpHomes.push(home); + const sessionId = 'malformed-metadata-session'; + const unknownWorkDir = '/tmp/workdir-not-listed'; + + writeKimiConfig(home, [{ path: '/tmp/other-workdir' }]); + createKimiSession({ + homeDir: home, + workDirPath: unknownWorkDir, + sessionId, + messages: [ + { role: 'user', content: 'Keep parsing 
despite malformed metadata' }, + { role: 'assistant', content: 'Will do.' }, + ], + rawMetadata: '{ this-is-not-valid-json', + }); + + const { parseKimiSessions } = await loadKimiParserWithHome(home); + const sessions = await parseKimiSessions(); + + expect(sessions).toHaveLength(1); + expect(sessions[0].id).toBe(sessionId); + expect(sessions[0].cwd).toBe(''); + }); + + it('excludes explicitly archived sessions but keeps non-archived ones', async () => { + const home = fs.mkdtempSync(path.join(os.tmpdir(), 'kimi-parser-')); + tmpHomes.push(home); + const workDirPath = '/tmp/project-archive-behavior'; + + writeKimiConfig(home, [{ path: workDirPath }]); + createKimiSession({ + homeDir: home, + workDirPath, + sessionId: 'active-session', + messages: [ + { role: 'user', content: 'Active session should remain visible' }, + { role: 'assistant', content: 'Visible.' }, + ], + metadata: { + session_id: 'active-session', + archived: false, + }, + }); + createKimiSession({ + homeDir: home, + workDirPath, + sessionId: 'archived-session', + messages: [ + { role: 'user', content: 'Archived session should be hidden' }, + { role: 'assistant', content: 'Hidden.' }, + ], + metadata: { + session_id: 'archived-session', + archived: true, + }, + }); + + const { parseKimiSessions } = await loadKimiParserWithHome(home); + const sessions = await parseKimiSessions(); + + expect(sessions.map((s) => s.id)).toContain('active-session'); + expect(sessions.map((s) => s.id)).not.toContain('archived-session'); + }); +}); diff --git a/src/__tests__/parsers.test.ts b/src/__tests__/parsers.test.ts deleted file mode 100644 index dde64b0..0000000 --- a/src/__tests__/parsers.test.ts +++ /dev/null @@ -1,215 +0,0 @@ -/** - * Integration tests for all 5 parsers using real session data. - * Tests parseAll + extractContext for each tool. 
- */ -import { describe, it, expect } from 'vitest'; -import { - parseClaudeSessions, - extractClaudeContext, - parseCopilotSessions, - extractCopilotContext, - parseGeminiSessions, - extractGeminiContext, - parseCodexSessions, - extractCodexContext, - parseOpenCodeSessions, - extractOpenCodeContext, -} from '../parsers/index.js'; -import type { UnifiedSession, SessionContext } from '../types/index.js'; - -/** - * Validate that a UnifiedSession has all required fields - */ -function validateSession(session: UnifiedSession, source: string) { - expect(session.id).toBeTruthy(); - expect(session.source).toBe(source); - expect(session.cwd).toBeDefined(); - expect(typeof session.lines).toBe('number'); - expect(typeof session.bytes).toBe('number'); - expect(session.createdAt).toBeInstanceOf(Date); - expect(session.updatedAt).toBeInstanceOf(Date); - expect(session.originalPath).toBeTruthy(); -} - -/** - * Validate that a SessionContext has all required fields - */ -function validateContext(ctx: SessionContext) { - expect(ctx.session).toBeDefined(); - expect(ctx.session.id).toBeTruthy(); - expect(Array.isArray(ctx.recentMessages)).toBe(true); - expect(Array.isArray(ctx.filesModified)).toBe(true); - expect(Array.isArray(ctx.pendingTasks)).toBe(true); - expect(typeof ctx.markdown).toBe('string'); - expect(ctx.markdown.length).toBeGreaterThan(0); - expect(ctx.markdown).toContain('Session Handoff Context'); -} - -/** - * Validate that handoff markdown has proper structure - */ -function validateHandoffMarkdown(markdown: string, source: string) { - expect(markdown).toContain('# Session Handoff Context'); - expect(markdown).toContain('## Original Session'); - expect(markdown).toContain('**Session ID**'); - expect(markdown).toContain('**Last Active**'); - expect(markdown).toContain('Continue this session'); -} - -describe('Claude Parser', () => { - it('should find Claude sessions', async () => { - const sessions = await parseClaudeSessions(); - 
expect(sessions.length).toBeGreaterThan(0); - for (const session of sessions.slice(0, 3)) { - validateSession(session, 'claude'); - } - }); - - it('should have sorted sessions (newest first)', async () => { - const sessions = await parseClaudeSessions(); - for (let i = 1; i < Math.min(sessions.length, 5); i++) { - expect(sessions[i - 1].updatedAt.getTime()).toBeGreaterThanOrEqual(sessions[i].updatedAt.getTime()); - } - }); - - it('should extract context from a Claude session', async () => { - const sessions = await parseClaudeSessions(); - expect(sessions.length).toBeGreaterThan(0); - - const ctx = await extractClaudeContext(sessions[0]); - validateContext(ctx); - validateHandoffMarkdown(ctx.markdown, 'claude'); - - // Should have at least some messages - expect(ctx.recentMessages.length).toBeGreaterThan(0); - for (const msg of ctx.recentMessages) { - expect(['user', 'assistant', 'system', 'tool']).toContain(msg.role); - expect(msg.content.length).toBeGreaterThan(0); - } - }); -}); - -describe('Copilot Parser', () => { - it('should find Copilot sessions', async () => { - const sessions = await parseCopilotSessions(); - expect(sessions.length).toBeGreaterThan(0); - for (const session of sessions.slice(0, 3)) { - validateSession(session, 'copilot'); - } - }); - - it('should parse workspace.yaml fields correctly', async () => { - const sessions = await parseCopilotSessions(); - const session = sessions[0]; - expect(session.cwd).toBeTruthy(); - expect(session.createdAt).toBeInstanceOf(Date); - expect(session.updatedAt).toBeInstanceOf(Date); - }); - - it('should extract context from a Copilot session', async () => { - const sessions = await parseCopilotSessions(); - // Find a session with events - const sessWithEvents = sessions.find(s => s.bytes > 0); - if (!sessWithEvents) return; // skip if no sessions with events - - const ctx = await extractCopilotContext(sessWithEvents); - validateContext(ctx); - validateHandoffMarkdown(ctx.markdown, 'copilot'); - }); -}); - 
-describe('Gemini Parser', () => { - it('should find Gemini sessions', async () => { - const sessions = await parseGeminiSessions(); - expect(sessions.length).toBeGreaterThan(0); - for (const session of sessions.slice(0, 3)) { - validateSession(session, 'gemini'); - } - }); - - it('should extract context with messages from a Gemini session', async () => { - const sessions = await parseGeminiSessions(); - expect(sessions.length).toBeGreaterThan(0); - - const ctx = await extractGeminiContext(sessions[0]); - validateContext(ctx); - validateHandoffMarkdown(ctx.markdown, 'gemini'); - - expect(ctx.recentMessages.length).toBeGreaterThan(0); - // Gemini messages should have user and assistant (gemini) roles - const userMsgs = ctx.recentMessages.filter(m => m.role === 'user'); - const asstMsgs = ctx.recentMessages.filter(m => m.role === 'assistant'); - expect(userMsgs.length).toBeGreaterThan(0); - expect(asstMsgs.length).toBeGreaterThan(0); - }); -}); - -describe('Codex Parser', () => { - it('should find Codex sessions', async () => { - const sessions = await parseCodexSessions(); - expect(sessions.length).toBeGreaterThan(0); - for (const session of sessions.slice(0, 3)) { - validateSession(session, 'codex'); - } - }); - - it('should parse session metadata from filename', async () => { - const sessions = await parseCodexSessions(); - const session = sessions[0]; - expect(session.id).toBeTruthy(); - expect(session.createdAt).toBeInstanceOf(Date); - }); - - it('should extract context with agent_message from Codex session', async () => { - const sessions = await parseCodexSessions(); - expect(sessions.length).toBeGreaterThan(0); - - const ctx = await extractCodexContext(sessions[0]); - validateContext(ctx); - validateHandoffMarkdown(ctx.markdown, 'codex'); - - // After the fix, should find both user and agent messages - expect(ctx.recentMessages.length).toBeGreaterThan(0); - const userMsgs = ctx.recentMessages.filter(m => m.role === 'user'); - const asstMsgs = 
ctx.recentMessages.filter(m => m.role === 'assistant'); - expect(userMsgs.length).toBeGreaterThan(0); - expect(asstMsgs.length).toBeGreaterThan(0); - }); -}); - -describe('OpenCode Parser', () => { - it('should find OpenCode sessions (SQLite or JSON)', async () => { - const sessions = await parseOpenCodeSessions(); - expect(sessions.length).toBeGreaterThan(0); - for (const session of sessions.slice(0, 3)) { - validateSession(session, 'opencode'); - } - }); - - it('should find more sessions than JSON-only (SQLite has 5 vs 2 JSON)', async () => { - const sessions = await parseOpenCodeSessions(); - // With SQLite support, we should have more sessions - expect(sessions.length).toBeGreaterThanOrEqual(2); - }); - - it('should extract context from an OpenCode session', async () => { - const sessions = await parseOpenCodeSessions(); - expect(sessions.length).toBeGreaterThan(0); - - // Find a session with messages - let ctx: SessionContext | null = null; - for (const session of sessions.slice(0, 3)) { - const c = await extractOpenCodeContext(session); - if (c.recentMessages.length > 0) { - ctx = c; - break; - } - } - - if (ctx) { - validateContext(ctx); - validateHandoffMarkdown(ctx.markdown, 'opencode'); - expect(ctx.recentMessages.length).toBeGreaterThan(0); - } - }); -}); diff --git a/src/__tests__/real-e2e-full.ts b/src/__tests__/real-e2e-full.ts index badf859..878cdf3 100644 --- a/src/__tests__/real-e2e-full.ts +++ b/src/__tests__/real-e2e-full.ts @@ -1,6 +1,6 @@ /** * REAL E2E Test: cli-continues cross-tool conversion pipeline - * + * * Tests the ACTUAL flow: * 1. extractContext() parses real sessions (NOT test artifacts) * 2. Generated markdown contains real conversation content @@ -8,11 +8,11 @@ * 4. 
Semantic verification: target must reference specific facts from the source */ -import { getAllSessions, extractContext } from '../utils/index.js'; import { execSync } from 'child_process'; import * as fs from 'fs'; import * as path from 'path'; -import type { UnifiedSession, SessionContext, SessionSource } from '../types/index.js'; +import type { SessionContext, SessionSource, UnifiedSession } from '../types/index.js'; +import { extractContext, getAllSessions } from '../utils/index.js'; const RESULTS_DIR = path.join(process.env.HOME!, '.continues', 'real-e2e'); @@ -40,27 +40,27 @@ async function pickRealSessions(): Promise> { const picked = new Map(); for (const source of ['claude', 'copilot', 'gemini', 'codex', 'opencode'] as const) { - const sourceSessions = sessions.filter(s => s.source === source); - + const sourceSessions = sessions.filter((s) => s.source === source); + for (const s of sourceSessions) { try { const ctx = await extractContext(s); if (ctx.recentMessages.length < 2) continue; - + // Skip test artifacts from earlier runs const firstMsg = ctx.recentMessages[0].content; if (firstMsg.startsWith('# Session Handoff Context')) continue; if (firstMsg.includes('HANDOFF_RECEIVED')) continue; if (firstMsg.includes('VERIFICATION TASK')) continue; - + // Extract key facts for semantic verification const keyFacts = extractKeyFacts(ctx); if (keyFacts.length < 2) continue; - + picked.set(source, { source, session: s, context: ctx, keyFacts }); break; - } catch (e) { - continue; + } catch (_e) { + /* skip sessions that fail to parse */ } } } @@ -71,27 +71,24 @@ async function pickRealSessions(): Promise> { function extractKeyFacts(ctx: SessionContext): string[] { const facts: string[] = []; const seen = new Set(); - + // Use the summary as the primary source of key facts const summary = (ctx.session.summary || '').toLowerCase(); - + // Also get the first user message - const firstUser = ctx.recentMessages.find(m => m.role === 'user'); + const firstUser = 
ctx.recentMessages.find((m) => m.role === 'user'); const firstUserContent = (firstUser?.content || '').toLowerCase(); - + // All text for broader matching - const allText = [ - summary, - firstUserContent, - ...ctx.recentMessages.map(m => m.content) - ].join(' ').toLowerCase(); - + const allText = [summary, firstUserContent, ...ctx.recentMessages.map((m) => m.content)].join(' ').toLowerCase(); + // Extract SEMANTIC topic keywords (not garbage file paths) // Focus on words that describe WHAT the session was about - const topicTerms = allText.match( - /\b(?:ssh|quic|migration|superset|tauri|electron|authentication|codex|readme|count|sample|switcher|account|backup|architecture|remote|workspace|desktop|terminal|integration|session|handoff|picker)\b/gi - ) || []; - + const topicTerms = + allText.match( + /\b(?:ssh|quic|migration|superset|tauri|electron|authentication|codex|readme|count|sample|switcher|account|backup|architecture|remote|workspace|desktop|terminal|integration|session|handoff|picker)\b/gi, + ) || []; + for (const t of topicTerms) { const lower = t.toLowerCase(); if (!seen.has(lower)) { @@ -99,7 +96,7 @@ function extractKeyFacts(ctx: SessionContext): string[] { facts.push(lower); } } - + // Extract meaningful file names (only short, recognizable ones) const files = allText.match(/\b[a-z]+\.(?:json|txt|md|ts|js|toml|db)\b/g) || []; for (const f of files) { @@ -108,7 +105,7 @@ function extractKeyFacts(ctx: SessionContext): string[] { facts.push(f); } } - + // Extract short paths (only recognizable ones like ~/.codex/) const shortPaths = allText.match(/~\/\.[a-z]+\/[a-z.]+/g) || []; for (const p of shortPaths) { @@ -117,10 +114,10 @@ function extractKeyFacts(ctx: SessionContext): string[] { facts.push(p); } } - + // Extract key action words from user message if (firstUserContent.length > 3) { - const words = firstUserContent.split(/\s+/).filter(w => w.length > 4 && /^[a-z]+$/i.test(w)); + const words = firstUserContent.split(/\s+/).filter((w) => w.length > 4 
&& /^[a-z]+$/i.test(w)); for (const w of words.slice(0, 3)) { if (!seen.has(w)) { seen.add(w); @@ -128,13 +125,13 @@ function extractKeyFacts(ctx: SessionContext): string[] { } } } - + return facts.slice(0, 8); } function invokeTarget(target: string, markdown: string, cwd: string): string { const effectiveCwd = cwd && fs.existsSync(cwd) ? cwd : process.cwd(); - + const prompt = `${markdown} --- @@ -149,42 +146,52 @@ Based ONLY on the handoff context above, describe in 2-3 sentences: try { switch (target) { case 'claude': - return execSync( - `cat "${promptFile}" | claude -p --max-turns 2`, - { cwd: effectiveCwd, timeout: 120_000, encoding: 'utf8', stdio: ['pipe', 'pipe', 'pipe'] } - ).trim(); + return execSync(`cat "${promptFile}" | claude -p --max-turns 2`, { + cwd: effectiveCwd, + timeout: 120_000, + encoding: 'utf8', + stdio: ['pipe', 'pipe', 'pipe'], + }).trim(); case 'gemini': - return execSync( - `cat "${promptFile}" | gemini -p ""`, - { cwd: effectiveCwd, timeout: 120_000, encoding: 'utf8', stdio: ['pipe', 'pipe', 'pipe'] } - ).trim(); + return execSync(`cat "${promptFile}" | gemini -p ""`, { + cwd: effectiveCwd, + timeout: 120_000, + encoding: 'utf8', + stdio: ['pipe', 'pipe', 'pipe'], + }).trim(); case 'codex': { // Codex needs a trusted git dir - const codexCwd = fs.existsSync(path.join(effectiveCwd, '.git')) - ? effectiveCwd - : process.cwd(); - return execSync( - `cat "${promptFile}" | codex exec`, - { cwd: codexCwd, timeout: 120_000, encoding: 'utf8', stdio: ['pipe', 'pipe', 'pipe'] } - ).trim(); + const codexCwd = fs.existsSync(path.join(effectiveCwd, '.git')) ? 
effectiveCwd : process.cwd(); + return execSync(`cat "${promptFile}" | codex exec`, { + cwd: codexCwd, + timeout: 120_000, + encoding: 'utf8', + stdio: ['pipe', 'pipe', 'pipe'], + }).trim(); } case 'opencode': { const truncated = prompt.slice(0, 4000).replace(/"/g, '\\"').replace(/`/g, '\\`'); - return execSync( - `opencode run "${truncated}"`, - { cwd: effectiveCwd, timeout: 120_000, encoding: 'utf8', stdio: ['pipe', 'pipe', 'pipe'], shell: '/bin/zsh' } - ).trim(); + return execSync(`opencode run "${truncated}"`, { + cwd: effectiveCwd, + timeout: 120_000, + encoding: 'utf8', + stdio: ['pipe', 'pipe', 'pipe'], + shell: '/bin/zsh', + }).trim(); } case 'copilot': { const truncated = prompt.slice(0, 3000).replace(/"/g, '\\"').replace(/`/g, '\\`'); - return execSync( - `timeout 90 copilot -i "${truncated}" --no-ask-user --max-autopilot-continues 0`, - { cwd: effectiveCwd, timeout: 120_000, encoding: 'utf8', stdio: ['pipe', 'pipe', 'pipe'], shell: '/bin/zsh' } - ).trim(); + return execSync(`timeout 90 copilot -i "${truncated}" --no-ask-user --max-autopilot-continues 0`, { + cwd: effectiveCwd, + timeout: 120_000, + encoding: 'utf8', + stdio: ['pipe', 'pipe', 'pipe'], + shell: '/bin/zsh', + }).trim(); } default: @@ -209,7 +216,9 @@ async function main() { const picked = await pickRealSessions(); for (const [source, data] of picked) { - console.log(` ✅ ${source}: "${data.session.summary?.slice(0, 50) || '(no summary)'}" (${data.context.recentMessages.length} msgs)`); + console.log( + ` ✅ ${source}: "${data.session.summary?.slice(0, 50) || '(no summary)'}" (${data.context.recentMessages.length} msgs)`, + ); console.log(` Key facts: [${data.keyFacts.join(', ')}]`); console.log(` Markdown: ${data.context.markdown.length} bytes`); } @@ -240,17 +249,19 @@ async function main() { const responseLower = response.toLowerCase(); // Verify key facts - const factResults = sourceData.keyFacts.map(fact => ({ + const factResults = sourceData.keyFacts.map((fact) => ({ fact, found: 
responseLower.includes(fact), })); - const found = factResults.filter(f => f.found).length; + const found = factResults.filter((f) => f.found).length; const threshold = Math.max(1, Math.ceil(sourceData.keyFacts.length * 0.3)); const passed = found >= threshold; results.push({ - id: testId, source, target, + id: testId, + source, + target, status: passed ? 'pass' : 'fail', factsFound: found, factsTotal: sourceData.keyFacts.length, @@ -258,23 +269,21 @@ async function main() { responsePreview: response.slice(0, 300), }); - const foundFacts = factResults.filter(f => f.found).map(f => f.fact); - const missedFacts = factResults.filter(f => !f.found).map(f => f.fact); - + const foundFacts = factResults.filter((f) => f.found).map((f) => f.fact); + const missedFacts = factResults.filter((f) => !f.found).map((f) => f.fact); + console.log(` ${passed ? '✅' : '❌'} Facts: ${found}/${sourceData.keyFacts.length} (need ${threshold})`); if (foundFacts.length > 0) console.log(` Found: ${foundFacts.join(', ')}`); if (missedFacts.length > 0) console.log(` Missed: ${missedFacts.join(', ')}`); // Save response - fs.writeFileSync( - path.join(RESULTS_DIR, `response-${source}-to-${target}.txt`), - response - ); - + fs.writeFileSync(path.join(RESULTS_DIR, `response-${source}-to-${target}.txt`), response); } catch (e: any) { console.log(` ⚠️ Error: ${e.message?.slice(0, 80)}`); results.push({ - id: testId, source, target, + id: testId, + source, + target, status: 'error', factsFound: 0, factsTotal: sourceData.keyFacts.length, @@ -291,18 +300,18 @@ async function main() { console.log('║ RESULTS ║'); console.log('╚═══════════════════════════════════════════════════════════════╝\n'); - const pass = results.filter(r => r.status === 'pass').length; - const fail = results.filter(r => r.status === 'fail').length; - const err = results.filter(r => r.status === 'error').length; + const pass = results.filter((r) => r.status === 'pass').length; + const fail = results.filter((r) => r.status === 
'fail').length; + const err = results.filter((r) => r.status === 'error').length; // Matrix console.log('From / To | Claude | Copilot | Gemini | Codex | OpenCode'); console.log('──────────────-|--------|---------|--------|--------|----------'); for (const source of targets) { if (!picked.has(source)) continue; - const cols = targets.map(target => { + const cols = targets.map((target) => { if (target === source) return ' - '; - const r = results.find(r => r.source === source && r.target === target); + const r = results.find((r) => r.source === source && r.target === target); if (!r) return ' ? '; if (r.status === 'pass') return ` ✅${r.factsFound}/${r.factsTotal}`; if (r.status === 'fail') return ` ❌${r.factsFound}/${r.factsTotal}`; @@ -316,9 +325,12 @@ async function main() { // Save full results fs.writeFileSync(path.join(RESULTS_DIR, 'results.json'), JSON.stringify(results, null, 2)); console.log(`\nResults: ${RESULTS_DIR}/results.json`); - + // Exit code process.exit(fail + err); } -main().catch(e => { console.error(e); process.exit(1); }); +main().catch((e) => { + console.error(e); + process.exit(1); +}); diff --git a/src/__tests__/resume-binary.test.ts b/src/__tests__/resume-binary.test.ts new file mode 100644 index 0000000..27fb109 --- /dev/null +++ b/src/__tests__/resume-binary.test.ts @@ -0,0 +1,26 @@ +import { describe, expect, it } from 'vitest'; +import { getToolBinaryCandidates, resolveToolBinaryName } from '../utils/resume.js'; + +describe('tool binary resolution', () => { + it('prefers cursor-agent with agent as fallback', () => { + expect(getToolBinaryCandidates('cursor')).toEqual(['cursor-agent', 'agent']); + }); + + it('chooses cursor-agent when it is available', async () => { + const binaryName = await resolveToolBinaryName('cursor', async (candidate) => candidate === 'cursor-agent'); + + expect(binaryName).toBe('cursor-agent'); + }); + + it('falls back to agent when cursor-agent is unavailable', async () => { + const binaryName = await 
resolveToolBinaryName('cursor', async (candidate) => candidate === 'agent'); + + expect(binaryName).toBe('agent'); + }); + + it('returns null when no cursor binary is available', async () => { + const binaryName = await resolveToolBinaryName('cursor', async () => false); + + expect(binaryName).toBeNull(); + }); +}); diff --git a/src/__tests__/schemas.test.ts b/src/__tests__/schemas.test.ts new file mode 100644 index 0000000..5d57560 --- /dev/null +++ b/src/__tests__/schemas.test.ts @@ -0,0 +1,717 @@ +/** + * Tests for Zod schemas in src/types/. + * Covers content-blocks, tool-names, and all parser raw format schemas. + */ +import { describe, expect, it } from 'vitest'; +import { + ContentBlockSchema, + TextBlockSchema, + ThinkingBlockSchema, + ToolResultBlockSchema, + ToolUseBlockSchema, +} from '../types/content-blocks.js'; +import { + ClaudeMessageSchema, + CodexEventMsgSchema, + CodexMessageSchema, + CodexResponseItemSchema, + CodexSessionMetaSchema, + CodexTurnContextSchema, + CopilotEventSchema, + CopilotWorkspaceSchema, + CursorTranscriptLineSchema, + DroidCompactionStateSchema, + DroidEventSchema, + DroidMessageEventSchema, + DroidSessionStartSchema, + DroidSettingsSchema, + DroidTodoStateSchema, + GeminiMessageSchema, + GeminiSessionSchema, + GeminiToolCallSchema, + KimiMetadataSchema, + OpenCodeMessageSchema, + OpenCodeSessionSchema, + SerializedSessionSchema, + SqliteSessionRowSchema, +} from '../types/schemas.js'; +import type { SessionSource } from '../types/tool-names.js'; +import { EDIT_TOOLS, READ_TOOLS, SHELL_TOOLS, TOOL_NAMES, WRITE_TOOLS } from '../types/tool-names.js'; + +// ── tool-names.ts ──────────────────────────────────────────────────────────── + +describe('TOOL_NAMES', () => { + it('contains exactly 16 tools', () => { + expect(TOOL_NAMES).toHaveLength(16); + }); + + it('includes all known tools', () => { + const expected: SessionSource[] = [ + 'claude', 'codex', 'copilot', 'gemini', 'opencode', 'droid', 'cursor', + 'amp', 'kiro', 'crush', 
'cline', 'roo-code', 'kilo-code', 'antigravity', 'kimi', 'qwen-code', + ]; + expect([...TOOL_NAMES]).toEqual(expected); + }); + + it('is frozen at runtime (immutable)', () => { + expect(Object.isFrozen(TOOL_NAMES)).toBe(true); + }); +}); + +describe('Canonical tool name sets', () => { + it('SHELL_TOOLS contains Bash and exec_command', () => { + expect(SHELL_TOOLS.has('Bash')).toBe(true); + expect(SHELL_TOOLS.has('exec_command')).toBe(true); + }); + + it('READ_TOOLS contains Read and read_file', () => { + expect(READ_TOOLS.has('Read')).toBe(true); + expect(READ_TOOLS.has('read_file')).toBe(true); + }); + + it('WRITE_TOOLS contains Write and create_file', () => { + expect(WRITE_TOOLS.has('Write')).toBe(true); + expect(WRITE_TOOLS.has('create_file')).toBe(true); + }); + + it('EDIT_TOOLS contains Edit and apply_diff', () => { + expect(EDIT_TOOLS.has('Edit')).toBe(true); + expect(EDIT_TOOLS.has('apply_diff')).toBe(true); + }); +}); + +// ── content-blocks.ts ──────────────────────────────────────────────────────── + +describe('ContentBlock schemas', () => { + describe('TextBlockSchema', () => { + it('accepts valid text block', () => { + const result = TextBlockSchema.safeParse({ type: 'text', text: 'hello' }); + expect(result.success).toBe(true); + }); + + it('rejects missing text field', () => { + const result = TextBlockSchema.safeParse({ type: 'text' }); + expect(result.success).toBe(false); + }); + + it('rejects wrong type discriminator', () => { + const result = TextBlockSchema.safeParse({ type: 'thinking', text: 'hello' }); + expect(result.success).toBe(false); + }); + }); + + describe('ThinkingBlockSchema', () => { + it('accepts thinking block with text', () => { + const result = ThinkingBlockSchema.safeParse({ type: 'thinking', text: 'reasoning...' 
}); + expect(result.success).toBe(true); + }); + + it('accepts thinking block with thinking field', () => { + const result = ThinkingBlockSchema.safeParse({ type: 'thinking', thinking: 'deep thought' }); + expect(result.success).toBe(true); + }); + + it('accepts thinking block with no text (both optional)', () => { + const result = ThinkingBlockSchema.safeParse({ type: 'thinking' }); + expect(result.success).toBe(true); + }); + }); + + describe('ToolUseBlockSchema', () => { + it('accepts valid tool_use block', () => { + const result = ToolUseBlockSchema.safeParse({ + type: 'tool_use', + id: 'tu_123', + name: 'Bash', + input: { command: 'ls' }, + }); + expect(result.success).toBe(true); + }); + + it('defaults input to empty object when omitted', () => { + const result = ToolUseBlockSchema.safeParse({ + type: 'tool_use', + id: 'tu_123', + name: 'Bash', + }); + expect(result.success).toBe(true); + if (result.success) { + expect(result.data.input).toEqual({}); + } + }); + + it('rejects missing name', () => { + const result = ToolUseBlockSchema.safeParse({ + type: 'tool_use', + id: 'tu_123', + }); + expect(result.success).toBe(false); + }); + }); + + describe('ToolResultBlockSchema', () => { + it('accepts string content', () => { + const result = ToolResultBlockSchema.safeParse({ + type: 'tool_result', + tool_use_id: 'tu_123', + content: 'output here', + }); + expect(result.success).toBe(true); + }); + + it('accepts array content', () => { + const result = ToolResultBlockSchema.safeParse({ + type: 'tool_result', + tool_use_id: 'tu_123', + content: [{ type: 'text', text: 'output here' }], + }); + expect(result.success).toBe(true); + }); + + it('accepts is_error flag', () => { + const result = ToolResultBlockSchema.safeParse({ + type: 'tool_result', + tool_use_id: 'tu_123', + content: 'error output', + is_error: true, + }); + expect(result.success).toBe(true); + if (result.success) { + expect(result.data.is_error).toBe(true); + } + }); + }); + + 
describe('ContentBlockSchema (discriminated union)', () => { + it('discriminates text blocks', () => { + const result = ContentBlockSchema.safeParse({ type: 'text', text: 'hello' }); + expect(result.success).toBe(true); + if (result.success) expect(result.data.type).toBe('text'); + }); + + it('discriminates tool_use blocks', () => { + const result = ContentBlockSchema.safeParse({ + type: 'tool_use', + id: 'x', + name: 'Read', + input: {}, + }); + expect(result.success).toBe(true); + if (result.success) expect(result.data.type).toBe('tool_use'); + }); + + it('rejects unknown type discriminator', () => { + const result = ContentBlockSchema.safeParse({ type: 'image', url: 'http://...' }); + expect(result.success).toBe(false); + }); + }); +}); + +// ── Claude schemas ─────────────────────────────────────────────────────────── + +describe('ClaudeMessageSchema', () => { + const validMsg = { + type: 'human', + uuid: 'abc-123', + timestamp: '2025-01-01T00:00:00Z', + sessionId: 'sess_1', + cwd: '/home/user/project', + message: { + role: 'user', + content: 'Hello', + }, + }; + + it('accepts valid Claude message', () => { + const result = ClaudeMessageSchema.safeParse(validMsg); + expect(result.success).toBe(true); + }); + + it('accepts message with content block array', () => { + const msg = { + ...validMsg, + message: { + role: 'assistant', + content: [ + { type: 'text', text: 'response' }, + { type: 'tool_use', id: 'tu_1', name: 'Bash', input: { command: 'ls' } }, + ], + }, + }; + const result = ClaudeMessageSchema.safeParse(msg); + expect(result.success).toBe(true); + }); + + it('accepts optional fields (model, isCompactSummary, gitBranch)', () => { + const msg = { + ...validMsg, + model: 'claude-sonnet-4-20250514', + isCompactSummary: true, + gitBranch: 'main', + }; + const result = ClaudeMessageSchema.safeParse(msg); + expect(result.success).toBe(true); + }); + + it('tolerates extra fields via passthrough', () => { + const msg = { ...validMsg, unknownField: 'extra data' 
}; + const result = ClaudeMessageSchema.safeParse(msg); + expect(result.success).toBe(true); + }); + + it('rejects missing uuid', () => { + const { uuid, ...noUuid } = validMsg; + const result = ClaudeMessageSchema.safeParse(noUuid); + expect(result.success).toBe(false); + }); +}); + +// ── Codex schemas ──────────────────────────────────────────────────────────── + +describe('CodexMessageSchema (discriminated union)', () => { + it('accepts session_meta', () => { + const result = CodexSessionMetaSchema.safeParse({ + timestamp: '2025-01-01T00:00:00Z', + type: 'session_meta', + payload: { id: 'sess_1', cwd: '/tmp' }, + }); + expect(result.success).toBe(true); + }); + + it('accepts event_msg', () => { + const result = CodexEventMsgSchema.safeParse({ + timestamp: '2025-01-01T00:00:00Z', + type: 'event_msg', + payload: { role: 'user', message: 'hello' }, + }); + expect(result.success).toBe(true); + }); + + it('accepts response_item', () => { + const result = CodexResponseItemSchema.safeParse({ + timestamp: '2025-01-01T00:00:00Z', + type: 'response_item', + payload: { type: 'function_call', name: 'shell', arguments: '{"cmd":"ls"}' }, + }); + expect(result.success).toBe(true); + }); + + it('accepts turn_context', () => { + const result = CodexTurnContextSchema.safeParse({ + timestamp: '2025-01-01T00:00:00Z', + type: 'turn_context', + payload: { model: 'o3-mini' }, + }); + expect(result.success).toBe(true); + }); + + it('discriminates correctly in union', () => { + const meta = { + timestamp: '2025-01-01T00:00:00Z', + type: 'session_meta', + payload: { id: 'x' }, + }; + const result = CodexMessageSchema.safeParse(meta); + expect(result.success).toBe(true); + if (result.success) expect(result.data.type).toBe('session_meta'); + }); + + it('rejects unknown type in union', () => { + const result = CodexMessageSchema.safeParse({ + timestamp: '2025-01-01T00:00:00Z', + type: 'unknown_type', + payload: {}, + }); + expect(result.success).toBe(false); + }); +}); + +// ── Copilot 
schemas ────────────────────────────────────────────────────────── + +describe('CopilotWorkspaceSchema', () => { + it('accepts valid workspace', () => { + const result = CopilotWorkspaceSchema.safeParse({ + id: 'ws_1', + cwd: '/home/user/proj', + created_at: '2025-01-01T00:00:00Z', + updated_at: '2025-01-02T00:00:00Z', + }); + expect(result.success).toBe(true); + }); + + it('accepts optional fields', () => { + const result = CopilotWorkspaceSchema.safeParse({ + id: 'ws_1', + cwd: '/tmp', + git_root: '/tmp', + repository: 'owner/repo', + branch: 'main', + summary: 'test session', + summary_count: 5, + created_at: '2025-01-01T00:00:00Z', + updated_at: '2025-01-02T00:00:00Z', + }); + expect(result.success).toBe(true); + }); +}); + +describe('CopilotEventSchema', () => { + it('accepts valid event', () => { + const result = CopilotEventSchema.safeParse({ + type: 'user.message', + id: 'evt_1', + timestamp: '2025-01-01T00:00:00Z', + data: { content: 'hello' }, + }); + expect(result.success).toBe(true); + }); +}); + +// ── Gemini schemas ─────────────────────────────────────────────────────────── + +describe('GeminiMessageSchema', () => { + it('accepts message with string content', () => { + const result = GeminiMessageSchema.safeParse({ + id: 'msg_1', + timestamp: '2025-01-01T00:00:00Z', + type: 'user', + content: 'hello', + }); + expect(result.success).toBe(true); + }); + + it('accepts message with array content', () => { + const result = GeminiMessageSchema.safeParse({ + id: 'msg_1', + timestamp: '2025-01-01T00:00:00Z', + type: 'model', + content: [{ text: 'response', type: 'text' }], + }); + expect(result.success).toBe(true); + }); + + it('accepts tool calls', () => { + const result = GeminiMessageSchema.safeParse({ + id: 'msg_1', + timestamp: '2025-01-01T00:00:00Z', + type: 'model', + content: 'using tool', + toolCalls: [{ name: 'shell', args: { command: 'ls' } }], + }); + expect(result.success).toBe(true); + }); + + it('accepts token usage', () => { + const result = 
GeminiMessageSchema.safeParse({ + id: 'msg_1', + timestamp: '2025-01-01T00:00:00Z', + type: 'model', + content: 'response', + tokens: { input: 100, output: 50, total: 150 }, + }); + expect(result.success).toBe(true); + }); +}); + +describe('GeminiSessionSchema', () => { + it('accepts valid session', () => { + const result = GeminiSessionSchema.safeParse({ + sessionId: 'sess_1', + projectHash: 'abc123', + startTime: '2025-01-01T00:00:00Z', + lastUpdated: '2025-01-02T00:00:00Z', + messages: [{ id: 'msg_1', timestamp: '2025-01-01T00:00:00Z', type: 'user', content: 'hi' }], + }); + expect(result.success).toBe(true); + }); +}); + +describe('GeminiToolCallSchema', () => { + it('accepts tool call with resultDisplay', () => { + const result = GeminiToolCallSchema.safeParse({ + name: 'edit_file', + args: { path: '/tmp/test.ts' }, + resultDisplay: { + fileName: 'test.ts', + filePath: '/tmp/test.ts', + diffStat: { model_added_lines: 5, model_removed_lines: 2 }, + isNewFile: false, + }, + }); + expect(result.success).toBe(true); + }); +}); + +// ── OpenCode schemas ───────────────────────────────────────────────────────── + +describe('OpenCodeSessionSchema', () => { + it('accepts valid session', () => { + const result = OpenCodeSessionSchema.safeParse({ + id: 'sess_1', + projectID: 'proj_1', + directory: '/home/user/proj', + time: { created: 1704067200, updated: 1704153600 }, + }); + expect(result.success).toBe(true); + }); + + it('accepts optional summary fields', () => { + const result = OpenCodeSessionSchema.safeParse({ + id: 'sess_1', + projectID: 'proj_1', + directory: '/tmp', + time: { created: 1704067200, updated: 1704153600 }, + slug: 'test-session', + title: 'Test Session', + summary: { additions: 10, deletions: 5, files: 3 }, + }); + expect(result.success).toBe(true); + }); +}); + +describe('OpenCodeMessageSchema', () => { + it('accepts valid message', () => { + const result = OpenCodeMessageSchema.safeParse({ + id: 'msg_1', + sessionID: 'sess_1', + role: 'user', + 
time: { created: 1704067200 }, + }); + expect(result.success).toBe(true); + }); + + it('rejects invalid role', () => { + const result = OpenCodeMessageSchema.safeParse({ + id: 'msg_1', + sessionID: 'sess_1', + role: 'system', + time: { created: 1704067200 }, + }); + expect(result.success).toBe(false); + }); +}); + +describe('SqliteSessionRowSchema', () => { + it('accepts valid SQLite row', () => { + const result = SqliteSessionRowSchema.safeParse({ + id: 'sess_1', + project_id: 'proj_1', + slug: 'test', + directory: '/tmp', + title: 'Test', + version: '1.0', + summary_additions: 10, + summary_deletions: null, + summary_files: 3, + time_created: 1704067200, + time_updated: 1704153600, + }); + expect(result.success).toBe(true); + }); +}); + +// ── Droid schemas ──────────────────────────────────────────────────────────── + +describe('DroidEventSchema (discriminated union)', () => { + it('accepts session_start', () => { + const result = DroidSessionStartSchema.safeParse({ + type: 'session_start', + id: 'sess_1', + title: 'My Session', + sessionTitle: 'My Session', + cwd: '/home/user/proj', + }); + expect(result.success).toBe(true); + }); + + it('accepts message event', () => { + const result = DroidMessageEventSchema.safeParse({ + type: 'message', + id: 'msg_1', + timestamp: '2025-01-01T00:00:00Z', + message: { + role: 'user', + content: [{ type: 'text', text: 'hello' }], + }, + }); + expect(result.success).toBe(true); + }); + + it('accepts todo_state', () => { + const result = DroidTodoStateSchema.safeParse({ + type: 'todo_state', + id: 'todo_1', + timestamp: '2025-01-01T00:00:00Z', + todos: '- [ ] task 1', + }); + expect(result.success).toBe(true); + }); + + it('accepts compaction_state', () => { + const result = DroidCompactionStateSchema.safeParse({ + type: 'compaction_state', + id: 'comp_1', + timestamp: '2025-01-01T00:00:00Z', + summaryText: 'Compacted conversation summary', + summaryTokens: 500, + }); + expect(result.success).toBe(true); + }); + + 
it('discriminates correctly in union', () => { + const msg = { + type: 'message', + id: 'msg_1', + timestamp: '2025-01-01T00:00:00Z', + message: { + role: 'assistant', + content: [{ type: 'text', text: 'I will help.' }], + }, + }; + const result = DroidEventSchema.safeParse(msg); + expect(result.success).toBe(true); + if (result.success) expect(result.data.type).toBe('message'); + }); + + it('rejects unknown type in union', () => { + const result = DroidEventSchema.safeParse({ + type: 'unknown', + id: 'x', + timestamp: '2025-01-01T00:00:00Z', + }); + expect(result.success).toBe(false); + }); +}); + +describe('DroidSettingsSchema', () => { + it('accepts settings with token usage', () => { + const result = DroidSettingsSchema.safeParse({ + model: 'claude-sonnet-4-20250514', + tokenUsage: { + inputTokens: 1000, + outputTokens: 500, + cacheCreationTokens: 200, + }, + }); + expect(result.success).toBe(true); + }); +}); + +// ── Cursor schemas ─────────────────────────────────────────────────────────── + +describe('CursorTranscriptLineSchema', () => { + it('accepts user message', () => { + const result = CursorTranscriptLineSchema.safeParse({ + role: 'user', + message: { + content: [{ type: 'text', text: 'fix the bug' }], + }, + }); + expect(result.success).toBe(true); + }); + + it('accepts assistant message with tool use', () => { + const result = CursorTranscriptLineSchema.safeParse({ + role: 'assistant', + message: { + content: [ + { type: 'text', text: 'Let me look at the code.' 
}, + { type: 'tool_use', id: 'tu_1', name: 'Read', input: { path: '/tmp/test.ts' } }, + ], + }, + }); + expect(result.success).toBe(true); + }); + + it('rejects system role', () => { + const result = CursorTranscriptLineSchema.safeParse({ + role: 'system', + message: { content: [{ type: 'text', text: 'prompt' }] }, + }); + expect(result.success).toBe(false); + }); +}); + +// ── Kimi schemas ───────────────────────────────────────────────────────────── + +describe('KimiMetadataSchema', () => { + it('accepts nullable wire_mtime and numeric archived_at', () => { + const result = KimiMetadataSchema.safeParse({ + session_id: 'kimi-session-1', + archived_at: 1735086302.21, + wire_mtime: null, + }); + expect(result.success).toBe(true); + }); + + it('accepts legacy/string archived_at values', () => { + const result = KimiMetadataSchema.safeParse({ + session_id: 'kimi-session-2', + archived_at: '2026-01-01T12:00:00.000Z', + wire_mtime: 1735086302.21, + }); + expect(result.success).toBe(true); + }); +}); + +// ── Serialized Session (Index) ─────────────────────────────────────────────── + +describe('SerializedSessionSchema', () => { + const validSession = { + id: 'sess_1', + source: 'claude', + cwd: '/home/user/project', + lines: 42, + bytes: 12345, + createdAt: '2025-01-01T00:00:00.000Z', + updatedAt: '2025-01-02T00:00:00.000Z', + originalPath: '/home/user/.claude/projects/proj/session.jsonl', + }; + + it('accepts valid session and transforms dates', () => { + const result = SerializedSessionSchema.safeParse(validSession); + expect(result.success).toBe(true); + if (result.success) { + expect(result.data.createdAt).toBeInstanceOf(Date); + expect(result.data.updatedAt).toBeInstanceOf(Date); + expect(result.data.createdAt.toISOString()).toBe('2025-01-01T00:00:00.000Z'); + } + }); + + it('validates source against TOOL_NAMES', () => { + const result = SerializedSessionSchema.safeParse({ + ...validSession, + source: 'unknown_tool', + }); + expect(result.success).toBe(false); + 
}); + + it('accepts all valid source values', () => { + for (const source of TOOL_NAMES) { + const result = SerializedSessionSchema.safeParse({ ...validSession, source }); + expect(result.success).toBe(true); + } + }); + + it('accepts optional fields', () => { + const result = SerializedSessionSchema.safeParse({ + ...validSession, + repo: 'owner/repo', + branch: 'main', + summary: 'Test session', + model: 'claude-sonnet-4-20250514', + }); + expect(result.success).toBe(true); + }); + + it('rejects missing required fields', () => { + const { id, ...noId } = validSession; + expect(SerializedSessionSchema.safeParse(noId).success).toBe(false); + + const { cwd, ...noCwd } = validSession; + expect(SerializedSessionSchema.safeParse(noCwd).success).toBe(false); + + const { lines, ...noLines } = validSession; + expect(SerializedSessionSchema.safeParse(noLines).success).toBe(false); + }); +}); diff --git a/src/__tests__/shared-utils.test.ts b/src/__tests__/shared-utils.test.ts new file mode 100644 index 0000000..25a7c12 --- /dev/null +++ b/src/__tests__/shared-utils.test.ts @@ -0,0 +1,848 @@ +/** + * Tests for shared utility modules created in Wave 2. + * Covers: jsonl, fs-helpers, content, tool-extraction, parser-helpers additions. 
+ */ + +import * as fs from 'fs'; +import * as os from 'os'; +import * as path from 'path'; +import { afterEach, describe, expect, it } from 'vitest'; +import type { ConversationMessage } from '../types/index.js'; +import { classifyToolName } from '../types/tool-names.js'; +import { + cleanUserQueryText, + extractRepoFromGitUrl, + extractTextFromBlocks, + isRealUserMessage, + isSystemContent, +} from '../utils/content.js'; +import { countDiffStats, extractStdoutTail, formatEditDiff, formatNewFileDiff } from '../utils/diff.js'; +import { findFiles, listSubdirectories } from '../utils/fs-helpers.js'; +import { getFileStats, readJsonlFile, scanJsonlHead } from '../utils/jsonl.js'; +import { extractRepo, trimMessages } from '../utils/parser-helpers.js'; +import { + type AnthropicMessage, + extractAnthropicToolData, + extractThinkingHighlights, +} from '../utils/tool-extraction.js'; + +// ── Temp file helpers ──────────────────────────────────────────────────────── + +const tmpDirs: string[] = []; + +function makeTmpDir(): string { + const dir = fs.mkdtempSync(path.join(os.tmpdir(), 'continues-test-')); + tmpDirs.push(dir); + return dir; +} + +afterEach(() => { + for (const dir of tmpDirs) { + fs.rmSync(dir, { recursive: true, force: true }); + } + tmpDirs.length = 0; +}); + +// ── jsonl.ts ───────────────────────────────────────────────────────────────── + +describe('readJsonlFile', () => { + it('reads valid JSONL file', async () => { + const dir = makeTmpDir(); + const file = path.join(dir, 'test.jsonl'); + fs.writeFileSync(file, '{"a":1}\n{"a":2}\n{"a":3}\n'); + + const result = await readJsonlFile<{ a: number }>(file); + expect(result).toHaveLength(3); + expect(result[0].a).toBe(1); + expect(result[2].a).toBe(3); + }); + + it('skips invalid lines', async () => { + const dir = makeTmpDir(); + const file = path.join(dir, 'test.jsonl'); + fs.writeFileSync(file, '{"ok":true}\nnot json\n{"ok":false}\n'); + + const result = await readJsonlFile<{ ok: boolean }>(file); + 
expect(result).toHaveLength(2); + }); + + it('returns empty array for non-existent file', async () => { + const result = await readJsonlFile('/tmp/nonexistent-file.jsonl'); + expect(result).toEqual([]); + }); +}); + +describe('scanJsonlHead', () => { + it('scans first N lines and stops', async () => { + const dir = makeTmpDir(); + const file = path.join(dir, 'test.jsonl'); + const lines = Array.from({ length: 100 }, (_, i) => JSON.stringify({ i })); + fs.writeFileSync(file, lines.join('\n') + '\n'); + + const visited: number[] = []; + await scanJsonlHead(file, 5, (parsed) => { + visited.push((parsed as { i: number }).i); + return 'continue'; + }); + + expect(visited).toHaveLength(5); + expect(visited).toEqual([0, 1, 2, 3, 4]); + }); + + it('supports early stop via visitor', async () => { + const dir = makeTmpDir(); + const file = path.join(dir, 'test.jsonl'); + fs.writeFileSync(file, '{"type":"a"}\n{"type":"b"}\n{"type":"c"}\n'); + + const visited: string[] = []; + await scanJsonlHead(file, 100, (parsed) => { + const p = parsed as { type: string }; + visited.push(p.type); + return p.type === 'b' ? 
'stop' : 'continue'; + }); + + expect(visited).toEqual(['a', 'b']); + }); + + it('handles non-existent file gracefully', async () => { + await scanJsonlHead('/tmp/nonexistent.jsonl', 10, () => 'continue'); + // No error thrown + }); +}); + +describe('getFileStats', () => { + it('returns line count and byte size', async () => { + const dir = makeTmpDir(); + const file = path.join(dir, 'test.jsonl'); + fs.writeFileSync(file, '{"a":1}\n{"a":2}\n{"a":3}\n'); + + const stats = await getFileStats(file); + expect(stats.lines).toBe(3); + expect(stats.bytes).toBeGreaterThan(0); + }); +}); + +// ── fs-helpers.ts ──────────────────────────────────────────────────────────── + +describe('findFiles', () => { + it('finds files matching predicate', () => { + const dir = makeTmpDir(); + fs.writeFileSync(path.join(dir, 'a.jsonl'), ''); + fs.writeFileSync(path.join(dir, 'b.txt'), ''); + fs.writeFileSync(path.join(dir, 'c.jsonl'), ''); + + const files = findFiles(dir, { + match: (entry) => entry.name.endsWith('.jsonl'), + }); + expect(files).toHaveLength(2); + }); + + it('recurses into subdirectories by default', () => { + const dir = makeTmpDir(); + const sub = path.join(dir, 'sub'); + fs.mkdirSync(sub); + fs.writeFileSync(path.join(dir, 'a.jsonl'), ''); + fs.writeFileSync(path.join(sub, 'b.jsonl'), ''); + + const files = findFiles(dir, { + match: (entry) => entry.name.endsWith('.jsonl'), + }); + expect(files).toHaveLength(2); + }); + + it('respects maxDepth', () => { + const dir = makeTmpDir(); + const sub = path.join(dir, 'sub'); + const subsub = path.join(sub, 'subsub'); + fs.mkdirSync(sub); + fs.mkdirSync(subsub); + fs.writeFileSync(path.join(dir, 'a.jsonl'), ''); + fs.writeFileSync(path.join(sub, 'b.jsonl'), ''); + fs.writeFileSync(path.join(subsub, 'c.jsonl'), ''); + + const files = findFiles(dir, { + match: (entry) => entry.name.endsWith('.jsonl'), + maxDepth: 1, + }); + expect(files).toHaveLength(2); // a.jsonl + sub/b.jsonl + }); + + it('returns empty for non-existent 
directory', () => { + const files = findFiles('/tmp/nonexistent-dir', { + match: () => true, + }); + expect(files).toEqual([]); + }); +}); + +describe('listSubdirectories', () => { + it('lists immediate subdirectories', () => { + const dir = makeTmpDir(); + fs.mkdirSync(path.join(dir, 'a')); + fs.mkdirSync(path.join(dir, 'b')); + fs.writeFileSync(path.join(dir, 'file.txt'), ''); + + const dirs = listSubdirectories(dir); + expect(dirs).toHaveLength(2); + }); + + it('returns empty for non-existent directory', () => { + expect(listSubdirectories('/tmp/nonexistent-dir')).toEqual([]); + }); +}); + +// ── content.ts ─────────────────────────────────────────────────────────────── + +describe('extractTextFromBlocks', () => { + it('returns string content as-is', () => { + expect(extractTextFromBlocks('hello')).toBe('hello'); + }); + + it('extracts text blocks from array', () => { + const content = [ + { type: 'text', text: 'line 1' }, + { type: 'tool_use', text: 'not this' }, + { type: 'text', text: 'line 2' }, + ]; + expect(extractTextFromBlocks(content)).toBe('line 1\nline 2'); + }); + + it('returns empty for undefined', () => { + expect(extractTextFromBlocks(undefined)).toBe(''); + }); +}); + +describe('isSystemContent', () => { + it('detects system-reminder tags', () => { + expect(isSystemContent('...')).toBe(true); + }); + + it('detects permissions tags', () => { + expect(isSystemContent('...')).toBe(true); + }); + + it('does not flag regular text', () => { + expect(isSystemContent('Hello, please fix the bug')).toBe(false); + }); +}); + +describe('isRealUserMessage', () => { + it('accepts normal user text', () => { + expect(isRealUserMessage('fix the login bug')).toBe(true); + }); + + it('rejects XML-prefixed content', () => { + expect(isRealUserMessage('...')).toBe(false); + }); + + it('rejects commands', () => { + expect(isRealUserMessage('/help')).toBe(false); + }); + + it('rejects handoff summaries', () => { + expect(isRealUserMessage('Session Handoff from 
Claude')).toBe(false); + }); + + it('rejects empty text', () => { + expect(isRealUserMessage('')).toBe(false); + }); +}); + +describe('extractRepoFromGitUrl', () => { + it('extracts from HTTPS URL', () => { + expect(extractRepoFromGitUrl('https://github.com/owner/repo.git')).toBe('owner/repo'); + }); + + it('extracts from SSH URL', () => { + expect(extractRepoFromGitUrl('git@github.com:owner/repo.git')).toBe('owner/repo'); + }); + + it('extracts without .git suffix', () => { + expect(extractRepoFromGitUrl('https://github.com/owner/repo')).toBe('owner/repo'); + }); + + it('returns empty for invalid URL', () => { + expect(extractRepoFromGitUrl('not-a-url')).toBe(''); + }); + + it('returns empty for empty string', () => { + expect(extractRepoFromGitUrl('')).toBe(''); + }); +}); + +describe('cleanUserQueryText', () => { + it('extracts text from user_query tags', () => { + expect(cleanUserQueryText('fix the bug')).toBe('fix the bug'); + }); + + it('returns original text if no tags', () => { + expect(cleanUserQueryText('just text')).toBe('just text'); + }); +}); + +// ── tool-extraction.ts ─────────────────────────────────────────────────────── + +describe('extractAnthropicToolData', () => { + it('extracts shell tool usage', () => { + const messages: AnthropicMessage[] = [ + { + role: 'assistant', + content: [{ type: 'tool_use', id: 'tu1', name: 'Bash', input: { command: 'npm test' } }], + }, + { + role: 'user', + content: [{ type: 'tool_result', tool_use_id: 'tu1', content: 'exit code 0' }], + }, + ]; + + const { summaries } = extractAnthropicToolData(messages); + expect(summaries).toHaveLength(1); + expect(summaries[0].name).toBe('Bash'); + expect(summaries[0].count).toBe(1); + }); + + it('extracts file modifications', () => { + const messages: AnthropicMessage[] = [ + { + role: 'assistant', + content: [ + { type: 'tool_use', id: 'tu1', name: 'Write', input: { file_path: '/tmp/test.ts' } }, + { type: 'tool_use', id: 'tu2', name: 'Edit', input: { file_path: 
'/tmp/other.ts' } }, + ], + }, + ]; + + const { filesModified } = extractAnthropicToolData(messages); + expect(filesModified).toContain('/tmp/test.ts'); + expect(filesModified).toContain('/tmp/other.ts'); + }); + + it('skips SKIP_TOOLS', () => { + const messages: AnthropicMessage[] = [ + { + role: 'assistant', + content: [ + { type: 'tool_use', id: 'tu1', name: 'TodoWrite', input: {} }, + { type: 'tool_use', id: 'tu2', name: 'Read', input: { file_path: '/test.ts' } }, + ], + }, + ]; + + const { summaries } = extractAnthropicToolData(messages); + expect(summaries).toHaveLength(1); + expect(summaries[0].name).toBe('Read'); + }); + + it('handles MCP tools', () => { + const messages: AnthropicMessage[] = [ + { + role: 'assistant', + content: [{ type: 'tool_use', id: 'tu1', name: 'mcp__github-server___list-issues', input: { repo: 'test' } }], + }, + ]; + + const { summaries } = extractAnthropicToolData(messages); + expect(summaries).toHaveLength(1); + expect(summaries[0].name).toBe('mcp__github-server___list-issues'); + }); + + it('tracks files modified by mcp__morph__edit_file', () => { + const messages: AnthropicMessage[] = [ + { + role: 'assistant', + content: [ + { + type: 'tool_use', + id: 'tu1', + name: 'mcp__morph__edit_file', + input: { path: '/tmp/edited-by-morph.ts', instruction: 'edit file', code_edit: '...' 
}, + }, + ], + }, + ]; + + const { filesModified } = extractAnthropicToolData(messages); + expect(filesModified).toContain('/tmp/edited-by-morph.ts'); + }); + + it('handles empty messages', () => { + const { summaries, filesModified } = extractAnthropicToolData([]); + expect(summaries).toEqual([]); + expect(filesModified).toEqual([]); + }); +}); + +describe('extractThinkingHighlights', () => { + it('extracts reasoning from thinking blocks', () => { + const messages: AnthropicMessage[] = [ + { + role: 'assistant', + content: [{ type: 'thinking', thinking: 'I need to understand the authentication flow before making changes' }], + }, + ]; + + const highlights = extractThinkingHighlights(messages); + expect(highlights).toHaveLength(1); + expect(highlights[0]).toContain('I need to understand the authentication flow'); + }); + + it('respects maxHighlights limit', () => { + const messages: AnthropicMessage[] = Array.from({ length: 10 }, () => ({ + role: 'assistant', + content: [{ type: 'thinking', thinking: 'Some reasoning that is long enough to be captured here' }], + })); + + const highlights = extractThinkingHighlights(messages, 3); + expect(highlights).toHaveLength(3); + }); + + it('skips very short thinking blocks', () => { + const messages: AnthropicMessage[] = [ + { + role: 'assistant', + content: [ + { type: 'thinking', thinking: 'ok' }, // too short + { type: 'thinking', thinking: 'This is a substantial reasoning block that should be captured' }, + ], + }, + ]; + + const highlights = extractThinkingHighlights(messages); + expect(highlights).toHaveLength(1); + }); + + it('uses text field as fallback when thinking is absent', () => { + const messages: AnthropicMessage[] = [ + { + role: 'assistant', + content: [{ type: 'thinking', text: 'Reasoning via text field which is quite long enough' }], + }, + ]; + + const highlights = extractThinkingHighlights(messages); + expect(highlights).toHaveLength(1); + }); +}); + +// ── parser-helpers.ts additions 
────────────────────────────────────────────── + +describe('extractRepo', () => { + it('prefers git URL when available', () => { + expect( + extractRepo({ + gitUrl: 'https://github.com/owner/repo.git', + cwd: '/home/user/project', + }), + ).toBe('owner/repo'); + }); + + it('falls back to cwd when no git URL', () => { + expect(extractRepo({ cwd: '/home/user/project' })).toBe('user/project'); + }); + + it('falls back to cwd when git URL is invalid', () => { + expect( + extractRepo({ + gitUrl: 'not-a-url', + cwd: '/home/user/project', + }), + ).toBe('user/project'); + }); + + it('returns empty for no inputs', () => { + expect(extractRepo({})).toBe(''); + }); +}); + +describe('trimMessages', () => { + const msg = (role: 'user' | 'assistant', i: number): ConversationMessage => ({ + role, + content: `msg ${i}`, + }); + + it('returns last N messages', () => { + const msgs = [msg('user', 1), msg('assistant', 2), msg('user', 3), msg('assistant', 4)]; + expect(trimMessages(msgs, 2)).toHaveLength(2); + expect(trimMessages(msgs, 2)[0].content).toBe('msg 3'); + }); + + it('includes last user message when tail has none', () => { + const msgs = [msg('user', 1), msg('assistant', 2), msg('assistant', 3), msg('assistant', 4), msg('assistant', 5)]; + const trimmed = trimMessages(msgs, 3); + expect(trimmed.some((m) => m.role === 'user')).toBe(true); + }); + + it('returns all messages when count <= maxCount', () => { + const msgs = [msg('user', 1), msg('assistant', 2)]; + expect(trimMessages(msgs, 10)).toHaveLength(2); + }); +}); + +// ── classifyToolName ──────────────────────────────────────────────────────── + +describe('classifyToolName', () => { + it('classifies shell tools', () => { + expect(classifyToolName('Bash')).toBe('shell'); + expect(classifyToolName('bash')).toBe('shell'); + expect(classifyToolName('exec_command')).toBe('shell'); + expect(classifyToolName('run_terminal_command')).toBe('shell'); + }); + + it('classifies file operation tools', () => { + 
expect(classifyToolName('Read')).toBe('read'); + expect(classifyToolName('Write')).toBe('write'); + expect(classifyToolName('Edit')).toBe('edit'); + expect(classifyToolName('apply_diff')).toBe('edit'); + expect(classifyToolName('create_file')).toBe('write'); + }); + + it('classifies search tools', () => { + expect(classifyToolName('Grep')).toBe('grep'); + expect(classifyToolName('Glob')).toBe('glob'); + expect(classifyToolName('WebSearch')).toBe('search'); + expect(classifyToolName('WebFetch')).toBe('fetch'); + }); + + it('classifies task and ask tools', () => { + expect(classifyToolName('Task')).toBe('task'); + expect(classifyToolName('TaskOutput')).toBe('task'); + expect(classifyToolName('AskUserQuestion')).toBe('ask'); + }); + + it('returns undefined for skip tools', () => { + expect(classifyToolName('TodoWrite')).toBeUndefined(); + }); + + it('returns mcp for unknown tools', () => { + expect(classifyToolName('ExitPlanMode')).toBe('mcp'); + expect(classifyToolName('mcp__github__list_issues')).toBe('mcp'); + expect(classifyToolName('some-custom-tool')).toBe('mcp'); + expect(classifyToolName('unknown_tool')).toBe('mcp'); + }); +}); + +// ── diff utilities ────────────────────────────────────────────────────────── + +describe('formatNewFileDiff', () => { + it('formats new file as all + lines', () => { + const content = 'line1\nline2\nline3'; + const { diff, truncated } = formatNewFileDiff(content, 'test.ts', 200); + expect(diff).toContain('+++ b/test.ts'); + expect(diff).toContain('+line1'); + expect(diff).toContain('+line2'); + expect(diff).toContain('+line3'); + expect(truncated).toBe(0); + }); + + it('truncates when exceeding maxLines', () => { + const lines = Array.from({ length: 250 }, (_, i) => `line${i}`); + const { diff, truncated } = formatNewFileDiff(lines.join('\n'), 'big.ts', 200); + expect(truncated).toBe(50); + expect(diff).toContain('+50 lines truncated'); + }); +}); + +describe('formatEditDiff', () => { + it('formats old/new as - and + lines', () => 
{ + const { diff } = formatEditDiff('old line', 'new line', 'file.ts', 200); + expect(diff).toContain('-old line'); + expect(diff).toContain('+new line'); + expect(diff).toContain('--- a/file.ts'); + expect(diff).toContain('+++ b/file.ts'); + }); +}); + +describe('extractStdoutTail', () => { + it('returns last N non-empty lines', () => { + const output = 'line1\nline2\nline3\nline4\nline5\nline6\nline7'; + expect(extractStdoutTail(output, 3)).toBe('line5\nline6\nline7'); + }); + + it('skips empty lines', () => { + const output = 'line1\n\n\nline2\n\nline3\n\n'; + expect(extractStdoutTail(output, 2)).toBe('line2\nline3'); + }); +}); + +describe('countDiffStats', () => { + it('counts added and removed lines', () => { + const diff = '--- a/f.ts\n+++ b/f.ts\n-old\n+new1\n+new2\n context'; + const stats = countDiffStats(diff); + expect(stats.added).toBe(2); + expect(stats.removed).toBe(1); + }); +}); + +// ── Structured tool data in extractAnthropicToolData ──────────────────────── + +describe('extractAnthropicToolData structured data', () => { + it('produces ShellSampleData with exitCode and stdoutTail', () => { + const messages: AnthropicMessage[] = [ + { + role: 'assistant', + content: [{ type: 'tool_use', id: 'tu1', name: 'Bash', input: { command: 'npm test' } }], + }, + { + role: 'user', + content: [{ type: 'tool_result', tool_use_id: 'tu1', content: 'Tests passed\nexit code 0' }], + }, + ]; + + const { summaries } = extractAnthropicToolData(messages); + const shell = summaries.find((s) => s.name === 'Bash'); + expect(shell).toBeDefined(); + const sample = shell!.samples[0]; + expect(sample.data).toBeDefined(); + expect(sample.data!.category).toBe('shell'); + if (sample.data!.category === 'shell') { + expect(sample.data!.command).toBe('npm test'); + expect(sample.data!.exitCode).toBe(0); + } + }); + + it('produces ReadSampleData with filePath', () => { + const messages: AnthropicMessage[] = [ + { + role: 'assistant', + content: [{ type: 'tool_use', id: 'tu1', name: 
'Read', input: { file_path: '/src/app.ts', offset: 10, limit: 50 } }], + }, + ]; + + const { summaries } = extractAnthropicToolData(messages); + const read = summaries.find((s) => s.name === 'Read'); + expect(read).toBeDefined(); + const sample = read!.samples[0]; + expect(sample.data).toBeDefined(); + if (sample.data!.category === 'read') { + expect(sample.data!.filePath).toBe('/src/app.ts'); + expect(sample.data!.lineStart).toBe(10); + } + }); + + it('produces WriteSampleData with diff for new files', () => { + const messages: AnthropicMessage[] = [ + { + role: 'assistant', + content: [ + { + type: 'tool_use', + id: 'tu1', + name: 'create_file', + input: { file_path: '/src/new.ts', content: 'export const foo = 1;\nexport const bar = 2;' }, + }, + ], + }, + ]; + + const { summaries } = extractAnthropicToolData(messages); + const write = summaries.find((s) => s.name === 'create_file'); + expect(write).toBeDefined(); + const sample = write!.samples[0]; + expect(sample.data).toBeDefined(); + if (sample.data!.category === 'write') { + expect(sample.data!.filePath).toBe('/src/new.ts'); + expect(sample.data!.isNewFile).toBe(true); + expect(sample.data!.diff).toContain('+export const foo = 1;'); + expect(sample.data!.diffStats).toBeDefined(); + expect(sample.data!.diffStats!.added).toBe(2); + } + }); + + it('produces EditSampleData with diff', () => { + const messages: AnthropicMessage[] = [ + { + role: 'assistant', + content: [ + { + type: 'tool_use', + id: 'tu1', + name: 'Edit', + input: { file_path: '/src/app.ts', old_string: 'const x = 1;', new_string: 'const x = 2;\nconst y = 3;' }, + }, + ], + }, + ]; + + const { summaries } = extractAnthropicToolData(messages); + const edit = summaries.find((s) => s.name === 'Edit'); + expect(edit).toBeDefined(); + const sample = edit!.samples[0]; + expect(sample.data).toBeDefined(); + if (sample.data!.category === 'edit') { + expect(sample.data!.filePath).toBe('/src/app.ts'); + expect(sample.data!.diff).toContain('-const x = 
1;'); + expect(sample.data!.diff).toContain('+const x = 2;'); + expect(sample.data!.diffStats!.added).toBe(2); + expect(sample.data!.diffStats!.removed).toBe(1); + } + }); + + it('produces GrepSampleData with pattern', () => { + const messages: AnthropicMessage[] = [ + { + role: 'assistant', + content: [{ type: 'tool_use', id: 'tu1', name: 'Grep', input: { pattern: 'TODO', path: 'src/' } }], + }, + { + role: 'user', + content: [{ type: 'tool_result', tool_use_id: 'tu1', content: 'Found 3 matches\nsrc/a.ts\nsrc/b.ts\nsrc/c.ts' }], + }, + ]; + + const { summaries } = extractAnthropicToolData(messages); + const grep = summaries.find((s) => s.name === 'Grep'); + expect(grep).toBeDefined(); + if (grep!.samples[0].data!.category === 'grep') { + expect(grep!.samples[0].data!.pattern).toBe('TODO'); + expect(grep!.samples[0].data!.targetPath).toBe('src/'); + expect(grep!.samples[0].data!.matchCount).toBe(3); + } + }); + + it('tracks errors for shell commands with non-zero exit', () => { + const messages: AnthropicMessage[] = [ + { + role: 'assistant', + content: [{ type: 'tool_use', id: 'tu1', name: 'Bash', input: { command: 'npm test' } }], + }, + { + role: 'user', + content: [{ type: 'tool_result', tool_use_id: 'tu1', content: 'FAIL\nexit code 1' }], + }, + ]; + + const { summaries } = extractAnthropicToolData(messages); + const shell = summaries.find((s) => s.name === 'Bash'); + expect(shell!.errorCount).toBe(1); + if (shell!.samples[0].data!.category === 'shell') { + expect(shell!.samples[0].data!.errored).toBe(true); + expect(shell!.samples[0].data!.exitCode).toBe(1); + } + }); + + it('produces McpSampleData for MCP tools', () => { + const messages: AnthropicMessage[] = [ + { + role: 'assistant', + content: [{ type: 'tool_use', id: 'tu1', name: 'mcp__github__list_issues', input: { repo: 'test/repo', state: 'open' } }], + }, + { + role: 'user', + content: [{ type: 'tool_result', tool_use_id: 'tu1', content: '[{"number": 42, "title": "Bug"}]' }], + }, + ]; + + const { 
summaries } = extractAnthropicToolData(messages); + const mcp = summaries.find((s) => s.name === 'mcp__github__list_issues'); + expect(mcp).toBeDefined(); + if (mcp!.samples[0].data!.category === 'mcp') { + expect(mcp!.samples[0].data!.toolName).toBe('mcp__github__list_issues'); + expect(mcp!.samples[0].data!.params).toContain('repo='); + expect(mcp!.samples[0].data!.result).toBeDefined(); + } + }); + + it('captures errorMessage for failed shell commands', () => { + const messages: AnthropicMessage[] = [ + { + role: 'assistant', + content: [{ type: 'tool_use', id: 'tu1', name: 'Bash', input: { command: 'rm -rf /nope' } }], + }, + { + role: 'user', + content: [{ type: 'tool_result', tool_use_id: 'tu1', content: 'Permission denied\nexit code 1', is_error: true }], + }, + ]; + const { summaries } = extractAnthropicToolData(messages); + const shell = summaries.find((s) => s.name === 'Bash'); + expect(shell).toBeDefined(); + if (shell!.samples[0].data!.category === 'shell') { + expect(shell!.samples[0].data!.errored).toBe(true); + expect(shell!.samples[0].data!.errorMessage).toContain('Permission denied'); + } + }); + + it('derives isNewFile=true for Create tool, undefined for Write tool', () => { + const messages: AnthropicMessage[] = [ + { + role: 'assistant', + content: [ + { type: 'tool_use', id: 'tu1', name: 'Create', input: { file_path: '/a.ts', content: 'x' } }, + { type: 'tool_use', id: 'tu2', name: 'Write', input: { file_path: '/b.ts', content: 'y' } }, + ], + }, + ]; + const { summaries } = extractAnthropicToolData(messages); + const create = summaries.find((s) => s.name === 'Create'); + const write = summaries.find((s) => s.name === 'Write'); + expect(create).toBeDefined(); + expect(write).toBeDefined(); + if (create!.samples[0].data!.category === 'write') { + expect(create!.samples[0].data!.isNewFile).toBe(true); + } + if (write!.samples[0].data!.category === 'write') { + expect(write!.samples[0].data!.isNewFile).toBeUndefined(); + } + }); + + it('captures 
SearchSampleData with result preview', () => { + const messages: AnthropicMessage[] = [ + { + role: 'assistant', + content: [{ type: 'tool_use', id: 'tu1', name: 'WebSearch', input: { query: 'vitest mocking' } }], + }, + { + role: 'user', + content: [{ type: 'tool_result', tool_use_id: 'tu1', content: 'Found 5 results\n1. vitest docs...' }], + }, + ]; + const { summaries } = extractAnthropicToolData(messages); + const search = summaries.find((s) => s.name === 'WebSearch'); + expect(search).toBeDefined(); + if (search!.samples[0].data!.category === 'search') { + expect(search!.samples[0].data!.query).toBe('vitest mocking'); + expect(search!.samples[0].data!.resultPreview).toContain('Found 5 results'); + expect(search!.samples[0].data!.resultCount).toBe(5); + } + }); +}); + +// ── Tool name classification v2 (new aliases) ────────────────────────────── + +describe('classifyToolName v2 aliases', () => { + it('classifies apply_patch as edit', () => { + expect(classifyToolName('apply_patch')).toBe('edit'); + }); + + it('classifies web_search_call as search', () => { + expect(classifyToolName('web_search_call')).toBe('search'); + }); + + it('skips update_plan and view_image', () => { + expect(classifyToolName('update_plan')).toBeUndefined(); + expect(classifyToolName('view_image')).toBeUndefined(); + }); +}); + +// ── v4.1.0 — detectCategory fallback uses classifyToolName ────────────────── + +describe('detectCategory fallback via classifyToolName', () => { + // We test indirectly via generateHandoffMarkdown — if a tool summary has + // no structured data, the fallback should still classify it correctly. + // The key guarantee: classifyToolName covers ALL names in tool-names.ts. 
+ it('classifies Execute as shell', () => { + expect(classifyToolName('Execute')).toBe('shell'); + }); + it('classifies run_terminal_command as shell', () => { + expect(classifyToolName('run_terminal_command')).toBe('shell'); + }); + it('classifies apply_patch as edit (not mcp)', () => { + expect(classifyToolName('apply_patch')).toBe('edit'); + }); + it('classifies web_search_call as search (not mcp)', () => { + expect(classifyToolName('web_search_call')).toBe('search'); + }); + it('classifies request_user_input as ask', () => { + expect(classifyToolName('request_user_input')).toBe('ask'); + }); +}); diff --git a/src/__tests__/stress-test.ts b/src/__tests__/stress-test.ts index 31af07a..5fe5699 100755 --- a/src/__tests__/stress-test.ts +++ b/src/__tests__/stress-test.ts @@ -1,26 +1,26 @@ #!/usr/bin/env tsx /** * Comprehensive Stress Test for Session Parsers - * + * * This script validates all 5 parsers against real large sessions on this machine: * - Parses the largest real sessions for each format * - Validates extracted data structure * - Cross-converts between all formats (5 sources × 4 targets = 20 conversions) * - Reports timing and results - * + * * Run with: npx tsx src/__tests__/stress-test.ts */ import * as fs from 'fs'; +import { createRequire } from 'module'; import * as path from 'path'; import { performance } from 'perf_hooks'; -import { createRequire } from 'module'; -import type { UnifiedSession, SessionContext } from '../types/index.js'; import { extractClaudeContext } from '../parsers/claude.js'; +import { extractCodexContext } from '../parsers/codex.js'; import { extractCopilotContext } from '../parsers/copilot.js'; import { extractGeminiContext } from '../parsers/gemini.js'; -import { extractCodexContext } from '../parsers/codex.js'; import { extractOpenCodeContext } from '../parsers/opencode.js'; +import type { SessionContext, UnifiedSession } from '../types/index.js'; import { generateHandoffMarkdown } from '../utils/markdown.js'; // 
============================================================================ @@ -49,7 +49,7 @@ const TEST_SESSIONS: TestSession[] = [ path: '/Users/yigitkonur/.claude/projects/-Users-yigitkonur-dev-my-experiments/ee128819-496f-4b8b-8118-096d5e0f9075.jsonl', expectedSize: '~52MB', }, - + // Copilot Sessions (directories with events.jsonl) { name: 'Copilot Large (7MB)', @@ -65,7 +65,7 @@ const TEST_SESSIONS: TestSession[] = [ expectedSize: '~5MB', isDirectory: true, }, - + // Gemini Sessions (JSON files) { name: 'Gemini Large (67KB)', @@ -79,7 +79,7 @@ const TEST_SESSIONS: TestSession[] = [ path: '/Users/yigitkonur/.gemini/tmp/cli-continues/chats/session-2026-02-19T00-55-4c7f6d40.json', expectedSize: '~40KB', }, - + // Codex Sessions (JSONL files) { name: 'Codex Large (12MB)', @@ -93,7 +93,7 @@ const TEST_SESSIONS: TestSession[] = [ path: '/Users/yigitkonur/.codex/sessions/2026/02/17/rollout-2026-02-17T16-44-56-019c6e35-0c75-7e13-a35d-c5aee1134efe.jsonl', expectedSize: '~8MB', }, - + // OpenCode Session (SQLite database) { name: 'OpenCode (SQLite ~2MB)', @@ -113,7 +113,7 @@ function formatBytes(bytes: number): string { const k = 1024; const sizes = ['B', 'KB', 'MB', 'GB']; const i = Math.floor(Math.log(bytes) / Math.log(k)); - return `${(bytes / Math.pow(k, i)).toFixed(2)} ${sizes[i]}`; + return `${(bytes / k ** i).toFixed(2)} ${sizes[i]}`; } /** Format duration in milliseconds */ @@ -164,15 +164,15 @@ function getPathSize(filePath: string): number { /** Create a UnifiedSession from a file path */ function createSessionFromPath(testSession: TestSession): UnifiedSession | null { const { name, source, path: filePath } = testSession; - + if (!pathExists(filePath)) { console.error(` ❌ Path not found: ${filePath}`); return null; } - + const size = getPathSize(filePath); const stats = fs.statSync(filePath); - + // Extract session ID from path let sessionId: string; if (source === 'claude' || source === 'codex') { @@ -188,20 +188,20 @@ function 
createSessionFromPath(testSession: TestSession): UnifiedSession | null const { DatabaseSync } = require('node:sqlite'); const dbPath = path.join(process.env.HOME || '~', '.local', 'share', 'opencode', 'opencode.db'); const db = new DatabaseSync(dbPath, { open: true, readOnly: true }); - + // Find session with most messages - const result = db.prepare( - 'SELECT m.session_id, COUNT(*) as cnt FROM message m GROUP BY m.session_id ORDER BY cnt DESC LIMIT 1' - ).get() as { session_id: string; cnt: number } | undefined; - + const result = db + .prepare('SELECT m.session_id, COUNT(*) as cnt FROM message m GROUP BY m.session_id ORDER BY cnt DESC LIMIT 1') + .get() as { session_id: string; cnt: number } | undefined; + if (result) { sessionId = result.session_id; - + // Get session details for cwd - const sessionRow = db.prepare( - 'SELECT id, title, directory FROM session WHERE id = ?' - ).get(result.session_id) as { id: string; title?: string; directory?: string } | undefined; - + const sessionRow = db.prepare('SELECT id, title, directory FROM session WHERE id = ?').get(result.session_id) as + | { id: string; title?: string; directory?: string } + | undefined; + if (sessionRow?.directory) { // Store directory for later use in the return statement (testSession as any)._opencodeDir = sessionRow.directory; @@ -209,7 +209,7 @@ function createSessionFromPath(testSession: TestSession): UnifiedSession | null } else { sessionId = 'latest'; } - + db.close(); } catch (err) { console.error(` ⚠️ Failed to query OpenCode DB: ${err}`); @@ -218,13 +218,16 @@ function createSessionFromPath(testSession: TestSession): UnifiedSession | null } else { sessionId = 'unknown'; } - + return { id: sessionId, source, - cwd: source === 'copilot' ? filePath : - source === 'opencode' && (testSession as any)._opencodeDir ? (testSession as any)._opencodeDir : - path.dirname(filePath), + cwd: + source === 'copilot' + ? filePath + : source === 'opencode' && (testSession as any)._opencodeDir + ? 
(testSession as any)._opencodeDir + : path.dirname(filePath), repo: 'test-repo', lines: 0, bytes: size, @@ -238,17 +241,17 @@ function createSessionFromPath(testSession: TestSession): UnifiedSession | null /** Validate SessionContext structure */ function validateContext(context: SessionContext, sourceName: string): { valid: boolean; errors: string[] } { const errors: string[] = []; - + // Check required fields if (!context.session) errors.push('Missing session'); if (!context.recentMessages) errors.push('Missing recentMessages'); if (!Array.isArray(context.recentMessages)) errors.push('recentMessages is not an array'); if (!context.markdown) errors.push('Missing markdown'); - + // Check filesModified and pendingTasks exist if (!Array.isArray(context.filesModified)) errors.push('filesModified is not an array'); if (!Array.isArray(context.pendingTasks)) errors.push('pendingTasks is not an array'); - + // Check message structure if (context.recentMessages) { context.recentMessages.forEach((msg, idx) => { @@ -265,7 +268,7 @@ function validateContext(context: SessionContext, sourceName: string): { valid: } }); } - + // Check markdown structure if (context.markdown) { const requiredSections = ['# Session Handoff Context', '## Recent Conversation']; @@ -274,45 +277,45 @@ function validateContext(context: SessionContext, sourceName: string): { valid: errors.push(`Markdown missing section: ${section}`); } } - + // Check that markdown is not too small (should have real content) if (context.markdown.length < 100) { errors.push(`Markdown too short (${context.markdown.length} chars) - may be missing content`); } } - + // Check session metadata if (context.session) { if (!context.session.id) errors.push('Session missing id'); if (!context.session.source) errors.push('Session missing source'); if (!context.session.originalPath) errors.push('Session missing originalPath'); } - + return { valid: errors.length === 0, errors }; } /** Validate cross-conversion (recreate markdown for 
different target) */ function validateCrossConversion( sourceContext: SessionContext, - targetSource: 'claude' | 'copilot' | 'gemini' | 'codex' | 'opencode' + targetSource: 'claude' | 'copilot' | 'gemini' | 'codex' | 'opencode', ): { valid: boolean; errors: string[] } { const errors: string[] = []; - + try { // Create a new session object as if it came from the target source const targetSession: UnifiedSession = { ...sourceContext.session, source: targetSource, }; - + // Generate markdown using the target source const markdown = generateHandoffMarkdown( targetSession, sourceContext.recentMessages, sourceContext.filesModified, - sourceContext.pendingTasks + sourceContext.pendingTasks, ); - + // Validate the generated markdown if (!markdown) errors.push('Generated markdown is empty'); if (markdown && !markdown.includes('# Session Handoff Context')) { @@ -321,11 +324,10 @@ function validateCrossConversion( if (markdown && !markdown.includes('## Recent Conversation')) { errors.push('Markdown missing conversation section'); } - } catch (error) { errors.push(`Cross-conversion error: ${error instanceof Error ? 
error.message : String(error)}`); } - + return { valid: errors.length === 0, errors }; } @@ -377,21 +379,21 @@ async function runStressTest() { console.log('║ SESSION PARSER COMPREHENSIVE STRESS TEST ║'); console.log('╚════════════════════════════════════════════════════════════════╝'); console.log(); - + // Phase 1: Test each parser with its real sessions console.log('📊 PHASE 1: PARSING & EXTRACTION TESTS'); console.log('─'.repeat(70)); console.log(); - + const extractedContexts: Map = new Map(); - + for (const testSession of TEST_SESSIONS) { const { name, source, path: filePath } = testSession; - + console.log(`Testing: ${name}`); console.log(` Source: ${source}`); console.log(` Path: ${filePath}`); - + // Check if path exists if (!pathExists(filePath)) { console.log(` ⚠️ SKIP - Path not found`); @@ -407,10 +409,10 @@ async function runStressTest() { console.log(); continue; } - + const size = getPathSize(filePath); console.log(` Size: ${formatBytes(size)}`); - + // Create session object const session = createSessionFromPath(testSession); if (!session) { @@ -427,7 +429,7 @@ async function runStressTest() { console.log(); continue; } - + // Extract context const startTime = performance.now(); const memBefore = getMemoryUsageMB(); @@ -437,17 +439,19 @@ async function runStressTest() { const duration = performance.now() - startTime; const memAfter = getMemoryUsageMB(); const memUsed = memAfter - memBefore; - + // Validate context const validation = validateContext(context, name); - + if (validation.valid) { console.log(` ✅ PASS - Extracted in ${formatDuration(duration)}`); console.log(` Messages: ${context.recentMessages.length}`); - console.log(` Markdown: ${formatBytes(context.markdown.length)} (${context.markdown.split('\n').length} lines)`); + console.log( + ` Markdown: ${formatBytes(context.markdown.length)} (${context.markdown.split('\n').length} lines)`, + ); console.log(` Working Dir: ${context.session.cwd}`); console.log(` Memory Used: 
${formatMemory(memUsed)}`); - + testResults.push({ session: name, source, @@ -458,13 +462,13 @@ async function runStressTest() { errors: [], memoryUsedMB: memUsed, }); - + // Store for cross-conversion tests extractedContexts.set(name, context); } else { console.log(` ❌ FAIL - Validation errors:`); - validation.errors.forEach(err => console.log(` - ${err}`)); - + for (const err of validation.errors) console.log(` - ${err}`); + testResults.push({ session: name, source, @@ -479,7 +483,7 @@ async function runStressTest() { const duration = performance.now() - startTime; const errorMsg = error instanceof Error ? error.message : String(error); console.log(` ❌ FAIL - ${errorMsg}`); - + testResults.push({ session: name, source, @@ -490,33 +494,37 @@ async function runStressTest() { errors: [errorMsg], }); } - + console.log(); } - + // Phase 2: Cross-conversion tests console.log('🔄 PHASE 2: CROSS-CONVERSION TESTS'); console.log('─'.repeat(70)); console.log(); console.log('Testing all 20 conversion paths (5 sources × 4 targets each)...'); console.log(); - - const sources: Array<'claude' | 'copilot' | 'gemini' | 'codex' | 'opencode'> = - ['claude', 'copilot', 'gemini', 'codex', 'opencode']; - + + const sources: Array<'claude' | 'copilot' | 'gemini' | 'codex' | 'opencode'> = [ + 'claude', + 'copilot', + 'gemini', + 'codex', + 'opencode', + ]; + // Take one successful extraction from each source const sourceContexts = new Map(); for (const source of sources) { - const context = Array.from(extractedContexts.entries()) - .find(([name, ctx]) => ctx.session.source === source)?.[1]; + const context = Array.from(extractedContexts.entries()).find(([name, ctx]) => ctx.session.source === source)?.[1]; if (context) { sourceContexts.set(source, context); } } - + console.log(`Found ${sourceContexts.size} source contexts to test conversions`); console.log(); - + for (const sourceType of sources) { const sourceContext = sourceContexts.get(sourceType); if (!sourceContext) { @@ -533,14 
+541,14 @@ async function runStressTest() { } continue; } - + console.log(`From ${sourceType.toUpperCase()}:`); - + for (const targetType of sources) { if (sourceType === targetType) continue; // Skip same-to-same - + const validation = validateCrossConversion(sourceContext, targetType); - + if (validation.valid) { console.log(` ✅ ${sourceType} → ${targetType}`); conversionResults.push({ @@ -551,7 +559,7 @@ async function runStressTest() { }); } else { console.log(` ❌ ${sourceType} → ${targetType}`); - validation.errors.forEach(err => console.log(` - ${err}`)); + for (const err of validation.errors) console.log(` - ${err}`); conversionResults.push({ from: sourceType, to: targetType, @@ -562,23 +570,23 @@ async function runStressTest() { } console.log(); } - + // Phase 3: Summary Report console.log('📈 SUMMARY REPORT'); console.log('═'.repeat(70)); console.log(); - + // Extraction Tests Summary console.log('Extraction Tests:'); console.log('─'.repeat(70)); - const passed = testResults.filter(r => r.status === 'pass').length; - const failed = testResults.filter(r => r.status === 'fail').length; - const skipped = testResults.filter(r => r.status === 'skip').length; + const passed = testResults.filter((r) => r.status === 'pass').length; + const failed = testResults.filter((r) => r.status === 'fail').length; + const skipped = testResults.filter((r) => r.status === 'skip').length; const total = testResults.length; - + console.log(`Total: ${total} | ✅ Pass: ${passed} | ❌ Fail: ${failed} | ⚠️ Skip: ${skipped}`); console.log(); - + // Detailed results table console.log('Session | Source | Status | Time | Msgs | MD Size'); console.log('─'.repeat(70)); @@ -590,90 +598,91 @@ async function runStressTest() { const time = formatDuration(result.duration).padStart(8); const msgs = String(result.messageCount).padStart(4); const mdSize = formatBytes(result.markdownLength).padStart(7); - + console.log(`${sessionName} | ${source} | ${status} | ${time} | ${msgs} | ${mdSize}`); } 
console.log(); - + // Cross-Conversion Summary console.log('Cross-Conversion Tests (5 sources × 4 targets = 20 paths):'); console.log('─'.repeat(70)); - const convPassed = conversionResults.filter(r => r.status === 'pass').length; - const convFailed = conversionResults.filter(r => r.status === 'fail').length; - const convSkipped = conversionResults.filter(r => r.status === 'skip').length; + const convPassed = conversionResults.filter((r) => r.status === 'pass').length; + const convFailed = conversionResults.filter((r) => r.status === 'fail').length; + const convSkipped = conversionResults.filter((r) => r.status === 'skip').length; const convTotal = conversionResults.length; - + console.log(`Total: ${convTotal} | ✅ Pass: ${convPassed} | ❌ Fail: ${convFailed} | ⚠️ Skip: ${convSkipped}`); console.log(); - + // Conversion matrix console.log('Conversion Matrix (✅ = pass, ❌ = fail, ⚠️ = skip):'); console.log(); console.log(' │ claude │ copilot │ gemini │ codex │ opencode'); console.log('────────┼────────┼─────────┼────────┼───────┼─────────'); - + for (const fromSource of sources) { const row = [`${fromSource.padEnd(7)} │`]; for (const toSource of sources) { if (fromSource === toSource) { row.push(' - │'); } else { - const result = conversionResults.find(r => r.from === fromSource && r.to === toSource); - const icon = result?.status === 'pass' ? ' ✅ ' : - result?.status === 'fail' ? ' ❌ ' : ' ⚠️ '; + const result = conversionResults.find((r) => r.from === fromSource && r.to === toSource); + const icon = result?.status === 'pass' ? ' ✅ ' : result?.status === 'fail' ? 
' ❌ ' : ' ⚠️ '; row.push(`${icon}│`); } } console.log(row.join(' ')); } console.log(); - + // Performance Stats - const successfulTests = testResults.filter(r => r.status === 'pass'); + const successfulTests = testResults.filter((r) => r.status === 'pass'); if (successfulTests.length > 0) { console.log('Performance Statistics:'); console.log('─'.repeat(70)); - + const avgDuration = successfulTests.reduce((sum, r) => sum + r.duration, 0) / successfulTests.length; - const maxDuration = Math.max(...successfulTests.map(r => r.duration)); - const minDuration = Math.min(...successfulTests.map(r => r.duration)); - + const maxDuration = Math.max(...successfulTests.map((r) => r.duration)); + const minDuration = Math.min(...successfulTests.map((r) => r.duration)); + console.log(`Average Parse Time: ${formatDuration(avgDuration)}`); console.log(`Fastest: ${formatDuration(minDuration)}`); console.log(`Slowest: ${formatDuration(maxDuration)}`); - + // Memory stats - const testsWithMemory = successfulTests.filter(r => r.memoryUsedMB !== undefined); + const testsWithMemory = successfulTests.filter((r) => r.memoryUsedMB !== undefined); if (testsWithMemory.length > 0) { const avgMemory = testsWithMemory.reduce((sum, r) => sum + (r.memoryUsedMB || 0), 0) / testsWithMemory.length; - const maxMemory = Math.max(...testsWithMemory.map(r => r.memoryUsedMB || 0)); + const maxMemory = Math.max(...testsWithMemory.map((r) => r.memoryUsedMB || 0)); console.log(`Average Memory Used: ${formatMemory(avgMemory)}`); console.log(`Peak Memory: ${formatMemory(maxMemory)}`); } - + // Show largest session parsed - const largestSession = successfulTests.reduce((max, r) => - testResults.find(t => t.session === r.session && t.status === 'pass') && - TEST_SESSIONS.find(s => s.name === r.session)!.expectedSize > - TEST_SESSIONS.find(s => s.name === max.session)!.expectedSize ? 
r : max + const largestSession = successfulTests.reduce((max, r) => + testResults.find((t) => t.session === r.session && t.status === 'pass') && + TEST_SESSIONS.find((s) => s.name === r.session)!.expectedSize > + TEST_SESSIONS.find((s) => s.name === max.session)!.expectedSize + ? r + : max, ); - + console.log(`Largest Session Tested: ${largestSession.session} (${formatDuration(largestSession.duration)})`); - + // Throughput calculation const totalBytes = successfulTests.reduce((sum, r) => { - const ts = TEST_SESSIONS.find(s => s.name === r.session); + const ts = TEST_SESSIONS.find((s) => s.name === r.session); return sum + (ts ? getPathSize(ts.path) : 0); }, 0); const totalTime = successfulTests.reduce((sum, r) => sum + r.duration, 0); if (totalTime > 0) { - const mbPerSecond = (totalBytes / 1024 / 1024) / (totalTime / 1000); + const mbPerSecond = totalBytes / 1024 / 1024 / (totalTime / 1000); console.log(`Overall Throughput: ${mbPerSecond.toFixed(2)} MB/s`); } - + console.log(); } - + // Final verdict console.log('═'.repeat(70)); const allPassed = failed === 0 && convFailed === 0; @@ -683,13 +692,13 @@ async function runStressTest() { console.log(`⚠️ TESTS COMPLETED WITH ${failed + convFailed} FAILURES`); } console.log('═'.repeat(70)); - + // Exit with appropriate code process.exit(allPassed ? 0 : 1); } // Run the stress test -runStressTest().catch(error => { +runStressTest().catch((error) => { console.error('Fatal error running stress test:', error); process.exit(1); }); diff --git a/src/__tests__/unit-conversions.test.ts b/src/__tests__/unit-conversions.test.ts index a767209..51bd368 100644 --- a/src/__tests__/unit-conversions.test.ts +++ b/src/__tests__/unit-conversions.test.ts @@ -1,19 +1,30 @@ /** - * Fixture-based unit tests for all 20 cross-tool conversion paths. + * Fixture-based unit tests for all 30 cross-tool conversion paths. * Tests each parser's extractContext using controlled fixture data, * independent of real session files on the machine. 
*/ -import { describe, it, expect, beforeAll, afterAll } from 'vitest'; + import * as fs from 'fs'; import * as path from 'path'; -import type { UnifiedSession, SessionSource, SessionContext, ConversationMessage } from '../types/index.js'; -import { generateHandoffMarkdown, SOURCE_LABELS } from '../utils/markdown.js'; +import { afterAll, beforeAll, describe, expect, it } from 'vitest'; +import type { ConversationMessage, SessionContext, SessionSource, UnifiedSession } from '../types/index.js'; +import { generateHandoffMarkdown, getSourceLabels } from '../utils/markdown.js'; import { + createAmpFixture, + createAntigravityFixture, createClaudeFixture, + createClineFixture, + createCodexFixture, createCopilotFixture, + createCursorFixture, + createDroidFixture, createGeminiFixture, - createCodexFixture, + createKiloCodeFixture, + createKimiFixture, + createKiroFixture, createOpenCodeSqliteFixture, + createQwenCodeFixture, + createRooCodeFixture, type FixtureDir, } from './fixtures/index.js'; @@ -36,21 +47,31 @@ function parseClaudeFixtureMessages(filePath: string): ConversationMessage[] { if (parsed.isCompactSummary) continue; if (parsed.type === 'user' && parsed.message?.content) { - const text = typeof parsed.message.content === 'string' - ? parsed.message.content - : parsed.message.content.filter((c: any) => c.type === 'text' && c.text).map((c: any) => c.text).join('\n'); + const text = + typeof parsed.message.content === 'string' + ? parsed.message.content + : parsed.message.content + .filter((c: any) => c.type === 'text' && c.text) + .map((c: any) => c.text) + .join('\n'); if (text && !text.startsWith('<') && !text.startsWith('/') && !text.includes('Session Handoff')) { messages.push({ role: 'user', content: text, timestamp: new Date(parsed.timestamp) }); } } else if (parsed.type === 'assistant' && parsed.message?.content) { - const text = typeof parsed.message.content === 'string' - ? 
parsed.message.content - : parsed.message.content.filter((c: any) => c.type === 'text' && c.text).map((c: any) => c.text).join('\n'); + const text = + typeof parsed.message.content === 'string' + ? parsed.message.content + : parsed.message.content + .filter((c: any) => c.type === 'text' && c.text) + .map((c: any) => c.text) + .join('\n'); if (text) { messages.push({ role: 'assistant', content: text, timestamp: new Date(parsed.timestamp) }); } } - } catch { /* skip */ } + } catch { + /* skip */ + } } return messages; } @@ -70,7 +91,9 @@ function parseCopilotFixtureMessages(eventsPath: string): ConversationMessage[] const text = event.data?.content || ''; if (text) messages.push({ role: 'assistant', content: text, timestamp: new Date(event.timestamp) }); } - } catch { /* skip */ } + } catch { + /* skip */ + } } return messages; } @@ -110,11 +133,16 @@ function parseCodexFixtureMessages(filePath: string): ConversationMessage[] { if (parsed.type === 'event_msg' && parsed.payload?.type === 'user_message') { const text = parsed.payload.message || ''; if (text) messages.push({ role: 'user', content: text, timestamp: new Date(parsed.timestamp) }); - } else if (parsed.type === 'event_msg' && (parsed.payload?.type === 'agent_message' || parsed.payload?.type === 'assistant_message')) { + } else if ( + parsed.type === 'event_msg' && + (parsed.payload?.type === 'agent_message' || parsed.payload?.type === 'assistant_message') + ) { const text = parsed.payload.message || ''; if (text) messages.push({ role: 'assistant', content: text, timestamp: new Date(parsed.timestamp) }); } - } catch { /* skip */ } + } catch { + /* skip */ + } } return messages; } @@ -125,17 +153,17 @@ function parseOpenCodeFixtureMessages(dbPath: string, sessionId: string): Conver const messages: ConversationMessage[] = []; try { - const msgRows = db.prepare( - 'SELECT id, session_id, time_created, data FROM message WHERE session_id = ? 
ORDER BY time_created ASC' - ).all(sessionId) as any[]; + const msgRows = db + .prepare('SELECT id, session_id, time_created, data FROM message WHERE session_id = ? ORDER BY time_created ASC') + .all(sessionId) as any[]; for (const msgRow of msgRows) { const msgData = JSON.parse(msgRow.data); const role = msgData.role === 'user' ? 'user' : 'assistant'; - const partRows = db.prepare( - 'SELECT data FROM part WHERE message_id = ? ORDER BY time_created ASC' - ).all(msgRow.id) as any[]; + const partRows = db + .prepare('SELECT data FROM part WHERE message_id = ? ORDER BY time_created ASC') + .all(msgRow.id) as any[]; let text = ''; for (const partRow of partRows) { @@ -157,12 +185,221 @@ function parseOpenCodeFixtureMessages(dbPath: string, sessionId: string): Conver return messages; } +function parseCursorFixtureMessages(filePath: string): ConversationMessage[] { + const content = fs.readFileSync(filePath, 'utf8'); + const lines = content.trim().split('\n'); + const messages: ConversationMessage[] = []; + + for (const line of lines) { + try { + const parsed = JSON.parse(line); + const role = parsed.role; + const contentBlocks = parsed.message?.content || []; + + const textParts: string[] = []; + for (const block of contentBlocks) { + if (block.type === 'text' && block.text) { + if ( + block.text.startsWith('') || + block.text.startsWith('') || + block.text.startsWith('') + ) + continue; + + // Extract from user_query tags + const queryMatch = block.text.match(/\s*([\s\S]*?)\s*<\/user_query>/); + const cleaned = queryMatch ? queryMatch[1].trim() : block.text; + if (cleaned) textParts.push(cleaned); + } + } + + const text = textParts.join('\n').trim(); + if (!text) continue; + + messages.push({ + role: role === 'user' ? 
'user' : 'assistant', + content: text, + }); + } catch { + /* skip */ + } + } + return messages; +} + +function parseDroidFixtureMessages(filePath: string): ConversationMessage[] { + const content = fs.readFileSync(filePath, 'utf8'); + const lines = content.trim().split('\n'); + const messages: ConversationMessage[] = []; + + for (const line of lines) { + try { + const parsed = JSON.parse(line); + if (parsed.type !== 'message') continue; + const role = parsed.message?.role; + const contentBlocks = parsed.message?.content || []; + + const textParts: string[] = []; + for (const block of contentBlocks) { + if (block.type === 'text' && block.text) { + if (!block.text.startsWith('') && !block.text.startsWith(' b?.type === 'text' && typeof b.text === 'string') + .map((b: any) => b.text) + .join('\n'); + + if (!text) continue; + + messages.push({ + role: parsed.role, + content: text, + }); + } catch { + /* skip */ + } + } + + return messages; +} + +function parseQwenCodeFixtureMessages(filePath: string): ConversationMessage[] { + const content = fs.readFileSync(filePath, 'utf8'); + const lines = content.trim().split('\n'); + const messages: ConversationMessage[] = []; + + for (const line of lines) { + try { + const parsed = JSON.parse(line); + if (parsed.type !== 'user' && parsed.type !== 'assistant') continue; + + const text = (parsed.message?.parts || []) + .filter((p: any) => p?.text) + .map((p: any) => p.text) + .join('\n'); + + if (!text) continue; + + messages.push({ + role: parsed.type === 'user' ? 'user' : 'assistant', + content: text, + timestamp: parsed.timestamp ? 
new Date(parsed.timestamp) : undefined, + }); + } catch { + /* skip */ + } + } + + return messages; +} + // ─── Fixture Data ──────────────────────────────────────────────────────────── -const ALL_SOURCES: SessionSource[] = ['claude', 'copilot', 'gemini', 'codex', 'opencode']; +// Derive from registry — automatically picks up new tools +import { ALL_TOOLS } from '../parsers/registry.js'; -let fixtures: Record = {}; -let contexts: Record = {}; +const ALL_SOURCES: readonly SessionSource[] = ALL_TOOLS; + +const fixtures: Record = {}; +const contexts: Record = {}; beforeAll(() => { // Create fixtures @@ -171,88 +408,463 @@ beforeAll(() => { fixtures.gemini = createGeminiFixture(); fixtures.codex = createCodexFixture(); fixtures.opencode = createOpenCodeSqliteFixture(); + fixtures.droid = createDroidFixture(); + fixtures.cursor = createCursorFixture(); + fixtures.amp = createAmpFixture(); + fixtures.kiro = createKiroFixture(); + fixtures.kimi = createKimiFixture(); + fixtures.cline = createClineFixture(); + fixtures['roo-code'] = createRooCodeFixture(); + fixtures['kilo-code'] = createKiloCodeFixture(); + fixtures.antigravity = createAntigravityFixture(); + fixtures['qwen-code'] = createQwenCodeFixture(); // Build contexts from fixtures const now = new Date(); // Claude - const claudeFile = fs.readdirSync(fixtures.claude.root, { recursive: true }) - .map(f => path.join(fixtures.claude.root, f as string)) - .find(f => f.endsWith('.jsonl'))!; + const claudeFile = fs + .readdirSync(fixtures.claude.root, { recursive: true }) + .map((f) => path.join(fixtures.claude.root, f as string)) + .find((f) => f.endsWith('.jsonl'))!; const claudeSession: UnifiedSession = { - id: 'test-claude-session-1', source: 'claude', cwd: '/home/user/project', - repo: 'user/project', branch: 'main', lines: 5, bytes: 1000, - createdAt: now, updatedAt: now, originalPath: claudeFile, summary: 'Fix auth bug', + id: 'test-claude-session-1', + source: 'claude', + cwd: '/home/user/project', + repo: 
'user/project', + branch: 'main', + lines: 5, + bytes: 1000, + createdAt: now, + updatedAt: now, + originalPath: claudeFile, + summary: 'Fix auth bug', }; const claudeMsgs = parseClaudeFixtureMessages(claudeFile); contexts.claude = { - session: claudeSession, recentMessages: claudeMsgs, - filesModified: [], pendingTasks: [], toolSummaries: [], + session: claudeSession, + recentMessages: claudeMsgs, + filesModified: [], + pendingTasks: [], + toolSummaries: [], markdown: generateHandoffMarkdown(claudeSession, claudeMsgs, [], [], []), }; // Copilot - const copilotDir = fs.readdirSync(fixtures.copilot.root, { withFileTypes: true }) - .find(d => d.isDirectory())!; + const copilotDir = fs.readdirSync(fixtures.copilot.root, { withFileTypes: true }).find((d) => d.isDirectory())!; const copilotEventsPath = path.join(fixtures.copilot.root, copilotDir.name, 'events.jsonl'); const copilotSession: UnifiedSession = { - id: 'test-copilot-session-1', source: 'copilot', cwd: '/home/user/project', - repo: undefined, lines: 5, bytes: 1000, - createdAt: now, updatedAt: now, originalPath: path.join(fixtures.copilot.root, copilotDir.name), - summary: 'Fix auth bug', model: 'claude-sonnet-4', + id: 'test-copilot-session-1', + source: 'copilot', + cwd: '/home/user/project', + repo: undefined, + lines: 5, + bytes: 1000, + createdAt: now, + updatedAt: now, + originalPath: path.join(fixtures.copilot.root, copilotDir.name), + summary: 'Fix auth bug', + model: 'claude-sonnet-4', }; const copilotMsgs = parseCopilotFixtureMessages(copilotEventsPath); contexts.copilot = { - session: copilotSession, recentMessages: copilotMsgs, - filesModified: [], pendingTasks: [], toolSummaries: [], + session: copilotSession, + recentMessages: copilotMsgs, + filesModified: [], + pendingTasks: [], + toolSummaries: [], markdown: generateHandoffMarkdown(copilotSession, copilotMsgs, [], [], []), }; // Gemini - const geminiFile = fs.readdirSync(fixtures.gemini.root, { recursive: true }) - .map(f => 
path.join(fixtures.gemini.root, f as string)) - .find(f => f.endsWith('.json'))!; + const geminiFile = fs + .readdirSync(fixtures.gemini.root, { recursive: true }) + .map((f) => path.join(fixtures.gemini.root, f as string)) + .find((f) => f.endsWith('.json'))!; const geminiSession: UnifiedSession = { - id: 'test-gemini-session-1', source: 'gemini', cwd: '/home/user/project', - repo: 'user/project', lines: 10, bytes: 500, - createdAt: now, updatedAt: now, originalPath: geminiFile, summary: 'Fix auth bug', + id: 'test-gemini-session-1', + source: 'gemini', + cwd: '', + repo: '', + lines: 10, + bytes: 500, + createdAt: now, + updatedAt: now, + originalPath: geminiFile, + summary: 'Fix auth bug', }; const geminiMsgs = parseGeminiFixtureMessages(geminiFile); contexts.gemini = { - session: geminiSession, recentMessages: geminiMsgs, - filesModified: [], pendingTasks: [], toolSummaries: [], + session: geminiSession, + recentMessages: geminiMsgs, + filesModified: [], + pendingTasks: [], + toolSummaries: [], markdown: generateHandoffMarkdown(geminiSession, geminiMsgs, [], [], []), }; // Codex - const codexFile = fs.readdirSync(fixtures.codex.root, { recursive: true }) - .map(f => path.join(fixtures.codex.root, f as string)) - .find(f => f.endsWith('.jsonl'))!; + const codexFile = fs + .readdirSync(fixtures.codex.root, { recursive: true }) + .map((f) => path.join(fixtures.codex.root, f as string)) + .find((f) => f.endsWith('.jsonl'))!; const codexSession: UnifiedSession = { - id: 'test-codex-uuid-1234', source: 'codex', cwd: '/home/user/project', - repo: 'user/project.git', branch: 'main', lines: 5, bytes: 800, - createdAt: now, updatedAt: now, originalPath: codexFile, summary: 'Fix auth bug', + id: 'test-codex-uuid-1234', + source: 'codex', + cwd: '/home/user/project', + repo: 'user/project.git', + branch: 'main', + lines: 5, + bytes: 800, + createdAt: now, + updatedAt: now, + originalPath: codexFile, + summary: 'Fix auth bug', }; const codexMsgs = 
parseCodexFixtureMessages(codexFile); contexts.codex = { - session: codexSession, recentMessages: codexMsgs, - filesModified: [], pendingTasks: [], toolSummaries: [], + session: codexSession, + recentMessages: codexMsgs, + filesModified: [], + pendingTasks: [], + toolSummaries: [], markdown: generateHandoffMarkdown(codexSession, codexMsgs, [], [], []), }; // OpenCode (SQLite) const opencodDbPath = path.join(fixtures.opencode.root, 'opencode.db'); const opencodeSession: UnifiedSession = { - id: 'ses_test1', source: 'opencode', cwd: '/home/user/project', - repo: 'user/project', lines: 4, bytes: 0, - createdAt: now, updatedAt: now, originalPath: opencodDbPath, summary: 'Fix auth bug', + id: 'ses_test1', + source: 'opencode', + cwd: '/home/user/project', + repo: 'user/project', + lines: 4, + bytes: 0, + createdAt: now, + updatedAt: now, + originalPath: opencodDbPath, + summary: 'Fix auth bug', }; const opencodeMsgs = parseOpenCodeFixtureMessages(opencodDbPath, 'ses_test1'); contexts.opencode = { - session: opencodeSession, recentMessages: opencodeMsgs, - filesModified: [], pendingTasks: [], toolSummaries: [], + session: opencodeSession, + recentMessages: opencodeMsgs, + filesModified: [], + pendingTasks: [], + toolSummaries: [], markdown: generateHandoffMarkdown(opencodeSession, opencodeMsgs, [], [], []), }; + + // Droid + const droidFile = fs + .readdirSync(fixtures.droid.root, { recursive: true }) + .map((f) => path.join(fixtures.droid.root, f as string)) + .find((f) => f.endsWith('.jsonl'))!; + const droidSession: UnifiedSession = { + id: 'dddddddd-1111-2222-3333-444444444444', + source: 'droid', + cwd: '/home/user/project', + repo: 'user/project', + lines: 10, + bytes: 2000, + createdAt: now, + updatedAt: now, + originalPath: droidFile, + summary: 'Fix auth bug', + model: 'claude-opus-4-6', + }; + const droidMsgs = parseDroidFixtureMessages(droidFile); + contexts.droid = { + session: droidSession, + recentMessages: droidMsgs, + filesModified: 
['/home/user/project/login.ts'], + pendingTasks: ['Add error handling', 'Write tests'], + toolSummaries: [], + markdown: generateHandoffMarkdown( + droidSession, + droidMsgs, + ['/home/user/project/login.ts'], + ['Add error handling', 'Write tests'], + [], + ), + }; + + // Cursor + const cursorFile = fs + .readdirSync(fixtures.cursor.root, { recursive: true }) + .map((f) => path.join(fixtures.cursor.root, f as string)) + .find((f) => f.endsWith('.jsonl'))!; + const cursorSession: UnifiedSession = { + id: 'cccccccc-1111-2222-3333-444444444444', + source: 'cursor', + cwd: '/home/user/project', + repo: 'user/project', + lines: 9, + bytes: 1500, + createdAt: now, + updatedAt: now, + originalPath: cursorFile, + summary: 'Fix auth bug', + }; + const cursorMsgs = parseCursorFixtureMessages(cursorFile); + contexts.cursor = { + session: cursorSession, + recentMessages: cursorMsgs, + filesModified: [], + pendingTasks: [], + toolSummaries: [], + markdown: generateHandoffMarkdown(cursorSession, cursorMsgs, [], [], []), + }; + + // Amp + const ampFile = fs + .readdirSync(fixtures.amp.root) + .map((f) => path.join(fixtures.amp.root, f as string)) + .find((f) => f.endsWith('.json'))!; + const ampSession: UnifiedSession = { + id: 'test-amp-session-1', + source: 'amp', + cwd: '/home/user/project', + repo: 'user/project', + lines: 4, + bytes: 800, + createdAt: now, + updatedAt: now, + originalPath: ampFile, + summary: 'Fix auth bug', + model: 'claude-sonnet-4', + }; + const ampMsgs = parseAmpFixtureMessages(ampFile); + contexts.amp = { + session: ampSession, + recentMessages: ampMsgs, + filesModified: [], + pendingTasks: [], + toolSummaries: [], + markdown: generateHandoffMarkdown(ampSession, ampMsgs, [], [], []), + }; + + // Kiro + const kiroFile = fs + .readdirSync(fixtures.kiro.root) + .map((f) => path.join(fixtures.kiro.root, f as string)) + .find((f) => f.endsWith('.json'))!; + const kiroSession: UnifiedSession = { + id: 'test-kiro-session-1', + source: 'kiro', + cwd: 
'/home/user/project', + repo: 'user/project', + lines: 4, + bytes: 600, + createdAt: now, + updatedAt: now, + originalPath: kiroFile, + summary: 'Fix auth bug', + model: 'claude-sonnet-4', + }; + const kiroMsgs = parseKiroFixtureMessages(kiroFile); + contexts.kiro = { + session: kiroSession, + recentMessages: kiroMsgs, + filesModified: [], + pendingTasks: [], + toolSummaries: [], + markdown: generateHandoffMarkdown(kiroSession, kiroMsgs, [], [], []), + }; + + // Crush — inline context (no file fixture; real parser uses SQLite) + const crushSession: UnifiedSession = { + id: 'test-crush-session-1', + source: 'crush', + cwd: '/home/user/project', + repo: 'user/project', + lines: 4, + bytes: 500, + createdAt: now, + updatedAt: now, + originalPath: '/tmp/crush-mock', + summary: 'Fix auth bug', + }; + const crushMsgs: ConversationMessage[] = [ + { role: 'user', content: 'Fix the authentication bug in login.ts' }, + { role: 'assistant', content: 'I found the issue in login.ts. The token validation was missing.' }, + { role: 'user', content: 'Great, please also add error handling' }, + { role: 'assistant', content: 'Done. I added try-catch blocks and proper error messages.' 
}, + ]; + contexts.crush = { + session: crushSession, + recentMessages: crushMsgs, + filesModified: [], + pendingTasks: [], + toolSummaries: [], + markdown: generateHandoffMarkdown(crushSession, crushMsgs, [], [], []), + }; + + // Cline + const clineFile = fs + .readdirSync(fixtures.cline.root, { recursive: true }) + .map((f) => path.join(fixtures.cline.root, f as string)) + .find((f) => f.endsWith('ui_messages.json'))!; + const clineSession: UnifiedSession = { + id: 'test-cline-session-1', + source: 'cline', + cwd: '/home/user/project', + repo: 'user/project', + lines: 4, + bytes: 700, + createdAt: now, + updatedAt: now, + originalPath: clineFile, + summary: 'Fix auth bug', + }; + const clineMsgs = parseClineFixtureMessages(clineFile); + contexts.cline = { + session: clineSession, + recentMessages: clineMsgs, + filesModified: [], + pendingTasks: [], + toolSummaries: [], + markdown: generateHandoffMarkdown(clineSession, clineMsgs, [], [], []), + }; + + // Roo Code + const rooCodeFile = fs + .readdirSync(fixtures['roo-code'].root, { recursive: true }) + .map((f) => path.join(fixtures['roo-code'].root, f as string)) + .find((f) => f.endsWith('ui_messages.json'))!; + const rooCodeSession: UnifiedSession = { + id: 'test-roo-code-session-1', + source: 'roo-code', + cwd: '/home/user/project', + repo: 'user/project', + lines: 4, + bytes: 700, + createdAt: now, + updatedAt: now, + originalPath: rooCodeFile, + summary: 'Fix auth bug', + }; + const rooCodeMsgs = parseClineFixtureMessages(rooCodeFile); + contexts['roo-code'] = { + session: rooCodeSession, + recentMessages: rooCodeMsgs, + filesModified: [], + pendingTasks: [], + toolSummaries: [], + markdown: generateHandoffMarkdown(rooCodeSession, rooCodeMsgs, [], [], []), + }; + + // Kilo Code + const kiloCodeFile = fs + .readdirSync(fixtures['kilo-code'].root, { recursive: true }) + .map((f) => path.join(fixtures['kilo-code'].root, f as string)) + .find((f) => f.endsWith('ui_messages.json'))!; + const kiloCodeSession: 
UnifiedSession = { + id: 'test-kilo-code-session-1', + source: 'kilo-code', + cwd: '/home/user/project', + repo: 'user/project', + lines: 4, + bytes: 700, + createdAt: now, + updatedAt: now, + originalPath: kiloCodeFile, + summary: 'Fix auth bug', + }; + const kiloCodeMsgs = parseClineFixtureMessages(kiloCodeFile); + contexts['kilo-code'] = { + session: kiloCodeSession, + recentMessages: kiloCodeMsgs, + filesModified: [], + pendingTasks: [], + toolSummaries: [], + markdown: generateHandoffMarkdown(kiloCodeSession, kiloCodeMsgs, [], [], []), + }; + + // Antigravity + const antigravityFile = fs + .readdirSync(fixtures.antigravity.root) + .map((f) => path.join(fixtures.antigravity.root, f as string)) + .find((f) => f.endsWith('.json') || f.endsWith('.jsonl'))!; + const antigravitySession: UnifiedSession = { + id: 'test-antigravity-session-1', + source: 'antigravity', + cwd: '/home/user/project', + repo: 'user/project', + lines: 4, + bytes: 600, + createdAt: now, + updatedAt: now, + originalPath: antigravityFile, + summary: 'Fix auth bug', + }; + const antigravityMsgs = parseAntigravityFixtureMessages(antigravityFile); + contexts.antigravity = { + session: antigravitySession, + recentMessages: antigravityMsgs, + filesModified: [], + pendingTasks: [], + toolSummaries: [], + markdown: generateHandoffMarkdown(antigravitySession, antigravityMsgs, [], [], []), + }; + + // Kimi + const kimiContextFile = fs + .readdirSync(fixtures.kimi.root, { recursive: true }) + .map((f) => path.join(fixtures.kimi.root, f as string)) + .find((f) => f.endsWith(`${path.sep}context.jsonl`) || f.endsWith('/context.jsonl'))!; + const kimiSessionDir = path.dirname(kimiContextFile); + const kimiSession: UnifiedSession = { + id: path.basename(kimiSessionDir), + source: 'kimi', + cwd: '/home/user/project', + repo: 'user/project', + lines: 6, + bytes: fs.statSync(kimiContextFile).size, + createdAt: now, + updatedAt: now, + originalPath: kimiSessionDir, + summary: 'Fix auth bug', + model: 'kimi-k2.5', 
+ }; + const kimiMsgs = parseKimiFixtureMessages(kimiContextFile); + contexts.kimi = { + session: kimiSession, + recentMessages: kimiMsgs, + filesModified: [], + pendingTasks: [], + toolSummaries: [], + markdown: generateHandoffMarkdown(kimiSession, kimiMsgs, [], [], []), + }; + + // Qwen Code + const qwenCodeFile = fs + .readdirSync(fixtures['qwen-code'].root, { recursive: true }) + .map((f) => path.join(fixtures['qwen-code'].root, f as string)) + .find((f) => f.endsWith('.jsonl'))!; + const qwenCodeSession: UnifiedSession = { + id: 'test-qwen-code-session-1', + source: 'qwen-code', + cwd: '/home/user/project', + repo: 'user/project', + branch: 'main', + lines: 5, + bytes: fs.statSync(qwenCodeFile).size, + createdAt: now, + updatedAt: now, + originalPath: qwenCodeFile, + summary: 'Fix auth bug', + model: 'qwen3-coder', + }; + const qwenCodeMsgs = parseQwenCodeFixtureMessages(qwenCodeFile); + contexts['qwen-code'] = { + session: qwenCodeSession, + recentMessages: qwenCodeMsgs, + filesModified: [], + pendingTasks: [], + toolSummaries: [], + markdown: generateHandoffMarkdown(qwenCodeSession, qwenCodeMsgs, [], [], []), + }; }); afterAll(() => { @@ -275,7 +887,7 @@ describe('Low-Level Fixture Parsing', () => { it('Claude: skips system and queue-operation messages', () => { const msgs = contexts.claude.recentMessages; - const systemMsgs = msgs.filter(m => m.role === 'system'); + const systemMsgs = msgs.filter((m) => m.role === 'system'); expect(systemMsgs.length).toBe(0); }); @@ -298,15 +910,15 @@ describe('Low-Level Fixture Parsing', () => { it('Gemini: extracts user and gemini-type messages', () => { const msgs = contexts.gemini.recentMessages; - const userMsgs = msgs.filter(m => m.role === 'user'); - const asstMsgs = msgs.filter(m => m.role === 'assistant'); + const userMsgs = msgs.filter((m) => m.role === 'user'); + const asstMsgs = msgs.filter((m) => m.role === 'assistant'); expect(userMsgs.length).toBe(2); expect(asstMsgs.length).toBeGreaterThanOrEqual(2); }); 
it('Gemini: captures tool-call-only messages (empty content + toolCalls)', () => { const msgs = contexts.gemini.recentMessages; - const toolMsgs = msgs.filter(m => m.content.includes('[Used tools:')); + const toolMsgs = msgs.filter((m) => m.content.includes('[Used tools:')); expect(toolMsgs.length).toBe(1); expect(toolMsgs[0].content).toContain('read_file'); expect(toolMsgs[0].toolCalls).toBeDefined(); @@ -346,6 +958,58 @@ describe('Low-Level Fixture Parsing', () => { expect(msgs[i].timestamp!.getTime()).toBeGreaterThanOrEqual(msgs[i - 1].timestamp!.getTime()); } }); + + it('Droid: extracts user and assistant text messages from JSONL', () => { + const msgs = contexts.droid.recentMessages; + expect(msgs.length).toBeGreaterThanOrEqual(3); + expect(msgs[0].role).toBe('user'); + expect(msgs[0].content).toContain('Fix the authentication bug'); + const asstMsgs = msgs.filter((m) => m.role === 'assistant'); + expect(asstMsgs.length).toBeGreaterThan(0); + expect(asstMsgs[0].content).toContain('token validation was missing'); + }); + + it('Droid: skips session_start and todo_state events in message extraction', () => { + const msgs = contexts.droid.recentMessages; + for (const msg of msgs) { + expect(msg.content).not.toContain('session_start'); + expect(msg.content).not.toContain('todo_state'); + } + }); + + it('Droid: skips tool_use and tool_result content blocks (only text)', () => { + const msgs = contexts.droid.recentMessages; + for (const msg of msgs) { + expect(msg.content).not.toContain('tool_use'); + expect(msg.content).not.toContain('tool_result'); + } + }); + + it('Cursor: extracts user and assistant text messages from JSONL', () => { + const msgs = contexts.cursor.recentMessages; + expect(msgs.length).toBeGreaterThanOrEqual(3); + expect(msgs[0].role).toBe('user'); + expect(msgs[0].content).toContain('Fix the authentication bug'); + const asstMsgs = msgs.filter((m) => m.role === 'assistant'); + expect(asstMsgs.length).toBeGreaterThan(0); + expect(asstMsgs.some((m) 
=> m.content.includes('token validation was missing'))).toBe(true); + }); + + it('Cursor: strips tags from user messages', () => { + const msgs = contexts.cursor.recentMessages; + for (const msg of msgs) { + expect(msg.content).not.toContain(''); + expect(msg.content).not.toContain(''); + } + }); + + it('Cursor: skips tool_use and tool_result content blocks (only text)', () => { + const msgs = contexts.cursor.recentMessages; + for (const msg of msgs) { + expect(msg.content).not.toContain('tool_use'); + expect(msg.content).not.toContain('tool_result'); + } + }); }); // ─── Shared Markdown Generator Tests ──────────────────────────────────────── @@ -354,7 +1018,7 @@ describe('Shared generateHandoffMarkdown', () => { it('includes correct source label for each tool', () => { for (const source of ALL_SOURCES) { const ctx = contexts[source]; - expect(ctx.markdown).toContain(SOURCE_LABELS[source]); + expect(ctx.markdown).toContain(getSourceLabels()[source]); } }); @@ -368,7 +1032,12 @@ describe('Shared generateHandoffMarkdown', () => { it('includes working directory', () => { for (const source of ALL_SOURCES) { const ctx = contexts[source]; - expect(ctx.markdown).toContain('/home/user/project'); + if (source === 'gemini') { + // Gemini has no cwd data + expect(ctx.session.cwd).toBe(''); + } else { + expect(ctx.markdown).toContain('/home/user/project'); + } } }); @@ -392,8 +1061,14 @@ describe('Shared generateHandoffMarkdown', () => { content: 'A'.repeat(600), }; const session: UnifiedSession = { - id: 'test', source: 'claude', cwd: '/tmp', lines: 1, bytes: 100, - createdAt: new Date(), updatedAt: new Date(), originalPath: '/tmp', + id: 'test', + source: 'claude', + cwd: '/tmp', + lines: 1, + bytes: 100, + createdAt: new Date(), + updatedAt: new Date(), + originalPath: '/tmp', }; const md = generateHandoffMarkdown(session, [longMsg], [], [], []); expect(md).toContain('A'.repeat(500) + '…'); @@ -402,8 +1077,14 @@ describe('Shared generateHandoffMarkdown', () => { 
it('includes files modified when present', () => { const session: UnifiedSession = { - id: 'test', source: 'codex', cwd: '/tmp', lines: 1, bytes: 100, - createdAt: new Date(), updatedAt: new Date(), originalPath: '/tmp', + id: 'test', + source: 'codex', + cwd: '/tmp', + lines: 1, + bytes: 100, + createdAt: new Date(), + updatedAt: new Date(), + originalPath: '/tmp', }; const md = generateHandoffMarkdown(session, [], ['src/auth.ts', 'src/login.ts'], [], []); expect(md).toContain('## Files Modified'); @@ -413,8 +1094,14 @@ describe('Shared generateHandoffMarkdown', () => { it('includes pending tasks when present', () => { const session: UnifiedSession = { - id: 'test', source: 'opencode', cwd: '/tmp', lines: 1, bytes: 100, - createdAt: new Date(), updatedAt: new Date(), originalPath: '/tmp', + id: 'test', + source: 'opencode', + cwd: '/tmp', + lines: 1, + bytes: 100, + createdAt: new Date(), + updatedAt: new Date(), + originalPath: '/tmp', }; const md = generateHandoffMarkdown(session, [], [], ['Add tests', 'Fix lint errors'], []); expect(md).toContain('## Pending Tasks'); @@ -428,11 +1115,153 @@ describe('Shared generateHandoffMarkdown', () => { expect(ctx.markdown).toContain('You are continuing this session'); } }); + + it('renders category-aware tool activity with structured data', () => { + const session: UnifiedSession = { + id: 'test', + source: 'claude', + cwd: '/tmp', + lines: 1, + bytes: 100, + createdAt: new Date(), + updatedAt: new Date(), + originalPath: '/tmp', + }; + + const toolSummaries: import('../types/index.js').ToolUsageSummary[] = [ + { + name: 'Bash', + count: 2, + errorCount: 1, + samples: [ + { + summary: '$ npm test → exit 0', + data: { category: 'shell', command: 'npm test', exitCode: 0 }, + }, + { + summary: '$ npm build → exit 1', + data: { + category: 'shell', + command: 'npm build', + exitCode: 1, + errored: true, + stdoutTail: 'Error: build failed', + }, + }, + ], + }, + { + name: 'Read', + count: 2, + samples: [ + { + summary: 'read 
src/app.ts', + data: { category: 'read', filePath: 'src/app.ts' }, + }, + { + summary: 'read src/utils.ts (lines 10-50)', + data: { category: 'read', filePath: 'src/utils.ts', lineStart: 10, lineEnd: 50 }, + }, + ], + }, + { + name: 'Edit', + count: 1, + samples: [ + { + summary: 'edit src/auth.ts (+2 -1)', + data: { + category: 'edit', + filePath: 'src/auth.ts', + diff: '--- a/src/auth.ts\n+++ b/src/auth.ts\n-old line\n+new line1\n+new line2', + diffStats: { added: 2, removed: 1 }, + }, + }, + ], + }, + { + name: 'Grep', + count: 1, + samples: [ + { + summary: 'grep "TODO" src/', + data: { category: 'grep', pattern: 'TODO', targetPath: 'src/', matchCount: 5 }, + }, + ], + }, + ]; + + const md = generateHandoffMarkdown(session, [], [], [], toolSummaries); + + // Category headers + expect(md).toContain('### Shell (2 calls, 1 errors)'); + expect(md).toContain('### Read (2 calls)'); + expect(md).toContain('### Edit (1 calls)'); + expect(md).toContain('### Grep (1 calls)'); + + // Shell: commands and exit codes + expect(md).toContain('`$ npm test`'); + expect(md).toContain('Exit: 0'); + expect(md).toContain('Exit: 1 **[ERROR]**'); + expect(md).toContain('Error: build failed'); + + // Read: file paths with line ranges + expect(md).toContain('`src/app.ts`'); + expect(md).toContain('`src/utils.ts` (lines 10-50)'); + + // Edit: diff blocks + expect(md).toContain('```diff'); + expect(md).toContain('-old line'); + expect(md).toContain('+new line1'); + + // Grep: pattern with match count + expect(md).toContain('`"TODO"`'); + expect(md).toContain('5 matches'); + }); + + it('shows error count in non-shell section headers', () => { + const session: UnifiedSession = { + id: 'test', + source: 'claude', + cwd: '/tmp', + lines: 1, + bytes: 100, + createdAt: new Date(), + updatedAt: new Date(), + originalPath: '/tmp', + }; + + const toolSummaries: import('../types/index.js').ToolUsageSummary[] = [ + { + name: 'Write', + count: 3, + errorCount: 2, + samples: [{ summary: 'write a.ts', 
data: { category: 'write', filePath: 'a.ts' } }], + }, + { + name: 'Edit', + count: 1, + errorCount: 1, + samples: [{ summary: 'edit b.ts', data: { category: 'edit', filePath: 'b.ts' } }], + }, + { + name: 'Grep', + count: 2, + errorCount: 1, + samples: [{ summary: 'grep "foo"', data: { category: 'grep', pattern: 'foo' } }], + }, + ]; + + const md = generateHandoffMarkdown(session, [], [], [], toolSummaries); + expect(md).toContain('### Write (3 calls, 2 errors)'); + expect(md).toContain('### Edit (1 calls, 1 errors)'); + expect(md).toContain('### Grep (2 calls, 1 errors)'); + }); }); // ─── All 20 Conversion Path Tests ────────────────────────────────────────── -describe('All 20 Fixture-Based Conversion Paths', () => { +describe('All 42 Fixture-Based Conversion Paths', () => { let conversionNumber = 0; for (const source of ALL_SOURCES) { @@ -458,7 +1287,7 @@ describe('All 20 Fixture-Based Conversion Paths', () => { expect(md).toContain('You are continuing this session'); // Source attribution - expect(md).toContain(SOURCE_LABELS[source]); + expect(md).toContain(getSourceLabels()[source]); // Has meaningful content expect(md.length).toBeGreaterThan(100); @@ -469,8 +1298,8 @@ describe('All 20 Fixture-Based Conversion Paths', () => { expect(ctx.recentMessages.length).toBeGreaterThan(0); // At least one user and one assistant message - const userMsgs = ctx.recentMessages.filter(m => m.role === 'user'); - const asstMsgs = ctx.recentMessages.filter(m => m.role === 'assistant'); + const userMsgs = ctx.recentMessages.filter((m) => m.role === 'user'); + const asstMsgs = ctx.recentMessages.filter((m) => m.role === 'assistant'); expect(userMsgs.length).toBeGreaterThan(0); expect(asstMsgs.length).toBeGreaterThan(0); @@ -512,14 +1341,166 @@ describe('Injection Safety (Fixture-Based)', () => { // ─── Unique Session ID Test ───────────────────────────────────────────────── describe('Cross-Source Uniqueness', () => { - it('all 5 sources produce different session IDs', () => { - 
const ids = new Set(ALL_SOURCES.map(s => contexts[s].session.id)); - expect(ids.size).toBe(5); + it('all sources produce different session IDs', () => { + const ids = new Set(ALL_SOURCES.map((s) => contexts[s].session.id)); + expect(ids.size).toBe(ALL_SOURCES.length); }); - it('all 5 sources have correct source type', () => { + it('all sources have correct source type', () => { for (const source of ALL_SOURCES) { expect(contexts[source].session.source).toBe(source); } }); }); + +// ─── v4.1.0 — Markdown Rendering Enhancements ────────────────────────────── + +describe('compactSummary rendering', () => { + const baseSession: UnifiedSession = { + id: 'test-compact', + source: 'claude', + cwd: '/tmp/test', + lines: 10, + bytes: 500, + createdAt: new Date('2025-01-01'), + updatedAt: new Date('2025-01-01'), + originalPath: '/tmp/test.jsonl', + }; + + it('renders compactSummary section when present', () => { + const md = generateHandoffMarkdown(baseSession, [], [], [], [], { + compactSummary: 'The user is building a CLI tool with 7 parsers.', + }); + expect(md).toContain('## Session Context (Compacted)'); + expect(md).toContain('The user is building a CLI tool with 7 parsers.'); + }); + + it('omits compactSummary section when absent', () => { + const md = generateHandoffMarkdown(baseSession, [], [], [], [], {}); + expect(md).not.toContain('## Session Context (Compacted)'); + }); +}); + +describe('cache/thinking token breakdown rendering', () => { + const baseSession: UnifiedSession = { + id: 'test-tokens', + source: 'droid', + cwd: '/tmp/test', + lines: 10, + bytes: 500, + createdAt: new Date('2025-01-01'), + updatedAt: new Date('2025-01-01'), + originalPath: '/tmp/test.jsonl', + }; + + it('renders cache token row when cacheTokens present', () => { + const md = generateHandoffMarkdown(baseSession, [], [], [], [], { + tokenUsage: { input: 1000, output: 500 }, + cacheTokens: { creation: 200, read: 800 }, + }); + expect(md).toContain('**Cache Tokens**'); + 
expect(md).toContain('800'); + expect(md).toContain('200'); + }); + + it('renders thinking token row when thinkingTokens present', () => { + const md = generateHandoffMarkdown(baseSession, [], [], [], [], { + thinkingTokens: 3500, + }); + expect(md).toContain('**Thinking Tokens**'); + expect(md).toContain('3,500'); + }); + + it('renders active time row when activeTimeMs present', () => { + const md = generateHandoffMarkdown(baseSession, [], [], [], [], { + activeTimeMs: 180000, + }); + expect(md).toContain('**Active Time**'); + expect(md).toContain('3 min'); + }); + + it('omits extended rows when fields absent', () => { + const md = generateHandoffMarkdown(baseSession, [], [], [], [], {}); + expect(md).not.toContain('**Cache Tokens**'); + expect(md).not.toContain('**Thinking Tokens**'); + expect(md).not.toContain('**Active Time**'); + }); +}); + +describe('subagent status rendering', () => { + const baseSession: UnifiedSession = { + id: 'test-subagent-status', + source: 'claude', + cwd: '/tmp/test', + lines: 10, + bytes: 500, + createdAt: new Date('2025-01-01'), + updatedAt: new Date('2025-01-01'), + originalPath: '/tmp/test.jsonl', + }; + + it('renders completed-without-result as success (not warning)', () => { + const md = generateHandoffMarkdown(baseSession, [], [], [], [], { + subagentResults: [ + { + taskId: 'a1', + description: 'Watch CI run until completion', + status: 'completed', + toolCallCount: 0, + }, + ], + }); + + expect(md).toContain('✅ Completed'); + expect(md).not.toContain('⚠️ Completed'); + }); +}); + +describe('MCP namespace grouping', () => { + const baseSession: UnifiedSession = { + id: 'test-mcp-ns', + source: 'claude', + cwd: '/tmp/test', + lines: 10, + bytes: 500, + createdAt: new Date('2025-01-01'), + updatedAt: new Date('2025-01-01'), + originalPath: '/tmp/test.jsonl', + }; + + it('groups mcp__github__* tools under a single MCP: github header', () => { + const toolSummaries = [ + { + name: 'mcp__github__list_issues', + count: 3, + 
samples: [ + { summary: 'list_issues()', data: { category: 'mcp' as const, toolName: 'mcp__github__list_issues' } }, + ], + }, + { + name: 'mcp__github__create_pr', + count: 2, + samples: [{ summary: 'create_pr()', data: { category: 'mcp' as const, toolName: 'mcp__github__create_pr' } }], + }, + ]; + const md = generateHandoffMarkdown(baseSession, [], [], [], toolSummaries); + // Should have a single grouped section, not two separate ones + expect(md).toContain('### MCP (5 calls)'); + // Should NOT have separate ### headers for each tool + expect(md).not.toContain('### mcp__github__list_issues'); + expect(md).not.toContain('### mcp__github__create_pr'); + }); + + it('leaves single-namespace MCP tools ungrouped', () => { + const toolSummaries = [ + { + name: 'mcp__morph__edit_file', + count: 1, + samples: [{ summary: 'edit_file()', data: { category: 'mcp' as const, toolName: 'mcp__morph__edit_file' } }], + }, + ]; + const md = generateHandoffMarkdown(baseSession, [], [], [], toolSummaries); + // Single tool stays as-is + expect(md).toContain('mcp__morph__edit_file'); + }); +}); diff --git a/src/__tests__/windows-safe-prompt.test.ts b/src/__tests__/windows-safe-prompt.test.ts new file mode 100644 index 0000000..079b7ce --- /dev/null +++ b/src/__tests__/windows-safe-prompt.test.ts @@ -0,0 +1,60 @@ +import { describe, expect, it } from 'vitest'; +import type { UnifiedSession } from '../types/index.js'; +import { TOOL_NAMES } from '../types/tool-names.js'; +import { buildWindowsSafePrompt } from '../utils/resume.js'; + +// Minimal session stub for testing +function stubSession(source: UnifiedSession['source']): UnifiedSession { + return { + id: 'test-id', + source, + summary: 'Test session', + cwd: '/test/dir', + updatedAt: new Date(), + createdAt: new Date(), + originalPath: '/test/path', + lines: 0, + bytes: 0, + }; +} + +// Characters that are dangerous in cmd.exe +const CMD_METACHARACTERS = /[|&><^%!`"\n\r]/; + +describe('buildWindowsSafePrompt', () => { + 
it('should return a single-line string', () => { + const prompt = buildWindowsSafePrompt(stubSession('claude')); + expect(prompt).not.toContain('\n'); + expect(prompt).not.toContain('\r'); + }); + + it('should not contain cmd.exe metacharacters', () => { + const prompt = buildWindowsSafePrompt(stubSession('claude')); + expect(CMD_METACHARACTERS.test(prompt)).toBe(false); + }); + + it('should be under 300 characters', () => { + const prompt = buildWindowsSafePrompt(stubSession('claude')); + expect(prompt.length).toBeLessThan(300); + }); + + it('should reference .continues-handoff.md', () => { + const prompt = buildWindowsSafePrompt(stubSession('claude')); + expect(prompt).toContain('.continues-handoff.md'); + }); + + it('should include the source tool name', () => { + const prompt = buildWindowsSafePrompt(stubSession('codex')); + expect(prompt).toContain('codex'); + }); + + it('should be safe for all supported tools', () => { + const tools = TOOL_NAMES; + for (const tool of tools) { + const prompt = buildWindowsSafePrompt(stubSession(tool)); + expect(CMD_METACHARACTERS.test(prompt)).toBe(false); + expect(prompt).not.toContain('\n'); + expect(prompt.length).toBeLessThan(300); + } + }); +}); diff --git a/src/cli.ts b/src/cli.ts index a2f20c3..bf5d3fc 100644 --- a/src/cli.ts +++ b/src/cli.ts @@ -8,50 +8,41 @@ process.on('warning', (warning) => { } }); -import { Command } from 'commander'; -import chalk from 'chalk'; +import { createRequire } from 'node:module'; import * as clack from '@clack/prompts'; -import ora from 'ora'; -import type { UnifiedSession, SessionSource } from './types/index.js'; -import { - getAllSessions, - getSessionsBySource, - findSession, - formatSession, - buildIndex, - sessionsToJsonl, -} from './utils/index.js'; -import { - resume, - nativeResume, - crossToolResume, - getAvailableTools, - getResumeCommand, -} from './utils/resume.js'; +import { Command } from 'commander'; +import { getExtraCommandArgs } from './commands/_shared.js'; +import { 
dumpCommand } from './commands/dump.js'; +import { inspectSession } from './commands/inspect.js'; +import { listCommand } from './commands/list.js'; +import { interactivePick } from './commands/pick.js'; +import { resumeBySource } from './commands/quick-resume.js'; +import { rebuildCommand } from './commands/rebuild.js'; +import { resumeCommand } from './commands/resume-cmd.js'; +import { scanCommand } from './commands/scan.js'; +import { setLogLevel } from './logger.js'; +import { ALL_TOOLS, adapters, SOURCE_HELP } from './parsers/registry.js'; + +function splitTailArgs(args: string[]): { commandArgs: string[]; tailArgs: string[] } { + const separator = args.indexOf('--'); + if (separator < 0) return { commandArgs: args, tailArgs: [] }; + return { + commandArgs: args.slice(0, separator), + tailArgs: args.slice(separator + 1), + }; +} + +const rawUserArgs = process.argv.slice(2); +const { commandArgs, tailArgs } = splitTailArgs(rawUserArgs); const program = new Command(); -const VERSION = '2.6.0'; +const _require = createRequire(import.meta.url); +const { version: VERSION } = _require('../package.json') as { version: string }; // Detect TTY for interactive mode -const isTTY = process.stdout.isTTY; - -// Color support detection +const isTTY = !!process.stdout.isTTY; const supportsColor = !process.env.NO_COLOR && isTTY; - -/** - * ASCII art banner with highlighted 's' (the "continues" brand mark) - */ -function showBanner(): void { - if (!supportsColor) return; - const dim = chalk.gray; - const hi = chalk.cyan.bold; - console.log(); - console.log(dim(' ┌─────────────────────────────────────┐')); - console.log(dim(' │ ') + chalk.bold('continue') + hi('s') + dim(' │')); - console.log(dim(' │ ') + chalk.gray(`v${VERSION} — pick up where you left off`) + dim(' │')); - console.log(dim(' └─────────────────────────────────────┘')); - console.log(); -} +const cliContext = { isTTY, supportsColor, version: VERSION }; // Signal handling for graceful exits let isExiting = false; 
@@ -64,7 +55,9 @@ process.on('SIGINT', () => { } else { console.log('\nCancelled.'); } - process.exitCode = 1; + if (process.exitCode == null) { + process.exitCode = 130; + } } }); @@ -75,618 +68,209 @@ process.on('SIGTERM', () => { } }); -/** - * Source-specific colors for consistent branding - */ -const sourceColors: Record string> = { - claude: chalk.blue, - copilot: chalk.green, - gemini: chalk.cyan, - codex: chalk.magenta, - opencode: chalk.yellow, -}; - -/** - * Format session with colors in columnar layout - * Format: [source] YYYY-MM-DD HH:MM project-name summary... short-id - */ -function formatSessionColored(session: UnifiedSession): string { - const colorFn = sourceColors[session.source] || chalk.white; - const tag = `[${session.source}]`; - const source = colorFn(tag.padEnd(10)); - - const date = chalk.gray(session.updatedAt.toISOString().slice(0, 16).replace('T', ' ')); - - // Show repo or last folder of cwd - const repoDisplay = session.repo || session.cwd.split('/').slice(-2).join('/') || ''; - const repo = chalk.cyan(repoDisplay.slice(0, 20).padEnd(20)); - - // Summary - truncate nicely - const summaryText = session.summary || '(no summary)'; - const summary = (session.summary ? 
chalk.white(summaryText.slice(0, 44)) : chalk.gray(summaryText)).padEnd(44); - - // Short ID - const id = chalk.gray(session.id.slice(0, 8)); - - return `${source} ${date} ${repo} ${summary} ${id}`; -} - -/** - * Format session for clack select - simpler, cleaner - */ -function formatSessionForSelect(session: UnifiedSession): string { - const colorFn = sourceColors[session.source] || chalk.white; - const tag = `[${session.source}]`; - const source = colorFn(tag.padEnd(10)); - const date = session.updatedAt.toISOString().slice(0, 16).replace('T', ' '); - const repoDisplay = session.repo || session.cwd.split('/').slice(-1)[0] || ''; - const summary = (session.summary || '(no summary)').slice(0, 48); - - return `${source} ${date} ${chalk.cyan(repoDisplay.padEnd(20))} ${summary}`; -} - -/** - * Show session discovery stats - */ -function showSessionStats(sessions: UnifiedSession[]): void { - const bySource = sessions.reduce((acc, s) => { - acc[s.source] = (acc[s.source] || 0) + 1; - return acc; - }, {} as Record); - - const cliTools = Object.keys(bySource).length; - const total = sessions.length; - - console.log(chalk.gray(` Found ${total} sessions across ${cliTools} CLI tool${cliTools !== 1 ? 
's' : ''}`)); - - // Show breakdown - for (const [source, count] of Object.entries(bySource).sort((a, b) => b[1] - a[1])) { - const colorFn = sourceColors[source as SessionSource] || chalk.white; - console.log(chalk.gray(` ${colorFn(source)}: ${count}`)); - } -} - -/** - * Show helpful error when no sessions found - */ -function showNoSessionsHelp(): void { - clack.log.error('No sessions found.'); - console.log(); - console.log(chalk.gray('Sessions are stored in:')); - console.log(chalk.gray(' ~/.codex/sessions/')); - console.log(chalk.gray(' ~/.claude/projects/')); - console.log(chalk.gray(' ~/.copilot/session-state/')); - console.log(chalk.gray(' ~/.gemini/tmp/*/chats/')); - console.log(chalk.gray(' ~/.local/share/opencode/storage/')); -} - -/** - * Main interactive TUI command - */ -async function interactivePick(options: { source?: string; noTui?: boolean; rebuild?: boolean }): Promise { - try { - // If not TTY or --no-tui, fall back to list - if (!isTTY || options.noTui) { - console.log(chalk.yellow('Interactive mode requires a TTY. 
Use "continues list" instead.')); - process.exitCode = 1; - return; - } - - showBanner(); - clack.intro(chalk.bold('continue') + chalk.cyan.bold('s') + chalk.gray(' — session picker')); - - const s = clack.spinner(); - s.start('Loading sessions...'); - - let sessions: UnifiedSession[]; - if (options.source) { - sessions = await getSessionsBySource(options.source as SessionSource, options.rebuild); - } else { - sessions = await getAllSessions(options.rebuild); - } - - s.stop(); - - if (sessions.length === 0) { - showNoSessionsHelp(); - clack.outro(chalk.gray('No sessions to resume')); - return; - } - - // Check for sessions matching current working directory - const currentDir = process.cwd(); - const cwdSessions = sessions.filter(sess => sess.cwd === currentDir); - const otherSessions = sessions.filter(sess => sess.cwd !== currentDir); - const hasCwdSessions = cwdSessions.length > 0; - - if (hasCwdSessions) { - console.log(chalk.gray(` ${chalk.green('▸')} ${cwdSessions.length} session${cwdSessions.length !== 1 ? 's' : ''} found in current directory`)); - } else { - console.log(chalk.gray(` No sessions found for ${chalk.cyan(currentDir.split('/').slice(-2).join('/'))}`)); - console.log(chalk.gray(` Showing all sessions instead`)); - } - - // Show stats - showSessionStats(sessions); - console.log(); - - // Step 1: Filter by CLI tool (optional) — skip if source already specified - let filteredSessions = sessions; - - if (!options.source && sessions.length > 0) { - const bySource = sessions.reduce((acc, s) => { - acc[s.source] = (acc[s.source] || 0) + 1; - return acc; - }, {} as Record); - - const filterOptions: { value: string; label: string }[] = []; - - // If we have cwd sessions, offer "This directory" as first option - if (hasCwdSessions) { - filterOptions.push({ - value: 'cwd', - label: `This directory (${cwdSessions.length} session${cwdSessions.length !== 1 ? 
's' : ''})`, - }); - } - - filterOptions.push( - { value: 'all', label: `All CLI tools (${sessions.length} sessions)` }, - ...Object.entries(bySource) - .sort((a, b) => b[1] - a[1]) - .map(([source, count]) => ({ - value: source, - label: `${sourceColors[source as SessionSource](source.charAt(0).toUpperCase() + source.slice(1))} (${count})`, - })), - ); - - const toolFilter = await clack.select({ - message: 'Filter sessions', - options: filterOptions, - initialValue: hasCwdSessions ? 'cwd' : undefined, - }); - - if (clack.isCancel(toolFilter)) { - clack.cancel('Cancelled'); - return; - } - - if (toolFilter === 'cwd') { - filteredSessions = cwdSessions; - } else if (toolFilter !== 'all') { - filteredSessions = sessions.filter(s => s.source === toolFilter); - } - } - - // Step 2: Select session — show all with scrolling (maxItems controls viewport) - const PAGE_SIZE = 500; - const sessionOptions = filteredSessions.slice(0, PAGE_SIZE).map(s => ({ - value: s, - label: formatSessionForSelect(s), - hint: s.id.slice(0, 8), - })); - - if (filteredSessions.length > PAGE_SIZE) { - clack.log.info(chalk.gray(`Showing first ${PAGE_SIZE} of ${filteredSessions.length} sessions. 
Use --source to narrow results.`)); - } - - const selectedSession = await clack.select({ - message: `Select a session (${filteredSessions.length} available)`, - options: sessionOptions, - maxItems: 15, - }); - - if (clack.isCancel(selectedSession)) { - clack.cancel('Cancelled'); - return; - } - - const session = selectedSession as UnifiedSession; - - // Step 3: Select target tool - const availableTools = await getAvailableTools(); - - const targetOptions = availableTools - .filter(t => t !== session.source) - .map(t => ({ - value: t, - label: `${sourceColors[t](t.charAt(0).toUpperCase() + t.slice(1))}`, - })); - - const targetTool = await clack.select({ - message: `Continue ${sourceColors[session.source](session.source)} session in:`, - options: targetOptions, - }) as SessionSource; - - if (clack.isCancel(targetTool)) { - clack.cancel('Cancelled'); - return; - } - - // Step 4: Show what will happen and resume - console.log(); - clack.log.info(`Working directory: ${chalk.cyan(session.cwd)}`); - - const messageCount = (session as any).messageCount || '?'; - const fileCount = (session as any).filesModified?.length || '?'; - clack.log.info(`Context: ${messageCount} messages, ${fileCount} files modified`); - clack.log.info(`Command: ${chalk.cyan(getResumeCommand(session, targetTool))}`); - console.log(); - - clack.log.step(`Handing off to ${targetTool}...`); - clack.outro(`Launching ${targetTool}`); - - // Change to session's working directory and resume - process.chdir(session.cwd); - await resume(session, targetTool); - - } catch (error) { - if (clack.isCancel(error)) { - clack.cancel('Cancelled'); - return; - } - clack.log.error(`${(error as Error).message}`); - process.exitCode = 1; - } -} - /** * Configure CLI program */ program .name('continues') - .description('Never lose context. Resume any AI coding session across Claude, Copilot, Gemini, Codex & OpenCode.') + .description( + 'Never lose context. 
Resume any AI coding session across Claude Code, Codex, Copilot, Gemini CLI, Cursor, Amp, Cline, Roo Code, Kilo Code, Kiro, Crush, OpenCode, Droid & Antigravity.', + ) .version(VERSION) + .option('--verbose', 'Show info-level logs') + .option('--debug', 'Show debug-level logs') + .option('--config ', 'Path to .continues.yml config file') + .option('--preset ', 'Verbosity preset for inspect/dump output: minimal, standard, verbose, full', 'standard') + .option('--no-chain', 'Disable compacted-session chaining in handoff extraction') .helpOption('-h, --help', 'Display help for command') - .addHelpText('after', ` -Examples: - $ continues # Interactive TUI picker - $ continues list # List all sessions - $ continues list --source claude # Filter by source - $ continues list --json # JSON output for scripting - $ continues resume abc123 # Resume by ID - $ continues resume abc123 --in gemini # Cross-tool handoff - $ continues scan # Show session discovery stats - -Short aliases: - cont (binary alias) + .hook('preAction', () => { + const opts = program.opts(); + if (opts.debug) setLogLevel('debug'); + else if (opts.verbose) setLogLevel('info'); + }) + .addHelpText( + 'after', + ` +Quick start: + $ continues + $ npx continues --preset full + $ continues claude 1 + +Core workflows: + $ continues list + $ continues list --source claude --limit 25 + $ continues list --jsonl | jq '.source' + $ continues resume abc123 + $ continues resume abc123 --in gemini + $ continues scan --rebuild + +Inspect & export: + $ continues inspect abc123 --preset full + $ continues inspect abc123 --preset verbose --write-md handoff.md + $ continues dump all ./out --preset verbose + $ continues dump claude ./out --json --limit 50 + +Preset guide: + minimal -> shortest output (token-saving / quick skim) + standard -> balanced default for daily usage + verbose -> extra context + richer tool activity detail + full -> maximum detail for handoff, debugging, and audits + +Power tips: + - Use --all to 
bypass current-directory filtering in pick mode + - Forward raw args to target tools after -- (example: continues claude 1 -- --help) + - Combine --config .continues.yml with --preset for project defaults + per-run overrides + +Aliases: + cont -> continues ls -> list r -> resume -`); - -/** - * Default command - Interactive TUI - */ -program - .action(async () => { - await interactivePick({}); - }); +`, + ); + +// Default command - Interactive TUI +program.option('-a, --all', 'Show all sessions globally (skip directory filtering)').action(async (options) => { + const globalOptions = program.opts(); + await interactivePick( + { + all: options.all, + forwardArgs: tailArgs, + preset: globalOptions.preset as string | undefined, + configPath: globalOptions.config as string | undefined, + chain: globalOptions.chain as boolean | undefined, + }, + cliContext, + ); +}); -/** - * Pick command (explicit TUI) - */ +// Pick command (explicit TUI) program .command('pick') .description('Interactive session picker (TUI mode)') - .option('-s, --source ', 'Filter by source (claude, copilot, gemini, codex, opencode)') + .option('-s, --source ', SOURCE_HELP) + .option('-a, --all', 'Show all sessions globally (skip directory filtering)') .option('--no-tui', 'Disable TUI, use plain text') .option('--rebuild', 'Force rebuild session index') - .action(async (options) => { - await interactivePick(options); + .allowUnknownOption(true) + .allowExcessArguments(true) + .action(async (options, command: Command) => { + const rawForwardArgs = getExtraCommandArgs(command, 0); + const globalOptions = program.opts(); + await interactivePick( + { + ...options, + forwardArgs: [...rawForwardArgs, ...tailArgs], + preset: globalOptions.preset as string | undefined, + configPath: globalOptions.config as string | undefined, + chain: globalOptions.chain as boolean | undefined, + }, + cliContext, + ); }); -/** - * List sessions command - */ +// List sessions command program .command('list') .alias('ls') 
.description('List all sessions in table format') - .option('-s, --source ', 'Filter by source (claude, copilot, gemini, codex, opencode)') + .option('-s, --source ', SOURCE_HELP) .option('-n, --limit ', 'Limit number of sessions', '50') .option('--json', 'Output as JSON array') .option('--jsonl', 'Output as JSONL') .option('--rebuild', 'Force rebuild session index') .action(async (options) => { - try { - // Use simple spinner for non-interactive - const spinner = isTTY && !options.json && !options.jsonl - ? ora('Loading sessions...').start() - : null; - - let sessions: UnifiedSession[]; - if (options.source) { - sessions = await getSessionsBySource(options.source as SessionSource, options.rebuild); - } else { - sessions = await getAllSessions(options.rebuild); - } - - if (spinner) spinner.stop(); - - const limit = parseInt(options.limit, 10); - const displaySessions = sessions.slice(0, limit); - - if (options.json) { - console.log(JSON.stringify(displaySessions, null, 2)); - return; - } - - if (options.jsonl) { - console.log(sessionsToJsonl(displaySessions)); - return; - } - - if (sessions.length === 0) { - if (isTTY) { - showNoSessionsHelp(); - } else { - console.log('No sessions found.'); - } - return; - } - - // Print header - console.log(chalk.gray(`Found ${sessions.length} sessions (showing ${displaySessions.length}):`)); - console.log(); - - for (const session of displaySessions) { - console.log(formatSessionColored(session)); - } - } catch (error) { - console.error(chalk.red('Error:'), (error as Error).message); - process.exitCode = 1; - } + await listCommand(options, cliContext); }); -/** - * Resume a specific session - */ +// Resume a specific session program .command('resume ') .alias('r') .description('Resume a session by ID or short ID') - .option('-i, --in ', 'Target CLI tool (claude, copilot, gemini, codex, opencode)') + .option('-i, --in ', `Target CLI tool (${ALL_TOOLS.join(', ')})`) .option('--reference', 'Use file reference instead of inline 
context (for very large sessions)') .option('--no-tui', 'Disable interactive prompts') - .action(async (sessionId, options) => { - try { - const spinner = isTTY && !options.noTui ? ora('Finding session...').start() : null; - const session = await findSession(sessionId); - if (spinner) spinner.stop(); - - if (!session) { - // Try to find similar sessions - const allSessions = await getAllSessions(); - const similar = allSessions.filter(s => - s.id.toLowerCase().includes(sessionId.toLowerCase()) || - s.summary?.toLowerCase().includes(sessionId.toLowerCase()) - ).slice(0, 3); - - console.error(chalk.red(`Session not found: ${sessionId}`)); - - if (similar.length > 0) { - console.log(chalk.yellow('\nDid you mean one of these?')); - for (const s of similar) { - console.log(' ' + formatSessionColored(s)); - } - } - - process.exitCode = 1; - return; - } - - const target = options.in as SessionSource | undefined; - const mode = options.reference ? 'reference' as const : 'inline' as const; - - // In non-interactive mode, just resume directly - if (!isTTY || options.noTui) { - console.log(chalk.gray('Session: ') + formatSession(session)); - console.log(chalk.gray('Command: ') + chalk.cyan(getResumeCommand(session, target))); - console.log(); - - process.chdir(session.cwd); - await resume(session, target, mode); - return; - } - - // Interactive mode - show details and prompt for target if not specified - if (isTTY && !target) { - clack.intro(chalk.bold('Resume session')); - - console.log(formatSessionColored(session)); - console.log(); - - const availableTools = await getAvailableTools(); - - const targetOptions = availableTools - .filter(t => t !== session.source) - .map(t => ({ - value: t, - label: `${sourceColors[t](t.charAt(0).toUpperCase() + t.slice(1))}`, - })); - - const selectedTarget = await clack.select({ - message: `Continue ${sourceColors[session.source](session.source)} session in:`, - options: targetOptions, - }) as SessionSource; - - if 
(clack.isCancel(selectedTarget)) { - clack.cancel('Cancelled'); - return; - } - - clack.log.step(`Handing off to ${selectedTarget}...`); - clack.outro(`Launching ${selectedTarget}`); - - process.chdir(session.cwd); - await resume(session, selectedTarget, mode); - } else { - // Target specified, just resume - console.log(chalk.gray('Session: ') + formatSession(session)); - console.log(chalk.gray('Command: ') + chalk.cyan(getResumeCommand(session, target))); - console.log(); - - process.chdir(session.cwd); - await resume(session, target, mode); - } - - } catch (error) { - if (clack.isCancel(error)) { - clack.cancel('Cancelled'); - return; - } - console.error(chalk.red('Error:'), (error as Error).message); - process.exitCode = 1; - } + .allowUnknownOption(true) + .allowExcessArguments(true) + .action(async (sessionId, options, command: Command) => { + const rawForwardArgs = getExtraCommandArgs(command, 1); + const globalOptions = program.opts(); + await resumeCommand( + sessionId, + { + ...options, + preset: globalOptions.preset as string | undefined, + configPath: globalOptions.config as string | undefined, + chain: globalOptions.chain as boolean | undefined, + }, + cliContext, + { rawArgs: rawForwardArgs, tailArgs }, + ); }); -/** - * Scan command - show session discovery stats - */ +// Scan command program .command('scan') .description('Show session discovery statistics') .option('--rebuild', 'Force rebuild session index') .action(async (options) => { - try { - const spinner = isTTY ? 
ora('Scanning session directories...').start() : null; - - const sessions = await getAllSessions(options.rebuild); - - if (spinner) spinner.stop(); - - if (isTTY) { - clack.intro(chalk.bold('Session Discovery Statistics')); - } - - const bySource = sessions.reduce((acc, s) => { - acc[s.source] = (acc[s.source] || 0) + 1; - return acc; - }, {} as Record); - - console.log(); - console.log(chalk.bold(`Total sessions: ${sessions.length}`)); - console.log(); - - for (const [source, count] of Object.entries(bySource).sort((a, b) => b[1] - a[1])) { - const colorFn = sourceColors[source as SessionSource] || chalk.white; - const bar = '█'.repeat(Math.min(50, Math.floor(count / 10))); - console.log(`${colorFn(source.padEnd(8))}: ${count.toString().padStart(4)} ${chalk.gray(bar)}`); - } - - if (isTTY) { - console.log(); - clack.outro(chalk.gray('Run "continues" to pick a session')); - } - } catch (error) { - console.error(chalk.red('Error:'), (error as Error).message); - process.exitCode = 1; - } + await scanCommand(options, cliContext); }); -/** - * Rebuild the session index - */ +// Rebuild the session index program .command('rebuild') .description('Force rebuild the session index cache') .action(async () => { - const spinner = isTTY ? 
clack.spinner() : null; - - try { - if (spinner) { - spinner.start('Rebuilding session index...'); - } - - const sessions = await buildIndex(true); - - if (spinner) { - spinner.stop(`Index rebuilt with ${sessions.length} sessions`); - } else { - console.log(`Index rebuilt with ${sessions.length} sessions`); - } - - // Show summary by source - const bySource = sessions.reduce((acc, s) => { - acc[s.source] = (acc[s.source] || 0) + 1; - return acc; - }, {} as Record); - - for (const [source, count] of Object.entries(bySource)) { - console.log(chalk.gray(` ${source}: ${count} sessions`)); - } - } catch (error) { - if (spinner) { - spinner.stop('Failed to rebuild index'); - } - console.error(chalk.red('Error:'), (error as Error).message); - process.exitCode = 1; - } + await rebuildCommand(cliContext); }); -/** - * Quick resume commands for each tool - */ +// Dump sessions to directory program - .command('claude [n]') - .description('Resume Nth newest Claude session (default: 1)') - .action(async (n = '1') => { - await resumeBySource('claude', parseInt(n, 10)); - }); - -program - .command('copilot [n]') - .description('Resume Nth newest Copilot session (default: 1)') - .action(async (n = '1') => { - await resumeBySource('copilot', parseInt(n, 10)); - }); - -program - .command('gemini [n]') - .description('Resume Nth newest Gemini session (default: 1)') - .action(async (n = '1') => { - await resumeBySource('gemini', parseInt(n, 10)); - }); - -program - .command('codex [n]') - .description('Resume Nth newest Codex session (default: 1)') - .action(async (n = '1') => { - await resumeBySource('codex', parseInt(n, 10)); + .command('dump ') + .description('Bulk export sessions to markdown or JSON files') + .option('--preset ', 'Verbosity preset for export detail: minimal, standard, verbose, full', 'standard') + .option('--json', 'Output as JSON instead of markdown') + .option('--limit ', 'Limit number of sessions') + .option('--rebuild', 'Force rebuild session index') + 
.action(async (sourceOrAll, directory, options) => { + const globalOptions = program.opts(); + await dumpCommand( + sourceOrAll, + directory, + { + ...options, + configPath: globalOptions.config as string | undefined, + chain: globalOptions.chain as boolean | undefined, + }, + cliContext, + ); }); +// Inspect a session — parsing diagnostics program - .command('opencode [n]') - .description('Resume Nth newest OpenCode session (default: 1)') - .action(async (n = '1') => { - await resumeBySource('opencode', parseInt(n, 10)); + .command('inspect ') + .description('Inspect a session and show parsing diagnostics') + .option('--truncate ', 'Compact output truncated to N chars per line', parseInt) + .option('--write-md [path]', 'Write markdown output to file') + .action(async (sessionId: string, opts: { truncate?: number; writeMd?: string | boolean }) => { + // Inherit --preset from global options (subcommand duplication causes Commander scoping bug) + const globalOptions = program.opts(); + const globalPreset = globalOptions.preset as string | undefined; + const globalChain = globalOptions.chain as boolean | undefined; + await inspectSession(sessionId, { ...opts, preset: globalPreset, chain: globalChain }); }); -/** - * Helper to resume Nth session from a source - */ -async function resumeBySource(source: SessionSource, n: number): Promise { - try { - const sessions = await getSessionsBySource(source); - - if (sessions.length === 0) { - console.log(chalk.yellow(`No ${source} sessions found.`)); - return; - } - - const index = Math.max(0, Math.min(n - 1, sessions.length - 1)); - const session = sessions[index]; - - console.log(chalk.gray(`Resuming ${source} session #${index + 1}:`)); - console.log(formatSessionColored(session)); - console.log(); - - process.chdir(session.cwd); - await nativeResume(session); - } catch (error) { - console.error(chalk.red('Error:'), (error as Error).message); - process.exitCode = 1; - } +// Quick resume commands for each tool — generated from 
the adapter registry +for (const tool of ALL_TOOLS) { + const adapter = adapters[tool]; + program + .command(`${tool} [n]`) + .description(`Resume Nth newest ${adapter.label} session (default: 1)`) + .action(async (n = '1') => { + await resumeBySource(tool, parseInt(n, 10)); + }); } // Parse and run -program.parse(); +program.parse([process.argv[0], process.argv[1], ...commandArgs]); diff --git a/src/commands/_shared.ts b/src/commands/_shared.ts new file mode 100644 index 0000000..67ac8a0 --- /dev/null +++ b/src/commands/_shared.ts @@ -0,0 +1,112 @@ +import * as clack from '@clack/prompts'; +import chalk from 'chalk'; +import type { Command } from 'commander'; +import { sourceColors } from '../display/format.js'; +import { ALL_TOOLS } from '../parsers/registry.js'; +import type { SessionSource, UnifiedSession } from '../types/index.js'; +import { getAvailableTools } from '../utils/resume.js'; + +/** + * Show interactive tool-selection TUI and return the chosen target tool. + * Returns null if user cancels or no tools are available. + * + * Shared by pick, resume, and quick-resume commands to avoid 3x duplication. + */ +export async function selectTargetTool( + session: UnifiedSession, + options?: { excludeSource?: boolean }, +): Promise { + const availableTools = await getAvailableTools(); + const exclude = options?.excludeSource ?? true; + + const targetOptions = availableTools + .filter((t) => !exclude || t !== session.source) + .map((t) => ({ + value: t, + label: + t === session.source + ? `${sourceColors[t](t.charAt(0).toUpperCase() + t.slice(1))} (native resume)` + : `${sourceColors[t](t.charAt(0).toUpperCase() + t.slice(1))}`, + })); + + if (targetOptions.length === 0) { + const missing = ALL_TOOLS.filter((t) => !availableTools.includes(t)).map( + (t) => t.charAt(0).toUpperCase() + t.slice(1), + ); + clack.log.warn( + `Only ${sourceColors[session.source](session.source)} is installed. 
` + + `Install at least one more (${missing.join(', ')}) to enable cross-tool handoff.`, + ); + return null; + } + + const targetTool = (await clack.select({ + message: `Continue ${sourceColors[session.source](session.source)} session in:`, + options: targetOptions, + ...(exclude ? {} : { initialValue: session.source }), + })) as SessionSource; + + if (clack.isCancel(targetTool)) { + clack.cancel('Cancelled'); + return null; + } + + return targetTool; +} + +/** + * Check if only the native tool is available and auto-resume if so. + * Returns true if it handled the auto-resume (caller should return). + */ +export async function checkSingleToolAutoResume( + session: UnifiedSession, + nativeResumeFn: (s: UnifiedSession) => Promise, +): Promise { + const availableTools = await getAvailableTools(); + if (availableTools.length === 1 && availableTools[0] === session.source) { + clack.log.step(`Resuming natively in ${sourceColors[session.source](session.source)}...`); + clack.outro(`Launching ${session.source}`); + if (session.cwd) process.chdir(session.cwd); + await nativeResumeFn(session); + return true; + } + return false; +} + +function wait(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); +} + +/** + * Warn user about precedence remapping and give a brief countdown in TTY mode. + */ +export async function showForwardingWarnings( + warnings: string[], + context: { isTTY: boolean }, + seconds = 5, +): Promise { + if (warnings.length === 0) return; + + for (const warning of warnings) { + clack.log.warn(warning); + } + + if (!context.isTTY || seconds <= 0) return; + + for (let remaining = seconds; remaining >= 1; remaining -= 1) { + process.stdout.write(chalk.yellow(`\rContinuing in ${remaining}s... `)); + await wait(1000); + } + + process.stdout.write('\r'); + process.stdout.write(' '.repeat(24)); + process.stdout.write('\r'); +} + +/** + * Extract extra CLI args from commander command object. 
+ * Used for forwarding unknown options to target CLI tools. + */ +export function getExtraCommandArgs(command: Command, processedArgCount: number): string[] { + return command.args.slice(processedArgCount); +} diff --git a/src/commands/dump.ts b/src/commands/dump.ts new file mode 100644 index 0000000..2588cab --- /dev/null +++ b/src/commands/dump.ts @@ -0,0 +1,173 @@ +/** + * `continues dump ` — bulk export sessions to files. + */ +import * as fs from 'fs'; +import * as path from 'path'; +import chalk from 'chalk'; +import ora from 'ora'; +import { getPreset, loadConfig } from '../config/index.js'; +import type { VerbosityConfig } from '../config/index.js'; +import { adapters, ALL_TOOLS } from '../parsers/registry.js'; +import type { SessionSource, UnifiedSession } from '../types/index.js'; +import { getAllSessions, getSessionsBySource } from '../utils/index.js'; + +/** + * Dump sessions to files in a directory. + */ +export async function dumpCommand( + sourceOrAll: string, + directory: string, + options: { + preset?: string; + json?: boolean; + limit?: string; + rebuild?: boolean; + configPath?: string; + chain?: boolean; + }, + context: { isTTY: boolean }, +): Promise { + let spinner: ReturnType | null = null; + try { + // Validate source + const isAll = sourceOrAll === 'all'; + if (!isAll && !ALL_TOOLS.includes(sourceOrAll as SessionSource)) { + console.error(chalk.red(`Invalid source: ${sourceOrAll}`)); + console.error(chalk.gray(`Valid sources: all, ${ALL_TOOLS.join(', ')}`)); + process.exitCode = 1; + return; + } + + // Get sessions + spinner = context.isTTY ? 
ora('Loading sessions...').start() : null; + + let sessions: UnifiedSession[]; + if (isAll) { + sessions = await getAllSessions(options.rebuild); + } else { + sessions = await getSessionsBySource(sourceOrAll as SessionSource, options.rebuild); + } + + if (spinner) spinner.stop(); + + if (sessions.length === 0) { + console.log(chalk.yellow('No sessions found.')); + return; + } + + // Apply limit + const limit = options.limit ? parseInt(options.limit, 10) : undefined; + if (limit !== undefined && (!Number.isFinite(limit) || limit <= 0)) { + console.error(chalk.red('--limit must be a positive integer')); + process.exitCode = 1; + return; + } + if (limit && limit > 0) { + sessions = sessions.slice(0, limit); + } + + // Create directory (mkdirSync with recursive handles existing dirs) + const targetDir = path.resolve(directory); + try { + fs.mkdirSync(targetDir, { recursive: true }); + } catch (err) { + console.error(chalk.red(`Failed to create directory: ${targetDir}`)); + console.error((err as Error).message); + process.exitCode = 1; + return; + } + + // Get preset config + const presetName = options.preset || 'standard'; + let config: VerbosityConfig; + try { + config = getPreset(presetName); + } catch { + config = loadConfig(options.configPath); + } + + if (options.chain === false) { + config = { + ...config, + agents: { + ...config.agents, + claude: { + ...config.agents.claude, + chainCompactedHistory: false, + }, + }, + }; + } + + // Export sessions + let successCount = 0; + let errorCount = 0; + const startTime = Date.now(); + const successBySource: Record = {}; + + for (let i = 0; i < sessions.length; i++) { + const session = sessions[i]; + const progress = `${i + 1}/${sessions.length}`; + + if (context.isTTY) { + process.stdout.write(`\r${chalk.gray(progress)} Exporting: ${session.source}/${session.id.slice(0, 8)}...`); + } + + const ext = options.json ? 
'json' : 'md'; + const filename = `${session.source}_${session.id}.${ext}`; + const filepath = path.join(targetDir, filename); + + try { + if (options.json) { + // JSON export + const json = JSON.stringify(session, null, 2); + fs.writeFileSync(filepath, json, 'utf8'); + } else { + // Markdown export - reuse adapter's extractContext + const adapter = adapters[session.source]; + if (!adapter) { + throw new Error(`No adapter found for source: ${session.source}`); + } + const ctx = await adapter.extractContext(session, config); + fs.writeFileSync(filepath, ctx.markdown, 'utf8'); + } + successCount++; + successBySource[session.source] = (successBySource[session.source] || 0) + 1; + } catch (err) { + if (!context.isTTY) { + console.error(chalk.red(`Failed: ${session.id}`), (err as Error).message); + } + errorCount++; + } + } + + // Clear progress line and print summary + if (context.isTTY) { + process.stdout.write('\r' + ' '.repeat(80) + '\r'); + } + + const elapsed = ((Date.now() - startTime) / 1000).toFixed(1); + + console.log(chalk.green.bold('Dump complete:')); + console.log(` Files: ${successCount} exported`); + if (errorCount > 0) { + console.log(` ${chalk.red(`Errors: ${errorCount} failed`)}`); + } + console.log(` Directory: ${targetDir}`); + console.log(` Time: ${elapsed}s`); + + // Count by source (only successful exports) + console.log('\n By source:'); + for (const [src, count] of Object.entries(successBySource).sort((a, b) => b[1] - a[1])) { + console.log(` ${src.padEnd(12)} ${count}`); + } + + if (errorCount > 0) { + process.exitCode = 1; + } + } catch (error) { + spinner?.stop(); + console.error(chalk.red('Error:'), (error as Error).message); + process.exitCode = 1; + } +} diff --git a/src/commands/inspect.ts b/src/commands/inspect.ts new file mode 100644 index 0000000..0402f17 --- /dev/null +++ b/src/commands/inspect.ts @@ -0,0 +1,785 @@ +/** + * `continues inspect ` — diagnostic command that runs the full + * parsing pipeline and outputs detailed 
statistics showing what was parsed, + * how much made it into the markdown, and conversion efficiency. + * + * Designed for verifying that nothing is silently dropped during extraction. + */ +import * as fs from 'fs'; +import * as path from 'path'; +import chalk from 'chalk'; +import { getPreset, loadConfig } from '../config/index.js'; +import type { VerbosityConfig } from '../config/index.js'; +import { adapters } from '../parsers/registry.js'; +import type { SessionContext, ReasoningStep, UnifiedSession } from '../types/index.js'; +import { classifyToolName } from '../types/tool-names.js'; +import { findSession } from '../utils/index.js'; +import { readJsonlFile } from '../utils/jsonl.js'; + +// ── Format Detection ──────────────────────────────────────────────────────── + +type SessionFormat = 'jsonl' | 'json' | 'sqlite' | 'yaml'; + +function getSessionFormat(source: string): SessionFormat { + switch (source) { + case 'claude': + case 'codex': + case 'droid': + case 'cursor': + case 'antigravity': + return 'jsonl'; + case 'gemini': + case 'amp': + case 'kiro': + case 'cline': + case 'roo-code': + case 'kilo-code': + return 'json'; + case 'crush': + case 'opencode': + return 'sqlite'; + case 'copilot': + return 'yaml'; + default: + return 'jsonl'; + } +} + +// ── Helpers ───────────────────────────────────────────────────────────────── + +/** Format bytes into a human-readable string (KB, MB, GB). */ +function formatBytes(bytes: number): string { + if (bytes < 1024) return `${bytes} B`; + if (bytes < 1024 * 1024) return `${(bytes / 1024).toFixed(1)} KB`; + if (bytes < 1024 * 1024 * 1024) return `${(bytes / (1024 * 1024)).toFixed(1)} MB`; + return `${(bytes / (1024 * 1024 * 1024)).toFixed(1)} GB`; +} + +/** Render a simple bar chart using unicode blocks. Width = 30 chars. 
*/ +function bar(fraction: number, width = 30): string { + const filled = Math.round(fraction * width); + return '█'.repeat(filled) + '░'.repeat(width - filled); +} + +/** Right-pad a string to a given width. */ +function pad(s: string, width: number): string { + return s.length >= width ? s : s + ' '.repeat(width - s.length); +} + +/** Right-align a number string to a given width. */ +function rpad(n: number | string, width: number): string { + const s = String(n); + return s.length >= width ? s : ' '.repeat(width - s.length) + s; +} + +/** Count lines in a file synchronously (fast newline count). */ +function countLines(filePath: string): number { + try { + const buf = fs.readFileSync(filePath); + let count = 0; + for (let i = 0; i < buf.length; i++) { + if (buf[i] === 0x0a) count++; + } + // If file doesn't end with newline, count the last line + if (buf.length > 0 && buf[buf.length - 1] !== 0x0a) count++; + return count; + } catch { + return 0; + } +} + +/** List files in a directory, returning dirent-like info. 
*/ +function listDir(dirPath: string): Array<{ name: string; size: number; lines: number }> { + try { + if (!fs.existsSync(dirPath)) return []; + const entries = fs.readdirSync(dirPath, { withFileTypes: true }); + return entries + .filter((e) => e.isFile()) + .map((e) => { + const fp = path.join(dirPath, e.name); + const stats = fs.statSync(fp); + return { name: e.name, size: stats.size, lines: countLines(fp) }; + }); + } catch { + return []; + } +} + +// ── Content Block & Event Analysis ────────────────────────────────────────── + +interface RawEventCounts { + byType: Map; + total: number; +} + +interface ContentBlockCounts { + byType: Map; + total: number; +} + +interface ToolCategoryCounts { + byType: Map; + total: number; +} + +interface SubagentFileInfo { + name: string; + lines: number; + status: 'completed' | 'killed'; + toolCallCount: number; +} + +interface ToolResultFileInfo { + name: string; + lines: number; + size: number; +} + +/** + * Analyze raw JSONL messages for event distribution, content blocks, + * and tool call categories. 
+ */ +function analyzeRawMessages( + messages: Array>, +): { + events: RawEventCounts; + blocks: ContentBlockCounts; + tools: ToolCategoryCounts; + model: string | undefined; +} { + const eventMap = new Map(); + const blockMap = new Map(); + const toolMap = new Map(); + let model: string | undefined; + + for (const msg of messages) { + // Count event types + const type = (msg.type as string) || 'unknown'; + eventMap.set(type, (eventMap.get(type) || 0) + 1); + + // Extract model + if (!model && msg.model) { + model = msg.model as string; + } + + // Count content blocks from assistant and user messages + const message = msg.message as { role?: string; content?: unknown } | undefined; + if (!message?.content || !Array.isArray(message.content)) continue; + + for (const block of message.content as Array<{ type: string; name?: string }>) { + const blockType = block.type || 'unknown'; + blockMap.set(blockType, (blockMap.get(blockType) || 0) + 1); + + // Classify tool_use blocks by category + if (blockType === 'tool_use' && block.name) { + const category = classifyToolName(block.name); + if (category) { + // Map to display labels + const label = CATEGORY_LABELS[category] || category; + toolMap.set(label, (toolMap.get(label) || 0) + 1); + } + } + } + } + + return { + events: { byType: eventMap, total: messages.length }, + blocks: { + byType: blockMap, + total: Array.from(blockMap.values()).reduce((a, b) => a + b, 0), + }, + tools: { + byType: toolMap, + total: Array.from(toolMap.values()).reduce((a, b) => a + b, 0), + }, + model, + }; +} + +const CATEGORY_LABELS: Record = { + shell: 'Shell/Bash', + read: 'Read', + write: 'Write', + edit: 'Edit', + grep: 'Grep/Glob', + glob: 'Grep/Glob', + search: 'Search', + fetch: 'Fetch', + task: 'Task (subagent)', + ask: 'Ask', + mcp: 'MCP', + reasoning: 'Reasoning', +}; + +// ── Subagent File Analysis (Claude-specific) ──────────────────────────────── + +async function analyzeSubagentFiles(sessionDir: string): Promise { + const 
subagentsDir = path.join(sessionDir, 'subagents');
  const files = listDir(subagentsDir);
  const results: SubagentFileInfo[] = [];

  for (const file of files) {
    if (!file.name.endsWith('.jsonl')) continue;
    const filePath = path.join(subagentsDir, file.name);

    try {
      const msgs = await readJsonlFile<Record<string, unknown>>(filePath);
      let toolCallCount = 0;
      let wasKilled = false;

      for (const m of msgs) {
        if (m.type !== 'assistant') continue;
        const message = m.message as { content?: Array<{ type: string }> } | undefined;
        if (!message?.content || !Array.isArray(message.content)) continue;
        for (const block of message.content) {
          if (block.type === 'tool_use') toolCallCount++;
        }
        // Check for rate-limit/kill indicators in text blocks
        const content = message.content as Array<{ type: string; text?: string }>;
        for (const block of content) {
          if (block.type === 'text' && block.text) {
            const lower = block.text.toLowerCase();
            if (lower.includes('out of extra usage') || lower.includes('rate limit')) {
              wasKilled = true;
            }
          }
        }
      }

      results.push({
        name: file.name,
        lines: file.lines,
        status: wasKilled ? 'killed' : 'completed',
        toolCallCount,
      });
    } catch {
      // Unreadable subagent files are reported as killed with zero tool calls.
      results.push({
        name: file.name,
        lines: file.lines,
        status: 'killed',
        toolCallCount: 0,
      });
    }
  }

  return results;
}

// ── Markdown Stats ──────────────────────────────────────────────────────────

interface MarkdownStats {
  totalChars: number;
  sections: number;
  recentMessages: number;
  toolSummaries: number;
  subagentResults: number;
  reasoningSteps: number;
  pendingTasks: number;
  filesModified: number;
}

/** Derive summary statistics from an extracted session context. */
function computeMarkdownStats(ctx: SessionContext): MarkdownStats {
  const md = ctx.markdown;
  // Count sections by ## headings
  const sections = (md.match(/^## /gm) || []).length;

  return {
    totalChars: md.length,
    sections,
    recentMessages: ctx.recentMessages.length,
    toolSummaries: ctx.toolSummaries.length,
    subagentResults: ctx.sessionNotes?.subagentResults?.length || 0,
    reasoningSteps: ctx.sessionNotes?.reasoningSteps?.length || 0,
    pendingTasks: ctx.pendingTasks.length,
    filesModified: ctx.filesModified.length,
  };
}

// ── Output Rendering ────────────────────────────────────────────────────────

/** Banner with the session id between heavy horizontal rules. */
function renderHeader(sessionId: string): string {
  const line = '═'.repeat(66);
  return [
    '',
    chalk.bold(line),
    chalk.bold(` SESSION INSPECTION: ${sessionId}`),
    chalk.bold(line),
    '',
  ].join('\n');
}

// NOTE(review): exact run lengths of the padding spaces inside the display
// strings below may have been collapsed in transit — confirm against the
// original file before shipping.
function renderSourceFiles(
  session: UnifiedSession,
  mainLines: number,
  mainSize: number,
  subagentFiles: SubagentFileInfo[],
  toolResultFiles: ToolResultFileInfo[],
): string {
  const lines: string[] = [chalk.cyan.bold('📂 Source Files')];

  lines.push(
    ` Main JSONL: ${chalk.gray(session.originalPath)} (${mainLines} lines, ${formatBytes(mainSize)})`,
  );

  if (subagentFiles.length > 0) {
    const totalSubLines = subagentFiles.reduce((s, f) => s + f.lines, 0);
    lines.push(` Subagents: ${subagentFiles.length} files (${totalSubLines} total lines)`);
    for (const f of subagentFiles) {
      const statusIcon = f.status === 'completed' ? chalk.green('✓') : chalk.red('✗');
      const statusText = f.status === 'completed' ? 'completed' : 'killed';
      lines.push(
        ` ${pad(f.name, 30)} ${rpad(f.lines, 5)} lines ${statusIcon} ${statusText} (${f.toolCallCount} tools)`,
      );
    }
  }

  if (toolResultFiles.length > 0) {
    const totalSize = toolResultFiles.reduce((s, f) => s + f.size, 0);
    lines.push(` Tool Results: ${toolResultFiles.length} files (${formatBytes(totalSize)} total)`);
    for (const f of toolResultFiles) {
      lines.push(
        ` ${pad(f.name, 45)} ${rpad(f.lines, 5)} lines (${formatBytes(f.size)})`,
      );
    }
  }

  lines.push('');
  return lines.join('\n');
}

function renderEventDistribution(events: RawEventCounts, source: string): string {
  const lines: string[] = [chalk.cyan.bold(`📊 Raw Event Distribution (${source} session)`)];

  // Sort by count descending
  const sorted = Array.from(events.byType.entries()).sort((a, b) => b[1] - a[1]);
  if (sorted.length === 0) {
    lines.push(' (no events)');
    lines.push('');
    return lines.join('\n');
  }
  // Find longest label for alignment
  const maxLabel = Math.max(...sorted.map(([k]) => k.length));

  for (const [type, count] of sorted) {
    const frac = count / events.total;
    const pct = (frac * 100).toFixed(1);
    lines.push(
      ` ${bar(frac)} ${pad(type + ':', maxLabel + 1)} ${rpad(count, 6)} (${rpad(pct, 5)}%)`,
    );
  }

  lines.push(` ${' '.repeat(30)} ${pad('TOTAL:', maxLabel + 1)} ${rpad(events.total, 6)}`);
  lines.push('');
  return lines.join('\n');
}

function renderContentBlocks(blocks: ContentBlockCounts): string {
  const lines: string[] = [chalk.cyan.bold('📊 Content Blocks (from messages)')];

  const sorted = Array.from(blocks.byType.entries()).sort((a, b) => b[1] - a[1]);
  if (sorted.length === 0) {
    lines.push(' (no blocks)');
    lines.push('');
    return lines.join('\n');
  }
  const maxLabel = Math.max(...sorted.map(([k]) => k.length));

  for (const [type, count] of sorted) {
    lines.push(` ${pad(type + ':', maxLabel + 1)} ${rpad(count, 6)}`);
  }

  lines.push(` ${pad('TOTAL:', maxLabel + 1)} ${rpad(blocks.total, 6)}`);
  lines.push('');
  return lines.join('\n');
}

function renderToolCategories(tools: ToolCategoryCounts): string {
  const lines: string[] = [chalk.cyan.bold('🔧 Tool Calls by Category')];

  const sorted = Array.from(tools.byType.entries()).sort((a, b) => b[1] - a[1]);
  // Fallbacks (5, 1) keep Math.max well-defined when sorted is empty.
  const maxLabel = Math.max(...sorted.map(([k]) => k.length), 5);
  const maxCount = Math.max(...sorted.map(([, v]) => v), 1);

  for (const [category, count] of sorted) {
    const barWidth = Math.max(1, Math.round((count / maxCount) * 20));
    lines.push(
      ` ${pad(category + ':', maxLabel + 1)} ${rpad(count, 5)} ${'█'.repeat(barWidth)}`,
    );
  }

  lines.push(` ${pad('TOTAL:', maxLabel + 1)} ${rpad(tools.total, 5)}`);
  lines.push('');
  return lines.join('\n');
}

function renderSubagentAnalysis(subagents: SubagentFileInfo[]): string {
  if (subagents.length === 0) return '';

  const lines: string[] = [
    chalk.cyan.bold(`🔍 Subagent Analysis (${subagents.length} found)`),
  ];
  lines.push(
    ` ${pad('Name', 30)} ${pad('Status', 12)} ${rpad('Tools', 5)} ${rpad('Lines', 5)}`,
  );

  for (const s of subagents) {
    const statusColored =
      s.status === 'completed' ? chalk.green(pad(s.status, 12)) : chalk.red(pad(s.status, 12));
    const shortName = s.name.replace(/\.jsonl$/, '');
    lines.push(
      ` ${pad(shortName, 30)} ${statusColored} ${rpad(s.toolCallCount, 5)} ${rpad(s.lines, 5)}`,
    );
  }

  lines.push('');
  return lines.join('\n');
}

function renderReasoningChain(steps: ReasoningStep[]): string {
  if (!steps || steps.length === 0) return '';

  const lines: string[] = [
    chalk.cyan.bold(`🧠 Reasoning Chain (${steps.length} steps extracted)`),
  ];

  for (const step of steps) {
    const thought = step.thought.length > 60 ? step.thought.slice(0, 57) + '...' : step.thought;
    const next = step.nextAction
      ? step.nextAction.length > 30
        ?
step.nextAction.slice(0, 27) + '...'
        : step.nextAction
      : '';
    lines.push(
      ` Step ${step.stepNumber}/${step.totalSteps} (${step.purpose}): "${thought}"${next ? ` → Next: "${next}"` : ''}`,
    );
  }

  lines.push('');
  return lines.join('\n');
}

// NOTE(review): internal cell padding in the box-drawing table below may have
// been collapsed in transit — confirm column widths against the original file.
function renderMarkdownOutput(stats: MarkdownStats, presetName: string): string {
  const lines: string[] = [
    chalk.cyan.bold(`📝 Markdown Output (preset: ${presetName})`),
    ` ┌─────────────────────┬─────────┐`,
    ` │ Total chars │ ${rpad(stats.totalChars.toLocaleString(), 7)} │`,
    ` │ Sections │ ${rpad(stats.sections, 7)} │`,
    ` │ Recent messages │ ${rpad(stats.recentMessages, 7)} │`,
    ` │ Tool summaries │ ${rpad(stats.toolSummaries, 7)} │`,
    ` │ Subagent results │ ${rpad(stats.subagentResults, 7)} │`,
    ` │ Reasoning steps │ ${rpad(stats.reasoningSteps, 7)} │`,
    ` │ Pending tasks │ ${rpad(stats.pendingTasks, 7)} │`,
    ` │ Files modified │ ${rpad(stats.filesModified, 7)} │`,
    ` └─────────────────────┴─────────┘`,
    '',
  ];
  return lines.join('\n');
}

function renderConversionSummary(
  mainSize: number,
  subagentFiles: SubagentFileInfo[],
  toolResultFiles: ToolResultFileInfo[],
  events: RawEventCounts,
  markdownStats: MarkdownStats,
  context: SessionContext,
): string {
  // Total raw input size (main JSONL + tool result files)
  const toolResultTotalSize = toolResultFiles.reduce((s, f) => s + f.size, 0);
  const rawInput = mainSize + toolResultTotalSize;

  const markdownBytes = Buffer.byteLength(context.markdown, 'utf8');
  const ratio = rawInput > 0 ? ((markdownBytes / rawInput) * 100).toFixed(1) : '0.0';

  // Count content events (non-progress)
  const contentEvents =
    (events.byType.get('assistant') || 0) + (events.byType.get('user') || 0);

  const subagentsCaptured = context.sessionNotes?.subagentResults?.length || 0;
  const reasoningCaptured = context.sessionNotes?.reasoningSteps?.length || 0;

  const lines: string[] = [
    chalk.cyan.bold('📈 Conversion Summary'),
    ` Raw input: ${formatBytes(rawInput)} (main${subagentFiles.length > 0 ? ` + ${subagentFiles.length} subagents` : ''}${toolResultFiles.length > 0 ? ` + ${toolResultFiles.length} tool-results` : ''})`,
    ` Markdown: ${formatBytes(markdownBytes)}`,
    ` Ratio: ${ratio}%`,
    ` Coverage:`,
    ` ${chalk.green('✓')} ${contentEvents}/${events.total} events → content (noise filtered)`,
  ];

  if (subagentFiles.length > 0) {
    lines.push(` ${chalk.green('✓')} ${subagentsCaptured}/${subagentFiles.length} subagent results captured`);
  }
  if (toolResultFiles.length > 0) {
    const toolResultsCaptured = context.sessionNotes?.externalToolResults?.length || 0;
    lines.push(` ${chalk.green('✓')} ${toolResultsCaptured}/${toolResultFiles.length} tool-result files noted`);
  }
  if (reasoningCaptured > 0) {
    lines.push(` ${chalk.green('✓')} ${reasoningCaptured} reasoning steps extracted`);
  }
  lines.push(` ${chalk.green('✓')} ${markdownStats.recentMessages} recent messages included`);

  lines.push('');
  return lines.join('\n');
}

// ── Truncated Output ────────────────────────────────────────────────────────

/** Truncate a single line to maxLen, replacing the tail with '...'. */
function truncateLine(s: string, maxLen: number): string {
  if (s.length <= maxLen) return s;
  return s.slice(0, maxLen - 3) + '...';
}

/** Compact one-line-per-section report for --truncate mode. */
function renderTruncated(
  session: UnifiedSession,
  mainLines: number,
  mainSize: number,
  subagentFiles: SubagentFileInfo[],
  toolResultFiles: ToolResultFileInfo[],
  events: RawEventCounts,
  blocks: ContentBlockCounts,
  tools: ToolCategoryCounts,
  markdownStats: MarkdownStats,
  context: SessionContext,
  model: string | undefined,
  maxLen: number,
): string {
  const lines: string[] = [];

  // SESSION line
  const sessionLine = `SESSION: ${session.id.slice(0, 8)} | ${model || session.source} | ${mainLines} lines | ${formatBytes(mainSize)} | ${subagentFiles.length} subagents | ${toolResultFiles.length} tool-results`;
  lines.push(truncateLine(sessionLine, maxLen));

  // EVENTS line
  const eventParts = Array.from(events.byType.entries())
    .sort((a, b) => b[1] - a[1])
    .map(([k, v]) => `${k}=${v}`);
  lines.push(truncateLine(`EVENTS: ${eventParts.join(' ')}`, maxLen));

  // BLOCKS line
  const blockParts = Array.from(blocks.byType.entries())
    .sort((a, b) => b[1] - a[1])
    .map(([k, v]) => `${k}=${v}`);
  lines.push(truncateLine(`BLOCKS: ${blockParts.join(' ')}`, maxLen));

  // TOOLS line
  const toolParts = Array.from(tools.byType.entries())
    .sort((a, b) => b[1] - a[1])
    .map(([k, v]) => `${k}=${v}`);
  toolParts.push(`total=${tools.total}`);
  lines.push(truncateLine(`TOOLS: ${toolParts.join(' ')}`, maxLen));

  // SUBS line
  if (subagentFiles.length > 0) {
    const subParts = subagentFiles.map((s) => {
      const short = s.name.replace(/^agent-/, '').replace(/\.jsonl$/, '');
      return `${short}=${s.status}(${s.toolCallCount}t,${s.lines}L)`;
    });
    lines.push(truncateLine(`SUBS: ${subParts.join(' ')}`, maxLen));
  }

  // REASON line
  const steps = context.sessionNotes?.reasoningSteps;
  if (steps && steps.length > 0) {
    const stepParts = steps.map(
      (s) => `step${s.stepNumber}/${s.totalSteps}(${s.purpose}→${(s.nextAction || '').slice(0, 15)})`,
    );
    lines.push(truncateLine(`REASON: ${stepParts.join(' ')}`, maxLen));
  }

  // OUTPUT line
  lines.push(
    truncateLine(
      `OUTPUT: ${markdownStats.totalChars} chars | ${markdownStats.sections} sections | ${markdownStats.recentMessages} msgs | ${markdownStats.toolSummaries} tools | ${markdownStats.subagentResults} subs | ${markdownStats.reasoningSteps} reason | ${markdownStats.pendingTasks} pending | ${markdownStats.filesModified} files`,
      maxLen,
    ),
  );

  // RATIO line
  const markdownBytes = Buffer.byteLength(context.markdown, 'utf8');
  const rawInput = mainSize + toolResultFiles.reduce((s, f) => s + f.size, 0);
  const ratio = rawInput > 0 ? ((markdownBytes / rawInput) * 100).toFixed(1) : '0.0';
  const contentEvents = (events.byType.get('assistant') || 0) + (events.byType.get('user') || 0);
  const subsCaptured = context.sessionNotes?.subagentResults?.length || 0;
  const toolResCaptured = context.sessionNotes?.externalToolResults?.length || 0;
  const reasonCaptured = context.sessionNotes?.reasoningSteps?.length || 0;

  lines.push(
    truncateLine(
      `RATIO: ${formatBytes(rawInput)} → ${formatBytes(markdownBytes)} (${ratio}%) | events=${contentEvents}/${events.total} | subs=${subsCaptured}/${subagentFiles.length} | toolres=${toolResCaptured}/${toolResultFiles.length} | reason=${reasonCaptured}`,
      maxLen,
    ),
  );

  return lines.join('\n');
}

// ── Main Entry Point ────────────────────────────────────────────────────────

/**
 * Inspect a session and display detailed parsing diagnostics.
 *
 * @param sessionIdOrShort - Full or short session ID
 * @param opts.preset - Verbosity preset name (default: 'standard')
 * @param opts.truncate - If set, output compact one-liner per section truncated to N chars
 * @param opts.writeMd - If set, write markdown output to file (true = auto-name, string = path)
 * @param opts.chain - If false, disable chaining of compacted Claude history
 */
export async function inspectSession(
  sessionIdOrShort: string,
  opts: { preset?: string; truncate?: number; writeMd?: string | boolean; chain?: boolean },
): Promise<void> {
  // 1.
Find session + const session = await findSession(sessionIdOrShort); + if (!session) { + console.error(chalk.red(`Session not found: ${sessionIdOrShort}`)); + process.exitCode = 1; + return; + } + + const presetName = opts.preset || 'standard'; + let config: VerbosityConfig; + try { + config = getPreset(presetName); + } catch { + // Fall back to loaded config if preset name is invalid + config = loadConfig(); + } + + if (opts.chain === false) { + config = { + ...config, + agents: { + ...config.agents, + claude: { + ...config.agents.claude, + chainCompactedHistory: false, + }, + }, + }; + } + + // 2. Read raw events (format-aware) + const format = getSessionFormat(session.source); + let rawMessages: Array> = []; + let rawEventNote = ''; + + if (format === 'jsonl') { + rawMessages = await readJsonlFile>(session.originalPath); + } else if (format === 'json') { + try { + const content = fs.readFileSync(session.originalPath, 'utf-8'); + const parsed = JSON.parse(content); + rawMessages = Array.isArray(parsed) ? parsed : [parsed]; + } catch { + rawEventNote = '(JSON parse failed)'; + } + } else if (format === 'sqlite') { + rawEventNote = '(raw event analysis not available for SQLite sessions)'; + } else if (format === 'yaml') { + rawEventNote = '(raw event analysis not available for YAML sessions)'; + } + + const { events, blocks, tools, model } = analyzeRawMessages(rawMessages); + + // 3. File stats + const mainFileStats = fs.statSync(session.originalPath); + const mainSize = mainFileStats.size; + const mainLines = format === 'jsonl' ? rawMessages.length : countLines(session.originalPath); + + // 4. 
Subagent & tool-result files (Claude-specific) + const sessionDir = session.originalPath.replace(/\.jsonl$/, ''); + let subagentFiles: SubagentFileInfo[] = []; + let toolResultFiles: ToolResultFileInfo[] = []; + + if (session.source === 'claude') { + subagentFiles = await analyzeSubagentFiles(sessionDir); + + const toolResultsDir = path.join(sessionDir, 'tool-results'); + toolResultFiles = listDir(toolResultsDir).map((f) => ({ + name: f.name, + lines: f.lines, + size: f.size, + })); + } + + // 5. Run extraction pipeline via adapter + const adapter = adapters[session.source]; + const context = await adapter.extractContext(session, config); + const markdownStats = computeMarkdownStats(context); + + // 6. Write markdown if requested + if (opts.writeMd !== undefined && opts.writeMd !== false) { + const mdPath = + typeof opts.writeMd === 'string' + ? opts.writeMd + : `inspect-${session.id.slice(0, 8)}.md`; + fs.writeFileSync(mdPath, context.markdown, 'utf8'); + console.log(chalk.green(`Markdown written to ${mdPath}`)); + } + + // 7. 
Render output + if (opts.truncate) { + console.log( + renderTruncated( + session, + mainLines, + mainSize, + subagentFiles, + toolResultFiles, + events, + blocks, + tools, + markdownStats, + context, + model, + opts.truncate, + ), + ); + if (rawMessages.length === 0 && rawEventNote) { + console.log(chalk.gray(` ${rawEventNote}`)); + } + return; + } + + // Full output + const output: string[] = []; + + output.push(renderHeader(session.id)); + output.push( + renderSourceFiles(session, mainLines, mainSize, subagentFiles, toolResultFiles), + ); + output.push(renderEventDistribution(events, session.source)); + if (rawMessages.length === 0 && rawEventNote) { + output.push(chalk.gray(` ${rawEventNote}\n`)); + } + + if (blocks.total > 0) { + output.push(renderContentBlocks(blocks)); + } + + if (tools.total > 0) { + output.push(renderToolCategories(tools)); + } + + if (subagentFiles.length > 0) { + output.push(renderSubagentAnalysis(subagentFiles)); + } + + if (context.sessionNotes?.reasoningSteps) { + output.push(renderReasoningChain(context.sessionNotes.reasoningSteps)); + } + + output.push(renderMarkdownOutput(markdownStats, presetName)); + output.push( + renderConversionSummary( + mainSize, + subagentFiles, + toolResultFiles, + events, + markdownStats, + context, + ), + ); + + console.log(output.join('\n')); +} diff --git a/src/commands/list.ts b/src/commands/list.ts new file mode 100644 index 0000000..966ee21 --- /dev/null +++ b/src/commands/list.ts @@ -0,0 +1,61 @@ +import chalk from 'chalk'; +import ora from 'ora'; +import { formatSessionColored } from '../display/format.js'; +import { showNoSessionsHelp } from '../display/help.js'; +import type { SessionSource, UnifiedSession } from '../types/index.js'; +import { getAllSessions, getSessionsBySource, sessionsToJsonl } from '../utils/index.js'; + +/** + * List sessions command handler + */ +export async function listCommand( + options: { source?: string; limit: string; json?: boolean; jsonl?: boolean; rebuild?: 
boolean }, + context: { isTTY: boolean }, +): Promise { + try { + // Use simple spinner for non-interactive + const spinner = context.isTTY && !options.json && !options.jsonl ? ora('Loading sessions...').start() : null; + + let sessions: UnifiedSession[]; + if (options.source) { + sessions = await getSessionsBySource(options.source as SessionSource, options.rebuild); + } else { + sessions = await getAllSessions(options.rebuild); + } + + if (spinner) spinner.stop(); + + const limit = parseInt(options.limit, 10); + const displaySessions = sessions.slice(0, limit); + + if (options.json) { + console.log(JSON.stringify(displaySessions, null, 2)); + return; + } + + if (options.jsonl) { + console.log(sessionsToJsonl(displaySessions)); + return; + } + + if (sessions.length === 0) { + if (context.isTTY) { + showNoSessionsHelp(); + } else { + console.log('No sessions found.'); + } + return; + } + + // Print header + console.log(chalk.gray(`Found ${sessions.length} sessions (showing ${displaySessions.length}):`)); + console.log(); + + for (const session of displaySessions) { + console.log(formatSessionColored(session)); + } + } catch (error) { + console.error(chalk.red('Error:'), (error as Error).message); + process.exitCode = 1; + } +} diff --git a/src/commands/pick.ts b/src/commands/pick.ts new file mode 100644 index 0000000..9283436 --- /dev/null +++ b/src/commands/pick.ts @@ -0,0 +1,263 @@ +import * as clack from '@clack/prompts'; +import chalk from 'chalk'; +import { showBanner } from '../display/banner.js'; +import { formatSessionForSelect, sourceColors } from '../display/format.js'; +import { showNoSessionsHelp } from '../display/help.js'; +import { maybePromptGithubStar } from '../display/star-prompt.js'; +import type { SessionSource, UnifiedSession } from '../types/index.js'; +import type { HandoffForwardingOptions } from '../utils/forward-flags.js'; +import { getAllSessions, getSessionsBySource } from '../utils/index.js'; +import { getResumeCommand, nativeResume, 
resolveCrossToolForwarding, resume } from '../utils/resume.js';
import { matchesCwd } from '../utils/slug.js';
import { checkSingleToolAutoResume, selectTargetTool, showForwardingWarnings } from './_shared.js';

/**
 * Main interactive TUI command: pick a session, pick a target tool, resume.
 */
export async function interactivePick(
  options: {
    source?: string;
    noTui?: boolean;
    rebuild?: boolean;
    all?: boolean;
    forwardArgs?: string[];
    preset?: string;
    configPath?: string;
    chain?: boolean;
  },
  context: { isTTY: boolean; supportsColor: boolean; version: string },
): Promise<void> {
  try {
    // If not TTY or --no-tui, fall back to list
    if (!context.isTTY || options.noTui) {
      console.log(chalk.yellow('Interactive mode requires a TTY. Use "continues list" instead.'));
      process.exitCode = 1;
      return;
    }

    const bannerCancelled = await showBanner(context.version, context.supportsColor);
    if (bannerCancelled) return;
    await maybePromptGithubStar();
    clack.intro(chalk.bold('continue') + chalk.cyan.bold('s') + chalk.gray(' — session picker'));

    const s = clack.spinner();
    s.start('Loading sessions...');

    let sessions: UnifiedSession[];
    if (options.source) {
      sessions = await getSessionsBySource(options.source as SessionSource, options.rebuild);
    } else {
      sessions = await getAllSessions(options.rebuild);
    }

    s.stop();

    if (sessions.length === 0) {
      showNoSessionsHelp();
      clack.outro(chalk.gray('No sessions to resume'));
      return;
    }

    // Check for sessions matching current working directory (includes subdirectories)
    const currentDir = process.cwd();
    const cwdSessions = options.all ? [] : sessions.filter((sess) => matchesCwd(sess.cwd, currentDir));
    const hasCwdSessions = cwdSessions.length > 0;

    const dirName = currentDir.split('/').pop() || currentDir;

    if (!options.all && !hasCwdSessions && sessions.length > 0) {
      clack.log.info(chalk.gray(`No sessions in ${dirName}, showing all`));
    }

    // Auto-resume: if exactly 1 session matches cwd, skip picker
    if (cwdSessions.length === 1 && !options.source) {
      const session = cwdSessions[0];
      console.log(chalk.gray(` Auto-selected the only matching session:`));
      console.log(` ${formatSessionForSelect(session)}`);
      console.log();

      if (await checkSingleToolAutoResume(session, nativeResume)) return;

      const targetTool = await selectTargetTool(session, { excludeSource: false });
      if (!targetTool) return;

      // Forwarding extras only apply on a cross-tool handoff.
      const forwarding: HandoffForwardingOptions | undefined =
        targetTool !== session.source ? { tailArgs: options.forwardArgs } : undefined;

      if (forwarding) {
        const resolved = resolveCrossToolForwarding(targetTool, forwarding);
        await showForwardingWarnings(resolved.warnings, context);
      }

      console.log();
      clack.log.info(`Working directory: ${chalk.cyan(session.cwd)}`);
      clack.log.info(`Command: ${chalk.cyan(getResumeCommand(session, targetTool, forwarding))}`);
      console.log();
      clack.log.step(`Handing off to ${targetTool}...`);
      clack.outro(`Launching ${targetTool}`);

      if (session.cwd) process.chdir(session.cwd);
      await resume(session, targetTool, 'inline', forwarding, {
        preset: options.preset,
        configPath: options.configPath,
        chain: options.chain,
      });
      return;
    }

    // Step 1: Filter by CLI tool (optional) -- skip if source already specified
    let filteredSessions = hasCwdSessions ? cwdSessions : sessions;

    if (!options.source && sessions.length > 0) {
      let scope: 'cwd' | 'all' = hasCwdSessions ? 'cwd' : 'all';

      while (true) {
        const pool = scope === 'cwd' ? cwdSessions : sessions;
        const bySource = pool.reduce(
          (acc, sess) => {
            acc[sess.source] = (acc[sess.source] || 0) + 1;
            return acc;
          },
          {} as Record<string, number>,
        );
        const toolCount = Object.keys(bySource).length;

        // Select message conveys scope context
        let message: string;
        if (scope === 'cwd') {
          message = `${dirName} — ${pool.length} session${pool.length !== 1 ? 's' : ''}`;
        } else if (hasCwdSessions) {
          message = `All sessions — ${pool.length} total`;
        } else {
          message = `${pool.length} sessions across ${toolCount} tool${toolCount !== 1 ? 's' : ''}`;
        }

        // Build options: tool names first, then "All tools", then scope toggle
        const filterOptions: { value: string; label: string; hint?: string }[] = [];

        // Per-tool options (sorted by count desc, colored)
        filterOptions.push(
          ...Object.entries(bySource)
            .sort((a, b) => b[1] - a[1])
            .map(([source, count]) => ({
              value: source,
              label: `${sourceColors[source as SessionSource](source.charAt(0).toUpperCase() + source.slice(1))} (${count})`,
            })),
        );

        // "All tools" -- no tool filter, shows all sessions in current scope
        filterOptions.push({
          value: 'all-in-scope',
          label: `All tools (${pool.length})`,
        });

        // Scope toggle (only when CWD sessions exist and --all wasn't used)
        if (hasCwdSessions && !options.all) {
          if (scope === 'cwd') {
            filterOptions.push({
              value: 'scope-toggle',
              label: chalk.dim(`Show all sessions (${sessions.length})`),
            });
          } else {
            filterOptions.push({
              value: 'scope-toggle',
              label: chalk.dim(`This directory (${cwdSessions.length})`),
            });
          }
        }

        const toolFilter = await clack.select({
          message,
          options: filterOptions,
          initialValue: 'all-in-scope',
        });

        if (clack.isCancel(toolFilter)) {
          clack.cancel('Cancelled');
          return;
        }

        // Scope toggle: flip and re-render
        if (toolFilter === 'scope-toggle') {
          scope = scope === 'cwd' ? 'all' : 'cwd';
          continue;
        }

        // "All tools": use entire pool
        if (toolFilter === 'all-in-scope') {
          filteredSessions = pool;
          break;
        }

        // Specific tool: filter by source
        filteredSessions = pool.filter((sess) => sess.source === toolFilter);
        break;
      }
    }

    // Step 2: Select session -- show all with scrolling (maxItems controls viewport)
    const PAGE_SIZE = 500;
    const sessionOptions = filteredSessions.slice(0, PAGE_SIZE).map((sess) => ({
      value: sess,
      label: formatSessionForSelect(sess),
      hint: sess.id.slice(0, 8),
    }));

    if (filteredSessions.length > PAGE_SIZE) {
      clack.log.info(
        chalk.gray(
          `Showing first ${PAGE_SIZE} of ${filteredSessions.length} sessions. Use --source to narrow results.`,
        ),
      );
    }

    const selectedSession = await clack.select({
      message: `Select a session (${filteredSessions.length} available)`,
      options: sessionOptions,
      maxItems: 15,
    });

    if (clack.isCancel(selectedSession)) {
      clack.cancel('Cancelled');
      return;
    }

    const session = selectedSession as UnifiedSession;

    // Step 3: Select target tool
    const targetTool = await selectTargetTool(session);
    if (!targetTool) return;

    const forwarding: HandoffForwardingOptions | undefined =
      targetTool !== session.source ?
{ tailArgs: options.forwardArgs } : undefined; + + if (forwarding) { + const resolved = resolveCrossToolForwarding(targetTool, forwarding); + await showForwardingWarnings(resolved.warnings, context); + } + + // Step 4: Show what will happen and resume + console.log(); + clack.log.info(`Working directory: ${chalk.cyan(session.cwd)}`); + clack.log.info(`Command: ${chalk.cyan(getResumeCommand(session, targetTool, forwarding))}`); + console.log(); + + clack.log.step(`Handing off to ${targetTool}...`); + clack.outro(`Launching ${targetTool}`); + + // Change to session's working directory and resume + if (session.cwd) process.chdir(session.cwd); + await resume(session, targetTool, 'inline', forwarding, { + preset: options.preset, + configPath: options.configPath, + chain: options.chain, + }); + } catch (error) { + if (clack.isCancel(error)) { + clack.cancel('Cancelled'); + return; + } + clack.log.error(`${(error as Error).message}`); + process.exitCode = 1; + } +} diff --git a/src/commands/quick-resume.ts b/src/commands/quick-resume.ts new file mode 100644 index 0000000..cafa921 --- /dev/null +++ b/src/commands/quick-resume.ts @@ -0,0 +1,32 @@ +import chalk from 'chalk'; +import { formatSessionColored } from '../display/format.js'; +import type { SessionSource } from '../types/index.js'; +import { getSessionsBySource } from '../utils/index.js'; +import { nativeResume } from '../utils/resume.js'; + +/** + * Resume Nth session from a specific source tool + */ +export async function resumeBySource(source: SessionSource, n: number): Promise { + try { + const sessions = await getSessionsBySource(source); + + if (sessions.length === 0) { + console.log(chalk.yellow(`No ${source} sessions found.`)); + return; + } + + const index = Math.max(0, Math.min(n - 1, sessions.length - 1)); + const session = sessions[index]; + + console.log(chalk.gray(`Resuming ${source} session #${index + 1}:`)); + console.log(formatSessionColored(session)); + console.log(); + + if (session.cwd) 
process.chdir(session.cwd); + await nativeResume(session); + } catch (error) { + console.error(chalk.red('Error:'), (error as Error).message); + process.exitCode = 1; + } +} diff --git a/src/commands/rebuild.ts b/src/commands/rebuild.ts new file mode 100644 index 0000000..8e6baad --- /dev/null +++ b/src/commands/rebuild.ts @@ -0,0 +1,43 @@ +import * as clack from '@clack/prompts'; +import chalk from 'chalk'; +import { buildIndex } from '../utils/index.js'; + +/** + * Rebuild the session index cache + */ +export async function rebuildCommand(context: { isTTY: boolean }): Promise { + const spinner = context.isTTY ? clack.spinner() : null; + + try { + if (spinner) { + spinner.start('Rebuilding session index...'); + } + + const sessions = await buildIndex(true); + + if (spinner) { + spinner.stop(`Index rebuilt with ${sessions.length} sessions`); + } else { + console.log(`Index rebuilt with ${sessions.length} sessions`); + } + + // Show summary by source + const bySource = sessions.reduce( + (acc, s) => { + acc[s.source] = (acc[s.source] || 0) + 1; + return acc; + }, + {} as Record, + ); + + for (const [source, count] of Object.entries(bySource)) { + console.log(chalk.gray(` ${source}: ${count} sessions`)); + } + } catch (error) { + if (spinner) { + spinner.stop('Failed to rebuild index'); + } + console.error(chalk.red('Error:'), (error as Error).message); + process.exitCode = 1; + } +} diff --git a/src/commands/resume-cmd.ts b/src/commands/resume-cmd.ts new file mode 100644 index 0000000..b479a92 --- /dev/null +++ b/src/commands/resume-cmd.ts @@ -0,0 +1,119 @@ +import * as clack from '@clack/prompts'; +import chalk from 'chalk'; +import ora from 'ora'; +import { formatSessionColored } from '../display/format.js'; +import type { SessionSource } from '../types/index.js'; +import type { HandoffForwardingOptions } from '../utils/forward-flags.js'; +import { findSession, formatSession, getAllSessions } from '../utils/index.js'; +import { getResumeCommand, 
resolveCrossToolForwarding, resume } from '../utils/resume.js'; +import { selectTargetTool, showForwardingWarnings } from './_shared.js'; + +/** + * Resume a specific session by ID + */ +export async function resumeCommand( + sessionId: string, + options: { in?: string; reference?: boolean; noTui?: boolean; preset?: string; configPath?: string; chain?: boolean }, + context: { isTTY: boolean }, + forwarding?: HandoffForwardingOptions, +): Promise { + try { + const spinner = context.isTTY && !options.noTui ? ora('Finding session...').start() : null; + const session = await findSession(sessionId); + if (spinner) spinner.stop(); + + if (!session) { + // Try to find similar sessions + const allSessions = await getAllSessions(); + const similar = allSessions + .filter( + (s) => + s.id.toLowerCase().includes(sessionId.toLowerCase()) || + s.summary?.toLowerCase().includes(sessionId.toLowerCase()), + ) + .slice(0, 3); + + console.error(chalk.red(`Session not found: ${sessionId}`)); + + if (similar.length > 0) { + console.log(chalk.yellow('\nDid you mean one of these?')); + for (const s of similar) { + console.log(` ${formatSessionColored(s)}`); + } + } + + process.exitCode = 1; + return; + } + + const target = options.in as SessionSource | undefined; + const mode = options.reference ? 
('reference' as const) : ('inline' as const); + const contextOptions = { preset: options.preset, configPath: options.configPath, chain: options.chain }; + + const forwardingFor = (candidateTarget: SessionSource | undefined): HandoffForwardingOptions | undefined => { + if (!candidateTarget || candidateTarget === session.source) return undefined; + return forwarding; + }; + + // In non-interactive mode, just resume directly + if (!context.isTTY || options.noTui) { + const effectiveForwarding = forwardingFor(target); + if (target && effectiveForwarding) { + const resolved = resolveCrossToolForwarding(target, effectiveForwarding); + await showForwardingWarnings(resolved.warnings, context); + } + + console.log(chalk.gray('Session: ') + formatSession(session)); + console.log(chalk.gray('Command: ') + chalk.cyan(getResumeCommand(session, target, effectiveForwarding))); + console.log(); + + if (session.cwd) process.chdir(session.cwd); + await resume(session, target, mode, effectiveForwarding, contextOptions); + return; + } + + // Interactive mode - show details and prompt for target if not specified + if (context.isTTY && !target) { + clack.intro(chalk.bold('Resume session')); + + console.log(formatSessionColored(session)); + console.log(); + + const selectedTarget = await selectTargetTool(session); + if (!selectedTarget) return; + + const effectiveForwarding = forwardingFor(selectedTarget); + if (effectiveForwarding) { + const resolved = resolveCrossToolForwarding(selectedTarget, effectiveForwarding); + await showForwardingWarnings(resolved.warnings, context); + } + + clack.log.step(`Handing off to ${selectedTarget}...`); + clack.outro(`Launching ${selectedTarget}`); + + if (session.cwd) process.chdir(session.cwd); + await resume(session, selectedTarget, mode, effectiveForwarding, contextOptions); + } else { + // Target specified, just resume + const effectiveForwarding = forwardingFor(target); + if (target && effectiveForwarding) { + const resolved = 
resolveCrossToolForwarding(target, effectiveForwarding); + await showForwardingWarnings(resolved.warnings, context); + } + + console.log(chalk.gray('Session: ') + formatSession(session)); + console.log(chalk.gray('Command: ') + chalk.cyan(getResumeCommand(session, target, effectiveForwarding))); + console.log(); + + if (session.cwd) process.chdir(session.cwd); + await resume(session, target, mode, effectiveForwarding, contextOptions); + } + } catch (error) { + if (clack.isCancel(error)) { + clack.cancel('Cancelled'); + return; + } + console.error(chalk.red('Error:'), (error as Error).message); + process.exitCode = 1; + } +} diff --git a/src/commands/scan.ts b/src/commands/scan.ts new file mode 100644 index 0000000..2f71f39 --- /dev/null +++ b/src/commands/scan.ts @@ -0,0 +1,49 @@ +import * as clack from '@clack/prompts'; +import chalk from 'chalk'; +import ora from 'ora'; +import { sourceColors } from '../display/format.js'; +import type { SessionSource } from '../types/index.js'; +import { getAllSessions } from '../utils/index.js'; + +/** + * Scan command - show session discovery stats + */ +export async function scanCommand(options: { rebuild?: boolean }, context: { isTTY: boolean }): Promise { + try { + const spinner = context.isTTY ? 
ora('Scanning session directories...').start() : null; + + const sessions = await getAllSessions(options.rebuild); + + if (spinner) spinner.stop(); + + if (context.isTTY) { + clack.intro(chalk.bold('Session Discovery Statistics')); + } + + const bySource = sessions.reduce( + (acc, s) => { + acc[s.source] = (acc[s.source] || 0) + 1; + return acc; + }, + {} as Record, + ); + + console.log(); + console.log(chalk.bold(`Total sessions: ${sessions.length}`)); + console.log(); + + for (const [source, count] of Object.entries(bySource).sort((a, b) => b[1] - a[1])) { + const colorFn = sourceColors[source as SessionSource] || chalk.white; + const bar = '\u2588'.repeat(Math.min(50, Math.floor(count / 10))); + console.log(`${colorFn(source.padEnd(8))}: ${count.toString().padStart(4)} ${chalk.gray(bar)}`); + } + + if (context.isTTY) { + console.log(); + clack.outro(chalk.gray('Run "continues" to pick a session')); + } + } catch (error) { + console.error(chalk.red('Error:'), (error as Error).message); + process.exitCode = 1; + } +} diff --git a/src/config/index.ts b/src/config/index.ts new file mode 100644 index 0000000..b139e76 --- /dev/null +++ b/src/config/index.ts @@ -0,0 +1,5 @@ +/** + * Config module — re-exports verbosity configuration system. + */ +export type { PresetName, VerbosityConfig } from './verbosity.js'; +export { VerbosityConfigSchema, getPreset, loadConfig, mergeConfig } from './verbosity.js'; diff --git a/src/config/verbosity.ts b/src/config/verbosity.ts new file mode 100644 index 0000000..80ac919 --- /dev/null +++ b/src/config/verbosity.ts @@ -0,0 +1,560 @@ +/** + * Unified verbosity configuration for continues. + * + * Controls all truncation limits, sample counts, and feature flags + * across every parser and renderer. Replaces dozens of hardcoded + * `.slice(0, N)` limits with a single, user-configurable system. + * + * Config resolution order: + * 1. Explicit `--config ` CLI flag + * 2. `.continues.yml` in CWD + * 3. `~/.continues/config.yml` + * 4. 
`standard` preset (built-in default) + * + * Users can override any subset of fields — unspecified fields + * inherit from the chosen preset (or `standard` if none specified). + */ +import * as fs from 'fs'; +import * as os from 'os'; +import * as path from 'path'; +import { z } from 'zod'; +import YAML from 'yaml'; +import { logger } from '../logger.js'; + +// ── Zod Schema ────────────────────────────────────────────────────────────── + +const ShellConfigSchema = z.object({ + maxSamples: z.number().int().min(0).default(8), + stdoutLines: z.number().int().min(0).default(5), + stderrLines: z.number().int().min(0).default(5), + maxChars: z.number().int().min(0).default(2000), + showCommand: z.boolean().default(true), + showExitCode: z.boolean().default(true), +}); + +const ReadConfigSchema = z.object({ + maxSamples: z.number().int().min(0).default(20), + maxChars: z.number().int().min(0).default(0), + showLineRange: z.boolean().default(true), +}); + +const WriteConfigSchema = z.object({ + maxSamples: z.number().int().min(0).default(5), + diffLines: z.number().int().min(0).default(200), + maxChars: z.number().int().min(0).default(5000), +}); + +const EditConfigSchema = z.object({ + maxSamples: z.number().int().min(0).default(5), + diffLines: z.number().int().min(0).default(200), + maxChars: z.number().int().min(0).default(5000), +}); + +const GrepConfigSchema = z.object({ + maxSamples: z.number().int().min(0).default(10), + maxChars: z.number().int().min(0).default(500), + showPattern: z.boolean().default(true), + matchLines: z.number().int().min(0).default(5), +}); + +const ThinkingToolsConfigSchema = z.object({ + extractReasoning: z.boolean().default(true), + maxReasoningChars: z.number().int().min(0).default(500), +}); + +const McpConfigSchema = z.object({ + maxSamplesPerNamespace: z.number().int().min(0).default(5), + paramChars: z.number().int().min(0).default(100), + resultChars: z.number().int().min(0).default(100), + thinkingTools: ThinkingToolsConfigSchema, 
+}); + +const TaskConfigSchema = z.object({ + maxSamples: z.number().int().min(0).default(5), + includeSubagentResults: z.boolean().default(true), + subagentResultChars: z.number().int().min(0).default(500), + recurseSubagents: z.boolean().default(false), +}); + +const ThinkingConfigSchema = z.object({ + include: z.boolean().default(true), + maxChars: z.number().int().min(0).default(1000), + maxHighlights: z.number().int().min(0).default(5), +}); + +const CompactSummaryConfigSchema = z.object({ + maxChars: z.number().int().min(0).default(500), +}); + +const PendingTasksConfigSchema = z.object({ + extractFromThinking: z.boolean().default(true), + extractFromSubagents: z.boolean().default(true), + maxTasks: z.number().int().min(0).default(10), +}); + +const ClaudeAgentConfigSchema = z.object({ + filterProgressEvents: z.boolean().default(true), + parseSubagents: z.boolean().default(true), + parseToolResultsDir: z.boolean().default(true), + separateHumanFromToolResults: z.boolean().default(true), + chainCompactedHistory: z.boolean().default(true), + chainMaxDepth: z.number().int().min(0).default(2), + chainSummaryChars: z.number().int().min(0).default(800), +}); + +const AgentFlagsSchema = z.record(z.string(), z.union([z.boolean(), z.number(), z.string()])); + +const AgentsConfigSchema = z + .object({ + claude: ClaudeAgentConfigSchema, + }) + .catchall(AgentFlagsSchema.optional()); + +const PresetNameSchema = z.enum(['minimal', 'standard', 'verbose', 'full']); + +export const VerbosityConfigSchema = z.object({ + preset: PresetNameSchema.default('standard'), + recentMessages: z.number().int().min(0).default(10), + maxMessageChars: z.number().int().min(0).default(500), + shell: ShellConfigSchema, + read: ReadConfigSchema, + write: WriteConfigSchema, + edit: EditConfigSchema, + grep: GrepConfigSchema, + mcp: McpConfigSchema, + task: TaskConfigSchema, + thinking: ThinkingConfigSchema, + compactSummary: CompactSummaryConfigSchema, + pendingTasks: PendingTasksConfigSchema, + 
agents: AgentsConfigSchema, +}); + +// ── TypeScript Type ───────────────────────────────────────────────────────── + +export type PresetName = z.infer; +export type VerbosityConfig = z.infer; + +// ── Presets ────────────────────────────────────────────────────────────────── + +/** Low output (~2KB). Essentials only. */ +const MINIMAL_PRESET: VerbosityConfig = { + preset: 'minimal', + recentMessages: 3, + maxMessageChars: 200, + shell: { + maxSamples: 3, + stdoutLines: 3, + stderrLines: 3, + maxChars: 500, + showCommand: true, + showExitCode: true, + }, + read: { + maxSamples: 5, + maxChars: 0, // path only + showLineRange: false, + }, + write: { + maxSamples: 3, + diffLines: 20, + maxChars: 1000, + }, + edit: { + maxSamples: 3, + diffLines: 20, + maxChars: 1000, + }, + grep: { + maxSamples: 3, + maxChars: 200, + showPattern: true, + matchLines: 2, + }, + mcp: { + maxSamplesPerNamespace: 1, + paramChars: 50, + resultChars: 50, + thinkingTools: { + extractReasoning: false, + maxReasoningChars: 0, + }, + }, + task: { + maxSamples: 2, + includeSubagentResults: false, + subagentResultChars: 0, + recurseSubagents: false, + }, + thinking: { + include: false, + maxChars: 0, + maxHighlights: 0, + }, + compactSummary: { + maxChars: 200, + }, + pendingTasks: { + extractFromThinking: false, + extractFromSubagents: false, + maxTasks: 5, + }, + agents: { + claude: { + filterProgressEvents: true, + parseSubagents: false, + parseToolResultsDir: false, + separateHumanFromToolResults: false, + chainCompactedHistory: true, + chainMaxDepth: 1, + chainSummaryChars: 300, + }, + }, +}; + +/** Current behavior improved (~8KB). Good default for most handoffs. 
*/ +const STANDARD_PRESET: VerbosityConfig = { + preset: 'standard', + recentMessages: 10, + maxMessageChars: 500, + shell: { + maxSamples: 8, + stdoutLines: 5, + stderrLines: 5, + maxChars: 2000, + showCommand: true, + showExitCode: true, + }, + read: { + maxSamples: 20, + maxChars: 0, // path only + showLineRange: true, + }, + write: { + maxSamples: 5, + diffLines: 200, + maxChars: 5000, + }, + edit: { + maxSamples: 5, + diffLines: 200, + maxChars: 5000, + }, + grep: { + maxSamples: 10, + maxChars: 500, + showPattern: true, + matchLines: 5, + }, + mcp: { + maxSamplesPerNamespace: 5, + paramChars: 100, + resultChars: 100, + thinkingTools: { + extractReasoning: true, + maxReasoningChars: 500, + }, + }, + task: { + maxSamples: 5, + includeSubagentResults: true, + subagentResultChars: 500, + recurseSubagents: false, + }, + thinking: { + include: true, + maxChars: 1000, + maxHighlights: 5, + }, + compactSummary: { + maxChars: 500, + }, + pendingTasks: { + extractFromThinking: true, + extractFromSubagents: true, + maxTasks: 10, + }, + agents: { + claude: { + filterProgressEvents: true, + parseSubagents: true, + parseToolResultsDir: true, + separateHumanFromToolResults: true, + chainCompactedHistory: true, + chainMaxDepth: 2, + chainSummaryChars: 800, + }, + }, +}; + +/** Rich context (~30KB). Useful for complex multi-file tasks. 
*/ +const VERBOSE_PRESET: VerbosityConfig = { + preset: 'verbose', + recentMessages: 20, + maxMessageChars: 2000, + shell: { + maxSamples: 15, + stdoutLines: 20, + stderrLines: 20, + maxChars: 8000, + showCommand: true, + showExitCode: true, + }, + read: { + maxSamples: 50, + maxChars: 500, + showLineRange: true, + }, + write: { + maxSamples: 15, + diffLines: 500, + maxChars: 10000, + }, + edit: { + maxSamples: 15, + diffLines: 500, + maxChars: 10000, + }, + grep: { + maxSamples: 20, + maxChars: 1000, + showPattern: true, + matchLines: 10, + }, + mcp: { + maxSamplesPerNamespace: 10, + paramChars: 500, + resultChars: 1000, + thinkingTools: { + extractReasoning: true, + maxReasoningChars: 2000, + }, + }, + task: { + maxSamples: 10, + includeSubagentResults: true, + subagentResultChars: 2000, + recurseSubagents: true, + }, + thinking: { + include: true, + maxChars: 5000, + maxHighlights: 10, + }, + compactSummary: { + maxChars: 1000, + }, + pendingTasks: { + extractFromThinking: true, + extractFromSubagents: true, + maxTasks: 20, + }, + agents: { + claude: { + filterProgressEvents: true, + parseSubagents: true, + parseToolResultsDir: true, + separateHumanFromToolResults: true, + chainCompactedHistory: true, + chainMaxDepth: 4, + chainSummaryChars: 2000, + }, + }, +}; + +/** Everything (~unlimited). Full session data, no truncation. 
*/ +const FULL_PRESET: VerbosityConfig = { + preset: 'full', + recentMessages: 50, + maxMessageChars: 10000, + shell: { + maxSamples: 999, + stdoutLines: 100, + stderrLines: 100, + maxChars: 50000, + showCommand: true, + showExitCode: true, + }, + read: { + maxSamples: 999, + maxChars: 10000, + showLineRange: true, + }, + write: { + maxSamples: 999, + diffLines: 999, + maxChars: 50000, + }, + edit: { + maxSamples: 999, + diffLines: 999, + maxChars: 50000, + }, + grep: { + maxSamples: 999, + maxChars: 10000, + showPattern: true, + matchLines: 50, + }, + mcp: { + maxSamplesPerNamespace: 999, + paramChars: 10000, + resultChars: 10000, + thinkingTools: { + extractReasoning: true, + maxReasoningChars: 10000, + }, + }, + task: { + maxSamples: 999, + includeSubagentResults: true, + subagentResultChars: 10000, + recurseSubagents: true, + }, + thinking: { + include: true, + maxChars: 50000, + maxHighlights: 50, + }, + compactSummary: { + maxChars: 5000, + }, + pendingTasks: { + extractFromThinking: true, + extractFromSubagents: true, + maxTasks: 100, + }, + agents: { + claude: { + filterProgressEvents: false, + parseSubagents: true, + parseToolResultsDir: true, + separateHumanFromToolResults: true, + chainCompactedHistory: true, + chainMaxDepth: 8, + chainSummaryChars: 5000, + }, + }, +}; + +const PRESETS: Record = { + minimal: MINIMAL_PRESET, + standard: STANDARD_PRESET, + verbose: VERBOSE_PRESET, + full: FULL_PRESET, +}; + +// ── Deep Merge ────────────────────────────────────────────────────────────── + +function isPlainObject(value: unknown): value is Record { + return typeof value === 'object' && value !== null && !Array.isArray(value); +} + +/** + * Recursively merge `overrides` into `base`. + * - Plain objects are merged key-by-key (override wins for leaf values). + * - Arrays and primitives in overrides replace the base value entirely. + * - Keys not present in overrides are kept from base. 
+ */ +function deepMerge>(base: T, overrides: Record): T { + const result = { ...base } as Record; + + for (const key of Object.keys(overrides)) { + const baseVal = result[key]; + const overVal = overrides[key]; + + if (isPlainObject(baseVal) && isPlainObject(overVal)) { + result[key] = deepMerge(baseVal as Record, overVal); + } else if (overVal !== undefined) { + result[key] = overVal; + } + } + + return result as T; +} + +// ── Public API ────────────────────────────────────────────────────────────── + +/** Get a built-in preset by name. Throws on unknown preset. */ +export function getPreset(name: string): VerbosityConfig { + const key = name as PresetName; + const preset = PRESETS[key]; + if (!preset) { + throw new Error(`Unknown verbosity preset "${name}". Valid presets: ${Object.keys(PRESETS).join(', ')}`); + } + return structuredClone(preset); +} + +/** Deep-merge user overrides onto a base config. */ +export function mergeConfig(base: VerbosityConfig, overrides: Partial): VerbosityConfig { + return deepMerge(base, overrides as Record); +} + +/** + * Parse and validate raw user YAML/JSON data into a VerbosityConfig. + * Unknown fields are stripped. Invalid fields fall back to defaults from the + * selected preset (or `standard` if no preset is specified). + */ +function parseUserConfig(raw: unknown): VerbosityConfig { + if (!isPlainObject(raw)) { + logger.warn('Config file is not a plain object, using standard preset'); + return getPreset('standard'); + } + + // Determine which preset to use as the base + const presetName = typeof raw.preset === 'string' && raw.preset in PRESETS ? 
(raw.preset as PresetName) : 'standard'; + const base = getPreset(presetName); + + // Merge user overrides onto the preset base, then validate + const merged = deepMerge(base, raw as Record); + const result = VerbosityConfigSchema.safeParse(merged); + + if (result.success) { + return result.data; + } + + // Validation failed — log issues and return the base preset + logger.warn('Config validation errors, falling back to preset defaults:', result.error.issues.map((i) => i.message).join('; ')); + return base; +} + +/** + * Load verbosity config from disk using the resolution chain: + * 1. Explicit path (from `--config` CLI flag) + * 2. `.continues.yml` in the current working directory + * 3. `~/.continues/config.yml` in the user's home directory + * 4. Built-in `standard` preset + * + * Partial configs are deep-merged over the selected preset defaults, + * so users only need to specify the fields they want to change. + */ +export function loadConfig(configPath?: string): VerbosityConfig { + const candidates: string[] = []; + + if (configPath) { + candidates.push(path.resolve(configPath)); + } + + candidates.push(path.resolve(process.cwd(), '.continues.yml')); + candidates.push(path.join(os.homedir(), '.continues', 'config.yml')); + + for (const filePath of candidates) { + if (!fs.existsSync(filePath)) { + logger.debug('Config not found:', filePath); + continue; + } + + try { + const content = fs.readFileSync(filePath, 'utf8'); + const raw = YAML.parse(content) as unknown; + logger.info('Loaded config from', filePath); + return parseUserConfig(raw); + } catch (err) { + const msg = err instanceof Error ? 
err.message : String(err); + logger.warn(`Failed to read config at ${filePath}: ${msg}`); + // Continue to next candidate + } + } + + logger.debug('No config file found, using standard preset'); + return getPreset('standard'); +} diff --git a/src/display/banner.ts b/src/display/banner.ts new file mode 100644 index 0000000..5de7bbd --- /dev/null +++ b/src/display/banner.ts @@ -0,0 +1,312 @@ +import { emitKeypressEvents } from 'node:readline'; +import chalk from 'chalk'; + +type RotatingBannerLine = () => string; +type KeyPress = { name?: string; ctrl?: boolean }; + +const TAB_CYCLE_TIMEOUT_MS = 1400; +const TAB_CYCLE_HINT = chalk.gray('[Tab to cycle]'); + +const FALLBACK_BANNER_LINE: RotatingBannerLine = () => + `${chalk.gray('💡 Tip:')} ${chalk.gray('Run')} ${chalk.cyan('continues --help')} ${chalk.gray( + 'for examples and preset recipes.', + )}`; + +const PRESET_PROMO_LINES: RotatingBannerLine[] = [ + () => + `${chalk.gray('🎛️ Preset:')} ${chalk.cyan('full')} ${chalk.gray( + 'is ideal for rich handoff context (npx continues --preset full).', + )}`, + () => + `${chalk.gray('🎛️ Preset:')} ${chalk.cyan('minimal')} ${chalk.gray( + 'keeps output compact when you only need the essentials.', + )}`, + () => + `${chalk.gray('🎛️ Preset:')} ${chalk.cyan('verbose')} ${chalk.gray( + 'adds deeper tool activity while staying shorter than full.', + )}`, + () => + `${chalk.gray('🎛️ Preset:')} ${chalk.cyan('standard')} ${chalk.gray( + 'is balanced for daily resume + inspect flows.', + )}`, +]; + +const STAR_PROMO_LINES: RotatingBannerLine[] = [ + () => + `${chalk.bgHex('#FFD93D').black.bold(' ⭐ LOVE CONTINUES? 
')} ${chalk + .hex('#FFD93D') + .bold('Star:')} ${chalk.hex('#00FFC8').bold('github.com/yigitkonur/cli-continues')}`, +]; + +const GENERAL_BANNER_LINES: RotatingBannerLine[] = [ + () => + `${chalk.gray('💡 Tip:')} ${chalk.gray('Use')} ${chalk.cyan('continues inspect --preset full')} ${chalk.gray( + 'for maximum handoff detail.', + )}`, + () => + `${chalk.gray('💡 Tip:')} ${chalk.gray('Run')} ${chalk.cyan('continues dump all ./handoffs --preset verbose')} ${chalk.gray( + 'to export readable archives.', + )}`, + () => + `${chalk.gray('🧠 Fact:')} ${chalk.gray( + 'Presets cascade into inspect/dump output, so one flag can tune your whole workflow.', + )}`, + () => + `${chalk.gray('🧠 Fact:')} ${chalk.gray( + 'The', + )} ${chalk.cyan('cont')} ${chalk.gray('alias is built in — use')} ${chalk.cyan('cont claude')} ${chalk.gray( + 'for fast quick-resume.', + )}`, + () => + `${chalk.gray('💡 Tip:')} ${chalk.gray('Pair')} ${chalk.cyan('--config .continues.yml')} ${chalk.gray( + 'with', + )} ${chalk.cyan('--preset')} ${chalk.gray('for per-project defaults + one-off overrides.')}`, + () => + `${chalk.gray('💡 Tip:')} ${chalk.gray('Run')} ${chalk.cyan('continues resume --in gemini')} ${chalk.gray( + 'to hand off a Codex/Claude session cross-tool.', + )}`, + () => + `${chalk.gray('💡 Tip:')} ${chalk.gray('Use')} ${chalk.cyan('continues list --jsonl | jq')} ${chalk.gray( + 'when scripting filters around session metadata.', + )}`, + () => + `${chalk.gray('💡 Tip:')} ${chalk.gray('Try')} ${chalk.cyan('continues scan --rebuild')} ${chalk.gray( + 'if a freshly-created session is missing from the index.', + )}`, + () => + `${chalk.gray('💡 Tip:')} ${chalk.gray('Use')} ${chalk.cyan('continues inspect --write-md handoff.md')} ${chalk.gray( + 'to save a portable handoff file.', + )}`, + () => + `${chalk.gray('🧠 Fact:')} ${chalk.gray('Use')} ${chalk.cyan('--all')} ${chalk.gray( + 'to skip CWD filtering and browse every discovered session.', + )}`, + () => + `${chalk.gray('🧠 Fact:')} 
${chalk.gray('You can forward args to target CLIs after')} ${chalk.cyan('--')} ${chalk.gray( + '(e.g. continues claude 1 -- --dangerously-skip-permissions).', + )}`, + () => + `${chalk.gray('🧠 Fact:')} ${chalk.gray('Inspect + dump both honor')} ${chalk.cyan('--preset')} ${chalk.gray( + 'so detail level stays consistent across tools.', + )}`, + () => + `${chalk.gray('🧠 Fact:')} ${chalk.gray('Need quick routing?')} ${chalk.cyan('cont claude')} ${chalk.gray( + 'and', + )} ${chalk.cyan('cont codex')} ${chalk.gray('jump straight into recent sessions.')}`, +]; + +const ALL_ROTATING_BANNER_LINES: RotatingBannerLine[] = [ + ...PRESET_PROMO_LINES, + ...STAR_PROMO_LINES, + ...GENERAL_BANNER_LINES, +]; + +function pickRandomLine(lines: RotatingBannerLine[]): RotatingBannerLine { + if (lines.length === 0) return FALLBACK_BANNER_LINE; + const index = Math.floor(Math.random() * lines.length); + return lines[index] ?? lines[0] ?? FALLBACK_BANNER_LINE; +} + +function pickWeightedInitialLine(): RotatingBannerLine { + const roll = Math.random(); + if (roll < 0.5) return pickRandomLine(PRESET_PROMO_LINES); // 50% + if (roll < 0.6) return pickRandomLine(STAR_PROMO_LINES); // 10% + return pickRandomLine(GENERAL_BANNER_LINES); // 40% +} + +function shuffleLines(lines: RotatingBannerLine[]): RotatingBannerLine[] { + const shuffled = [...lines]; + for (let i = shuffled.length - 1; i > 0; i--) { + const j = Math.floor(Math.random() * (i + 1)); + const current = shuffled[i]; + const target = shuffled[j]; + if (!current || !target) continue; + shuffled[i] = target; + shuffled[j] = current; + } + return shuffled; +} + +function createCycleDeck(): RotatingBannerLine[] { + const initial = pickWeightedInitialLine(); + const rest = shuffleLines(ALL_ROTATING_BANNER_LINES.filter((line) => line !== initial)); + return [initial, ...rest]; +} + +function renderLineFromDeck(cycleDeck: RotatingBannerLine[], index: number): string { + const lineFactory = cycleDeck[index] ?? cycleDeck[0] ?? 
FALLBACK_BANNER_LINE; + return lineFactory(); +} + +async function showRotatingBannerLine(): Promise { + const cycleDeck = createCycleDeck(); + const stdin = process.stdin; + const stdout = process.stdout; + const canCycle = stdin.isTTY && stdout.isTTY && !process.env.CI && typeof stdin.setRawMode === 'function'; + + if (!canCycle) { + console.log(` ${renderLineFromDeck(cycleDeck, 0)}`); + console.log(); + return false; + } + + let index = 0; + let timeout: NodeJS.Timeout | undefined; + + return await new Promise((resolve) => { + let finished = false; + let rawModeEnabled = false; + + const render = (): void => { + const line = renderLineFromDeck(cycleDeck, index); + stdout.write(`\r\x1B[2K ${line} ${TAB_CYCLE_HINT}`); + }; + + const teardownInput = (): void => { + if (timeout) clearTimeout(timeout); + stdin.off('keypress', onKeyPress); + if (rawModeEnabled) { + stdin.setRawMode(false); + rawModeEnabled = false; + } + stdin.pause(); + }; + + const finish = (): void => { + if (finished) return; + finished = true; + teardownInput(); + stdout.write('\n\n'); + resolve(false); + }; + + const abortFromCtrlC = (): void => { + if (finished) return; + finished = true; + teardownInput(); + stdout.write('\n\n'); + if (process.exitCode === undefined) { + process.exitCode = 130; + } + resolve(true); + }; + + const armTimeout = (): void => { + if (timeout) clearTimeout(timeout); + timeout = setTimeout(finish, TAB_CYCLE_TIMEOUT_MS); + }; + + const onKeyPress = (_input: string, key: KeyPress): void => { + if (key.ctrl && key.name === 'c') { + abortFromCtrlC(); + return; + } + + if (key.name === 'tab') { + index = (index + 1) % cycleDeck.length; + render(); + armTimeout(); + return; + } + + if (key.name === 'return' || key.name === 'enter' || key.name === 'escape' || key.name === 'space') { + finish(); + } + }; + + try { + emitKeypressEvents(stdin); + stdin.resume(); + stdin.setRawMode(true); + rawModeEnabled = true; + stdin.on('keypress', onKeyPress); + render(); + armTimeout(); + 
} catch { + if (!finished) { + finished = true; + teardownInput(); + console.log(` ${renderLineFromDeck(cycleDeck, 0)}`); + console.log(); + resolve(false); + } + } + }); +} + +/** + * ASCII art banner with warm-to-cool gradient and highlighted 's' brand mark. + * All letters are exactly 4 chars wide, 1-space separated, 3 rows. + */ +export async function showBanner(version: string, supportsColor: boolean): Promise { + if (!supportsColor) return false; + + // Letter glyphs: [top, mid, bot], each 4 chars wide + const glyphs: string[][] = [ + ['\u2588\u2580\u2580\u2580', '\u2588 ', '\u2580\u2580\u2580\u2580'], // c + ['\u2588\u2580\u2580\u2588', '\u2588 \u2588', '\u2580\u2580\u2580\u2580'], // o + ['\u2588\u2580\u2580\u2584', '\u2588 \u2588', '\u2580 \u2580'], // n + ['\u2580\u2588\u2588\u2580', ' \u2588\u2588 ', ' \u2580\u2580 '], // t + [' \u2588\u2588 ', ' \u2588\u2588 ', ' \u2580\u2580 '], // i + ['\u2588\u2580\u2580\u2584', '\u2588 \u2588', '\u2580 \u2580'], // n + ['\u2588 \u2588', '\u2588 \u2588', '\u2580\u2580\u2580\u2580'], // u + ['\u2588\u2580\u2580\u2588', '\u2588\u2580\u2580 ', '\u2580\u2580\u2580\u2580'], // e + ['\u2588\u2580\u2580\u2580', '\u2580\u2580\u2580\u2588', '\u2580\u2580\u2580\u2580'], // s + ]; + + // Gradient: coral -> orange -> gold -> emerald -> blue -> sky -> purple -> mint + const colors = [ + chalk.hex('#FF6B6B'), // c — coral + chalk.hex('#FF8E53'), // o — orange + chalk.hex('#FFA940'), // n — amber + chalk.hex('#FFD93D'), // t — gold + chalk.hex('#6BCB77'), // i — emerald + chalk.hex('#4D96FF'), // n — blue + chalk.hex('#38B6FF'), // u — sky + chalk.hex('#6C5CE7'), // e — purple + chalk.hex('#00FFC8').bold, // s — mint + ]; + + console.log(); + for (let row = 0; row < 3; row++) { + let line = ' '; + for (let i = 0; i < glyphs.length; i++) { + line += colors[i](glyphs[i][row]); + if (i < glyphs.length - 1) line += ' '; + } + console.log(line); + } + console.log(); + console.log( + ' ' + + chalk.bold.white(`v${version}`) + + 
chalk.gray(' — never lose context across ') + + chalk.cyan('14 AI coding agents'), + ); + console.log(); + console.log( + ' ' + + chalk.gray('🔄 Cross-tool handoff') + + chalk.gray(' · ') + + chalk.gray('🔎 Inspect mode') + + chalk.gray(' · ') + + chalk.gray('⚙️ YAML config') + + chalk.gray(' · ') + + chalk.gray('🌍 Env var overrides'), + ); + console.log( + ' ' + + chalk.gray('🎛️ Try Presets:') + + chalk.gray(' · ') + + chalk.cyan('minimal') + + chalk.gray(' · ') + + chalk.cyan('standard') + + chalk.gray(' · ') + + chalk.cyan('verbose') + + chalk.gray(' · ') + + chalk.cyan('full') + + chalk.gray(' (eg: npx continues --preset full for better context handoff!)'), + ); + console.log(` ${chalk.gray('💡 cont or continues to quick-resume')}`); + return await showRotatingBannerLine(); +} diff --git a/src/display/format.ts b/src/display/format.ts new file mode 100644 index 0000000..744ef0d --- /dev/null +++ b/src/display/format.ts @@ -0,0 +1,50 @@ +import chalk from 'chalk'; +import { adapters } from '../parsers/registry.js'; +import type { SessionSource, UnifiedSession } from '../types/index.js'; + +/** + * Source-specific colors for consistent branding -- derived from the adapter registry + */ +export const sourceColors = Object.fromEntries(Object.values(adapters).map((a) => [a.name, a.color])) as Record< + SessionSource, + (s: string) => string +>; + +/** + * Format session with colors in columnar layout + * Format: [source] YYYY-MM-DD HH:MM project-name summary... 
short-id + */ +export function formatSessionColored(session: UnifiedSession): string { + const colorFn = sourceColors[session.source] || chalk.white; + const tag = `[${session.source}]`; + const source = colorFn(tag.padEnd(10)); + + const date = chalk.gray(session.updatedAt.toISOString().slice(0, 16).replace('T', ' ')); + + // Show repo or last folder of cwd + const repoDisplay = session.repo || session.cwd.split('/').slice(-2).join('/') || ''; + const repo = chalk.cyan(repoDisplay.slice(0, 20).padEnd(20)); + + // Summary - truncate nicely + const summaryText = session.summary || '(no summary)'; + const summary = (session.summary ? chalk.white(summaryText.slice(0, 44)) : chalk.gray(summaryText)).padEnd(44); + + // Short ID + const id = chalk.gray(session.id.slice(0, 8)); + + return `${source} ${date} ${repo} ${summary} ${id}`; +} + +/** + * Format session for clack select - simpler, cleaner + */ +export function formatSessionForSelect(session: UnifiedSession): string { + const colorFn = sourceColors[session.source] || chalk.white; + const tag = `[${session.source}]`; + const source = colorFn(tag.padEnd(10)); + const date = session.updatedAt.toISOString().slice(0, 16).replace('T', ' '); + const repoDisplay = session.repo || session.cwd.split('/').slice(-1)[0] || ''; + const summary = (session.summary || '(no summary)').slice(0, 48); + + return `${source} ${date} ${chalk.cyan(repoDisplay.padEnd(20))} ${summary}`; +} diff --git a/src/display/help.ts b/src/display/help.ts new file mode 100644 index 0000000..c126101 --- /dev/null +++ b/src/display/help.ts @@ -0,0 +1,16 @@ +import * as clack from '@clack/prompts'; +import chalk from 'chalk'; +import { adapters } from '../parsers/registry.js'; + +/** + * Show helpful error when no sessions found + */ +export function showNoSessionsHelp(): void { + clack.log.error('No sessions found.'); + console.log(); + console.log(chalk.gray('Sessions are stored in:')); + for (const a of Object.values(adapters)) { + const envHint = 
a.envVar ? chalk.gray(` (override: $${a.envVar})`) : ''; + console.log(chalk.gray(` ${a.storagePath}`) + envHint); + } +} diff --git a/src/display/star-prompt.ts b/src/display/star-prompt.ts new file mode 100644 index 0000000..f1b3442 --- /dev/null +++ b/src/display/star-prompt.ts @@ -0,0 +1,82 @@ +/** + * One-time GitHub star prompt shown on first interactive run. + * Skipped when: no TTY, gh CLI not installed, or already prompted. + * State persisted in ~/.continues/star-prompt.json. + */ + +import { readFile, writeFile, mkdir } from 'fs/promises'; +import { existsSync } from 'fs'; +import { join } from 'path'; +import { homedir } from 'os'; +import { spawnSync } from 'child_process'; +import { SHELL_OPTION } from '../utils/platform.js'; +import chalk from 'chalk'; +import * as clack from '@clack/prompts'; + +const REPO = 'yigitkonur/cli-continues'; + +function statePath(): string { + return join(homedir(), '.continues', 'star-prompt.json'); +} + +async function hasBeenPrompted(): Promise { + const p = statePath(); + if (!existsSync(p)) return false; + try { + const content = await readFile(p, 'utf-8'); + const state = JSON.parse(content); + return typeof state.prompted_at === 'string'; + } catch { + return false; + } +} + +async function markPrompted(): Promise { + const dir = join(homedir(), '.continues'); + await mkdir(dir, { recursive: true }); + await writeFile( + statePath(), + JSON.stringify({ prompted_at: new Date().toISOString() }, null, 2), + ); +} + +function isGhInstalled(): boolean { + const result = spawnSync('gh', ['--version'], { + encoding: 'utf-8', + stdio: ['ignore', 'ignore', 'ignore'], + timeout: 3000, + ...SHELL_OPTION, + }); + return !result.error && result.status === 0; +} + +function starRepo(): boolean { + const result = spawnSync('gh', ['api', '-X', 'PUT', `/user/starred/${REPO}`], { + encoding: 'utf-8', + stdio: ['ignore', 'ignore', 'ignore'], + timeout: 10000, + ...SHELL_OPTION, + }); + return !result.error && result.status === 0; +} 
+ +export async function maybePromptGithubStar(): Promise { + if (!process.stdin.isTTY || !process.stdout.isTTY) return; + if (await hasBeenPrompted()) return; + if (!isGhInstalled()) return; + + // Mark before asking so we never re-prompt even if interrupted + await markPrompted(); + + const shouldStar = await clack.confirm({ + message: chalk.hex('#FFD93D')('⭐') + ' ' + chalk.gray('Enjoying continues? Star it on GitHub?'), + initialValue: true, + }); + + if (clack.isCancel(shouldStar) || !shouldStar) return; + + const ok = starRepo(); + if (ok) { + clack.log.success(chalk.hex('#00FFC8')('Thanks for the star! 🎉')); + } +} diff --git a/src/errors.ts b/src/errors.ts new file mode 100644 index 0000000..c9d54f5 --- /dev/null +++ b/src/errors.ts @@ -0,0 +1,72 @@ +/** + * Typed error hierarchy for continues. + * Replaces anonymous Error throws with machine-readable error types. + */ + +/** + * Base error for all continues errors. + * Includes an optional `cause` for error chaining. + */ +export class ContinuesError extends Error { + override readonly name: string = 'ContinuesError'; + constructor(message: string, options?: { cause?: unknown }) { + super(message, options); + } +} + +/** Thrown when a parser fails to read or interpret session data. */ +export class ParseError extends ContinuesError { + override readonly name = 'ParseError'; + constructor( + public readonly source: string, + public readonly filePath: string, + message: string, + options?: { cause?: unknown }, + ) { + super(`[${source}] ${message} (${filePath})`, options); + } +} + +/** Thrown when a requested session cannot be found by ID or path. */ +export class SessionNotFoundError extends ContinuesError { + override readonly name = 'SessionNotFoundError'; + constructor(public readonly sessionId: string) { + super(`Session not found: ${sessionId}`); + } +} + +/** Thrown when a tool binary is not available on PATH. 
*/ +export class ToolNotAvailableError extends ContinuesError { + override readonly name = 'ToolNotAvailableError'; + constructor(public readonly tool: string) { + super(`Tool not available: ${tool}. Is it installed and on your PATH?`); + } +} + +/** Thrown when an unknown source name is provided. */ +export class UnknownSourceError extends ContinuesError { + override readonly name = 'UnknownSourceError'; + constructor(public readonly source: string) { + super(`Unknown source: "${source}". Valid sources: claude, codex, copilot, gemini, opencode, droid, cursor`); + } +} + +/** Thrown when the session index cannot be read or written. */ +export class IndexError extends ContinuesError { + override readonly name = 'IndexError'; + constructor(message: string, options?: { cause?: unknown }) { + super(message, options); + } +} + +/** Thrown when file storage operations fail (read/write handoff, cache). */ +export class StorageError extends ContinuesError { + override readonly name = 'StorageError'; + constructor( + public readonly filePath: string, + message: string, + options?: { cause?: unknown }, + ) { + super(`${message}: ${filePath}`, options); + } +} diff --git a/src/index.ts b/src/index.ts new file mode 100644 index 0000000..866c38b --- /dev/null +++ b/src/index.ts @@ -0,0 +1,95 @@ +/** + * continues — Public API + * + * Resume any AI coding session across Claude, Copilot, Gemini, Codex, + * OpenCode, Droid, and Cursor. 
+ * + * @example + * ```ts + * import { getAllSessions, extractContext, adapters } from 'continues'; + * + * const sessions = await getAllSessions(); + * const ctx = await extractContext(sessions[0]); + * console.log(ctx.markdown); + * ``` + */ + +// ── Errors ─────────────────────────────────────────────────────────── +export { + ContinuesError, + IndexError, + ParseError, + SessionNotFoundError, + StorageError, + ToolNotAvailableError, + UnknownSourceError, +} from './errors.js'; +// ── Logger ─────────────────────────────────────────────────────────── +export type { LogLevel } from './logger.js'; +export { getLogLevel, logger, setLogLevel } from './logger.js'; + +// ── Registry ───────────────────────────────────────────────────────── +export type { ToolAdapter } from './parsers/registry.js'; +export { ALL_TOOLS, adapters, SOURCE_HELP } from './parsers/registry.js'; +export type { ContentBlock, TextBlock, ThinkingBlock, ToolResultBlock, ToolUseBlock } from './types/content-blocks.js'; +// ── Types ──────────────────────────────────────────────────────────── +export type { + AskSampleData, + ConversationMessage, + EditSampleData, + FetchSampleData, + GlobSampleData, + GrepSampleData, + HandoffOptions, + McpSampleData, + ReadSampleData, + SearchSampleData, + SessionContext, + SessionNotes, + SessionSource, + ShellSampleData, + StructuredToolSample, + TaskSampleData, + ToolCall, + ToolSample, + ToolSampleCategory, + ToolUsageSummary, + UnifiedSession, + WriteSampleData, +} from './types/index.js'; +export { classifyToolName, TOOL_NAMES } from './types/tool-names.js'; +export type { + CanonicalFlagKey, + FlagOccurrence, + ForwardMapResult, + ForwardResolution, + HandoffForwardingOptions, + ParsedForwardFlags, +} from './utils/forward-flags.js'; +export { parseForwardFlags, resolveForwardingArgs } from './utils/forward-flags.js'; +// ── Session Operations ─────────────────────────────────────────────── +export { + buildIndex, + ensureDirectories, + extractContext, + 
findSession, + formatSession, + getAllSessions, + getCachedContext, + getSessionsBySource, + indexNeedsRebuild, + loadIndex, + saveContext, + sessionsToJsonl, +} from './utils/index.js'; +// ── Markdown ───────────────────────────────────────────────────────── +export { generateHandoffMarkdown, getSourceLabels } from './utils/markdown.js'; +// ── Resume ─────────────────────────────────────────────────────────── +export { + crossToolResume, + getAvailableTools, + getResumeCommand, + nativeResume, + resolveCrossToolForwarding, + resume, +} from './utils/resume.js'; diff --git a/src/logger.ts b/src/logger.ts new file mode 100644 index 0000000..68f66fc --- /dev/null +++ b/src/logger.ts @@ -0,0 +1,79 @@ +/** + * Simple log-level logger for continues. + * Replaces empty catch {} blocks with debug/warn output. + * + * Usage: + * import { logger, setLogLevel } from './logger.js'; + * logger.debug('parsing session', filePath); + * logger.warn('skipping invalid line', line); + * + * Log levels (increasing verbosity): + * silent → error → warn → info → debug + * + * Control via: + * - setLogLevel('debug') from code + * - CONTINUES_DEBUG=1 env var (sets 'debug') + * - --verbose CLI flag (sets 'info') + * - --debug CLI flag (sets 'debug') + */ + +export type LogLevel = 'silent' | 'error' | 'warn' | 'info' | 'debug'; + +const LEVELS: Record = { + silent: 0, + error: 1, + warn: 2, + info: 3, + debug: 4, +}; + +let currentLevel: LogLevel = 'silent'; + +/** Set the global log level. */ +export function setLogLevel(level: LogLevel): void { + currentLevel = level; +} + +/** Get the current log level. */ +export function getLogLevel(): LogLevel { + return currentLevel; +} + +function shouldLog(level: LogLevel): boolean { + return LEVELS[level] <= LEVELS[currentLevel]; +} + +function formatArgs(args: unknown[]): string { + return args.map((a) => (typeof a === 'string' ? 
a : JSON.stringify(a))).join(' '); +} + +export const logger = { + error(...args: unknown[]): void { + if (shouldLog('error')) { + console.error(`[continues:error] ${formatArgs(args)}`); + } + }, + + warn(...args: unknown[]): void { + if (shouldLog('warn')) { + console.warn(`[continues:warn] ${formatArgs(args)}`); + } + }, + + info(...args: unknown[]): void { + if (shouldLog('info')) { + console.info(`[continues:info] ${formatArgs(args)}`); + } + }, + + debug(...args: unknown[]): void { + if (shouldLog('debug')) { + console.debug(`[continues:debug] ${formatArgs(args)}`); + } + }, +}; + +// Auto-configure from environment +if (process.env.CONTINUES_DEBUG === '1' || process.env.CONTINUES_DEBUG === 'true') { + setLogLevel('debug'); +} diff --git a/src/parsers/amp.ts b/src/parsers/amp.ts new file mode 100644 index 0000000..b1a50ea --- /dev/null +++ b/src/parsers/amp.ts @@ -0,0 +1,292 @@ +import * as fs from 'fs'; +import * as path from 'path'; +import { logger } from '../logger.js'; +import type { + ConversationMessage, + SessionContext, + SessionNotes, + ToolUsageSummary, + UnifiedSession, +} from '../types/index.js'; +import { generateHandoffMarkdown } from '../utils/markdown.js'; +import { cleanSummary, homeDir } from '../utils/parser-helpers.js'; +import { findFiles } from '../utils/fs-helpers.js'; +import type { VerbosityConfig } from '../config/index.js'; +import { getPreset } from '../config/index.js'; +import { truncate } from '../utils/tool-summarizer.js'; + +// ── Amp Thread JSON shape ─────────────────────────────────────────────────── +// Minimal interfaces matching ~/.local/share/amp/threads/{id}.json + +interface AmpContentBlock { + type: string; + text?: string; + provider?: string; +} + +interface AmpMessage { + role: 'user' | 'assistant'; + messageId: number; + content: AmpContentBlock[]; +} + +interface AmpUsageEvent { + model?: string; + credits?: number; + tokens?: { input?: number; output?: number }; + operationType?: string; + fromMessageId?: 
number; + toMessageId?: number; +} + +interface AmpThread { + id: string; + title?: string; + created: number; // milliseconds since epoch + messages: AmpMessage[]; + usageLedger?: { + events?: AmpUsageEvent[]; + }; + env?: { + initial?: { + tags?: string[]; + }; + }; +} + +const AMP_BASE_DIR = process.env.XDG_DATA_HOME + ? path.join(process.env.XDG_DATA_HOME, 'amp', 'threads') + : path.join(homeDir(), '.local', 'share', 'amp', 'threads'); + +/** + * Find all Amp thread JSON files + */ +function findSessionFiles(): string[] { + return findFiles(AMP_BASE_DIR, { + match: (entry) => entry.name.endsWith('.json'), + recursive: false, + }); +} + +/** + * Parse a single Amp thread file. Returns null on any parse error. + */ +function parseThreadFile(filePath: string): AmpThread | null { + try { + const content = fs.readFileSync(filePath, 'utf8'); + const data = JSON.parse(content); + + // Minimal validation: must have id, created timestamp, and messages array + if (typeof data.id !== 'string' || typeof data.created !== 'number' || !Array.isArray(data.messages)) { + logger.debug('amp: thread validation failed — missing id, created, or messages', filePath); + return null; + } + + return data as AmpThread; + } catch (err) { + logger.debug('amp: failed to parse thread file', filePath, err); + return null; + } +} + +/** + * Concatenate text from an Amp message's content blocks + */ +function extractMessageText(message: AmpMessage): string { + if (!message.content || !Array.isArray(message.content)) return ''; + return message.content + .filter((block) => block.type === 'text' && typeof block.text === 'string') + .map((block) => block.text!) 
+ .join('\n') + .trim(); +} + +/** + * Extract the first real user message for use as a session summary + */ +function extractFirstUserMessage(thread: AmpThread): string { + for (const msg of thread.messages) { + if (msg.role === 'user') { + const text = extractMessageText(msg); + if (text) return text; + } + } + return ''; +} + +/** + * Extract model identifier from env.initial.tags (e.g. "model:claude-opus-4-5-20251101" → "claude-opus-4-5-20251101") + */ +function extractModel(thread: AmpThread): string | undefined { + const tags = thread.env?.initial?.tags; + if (!Array.isArray(tags)) return undefined; + + for (const tag of tags) { + if (typeof tag === 'string' && tag.startsWith('model:')) { + return tag.slice('model:'.length); + } + } + return undefined; +} + +/** + * Extract session notes: model info and token usage from usageLedger + */ +function extractSessionNotes(thread: AmpThread): SessionNotes { + const notes: SessionNotes = {}; + + const model = extractModel(thread); + if (model) notes.model = model; + + // Accumulate token usage from ledger events, skipping title-generation + const events = thread.usageLedger?.events; + if (Array.isArray(events)) { + let inputTokens = 0; + let outputTokens = 0; + + for (const event of events) { + if (event.operationType === 'title-generation') continue; + + if (event.tokens) { + inputTokens += event.tokens.input ?? 0; + outputTokens += event.tokens.output ?? 
0; + } + + // Use the first non-title-generation model as fallback if env tags didn't provide one + if (!notes.model && event.model) { + notes.model = event.model; + } + } + + if (inputTokens > 0 || outputTokens > 0) { + notes.tokenUsage = { input: inputTokens, output: outputTokens }; + } + } + + return notes; +} + +/** + * Parse all Amp sessions + */ +export async function parseAmpSessions(): Promise { + const files = findSessionFiles(); + const sessions: UnifiedSession[] = []; + + for (const filePath of files) { + try { + const thread = parseThreadFile(filePath); + if (!thread || !thread.id) continue; + + const firstUserMessage = extractFirstUserMessage(thread); + const summary = cleanSummary(thread.title || firstUserMessage); + + const fileStats = fs.statSync(filePath); + const content = fs.readFileSync(filePath, 'utf8'); + const lines = content.split('\n').length; + + sessions.push({ + id: thread.id, + source: 'amp', + cwd: '', + repo: '', + lines, + bytes: fileStats.size, + createdAt: new Date(thread.created), + updatedAt: new Date(fileStats.mtimeMs), + originalPath: filePath, + summary: summary || undefined, + model: extractModel(thread), + }); + } catch (err) { + logger.debug('amp: skipping unparseable thread', filePath, err); + } + } + + return sessions + .filter((s) => s.summary && s.summary.length > 0) + .sort((a, b) => b.updatedAt.getTime() - a.updatedAt.getTime()); +} + +/** + * Extract context from an Amp session for cross-tool continuation + */ +export async function extractAmpContext(session: UnifiedSession, config?: VerbosityConfig): Promise { + const cfg = config ?? 
getPreset('standard'); + const thread = parseThreadFile(session.originalPath); + const recentMessages: ConversationMessage[] = []; + const filesModified: string[] = []; + const pendingTasks: string[] = []; + const toolSummaries: ToolUsageSummary[] = []; + let sessionNotes: SessionNotes | undefined; + + if (thread) { + sessionNotes = extractSessionNotes(thread); + + // Convert Amp messages to unified ConversationMessage format. + // Slice to recent window (×2 to account for user+assistant pairs, matching gemini pattern). + for (const msg of thread.messages.slice(-cfg.recentMessages * 2)) { + const text = extractMessageText(msg); + if (!text) continue; + + if (msg.role === 'user' || msg.role === 'assistant') { + recentMessages.push({ + role: msg.role, + content: text, + // Amp threads don't carry per-message timestamps; use thread creation as fallback + timestamp: new Date(thread.created), + }); + } + } + + // Scan last few assistant messages for pending-task signals + const assistantMessages = thread.messages.filter((m) => m.role === 'assistant'); + for (const msg of assistantMessages.slice(-3)) { + if (pendingTasks.length >= 5) break; + const text = extractMessageText(msg).toLowerCase(); + if ( + text.includes('todo') || + text.includes('next step') || + text.includes('remaining') || + text.includes('need to') + ) { + // Extract the first sentence containing the keyword as the task hint + const sentences = extractMessageText(msg).split(/[.!\n]/).filter(Boolean); + for (const sentence of sentences) { + if (pendingTasks.length >= 5) break; + const lower = sentence.toLowerCase(); + if ( + lower.includes('todo') || + lower.includes('next step') || + lower.includes('remaining') || + lower.includes('need to') + ) { + pendingTasks.push(truncate(sentence.trim(), 120)); + } + } + } + } + } + + const trimmed = recentMessages.slice(-cfg.recentMessages); + + const markdown = generateHandoffMarkdown( + session, + trimmed, + filesModified, + pendingTasks, + toolSummaries, + 
sessionNotes, + cfg, + ); + + return { + session: sessionNotes?.model ? { ...session, model: sessionNotes.model } : session, + recentMessages: trimmed, + filesModified, + pendingTasks, + toolSummaries, + sessionNotes, + markdown, + }; +} diff --git a/src/parsers/antigravity.ts b/src/parsers/antigravity.ts new file mode 100644 index 0000000..4827d50 --- /dev/null +++ b/src/parsers/antigravity.ts @@ -0,0 +1,234 @@ +import * as fs from 'node:fs'; +import * as fsp from 'node:fs/promises'; +import * as path from 'node:path'; +import * as readline from 'node:readline'; +import type { VerbosityConfig } from '../config/index.js'; +import { getPreset } from '../config/index.js'; +import { logger } from '../logger.js'; +import type { ConversationMessage, SessionContext, SessionSource, UnifiedSession } from '../types/index.js'; +import { findFiles, listSubdirectories } from '../utils/fs-helpers.js'; +import { generateHandoffMarkdown } from '../utils/markdown.js'; +import { cleanSummary, homeDir } from '../utils/parser-helpers.js'; + +const ANTIGRAVITY_BASE_DIR = path.join( + process.env.GEMINI_CLI_HOME || homeDir(), + '.gemini', + 'antigravity', + 'code_tracker', +); + +const SOURCE_NAME: SessionSource = 'antigravity'; + +// ⚠️ FORMAT NOTE: This parser handles JSON conversation logs from Antigravity's +// code_tracker directory. Real Antigravity installations may also store raw file +// snapshots (binary/text diffs) in code_tracker/ — those are NOT parsed here. +// This parser processes *.json (and legacy *.jsonl) files containing {type, content, timestamp} entries. + +/** Shape of a single line entry after stripping the binary prefix */ +interface AntigravityEntry { + type: string; + timestamp: string; + content: string; +} + +// ── Line Parsing ──────────────────────────────────────────────────────────── + +/** + * Strip binary/protobuf prefix bytes that precede the JSON on each session file line. + * Returns the substring starting from the first `{`, or null if none found. 
+ */ +function stripBinaryPrefix(line: string): string | null { + const idx = line.indexOf('{'); + if (idx === -1) return null; + return line.slice(idx); +} + +/** + * Parse a single line into an entry. + * Returns null for empty lines, lines without JSON, or invalid payloads. + */ +function parseLine(line: string): AntigravityEntry | null { + if (!line) return null; + const json = stripBinaryPrefix(line); + if (!json) return null; + + try { + const obj = JSON.parse(json); + if (typeof obj === 'object' && obj !== null && typeof obj.type === 'string' && typeof obj.content === 'string') { + return { + type: obj.type, + timestamp: typeof obj.timestamp === 'string' ? obj.timestamp : '', + content: obj.content, + }; + } + return null; + } catch { + return null; + } +} + +// ── File I/O ──────────────────────────────────────────────────────────────── + +/** Read and parse all entries from an Antigravity session file (streamed) */ +async function parseSessionFile(filePath: string): Promise { + try { + const stream = fs.createReadStream(filePath, { encoding: 'utf8' }); + const rl = readline.createInterface({ + input: stream, + crlfDelay: Number.POSITIVE_INFINITY, + }); + const entries: AntigravityEntry[] = []; + for await (const line of rl) { + const entry = parseLine(line); + if (entry) entries.push(entry); + } + return entries; + } catch (err) { + logger.debug('antigravity: failed to read session file', filePath, err); + return []; + } +} + +/** Parse an RFC 3339 / ISO 8601 timestamp, falling back to a default Date */ +function parseTimestamp(ts: string, fallback: Date): Date { + if (!ts) return fallback; + const d = new Date(ts); + return Number.isNaN(d.getTime()) ? 
fallback : d; +} + +/** Tuple returned by findSessionFiles — captures the project directory at discovery time */ +interface SessionFileEntry { + filePath: string; + projectDir: string; +} + +/** Find all *.json / *.jsonl session files under the code_tracker project dirs */ +async function findSessionFiles(): Promise { + if (!fs.existsSync(ANTIGRAVITY_BASE_DIR)) return []; + + const results: SessionFileEntry[] = []; + for (const projectDir of listSubdirectories(ANTIGRAVITY_BASE_DIR)) { + for (const filePath of findFiles(projectDir, { + match: (entry) => entry.name.endsWith('.json') || entry.name.endsWith('.jsonl'), + recursive: true, + })) { + results.push({ filePath, projectDir }); + } + } + return results; +} + +/** + * Derive project name from the discovered project directory. + * "no_repo" falls back to "antigravity". + * Strips trailing _ suffix (e.g., "marketing_c6b0a246..." → "marketing"). + */ +function projectNameFromDir(projectDir: string): string { + const dirName = path.basename(projectDir); + if (dirName === 'no_repo') return 'antigravity'; + const hashSuffix = dirName.match(/_[0-9a-f]{8,}$/); + return hashSuffix ? 
dirName.slice(0, hashSuffix.index) : dirName; +} + +// ── Public API ────────────────────────────────────────────────────────────── + +/** + * Parse all Antigravity sessions from ~/.gemini/antigravity/code_tracker/ + */ +export async function parseAntigravitySessions(): Promise { + const files = await findSessionFiles(); + const sessions: UnifiedSession[] = []; + + for (const { filePath, projectDir } of files) { + try { + const entries = await parseSessionFile(filePath); + const relevant = entries.filter((e) => e.type === 'user' || e.type === 'assistant'); + if (relevant.length === 0) continue; + + const fileStats = await fsp.stat(filePath); + const mtime = fileStats.mtime; + + let sessionId = path.basename(filePath); + if (sessionId.endsWith('.json')) sessionId = sessionId.slice(0, -5); + else if (sessionId.endsWith('.jsonl')) sessionId = sessionId.slice(0, -6); + const projectName = projectNameFromDir(projectDir); + + const firstUser = relevant.find((e) => e.type === 'user'); + const summary = firstUser ? cleanSummary(firstUser.content) : undefined; + + const createdAt = parseTimestamp(relevant[0].timestamp, mtime); + const updatedAt = parseTimestamp(relevant[relevant.length - 1].timestamp, mtime); + + sessions.push({ + id: sessionId, + source: SOURCE_NAME, + cwd: '', + repo: projectName, + lines: relevant.length, + bytes: fileStats.size, + createdAt, + updatedAt, + originalPath: filePath, + summary, + }); + } catch (err) { + logger.debug('antigravity: skipping unparseable session', filePath, err); + } + } + + return sessions + .filter((s) => s.summary && s.summary.length > 0) + .sort((a, b) => b.updatedAt.getTime() - a.updatedAt.getTime()); +} + +/** + * Extract context from an Antigravity session for cross-tool continuation. + * Antigravity sessions contain only user/assistant messages — no tool calls or token tracking. 
+ */ +export async function extractAntigravityContext( + session: UnifiedSession, + config?: VerbosityConfig, +): Promise { + const resolvedConfig = config ?? getPreset('standard'); + const entries = await parseSessionFile(session.originalPath); + + let fallbackDate = session.updatedAt; + try { + const stat = await fsp.stat(session.originalPath); + fallbackDate = stat.mtime; + } catch (err) { + logger.debug('antigravity: stat failed, using session.updatedAt', err); + } + + const allMessages: ConversationMessage[] = []; + for (const entry of entries) { + if (entry.type !== 'user' && entry.type !== 'assistant') continue; + allMessages.push({ + role: entry.type as 'user' | 'assistant', + content: entry.content, + timestamp: parseTimestamp(entry.timestamp, fallbackDate), + }); + } + + const recentMessages = allMessages.slice(-resolvedConfig.recentMessages); + + const markdown = generateHandoffMarkdown( + session, + recentMessages, + [], // filesModified — not tracked by Antigravity + [], // pendingTasks — not tracked by Antigravity + [], // toolSummaries — no tool calls in Antigravity + undefined, // sessionNotes — no tokens/reasoning + resolvedConfig, + ); + + return { + session, + recentMessages, + filesModified: [], + pendingTasks: [], + toolSummaries: [], + sessionNotes: undefined, + markdown, + }; +} diff --git a/src/parsers/claude.ts b/src/parsers/claude.ts index 5253582..93f63cd 100644 --- a/src/parsers/claude.ts +++ b/src/parsers/claude.ts @@ -1,57 +1,37 @@ import * as fs from 'fs'; import * as path from 'path'; -import * as readline from 'readline'; -import type { UnifiedSession, SessionContext, ConversationMessage, ToolUsageSummary, SessionNotes } from '../types/index.js'; -import { generateHandoffMarkdown } from '../utils/markdown.js'; -import { SummaryCollector, shellSummary, fileSummary, grepSummary, globSummary, searchSummary, fetchSummary, mcpSummary, subagentSummary, withResult, truncate } from '../utils/tool-summarizer.js'; - -const CLAUDE_PROJECTS_DIR 
= path.join(process.env.HOME || '~', '.claude', 'projects'); - -interface ClaudeMessage { - type: string; - uuid: string; - timestamp: string; - sessionId?: string; - cwd?: string; - gitBranch?: string; - slug?: string; - message?: { - role?: string; - content?: string | Array<{ type: string; text?: string }>; - }; - parentUuid?: string; -} +import { logger } from '../logger.js'; +import type { ConversationMessage, ReasoningStep, SessionContext, SessionNotes, SubagentResult, UnifiedSession } from '../types/index.js'; +import type { ClaudeMessage } from '../types/schemas.js'; +import { extractTextFromBlocks, isRealUserMessage } from '../utils/content.js'; +import { findFiles } from '../utils/fs-helpers.js'; +import { getFileStats, readJsonlFile, scanJsonlHead } from '../utils/jsonl.js'; +import { generateHandoffMarkdown, safePath } from '../utils/markdown.js'; +import { cleanSummary, extractRepoFromCwd, homeDir } from '../utils/parser-helpers.js'; +import { + type AnthropicMessage, + extractAnthropicToolData, + extractThinkingHighlights, + isThinkingTool, +} from '../utils/tool-extraction.js'; +import { truncate } from '../utils/tool-summarizer.js'; +import type { VerbosityConfig } from '../config/index.js'; +import { getPreset } from '../config/index.js'; + +const CLAUDE_PROJECTS_DIR = process.env.CLAUDE_CONFIG_DIR + ? 
// Root of Claude's per-project session storage. CLAUDE_CONFIG_DIR (if set)
// overrides the default ~/.claude location.
const CLAUDE_PROJECTS_DIR = process.env.CLAUDE_CONFIG_DIR
  ? path.join(process.env.CLAUDE_CONFIG_DIR, 'projects')
  : path.join(homeDir(), '.claude', 'projects');

/**
 * Find all Claude session files recursively.
 *
 * A session file is a `.jsonl` whose basename is a UUID; debug transcripts
 * (name contains "debug") are excluded.
 */
async function findSessionFiles(): Promise<string[]> {
  return findFiles(CLAUDE_PROJECTS_DIR, {
    match: (entry) =>
      entry.name.endsWith('.jsonl') &&
      !entry.name.includes('debug') &&
      /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\.jsonl$/i.test(entry.name),
  });
}

/**
 * Scan the head of a session JSONL file for identifying metadata.
 *
 * Reads at most the first 50 entries, capturing the first-seen sessionId,
 * cwd, gitBranch, and the first "real" user message (skipping commands,
 * meta content, and continuation summaries via isRealUserMessage).
 *
 * Falls back to the file basename as sessionId when none is recorded.
 */
async function parseSessionInfo(filePath: string): Promise<{
  sessionId: string;
  cwd: string;
  gitBranch?: string;
  firstUserMessage: string;
}> {
  let sessionId = '';
  let cwd = '';
  let gitBranch = '';
  let firstUserMessage = '';

  await scanJsonlHead(filePath, 50, (parsed) => {
    const msg = parsed as ClaudeMessage;
    // First occurrence wins for each field.
    if (msg.sessionId && !sessionId) sessionId = msg.sessionId;
    if (msg.cwd && !cwd) cwd = msg.cwd;
    if (msg.gitBranch && !gitBranch) gitBranch = msg.gitBranch;

    if (!firstUserMessage && msg.type === 'user' && msg.message?.content) {
      const content = extractTextFromBlocks(msg.message.content);
      if (isRealUserMessage(content)) {
        firstUserMessage = content;
      }
    }
    // 'continue' tells scanJsonlHead to keep reading until the 50-entry cap.
    return 'continue';
  });

  if (!sessionId) {
    sessionId = path.basename(filePath, '.jsonl');
  }

  return { sessionId, cwd, gitBranch, firstUserMessage };
}
/**
 * Check if a user message contains actual human-typed text blocks
 * (as opposed to being entirely tool_result blocks).
 */
function hasHumanTextBlocks(msg: ClaudeMessage): boolean {
  const content = msg.message?.content;
  if (!content) return false;
  // String content is always human-authored text.
  if (typeof content === 'string') return true;
  return content.some((block) => block.type === 'text' && block.text);
}

/**
 * Parsed queue-operation entry from a queue-operation JSONL event.
 */
interface QueueOperationEntry {
  taskId: string;
  description: string;
  taskType?: string;
  operation: string;
  status?: string;
}

/** A task status observation, tagged with where it was discovered. */
interface TaskStatusEntry {
  taskId: string;
  status?: string;
  taskType?: string;
  description?: string;
  source: 'queue' | 'user_notification' | 'task_output';
}

/**
 * Return the trimmed inner text of the first matching XML-ish tag.
 * Tags are tried in order; both kebab-case and snake_case variants are
 * passed by callers.
 */
function extractTagValue(text: string, tags: string[]): string | undefined {
  for (const tag of tags) {
    const m = text.match(new RegExp(`<${tag}>([\\s\\S]*?)<\\/${tag}>`, 'i'));
    if (m?.[1]) return m[1].trim();
  }
  return undefined;
}

/**
 * Extract task statuses from XML-style tagged text.
 *
 * If one or more <task-notification> blocks are present, each block is
 * parsed independently; otherwise the whole text is treated as a single
 * tagged payload. Entries without a task-id are dropped.
 */
function extractTaskStatusesFromTaggedText(
  text: string,
  source: TaskStatusEntry['source'],
): TaskStatusEntry[] {
  const statuses: TaskStatusEntry[] = [];
  // NOTE(review): opening tag reconstructed to mirror the closing
  // </task-notification> — confirm against the original source.
  const notificationBlocks = text.match(/<task-notification>[\s\S]*?<\/task-notification>/gi);

  const parseBlock = (block: string): TaskStatusEntry | null => {
    const taskId = extractTagValue(block, ['task-id', 'task_id']);
    if (!taskId) return null;

    return {
      taskId,
      status: extractTagValue(block, ['status']),
      taskType: extractTagValue(block, ['task-type', 'task_type']),
      description: extractTagValue(block, ['summary', 'description']),
      source,
    };
  };

  if (notificationBlocks && notificationBlocks.length > 0) {
    for (const block of notificationBlocks) {
      const parsed = parseBlock(block);
      if (parsed) statuses.push(parsed);
    }
    return statuses;
  }

  // No notification wrapper: treat the entire text as one tagged payload.
  const single = parseBlock(text);
  if (single) statuses.push(single);
  return statuses;
}

/**
 * Flatten a tool_result content value (string or array of text blocks)
 * into a single newline-joined string.
 */
function extractToolResultText(content: unknown): string {
  if (typeof content === 'string') return content;
  if (!Array.isArray(content)) return '';
  return content
    .map((item) => {
      const block = item as Record<string, unknown>;
      return typeof block?.text === 'string' ? block.text : '';
    })
    .filter(Boolean)
    .join('\n');
}

/** True when the status string denotes any terminal state (success OR failure). */
function isTerminalTaskStatus(status?: string): boolean {
  if (!status) return false;
  const normalized = status.toLowerCase().trim();
  return new Set(['completed', 'complete', 'success', 'succeeded', 'done', 'failed', 'error', 'killed', 'cancelled', 'canceled', 'timeout', 'timed_out']).has(normalized);
}

/** True when the status string denotes a successful completion. */
function isCompletedTaskStatus(status?: string): boolean {
  if (!status) return false;
  const normalized = status.toLowerCase().trim();
  return new Set(['completed', 'complete', 'success', 'succeeded', 'done']).has(normalized);
}

/**
 * Collect task-notification statuses embedded in user messages
 * (both plain-string content and text blocks).
 */
function extractUserTaskNotifications(messages: ClaudeMessage[]): TaskStatusEntry[] {
  const statuses: TaskStatusEntry[] = [];

  for (const msg of messages) {
    if (msg.type !== 'user') continue;
    const content = msg.message?.content;
    if (typeof content === 'string') {
      statuses.push(...extractTaskStatusesFromTaggedText(content, 'user_notification'));
      continue;
    }
    if (!Array.isArray(content)) continue;

    for (const block of content) {
      if (block.type !== 'text' || !block.text) continue;
      statuses.push(...extractTaskStatusesFromTaggedText(block.text, 'user_notification'));
    }
  }

  return statuses;
}

/**
 * Collect task statuses from TaskOutput tool results.
 *
 * Correlates tool_result blocks back to their tool_use via tool_use_id,
 * keeping only results produced by the 'TaskOutput' tool.
 */
function extractTaskOutputStatuses(messages: ClaudeMessage[]): TaskStatusEntry[] {
  const statuses: TaskStatusEntry[] = [];
  const toolUseById = new Map<string, { name: string }>();

  for (const msg of messages) {
    const content = msg.message?.content;
    if (!Array.isArray(content)) continue;

    for (const block of content as Array<Record<string, unknown>>) {
      if (block.type === 'tool_use') {
        const id = block.id as string | undefined;
        const name = block.name as string | undefined;
        if (id && name) toolUseById.set(id, { name });
        continue;
      }
      if (block.type !== 'tool_result') continue;
      const toolUseId = block.tool_use_id as string | undefined;
      if (!toolUseId) continue;

      const toolUse = toolUseById.get(toolUseId);
      if (!toolUse || toolUse.name !== 'TaskOutput') continue;

      const text = extractToolResultText(block.content);
      if (!text) continue;
      statuses.push(...extractTaskStatusesFromTaggedText(text, 'task_output'));
    }
  }

  return statuses;
}

/**
 * Extract queue-operation events from messages.
 * Returns parsed entries with task_id, description, and operation type.
 *
 * Content is tried in three forms, in order: XML-style tagged payloads,
 * JSON, then a regex scrape of JSON-ish text (logging malformed content).
 */
function parseQueueOperations(messages: ClaudeMessage[]): QueueOperationEntry[] {
  const entries: QueueOperationEntry[] = [];
  for (const msg of messages) {
    if (msg.type !== 'queue-operation') continue;
    const raw = msg as Record<string, unknown>;
    const operation = (raw.operation as string) || '';
    const contentStr = (raw.content as string) || '';
    if (!contentStr) continue;

    // XML-style task notifications (or tagged task payloads) often appear here.
    const taggedStatuses = extractTaskStatusesFromTaggedText(contentStr, 'queue');
    if (taggedStatuses.length > 0) {
      for (const status of taggedStatuses) {
        entries.push({
          taskId: status.taskId,
          description: status.description || '',
          taskType: status.taskType,
          operation,
          status: status.status,
        });
      }
      continue;
    }

    try {
      const parsed = JSON.parse(contentStr) as Record<string, unknown>;
      const taskId = (parsed.task_id as string) || '';
      const description = (parsed.description as string) || '';
      if (taskId) {
        entries.push({
          taskId,
          description,
          taskType: (parsed.task_type as string) || undefined,
          operation,
          status: (parsed.status as string) || undefined,
        });
      }
    } catch {
      // Last resort: scrape the fields out of malformed JSON text.
      const taskId = contentStr.match(/"task_id"\s*:\s*"([^"]+)"/)?.[1];
      if (taskId) {
        entries.push({
          taskId,
          description: contentStr.match(/"description"\s*:\s*"([^"]+)"/)?.[1] || '',
          taskType: contentStr.match(/"task_type"\s*:\s*"([^"]+)"/)?.[1],
          operation,
        });
      } else {
        logger.debug('claude: malformed queue-operation content', contentStr.slice(0, 100));
      }
    }
  }
  return entries;
}

/**
 * Check if a message looks like a rate-limit or termination notice
 * rather than a real assistant response.
 */
function isTerminationMessage(text: string): boolean {
  const lower = text.toLowerCase();
  return lower.includes('out of extra usage') ||
    lower.includes('rate limit') ||
    lower.includes('resets ') ||
    // Short messages mentioning usage/limit are treated as notices too.
    (text.length < 50 && (lower.includes('usage') || lower.includes('limit')));
}
/**
 * Read a subagent JSONL file and return its final substantial assistant result.
 * Skips short termination/rate-limit messages to find the real output.
 * Returns null text if the file doesn't exist, is empty, or has no substantial result.
 *
 * Status semantics: 'killed' when the file is missing/unreadable or a
 * termination notice was seen; otherwise 'completed'.
 */
async function extractSubagentResult(filePath: string): Promise<{ text: string | null; status: 'completed' | 'killed'; toolCallCount: number }> {
  try {
    if (!fs.existsSync(filePath)) {
      return { text: null, status: 'killed', toolCallCount: 0 };
    }

    const subMsgs = await readJsonlFile(filePath);
    let toolCallCount = 0;
    let lastSubstantialText: string | null = null;
    let wasKilled = false;

    for (const m of subMsgs) {
      if (m.type === 'assistant' && Array.isArray(m.message?.content)) {
        // Count tool invocations made by the subagent.
        for (const block of m.message!.content) {
          if (typeof block === 'object' && block.type === 'tool_use') toolCallCount++;
        }
        const text = extractTextFromBlocks(m.message?.content);
        // "Substantial" = longer than 50 chars and not a termination notice;
        // the last such message wins.
        if (text && text.length > 50 && !isTerminationMessage(text)) {
          lastSubstantialText = text;
        }
        if (text && isTerminationMessage(text)) {
          wasKilled = true;
        }
      }
    }

    return {
      text: lastSubstantialText,
      status: wasKilled ? 'killed' : 'completed',
      toolCallCount,
    };
  } catch (err) {
    logger.debug('claude: failed to read subagent file', filePath, err);
    return { text: null, status: 'killed', toolCallCount: 0 };
  }
}
collector.add('AskUserQuestion', `ask: "${truncate(input.question || '', 80)}"`); - } else if (name.startsWith('mcp__') || name.includes('-')) { - collector.add(name, mcpSummary(name, JSON.stringify(input).slice(0, 100), result)); - } else { - collector.add(name, withResult(`${name}(${JSON.stringify(input).slice(0, 100)})`, result)); +/** + * Extract pending tasks from sequential-thinking / crash-think-tool blocks. + * Looks for `next_action` in the input params of thinking tool_use blocks. + */ +function extractPendingFromThinking(messages: ClaudeMessage[], maxTasks: number): string[] { + const tasks: string[] = []; + const thinkingToolNames = new Set([ + 'crash-think-tool', + 'must-use-think-tool-crash-crash', + 'sequential-thinking', + 'think', + ]); + + // Walk backwards so we get the most recent thinking first + for (let i = messages.length - 1; i >= 0 && tasks.length < maxTasks; i--) { + const msg = messages[i]; + if (msg.type !== 'assistant') continue; + const content = msg.message?.content; + if (!Array.isArray(content)) continue; + + for (const block of content) { + if (tasks.length >= maxTasks) break; + if (block.type !== 'tool_use') continue; + const name = (block as Record).name as string; + if (!thinkingToolNames.has(name)) continue; + + const input = (block as Record).input as Record | undefined; + if (!input) continue; + + const nextAction = input.next_action as string | undefined; + if (nextAction && typeof nextAction === 'string' && nextAction.length > 5) { + // Avoid duplicates + const trimmed = truncate(nextAction.trim(), 200); + if (!tasks.includes(trimmed)) { + tasks.push(trimmed); + } } } } - return { summaries: collector.getSummaries(), filesModified: collector.getFilesModified() }; + return tasks; +} + +/** + * Read supplementary tool result files from {session_dir}/tool-results/. + * Returns an array of note strings describing each file found. 
/**
 * Read supplementary tool result files from {session_dir}/tool-results/.
 * Returns an array of note strings describing each file found.
 *
 * Only metadata (name + size) is recorded here; file contents are not read.
 */
function readToolResultsDir(sessionDir: string): string[] {
  const toolResultsPath = path.join(sessionDir, 'tool-results');
  const notes: string[] = [];

  try {
    if (!fs.existsSync(toolResultsPath)) return notes;
    const entries = fs.readdirSync(toolResultsPath, { withFileTypes: true });

    for (const entry of entries) {
      if (!entry.isFile()) continue;
      const filePath = path.join(toolResultsPath, entry.name);
      try {
        const stats = fs.statSync(filePath);
        const sizeKb = (stats.size / 1024).toFixed(1);
        notes.push(`tool-result: ${entry.name} (${sizeKb} KB)`);
      } catch {
        // A stat failure still yields a note so the file isn't silently lost.
        notes.push(`tool-result: ${entry.name} (unreadable)`);
      }
    }
  } catch (err) {
    logger.debug('claude: failed to read tool-results dir', toolResultsPath, err);
  }

  return notes;
}

/**
 * Heuristic: does this transcript show signs of context compaction?
 * True on an explicit isCompactSummary flag, or on continuation phrasing
 * in any user/assistant text.
 */
function hasCompactionCue(messages: ClaudeMessage[]): boolean {
  if (messages.some((m) => m.isCompactSummary)) return true;

  const continuationPattern = /(continued from a previous conversation|ran out of context|summary below covers|conversation compacted)/i;
  for (const msg of messages) {
    if (msg.type !== 'user' && msg.type !== 'assistant') continue;
    const text = extractTextFromBlocks(msg.message?.content);
    if (text && continuationPattern.test(text)) return true;
  }

  return false;
}

/**
 * Return the LAST compact summary found in a session file (most recent
 * compaction is the most comprehensive), truncated to maxChars.
 */
async function extractLatestCompactSummary(filePath: string, maxChars: number): Promise<string | undefined> {
  const messages = await readJsonlFile(filePath);
  let compact: string | undefined;
  for (const msg of messages) {
    if (!msg.isCompactSummary || !msg.message?.content) continue;
    const text = extractTextFromBlocks(msg.message.content);
    if (text) compact = truncate(text, maxChars);
  }
  return compact;
}

/**
 * Best-effort resolution of a session's predecessor sessions.
 *
 * Candidates share the same cwd, are ordered newest-first, and are accepted
 * only if they ended at or before the current cursor time; each accepted
 * predecessor moves the cursor further back, building a backward chain of
 * at most maxDepth sessions.
 */
async function resolvePreviousClaudeSessions(session: UnifiedSession, maxDepth: number): Promise<UnifiedSession[]> {
  if (maxDepth <= 0) return [];
  const allSessions = await parseClaudeSessions();
  const candidates = allSessions
    .filter((s) => s.id !== session.id && s.cwd === session.cwd)
    .sort((a, b) => b.updatedAt.getTime() - a.updatedAt.getTime());

  const selected: UnifiedSession[] = [];
  const visited = new Set([session.id]);
  // createdAt may be epoch-zero/falsy; fall back to updatedAt.
  let cursorTime = session.createdAt.getTime() || session.updatedAt.getTime();

  for (const candidate of candidates) {
    if (selected.length >= maxDepth) break;
    if (visited.has(candidate.id)) continue;
    // Predecessors must not postdate the current chain position.
    if (candidate.updatedAt.getTime() > cursorTime) continue;

    selected.push(candidate);
    visited.add(candidate.id);
    cursorTime = candidate.createdAt.getTime() || candidate.updatedAt.getTime();
  }

  return selected;
}

/**
 * Build a markdown prefix summarizing the chain of predecessor sessions
 * when the current session appears compacted.
 *
 * Returns undefined when chaining is disabled, no compaction cue is found,
 * or no predecessors are resolved.
 */
async function buildChainedHistoryPrefix(
  session: UnifiedSession,
  messages: ClaudeMessage[],
  cfg: VerbosityConfig,
): Promise<string | undefined> {
  if (!cfg.agents.claude.chainCompactedHistory) return undefined;
  if (!hasCompactionCue(messages)) return undefined;

  const previous = await resolvePreviousClaudeSessions(session, cfg.agents.claude.chainMaxDepth);
  if (previous.length === 0) return undefined;

  const lines: string[] = [
    '# Previous Session Chain Context',
    '',
    'The current Claude session appears compacted; best-effort predecessor sessions are included below.',
    '',
    '## Chained Previous Sessions',
    '',
  ];

  const ordered = [...previous].reverse(); // oldest → newest for readable timeline
  for (const [index, prev] of ordered.entries()) {
    lines.push(`### ${index + 1}. ${prev.id} (${prev.updatedAt.toISOString().slice(0, 16).replace('T', ' ')})`);
    lines.push(`- **Session file**: \`${safePath(prev.originalPath)}\``);
    if (prev.summary) {
      lines.push(`- **Summary**: ${truncate(prev.summary, cfg.agents.claude.chainSummaryChars)}`);
    }

    const compact = await extractLatestCompactSummary(prev.originalPath, cfg.agents.claude.chainSummaryChars);
    if (compact) {
      lines.push(`- **Compact summary**: ${compact}`);
    }
    lines.push('');
  }

  return lines.join('\n').trim();
}
/**
 * Extract session notes: model, token/cache usage, thinking highlights,
 * and the latest compact summary.
 *
 * @param messages - Full parsed session transcript.
 * @param config   - Optional verbosity config; defaults to 'standard'.
 */
function extractSessionNotes(messages: ClaudeMessage[], config?: VerbosityConfig): SessionNotes {
  const cfg = config ?? getPreset('standard');
  const notes: SessionNotes = {};

  // Extract model from first message that has it
  for (const msg of messages) {
    if (msg.model && !notes.model) {
      notes.model = msg.model;
      break;
    }
  }

  // Aggregate token usage, cache tokens, and model from assistant messages
  for (const msg of messages) {
    if (msg.type !== 'assistant') continue;
    const msgObj = msg.message as Record<string, unknown> | undefined;
    if (!msgObj) continue;

    // Fallback: message.model carries the full model identifier from the API response
    if (msgObj.model && !notes.model) {
      notes.model = msgObj.model as string;
    }

    const usage = msgObj.usage as Record<string, number> | undefined;
    if (!usage) continue;

    if (!notes.tokenUsage) notes.tokenUsage = { input: 0, output: 0 };
    notes.tokenUsage.input += usage.input_tokens || 0;
    notes.tokenUsage.output += usage.output_tokens || 0;

    const cacheCreation = usage.cache_creation_input_tokens || 0;
    const cacheRead = usage.cache_read_input_tokens || 0;
    if (cacheCreation || cacheRead) {
      if (!notes.cacheTokens) notes.cacheTokens = { creation: 0, read: 0 };
      notes.cacheTokens.creation += cacheCreation;
      notes.cacheTokens.read += cacheRead;
    }
  }

  // Extract thinking highlights via shared utility; only messages with
  // block-array content can contain thinking blocks.
  const anthropicMsgs: AnthropicMessage[] = messages
    .filter((m) => m.message?.content && Array.isArray(m.message.content))
    .map((m) => ({
      role: m.message!.role,
      content: m.message!.content as Array<{ type: string; [key: string]: unknown }>,
    }));

  const reasoning = extractThinkingHighlights(anthropicMsgs, cfg.thinking.maxHighlights);
  if (reasoning.length > 0) notes.reasoning = reasoning;

  // Extract compact summary — take the LAST one (most comprehensive in long sessions)
  for (const msg of messages) {
    if (msg.isCompactSummary && msg.message?.content) {
      const text = extractTextFromBlocks(msg.message.content);
      if (text) {
        notes.compactSummary = truncate(text, cfg.compactSummary.maxChars);
      }
    }
  }

  return notes;
}
/**
 * Extract context from a Claude session for cross-tool continuation.
 *
 * Pipeline: read the JSONL transcript, extract tool usage + session notes,
 * mine reasoning steps and pending tasks from thinking tools, filter to
 * conversational turns, resolve subagent results, sample the tool-results
 * directory, then render handoff markdown (optionally prefixed with a
 * chained-history section for compacted sessions).
 */
export async function extractClaudeContext(
  session: UnifiedSession,
  config?: VerbosityConfig,
): Promise<SessionContext> {
  const cfg = config ?? getPreset('standard');
  const messages = await readJsonlFile(session.originalPath);
  const recentMessages: ConversationMessage[] = [];

  // Extract tool data via shared utility
  const anthropicMsgs: AnthropicMessage[] = messages
    .filter((m) => m.message?.content && Array.isArray(m.message.content))
    .map((m) => ({
      role: m.message!.role,
      content: m.message!.content as Array<{ type: string; [key: string]: unknown }>,
    }));

  const { summaries: toolSummaries, filesModified } = extractAnthropicToolData(anthropicMsgs, cfg);
  const sessionNotes = extractSessionNotes(messages, cfg);
  const pendingTasks: string[] = [];

  // ── Extract reasoning steps from thinking tool blocks ─────────────────
  if (cfg.mcp.thinkingTools.extractReasoning) {
    const steps: ReasoningStep[] = [];
    for (const msg of anthropicMsgs) {
      for (const block of msg.content) {
        if (block.type === 'tool_use') {
          const tu = block as { type: string; name?: string; input?: Record<string, unknown> };
          if (tu.name && isThinkingTool(tu.name) && tu.input) {
            steps.push({
              stepNumber: (tu.input.step_number as number) || 0,
              totalSteps: (tu.input.estimated_total as number) || 0,
              purpose: String(tu.input.purpose || ''),
              thought: truncate(String(tu.input.thought || ''), cfg.mcp.thinkingTools.maxReasoningChars),
              outcome: truncate(String(tu.input.outcome || ''), cfg.mcp.thinkingTools.maxReasoningChars),
              nextAction: truncate(String(tu.input.next_action || ''), cfg.mcp.thinkingTools.maxReasoningChars),
            });
          }
        }
      }
    }
    if (steps.length > 0) {
      sessionNotes.reasoningSteps = steps;
    }
  }

  // ── Gap 5: Extract pending tasks from thinking tools ──────────────────
  if (cfg.pendingTasks.extractFromThinking) {
    const thinkingTasks = extractPendingFromThinking(messages, cfg.pendingTasks.maxTasks);
    pendingTasks.push(...thinkingTasks);
  }

  // ── Gap 1 + Gap 4: Filter to conversational messages before slicing ───
  // Gap 1: Filter out progress/system noise so we get real conversation turns
  // Gap 4: Optionally exclude user messages that are entirely tool_result blocks
  const conversational = messages.filter((m) => {
    if (m.type !== 'user' && m.type !== 'assistant') return false;
    if (m.isCompactSummary) return false;

    // Gap 4: When separateHumanFromToolResults is enabled, skip user messages
    // that contain only tool_result blocks (no human text)
    if (cfg.agents.claude.separateHumanFromToolResults && m.type === 'user') {
      if (!hasHumanTextBlocks(m)) return false;
    }
    return true;
  });

  // Oversample by 2x here; the final slice to cfg.recentMessages happens below.
  for (const msg of conversational.slice(-(cfg.recentMessages * 2))) {
    if (msg.type === 'user') {
      const content = extractTextFromBlocks(msg.message?.content);
      if (content) {
        recentMessages.push({
          role: 'user',
          content: truncate(content, cfg.maxMessageChars),
          timestamp: new Date(msg.timestamp),
        });
      }
    } else if (msg.type === 'assistant') {
      const content = extractTextFromBlocks(msg.message?.content);
      if (content) {
        recentMessages.push({
          role: 'assistant',
          content: truncate(content, cfg.maxMessageChars),
          timestamp: new Date(msg.timestamp),
        });
      }
    }
  }

  // ── Gap 2: Parse subagent JSONL files ─────────────────────────────────
  if (cfg.agents.claude.parseSubagents) {
    // Session dir = {project_dir}/{session_id}/ (not just dirname of the .jsonl)
    const sessionDir = session.originalPath.replace(/\.jsonl$/, '');
    const queueOps = parseQueueOperations(messages);
    const userTaskStatuses = extractUserTaskNotifications(messages);
    const taskOutputStatuses = extractTaskOutputStatuses(messages);

    // Terminal states can come from queue ops, user task notifications, or TaskOutput payloads.
    const terminalTaskIds = new Set<string>();
    const completedTaskIds = new Set<string>();

    for (const op of queueOps) {
      if (op.operation !== 'enqueue') terminalTaskIds.add(op.taskId);
      if (isTerminalTaskStatus(op.status)) terminalTaskIds.add(op.taskId);
      if (isCompletedTaskStatus(op.status)) completedTaskIds.add(op.taskId);
    }

    for (const statusEntry of [...userTaskStatuses, ...taskOutputStatuses]) {
      if (isTerminalTaskStatus(statusEntry.status)) terminalTaskIds.add(statusEntry.taskId);
      if (isCompletedTaskStatus(statusEntry.status)) completedTaskIds.add(statusEntry.taskId);
    }

    // Deduplicate: keep only unique task_ids (first enqueue wins for description)
    const seen = new Set<string>();
    const uniqueTasks = queueOps.filter((op) => {
      if (op.operation !== 'enqueue') return false;
      if (seen.has(op.taskId)) return false;
      seen.add(op.taskId);
      return true;
    });

    let subagentCount = 0;
    for (const task of uniqueTasks) {
      if (subagentCount >= cfg.task.maxSamples) break;
      // Only local_agent tasks are backed by subagent transcript files.
      if (task.taskType && task.taskType !== 'local_agent') continue;

      const subagentPath = path.join(sessionDir, 'subagents', `agent-${task.taskId}.jsonl`);
      const { text, status: extractedStatus, toolCallCount } = await extractSubagentResult(subagentPath);
      // A missing transcript for a task reported completed elsewhere is still 'completed'.
      const status = !text && completedTaskIds.has(task.taskId)
        ? 'completed'
        : extractedStatus;

      // Always populate structured subagentResults
      if (!sessionNotes.subagentResults) sessionNotes.subagentResults = [];
      sessionNotes.subagentResults.push({
        taskId: task.taskId,
        description: task.description,
        status,
        result: text ? truncate(text, cfg.task.subagentResultChars) : undefined,
        toolCallCount,
      });

      if (text) {
        // Legacy reasoning for markdown renderer
        if (!sessionNotes.reasoning) sessionNotes.reasoning = [];
        sessionNotes.reasoning.push(`Subagent "${task.description}": ${truncate(text, cfg.task.subagentResultChars)}`);
        subagentCount++;
      } else if (!terminalTaskIds.has(task.taskId)) {
        // Incomplete/killed subagent — add to pending tasks
        if (cfg.pendingTasks.extractFromSubagents && pendingTasks.length < cfg.pendingTasks.maxTasks) {
          pendingTasks.push(`Incomplete subagent: ${task.description}`);
        }
      }
    }
  }

  // ── Gap 3: Read tool-results directory ────────────────────────────────
  if (cfg.agents.claude.parseToolResultsDir) {
    const toolResultsSessionDir = session.originalPath.replace(/\.jsonl$/, '');
    const toolResultsPath = path.join(toolResultsSessionDir, 'tool-results');
    try {
      if (fs.existsSync(toolResultsPath)) {
        const entries = fs.readdirSync(toolResultsPath, { withFileTypes: true });
        for (const entry of entries) {
          if (!entry.isFile()) continue;
          const filePath = path.join(toolResultsPath, entry.name);
          try {
            const stats = fs.statSync(filePath);
            const preview = fs.readFileSync(filePath, 'utf8').slice(0, 200);
            if (!sessionNotes.externalToolResults) sessionNotes.externalToolResults = [];
            sessionNotes.externalToolResults.push({
              name: entry.name,
              sizeBytes: stats.size,
              preview: preview.replace(/\n/g, ' ').trim(),
            });
            if (!sessionNotes.reasoning) sessionNotes.reasoning = [];
            sessionNotes.reasoning.push(`tool-result: ${entry.name} (${(stats.size / 1024).toFixed(1)} KB)`);
          } catch {
            // skip unreadable files
          }
        }
      }
    } catch (err) {
      logger.debug('claude: failed to read tool-results dir', toolResultsPath, err);
    }
  }

  const finalMessages = recentMessages.slice(-cfg.recentMessages);
  const dedupedPendingTasks = Array.from(new Set(pendingTasks)).slice(0, cfg.pendingTasks.maxTasks);

  const baseMarkdown = generateHandoffMarkdown(
    session,
    finalMessages,
    filesModified,
    dedupedPendingTasks,
    toolSummaries,
    sessionNotes,
    cfg,
  );
  // Compacted sessions get a predecessor-chain prefix prepended to the handoff.
  const chainPrefix = await buildChainedHistoryPrefix(session, messages, cfg);
  const markdown = chainPrefix ? `${chainPrefix}\n\n---\n\n${baseMarkdown}` : baseMarkdown;

  return {
    session,
    recentMessages: finalMessages,
    filesModified,
    pendingTasks: dedupedPendingTasks,
    toolSummaries,
    sessionNotes,
    markdown,
  };
}
+ */ +const CLINE_EXTENSIONS = [ + { id: 'saoudrizwan.claude-dev', source: 'cline' }, + { id: 'rooveterinaryinc.roo-cline', source: 'roo-code' }, + { id: 'roo-code.roo-cline', source: 'roo-code' }, + { id: 'kilocode.kilo-code', source: 'kilo-code' }, +] as const; + +type ClineSource = (typeof CLINE_EXTENSIONS)[number]['source']; + +// ── Raw Message Shape ─────────────────────────────────────────────────────── + +/** Single entry in ui_messages.json */ +interface ClineRawMessage { + ts: number; + type: string; + say?: string; + ask?: string; + text?: string; + images?: string[]; + partial?: boolean; +} + +/** Token metadata parsed from api_req_started text field */ +interface ApiReqMeta { + tokensIn?: number; + tokensOut?: number; + cacheWrites?: number; + cacheReads?: number; + cost?: number; +} + +// ── Path Discovery ────────────────────────────────────────────────────────── + +/** + * Build candidate globalStorage base directories for the current platform. + * Covers VS Code, VS Code Insiders, and Cursor on macOS / Linux / Windows. 
+ */ +function getGlobalStorageBases(): string[] { + const home = homeDir(); + const bases: string[] = []; + + if (process.platform === 'darwin') { + const appSupport = path.join(home, 'Library', 'Application Support'); + bases.push( + path.join(appSupport, 'Code', 'User', 'globalStorage'), + path.join(appSupport, 'Code - Insiders', 'User', 'globalStorage'), + path.join(appSupport, 'Cursor', 'User', 'globalStorage'), + path.join(appSupport, 'Windsurf', 'User', 'globalStorage'), + ); + } else if (process.platform === 'linux') { + bases.push( + path.join(home, '.config', 'Code', 'User', 'globalStorage'), + path.join(home, '.config', 'Code - Insiders', 'User', 'globalStorage'), + path.join(home, '.config', 'Cursor', 'User', 'globalStorage'), + ); + } else if (process.platform === 'win32') { + const appData = process.env.APPDATA || path.join(home, 'AppData', 'Roaming'); + bases.push( + path.join(appData, 'Code', 'User', 'globalStorage'), + path.join(appData, 'Code - Insiders', 'User', 'globalStorage'), + path.join(appData, 'Cursor', 'User', 'globalStorage'), + ); + } + + return bases; +} + +/** + * Discover all task directories for a given extension across all IDE locations. + * Returns tuples of (task-id directory path, extension source label). 
+ */ +function discoverTaskDirs(): Array<{ taskDir: string; taskId: string; source: ClineSource }> { + const bases = getGlobalStorageBases(); + const results: Array<{ taskDir: string; taskId: string; source: ClineSource }> = []; + + for (const base of bases) { + if (!fs.existsSync(base)) continue; + + for (const ext of CLINE_EXTENSIONS) { + const tasksRoot = path.join(base, ext.id, 'tasks'); + if (!fs.existsSync(tasksRoot)) continue; + + try { + const entries = fs.readdirSync(tasksRoot, { withFileTypes: true }); + for (const entry of entries) { + if (!entry.isDirectory()) continue; + const taskDir = path.join(tasksRoot, entry.name); + const uiFile = path.join(taskDir, 'ui_messages.json'); + if (fs.existsSync(uiFile)) { + results.push({ taskDir, taskId: entry.name, source: ext.source }); + } + } + } catch (err) { + logger.debug(`cline: cannot read tasks dir ${tasksRoot}`, err); + } + } + } + + return results; +} + +// ── Message Parsing ───────────────────────────────────────────────────────── + +/** Read and parse ui_messages.json, returning an empty array on failure */ +function readUiMessages(filePath: string): ClineRawMessage[] { + try { + const content = fs.readFileSync(filePath, 'utf8'); + const parsed = JSON.parse(content); + if (!Array.isArray(parsed)) return []; + return parsed as ClineRawMessage[]; + } catch (err) { + logger.debug('cline: failed to parse ui_messages.json', filePath, err); + return []; + } +} + +/** + * Determine conversation role from a raw Cline message. + * Returns null for messages that aren't conversation turns (metadata, api events). + */ +function classifyRole(msg: ClineRawMessage): 'user' | 'assistant' | null { + if (msg.type !== 'say') return null; + + switch (msg.say) { + case 'user_feedback': + return 'user'; + + case 'text': + // Streaming assistant chunks have partial: true + // Non-partial text without images is user input + return msg.partial === true ? 
'assistant' : 'user'; + + case 'completion_result': + return 'assistant'; + + case 'reasoning': + return 'assistant'; + + default: + // api_req_started, api_req_finished, and other event types → not conversation + return null; + } +} + +/** + * Extract the first real user message from a set of raw messages. + * Used for session summary. + */ +function extractFirstUserMessage(messages: ClineRawMessage[]): string { + for (const msg of messages) { + const role = classifyRole(msg); + if (role === 'user' && msg.text && msg.text.length > 0) { + return msg.text; + } + } + return ''; +} + +/** + * Build conversation messages from raw Cline events. + * Deduplicates consecutive assistant streaming chunks (keeps last = most complete). + */ +function buildConversation(messages: ClineRawMessage[]): ConversationMessage[] { + const result: ConversationMessage[] = []; + let lastSay: string | undefined; + let lastPartial = false; + + for (const msg of messages) { + const role = classifyRole(msg); + if (!role || !msg.text) continue; + + const text = msg.text.trim(); + if (!text) continue; + + const ts = msg.ts ? new Date(msg.ts) : undefined; + + // Deduplicate: if the previous message is also a streaming assistant text + // chunk, replace it with this one (which has more complete text). + // Only replace when the previous was also a partial text — never overwrite + // reasoning or other non-streaming assistant messages. 
+ if ( + role === 'assistant' && + msg.say === 'text' && + msg.partial === true && + result.length > 0 && + result[result.length - 1].role === 'assistant' && + lastSay === 'text' && + lastPartial === true + ) { + result[result.length - 1] = { role, content: text, timestamp: ts }; + } else { + result.push({ role, content: text, timestamp: ts }); + } + + lastSay = msg.say; + lastPartial = msg.partial === true; + } + + return result; +} + +// ── Token / Cost Extraction ───────────────────────────────────────────────── + +/** + * Aggregate token usage and cost from api_req_started events. + * Each event's text field contains a JSON object with token counts. + */ +function extractTokenUsage(messages: ClineRawMessage[]): SessionNotes { + const notes: SessionNotes = {}; + let totalIn = 0; + let totalOut = 0; + let totalCacheWrites = 0; + let totalCacheReads = 0; + let totalCost = 0; + let found = false; + + for (const msg of messages) { + if (msg.type !== 'say' || msg.say !== 'api_req_started') continue; + if (!msg.text) continue; + + try { + const meta: ApiReqMeta = JSON.parse(msg.text); + if (meta.tokensIn) { totalIn += meta.tokensIn; found = true; } + if (meta.tokensOut) { totalOut += meta.tokensOut; found = true; } + if (meta.cacheWrites) { totalCacheWrites += meta.cacheWrites; found = true; } + if (meta.cacheReads) { totalCacheReads += meta.cacheReads; found = true; } + if (meta.cost) totalCost += meta.cost; + } catch { + // Malformed JSON in api_req_started — skip silently + } + } + + if (found) { + notes.tokenUsage = { input: totalIn, output: totalOut }; + } + if (totalCacheWrites > 0 || totalCacheReads > 0) { + notes.cacheTokens = { creation: totalCacheWrites, read: totalCacheReads }; + } + + return notes; +} + +/** + * Extract reasoning highlights from "reasoning" say events (max N). 
+ */ +function extractReasoning(messages: ClineRawMessage[], max: number): string[] { + const highlights: string[] = []; + for (const msg of messages) { + if (highlights.length >= max) break; + if (msg.type !== 'say' || msg.say !== 'reasoning') continue; + if (!msg.text || msg.text.length < 10) continue; + highlights.push(truncate(msg.text.trim(), 200)); + } + return highlights; +} + +/** + * Extract pending tasks from the last assistant message. + * Looks for TODO, NEXT, REMAINING patterns in completion results. + */ +function extractPendingTasks(messages: ClineRawMessage[], max: number): string[] { + const tasks: string[] = []; + + // Walk backwards to find the last completion_result or assistant text + for (let i = messages.length - 1; i >= 0 && tasks.length < max; i--) { + const msg = messages[i]; + if (msg.type !== 'say') continue; + if (msg.say !== 'completion_result' && msg.say !== 'text') continue; + if (!msg.text) continue; + + const lines = msg.text.split('\n'); + for (const line of lines) { + if (tasks.length >= max) break; + const trimmed = line.trim(); + const lower = trimmed.toLowerCase(); + if ( + (lower.startsWith('- [ ]') || lower.startsWith('todo:') || lower.includes('next step')) && + trimmed.length > 5 + ) { + tasks.push(truncate(trimmed, 200)); + } + } + + // Only check the last relevant message + if (tasks.length > 0) break; + } + + return tasks; +} + +// ── Session Parsing (shared) ──────────────────────────────────────────────── + +/** + * Discover and parse sessions for all Cline-family extensions, optionally + * filtering to a single source variant. 
+ */ +async function parseSessionsForSource(filterSource?: ClineSource): Promise { + const taskEntries = discoverTaskDirs(); + const sessions: UnifiedSession[] = []; + + for (const { taskDir, taskId, source } of taskEntries) { + if (filterSource && source !== filterSource) continue; + + try { + const uiFile = path.join(taskDir, 'ui_messages.json'); + const messages = readUiMessages(uiFile); + if (messages.length === 0) continue; + + const firstUserMsg = extractFirstUserMessage(messages); + const summary = cleanSummary(firstUserMsg); + if (!summary) continue; // Skip sessions with no real user message + + const fileStats = fs.statSync(uiFile); + + // Derive timestamps: prefer message timestamps, fall back to file stats + const firstTs = messages[0]?.ts; + const lastTs = messages[messages.length - 1]?.ts; + const createdAt = firstTs ? new Date(firstTs) : fileStats.birthtime; + const updatedAt = lastTs ? new Date(lastTs) : fileStats.mtime; + + sessions.push({ + id: taskId, + source: source as SessionSource, + cwd: '', + lines: messages.length, + bytes: fileStats.size, + createdAt, + updatedAt, + originalPath: uiFile, + summary, + }); + } catch (err) { + logger.debug(`cline: skipping unparseable task ${taskId}`, err); + } + } + + return sessions.sort((a, b) => b.updatedAt.getTime() - a.updatedAt.getTime()); +} + +// ── Context Extraction (shared) ───────────────────────────────────────────── + +/** + * Extract full session context for cross-tool handoff. + * Shared implementation for all three Cline-family variants. + */ +async function extractContextShared( + session: UnifiedSession, + config?: VerbosityConfig, +): Promise { + const cfg = config ?? 
getPreset('standard'); + const messages = readUiMessages(session.originalPath); + + // Build conversation messages + const allConversation = buildConversation(messages); + const recentMessages = allConversation.slice(-cfg.recentMessages); + + // Extract token usage and session notes + const sessionNotes: SessionNotes = extractTokenUsage(messages); + + // Extract reasoning highlights + const reasoning = extractReasoning(messages, cfg.thinking?.maxHighlights ?? 5); + if (reasoning.length > 0) sessionNotes.reasoning = reasoning; + + // Extract pending tasks + const pendingTasks = extractPendingTasks(messages, cfg.pendingTasks?.maxTasks ?? 5); + + // Cline's ui_messages.json doesn't track file-level tool calls, + // so filesModified and toolSummaries remain empty + const filesModified: string[] = []; + + const markdown = generateHandoffMarkdown( + session, + recentMessages, + filesModified, + pendingTasks, + [], // toolSummaries — not available from ui_messages.json + sessionNotes, + cfg, + ); + + return { + session: sessionNotes.model ? 
{ ...session, model: sessionNotes.model } : session, + recentMessages, + filesModified, + pendingTasks, + toolSummaries: [], + sessionNotes, + markdown, + }; +} + +// ── Public API: Cline ─────────────────────────────────────────────────────── + +/** Discover sessions for Cline only */ +export async function parseClineSessions(): Promise { + return parseSessionsForSource('cline'); +} + +/** Extract context from a Cline session */ +export async function extractClineContext( + session: UnifiedSession, + config?: VerbosityConfig, +): Promise { + return extractContextShared(session, config); +} + +// ── Public API: Roo Code ──────────────────────────────────────────────────── + +/** Discover sessions for Roo Code only */ +export async function parseRooCodeSessions(): Promise { + return parseSessionsForSource('roo-code'); +} + +/** Extract context from a Roo Code session (delegates to shared implementation) */ +export async function extractRooCodeContext( + session: UnifiedSession, + config?: VerbosityConfig, +): Promise { + return extractContextShared(session, config); +} + +// ── Public API: Kilo Code ─────────────────────────────────────────────────── + +/** Discover sessions for Kilo Code only */ +export async function parseKiloCodeSessions(): Promise { + return parseSessionsForSource('kilo-code'); +} + +/** Extract context from a Kilo Code session (delegates to shared implementation) */ +export async function extractKiloCodeContext( + session: UnifiedSession, + config?: VerbosityConfig, +): Promise { + return extractContextShared(session, config); +} diff --git a/src/parsers/codex.ts b/src/parsers/codex.ts index 620adea..6ebdd3b 100644 --- a/src/parsers/codex.ts +++ b/src/parsers/codex.ts @@ -1,68 +1,42 @@ import * as fs from 'fs'; import * as path from 'path'; -import * as readline from 'readline'; -import type { UnifiedSession, SessionContext, ConversationMessage, ToolUsageSummary, SessionNotes } from '../types/index.js'; +import { logger } from '../logger.js'; 
+import type { + ConversationMessage, + SessionContext, + SessionNotes, + ToolUsageSummary, + UnifiedSession, +} from '../types/index.js'; +import type { CodexMessage, CodexSessionMeta } from '../types/schemas.js'; +import { findFiles } from '../utils/fs-helpers.js'; +import { getFileStats, readJsonlFile, scanJsonlHead } from '../utils/jsonl.js'; import { generateHandoffMarkdown } from '../utils/markdown.js'; -import { SummaryCollector, shellSummary, searchSummary, mcpSummary, withResult, truncate } from '../utils/tool-summarizer.js'; - -const CODEX_SESSIONS_DIR = path.join(process.env.HOME || '~', '.codex', 'sessions'); - -interface CodexSessionMeta { - timestamp: string; - type: string; - payload?: { - id?: string; - cwd?: string; - git?: { - branch?: string; - repository_url?: string; - }; - }; -} - -interface CodexEventMsg { - timestamp: string; - type: string; - payload?: { - type?: string; - role?: string; - message?: string; - content?: Array<{ type: string; text?: string }>; - }; - message?: string; -} - -// Union type for any Codex message -type CodexMessage = CodexSessionMeta | CodexEventMsg; +import { cleanSummary, extractRepo, homeDir } from '../utils/parser-helpers.js'; +import { countDiffStats, extractStdoutTail } from '../utils/diff.js'; +import type { VerbosityConfig } from '../config/index.js'; +import { getPreset } from '../config/index.js'; +import { + extractExitCode, + mcpSummary, + SummaryCollector, + searchSummary, + shellSummary, + truncate, + withResult, +} from '../utils/tool-summarizer.js'; + +const CODEX_SESSIONS_DIR = process.env.CODEX_HOME + ? 
path.join(process.env.CODEX_HOME, 'sessions') + : path.join(homeDir(), '.codex', 'sessions'); /** * Find all Codex session files recursively */ async function findSessionFiles(): Promise { - const files: string[] = []; - - if (!fs.existsSync(CODEX_SESSIONS_DIR)) { - return files; - } - - const walkDir = (dir: string): void => { - try { - const entries = fs.readdirSync(dir, { withFileTypes: true }); - for (const entry of entries) { - const fullPath = path.join(dir, entry.name); - if (entry.isDirectory()) { - walkDir(fullPath); - } else if (entry.isFile() && entry.name.startsWith('rollout-') && entry.name.endsWith('.jsonl')) { - files.push(fullPath); - } - } - } catch { - // Skip directories we can't read - } - }; - - walkDir(CODEX_SESSIONS_DIR); - return files; + return findFiles(CODEX_SESSIONS_DIR, { + match: (entry) => entry.name.startsWith('rollout-') && entry.name.endsWith('.jsonl'), + }); } /** @@ -72,65 +46,31 @@ async function parseSessionInfo(filePath: string): Promise<{ meta: CodexSessionMeta | null; firstUserMessage: string; }> { - return new Promise((resolve) => { - const stream = fs.createReadStream(filePath, { encoding: 'utf8' }); - const rl = readline.createInterface({ input: stream, crlfDelay: Infinity }); - - let meta: CodexSessionMeta | null = null; - let firstUserMessage = ''; - let linesRead = 0; - - rl.on('line', (line) => { - linesRead++; - // Read more lines to find user message (increased from 50) - if (linesRead > 150) { - rl.close(); - stream.close(); - return; - } + let meta: CodexSessionMeta | null = null; + let firstUserMessage = ''; - try { - const parsed = JSON.parse(line); - - // Get session meta - if (parsed.type === 'session_meta' && !meta) { - meta = parsed as CodexSessionMeta; - } - - // Get first user message from event_msg - if (!firstUserMessage && parsed.type === 'event_msg' && parsed.payload?.type === 'user_message') { - firstUserMessage = parsed.payload.message || ''; - } - - // Also check for input_text or message types 
(older formats) - if (!firstUserMessage && parsed.type === 'message' && parsed.role === 'user') { - firstUserMessage = typeof parsed.content === 'string' ? parsed.content : ''; - } - } catch { - // Skip invalid lines + await scanJsonlHead(filePath, 150, (parsed) => { + const msg = parsed as Record; + + if (msg.type === 'session_meta' && !meta) { + meta = msg as unknown as CodexSessionMeta; + } + + if (!firstUserMessage && msg.type === 'event_msg') { + const payload = msg.payload as Record | undefined; + if (payload?.type === 'user_message') { + firstUserMessage = (payload.message as string) || ''; } - }); + } - rl.on('close', () => resolve({ meta, firstUserMessage })); - rl.on('error', () => resolve({ meta: null, firstUserMessage: '' })); - }); -} + if (!firstUserMessage && msg.type === 'message' && (msg as Record).role === 'user') { + firstUserMessage = typeof msg.content === 'string' ? (msg.content as string) : ''; + } -/** - * Count lines and get file size - */ -async function getFileStats(filePath: string): Promise<{ lines: number; bytes: number }> { - return new Promise((resolve) => { - const stats = fs.statSync(filePath); - let lines = 0; - - const stream = fs.createReadStream(filePath, { encoding: 'utf8' }); - const rl = readline.createInterface({ input: stream, crlfDelay: Infinity }); - - rl.on('line', () => lines++); - rl.on('close', () => resolve({ lines, bytes: stats.size })); - rl.on('error', () => resolve({ lines: 0, bytes: stats.size })); + return 'continue'; }); + + return { meta, firstUserMessage }; } /** @@ -140,35 +80,11 @@ async function getFileStats(filePath: string): Promise<{ lines: number; bytes: n function parseFilename(filename: string): { timestamp: Date; id: string } | null { const match = filename.match(/rollout-(\d{4})-(\d{2})-(\d{2})T(\d{2})-(\d{2})-(\d{2})-(.+)\.jsonl$/); if (!match) return null; - + const [, year, month, day, hour, min, sec, id] = match; const timestamp = new Date(`${year}-${month}-${day}T${hour}:${min}:${sec}Z`); - 
- return { timestamp, id }; -} -/** - * Extract repo name from git URL or cwd - */ -function extractRepoName(gitUrl?: string, cwd?: string): string { - if (gitUrl) { - // Parse: https://github.com/owner/repo.git or git@github.com:owner/repo.git - const match = gitUrl.match(/[/:]([\w-]+)\/([\w.-]+?)(?:\.git)?$/); - if (match) { - return `${match[1]}/${match[2]}`; - } - } - - if (cwd) { - // Get last 2 path components for context - const parts = cwd.split('/').filter(Boolean); - if (parts.length >= 2) { - return parts.slice(-2).join('/'); - } - return parts[parts.length - 1] || ''; - } - - return ''; + return { timestamp, id }; } /** @@ -188,18 +104,12 @@ export async function parseCodexSessions(): Promise { const stats = await getFileStats(filePath); const fileStats = fs.statSync(filePath); - // Extract cwd from meta, fallback to nothing const cwd = meta?.payload?.cwd || ''; const gitUrl = meta?.payload?.git?.repository_url; const branch = meta?.payload?.git?.branch; - const repo = extractRepoName(gitUrl, cwd); + const repo = extractRepo({ gitUrl, cwd }); - // Use first user message as summary (cleaned up) - const summary = firstUserMessage - .replace(/\n/g, ' ') - .replace(/\s+/g, ' ') - .trim() - .slice(0, 50); + const summary = cleanSummary(firstUserMessage); sessions.push({ id: parsed.id, @@ -214,7 +124,8 @@ export async function parseCodexSessions(): Promise { originalPath: filePath, summary: summary || undefined, }); - } catch { + } catch (err) { + logger.debug('codex: skipping unparseable session', filePath, err); // Skip files we can't parse } } @@ -226,31 +137,37 @@ export async function parseCodexSessions(): Promise { * Read all messages from a Codex session */ async function readAllMessages(filePath: string): Promise { - return new Promise((resolve) => { - const messages: CodexMessage[] = []; - const stream = fs.createReadStream(filePath, { encoding: 'utf8' }); - const rl = readline.createInterface({ input: stream, crlfDelay: Infinity }); - - 
rl.on('line', (line) => { - try { - messages.push(JSON.parse(line) as CodexMessage); - } catch { - // Skip invalid lines - } - }); - - rl.on('close', () => resolve(messages)); - rl.on('error', () => resolve(messages)); - }); + return readJsonlFile(filePath); } /** * Common shell tool base commands for category grouping */ const COMMON_SHELL_TOOLS = new Set([ - 'npm', 'git', 'node', 'python', 'find', 'grep', 'cat', 'ls', 'tree', - 'mkdir', 'rm', 'sed', 'awk', 'curl', 'wget', 'docker', 'make', - 'cargo', 'go', 'pip', 'pnpm', 'yarn', 'bun', 'deno', + 'npm', + 'git', + 'node', + 'python', + 'find', + 'grep', + 'cat', + 'ls', + 'tree', + 'mkdir', + 'rm', + 'sed', + 'awk', + 'curl', + 'wget', + 'docker', + 'make', + 'cargo', + 'go', + 'pip', + 'pnpm', + 'yarn', + 'bun', + 'deno', ]); /** @@ -258,94 +175,168 @@ const COMMON_SHELL_TOOLS = new Set([ */ function trackShellFileWrites(cmd: string, collector: SummaryCollector): void { const sedMatch = cmd.match(/sed\s+-i[^'"]*\s+[^'"]*\s+['"]?([^\s'"]+)/); - if (sedMatch) { collector.trackFile(sedMatch[1]); return; } + if (sedMatch) { + collector.trackFile(sedMatch[1]); + return; + } const redirectMatch = cmd.match(/>\s*['"]?([^\s;|&'"]+)/); - if (redirectMatch && !redirectMatch[1].startsWith('>')) { collector.trackFile(redirectMatch[1]); return; } + if (redirectMatch && !redirectMatch[1].startsWith('>')) { + collector.trackFile(redirectMatch[1]); + return; + } const teeMatch = cmd.match(/tee\s+['"]?([^\s;|&'"]+)/); - if (teeMatch) { collector.trackFile(teeMatch[1]); return; } + if (teeMatch) { + collector.trackFile(teeMatch[1]); + return; + } const mvCpMatch = cmd.match(/^(mv|cp)\s+.*\s+['"]?([^\s;|&'"]+)$/); - if (mvCpMatch) { collector.trackFile(mvCpMatch[2]); } + if (mvCpMatch) { + collector.trackFile(mvCpMatch[2]); + } } /** * Extract tool usage summaries and files modified using shared SummaryCollector */ -function extractToolData(messages: CodexMessage[]): { summaries: ToolUsageSummary[]; filesModified: string[] } { - 
const collector = new SummaryCollector(); +function extractToolData(messages: CodexMessage[], config?: VerbosityConfig): { summaries: ToolUsageSummary[]; filesModified: string[] } { + const collector = new SummaryCollector(config); const outputsById = new Map(); // First pass: collect function_call_output and custom_tool_call_output by call_id for (const msg of messages) { if (msg.type !== 'response_item') continue; - const payload = (msg as any).payload; - if ((payload?.type === 'function_call_output' || payload?.type === 'custom_tool_call_output') && payload.call_id && payload.output) { - outputsById.set(payload.call_id, typeof payload.output === 'string' ? payload.output : JSON.stringify(payload.output)); + const payload = msg.payload; + if ( + (payload?.type === 'function_call_output' || payload?.type === 'custom_tool_call_output') && + payload.call_id && + payload.output + ) { + outputsById.set( + payload.call_id, + typeof payload.output === 'string' ? payload.output : JSON.stringify(payload.output), + ); } } // Second pass: extract tool calls for (const msg of messages) { - const payload = (msg as any).payload; - if (!payload) continue; + if (msg.type === 'response_item') { + const payload = msg.payload; + if (!payload) continue; + + // function_call + if (payload.type === 'function_call' && payload.arguments) { + try { + const args = JSON.parse(payload.arguments); + const name = payload.name || ''; + const output = payload.call_id ? outputsById.get(payload.call_id) : undefined; + + if (name === 'exec_command' || name === 'shell_command') { + const cmd = String(args.cmd || args.command || ''); + if (!cmd) continue; + const baseCmd = cmd.trim().split(/\s+/)[0]; + const category = COMMON_SHELL_TOOLS.has(baseCmd) ? baseCmd : 'shell'; + const exitCode = extractExitCode(output); + const errored = exitCode !== undefined && exitCode !== 0; + const stdoutTail = output ? 
extractStdoutTail(output, 5) : undefined; + collector.add(category, shellSummary(cmd, output), { + data: { + category: 'shell', + command: cmd, + ...(exitCode !== undefined ? { exitCode } : {}), + ...(stdoutTail ? { stdoutTail } : {}), + ...(errored ? { errored } : {}), + }, + isError: errored, + }); + trackShellFileWrites(cmd, collector); + } else if (name === 'write_stdin') { + collector.add('write_stdin', `stdin: "${truncate(String(args.input || args.data || ''), 60)}"`); + } else if (['read_mcp_resource', 'list_mcp_resources', 'list_mcp_resource_templates'].includes(name)) { + collector.add('mcp-resource', `${name}: ${truncate(String(args.uri || args.server_label || '(all)'), 60)}`, { + data: { category: 'mcp', toolName: name, params: String(args.uri || args.server_label || '') }, + }); + } else if (name === 'request_user_input') { + const question = truncate(String(args.prompt || args.message || ''), 80); + collector.add('user-input', `ask: "${question}"`, { + data: { category: 'ask', question }, + }); + } else if (name === 'update_plan') { + collector.add('plan', `plan: "${truncate(String(args.explanation || ''), 60)}"`); + } else if (name === 'view_image') { + collector.add('view_image', `image: ${truncate(String(args.path || args.url || ''), 60)}`); + } else if (name.startsWith('mcp__') || name.includes('-')) { + const params = JSON.stringify(args).slice(0, 100); + collector.add(name, mcpSummary(name, params, output), { + data: { + category: 'mcp', + toolName: name, + params, + ...(output ? { result: output.slice(0, 100) } : {}), + }, + }); + } else { + collector.add(name, withResult(`${name}(${JSON.stringify(args).slice(0, 80)})`, output), { + data: { + category: 'mcp', + toolName: name, + params: JSON.stringify(args).slice(0, 100), + ...(output ? 
{ result: output.slice(0, 100) } : {}), + }, + }); + } + } catch (err) { + logger.debug('codex: skipping unparseable tool arguments', err); + } + } - // function_call - if (msg.type === 'response_item' && payload.type === 'function_call' && payload.arguments) { - try { - const args = JSON.parse(payload.arguments); - const name = payload.name as string; - const output = payload.call_id ? outputsById.get(payload.call_id) : undefined; - - if (name === 'exec_command' || name === 'shell_command') { - const cmd = (args.cmd || args.command || '') as string; - if (!cmd) continue; - const baseCmd = cmd.trim().split(/\s+/)[0]; - const category = COMMON_SHELL_TOOLS.has(baseCmd) ? baseCmd : 'shell'; - collector.add(category, shellSummary(cmd, output)); - trackShellFileWrites(cmd, collector); - } else if (name === 'write_stdin') { - collector.add('write_stdin', `stdin: "${truncate((args.input || args.data || '') as string, 60)}"`); - } else if (['read_mcp_resource', 'list_mcp_resources', 'list_mcp_resource_templates'].includes(name)) { - collector.add('mcp-resource', `${name}: ${truncate((args.uri || args.server_label || '(all)') as string, 60)}`); - } else if (name === 'request_user_input') { - collector.add('user-input', `ask: "${truncate((args.prompt || args.message || '') as string, 60)}"`); - } else if (name === 'update_plan') { - collector.add('plan', `plan: "${truncate((args.explanation || '') as string, 60)}"`); - } else if (name === 'view_image') { - collector.add('view_image', `image: ${truncate((args.path || args.url || '') as string, 60)}`); - } else if (name.startsWith('mcp__') || name.includes('-')) { - collector.add(name, mcpSummary(name, JSON.stringify(args).slice(0, 100), output)); + // custom_tool_call (e.g. 
apply_patch) + if (payload.type === 'custom_tool_call' && payload.name) { + const name = payload.name; + const input = payload.input || ''; + if (name === 'apply_patch') { + const fileMatches = input.match(/\*\*\* (?:Add|Update|Delete) File: (.+)/g) || []; + const files = fileMatches.map((m: string) => m.replace(/^\*\*\* (?:Add|Update|Delete) File: /, '')); + const fileList = files.length > 0 ? files.slice(0, 3).join(', ') : '(patch)'; + // Capture the patch content as diff (Codex patches are in unified diff-like format) + const diff = input.length > 0 ? input : undefined; + const diffStats = diff ? countDiffStats(diff) : undefined; + collector.add('apply_patch', `patch: ${truncate(fileList, 70)}`, { + data: { + category: 'edit', + filePath: files[0] || '(multiple)', + ...(diff ? { diff } : {}), + ...(diffStats ? { diffStats } : {}), + }, + filePath: files[0], + isWrite: true, + }); + for (const f of files) collector.trackFile(f); } else { - collector.add(name, withResult(`${name}(${JSON.stringify(args).slice(0, 80)})`, output)); + collector.add(name, `${name}: ${truncate(input, 80)}`); } - } catch { /* skip unparseable arguments */ } - } - - // custom_tool_call (e.g. apply_patch) - if (msg.type === 'response_item' && payload.type === 'custom_tool_call' && payload.name) { - const name = payload.name as string; - const input = (payload.input || '') as string; - if (name === 'apply_patch') { - const fileMatches = input.match(/\*\*\* (?:Add|Update|Delete) File: (.+)/g) || []; - const files = fileMatches.map((m: string) => m.replace(/^\*\*\* (?:Add|Update|Delete) File: /, '')); - const fileList = files.length > 0 ? 
files.slice(0, 3).join(', ') : '(patch)'; - collector.add('apply_patch', `patch: ${truncate(fileList, 70)}`); - for (const f of files) collector.trackFile(f); - } else { - collector.add(name, `${name}: ${truncate(input, 80)}`); } - } - - // web_search_call - if (msg.type === 'response_item' && payload.type === 'web_search_call') { - collector.add('web_search', searchSummary((payload.action?.query || payload.action?.queries?.[0] || '') as string)); - } - // Task lifecycle events - if (msg.type === 'event_msg') { + // web_search_call + if (payload.type === 'web_search_call') { + const query = String(payload.action?.query || payload.action?.queries?.[0] || ''); + collector.add('web_search', searchSummary(query), { + data: { category: 'search', query }, + }); + } + } else if (msg.type === 'event_msg') { + // Task lifecycle events + const payload = msg.payload; + if (!payload) continue; if (payload.type === 'task_started') { - collector.add('task', `task: started "${truncate(payload.message || '', 60)}"`); + const desc = truncate(payload.message || '', 60); + collector.add('task', `task: started "${desc}"`, { + data: { category: 'task', description: desc }, + }); } else if (payload.type === 'task_complete') { - collector.add('task', 'task: completed'); + collector.add('task', 'task: completed', { + data: { category: 'task', description: 'completed' }, + }); } } } @@ -362,17 +353,16 @@ function extractSessionNotes(messages: CodexMessage[]): SessionNotes { for (const msg of messages) { // Model from turn_context - if ((msg as any).type === 'turn_context') { - const payload = (msg as any).payload; - if (payload?.model && !notes.model) notes.model = payload.model; + if (msg.type === 'turn_context') { + if (msg.payload?.model && !notes.model) notes.model = msg.payload.model; } if (msg.type !== 'event_msg') continue; - const payload = (msg as any).payload; + const payload = msg.payload; if (!payload) continue; if (payload.type === 'agent_reasoning' && reasoning.length < 5) { 
- const text = (payload.message || '') as string; + const text = payload.message || ''; if (text.length > 20) { const firstLine = text.split(/[.\n]/)[0]?.trim(); if (firstLine) reasoning.push(truncate(firstLine, 200)); @@ -382,6 +372,11 @@ function extractSessionNotes(messages: CodexMessage[]): SessionNotes { // Token usage (take last value — cumulative) if (payload.type === 'token_count') { notes.tokenUsage = { input: payload.input_tokens || 0, output: payload.output_tokens || 0 }; + // Codex may report reasoning tokens separately (OpenAI o-series models) + const reasoningTokens = (payload as Record).reasoning_output_tokens; + if (typeof reasoningTokens === 'number' && reasoningTokens > 0) { + notes.thinkingTokens = reasoningTokens; + } } } @@ -392,10 +387,11 @@ function extractSessionNotes(messages: CodexMessage[]): SessionNotes { /** * Extract context from a Codex session for cross-tool continuation */ -export async function extractCodexContext(session: UnifiedSession): Promise { +export async function extractCodexContext(session: UnifiedSession, config?: VerbosityConfig): Promise { + const resolvedConfig = config ?? 
getPreset('standard'); const messages = await readAllMessages(session.originalPath); - - const { summaries: toolSummaries, filesModified } = extractToolData(messages); + + const { summaries: toolSummaries, filesModified } = extractToolData(messages, resolvedConfig); const sessionNotes = extractSessionNotes(messages); const pendingTasks: string[] = []; @@ -405,68 +401,81 @@ export async function extractCodexContext(session: UnifiedSession): Promise c.type === 'input_text' && c.text) - .map((c: { type: string; text?: string }) => c.text) - .join('\n'); - // Skip system-injected content (AGENTS.md instructions, environment_context, permissions) - if (text && !text.startsWith('') && !text.startsWith(' (c.type === 'output_text' || c.type === 'text') && c.text) - .map((c: { type: string; text?: string }) => c.text) - .join('\n'); - if (text) { - responseItemEntries.push({ role: 'assistant', content: text, timestamp: new Date(msg.timestamp) }); + } else if (msg.type === 'response_item') { + const payload = msg.payload; + if (payload?.role === 'user' && payload.type === 'message') { + const contentParts = payload.content || []; + const text = contentParts + .filter((c) => c.type === 'input_text' && c.text) + .map((c) => c.text) + .join('\n'); + // Skip system-injected content (AGENTS.md instructions, environment_context, permissions) + if ( + text && + !text.startsWith('') && + !text.startsWith(' (c.type === 'output_text' || c.type === 'text') && c.text) + .map((c) => c.text) + .join('\n'); + if (text) { + responseItemEntries.push({ role: 'assistant', content: text, timestamp: new Date(msg.timestamp) }); + } } + // Skip payload.type === 'reasoning' (chain-of-thought, not a message) + // Skip payload.role === 'developer' (system instructions) } - // Skip response_item with payload.type === 'reasoning' (chain-of-thought, not a message) - // Skip response_item with payload.role === 'developer' (system instructions) } // Prefer response_item entries (newer, richer format) when 
available; fall back to event_msg - const hasResponseItems = responseItemEntries.some(m => m.role === 'user') || responseItemEntries.some(m => m.role === 'assistant'); + const hasResponseItems = + responseItemEntries.some((m) => m.role === 'user') || responseItemEntries.some((m) => m.role === 'assistant'); const allMessages = hasResponseItems ? responseItemEntries : eventMsgEntries; - // Build a balanced tail: keep the last 10 messages but ensure user messages aren't lost. + // Build a balanced tail: keep the last N messages but ensure user messages aren't lost. // Codex sessions can have many consecutive assistant messages (status updates, subagent reports). let trimmed: ConversationMessage[]; - const tail = allMessages.slice(-10); - const hasUser = tail.some(m => m.role === 'user'); - if (hasUser || allMessages.length <= 10) { + const tail = allMessages.slice(-resolvedConfig.recentMessages); + const hasUser = tail.some((m) => m.role === 'user'); + if (hasUser || allMessages.length <= resolvedConfig.recentMessages) { trimmed = tail; } else { - // Include the last user message + everything after it, capped at 10 + // Include the last user message + everything after it, capped at recentMessages let lastUserIdx = -1; for (let i = allMessages.length - 1; i >= 0; i--) { - if (allMessages[i].role === 'user') { lastUserIdx = i; break; } + if (allMessages[i].role === 'user') { + lastUserIdx = i; + break; + } } if (lastUserIdx >= 0) { - trimmed = allMessages.slice(lastUserIdx, lastUserIdx + 10); + trimmed = allMessages.slice(lastUserIdx, lastUserIdx + resolvedConfig.recentMessages); } else { trimmed = tail; } } // Generate markdown for injection - const markdown = generateHandoffMarkdown(session, trimmed, filesModified, pendingTasks, toolSummaries, sessionNotes); + const markdown = generateHandoffMarkdown(session, trimmed, filesModified, pendingTasks, toolSummaries, sessionNotes, resolvedConfig); return { session, diff --git a/src/parsers/copilot.ts 
b/src/parsers/copilot.ts index 035094a..4cc4d15 100644 --- a/src/parsers/copilot.ts +++ b/src/parsers/copilot.ts @@ -1,76 +1,24 @@ import * as fs from 'fs'; import * as path from 'path'; -import * as readline from 'readline'; import YAML from 'yaml'; -import type { UnifiedSession, SessionContext, ConversationMessage } from '../types/index.js'; +import { logger } from '../logger.js'; +import type { ConversationMessage, SessionContext, ToolUsageSummary, UnifiedSession } from '../types/index.js'; +import { classifyToolName } from '../types/tool-names.js'; +import type { CopilotEvent, CopilotWorkspace } from '../types/schemas.js'; +import { listSubdirectories } from '../utils/fs-helpers.js'; +import { getFileStats, readJsonlFile, scanJsonlHead } from '../utils/jsonl.js'; import { generateHandoffMarkdown } from '../utils/markdown.js'; +import { homeDir } from '../utils/parser-helpers.js'; +import type { VerbosityConfig } from '../config/index.js'; +import { getPreset } from '../config/index.js'; -const COPILOT_SESSIONS_DIR = path.join(process.env.HOME || '~', '.copilot', 'session-state'); - -interface CopilotWorkspace { - id: string; - cwd: string; - git_root?: string; - repository?: string; - branch?: string; - summary?: string; - summary_count?: number; - created_at: string; - updated_at: string; -} - -interface CopilotEvent { - type: string; - id: string; - timestamp: string; - parentId?: string | null; - data?: { - sessionId?: string; - selectedModel?: string; - content?: string; - transformedContent?: string; - messageId?: string; - toolRequests?: Array<{ - name: string; - arguments?: Record; - }>; - context?: { - cwd?: string; - gitRoot?: string; - branch?: string; - repository?: string; - }; - }; -} +const COPILOT_SESSIONS_DIR = path.join(homeDir(), '.copilot', 'session-state'); /** * Find all Copilot session directories */ async function findSessionDirs(): Promise { - const dirs: string[] = []; - - if (!fs.existsSync(COPILOT_SESSIONS_DIR)) { - return dirs; - } - 
- try { - const entries = fs.readdirSync(COPILOT_SESSIONS_DIR, { withFileTypes: true }); - for (const entry of entries) { - if (entry.isDirectory()) { - const sessionDir = path.join(COPILOT_SESSIONS_DIR, entry.name); - const workspaceFile = path.join(sessionDir, 'workspace.yaml'); - - // Must have workspace.yaml to be a valid session - if (fs.existsSync(workspaceFile)) { - dirs.push(sessionDir); - } - } - } - } catch { - // Skip if we can't read the directory - } - - return dirs; + return listSubdirectories(COPILOT_SESSIONS_DIR).filter((dir) => fs.existsSync(path.join(dir, 'workspace.yaml'))); } /** @@ -80,61 +28,28 @@ function parseWorkspace(workspacePath: string): CopilotWorkspace | null { try { const content = fs.readFileSync(workspacePath, 'utf8'); return YAML.parse(content) as CopilotWorkspace; - } catch { + } catch (err) { + logger.debug('copilot: failed to parse workspace YAML', workspacePath, err); return null; } } -/** - * Count lines and get file size for events.jsonl - */ -async function getEventsStats(eventsPath: string): Promise<{ lines: number; bytes: number }> { - if (!fs.existsSync(eventsPath)) { - return { lines: 0, bytes: 0 }; - } - - return new Promise((resolve) => { - const stats = fs.statSync(eventsPath); - let lines = 0; - - const stream = fs.createReadStream(eventsPath, { encoding: 'utf8' }); - const rl = readline.createInterface({ input: stream, crlfDelay: Infinity }); - - rl.on('line', () => lines++); - rl.on('close', () => resolve({ lines, bytes: stats.size })); - rl.on('error', () => resolve({ lines: 0, bytes: stats.size })); - }); -} - /** * Extract model from events.jsonl */ async function extractModel(eventsPath: string): Promise { - if (!fs.existsSync(eventsPath)) { - return undefined; - } - - return new Promise((resolve) => { - const stream = fs.createReadStream(eventsPath, { encoding: 'utf8' }); - const rl = readline.createInterface({ input: stream, crlfDelay: Infinity }); - - rl.on('line', (line) => { - try { - const event = 
JSON.parse(line) as CopilotEvent; - if (event.type === 'session.start' && event.data?.selectedModel) { - rl.close(); - stream.close(); - resolve(event.data.selectedModel); - return; - } - } catch { - // Skip invalid lines - } - }); + let model: string | undefined; - rl.on('close', () => resolve(undefined)); - rl.on('error', () => resolve(undefined)); + await scanJsonlHead(eventsPath, 50, (parsed) => { + const event = parsed as CopilotEvent; + if (event.type === 'session.start' && event.data?.selectedModel) { + model = event.data.selectedModel; + return 'stop'; + } + return 'continue'; }); + + return model; } /** @@ -148,14 +63,13 @@ export async function parseCopilotSessions(): Promise { try { const workspacePath = path.join(sessionDir, 'workspace.yaml'); const eventsPath = path.join(sessionDir, 'events.jsonl'); - + const workspace = parseWorkspace(workspacePath); if (!workspace) continue; - const stats = await getEventsStats(eventsPath); + const stats = fs.existsSync(eventsPath) ? await getFileStats(eventsPath) : { lines: 0, bytes: 0 }; const model = await extractModel(eventsPath); - // Parse summary - handle multiline YAML let summary = workspace.summary || ''; if (summary.startsWith('|')) { summary = summary.replace(/^\|\n?/, '').split('\n')[0]; @@ -175,56 +89,28 @@ export async function parseCopilotSessions(): Promise { summary: summary.slice(0, 60), model, }); - } catch { + } catch (err) { + logger.debug('copilot: skipping unparseable session', sessionDir, err); // Skip sessions we can't parse } } - // Filter out empty sessions and sort by update time - return sessions - .filter(s => s.bytes > 0) - .sort((a, b) => b.updatedAt.getTime() - a.updatedAt.getTime()); -} - -/** - * Read all events from a Copilot session - */ -async function readAllEvents(eventsPath: string): Promise { - if (!fs.existsSync(eventsPath)) { - return []; - } - - return new Promise((resolve) => { - const events: CopilotEvent[] = []; - const stream = fs.createReadStream(eventsPath, { 
encoding: 'utf8' }); - const rl = readline.createInterface({ input: stream, crlfDelay: Infinity }); - - rl.on('line', (line) => { - try { - events.push(JSON.parse(line) as CopilotEvent); - } catch { - // Skip invalid lines - } - }); - - rl.on('close', () => resolve(events)); - rl.on('error', () => resolve(events)); - }); + return sessions.filter((s) => s.bytes > 0).sort((a, b) => b.updatedAt.getTime() - a.updatedAt.getTime()); } /** * Extract context from a Copilot session for cross-tool continuation */ -export async function extractCopilotContext(session: UnifiedSession): Promise { +export async function extractCopilotContext(session: UnifiedSession, config?: VerbosityConfig): Promise { + const resolvedConfig = config ?? getPreset('standard'); const eventsPath = path.join(session.originalPath, 'events.jsonl'); - const events = await readAllEvents(eventsPath); - + const events = await readJsonlFile(eventsPath); + const recentMessages: ConversationMessage[] = []; - const filesModified: string[] = []; const pendingTasks: string[] = []; // Process events to extract conversation - for (const event of events.slice(-100)) { // Last 100 events + for (const event of events.slice(-resolvedConfig.recentMessages * 2)) { if (event.type === 'user.message') { const content = event.data?.content || event.data?.transformedContent || ''; if (content) { @@ -243,18 +129,17 @@ export async function extractCopilotContext(session: UnifiedSession): Promise 0 - ? toolRequests.map(t => ({ name: t.name, arguments: t.arguments })) - : undefined, + toolCalls: + toolRequests.length > 0 ? 
toolRequests.map((t) => ({ name: t.name, arguments: t.arguments })) : undefined, }); } else if (toolRequests.length > 0) { // Assistant message with only tool calls (no text content) - const toolNames = toolRequests.map(t => t.name).join(', '); + const toolNames = toolRequests.map((t) => t.name).join(', '); recentMessages.push({ role: 'assistant', content: `[Used tools: ${toolNames}]`, timestamp: new Date(event.timestamp), - toolCalls: toolRequests.map(t => ({ name: t.name, arguments: t.arguments })), + toolCalls: toolRequests.map((t) => ({ name: t.name, arguments: t.arguments })), }); } } @@ -274,17 +159,112 @@ export async function extractCopilotContext(session: UnifiedSession): Promise }>(); + const files = new Set(); + const defaultSampleLimit = config.mcp.maxSamplesPerNamespace; + + for (const event of events) { + if (event.type !== 'assistant.message') continue; + const toolRequests = event.data?.toolRequests || []; + for (const tr of toolRequests) { + const name = tr.name || 'unknown'; + const category = classifyToolName(name); + if (!category) continue; // skip internal tools + + if (!toolCounts.has(name)) { + toolCounts.set(name, { count: 0, samples: [] }); + } + const entry = toolCounts.get(name)!; + entry.count++; + + const args = tr.arguments || {}; + const fp = (args.path as string) || (args.file_path as string) || ''; + + // Track files from write/edit tool requests + if ((category === 'write' || category === 'edit') && fp) { + files.add(fp); + } + + if (entry.samples.length < defaultSampleLimit) { + const data = buildCopilotSampleData(category, name, args); + const argsStr = Object.keys(args).length > 0 ? JSON.stringify(args).slice(0, 100) : ''; + entry.samples.push({ + summary: argsStr ? 
`${name}(${argsStr})` : name, + data, + }); + } + } + } + + const summaries = Array.from(toolCounts.entries()).map(([name, { count, samples }]) => ({ + name, + count, + samples, + })); + + return { summaries, filesModified: Array.from(files) }; +} + +/** Build the correct StructuredToolSample for a Copilot tool request based on its classified category */ +function buildCopilotSampleData( + category: import('../types/tool-names.js').ToolSampleCategory, + name: string, + args: Record, +): import('../types/index.js').StructuredToolSample { + const fp = (args.path as string) || (args.file_path as string) || ''; + switch (category) { + case 'shell': + return { category: 'shell', command: (args.command as string) || (args.cmd as string) || '' }; + case 'read': + return { category: 'read', filePath: fp }; + case 'write': + return { category: 'write', filePath: fp }; + case 'edit': + return { category: 'edit', filePath: fp }; + case 'grep': + return { + category: 'grep', + pattern: (args.pattern as string) || (args.query as string) || '', + ...(fp ? { targetPath: fp } : {}), + }; + case 'glob': + return { category: 'glob', pattern: (args.pattern as string) || fp }; + case 'search': + return { category: 'search', query: (args.query as string) || '' }; + case 'fetch': + return { category: 'fetch', url: (args.url as string) || '' }; + case 'task': + return { category: 'task', description: (args.description as string) || '' }; + case 'ask': + return { category: 'ask', question: ((args.question as string) || '').slice(0, 80) }; + default: + return { + category: 'mcp', + toolName: name, + ...(Object.keys(args).length > 0 ? 
{ params: JSON.stringify(args).slice(0, 100) } : {}), + }; + } +} diff --git a/src/parsers/crush.ts b/src/parsers/crush.ts new file mode 100644 index 0000000..82a340f --- /dev/null +++ b/src/parsers/crush.ts @@ -0,0 +1,245 @@ +import { execFileSync } from 'child_process'; +import * as fs from 'fs'; +import * as path from 'path'; +import { logger } from '../logger.js'; +import type { + ConversationMessage, + SessionContext, + SessionNotes, + UnifiedSession, +} from '../types/index.js'; +import type { SessionSource } from '../types/tool-names.js'; +import { generateHandoffMarkdown } from '../utils/markdown.js'; +import { cleanSummary, homeDir } from '../utils/parser-helpers.js'; +import type { VerbosityConfig } from '../config/index.js'; +import { getPreset } from '../config/index.js'; + +// 'crush' is not yet in TOOL_NAMES — use a type assertion until registration is added. +const CRUSH_SOURCE: SessionSource = 'crush'; + +const CRUSH_DB_PATH = path.join(homeDir(), '.crush', 'crush.db'); + +// ── SQLite CLI Helper ─────────────────────────────────────────────────────── + +/** Row shape returned by the session-listing query */ +interface CrushSessionRow { + id: string; + title: string | null; + prompt_tokens: number | null; + completion_tokens: number | null; + cost: number | null; + first_msg_at: number | null; + last_msg_at: number | null; + msg_count: number; +} + +/** Row shape returned by the message query */ +interface CrushMessageRow { + role: string; + parts: string; + created_at: number; + model: string | null; + provider: string | null; +} + +/** + * Execute a read-only SQLite query via the `sqlite3` CLI and return parsed JSON rows. + * Uses execFileSync (no shell) to avoid injection risks with paths. 
+ */ +function querySqlite>(dbPath: string, query: string): T[] { + try { + const raw = execFileSync('sqlite3', [dbPath, '-json', query], { + encoding: 'utf8', + timeout: 5000, + stdio: ['pipe', 'pipe', 'pipe'], + }); + if (!raw.trim()) return []; + return JSON.parse(raw) as T[]; + } catch (err) { + logger.debug('crush: sqlite3 query failed', dbPath, err); + return []; + } +} + +/** + * Check if the Crush database exists and the sqlite3 binary is available. + */ +function isCrushAvailable(): boolean { + if (!fs.existsSync(CRUSH_DB_PATH)) return false; + try { + execFileSync('sqlite3', ['--version'], { + encoding: 'utf8', + timeout: 2000, + stdio: ['pipe', 'pipe', 'pipe'], + }); + return true; + } catch { + return false; + } +} + +// ── Parts Parsing ─────────────────────────────────────────────────────────── + +interface CrushPart { + type: string; + data?: { text?: string }; +} + +/** + * Extract plain text from a Crush message's `parts` JSON column. + * Format: [{"type": "text", "data": {"text": "..."}}] + */ +function extractTextFromParts(partsJson: string): string { + try { + const parts: CrushPart[] = JSON.parse(partsJson); + if (!Array.isArray(parts)) return ''; + return parts + .filter((p) => p.type === 'text' && p.data?.text) + .map((p) => p.data!.text!) + .join('\n'); + } catch { + return ''; + } +} + +/** + * Escape a string for safe embedding in a SQL single-quoted literal. + */ +function sqlEscape(value: string): string { + return value.replace(/'/g, "''"); +} + +// ── Session Listing ───────────────────────────────────────────────────────── + +/** + * Get the text of the first user message in a session (for summary fallback). 
+ */ +function getFirstUserMessage(sessionId: string): string { + const rows = querySqlite( + CRUSH_DB_PATH, + `SELECT role, parts, created_at, model, provider FROM messages WHERE session_id = '${sqlEscape(sessionId)}' AND role = 'user' ORDER BY created_at ASC LIMIT 1`, + ); + if (rows.length === 0) return ''; + return extractTextFromParts(rows[0].parts); +} + +/** + * Parse all Crush sessions from the SQLite database. + */ +export async function parseCrushSessions(): Promise { + if (!isCrushAvailable()) return []; + + const rows = querySqlite( + CRUSH_DB_PATH, + `SELECT s.id, s.title, s.prompt_tokens, s.completion_tokens, s.cost, MIN(m.created_at) AS first_msg_at, MAX(m.created_at) AS last_msg_at, COUNT(m.rowid) AS msg_count FROM sessions s LEFT JOIN messages m ON m.session_id = s.id GROUP BY s.id ORDER BY last_msg_at DESC`, + ); + + const sessions: UnifiedSession[] = []; + + for (const row of rows) { + if (!row.msg_count || row.msg_count === 0) continue; + + let summary = row.title || ''; + if (!summary) { + summary = getFirstUserMessage(row.id); + } + summary = cleanSummary(summary); + if (!summary) continue; + + const createdAt = row.first_msg_at ? new Date(row.first_msg_at) : new Date(); + const updatedAt = row.last_msg_at ? new Date(row.last_msg_at) : createdAt; + + sessions.push({ + id: row.id, + source: CRUSH_SOURCE, + cwd: '', + lines: row.msg_count, + bytes: 0, // SQLite — no per-session file size + createdAt, + updatedAt, + originalPath: CRUSH_DB_PATH, + summary, + }); + } + + return sessions; +} + +// ── Context Extraction ────────────────────────────────────────────────────── + +/** + * Extract context from a Crush session for cross-tool continuation. + */ +export async function extractCrushContext( + session: UnifiedSession, + config?: VerbosityConfig, +): Promise { + const resolvedConfig = config ?? 
getPreset('standard'); + + const msgRows = querySqlite( + CRUSH_DB_PATH, + `SELECT role, parts, created_at, model, provider FROM messages WHERE session_id = '${sqlEscape(session.id)}' ORDER BY created_at ASC`, + ); + + const allMessages: ConversationMessage[] = []; + let model: string | undefined; + + for (const row of msgRows) { + const content = extractTextFromParts(row.parts); + if (!content) continue; + + const role: 'user' | 'assistant' = row.role === 'user' ? 'user' : 'assistant'; + // Crush stores created_at as millisecond epoch + const timestamp = new Date(row.created_at); + + allMessages.push({ role, content, timestamp }); + + if (!model && row.model && role === 'assistant') { + model = row.model; + } + } + + // Get token usage from the session row + const sessionRows = querySqlite<{ prompt_tokens: number | null; completion_tokens: number | null }>( + CRUSH_DB_PATH, + `SELECT prompt_tokens, completion_tokens FROM sessions WHERE id = '${sqlEscape(session.id)}'`, + ); + + let tokenInput = 0; + let tokenOutput = 0; + if (sessionRows.length > 0) { + tokenInput = sessionRows[0].prompt_tokens ?? 0; + tokenOutput = sessionRows[0].completion_tokens ?? 0; + } + + const hasNotes = model || tokenInput || tokenOutput; + const sessionNotes: SessionNotes | undefined = hasNotes + ? { + ...(model ? { model } : {}), + ...(tokenInput || tokenOutput ? { tokenUsage: { input: tokenInput, output: tokenOutput } } : {}), + } + : undefined; + + const trimmed = allMessages.slice(-resolvedConfig.recentMessages); + const enrichedSession = model ? 
{ ...session, model } : session; + + const markdown = generateHandoffMarkdown( + enrichedSession, + trimmed, + [], // filesModified — not tracked in Crush's schema + [], // pendingTasks — not tracked in Crush's schema + [], // toolSummaries — not tracked in Crush's schema + sessionNotes, + resolvedConfig, + ); + + return { + session: enrichedSession, + recentMessages: trimmed, + filesModified: [], + pendingTasks: [], + toolSummaries: [], + sessionNotes, + markdown, + }; +} diff --git a/src/parsers/cursor.ts b/src/parsers/cursor.ts new file mode 100644 index 0000000..6358dd5 --- /dev/null +++ b/src/parsers/cursor.ts @@ -0,0 +1,222 @@ +import * as fs from 'fs'; +import * as path from 'path'; +import { logger } from '../logger.js'; +import type { ConversationMessage, SessionContext, SessionNotes, UnifiedSession } from '../types/index.js'; +import type { CursorTranscriptLine } from '../types/schemas.js'; +import { cleanUserQueryText, isRealUserMessage, isSystemContent } from '../utils/content.js'; +import { findFiles } from '../utils/fs-helpers.js'; +import { getFileStats, readJsonlFile, scanJsonlHead } from '../utils/jsonl.js'; +import { generateHandoffMarkdown } from '../utils/markdown.js'; +import { cleanSummary, extractRepoFromCwd, homeDir } from '../utils/parser-helpers.js'; +import { cwdFromSlug } from '../utils/slug.js'; +import type { VerbosityConfig } from '../config/index.js'; +import { getPreset } from '../config/index.js'; +import { + type AnthropicMessage, + extractAnthropicToolData, + extractThinkingHighlights, +} from '../utils/tool-extraction.js'; + +const CURSOR_PROJECTS_DIR = path.join(homeDir(), '.cursor', 'projects'); + +/** + * Find all Cursor agent-transcript JSONL files. 
+ * Structure: ~/.cursor/projects//agent-transcripts//.jsonl + */ +async function findTranscriptFiles(): Promise { + if (!fs.existsSync(CURSOR_PROJECTS_DIR)) return []; + + const files: string[] = []; + try { + const projectDirs = fs.readdirSync(CURSOR_PROJECTS_DIR, { withFileTypes: true }); + for (const projectDir of projectDirs) { + if (!projectDir.isDirectory()) continue; + const transcriptsDir = path.join(CURSOR_PROJECTS_DIR, projectDir.name, 'agent-transcripts'); + const found = findFiles(transcriptsDir, { + match: (entry, fullPath) => entry.name.endsWith('.jsonl') && fullPath.includes('agent-transcripts'), + maxDepth: 2, + }); + files.push(...found); + } + } catch (err) { + logger.debug('cursor: cannot read base directory', CURSOR_PROJECTS_DIR, err); + // Skip if base dir can't be read + } + return files; +} + +/** + * Extract the project slug from a transcript file path. + */ +function getProjectSlug(filePath: string): string { + const parts = filePath.split(path.sep); + const projectsIdx = parts.indexOf('projects'); + if (projectsIdx >= 0 && projectsIdx + 1 < parts.length) { + return parts[projectsIdx + 1]; + } + return ''; +} + +function getSessionId(filePath: string): string { + return path.basename(filePath, '.jsonl'); +} + +/** + * Parse first few messages for summary + */ +async function parseSessionInfo(filePath: string): Promise<{ + firstUserMessage: string; + lineCount: number; + bytes: number; +}> { + let firstUserMessage = ''; + + // Stream-count lines without full JSON parse (fast) + const stats = await getFileStats(filePath); + + // Scan head for first user message + await scanJsonlHead(filePath, 50, (parsed) => { + if (firstUserMessage) return 'continue'; + const line = parsed as CursorTranscriptLine; + if (line.role === 'user') { + for (const block of line.message?.content || []) { + if (block.type === 'text' && block.text) { + const cleaned = cleanUserQueryText(block.text); + if (isRealUserMessage(cleaned)) { + firstUserMessage = cleaned; + 
return 'stop'; + } + } + } + } + return 'continue'; + }); + + return { firstUserMessage, lineCount: stats.lines, bytes: stats.bytes }; +} + +/** + * Parse all Cursor sessions + */ +export async function parseCursorSessions(): Promise { + const files = await findTranscriptFiles(); + const sessions: UnifiedSession[] = []; + + for (const filePath of files) { + try { + const { firstUserMessage, lineCount, bytes } = await parseSessionInfo(filePath); + const fileStats = fs.statSync(filePath); + const slug = getProjectSlug(filePath); + const cwd = cwdFromSlug(slug); + + const summary = cleanSummary(firstUserMessage); + + sessions.push({ + id: getSessionId(filePath), + source: 'cursor', + cwd, + repo: extractRepoFromCwd(cwd), + lines: lineCount, + bytes, + createdAt: fileStats.birthtime, + updatedAt: fileStats.mtime, + originalPath: filePath, + summary: summary || undefined, + }); + } catch (err) { + logger.debug('cursor: skipping unparseable session', filePath, err); + // Skip files we can't parse + } + } + + return sessions.filter((s) => s.bytes > 100).sort((a, b) => b.updatedAt.getTime() - a.updatedAt.getTime()); +} + +/** + * Extract context from a Cursor session for cross-tool continuation + */ +export async function extractCursorContext(session: UnifiedSession, config?: VerbosityConfig): Promise { + const resolvedConfig = config ?? 
getPreset('standard'); + const lines = await readJsonlFile(session.originalPath); + const recentMessages: ConversationMessage[] = []; + + // Extract tool data via shared Anthropic utility + const anthropicMsgs: AnthropicMessage[] = lines.map((l) => ({ + role: l.role, + content: l.message.content, + })); + + const { summaries: toolSummaries, filesModified } = extractAnthropicToolData(anthropicMsgs, resolvedConfig); + + // Extract session notes (thinking highlights + token usage) + const sessionNotes: SessionNotes = {}; + const reasoning = extractThinkingHighlights(anthropicMsgs); + if (reasoning.length > 0) sessionNotes.reasoning = reasoning; + + // Aggregate token usage, cache tokens, and model from passthrough fields. + // Cursor CLI agent-transcripts use Anthropic API format — the schema's + // .passthrough() preserves `usage` and `model` on each JSONL line. + for (const line of lines) { + if (line.role !== 'assistant') continue; + const raw = line as Record; + + // Model: take the first one found (all lines in a session use the same model) + const model = (raw.model ?? (raw.message as Record | undefined)?.model) as string | undefined; + if (model && !sessionNotes.model) { + sessionNotes.model = model; + } + + // Usage may be at top level or nested under message (both observed in the wild) + const usage = (raw.usage ?? 
(raw.message as Record | undefined)?.usage) as + | Record + | undefined; + if (!usage) continue; + + if (!sessionNotes.tokenUsage) sessionNotes.tokenUsage = { input: 0, output: 0 }; + sessionNotes.tokenUsage.input += usage.input_tokens || 0; + sessionNotes.tokenUsage.output += usage.output_tokens || 0; + + const cacheCreation = usage.cache_creation_input_tokens || 0; + const cacheRead = usage.cache_read_input_tokens || 0; + if (cacheCreation || cacheRead) { + if (!sessionNotes.cacheTokens) sessionNotes.cacheTokens = { creation: 0, read: 0 }; + sessionNotes.cacheTokens.creation += cacheCreation; + sessionNotes.cacheTokens.read += cacheRead; + } + } + + const pendingTasks: string[] = []; + + for (const line of lines) { + const textParts: string[] = []; + for (const block of line.message.content) { + if (block.type === 'text' && block.text) { + if (isSystemContent(block.text)) continue; + const cleaned = line.role === 'user' ? cleanUserQueryText(block.text) : block.text; + if (cleaned) textParts.push(cleaned); + } + } + + const text = textParts.join('\n').trim(); + if (!text) continue; + + recentMessages.push({ + role: line.role === 'user' ? 
'user' : 'assistant', + content: text, + }); + } + + const trimmed = recentMessages.slice(-resolvedConfig.recentMessages); + + const markdown = generateHandoffMarkdown(session, trimmed, filesModified, pendingTasks, toolSummaries, sessionNotes, resolvedConfig); + + return { + session, + recentMessages: trimmed, + filesModified, + pendingTasks, + toolSummaries, + sessionNotes, + markdown, + }; +} diff --git a/src/parsers/droid.ts b/src/parsers/droid.ts new file mode 100644 index 0000000..e69387b --- /dev/null +++ b/src/parsers/droid.ts @@ -0,0 +1,291 @@ +import * as fs from 'fs'; +import * as path from 'path'; +import { logger } from '../logger.js'; +import type { ConversationMessage, SessionContext, SessionNotes, UnifiedSession } from '../types/index.js'; +import type { + DroidCompactionState, + DroidEvent, + DroidMessageEvent, + DroidSessionStart, + DroidSettings, + DroidTodoState, +} from '../types/schemas.js'; +import { DroidSettingsSchema } from '../types/schemas.js'; +import { isSystemContent } from '../utils/content.js'; +import { listSubdirectories } from '../utils/fs-helpers.js'; +import { getFileStats, readJsonlFile, scanJsonlHead } from '../utils/jsonl.js'; +import { generateHandoffMarkdown } from '../utils/markdown.js'; +import { cleanSummary, extractRepoFromCwd, homeDir } from '../utils/parser-helpers.js'; +import { cwdFromSlug } from '../utils/slug.js'; +import type { VerbosityConfig } from '../config/index.js'; +import { getPreset } from '../config/index.js'; +import { + type AnthropicMessage, + extractAnthropicToolData, + extractThinkingHighlights, +} from '../utils/tool-extraction.js'; +import { truncate } from '../utils/tool-summarizer.js'; + +const DROID_SESSIONS_DIR = path.join(homeDir(), '.factory', 'sessions'); + +/** + * Find all Droid session JSONL files. 
+ * Structure: ~/.factory/sessions//.jsonl + */ +async function findSessionFiles(): Promise { + const files: string[] = []; + for (const wsPath of listSubdirectories(DROID_SESSIONS_DIR)) { + try { + const entries = fs.readdirSync(wsPath, { withFileTypes: true }); + for (const entry of entries) { + if (entry.isFile() && entry.name.endsWith('.jsonl')) { + files.push(path.join(wsPath, entry.name)); + } + } + } catch (err) { + logger.debug('droid: cannot read session directory', wsPath, err); + // Skip directories we can't read + } + } + return files; +} + +/** + * Read companion .settings.json for a session + */ +function readSettings(jsonlPath: string): DroidSettings | null { + const settingsPath = jsonlPath.replace(/\.jsonl$/, '.settings.json'); + try { + if (fs.existsSync(settingsPath)) { + const result = DroidSettingsSchema.safeParse(JSON.parse(fs.readFileSync(settingsPath, 'utf8'))); + if (result.success) return result.data; + logger.debug('droid: settings validation failed', settingsPath, result.error.message); + return null; + } + } catch (err) { + logger.debug('droid: failed to read settings', settingsPath, err); + } + return null; +} + +/** + * Parse session metadata from session_start event and first user message + */ +async function parseSessionInfo(filePath: string): Promise<{ + sessionStart: DroidSessionStart | null; + firstUserMessage: string; + firstTimestamp: string; + lastTimestamp: string; +}> { + let sessionStart: DroidSessionStart | null = null; + let firstUserMessage = ''; + let firstTimestamp = ''; + let lastTimestamp = ''; + + await scanJsonlHead(filePath, 100, (parsed) => { + const event = parsed as DroidEvent; + + if (event.type === 'session_start' && !sessionStart) { + sessionStart = event; + } + + if (event.type === 'message') { + if (event.timestamp) { + if (!firstTimestamp) firstTimestamp = event.timestamp; + lastTimestamp = event.timestamp; + } + + if (!firstUserMessage && event.message.role === 'user') { + for (const block of 
event.message.content) { + if (block.type === 'text' && block.text) { + if (!block.text.startsWith('<') && !block.text.startsWith('/') && !block.text.includes('Session Handoff')) { + firstUserMessage = block.text; + break; + } + } + } + } + } + + return 'continue'; + }); + + return { sessionStart, firstUserMessage, firstTimestamp, lastTimestamp }; +} + +/** + * Parse all Droid sessions + */ +export async function parseDroidSessions(): Promise { + const files = await findSessionFiles(); + const sessions: UnifiedSession[] = []; + + for (const filePath of files) { + try { + const { sessionStart, firstUserMessage, firstTimestamp, lastTimestamp } = await parseSessionInfo(filePath); + if (!sessionStart) continue; + + const fileStats = fs.statSync(filePath); + const stats = await getFileStats(filePath); + const settings = readSettings(filePath); + + const workspaceSlug = path.basename(path.dirname(filePath)); + const cwd = sessionStart.cwd || cwdFromSlug(workspaceSlug); + + const summary = cleanSummary(firstUserMessage); + + const createdAt = firstTimestamp ? new Date(firstTimestamp) : fileStats.birthtime; + const updatedAt = lastTimestamp ? 
new Date(lastTimestamp) : fileStats.mtime; + + sessions.push({ + id: sessionStart.id, + source: 'droid', + cwd, + repo: extractRepoFromCwd(cwd), + lines: stats.lines, + bytes: fileStats.size, + createdAt, + updatedAt, + originalPath: filePath, + summary: summary || sessionStart.sessionTitle || undefined, + model: settings?.model, + }); + } catch (err) { + logger.debug('droid: skipping unparseable session', filePath, err); + // Skip files we can't parse + } + } + + return sessions.filter((s) => s.lines > 1).sort((a, b) => b.updatedAt.getTime() - a.updatedAt.getTime()); +} + +/** + * Extract session notes: model info, token usage, reasoning/thinking highlights + */ +function extractSessionNotes(events: DroidEvent[], settings: DroidSettings | null): SessionNotes { + const notes: SessionNotes = {}; + + if (settings?.model) notes.model = settings.model; + if (settings?.tokenUsage) { + notes.tokenUsage = { + input: settings.tokenUsage.inputTokens || 0, + output: settings.tokenUsage.outputTokens || 0, + }; + const cacheCreation = settings.tokenUsage.cacheCreationTokens || 0; + const cacheRead = settings.tokenUsage.cacheReadTokens || 0; + if (cacheCreation || cacheRead) { + notes.cacheTokens = { creation: cacheCreation, read: cacheRead }; + } + if (settings.tokenUsage.thinkingTokens) { + notes.thinkingTokens = settings.tokenUsage.thinkingTokens; + } + } + if (settings?.assistantActiveTimeMs) { + notes.activeTimeMs = settings.assistantActiveTimeMs; + } + + // Extract compaction summary — take the LAST one (most comprehensive) + for (const event of events) { + if (event.type === 'compaction_state') { + const cs = event as DroidCompactionState; + if (cs.summaryText) { + notes.compactSummary = truncate(cs.summaryText, 500); + } + } + } + + // Extract thinking highlights via shared utility + const anthropicMsgs: AnthropicMessage[] = events + .filter((e): e is DroidMessageEvent => e.type === 'message' && e.message.role === 'assistant') + .map((e) => ({ role: e.message.role, 
content: e.message.content })); + + const reasoning = extractThinkingHighlights(anthropicMsgs); + if (reasoning.length > 0) notes.reasoning = reasoning; + + return notes; +} + +/** + * Extract pending tasks from the most recent todo_state event + */ +function extractPendingTasks(events: DroidEvent[]): string[] { + const tasks: string[] = []; + + let lastTodo: DroidTodoState | null = null; + for (const event of events) { + if (event.type === 'todo_state') { + lastTodo = event; + } + } + + if (!lastTodo) return tasks; + + const todosText = typeof lastTodo.todos === 'string' ? lastTodo.todos : lastTodo.todos?.todos || ''; + if (!todosText) return tasks; + + for (const line of todosText.split('\n')) { + const match = line.match(/^\d+\.\s*\[(in_progress|pending)\]\s+(.+)/); + if (match && tasks.length < 5) { + tasks.push(match[2].trim()); + } + } + + return tasks; +} + +/** + * Extract context from a Droid session for cross-tool continuation + */ +export async function extractDroidContext(session: UnifiedSession, config?: VerbosityConfig): Promise { + const resolvedConfig = config ?? 
getPreset('standard'); + const events = await readJsonlFile(session.originalPath); + const settings = readSettings(session.originalPath); + + // Extract tool data via shared Anthropic utility + const anthropicMsgs: AnthropicMessage[] = events + .filter((e): e is DroidMessageEvent => e.type === 'message') + .map((e) => ({ role: e.message.role, content: e.message.content })); + + const { summaries: toolSummaries, filesModified } = extractAnthropicToolData(anthropicMsgs, resolvedConfig); + const sessionNotes = extractSessionNotes(events, settings); + const pendingTasks = extractPendingTasks(events); + + // Collect conversation messages (text content only) + const recentMessages: ConversationMessage[] = []; + + for (const event of events) { + if (event.type !== 'message') continue; + + const textParts: string[] = []; + for (const block of event.message.content) { + if (block.type === 'text' && block.text) { + if (!isSystemContent(block.text)) { + textParts.push(block.text); + } + } + } + + const text = textParts.join('\n').trim(); + if (!text) continue; + + recentMessages.push({ + role: event.message.role === 'user' ? 'user' : 'assistant', + content: text, + timestamp: event.timestamp ? new Date(event.timestamp) : undefined, + }); + } + + const trimmed = recentMessages.slice(-resolvedConfig.recentMessages); + + const markdown = generateHandoffMarkdown(session, trimmed, filesModified, pendingTasks, toolSummaries, sessionNotes, resolvedConfig); + + return { + session: sessionNotes?.model ? 
{ ...session, model: sessionNotes.model } : session, + recentMessages: trimmed, + filesModified, + pendingTasks, + toolSummaries, + sessionNotes, + markdown, + }; +} diff --git a/src/parsers/gemini.ts b/src/parsers/gemini.ts index 9fcec3c..24023ad 100644 --- a/src/parsers/gemini.ts +++ b/src/parsers/gemini.ts @@ -1,94 +1,59 @@ import * as fs from 'fs'; import * as path from 'path'; -import type { UnifiedSession, SessionContext, ConversationMessage, ToolUsageSummary, SessionNotes } from '../types/index.js'; +import { logger } from '../logger.js'; +import type { + ConversationMessage, + SessionContext, + SessionNotes, + ToolUsageSummary, + UnifiedSession, +} from '../types/index.js'; +import type { GeminiSession } from '../types/schemas.js'; +import { GeminiSessionSchema } from '../types/schemas.js'; +import { extractTextFromBlocks } from '../utils/content.js'; +import { findFiles, listSubdirectories } from '../utils/fs-helpers.js'; import { generateHandoffMarkdown } from '../utils/markdown.js'; -import { SummaryCollector, fileSummary, mcpSummary, truncate } from '../utils/tool-summarizer.js'; - -const GEMINI_BASE_DIR = path.join(process.env.HOME || '~', '.gemini', 'tmp'); - -interface GeminiToolCall { - name: string; - args?: Record; - result?: Array<{ functionResponse?: { response?: { output?: string } } }>; - status?: string; - resultDisplay?: { - fileName?: string; - filePath?: string; - fileDiff?: string; - originalContent?: string; - newContent?: string; - diffStat?: { - model_added_lines?: number; - model_removed_lines?: number; - }; - isNewFile?: boolean; - }; -} - -interface GeminiThought { - subject?: string; - description?: string; - timestamp?: string; -} - -interface GeminiMessage { - id: string; - timestamp: string; - type: 'user' | 'gemini' | 'info'; - content: string | Array<{ text?: string; type?: string }>; - toolCalls?: GeminiToolCall[]; - thoughts?: GeminiThought[]; - model?: string; - tokens?: { - input?: number; - output?: number; - cached?: 
number; - thoughts?: number; - tool?: number; - total?: number; - }; -} +import { cleanSummary, homeDir } from '../utils/parser-helpers.js'; +import { classifyToolName } from '../types/tool-names.js'; +import type { VerbosityConfig } from '../config/index.js'; +import { getPreset } from '../config/index.js'; +import { fileSummary, mcpSummary, shellSummary, SummaryCollector, truncate } from '../utils/tool-summarizer.js'; -interface GeminiSession { - sessionId: string; - projectHash: string; - startTime: string; - lastUpdated: string; - messages: GeminiMessage[]; -} +const geminiHome = process.env.GEMINI_CLI_HOME || homeDir(); +const GEMINI_BASE_DIR = path.join(geminiHome, '.gemini', 'tmp'); +const GEMINI_LEGACY_DIR = path.join(geminiHome, '.gemini', 'sessions'); /** - * Find all Gemini session files + * Find all Gemini session files (new and legacy storage formats) */ async function findSessionFiles(): Promise { - const files: string[] = []; - - if (!fs.existsSync(GEMINI_BASE_DIR)) { - return files; + const results: string[] = []; + + // New format: ~/.gemini/tmp//chats/session-*.json + if (fs.existsSync(GEMINI_BASE_DIR)) { + for (const projectDir of listSubdirectories(GEMINI_BASE_DIR)) { + if (path.basename(projectDir) === 'bin') continue; + const chatsDir = path.join(projectDir, 'chats'); + results.push( + ...findFiles(chatsDir, { + match: (entry) => entry.name.startsWith('session-') && entry.name.endsWith('.json'), + recursive: false, + }), + ); + } } - try { - // Iterate through project hash directories - const projectDirs = fs.readdirSync(GEMINI_BASE_DIR, { withFileTypes: true }); - - for (const projectDir of projectDirs) { - if (!projectDir.isDirectory() || projectDir.name === 'bin') continue; - - const chatsDir = path.join(GEMINI_BASE_DIR, projectDir.name, 'chats'); - if (!fs.existsSync(chatsDir)) continue; - - const chatFiles = fs.readdirSync(chatsDir, { withFileTypes: true }); - for (const chatFile of chatFiles) { - if (chatFile.isFile() && 
chatFile.name.startsWith('session-') && chatFile.name.endsWith('.json')) { - files.push(path.join(chatsDir, chatFile.name)); - } - } - } - } catch { - // Skip directories we can't read + // Legacy format: ~/.gemini/sessions/*.json + if (fs.existsSync(GEMINI_LEGACY_DIR)) { + results.push( + ...findFiles(GEMINI_LEGACY_DIR, { + match: (entry) => entry.name.endsWith('.json'), + recursive: false, + }), + ); } - return files; + return results; } /** @@ -97,8 +62,12 @@ async function findSessionFiles(): Promise { function parseSessionFile(filePath: string): GeminiSession | null { try { const content = fs.readFileSync(filePath, 'utf8'); - return JSON.parse(content) as GeminiSession; - } catch { + const result = GeminiSessionSchema.safeParse(JSON.parse(content)); + if (result.success) return result.data; + logger.debug('gemini: session validation failed', filePath, result.error.message); + return null; + } catch (err) { + logger.debug('gemini: failed to parse session file', filePath, err); return null; } } @@ -107,18 +76,7 @@ function parseSessionFile(filePath: string): GeminiSession | null { * Extract text content from Gemini message (handles both string and array formats) */ function extractGeminiContent(content: string | Array<{ text?: string; type?: string }>): string { - if (typeof content === 'string') { - return content; - } - - if (Array.isArray(content)) { - return content - .filter(part => part.text) - .map(part => part.text) - .join('\n'); - } - - return ''; + return extractTextFromBlocks(content as string | Array<{ type: string; text?: string }>); } /** @@ -133,45 +91,160 @@ function extractFirstUserMessage(session: GeminiSession): string { return ''; } -/** - * Extract repo name from project path - */ -function extractRepoFromPath(projectPath: string): string { - const parts = projectPath.split('/').filter(Boolean); - if (parts.length >= 2) { - return parts.slice(-2).join('/'); - } - return parts[parts.length - 1] || ''; -} - /** * Extract tool usage summaries 
and files modified using shared SummaryCollector */ -function extractToolData(sessionData: GeminiSession): { summaries: ToolUsageSummary[]; filesModified: string[] } { - const collector = new SummaryCollector(); +function extractToolData(sessionData: GeminiSession, config?: VerbosityConfig): { summaries: ToolUsageSummary[]; filesModified: string[] } { + const collector = new SummaryCollector(config); for (const msg of sessionData.messages) { if (msg.type !== 'gemini' || !msg.toolCalls) continue; for (const tc of msg.toolCalls) { - const { name, args, result, resultDisplay } = tc; - - if (name === 'write_file') { - const fp = resultDisplay?.filePath || (args?.file_path as string) || ''; - let diffStat: { added: number; removed: number } | undefined; - if (resultDisplay?.diffStat) { - diffStat = { added: resultDisplay.diffStat.model_added_lines || 0, removed: resultDisplay.diffStat.model_removed_lines || 0 }; - } else if (resultDisplay?.fileDiff) { - const lines = resultDisplay.fileDiff.split('\n'); - diffStat = { added: lines.filter(l => l.startsWith('+')).length, removed: lines.filter(l => l.startsWith('-')).length }; + const { name, args, result, resultDisplay, status } = tc; + const category = classifyToolName(name); + if (!category) continue; // skip internal tools + + const fp = resultDisplay?.filePath || (args?.file_path as string) || (args?.path as string) || ''; + const resultStr = result?.[0]?.functionResponse?.response?.output; + const isError = status ? 
!['ok', 'success', 'completed'].includes(status.toLowerCase()) : false; + + switch (category) { + case 'write': { + let diffStat: { added: number; removed: number } | undefined; + if (resultDisplay?.diffStat) { + diffStat = { + added: resultDisplay.diffStat.model_added_lines || 0, + removed: resultDisplay.diffStat.model_removed_lines || 0, + }; + } else if (resultDisplay?.fileDiff) { + const lines = resultDisplay.fileDiff.split('\n'); + diffStat = { + added: lines.filter((l: string) => l.startsWith('+')).length, + removed: lines.filter((l: string) => l.startsWith('-')).length, + }; + } + const isNewFile = resultDisplay?.isNewFile ?? false; + const diff = resultDisplay?.fileDiff || undefined; + collector.add(name, fileSummary('write', fp, diffStat, isNewFile), { + data: { + category: 'write', + filePath: fp, + isNewFile, + ...(diff ? { diff } : {}), + ...(diffStat ? { diffStats: diffStat } : {}), + }, + filePath: fp, + isWrite: true, + isError, + }); + break; + } + case 'read': + collector.add(name, fileSummary('read', fp), { + data: { category: 'read', filePath: fp }, + filePath: fp, + isError, + }); + break; + case 'shell': { + const cmd = (args?.command as string) || (args?.cmd as string) || ''; + const output = resultStr ? String(resultStr) : ''; + collector.add(name, shellSummary(cmd, output || undefined), { + data: { category: 'shell', command: cmd, ...(output ? 
{ stdoutTail: output.slice(-500) } : {}) }, + isError, + }); + break; + } + case 'edit': { + let diffStat: { added: number; removed: number } | undefined; + if (resultDisplay?.diffStat) { + diffStat = { + added: resultDisplay.diffStat.model_added_lines || 0, + removed: resultDisplay.diffStat.model_removed_lines || 0, + }; + } else if (resultDisplay?.fileDiff) { + const dLines = resultDisplay.fileDiff.split('\n'); + diffStat = { + added: dLines.filter((l: string) => l.startsWith('+')).length, + removed: dLines.filter((l: string) => l.startsWith('-')).length, + }; + } + const diff = resultDisplay?.fileDiff || undefined; + collector.add(name, fileSummary('edit', fp, diffStat), { + data: { + category: 'edit', + filePath: fp, + ...(diff ? { diff } : {}), + ...(diffStat ? { diffStats: diffStat } : {}), + }, + filePath: fp, + isWrite: true, + isError, + }); + break; + } + case 'grep': { + const pattern = (args?.pattern as string) || (args?.query as string) || ''; + collector.add(name, `grep "${truncate(pattern, 40)}"`, { + data: { category: 'grep', pattern, ...(fp ? { targetPath: fp } : {}) }, + isError, + }); + break; + } + case 'glob': { + const pattern = (args?.pattern as string) || fp; + collector.add(name, `glob ${truncate(pattern, 50)}`, { + data: { category: 'glob', pattern }, + isError, + }); + break; + } + case 'search': + collector.add(name, `search "${truncate((args?.query as string) || '', 50)}"`, { + data: { category: 'search', query: (args?.query as string) || '' }, + isError, + }); + break; + case 'fetch': + collector.add(name, `fetch ${truncate((args?.url as string) || '', 60)}`, { + data: { + category: 'fetch', + url: (args?.url as string) || '', + ...(resultStr ? 
{ resultPreview: String(resultStr).slice(0, 100) } : {}), + }, + isError, + }); + break; + case 'task': { + const desc = (args?.description as string) || (args?.prompt as string) || ''; + const agentType = (args?.subagent_type as string) || undefined; + collector.add(name, `task "${truncate(desc, 60)}"${agentType ? ` (${agentType})` : ''}`, { + data: { category: 'task', description: desc, ...(agentType ? { agentType } : {}) }, + isError, + }); + break; + } + case 'ask': { + const question = truncate((args?.question as string) || (args?.prompt as string) || '', 80); + collector.add(name, `ask: "${question}"`, { + data: { category: 'ask', question }, + isError, + }); + break; + } + default: { + // mcp — fallback to compact format + const argsStr = args ? JSON.stringify(args).slice(0, 100) : ''; + collector.add(name, mcpSummary(name, argsStr, resultStr), { + data: { + category: 'mcp', + toolName: name, + ...(argsStr ? { params: argsStr } : {}), + ...(resultStr ? { result: String(resultStr).slice(0, 100) } : {}), + }, + isError, + }); } - collector.add('write_file', fileSummary('write', fp, diffStat, resultDisplay?.isNewFile), fp, true); - } else if (name === 'read_file') { - const fp = (args?.file_path as string) || ''; - collector.add('read_file', fileSummary('read', fp), fp); - } else { - const argsStr = args ? 
JSON.stringify(args).slice(0, 100) : ''; - const resultStr = result?.[0]?.functionResponse?.response?.output; - collector.add(name, mcpSummary(name, argsStr, resultStr)); } } } @@ -195,6 +268,15 @@ function extractSessionNotes(sessionData: GeminiSession): SessionNotes { if (!notes.tokenUsage) notes.tokenUsage = { input: 0, output: 0 }; notes.tokenUsage.input += msg.tokens.input || 0; notes.tokenUsage.output += msg.tokens.output || 0; + + // Accumulate cache and thinking tokens + if (msg.tokens.cached) { + if (!notes.cacheTokens) notes.cacheTokens = { creation: 0, read: 0 }; + notes.cacheTokens.read += msg.tokens.cached; + } + if (msg.tokens.thoughts) { + notes.thinkingTokens = (notes.thinkingTokens || 0) + msg.tokens.thoughts; + } } if (msg.thoughts && reasoning.length < 5) { @@ -225,17 +307,12 @@ export async function parseGeminiSessions(): Promise { // Get cwd from parent directory structure (project hash dir) const projectHashDir = path.dirname(path.dirname(filePath)); const projectHash = path.basename(projectHashDir); - - // Try to get cwd - for now use the project hash dir path - // In a real implementation, we might store a mapping - const cwd = projectHashDir; - + + // Gemini does not store working directory in its session data + const cwd = ''; + const firstUserMessage = extractFirstUserMessage(session); - const summary = firstUserMessage - .replace(/\n/g, ' ') - .replace(/\s+/g, ' ') - .trim() - .slice(0, 50); + const summary = cleanSummary(firstUserMessage); const fileStats = fs.statSync(filePath); const content = fs.readFileSync(filePath, 'utf8'); @@ -245,7 +322,7 @@ export async function parseGeminiSessions(): Promise { id: session.sessionId, source: 'gemini', cwd, - repo: extractRepoFromPath(cwd), + repo: '', lines, bytes: fileStats.size, createdAt: new Date(session.startTime), @@ -253,21 +330,23 @@ export async function parseGeminiSessions(): Promise { originalPath: filePath, summary: summary || undefined, }); - } catch { + } catch (err) { + 
logger.debug('gemini: skipping unparseable session', filePath, err); // Skip files we can't parse } } // Filter sessions that have real user messages (not just auth flows) return sessions - .filter(s => s.summary && s.summary.length > 0) + .filter((s) => s.summary && s.summary.length > 0) .sort((a, b) => b.updatedAt.getTime() - a.updatedAt.getTime()); } /** * Extract context from a Gemini session for cross-tool continuation */ -export async function extractGeminiContext(session: UnifiedSession): Promise { +export async function extractGeminiContext(session: UnifiedSession, config?: VerbosityConfig): Promise { + const resolvedConfig = config ?? getPreset('standard'); const sessionData = parseSessionFile(session.originalPath); const recentMessages: ConversationMessage[] = []; let filesModified: string[] = []; @@ -276,21 +355,26 @@ export async function extractGeminiContext(session: UnifiedSession): Promise= 5) break; const subject = thought.subject?.toLowerCase() || ''; const description = thought.description?.toLowerCase() || ''; - if (subject.includes('todo') || subject.includes('next') || - subject.includes('remaining') || subject.includes('need to') || - description.includes('need to') || description.includes('next step')) { + if ( + subject.includes('todo') || + subject.includes('next') || + subject.includes('remaining') || + subject.includes('need to') || + description.includes('need to') || + description.includes('next step') + ) { const taskText = thought.subject || thought.description || ''; if (taskText && taskText.length > 0) pendingTasks.push(taskText); } @@ -316,18 +400,21 @@ export async function extractGeminiContext(session: UnifiedSession): Promise { + if (typeof item === 'string') return { path: item }; + if (!item || typeof item !== 'object') return null; + const candidate = item as { path?: unknown; kaos?: unknown }; + if (typeof candidate.path !== 'string' || candidate.path.length === 0) return null; + return { + path: candidate.path, + kaos: 
typeof candidate.kaos === 'string' && candidate.kaos.length > 0 ? candidate.kaos : undefined, + }; + }) + .filter((entry): entry is KimiWorkDirEntry => entry !== null); + } catch (err) { + logger.debug('kimi: failed to parse kimi.json work_dirs', err); + return []; + } +} + +function buildWorkDirHashIndex(workDirs: KimiWorkDirEntry[]): Map { + const hashIndex = new Map(); + + for (const wd of workDirs) { + const md5 = hashWorkDirPath(wd.path); + const keys = [md5]; + + // Kimi can prefix non-local KAOS sessions as "{kaos}_{md5}". + if (wd.kaos && wd.kaos.toLowerCase() !== 'local') { + keys.push(`${wd.kaos}_${md5}`); + } + + for (const key of keys) { + if (!hashIndex.has(key)) { + hashIndex.set(key, wd.path); + } + } + } + + return hashIndex; +} + +function resolveCwdFromSessionDir(sessionDir: string, hashIndex: Map): string { + const workDirHash = path.basename(path.dirname(sessionDir)); + return hashIndex.get(workDirHash) || ''; +} + +/** + * Find all Kimi session directories + */ +async function findSessionDirs(): Promise { + const results: string[] = []; + + if (!fs.existsSync(KIMI_SESSIONS_DIR)) { + return results; + } + + // Kimi stores sessions as: ~/.kimi/sessions/{workdir_hash}/{session_id}/ + for (const workdirDir of listSubdirectories(KIMI_SESSIONS_DIR)) { + for (const sessionDir of listSubdirectories(workdirDir)) { + const contextPath = path.join(sessionDir, 'context.jsonl'); + if (fs.existsSync(contextPath)) { + results.push(sessionDir); + } + } + } + + return results; +} + +/** + * Parse metadata.json from a Kimi session directory + */ +function parseMetadata(sessionDir: string): KimiMetadata | undefined { + const metadataPath = path.join(sessionDir, 'metadata.json'); + if (!fs.existsSync(metadataPath)) { + return undefined; + } + + try { + const content = fs.readFileSync(metadataPath, 'utf8'); + const result = KimiMetadataSchema.safeParse(JSON.parse(content)); + if (result.success) return result.data; + logger.debug('kimi: metadata validation failed', 
sessionDir, result.error.message); + return undefined; + } catch (err) { + logger.debug('kimi: failed to parse metadata', sessionDir, err); + return undefined; + } +} + +/** + * Read context.jsonl from a Kimi session directory + */ +async function readContextFile(sessionDir: string): Promise { + try { + const contextPath = path.join(sessionDir, 'context.jsonl'); + return await readJsonlFile(contextPath); + } catch (err) { + logger.debug('kimi: failed to read context', sessionDir, err); + return []; + } +} + +/** + * Extract first real user message from Kimi messages + */ +function extractFirstUserMessage(messages: KimiMessage[]): string { + for (const msg of messages) { + if (msg.role === 'user') { + const text = extractTextFromBlocks(msg.content as string | Array<{ type: string; text?: string }>); + if (text) return text; + } + } + return ''; +} + +/** + * Parse tool call arguments safely + */ +function parseToolArgs(argsStr: string): Record { + try { + return JSON.parse(argsStr); + } catch { + return {}; + } +} + +/** + * Extract tool usage summaries and files modified using shared SummaryCollector + */ +function extractToolData(messages: KimiMessage[], config?: VerbosityConfig): { summaries: ToolUsageSummary[]; filesModified: string[] } { + const collector = new SummaryCollector(config); + + for (const msg of messages) { + if (msg.role !== 'assistant' || !msg.tool_calls) continue; + + for (const tc of msg.tool_calls) { + const name = tc.function.name; + const args = parseToolArgs(tc.function.arguments); + const category = classifyToolName(name); + if (!category) continue; // skip internal tools + + const fp = (args.file_path as string) || (args.path as string) || ''; + + switch (category) { + case 'write': { + collector.add( + name, + fileSummary('write', fp, undefined, false), + { data: { category: 'write', filePath: fp }, filePath: fp, isWrite: true } + ); + break; + } + case 'read': + collector.add(name, fileSummary('read', fp), { + data: { category: 'read', 
filePath: fp }, + filePath: fp, + }); + break; + case 'shell': { + const cmd = (args.command as string) || (args.cmd as string) || ''; + collector.add(name, shellSummary(cmd), { + data: { category: 'shell', command: cmd }, + }); + break; + } + case 'edit': { + collector.add(name, fileSummary('edit', fp), { + data: { category: 'edit', filePath: fp }, + filePath: fp, + isWrite: true, + }); + break; + } + case 'grep': { + const pattern = (args.pattern as string) || (args.query as string) || ''; + collector.add(name, `grep "${truncate(pattern, 40)}"`, { + data: { category: 'grep', pattern, ...(fp ? { targetPath: fp } : {}) }, + }); + break; + } + case 'glob': { + const pattern = (args.pattern as string) || fp; + collector.add(name, `glob ${truncate(pattern, 50)}`, { + data: { category: 'glob', pattern }, + }); + break; + } + case 'search': + collector.add(name, `search "${truncate((args.query as string) || '', 50)}"`, { + data: { category: 'search', query: (args.query as string) || '' }, + }); + break; + case 'fetch': + collector.add(name, `fetch ${truncate((args.url as string) || '', 60)}`, { + data: { category: 'fetch', url: (args.url as string) || '' }, + }); + break; + case 'task': { + const desc = (args.description as string) || (args.prompt as string) || ''; + const agentType = (args.subagent_type as string) || undefined; + collector.add(name, `task "${truncate(desc, 60)}"${agentType ? ` (${agentType})` : ''}`, { + data: { category: 'task', description: desc, ...(agentType ? { agentType } : {}) }, + }); + break; + } + case 'ask': { + const question = truncate((args.question as string) || (args.prompt as string) || '', 80); + collector.add(name, `ask: "${question}"`, { + data: { category: 'ask', question }, + }); + break; + } + default: { + // mcp — fallback to compact format + const argsStr = Object.keys(args).length > 0 ? 
JSON.stringify(args).slice(0, 100) : ''; + collector.add(name, mcpSummary(name, argsStr, undefined), { + data: { category: 'mcp', toolName: name, ...(argsStr ? { params: argsStr } : {}) }, + }); + } + } + } + } + + return { summaries: collector.getSummaries(), filesModified: collector.getFilesModified() }; +} + +/** + * Extract session notes (thinking blocks, token usage) + */ +function extractSessionNotes(messages: KimiMessage[]): SessionNotes { + const notes: SessionNotes = {}; + const reasoning: string[] = []; + let latestTokenCount = 0; + + for (const msg of messages) { + // Extract thinking blocks from assistant messages + if (msg.role === 'assistant' && Array.isArray(msg.content)) { + for (const block of msg.content) { + if (block.type === 'think' && block.think) { + const thought = String(block.think).trim(); + if (thought.length > 10 && reasoning.length < 5) { + reasoning.push(truncate(thought, 200)); + } + } + } + } + + // Extract usage info from _usage entries + if (msg.role === '_usage' && 'token_count' in msg) { + const tokenCount = (msg as unknown as { token_count?: unknown }).token_count; + if (typeof tokenCount === 'number' && Number.isFinite(tokenCount) && tokenCount >= 0) { + latestTokenCount = tokenCount; + } + } + } + + if (reasoning.length > 0) notes.reasoning = reasoning; + // Kimi `_usage` only provides a cumulative token_count total. + // Avoid fabricating input/output splits from that total. 
+ if (latestTokenCount > 0) { + logger.debug('kimi: latest token_count snapshot', latestTokenCount); + } + + return notes; +} + +/** + * Parse all Kimi sessions + */ +export async function parseKimiSessions(): Promise { + const sessionDirs = await findSessionDirs(); + const sessions: UnifiedSession[] = []; + const workDirHashIndex = buildWorkDirHashIndex(parseKimiWorkDirs()); + + for (const sessionDir of sessionDirs) { + try { + const contextPath = path.join(sessionDir, 'context.jsonl'); + const contextStats = fs.statSync(contextPath); + + const metadata = parseMetadata(sessionDir); + if (metadata?.archived === true) continue; + const sessionId = metadata?.session_id || path.basename(sessionDir); + if (!sessionId) continue; + + const messages = await readContextFile(sessionDir); + if (messages.length === 0) continue; + + const firstUserMessage = extractFirstUserMessage(messages); + const summary = cleanSummary(firstUserMessage); + + // metadata.json is optional and may be created after context.jsonl + const metadataPath = path.join(sessionDir, 'metadata.json'); + const metadataStats = fs.existsSync(metadataPath) ? 
fs.statSync(metadataPath) : undefined; + const cwd = resolveCwdFromSessionDir(sessionDir, workDirHashIndex); + + let updatedAt = contextStats.mtime; + if (typeof metadata?.wire_mtime === 'number' && Number.isFinite(metadata.wire_mtime) && metadata.wire_mtime > 0) { + const wireUpdatedAt = new Date(metadata.wire_mtime * 1000); + if (!Number.isNaN(wireUpdatedAt.getTime())) { + updatedAt = wireUpdatedAt; + } + } + + sessions.push({ + id: sessionId, + source: 'kimi', + cwd, + repo: '', + lines: messages.length, + bytes: contextStats.size, + createdAt: metadataStats?.birthtime || contextStats.birthtime, + updatedAt, + originalPath: sessionDir, + summary: summary || metadata?.title || undefined, + }); + } catch (err) { + logger.debug('kimi: skipping unparseable session', sessionDir, err); + // Skip sessions we can't parse + } + } + + return sessions.sort((a, b) => b.updatedAt.getTime() - a.updatedAt.getTime()); +} + +/** + * Extract context from a Kimi session for cross-tool continuation + */ +export async function extractKimiContext(session: UnifiedSession, config?: VerbosityConfig): Promise { + const resolvedConfig = config ?? 
getPreset('standard'); + const messages = await readContextFile(session.originalPath); + const recentMessages: ConversationMessage[] = []; + const pendingTasks: string[] = []; + const pendingTaskSet = new Set(); + + const toolData = extractToolData(messages, resolvedConfig); + const sessionNotes = extractSessionNotes(messages); + + // Extract recent conversation messages + let messageCount = 0; + for (let i = messages.length - 1; i >= 0 && messageCount < resolvedConfig.recentMessages * 2; i--) { + const msg = messages[i]; + + if (msg.role === 'user') { + const content = extractTextFromBlocks(msg.content as string | Array<{ type: string; text?: string }>); + if (content) { + recentMessages.unshift({ + role: 'user', + content, + }); + messageCount++; + } + } else if (msg.role === 'assistant') { + const content = extractTextFromBlocks(msg.content as string | Array<{ type: string; text?: string }>); + if (content) { + recentMessages.unshift({ + role: 'assistant', + content, + }); + messageCount++; + } + + // Extract pending tasks from thinking blocks + if (Array.isArray(msg.content) && pendingTasks.length < 5) { + for (const block of msg.content) { + if (block.type === 'think' && block.think) { + const thought = String(block.think).toLowerCase(); + if ( + thought.includes('need to') || + thought.includes('next step') || + thought.includes('todo') || + thought.includes('remaining') + ) { + const taskText = String(block.think).trim(); + if (taskText.length > 0 && !pendingTaskSet.has(taskText)) { + pendingTaskSet.add(taskText); + pendingTasks.push(taskText); + } + } + } + } + } + } + } + + const trimmed = trimMessages(recentMessages, resolvedConfig.recentMessages); + + const markdown = generateHandoffMarkdown( + session, + trimmed, + toolData.filesModified, + pendingTasks.slice(0, 5), + toolData.summaries, + sessionNotes, + resolvedConfig, + ); + + return { + session, + recentMessages: trimmed, + filesModified: toolData.filesModified, + pendingTasks: pendingTasks.slice(0, 
5), + toolSummaries: toolData.summaries, + sessionNotes, + markdown, + }; +} diff --git a/src/parsers/kiro.ts b/src/parsers/kiro.ts new file mode 100644 index 0000000..c367b6f --- /dev/null +++ b/src/parsers/kiro.ts @@ -0,0 +1,200 @@ +import * as fs from 'fs'; +import * as path from 'path'; +import { logger } from '../logger.js'; +import type { + ConversationMessage, + SessionContext, + UnifiedSession, +} from '../types/index.js'; +import { extractTextFromBlocks } from '../utils/content.js'; +import { findFiles, listSubdirectories } from '../utils/fs-helpers.js'; +import { generateHandoffMarkdown } from '../utils/markdown.js'; +import { cleanSummary, homeDir } from '../utils/parser-helpers.js'; +import type { VerbosityConfig } from '../config/index.js'; +import { getPreset } from '../config/index.js'; + +// ── Kiro Session Shape ────────────────────────────────────────────────────── + +/** A single entry in the Kiro history array */ +interface KiroHistoryEntry { + message: { + role: string; + content: string | Array<{ type: string; text?: string }>; + id?: string; + }; +} + +/** Raw Kiro session JSON structure */ +interface KiroSession { + sessionId: string; + title?: string; + workspacePath?: string; + selectedModel?: string; + history: KiroHistoryEntry[]; +} + +// ── Base Path ─────────────────────────────────────────────────────────────── +// macOS: ~/Library/Application Support/Kiro/workspace-sessions/ +const KIRO_BASE_DIR = path.join( + homeDir(), + 'Library', + 'Application Support', + 'Kiro', + 'workspace-sessions', +); + +/** + * Find all Kiro session JSON files. + * Walks workspace subdirectories, skips the `sessions.json` index file. 
+ */ +async function findSessionFiles(): Promise { + if (!fs.existsSync(KIRO_BASE_DIR)) return []; + + const results: string[] = []; + for (const workspaceDir of listSubdirectories(KIRO_BASE_DIR)) { + results.push( + ...findFiles(workspaceDir, { + match: (entry) => entry.name.endsWith('.json') && entry.name !== 'sessions.json', + recursive: false, + }), + ); + } + return results; +} + +/** + * Parse and validate a single Kiro session JSON file. + * Returns null for files that don't match the expected shape. + */ +function parseSessionFile(filePath: string): KiroSession | null { + try { + const content = fs.readFileSync(filePath, 'utf8'); + const data = JSON.parse(content); + if (typeof data.sessionId !== 'string' || !Array.isArray(data.history)) { + logger.debug('kiro: missing sessionId or history', filePath); + return null; + } + return data as KiroSession; + } catch (err) { + logger.debug('kiro: failed to parse session file', filePath, err); + return null; + } +} + +/** + * Extract text content from a Kiro message. + * Handles both plain string and `[{type: "text", text: "..."}]` formats. + */ +function extractContent(content: string | Array<{ type: string; text?: string }>): string { + return extractTextFromBlocks(content); +} + +/** + * Extract the first real user message for use as a session summary. + */ +function extractFirstUserMessage(session: KiroSession): string { + for (const entry of session.history) { + if (entry.message.role === 'user' && entry.message.content) { + return extractContent(entry.message.content); + } + } + return ''; +} + +/** + * Derive a project name from session data. + * Priority: title → basename(workspacePath) → "kiro" + */ +function deriveProjectName(session: KiroSession): string { + if (session.title) return session.title; + if (session.workspacePath) return path.basename(session.workspacePath); + return 'kiro'; +} + +/** + * Parse all Kiro sessions into the unified format. 
+ */ +export async function parseKiroSessions(): Promise { + const files = await findSessionFiles(); + const sessions: UnifiedSession[] = []; + + for (const filePath of files) { + try { + const session = parseSessionFile(filePath); + if (!session) continue; + + const fileStats = fs.statSync(filePath); + const firstUserMessage = extractFirstUserMessage(session); + const summary = cleanSummary(firstUserMessage) || deriveProjectName(session); + + sessions.push({ + id: session.sessionId, + // Type assertion: 'kiro' will be added to TOOL_NAMES separately + source: 'kiro', + cwd: session.workspacePath || '', + lines: session.history.length, + bytes: fileStats.size, + // Kiro has no per-message timestamps — file mtime is the best proxy + createdAt: fileStats.birthtime, + updatedAt: fileStats.mtime, + originalPath: filePath, + summary, + model: session.selectedModel, + }); + } catch (err) { + logger.debug('kiro: skipping unparseable session', filePath, err); + } + } + + return sessions + .filter((s) => s.summary && s.summary.length > 0) + .sort((a, b) => b.updatedAt.getTime() - a.updatedAt.getTime()); +} + +/** + * Extract context from a Kiro session for cross-tool continuation. + */ +export async function extractKiroContext(session: UnifiedSession, config?: VerbosityConfig): Promise { + const cfg = config ?? getPreset('standard'); + const sessionData = parseSessionFile(session.originalPath); + const recentMessages: ConversationMessage[] = []; + + if (sessionData) { + for (const entry of sessionData.history) { + const role: 'user' | 'assistant' = entry.message.role === 'user' ? 'user' : 'assistant'; + const content = extractContent(entry.message.content); + if (content) { + recentMessages.push({ role, content }); + } + } + } + + const trimmed = recentMessages.slice(-cfg.recentMessages); + + // Kiro sessions have no tool call data + const filesModified: string[] = []; + const pendingTasks: string[] = []; + + const enrichedSession = sessionData?.selectedModel + ? 
{ ...session, model: sessionData.selectedModel } + : session; + + const markdown = generateHandoffMarkdown( + enrichedSession, + trimmed, + filesModified, + pendingTasks, + [], // toolSummaries — Kiro stores no tool call data + undefined, // sessionNotes — not tracked by Kiro + cfg, + ); + + return { + session: enrichedSession, + recentMessages: trimmed, + filesModified, + pendingTasks, + toolSummaries: [], + markdown, + }; +} diff --git a/src/parsers/opencode.ts b/src/parsers/opencode.ts index cb75565..69b4ec2 100644 --- a/src/parsers/opencode.ts +++ b/src/parsers/opencode.ts @@ -1,100 +1,51 @@ import * as fs from 'fs'; -import * as path from 'path'; import { createRequire } from 'module'; -import type { UnifiedSession, SessionContext, ConversationMessage } from '../types/index.js'; +import * as path from 'path'; +import { z } from 'zod'; +import { logger } from '../logger.js'; +import type { ConversationMessage, SessionContext, ToolUsageSummary, UnifiedSession } from '../types/index.js'; +import type { + OpenCodeProject, + OpenCodeSession, + SqliteMessageRow, + SqlitePartRow, + SqliteProjectRow, + SqliteSessionRow, +} from '../types/schemas.js'; +import { + OpenCodeMessageSchema, + OpenCodePartSchema, + OpenCodeProjectSchema, + OpenCodeSessionSchema, +} from '../types/schemas.js'; +import { findFiles, listSubdirectories } from '../utils/fs-helpers.js'; import { generateHandoffMarkdown } from '../utils/markdown.js'; +import { extractRepoFromCwd, homeDir } from '../utils/parser-helpers.js'; +import type { VerbosityConfig } from '../config/index.js'; +import { getPreset } from '../config/index.js'; -const OPENCODE_BASE_DIR = path.join(process.env.HOME || '~', '.local', 'share', 'opencode'); +const OPENCODE_BASE_DIR = process.env.XDG_DATA_HOME + ? 
path.join(process.env.XDG_DATA_HOME, 'opencode') + : path.join(homeDir(), '.local', 'share', 'opencode'); const OPENCODE_STORAGE_DIR = path.join(OPENCODE_BASE_DIR, 'storage'); const OPENCODE_DB_PATH = path.join(OPENCODE_BASE_DIR, 'opencode.db'); -interface OpenCodeSession { - id: string; - slug?: string; - version?: string; - projectID: string; - directory: string; - title?: string; - time: { - created: number; - updated: number; - }; - summary?: { - additions?: number; - deletions?: number; - files?: number; - }; +/** Minimal typed interface for node:sqlite DatabaseSync */ +interface SqlitePreparedStatement { + all(...params: unknown[]): unknown[]; + get(...params: unknown[]): unknown | undefined; } -interface OpenCodeProject { - id: string; - worktree: string; - vcs?: string; - time?: { - created: number; - updated: number; - }; +interface SqliteDatabase { + prepare(sql: string): SqlitePreparedStatement; + close(): void; } -interface OpenCodeMessage { - id: string; - sessionID: string; - role: 'user' | 'assistant'; - time: { - created: number; - completed?: number; - }; - summary?: { - title?: string; - }; - path?: { - cwd?: string; - root?: string; - }; -} +/** Zod schema for message data blob stored in SQLite data column */ +const SqliteMsgDataSchema = z.object({ role: z.string() }).passthrough(); -interface OpenCodePart { - id: string; - sessionID: string; - messageID: string; - type: string; - text?: string; -} - -// SQLite row types -interface SqliteSessionRow { - id: string; - project_id: string; - slug: string; - directory: string; - title: string; - version: string; - summary_additions: number | null; - summary_deletions: number | null; - summary_files: number | null; - time_created: number; - time_updated: number; -} - -interface SqliteMessageRow { - id: string; - session_id: string; - time_created: number; - data: string; -} - -interface SqlitePartRow { - id: string; - message_id: string; - session_id: string; - time_created: number; - data: string; -} 
- -interface SqliteProjectRow { - id: string; - worktree: string; -} +/** Zod schema for part data blob stored in SQLite data column */ +const SqlitePartDataSchema = z.object({ type: z.string(), text: z.string().optional() }).passthrough(); /** * Check if SQLite DB exists and is usable @@ -106,14 +57,15 @@ function hasSqliteDb(): boolean { /** * Open SQLite database using node:sqlite (built-in) */ -function openDb(): { db: any; close: () => void } | null { +function openDb(): { db: SqliteDatabase; close: () => void } | null { try { // Dynamic import of node:sqlite to avoid issues on older Node versions const require = createRequire(import.meta.url); const { DatabaseSync } = require('node:sqlite'); - const db = new DatabaseSync(OPENCODE_DB_PATH, { open: true, readOnly: true }); + const db = new DatabaseSync(OPENCODE_DB_PATH, { open: true, readOnly: true }) as SqliteDatabase; return { db, close: () => db.close() }; - } catch { + } catch (err) { + logger.debug('opencode: failed to open SQLite database', OPENCODE_DB_PATH, err); return null; } } @@ -122,34 +74,17 @@ function openDb(): { db: any; close: () => void } | null { * Find all OpenCode session files */ async function findSessionFiles(): Promise { - const files: string[] = []; const sessionDir = path.join(OPENCODE_STORAGE_DIR, 'session'); - - if (!fs.existsSync(sessionDir)) { - return files; + const results: string[] = []; + for (const projectDir of listSubdirectories(sessionDir)) { + results.push( + ...findFiles(projectDir, { + match: (entry) => entry.name.startsWith('ses_') && entry.name.endsWith('.json'), + recursive: false, + }), + ); } - - try { - // Iterate through project hash directories - const projectDirs = fs.readdirSync(sessionDir, { withFileTypes: true }); - - for (const projectDir of projectDirs) { - if (!projectDir.isDirectory()) continue; - - const projectPath = path.join(sessionDir, projectDir.name); - const sessionFiles = fs.readdirSync(projectPath, { withFileTypes: true }); - - for (const 
sessionFile of sessionFiles) { - if (sessionFile.isFile() && sessionFile.name.startsWith('ses_') && sessionFile.name.endsWith('.json')) { - files.push(path.join(projectPath, sessionFile.name)); - } - } - } - } catch { - // Skip directories we can't read - } - - return files; + return results; } /** @@ -158,8 +93,12 @@ async function findSessionFiles(): Promise { function parseSessionFile(filePath: string): OpenCodeSession | null { try { const content = fs.readFileSync(filePath, 'utf8'); - return JSON.parse(content) as OpenCodeSession; - } catch { + const result = OpenCodeSessionSchema.safeParse(JSON.parse(content)); + if (result.success) return result.data; + logger.debug('opencode: session validation failed', filePath, result.error.message); + return null; + } catch (err) { + logger.debug('opencode: failed to parse session file', filePath, err); return null; } } @@ -172,10 +111,12 @@ function loadProjectInfo(projectId: string): OpenCodeProject | null { try { if (fs.existsSync(projectFile)) { const content = fs.readFileSync(projectFile, 'utf8'); - return JSON.parse(content) as OpenCodeProject; + const result = OpenCodeProjectSchema.safeParse(JSON.parse(content)); + if (result.success) return result.data; + logger.debug('opencode: project validation failed', projectFile, result.error.message); } - } catch { - // Ignore parse errors + } catch (err) { + logger.debug('opencode: failed to parse project file', projectFile, err); } return null; } @@ -188,30 +129,36 @@ function getFirstUserMessage(sessionId: string): string { if (!fs.existsSync(messageDir)) return ''; try { - const messageFiles = fs.readdirSync(messageDir) - .filter(f => f.startsWith('msg_') && f.endsWith('.json')) + const messageFiles = fs + .readdirSync(messageDir) + .filter((f) => f.startsWith('msg_') && f.endsWith('.json')) .sort(); // Sort to get chronological order for (const msgFile of messageFiles) { const msgPath = path.join(messageDir, msgFile); const msgContent = fs.readFileSync(msgPath, 
'utf8'); - const msg = JSON.parse(msgContent) as OpenCodeMessage; - + const msgResult = OpenCodeMessageSchema.safeParse(JSON.parse(msgContent)); + if (!msgResult.success) continue; + const msg = msgResult.data; + if (msg.role === 'user') { // Get the message text from parts const messageId = msg.id; const partDir = path.join(OPENCODE_STORAGE_DIR, 'part', messageId); - + if (fs.existsSync(partDir)) { - const partFiles = fs.readdirSync(partDir) - .filter(f => f.startsWith('prt_') && f.endsWith('.json')) + const partFiles = fs + .readdirSync(partDir) + .filter((f) => f.startsWith('prt_') && f.endsWith('.json')) .sort(); - + for (const partFile of partFiles) { const partPath = path.join(partDir, partFile); const partContent = fs.readFileSync(partPath, 'utf8'); - const part = JSON.parse(partContent) as OpenCodePart; - + const partResult = OpenCodePartSchema.safeParse(JSON.parse(partContent)); + if (!partResult.success) continue; + const part = partResult.data; + if (part.type === 'text' && part.text) { return part.text; } @@ -219,24 +166,13 @@ function getFirstUserMessage(sessionId: string): string { } } } - } catch { - // Ignore errors + } catch (err) { + logger.debug('opencode: failed to read messages for session', sessionId, err); } return ''; } -/** - * Extract repo name from worktree path - */ -function extractRepoFromPath(worktree: string): string { - const parts = worktree.split('/').filter(Boolean); - if (parts.length >= 2) { - return parts.slice(-2).join('/'); - } - return parts[parts.length - 1] || ''; -} - /** * Count message lines for a session */ @@ -245,10 +181,10 @@ function countSessionLines(sessionId: string): number { if (!fs.existsSync(messageDir)) return 0; try { - const messageFiles = fs.readdirSync(messageDir) - .filter(f => f.startsWith('msg_') && f.endsWith('.json')); + const messageFiles = fs.readdirSync(messageDir).filter((f) => f.startsWith('msg_') && f.endsWith('.json')); return messageFiles.length; - } catch { + } catch (err) { + 
logger.debug('opencode: failed to count messages for session', sessionId, err); return 0; } } @@ -274,11 +210,13 @@ function parseSessionsFromSqlite(): UnifiedSession[] { const handle = openDb(); if (!handle) return []; + const { db, close } = handle; try { - const { db, close } = handle; - const rows = db.prepare( - 'SELECT id, project_id, slug, directory, title, version, summary_additions, summary_deletions, summary_files, time_created, time_updated FROM session ORDER BY time_updated DESC' - ).all() as SqliteSessionRow[]; + const rows = db + .prepare( + 'SELECT id, project_id, slug, directory, title, version, summary_additions, summary_deletions, summary_files, time_created, time_updated FROM session ORDER BY time_updated DESC', + ) + .all() as SqliteSessionRow[]; // Build project lookup const projectRows = db.prepare('SELECT id, worktree FROM project').all() as SqliteProjectRow[]; @@ -288,24 +226,30 @@ function parseSessionsFromSqlite(): UnifiedSession[] { for (const row of rows) { const cwd = row.directory || projectMap.get(row.project_id) || ''; - + // Count messages for this session - const msgCount = db.prepare('SELECT COUNT(*) as cnt FROM message WHERE session_id = ?').get(row.id) as { cnt: number }; + const msgCount = db.prepare('SELECT COUNT(*) as cnt FROM message WHERE session_id = ?').get(row.id) as + | { cnt: number } + | undefined; // Get first user message for summary if no title let summary = row.title || ''; if (!summary || summary.startsWith('New session')) { - const firstMsg = db.prepare( - "SELECT m.id, p.data FROM message m JOIN part p ON p.message_id = m.id WHERE m.session_id = ? AND m.data LIKE '%\"role\":\"user\"%' AND p.data LIKE '%\"type\":\"text\"%' ORDER BY m.time_created ASC LIMIT 1" - ).get(row.id) as { id: string; data: string } | undefined; - + const firstMsg = db + .prepare( + 'SELECT m.id, p.data FROM message m JOIN part p ON p.message_id = m.id WHERE m.session_id = ? 
AND m.data LIKE \'%"role":"user"%\' AND p.data LIKE \'%"type":"text"%\' ORDER BY m.time_created ASC LIMIT 1', + ) + .get(row.id) as { id: string; data: string } | undefined; + if (firstMsg) { try { const partData = JSON.parse(firstMsg.data); if (partData.text) { summary = partData.text.replace(/\n/g, ' ').replace(/\s+/g, ' ').trim().slice(0, 50); } - } catch { /* ignore */ } + } catch (_err) { + /* ignore malformed part data */ + } } } @@ -313,8 +257,8 @@ function parseSessionsFromSqlite(): UnifiedSession[] { id: row.id, source: 'opencode', cwd, - repo: extractRepoFromPath(cwd), - lines: (msgCount?.cnt ?? 0), + repo: extractRepoFromCwd(cwd), + lines: msgCount?.cnt ?? 0, bytes: 0, // SQLite doesn't have per-session file size createdAt: new Date(row.time_created), updatedAt: new Date(row.time_updated), @@ -324,10 +268,12 @@ function parseSessionsFromSqlite(): UnifiedSession[] { }); } - close(); return sessions; - } catch { + } catch (err) { + logger.debug('opencode: SQLite session query failed', err); return []; + } finally { + close(); } } @@ -346,14 +292,10 @@ async function parseSessionsFromJson(): Promise { // Get project info for worktree const project = loadProjectInfo(session.projectID); const cwd = session.directory || project?.worktree || ''; - + // Get first user message for summary const firstUserMessage = getFirstUserMessage(session.id); - const summary = session.title || firstUserMessage - .replace(/\n/g, ' ') - .replace(/\s+/g, ' ') - .trim() - .slice(0, 50); + const summary = session.title || firstUserMessage.replace(/\n/g, ' ').replace(/\s+/g, ' ').trim().slice(0, 50); const fileStats = fs.statSync(filePath); const lines = countSessionLines(session.id); @@ -362,7 +304,7 @@ async function parseSessionsFromJson(): Promise { id: session.id, source: 'opencode', cwd, - repo: extractRepoFromPath(cwd), + repo: extractRepoFromCwd(cwd), lines, bytes: fileStats.size, createdAt: new Date(session.time.created), @@ -370,7 +312,8 @@ async function 
parseSessionsFromJson(): Promise { originalPath: filePath, summary: summary || session.slug || undefined, }); - } catch { + } catch (err) { + logger.debug('opencode: skipping unparseable JSON session', filePath, err); // Skip files we can't parse } } @@ -387,7 +330,7 @@ function readAllMessages(sessionId: string): ConversationMessage[] { const msgs = readMessagesFromSqlite(sessionId); if (msgs.length > 0) return msgs; } - + // Fallback to JSON files return readMessagesFromJson(sessionId); } @@ -399,47 +342,49 @@ function readMessagesFromSqlite(sessionId: string): ConversationMessage[] { const handle = openDb(); if (!handle) return []; + const { db, close } = handle; try { - const { db, close } = handle; - // Get messages with their data - const msgRows = db.prepare( - 'SELECT id, session_id, time_created, data FROM message WHERE session_id = ? ORDER BY time_created ASC' - ).all(sessionId) as SqliteMessageRow[]; + const msgRows = db + .prepare('SELECT id, session_id, time_created, data FROM message WHERE session_id = ? ORDER BY time_created ASC') + .all(sessionId) as SqliteMessageRow[]; const messages: ConversationMessage[] = []; for (const msgRow of msgRows) { - const msgData = JSON.parse(msgRow.data) as { role: string; [key: string]: unknown }; - const role = msgData.role === 'user' ? 'user' : 'assistant'; - + const msgDataResult = SqliteMsgDataSchema.safeParse(JSON.parse(msgRow.data)); + if (!msgDataResult.success) continue; + const role: 'user' | 'assistant' = msgDataResult.data.role === 'user' ? 'user' : 'assistant'; + // Get text parts for this message - const partRows = db.prepare( - "SELECT data FROM part WHERE message_id = ? ORDER BY time_created ASC" - ).all(msgRow.id) as SqlitePartRow[]; + const partRows = db + .prepare('SELECT data FROM part WHERE message_id = ? 
ORDER BY time_created ASC') + .all(msgRow.id) as SqlitePartRow[]; let text = ''; for (const partRow of partRows) { - const partData = JSON.parse(partRow.data) as { type: string; text?: string }; - if (partData.type === 'text' && partData.text) { - text += partData.text + '\n'; + const partDataResult = SqlitePartDataSchema.safeParse(JSON.parse(partRow.data)); + if (!partDataResult.success) continue; + if (partDataResult.data.type === 'text' && partDataResult.data.text) { + text += partDataResult.data.text + '\n'; } } if (text.trim()) { messages.push({ - role: role as 'user' | 'assistant', + role, content: text.trim(), timestamp: new Date(msgRow.time_created), }); } } - close(); return messages; } catch (err) { - console.error('Error reading OpenCode SQLite messages:', err); + logger.debug('opencode: SQLite message query failed for session', sessionId, err); return []; + } finally { + close(); } } @@ -449,33 +394,39 @@ function readMessagesFromSqlite(sessionId: string): ConversationMessage[] { function readMessagesFromJson(sessionId: string): ConversationMessage[] { const messages: ConversationMessage[] = []; const messageDir = path.join(OPENCODE_STORAGE_DIR, 'message', sessionId); - + if (!fs.existsSync(messageDir)) return messages; try { - const messageFiles = fs.readdirSync(messageDir) - .filter(f => f.startsWith('msg_') && f.endsWith('.json')) + const messageFiles = fs + .readdirSync(messageDir) + .filter((f) => f.startsWith('msg_') && f.endsWith('.json')) .sort(); for (const msgFile of messageFiles) { const msgPath = path.join(messageDir, msgFile); const msgContent = fs.readFileSync(msgPath, 'utf8'); - const msg = JSON.parse(msgContent) as OpenCodeMessage; - + const msgResult = OpenCodeMessageSchema.safeParse(JSON.parse(msgContent)); + if (!msgResult.success) continue; + const msg = msgResult.data; + // Get message text from parts const partDir = path.join(OPENCODE_STORAGE_DIR, 'part', msg.id); let text = ''; - + if (fs.existsSync(partDir)) { - const partFiles = 
fs.readdirSync(partDir) - .filter(f => f.startsWith('prt_') && f.endsWith('.json')) + const partFiles = fs + .readdirSync(partDir) + .filter((f) => f.startsWith('prt_') && f.endsWith('.json')) .sort(); - + for (const partFile of partFiles) { const partPath = path.join(partDir, partFile); const partContent = fs.readFileSync(partPath, 'utf8'); - const part = JSON.parse(partContent) as OpenCodePart; - + const partResult = OpenCodePartSchema.safeParse(JSON.parse(partContent)); + if (!partResult.success) continue; + const part = partResult.data; + if (part.type === 'text' && part.text) { text += part.text + '\n'; } @@ -490,31 +441,80 @@ function readMessagesFromJson(sessionId: string): ConversationMessage[] { }); } } - } catch { + } catch (err) { + logger.debug('opencode: failed to read JSON messages for session', sessionId, err); // Ignore errors } return messages; } +/** + * Extract tool-level summary from OpenCode session metadata. + * OpenCode stores additions/deletions/files at the session level (not per-tool), + * so we produce a single high-level "Edit" summary when data is available. 
+ */ +function extractOpenCodeToolSummaries(sessionId: string): ToolUsageSummary[] { + const summaries: ToolUsageSummary[] = []; + + // Try to read the raw session file for summary.additions/deletions/files + const sessionDir = path.join(OPENCODE_STORAGE_DIR, 'session'); + try { + for (const projectDir of listSubdirectories(sessionDir)) { + const sessionFile = path.join(projectDir, `${sessionId}.json`); + if (!fs.existsSync(sessionFile)) continue; + const content = fs.readFileSync(sessionFile, 'utf8'); + const result = OpenCodeSessionSchema.safeParse(JSON.parse(content)); + if (!result.success) break; + const raw = result.data; + if (raw.summary && (raw.summary.additions || raw.summary.deletions)) { + const added = raw.summary.additions || 0; + const removed = raw.summary.deletions || 0; + const files = raw.summary.files || 0; + summaries.push({ + name: 'Edit', + count: files || 1, + samples: [ + { + summary: `${files} file(s) changed (+${added} -${removed})`, + data: { + category: 'edit', + filePath: `(${files} files)`, + diffStats: { added, removed }, + }, + }, + ], + }); + } + break; // found the session file + } + } catch { + // Silently skip — tool summaries are optional + } + + return summaries; +} + /** * Extract context from an OpenCode session for cross-tool continuation */ -export async function extractOpenCodeContext(session: UnifiedSession): Promise { +export async function extractOpenCodeContext(session: UnifiedSession, config?: VerbosityConfig): Promise { + const resolvedConfig = config ?? 
getPreset('standard'); const recentMessages = readAllMessages(session.id); const filesModified: string[] = []; const pendingTasks: string[] = []; + const toolSummaries = extractOpenCodeToolSummaries(session.id); - const markdown = generateHandoffMarkdown(session, recentMessages.slice(-10), filesModified, pendingTasks, []); + const trimmed = recentMessages.slice(-resolvedConfig.recentMessages); + + const markdown = generateHandoffMarkdown(session, trimmed, filesModified, pendingTasks, toolSummaries, undefined, resolvedConfig); return { session, - recentMessages: recentMessages.slice(-10), + recentMessages: trimmed, filesModified, pendingTasks, - toolSummaries: [], + toolSummaries, markdown, }; } - -// generateHandoffMarkdown is imported from ../utils/markdown.js diff --git a/src/parsers/qwen-code.ts b/src/parsers/qwen-code.ts new file mode 100644 index 0000000..518fd8a --- /dev/null +++ b/src/parsers/qwen-code.ts @@ -0,0 +1,521 @@ +import * as fs from 'node:fs'; +import * as path from 'node:path'; +import * as readline from 'node:readline'; +import type { VerbosityConfig } from '../config/index.js'; +import { getPreset } from '../config/index.js'; +import { logger } from '../logger.js'; +import type { + ConversationMessage, + SessionContext, + SessionNotes, + ToolUsageSummary, + UnifiedSession, +} from '../types/index.js'; +import { QwenChatRecordSchema } from '../types/schemas.js'; +import type { QwenChatRecord, QwenContent, QwenFileDiff, QwenPart } from '../types/schemas.js'; +import { classifyToolName } from '../types/tool-names.js'; +import { listSubdirectories } from '../utils/fs-helpers.js'; +import { generateHandoffMarkdown } from '../utils/markdown.js'; +import { cleanSummary, extractRepoFromCwd, homeDir } from '../utils/parser-helpers.js'; +import { fileSummary, mcpSummary, SummaryCollector, shellSummary, truncate } from '../utils/tool-summarizer.js'; + +const qwenHome = process.env.QWEN_HOME || homeDir(); +// Qwen Code stores chats under 
~/.qwen/projects//chats/ +// sanitizeCwd replaces all non-alphanumeric chars with '-' +const QWEN_PROJECTS_DIR = path.join(qwenHome, '.qwen', 'projects'); + +// ── Helpers ───────────────────────────────────────────────────────────────── + +/** Type guard: is resultDisplay a FileDiff object (not a string or todo)? */ +function isFileDiff(rd: unknown): rd is QwenFileDiff { + if (!rd || typeof rd !== 'object') return false; + return 'fileName' in rd || 'fileDiff' in rd; +} + +/** Parse a timestamp string defensively, falling back to a given Date */ +function parseTimestamp(ts: string, fallback: Date): Date { + if (!ts) return fallback; + const d = new Date(ts); + return Number.isNaN(d.getTime()) ? fallback : d; +} + +// ── JSONL reading ─────────────────────────────────────────────────────────── + +async function readJsonlRecords(filePath: string): Promise { + const records: QwenChatRecord[] = []; + const input = fs.createReadStream(filePath, 'utf8'); + const rl = readline.createInterface({ input, crlfDelay: Infinity }); + + for await (const line of rl) { + if (!line.trim()) continue; + try { + const parsed = QwenChatRecordSchema.safeParse(JSON.parse(line)); + if (parsed.success) records.push(parsed.data); + } catch { + logger.debug('qwen-code: skipping malformed JSONL line in', filePath); + } + } + + return records; +} + +// ── Text extraction ───────────────────────────────────────────────────────── + +/** Extract non-thought text from parts */ +function extractTextFromParts(parts: QwenPart[] | undefined): string { + if (!parts) return ''; + return parts + .filter((p) => p.text && !p.thought) + .map((p) => p.text!) 
+ .join('\n'); +} + +/** Extract thought/reasoning text from parts */ +function extractThoughtsFromParts(parts: QwenPart[] | undefined): string[] { + if (!parts) return []; + return parts.filter((p) => p.text && p.thought).map((p) => p.text!); +} + +function extractContentText(content: QwenContent | undefined): string { + if (!content?.parts) return ''; + return extractTextFromParts(content.parts); +} + +// ── Session file discovery ────────────────────────────────────────────────── + +async function findSessionFiles(): Promise { + const results: string[] = []; + + if (!fs.existsSync(QWEN_PROJECTS_DIR)) return results; + + for (const projectDir of listSubdirectories(QWEN_PROJECTS_DIR)) { + const chatsDir = path.join(projectDir, 'chats'); + if (!fs.existsSync(chatsDir)) continue; + + try { + const entries = fs.readdirSync(chatsDir, { withFileTypes: true }); + for (const entry of entries) { + if (entry.isFile() && entry.name.endsWith('.jsonl')) { + results.push(path.join(chatsDir, entry.name)); + } + } + } catch (err) { + logger.debug('qwen-code: error reading chats dir', chatsDir, err); + } + } + + return results; +} + +// ── Session metadata extraction ───────────────────────────────────────────── + +async function extractSessionMeta(filePath: string): Promise<{ + sessionId: string; + cwd: string; + gitBranch?: string; + firstUserMessage: string; + firstTimestamp: string; + lastTimestamp: string; + model?: string; + lineCount: number; +} | null> { + const input = fs.createReadStream(filePath, 'utf8'); + const rl = readline.createInterface({ input, crlfDelay: Infinity }); + + let sessionId = ''; + let cwd = ''; + let gitBranch: string | undefined; + let firstUserMessage = ''; + let firstTimestamp = ''; + let lastTimestamp = ''; + let model: string | undefined; + let lineCount = 0; + + for await (const line of rl) { + if (!line.trim()) continue; + lineCount++; + + try { + const parsed = QwenChatRecordSchema.safeParse(JSON.parse(line)); + if (!parsed.success) 
continue; + const record = parsed.data; + + if (!sessionId && record.sessionId) sessionId = record.sessionId; + if (!cwd && record.cwd) cwd = record.cwd; + if (!gitBranch && record.gitBranch) gitBranch = record.gitBranch; + if (!model && record.model) model = record.model; + + if (!firstTimestamp && record.timestamp) firstTimestamp = record.timestamp; + if (record.timestamp) lastTimestamp = record.timestamp; + + if (!firstUserMessage && record.type === 'user') { + firstUserMessage = extractContentText(record.message); + } + } catch { + // skip malformed line + } + } + + if (!sessionId) return null; + + return { sessionId, cwd, gitBranch, firstUserMessage, firstTimestamp, lastTimestamp, model, lineCount }; +} + +// ── Tool data extraction ──────────────────────────────────────────────────── + +function extractToolData( + records: QwenChatRecord[], + config?: VerbosityConfig, +): { summaries: ToolUsageSummary[]; filesModified: string[] } { + const collector = new SummaryCollector(config); + const processedCallUuids = new Set(); + + for (const record of records) { + // Extract from functionCall parts in assistant messages + if (record.type === 'assistant' && record.message?.parts) { + const hasFunctionCalls = record.message.parts.some((p: QwenPart) => p.functionCall); + if (hasFunctionCalls) processedCallUuids.add(record.uuid); + for (const part of record.message.parts) { + if (!part.functionCall) continue; + const { name, args } = part.functionCall; + const category = classifyToolName(name); + if (!category) continue; + + const fp = (args?.file_path as string) || (args?.path as string) || ''; + + // Try to extract result from a matching functionResponse in the same parts array + let resultStr: string | undefined; + for (const rp of record.message.parts) { + if (rp.functionResponse?.name === name && rp.functionResponse.response?.output) { + resultStr = String(rp.functionResponse.response.output); + break; + } + } + const isResponseError = record.message.parts.some( + 
(rp: QwenPart) => rp.functionResponse?.name === name && rp.functionResponse.response?.status === 'error', + ); + + switch (category) { + case 'shell': { + const cmd = (args?.command as string) || (args?.cmd as string) || ''; + collector.add(name, shellSummary(cmd, resultStr), { + data: { category: 'shell', command: cmd, ...(resultStr ? { stdoutTail: resultStr.slice(-500) } : {}) }, + isError: isResponseError, + }); + break; + } + case 'write': { + collector.add(name, fileSummary('write', fp), { + data: { category: 'write', filePath: fp }, + filePath: fp, + isWrite: true, + isError: isResponseError, + }); + break; + } + case 'read': + collector.add(name, fileSummary('read', fp), { + data: { category: 'read', filePath: fp }, + filePath: fp, + isError: isResponseError, + }); + break; + case 'edit': + collector.add(name, fileSummary('edit', fp), { + data: { category: 'edit', filePath: fp }, + filePath: fp, + isWrite: true, + isError: isResponseError, + }); + break; + case 'grep': { + const pattern = (args?.pattern as string) || (args?.query as string) || ''; + collector.add(name, `grep "${truncate(pattern, 40)}"`, { + data: { category: 'grep', pattern, ...(fp ? { targetPath: fp } : {}) }, + isError: isResponseError, + }); + break; + } + case 'glob': { + const pattern = (args?.pattern as string) || fp; + collector.add(name, `glob ${truncate(pattern, 50)}`, { + data: { category: 'glob', pattern }, + isError: isResponseError, + }); + break; + } + case 'search': + collector.add(name, `search "${truncate((args?.query as string) || '', 50)}"`, { + data: { category: 'search', query: (args?.query as string) || '' }, + isError: isResponseError, + }); + break; + case 'fetch': { + const url = (args?.url as string) || ''; + collector.add(name, `fetch ${truncate(url, 60)}`, { + data: { + category: 'fetch', + url, + ...(resultStr ? 
{ resultPreview: resultStr.slice(0, 100) } : {}), + }, + isError: isResponseError, + }); + break; + } + case 'task': { + const desc = (args?.description as string) || (args?.prompt as string) || ''; + const agentType = (args?.subagent_type as string) || undefined; + collector.add(name, `task "${truncate(desc, 60)}"${agentType ? ` (${agentType})` : ''}`, { + data: { category: 'task', description: desc, ...(agentType ? { agentType } : {}) }, + isError: isResponseError, + }); + break; + } + case 'ask': { + const question = truncate((args?.question as string) || (args?.prompt as string) || '', 80); + collector.add(name, `ask: "${question}"`, { + data: { category: 'ask', question }, + isError: isResponseError, + }); + break; + } + default: { + const argsStr = args ? JSON.stringify(args).slice(0, 100) : ''; + collector.add(name, mcpSummary(name, argsStr, resultStr), { + data: { + category: 'mcp', + toolName: name, + ...(argsStr ? { params: argsStr } : {}), + ...(resultStr ? { result: resultStr.slice(0, 100) } : {}), + }, + isError: isResponseError, + }); + } + } + } + } + + // Extract from tool_result records (skip if parent already processed via functionCall) + if (record.type === 'tool_result' && record.toolCallResult) { + if (record.parentUuid && processedCallUuids.has(record.parentUuid)) continue; + const tcr = record.toolCallResult; + const displayName = tcr.displayName || ''; + const isError = tcr.status ? 
!['ok', 'success', 'completed'].includes(tcr.status.toLowerCase()) : false; + + if (displayName && isFileDiff(tcr.resultDisplay)) { + const rd = tcr.resultDisplay; + const fp = rd.fileName || ''; + + let diffStat: { added: number; removed: number } | undefined; + if (rd.diffStat) { + diffStat = { + added: rd.diffStat.model_added_lines || 0, + removed: rd.diffStat.model_removed_lines || 0, + }; + } else if (rd.fileDiff) { + // Fallback: count +/- lines from fileDiff + const lines = rd.fileDiff.split('\n'); + diffStat = { + added: lines.filter((l: string) => l.startsWith('+')).length, + removed: lines.filter((l: string) => l.startsWith('-')).length, + }; + } + + // isNewFile is determined by originalContent === null + const isNew = rd.originalContent === null; + const diff = rd.fileDiff || undefined; + collector.add(displayName, fileSummary(isNew ? 'write' : 'edit', fp, diffStat, isNew), { + data: { + category: isNew ? 'write' : 'edit', + filePath: fp, + isNewFile: isNew, + ...(diff ? { diff } : {}), + ...(diffStat ? 
{ diffStats: diffStat } : {}), + }, + filePath: fp, + isWrite: true, + isError, + }); + } + } + } + + return { summaries: collector.getSummaries(), filesModified: collector.getFilesModified() }; +} + +// ── Session notes extraction ──────────────────────────────────────────────── + +function extractSessionNotes(records: QwenChatRecord[]): SessionNotes { + const notes: SessionNotes = {}; + const reasoning: string[] = []; + + for (const record of records) { + if (record.type !== 'assistant') continue; + + if (record.model && !notes.model) notes.model = record.model; + + // Extract reasoning from thought parts + if (record.message?.parts && reasoning.length < 5) { + for (const thought of extractThoughtsFromParts(record.message.parts)) { + if (reasoning.length >= 5) break; + if (thought.length > 10) reasoning.push(truncate(thought, 200)); + } + } + + if (record.usageMetadata) { + if (!notes.tokenUsage) notes.tokenUsage = { input: 0, output: 0 }; + notes.tokenUsage.input += record.usageMetadata.promptTokenCount || 0; + notes.tokenUsage.output += record.usageMetadata.candidatesTokenCount || 0; + + if (record.usageMetadata.cachedContentTokenCount) { + if (!notes.cacheTokens) notes.cacheTokens = { creation: 0, read: 0 }; + notes.cacheTokens.read += record.usageMetadata.cachedContentTokenCount; + } + if (record.usageMetadata.thoughtsTokenCount) { + notes.thinkingTokens = (notes.thinkingTokens || 0) + record.usageMetadata.thoughtsTokenCount; + } + } + } + + if (reasoning.length > 0) notes.reasoning = reasoning; + return notes; +} + +// ── Public API ────────────────────────────────────────────────────────────── + +/** Reconstruct main conversation path by walking from latest leaf back via parentUuid */ +function reconstructMainPath(records: QwenChatRecord[]): QwenChatRecord[] { + if (records.length === 0) return []; + + const byUuid = new Map(); + const parentUuids = new Set(); + + for (const r of records) { + byUuid.set(r.uuid, r); + if (r.parentUuid) 
parentUuids.add(r.parentUuid); + } + + // Find the latest leaf (record with no children, latest timestamp) + let latestLeaf = records[records.length - 1]; + let latestTime = 0; + for (const r of records) { + if (!parentUuids.has(r.uuid)) { + const t = new Date(r.timestamp).getTime(); + if (!Number.isNaN(t) && t > latestTime) { + latestTime = t; + latestLeaf = r; + } + } + } + + // Walk back from leaf to root via parentUuid + const pathResult: QwenChatRecord[] = []; + let current: QwenChatRecord | undefined = latestLeaf; + while (current) { + pathResult.unshift(current); + current = current.parentUuid ? byUuid.get(current.parentUuid) : undefined; + } + + return pathResult; +} + +export async function parseQwenCodeSessions(): Promise { + const files = await findSessionFiles(); + const sessions: UnifiedSession[] = []; + + for (const filePath of files) { + try { + const meta = await extractSessionMeta(filePath); + if (!meta) continue; + + const fileStats = fs.statSync(filePath); + + sessions.push({ + id: meta.sessionId, + source: 'qwen-code', + cwd: meta.cwd, + repo: extractRepoFromCwd(meta.cwd), + branch: meta.gitBranch, + lines: meta.lineCount, + bytes: fileStats.size, + createdAt: parseTimestamp(meta.firstTimestamp, fileStats.mtime), + updatedAt: parseTimestamp(meta.lastTimestamp, fileStats.mtime), + originalPath: filePath, + summary: cleanSummary(meta.firstUserMessage) || undefined, + model: meta.model, + }); + } catch (err) { + logger.debug('qwen-code: skipping unparseable session', filePath, err); + } + } + + return sessions + .filter((s) => s.summary && s.summary.length > 0) + .sort((a, b) => b.updatedAt.getTime() - a.updatedAt.getTime()); +} + +export async function extractQwenCodeContext( + session: UnifiedSession, + config?: VerbosityConfig, +): Promise { + const resolvedConfig = config ?? 
getPreset('standard'); + const records = await readJsonlRecords(session.originalPath); + const recentMessages: ConversationMessage[] = []; + const pendingTasks: string[] = []; + + const toolData = extractToolData(records, resolvedConfig); + const sessionNotes = extractSessionNotes(records); + + // Extract recent messages and pending tasks from main conversation path + const mainPath = reconstructMainPath(records); + const messageRecords = mainPath.filter((r) => r.type === 'user' || r.type === 'assistant'); + for (const record of messageRecords.slice(-resolvedConfig.recentMessages * 2)) { + // Extract pending tasks from thought parts + if (record.type === 'assistant' && record.message?.parts && pendingTasks.length < 5) { + for (const thought of extractThoughtsFromParts(record.message.parts)) { + if (pendingTasks.length >= 5) break; + const lower = thought.toLowerCase(); + if ( + lower.includes('todo') || + lower.includes('next') || + lower.includes('remaining') || + lower.includes('need to') || + lower.includes('next step') + ) { + pendingTasks.push(truncate(thought, 200)); + } + } + } + + const text = extractContentText(record.message); + if (!text) continue; + + recentMessages.push({ + role: record.type === 'user' ? 'user' : 'assistant', + content: text, + timestamp: new Date(record.timestamp), + }); + } + + const trimmed = recentMessages.slice(-resolvedConfig.recentMessages); + + const markdown = generateHandoffMarkdown( + session, + trimmed, + toolData.filesModified, + pendingTasks, + toolData.summaries, + sessionNotes, + resolvedConfig, + ); + + return { + session: sessionNotes?.model ? 
{ ...session, model: sessionNotes.model } : session, + recentMessages: trimmed, + filesModified: toolData.filesModified, + pendingTasks, + toolSummaries: toolData.summaries, + sessionNotes, + markdown, + }; +} diff --git a/src/parsers/registry.ts b/src/parsers/registry.ts new file mode 100644 index 0000000..43ce953 --- /dev/null +++ b/src/parsers/registry.ts @@ -0,0 +1,921 @@ +import chalk from 'chalk'; +import type { VerbosityConfig } from '../config/index.js'; +import type { SessionContext, SessionSource, UnifiedSession } from '../types/index.js'; +import { TOOL_NAMES } from '../types/tool-names.js'; +import { + type FlagOccurrence, + type ForwardFlagMapContext, + type ForwardFlagMapper, + type ForwardMapResult, + normalizeAgentSandbox, +} from '../utils/forward-flags.js'; +import { extractAmpContext, parseAmpSessions } from './amp.js'; +import { extractAntigravityContext, parseAntigravitySessions } from './antigravity.js'; +import { extractClaudeContext, parseClaudeSessions } from './claude.js'; +import { + extractClineContext, + extractKiloCodeContext, + extractRooCodeContext, + parseClineSessions, + parseKiloCodeSessions, + parseRooCodeSessions, +} from './cline.js'; +import { extractCodexContext, parseCodexSessions } from './codex.js'; +import { extractCopilotContext, parseCopilotSessions } from './copilot.js'; +import { extractCrushContext, parseCrushSessions } from './crush.js'; +import { extractCursorContext, parseCursorSessions } from './cursor.js'; +import { extractDroidContext, parseDroidSessions } from './droid.js'; +import { extractGeminiContext, parseGeminiSessions } from './gemini.js'; +import { extractKimiContext, parseKimiSessions } from './kimi.js'; +import { extractKiroContext, parseKiroSessions } from './kiro.js'; +import { extractOpenCodeContext, parseOpenCodeSessions } from './opencode.js'; +import { extractQwenCodeContext, parseQwenCodeSessions } from './qwen-code.js'; + +/** + * Adapter interface — single contract for all supported CLI 
tools. + * To add a new tool, create its parser and add an entry here. + */ +export interface ToolAdapter { + /** Unique identifier — must match a member of the SessionSource union */ + name: SessionSource; + /** Human-readable label (e.g. "Claude Code") */ + label: string; + /** Chalk color function for TUI display */ + color: (s: string) => string; + /** Storage directory path (for help text) */ + storagePath: string; + /** Environment variable that overrides the default storage path (if any) */ + envVar?: string; + /** CLI binary name for availability checks and spawning */ + binaryName: string; + /** Additional binary names to try when the primary name is unavailable */ + binaryFallbacks?: string[]; + /** Discover and index all sessions */ + parseSessions: () => Promise; + /** Extract full context for cross-tool handoff */ + extractContext: (session: UnifiedSession, config?: VerbosityConfig) => Promise; + /** CLI args to resume a session natively */ + nativeResumeArgs: (session: UnifiedSession) => string[]; + /** CLI args to start with a handoff prompt */ + crossToolArgs: (prompt: string, cwd: string) => string[]; + /** Display string for the native resume command */ + resumeCommandDisplay: (session: UnifiedSession) => string; + /** Adapter-level mapping for interactive handoff launch flags */ + mapHandoffFlags?: ForwardFlagMapper; +} + +/** + * Central registry — single source of truth for all supported tools. + * Insertion order determines display order in the TUI. 
+ */ +const _adapters: Partial> = {}; + +function register(adapter: ToolAdapter): void { + _adapters[adapter.name] = adapter; +} + +function normalizePlanOccurrences(context: ForwardFlagMapContext): FlagOccurrence[] { + const fromPlanFlag = context.all('plan'); + const fromMode = context.all('mode').filter((occ) => String(occ.value).toLowerCase() === 'plan'); + const fromApproval = context.all('approvalMode').filter((occ) => String(occ.value).toLowerCase() === 'plan'); + const fromPermission = context.all('permissionMode').filter((occ) => String(occ.value).toLowerCase() === 'plan'); + return [...fromPlanFlag, ...fromMode, ...fromApproval, ...fromPermission]; +} + +function collectAutoApproveOccurrences(context: ForwardFlagMapContext): FlagOccurrence[] { + return context.all('yolo', 'force', 'allowAll', 'dangerouslyBypass', 'dangerouslySkipPermissions'); +} + +function mapCodexFlags(context: ForwardFlagMapContext): ForwardMapResult { + const args: string[] = []; + const warnings: string[] = []; + + const autoOccurrences = collectAutoApproveOccurrences(context); + const fullAutoOccurrences = context.all('fullAuto'); + const sandboxOccurrences = context.all('sandbox'); + const askOccurrences = context.all('askForApproval'); + + if (autoOccurrences.length > 0) { + context.consume(...autoOccurrences, ...fullAutoOccurrences, ...sandboxOccurrences, ...askOccurrences); + args.push('--dangerously-bypass-approvals-and-sandbox'); + + if (fullAutoOccurrences.length > 0 || sandboxOccurrences.length > 0 || askOccurrences.length > 0) { + warnings.push('Codex precedence: auto-approve flags override --full-auto, --sandbox, and --ask-for-approval.'); + } + } else if (fullAutoOccurrences.length > 0) { + context.consume(...fullAutoOccurrences, ...sandboxOccurrences, ...askOccurrences); + args.push('--full-auto'); + + if (sandboxOccurrences.length > 0 || askOccurrences.length > 0) { + warnings.push('Codex precedence: --full-auto overrides --sandbox and --ask-for-approval.'); + } + } 
else { + const sandbox = context.latestString('sandbox'); + if (sandbox) { + context.consumeKeys('sandbox'); + args.push('--sandbox', sandbox); + } + + const askForApproval = context.latestString('askForApproval'); + if (askForApproval) { + context.consumeKeys('askForApproval'); + args.push('--ask-for-approval', askForApproval); + } + } + + const model = context.latestString('model'); + if (model) { + context.consumeKeys('model'); + args.push('--model', model); + } + + for (const directory of context.consumeAllCsvStrings('addDir', 'includeDirectories')) { + args.push('--add-dir', directory); + } + + const cwd = context.latestString('cd', 'workspace'); + if (cwd) { + context.consumeKeys('cd', 'workspace'); + args.push('--cd', cwd); + } + + for (const override of context.consumeAllStrings('config')) { + args.push('--config', override); + } + + return { mappedArgs: args, warnings }; +} + +function mapGeminiFlags(context: ForwardFlagMapContext): ForwardMapResult { + const args: string[] = []; + + const autoOccurrences = collectAutoApproveOccurrences(context); + const explicitApprovalMode = context.latestString('approvalMode'); + const planOccurrences = normalizePlanOccurrences(context); + + if (autoOccurrences.length > 0) { + context.consume(...autoOccurrences, ...context.all('approvalMode'), ...planOccurrences); + args.push('--approval-mode', 'yolo'); + } else if (explicitApprovalMode) { + context.consumeKeys('approvalMode'); + args.push('--approval-mode', explicitApprovalMode); + } else if (planOccurrences.length > 0) { + context.consume(...planOccurrences); + args.push('--approval-mode', 'plan'); + } + + const sandbox = context.latest('sandbox'); + if (sandbox) { + const normalized = String(sandbox.value).toLowerCase(); + if (sandbox.value === true || ['true', '1', 'yes', 'on', 'enabled'].includes(normalized)) { + context.consumeKeys('sandbox'); + args.push('--sandbox'); + } + } + + const model = context.latestString('model'); + if (model) { + 
context.consumeKeys('model'); + args.push('--model', model); + } + + const hasDebug = context.has('debug'); + if (hasDebug) { + context.consumeKeys('debug'); + args.push('--debug'); + } + + for (const directory of context.consumeAllCsvStrings('includeDirectories', 'addDir')) { + args.push('--include-directories', directory); + } + + for (const tool of context.consumeAllCsvStrings('allowedTools', 'allowTool')) { + args.push('--allowed-tools', tool); + } + + for (const serverName of context.consumeAllCsvStrings('allowedMcpServerNames')) { + args.push('--allowed-mcp-server-names', serverName); + } + + return { mappedArgs: args }; +} + +function mapClaudeFlags(context: ForwardFlagMapContext): ForwardMapResult { + const args: string[] = []; + const warnings: string[] = []; + + const autoOccurrences = collectAutoApproveOccurrences(context); + const planOccurrences = normalizePlanOccurrences(context); + + if (autoOccurrences.length > 0) { + context.consume(...autoOccurrences); + args.push('--dangerously-skip-permissions'); + + const permissionOccurrences = context.all('permissionMode'); + if (permissionOccurrences.length > 0 || planOccurrences.length > 0) { + context.consume(...permissionOccurrences, ...planOccurrences); + warnings.push('Claude precedence: auto-approve flags override permission-mode planning options.'); + } + } else { + const permissionMode = context.latestString('permissionMode'); + if (permissionMode) { + context.consumeKeys('permissionMode'); + args.push('--permission-mode', permissionMode); + } else if (planOccurrences.length > 0) { + context.consume(...planOccurrences); + args.push('--permission-mode', 'plan'); + } + } + + const model = context.latestString('model'); + if (model) { + context.consumeKeys('model'); + args.push('--model', model); + } + + for (const directory of context.consumeAllCsvStrings('addDir', 'includeDirectories')) { + args.push('--add-dir', directory); + } + + for (const tool of context.consumeAllCsvStrings('allowedTools', 
'allowTool')) { + args.push('--allowed-tools', tool); + } + + for (const tool of context.consumeAllCsvStrings('disallowedTools', 'denyTool')) { + args.push('--disallowed-tools', tool); + } + + const agent = context.latestString('agent'); + if (agent) { + context.consumeKeys('agent'); + args.push('--agent', agent); + } + + const debugOccurrence = context.latest('debug'); + if (debugOccurrence) { + context.consumeKeys('debug'); + if (typeof debugOccurrence.value === 'string' && debugOccurrence.value.trim().length > 0) { + args.push('--debug', debugOccurrence.value); + } else { + args.push('--debug'); + } + } + + for (const config of context.consumeAllStrings('mcpConfig', 'additionalMcpConfig')) { + args.push('--mcp-config', config); + } + + return { mappedArgs: args, warnings }; +} + +function mapDroidFlags(context: ForwardFlagMapContext): ForwardMapResult { + const args: string[] = []; + const warnings: string[] = []; + + const autoOccurrences = collectAutoApproveOccurrences(context); + const fullAutoOccurrences = context.all('fullAuto'); + const askOccurrences = context.all('askForApproval'); + const sandboxOccurrences = context.all('sandbox'); + const approvalModeOccurrences = context.all('approvalMode'); + const approvalMode = context.latestString('approvalMode')?.toLowerCase(); + const askForApproval = context.latestString('askForApproval')?.toLowerCase(); + + if ( + autoOccurrences.length > 0 || + fullAutoOccurrences.length > 0 || + askForApproval === 'never' || + approvalMode === 'yolo' + ) { + context.consume( + ...autoOccurrences, + ...fullAutoOccurrences, + ...askOccurrences, + ...sandboxOccurrences, + ...approvalModeOccurrences, + ); + args.push('--skip-permissions-unsafe'); + + if (askOccurrences.length > 0 && askForApproval && askForApproval !== 'never') { + warnings.push('Droid precedence: auto-approve mapping overrides unsupported ask-for-approval values.'); + } + + if (approvalModeOccurrences.length > 0 && approvalMode && approvalMode !== 'yolo') { + 
warnings.push('Droid: --approval-mode is not supported by droid exec and was ignored.'); + } + } else if (askOccurrences.length > 0 || sandboxOccurrences.length > 0 || approvalModeOccurrences.length > 0) { + context.consume(...askOccurrences, ...sandboxOccurrences, ...approvalModeOccurrences); + + if (askOccurrences.length > 0 || sandboxOccurrences.length > 0) { + warnings.push('Droid: --ask-for-approval and --sandbox are not supported by droid exec and were ignored.'); + } + + if (approvalModeOccurrences.length > 0 && approvalMode !== 'yolo') { + warnings.push('Droid: --approval-mode is not supported by droid exec and was ignored.'); + } + } + + const model = context.latestString('model'); + if (model) { + context.consumeKeys('model'); + args.push('--model', model); + } + + const cwd = context.latestString('workspace', 'cd'); + if (cwd) { + context.consumeKeys('workspace', 'cd'); + args.push('--cwd', cwd); + } + + return { mappedArgs: args, warnings }; +} + +function mapOpenCodeFlags(context: ForwardFlagMapContext): ForwardMapResult { + const args: string[] = []; + const warnings: string[] = []; + + const autoOccurrences = collectAutoApproveOccurrences(context); + const fullAutoOccurrences = context.all('fullAuto'); + const askOccurrences = context.all('askForApproval'); + const sandboxOccurrences = context.all('sandbox'); + const approvalModeOccurrences = context.all('approvalMode'); + const permissionModeOccurrences = context.all('permissionMode'); + + if ( + autoOccurrences.length > 0 || + fullAutoOccurrences.length > 0 || + askOccurrences.length > 0 || + sandboxOccurrences.length > 0 || + approvalModeOccurrences.length > 0 || + permissionModeOccurrences.length > 0 + ) { + context.consume( + ...autoOccurrences, + ...fullAutoOccurrences, + ...askOccurrences, + ...sandboxOccurrences, + ...approvalModeOccurrences, + ...permissionModeOccurrences, + ); + warnings.push( + 'OpenCode: auto-approval, permission, and sandbox forwarding flags are not supported and were 
ignored.', + ); + } + + const model = context.latestString('model'); + if (model) { + context.consumeKeys('model'); + args.push('--model', model); + } + + const agent = context.latestString('agent'); + if (agent) { + context.consumeKeys('agent'); + args.push('--agent', agent); + } + + const logLevel = context.latestString('logLevel'); + if (logLevel) { + context.consumeKeys('logLevel'); + args.push('--log-level', logLevel); + } + + return { mappedArgs: args, warnings }; +} + +function mapAmpFlags(context: ForwardFlagMapContext): ForwardMapResult { + const args: string[] = []; + const warnings: string[] = []; + + const autoOccurrences = collectAutoApproveOccurrences(context); + const fullAutoOccurrences = context.all('fullAuto'); + const askOccurrences = context.all('askForApproval'); + const sandboxOccurrences = context.all('sandbox'); + const approvalModeOccurrences = context.all('approvalMode'); + const approvalMode = context.latestString('approvalMode')?.toLowerCase(); + const askForApproval = context.latestString('askForApproval')?.toLowerCase(); + + if ( + autoOccurrences.length > 0 || + fullAutoOccurrences.length > 0 || + askForApproval === 'never' || + approvalMode === 'yolo' + ) { + context.consume( + ...autoOccurrences, + ...fullAutoOccurrences, + ...askOccurrences, + ...sandboxOccurrences, + ...approvalModeOccurrences, + ); + args.push('--dangerously-allow-all'); + + if (askOccurrences.length > 0 && askForApproval && askForApproval !== 'never') { + warnings.push('Amp precedence: auto-approve mapping overrides unsupported ask-for-approval values.'); + } + + if (approvalModeOccurrences.length > 0 && approvalMode && approvalMode !== 'yolo') { + warnings.push('Amp: --approval-mode is not supported and was ignored.'); + } + } else if (askOccurrences.length > 0 || sandboxOccurrences.length > 0 || approvalModeOccurrences.length > 0) { + context.consume(...askOccurrences, ...sandboxOccurrences, ...approvalModeOccurrences); + + if (askOccurrences.length > 0 || 
sandboxOccurrences.length > 0) { + warnings.push('Amp: --ask-for-approval and --sandbox are not supported and were ignored.'); + } + + if (approvalModeOccurrences.length > 0 && approvalMode) { + warnings.push('Amp: --approval-mode is not supported and was ignored.'); + } + } + + return { mappedArgs: args, warnings }; +} + +function mapCopilotFlags(context: ForwardFlagMapContext): ForwardMapResult { + const args: string[] = []; + + const autoOccurrences = collectAutoApproveOccurrences(context); + const allowAllOccurrences = context.all('allowAll'); + + if (allowAllOccurrences.length > 0 && autoOccurrences.length === allowAllOccurrences.length) { + context.consume(...allowAllOccurrences); + args.push('--allow-all'); + } else if (autoOccurrences.length > 0) { + context.consume(...autoOccurrences); + args.push('--yolo'); + } + + const model = context.latestString('model'); + if (model) { + context.consumeKeys('model'); + args.push('--model', model); + } + + for (const directory of context.consumeAllCsvStrings('addDir', 'includeDirectories')) { + args.push('--add-dir', directory); + } + + for (const tool of context.consumeAllCsvStrings('allowedTools', 'allowTool')) { + args.push('--allow-tool', tool); + } + + for (const tool of context.consumeAllCsvStrings('disallowedTools', 'denyTool')) { + args.push('--deny-tool', tool); + } + + const agent = context.latestString('agent'); + if (agent) { + context.consumeKeys('agent'); + args.push('--agent', agent); + } + + const logLevel = context.latestString('logLevel'); + if (logLevel) { + context.consumeKeys('logLevel'); + args.push('--log-level', logLevel); + } + + for (const config of context.consumeAllStrings('additionalMcpConfig', 'mcpConfig')) { + args.push('--additional-mcp-config', config); + } + + return { mappedArgs: args }; +} + +function mapCursorAgentFlags(context: ForwardFlagMapContext): ForwardMapResult { + const args: string[] = []; + + const autoOccurrences = collectAutoApproveOccurrences(context); + const 
fullAutoOccurrences = context.all('fullAuto'); + if (autoOccurrences.length > 0 || fullAutoOccurrences.length > 0) { + context.consume(...autoOccurrences, ...fullAutoOccurrences); + args.push('--yolo'); + } + + const model = context.latestString('model'); + if (model) { + context.consumeKeys('model'); + args.push('--model', model); + } + + const sandboxOccurrence = context.latest('sandbox'); + if (sandboxOccurrence) { + const normalized = normalizeAgentSandbox(sandboxOccurrence.value); + if (normalized) { + context.consumeKeys('sandbox'); + args.push('--sandbox', normalized); + } + } + + const planOccurrences = normalizePlanOccurrences(context); + if (planOccurrences.length > 0) { + context.consume(...planOccurrences); + args.push('--plan'); + } + + const workspace = context.latestString('workspace', 'cd'); + if (workspace) { + context.consumeKeys('workspace', 'cd'); + args.push('--workspace', workspace); + } + + if (context.consumeAnyBoolean('approveMcps')) { + args.push('--approve-mcps'); + } + + return { mappedArgs: args }; +} + +function mapKimiFlags(context: ForwardFlagMapContext): ForwardMapResult { + const args: string[] = []; + const warnings: string[] = []; + + const autoOccurrences = collectAutoApproveOccurrences(context); + const fullAutoOccurrences = context.all('fullAuto'); + const askOccurrences = context.all('askForApproval'); + const sandboxOccurrences = context.all('sandbox'); + const approvalMode = context.latestString('approvalMode')?.toLowerCase(); + const askForApproval = context.latestString('askForApproval')?.toLowerCase(); + + if ( + autoOccurrences.length > 0 || + fullAutoOccurrences.length > 0 || + approvalMode === 'yolo' || + askForApproval === 'never' + ) { + context.consume( + ...autoOccurrences, + ...fullAutoOccurrences, + ...askOccurrences, + ...sandboxOccurrences, + ...context.all('approvalMode'), + ); + args.push('--yolo'); + + if (askOccurrences.length > 0 && askForApproval && askForApproval !== 'never') { + warnings.push('Kimi 
precedence: mapped auto-approve behavior overrides unsupported ask-for-approval values.'); + } + } else if (askOccurrences.length > 0 || sandboxOccurrences.length > 0 || context.all('approvalMode').length > 0) { + context.consume(...askOccurrences, ...sandboxOccurrences, ...context.all('approvalMode')); + warnings.push('Kimi: --ask-for-approval, --approval-mode, and --sandbox are not supported and were ignored.'); + } + + const model = context.latestString('model'); + if (model) { + context.consumeKeys('model'); + args.push('--model', model); + } + + for (const directory of context.consumeAllCsvStrings('addDir', 'includeDirectories')) { + args.push('--add-dir', directory); + } + + const workDir = context.latestString('workspace', 'cd'); + if (workDir) { + context.consumeKeys('workspace', 'cd'); + args.push('--work-dir', workDir); + } + + const agent = context.latestString('agent'); + if (agent) { + context.consumeKeys('agent'); + args.push('--agent', agent); + } + + return { mappedArgs: args, warnings }; +} + +function mapKiroFlags(context: ForwardFlagMapContext): ForwardMapResult { + const args: string[] = []; + const warnings: string[] = []; + + const autoOccurrences = collectAutoApproveOccurrences(context); + const fullAutoOccurrences = context.all('fullAuto'); + const askOccurrences = context.all('askForApproval'); + const sandboxOccurrences = context.all('sandbox'); + const approvalMode = context.latestString('approvalMode')?.toLowerCase(); + const askForApproval = context.latestString('askForApproval')?.toLowerCase(); + + if ( + autoOccurrences.length > 0 || + fullAutoOccurrences.length > 0 || + approvalMode === 'yolo' || + askForApproval === 'never' + ) { + context.consume( + ...autoOccurrences, + ...fullAutoOccurrences, + ...askOccurrences, + ...sandboxOccurrences, + ...context.all('approvalMode'), + ); + args.push('--trust-all-tools'); + } else if (askOccurrences.length > 0 || sandboxOccurrences.length > 0 || context.all('approvalMode').length > 0) { + 
context.consume(...askOccurrences, ...sandboxOccurrences, ...context.all('approvalMode')); + warnings.push('Kiro: --ask-for-approval, --approval-mode, and --sandbox are not supported and were ignored.'); + } + + const agent = context.latestString('agent'); + if (agent) { + context.consumeKeys('agent'); + args.push('--agent', agent); + } + + return { mappedArgs: args, warnings }; +} + +function mapCrushFlags(context: ForwardFlagMapContext): ForwardMapResult { + const args: string[] = []; + const warnings: string[] = []; + + const autoOccurrences = collectAutoApproveOccurrences(context); + const fullAutoOccurrences = context.all('fullAuto'); + const askOccurrences = context.all('askForApproval'); + const sandboxOccurrences = context.all('sandbox'); + const approvalMode = context.latestString('approvalMode')?.toLowerCase(); + const askForApproval = context.latestString('askForApproval')?.toLowerCase(); + + if ( + autoOccurrences.length > 0 || + fullAutoOccurrences.length > 0 || + approvalMode === 'yolo' || + askForApproval === 'never' + ) { + context.consume( + ...autoOccurrences, + ...fullAutoOccurrences, + ...askOccurrences, + ...sandboxOccurrences, + ...context.all('approvalMode'), + ); + args.push('--yolo'); + } else if (askOccurrences.length > 0 || sandboxOccurrences.length > 0 || context.all('approvalMode').length > 0) { + context.consume(...askOccurrences, ...sandboxOccurrences, ...context.all('approvalMode')); + warnings.push('Crush: --ask-for-approval, --approval-mode, and --sandbox are not supported and were ignored.'); + } + + return { mappedArgs: args, warnings }; +} + +// ── Claude Code ────────────────────────────────────────────────────── +register({ + name: 'claude', + label: 'Claude Code', + color: chalk.blue, + storagePath: '~/.claude/projects/', + envVar: 'CLAUDE_CONFIG_DIR', + binaryName: 'claude', + parseSessions: parseClaudeSessions, + extractContext: extractClaudeContext, + nativeResumeArgs: (s) => ['--resume', s.id], + crossToolArgs: (prompt) 
=> [prompt], + resumeCommandDisplay: (s) => `claude --resume ${s.id}`, + mapHandoffFlags: mapClaudeFlags, +}); + +// ── Codex CLI ──────────────────────────────────────────────────────── +register({ + name: 'codex', + label: 'Codex CLI', + color: chalk.magenta, + storagePath: '~/.codex/sessions/', + envVar: 'CODEX_HOME', + binaryName: 'codex', + parseSessions: parseCodexSessions, + extractContext: extractCodexContext, + nativeResumeArgs: (s) => ['-c', `experimental_resume=${s.originalPath}`], + crossToolArgs: (prompt) => [prompt], + resumeCommandDisplay: (s) => `codex -c experimental_resume="${s.originalPath}"`, + mapHandoffFlags: mapCodexFlags, +}); + +// ── GitHub Copilot CLI ─────────────────────────────────────────────── +register({ + name: 'copilot', + label: 'GitHub Copilot CLI', + color: chalk.green, + storagePath: '~/.copilot/session-state/', + binaryName: 'copilot', + parseSessions: parseCopilotSessions, + extractContext: extractCopilotContext, + nativeResumeArgs: (s) => ['--resume', s.id], + crossToolArgs: (prompt) => ['-i', prompt], + resumeCommandDisplay: (s) => `copilot --resume ${s.id}`, + mapHandoffFlags: mapCopilotFlags, +}); + +// ── Gemini CLI ─────────────────────────────────────────────────────── +register({ + name: 'gemini', + label: 'Gemini CLI', + color: chalk.cyan, + storagePath: '~/.gemini/tmp/*/chats/', + envVar: 'GEMINI_CLI_HOME', + binaryName: 'gemini', + parseSessions: parseGeminiSessions, + extractContext: extractGeminiContext, + nativeResumeArgs: () => ['--resume'], + crossToolArgs: (prompt) => [prompt], + resumeCommandDisplay: () => `gemini --resume`, + mapHandoffFlags: mapGeminiFlags, +}); + +// ── OpenCode ───────────────────────────────────────────────────────── +register({ + name: 'opencode', + label: 'OpenCode', + color: chalk.yellow, + storagePath: '~/.local/share/opencode/storage/', + envVar: 'XDG_DATA_HOME', + binaryName: 'opencode', + parseSessions: parseOpenCodeSessions, + extractContext: extractOpenCodeContext, + 
nativeResumeArgs: (s) => ['--session', s.id], + crossToolArgs: (prompt) => ['run', prompt], + resumeCommandDisplay: (s) => `opencode --session ${s.id}`, + mapHandoffFlags: mapOpenCodeFlags, +}); + +// ── Factory Droid ──────────────────────────────────────────────────── +register({ + name: 'droid', + label: 'Factory Droid', + color: chalk.red, + storagePath: '~/.factory/sessions/', + binaryName: 'droid', + parseSessions: parseDroidSessions, + extractContext: extractDroidContext, + nativeResumeArgs: (s) => ['-s', s.id], + crossToolArgs: (prompt) => ['exec', prompt], + resumeCommandDisplay: (s) => `droid -s ${s.id}`, + mapHandoffFlags: mapDroidFlags, +}); + +// ── Cursor AI (Agent CLI) ──────────────────────────────────────────── +register({ + name: 'cursor', + label: 'Cursor AI', + color: chalk.blueBright, + storagePath: '~/.cursor/projects/*/agent-transcripts/', + binaryName: 'cursor-agent', + binaryFallbacks: ['agent'], + parseSessions: parseCursorSessions, + extractContext: extractCursorContext, + nativeResumeArgs: (s) => ['--resume', s.id], + crossToolArgs: (prompt) => [prompt], + resumeCommandDisplay: (s) => `cursor-agent --resume ${s.id} (or: agent --resume ${s.id})`, + mapHandoffFlags: mapCursorAgentFlags, +}); + +// ── Amp CLI ────────────────────────────────────────────────────────── +register({ + name: 'amp', + label: 'Amp CLI', + color: chalk.hex('#FF6B35'), + storagePath: '~/.local/share/amp/threads/', + envVar: 'XDG_DATA_HOME', + binaryName: 'amp', + parseSessions: parseAmpSessions, + extractContext: extractAmpContext, + nativeResumeArgs: (s) => ['--thread', s.id], + crossToolArgs: (prompt) => [prompt], + resumeCommandDisplay: (s) => `amp --thread ${s.id}`, + mapHandoffFlags: mapAmpFlags, +}); + +// ── Kiro IDE ───────────────────────────────────────────────────────── +register({ + name: 'kiro', + label: 'Kiro IDE', + color: chalk.hex('#7B68EE'), + storagePath: '~/Library/Application Support/Kiro/workspace-sessions/', + binaryName: 'kiro', + 
parseSessions: parseKiroSessions, + extractContext: extractKiroContext, + nativeResumeArgs: () => [], + crossToolArgs: (prompt) => [prompt], + resumeCommandDisplay: () => `kiro`, + mapHandoffFlags: mapKiroFlags, +}); + +// ── Crush CLI ──────────────────────────────────────────────────────── +register({ + name: 'crush', + label: 'Crush CLI', + color: chalk.hex('#E63946'), + storagePath: '~/.crush/crush.db', + binaryName: 'crush', + parseSessions: parseCrushSessions, + extractContext: extractCrushContext, + nativeResumeArgs: (s) => ['--session', s.id], + crossToolArgs: (prompt) => [prompt], + resumeCommandDisplay: (s) => `crush --session ${s.id}`, + mapHandoffFlags: mapCrushFlags, +}); + +// ── Cline ──────────────────────────────────────────────────────────── +register({ + name: 'cline', + label: 'Cline', + color: chalk.hex('#00D4AA'), + storagePath: '~/Library/Application Support/Code/User/globalStorage/saoudrizwan.claude-dev/tasks/', + binaryName: 'code', + parseSessions: parseClineSessions, + extractContext: extractClineContext, + nativeResumeArgs: () => [], + crossToolArgs: (prompt) => [prompt], + resumeCommandDisplay: () => `code`, +}); + +// ── Roo Code ───────────────────────────────────────────────────────── +register({ + name: 'roo-code', + label: 'Roo Code', + color: chalk.hex('#FF8C42'), + storagePath: '~/Library/Application Support/Code/User/globalStorage/rooveterinaryinc.roo-cline/tasks/', + binaryName: 'code', + parseSessions: parseRooCodeSessions, + extractContext: extractRooCodeContext, + nativeResumeArgs: () => [], + crossToolArgs: (prompt) => [prompt], + resumeCommandDisplay: () => `code`, +}); + +// ── Kilo Code ──────────────────────────────────────────────────────── +register({ + name: 'kilo-code', + label: 'Kilo Code', + color: chalk.hex('#6C5CE7'), + storagePath: '~/Library/Application Support/Code/User/globalStorage/kilocode.kilo-code/tasks/', + binaryName: 'code', + parseSessions: parseKiloCodeSessions, + extractContext: 
extractKiloCodeContext, + nativeResumeArgs: () => [], + crossToolArgs: (prompt) => [prompt], + resumeCommandDisplay: () => `code`, +}); + +// ── Antigravity ────────────────────────────────────────────────────── +register({ + name: 'antigravity', + label: 'Antigravity', + color: chalk.hex('#A8DADC'), + storagePath: '~/.gemini/antigravity/code_tracker/', + envVar: 'GEMINI_CLI_HOME', + binaryName: 'antigravity', + parseSessions: parseAntigravitySessions, + extractContext: extractAntigravityContext, + nativeResumeArgs: () => [], + crossToolArgs: (prompt) => [prompt], + resumeCommandDisplay: () => `antigravity`, +}); + +// ── Kimi CLI ────────────────────────────────────────────────────────── +register({ + name: 'kimi', + label: 'Kimi CLI', + color: chalk.hex('#00D4AA'), + storagePath: '~/.kimi/sessions/', + binaryName: 'kimi', + parseSessions: parseKimiSessions, + extractContext: extractKimiContext, + nativeResumeArgs: (s) => ['--session', s.id], + crossToolArgs: (prompt) => ['--prompt', prompt], + resumeCommandDisplay: (s) => `kimi --session ${s.id}`, + mapHandoffFlags: mapKimiFlags, +}); + +// ── Qwen Code ──────────────────────────────────────────────────────── +register({ + name: 'qwen-code', + label: 'Qwen Code', + color: chalk.hex('#6366F1'), + storagePath: '~/.qwen/projects/*/chats/', + envVar: 'QWEN_HOME', + binaryName: 'qwen', + parseSessions: parseQwenCodeSessions, + extractContext: extractQwenCodeContext, + nativeResumeArgs: (s) => ['--resume', s.id], + crossToolArgs: (prompt) => [prompt], + resumeCommandDisplay: (s) => `qwen --resume ${s.id}`, + mapHandoffFlags: mapGeminiFlags, +}); + +// ── Completeness assertion ────────────────────────────────────────── +// Runs at module load — if a new tool is added to TOOL_NAMES but not +// registered here, this throws immediately with a clear message. 
+const missing = TOOL_NAMES.filter((name) => !(name in _adapters)); +if (missing.length > 0) { + throw new Error(`Registry incomplete: missing adapter(s) for ${missing.join(', ')}`); +} + +// ── Exports ────────────────────────────────────────────────────────── + +/** Type-safe adapter lookup — completeness proven by runtime assertion above */ +export const adapters: Readonly> = _adapters as Record; + +/** Ordered list of all tool names — derived from the canonical TOOL_NAMES array */ +export const ALL_TOOLS: readonly SessionSource[] = TOOL_NAMES; + +/** Formatted help string for --source options */ +export const SOURCE_HELP = `Filter by source (${ALL_TOOLS.join(', ')})`; diff --git a/src/types/content-blocks.ts b/src/types/content-blocks.ts new file mode 100644 index 0000000..6cb7037 --- /dev/null +++ b/src/types/content-blocks.ts @@ -0,0 +1,48 @@ +/** + * Shared content block types for Anthropic-style message formats. + * Used by Claude, Droid, and Cursor parsers — all share the same + * text / thinking / tool_use / tool_result content block structure. 
+ */ +import { z } from 'zod'; + +// ── Zod Schemas ───────────────────────────────────────────────────────────── + +export const TextBlockSchema = z.object({ + type: z.literal('text'), + text: z.string(), +}); + +export const ThinkingBlockSchema = z.object({ + type: z.literal('thinking'), + text: z.string().optional(), + thinking: z.string().optional(), +}); + +export const ToolUseBlockSchema = z.object({ + type: z.literal('tool_use'), + id: z.string(), + name: z.string(), + input: z.record(z.string(), z.unknown()).default({}), +}); + +export const ToolResultBlockSchema = z.object({ + type: z.literal('tool_result'), + tool_use_id: z.string(), + content: z.union([z.string(), z.array(z.object({ type: z.string(), text: z.string().optional() }))]), + is_error: z.boolean().optional(), +}); + +export const ContentBlockSchema = z.discriminatedUnion('type', [ + TextBlockSchema, + ThinkingBlockSchema, + ToolUseBlockSchema, + ToolResultBlockSchema, +]); + +// ── TypeScript Types ──────────────────────────────────────────────────────── + +export type TextBlock = z.infer; +export type ThinkingBlock = z.infer; +export type ToolUseBlock = z.infer; +export type ToolResultBlock = z.infer; +export type ContentBlock = z.infer; diff --git a/src/types/index.ts b/src/types/index.ts index 486d397..c6dd698 100644 --- a/src/types/index.ts +++ b/src/types/index.ts @@ -1,9 +1,13 @@ /** - * Unified Session Types for Codex, Claude, Copilot, Gemini, and OpenCode CLIs + * Unified Session Types for CLI session tools */ -/** Source CLI tool */ -export type SessionSource = 'codex' | 'claude' | 'copilot' | 'gemini' | 'opencode'; +// Import SessionSource locally (used by UnifiedSession below) and re-export +import type { SessionSource, ToolSampleCategory } from './tool-names.js'; + +// Re-export shared content block types +export type { ContentBlock, TextBlock, ThinkingBlock, ToolResultBlock, ToolUseBlock } from './content-blocks.js'; +export { type SessionSource, type ToolSampleCategory, 
TOOL_NAMES } from './tool-names.js'; /** Unified session metadata */ export interface UnifiedSession { @@ -35,7 +39,7 @@ export interface UnifiedSession { /** Conversation message in normalized format */ export interface ConversationMessage { - role: 'user' | 'assistant' | 'system' | 'tool'; + role: 'user' | 'assistant' | 'system'; content: string; timestamp?: Date; toolCalls?: ToolCall[]; @@ -44,14 +48,148 @@ export interface ConversationMessage { /** Tool call information */ export interface ToolCall { name: string; + /** Unique call ID for matching call → result (Anthropic-format sessions) */ + id?: string; arguments?: Record; result?: string; + /** Whether the tool call succeeded. Absent when status is unknown. */ + success?: boolean; +} + +// ── Structured Tool Sample Data ───────────────────────────────────────────── +// Discriminated union on `category`. Each tool type captures what matters. + +export interface ShellSampleData { + category: 'shell'; + command: string; + exitCode?: number; + /** Last N lines of stdout (joined with \n). Omitted if empty. */ + stdoutTail?: string; + /** True when command exited non-zero or tool reported an error. */ + errored?: boolean; + /** First 200 chars of error output when errored. */ + errorMessage?: string; +} + +export interface ReadSampleData { + category: 'read'; + filePath: string; + lineStart?: number; + lineEnd?: number; +} + +export interface WriteSampleData { + category: 'write'; + filePath: string; + isNewFile?: boolean; + /** Unified diff capped at maxLines. If truncated, ends with "+N lines truncated". */ + diff?: string; + diffStats?: { added: number; removed: number }; + /** First 200 chars of error text when the write failed. */ + errorMessage?: string; +} + +export interface EditSampleData { + category: 'edit'; + filePath: string; + /** Unified diff capped at maxLines. If truncated, ends with "+N lines truncated". 
*/ + diff?: string; + diffStats?: { added: number; removed: number }; + /** First 200 chars of error text when the edit failed. */ + errorMessage?: string; +} + +export interface GrepSampleData { + category: 'grep'; + pattern: string; + targetPath?: string; + matchCount?: number; +} + +export interface GlobSampleData { + category: 'glob'; + pattern: string; + resultCount?: number; +} + +export interface SearchSampleData { + category: 'search'; + query: string; + /** Number of results returned, if parseable. */ + resultCount?: number; + /** First 100 characters of the search result. */ + resultPreview?: string; +} + +export interface FetchSampleData { + category: 'fetch'; + url: string; + /** First 100 characters of fetched content. */ + resultPreview?: string; +} + +export interface TaskSampleData { + category: 'task'; + description: string; + agentType?: string; + /** First 100 characters of task result. */ + resultSummary?: string; } +export interface AskSampleData { + category: 'ask'; + /** Question text, capped at 80 characters. */ + question: string; +} + +export interface McpSampleData { + category: 'mcp'; + /** Full tool name including namespace (e.g. "mcp__github__list_issues"). */ + toolName: string; + /** Truncated params string (each value capped at 100 chars). */ + params?: string; + /** First 100 characters of tool result. */ + result?: string; +} + +export interface ReasoningSampleData { + category: 'reasoning'; + /** Full tool name (e.g. "mcp__crash-think-tool__crash"). */ + toolName: string; + /** Reasoning step number, if available. */ + stepNumber?: number; + /** The thought/reasoning text, truncated per config. */ + thought?: string; + /** Expected or actual outcome. */ + outcome?: string; + /** Planned next action (string or stringified object). */ + nextAction?: string; +} + +/** + * Discriminated union of all structured tool sample types. + * The `category` field is the discriminant — use `switch(data.category)` for narrowing. 
+ */ +export type StructuredToolSample = + | ShellSampleData + | ReadSampleData + | WriteSampleData + | EditSampleData + | GrepSampleData + | GlobSampleData + | SearchSampleData + | FetchSampleData + | TaskSampleData + | AskSampleData + | McpSampleData + | ReasoningSampleData; + /** One-line concise summary of a single tool invocation */ export interface ToolSample { /** e.g. "$ npm test → exit 0" or "edit src/auth.ts (+5 -2)" */ summary: string; + /** Structured data for rich rendering. Absent for legacy/not-yet-updated parsers. */ + data?: StructuredToolSample; } /** Aggregated tool usage: unique tool name + count + representative samples */ @@ -60,18 +198,56 @@ export interface ToolUsageSummary { name: string; /** Number of times this tool was invoked */ count: number; - /** Up to 3 representative samples */ + /** Number of invocations that ended in error */ + errorCount?: number; + /** Up to N representative samples (N varies by category) */ samples: ToolSample[]; } -/** Contextual session notes (reasoning highlights, model info, token usage) */ +/** Result from a subagent/task invocation */ +export interface SubagentResult { + taskId: string; + description: string; + status: 'completed' | 'killed' | 'error'; + /** Final text output from the subagent */ + result?: string; + /** How many tools the subagent used */ + toolCallCount: number; +} + +/** A single reasoning/thinking step captured during the session */ +export interface ReasoningStep { + stepNumber: number; + totalSteps: number; + /** Purpose category: 'analysis', 'decision', etc. 
*/ + purpose: string; + thought: string; + outcome: string; + nextAction: string; +} + +/** Contextual session notes (reasoning highlights, token usage) */ export interface SessionNotes { - /** Model used in the session */ + /** Model used in the session (kept for backwards compatibility with parsers that set it here) */ model?: string; - /** Key reasoning/thinking highlights (max 5) */ + /** Key reasoning/thinking highlights (max 10) */ reasoning?: string[]; /** Token usage statistics */ tokenUsage?: { input: number; output: number }; + /** Cache token breakdown (prompt caching) */ + cacheTokens?: { creation: number; read: number }; + /** Tokens spent on extended thinking / chain-of-thought */ + thinkingTokens?: number; + /** Wall-clock time the AI assistant was actively working (ms) */ + activeTimeMs?: number; + /** Narrative summary from compact/compaction messages */ + compactSummary?: string; + /** Results from subagent/task invocations */ + subagentResults?: SubagentResult[]; + /** Sequential reasoning/thinking steps captured during the session */ + reasoningSteps?: ReasoningStep[]; + /** External tool results (MCP, plugins) with size and preview */ + externalToolResults?: Array<{ name: string; sizeBytes: number; preview: string }>; } /** Extracted context for cross-tool continuation */ @@ -91,22 +267,10 @@ export interface SessionContext { markdown: string; } -/** Session parser interface - each CLI implements this */ -export interface SessionParser { - /** Check if this parser can handle the given path */ - canParse(path: string): boolean; - /** Parse sessions from the default location */ - parseAll(): Promise; - /** Extract full context from a session */ - extractContext(session: UnifiedSession): Promise; -} - -/** Resume options */ -export interface ResumeOptions { - /** Session to resume */ - session: UnifiedSession; - /** Target CLI tool */ - target: SessionSource; - /** Whether to use native resume (same tool) */ - useNative: boolean; +/** Options 
controlling handoff markdown generation */ +export interface HandoffOptions { + /** Delivery mode — inline embeds full markdown as CLI arg, reference points to file */ + mode: 'inline' | 'reference'; + /** Max bytes for the conversation section (default: 20000 inline, 40000 reference) */ + maxConversationBytes?: number; } diff --git a/src/types/schemas.ts b/src/types/schemas.ts new file mode 100644 index 0000000..9c08c24 --- /dev/null +++ b/src/types/schemas.ts @@ -0,0 +1,663 @@ +/** + * Zod schemas for all parser raw data formats and serialized session. + * Each schema validates untrusted data from disk (JSONL, JSON, YAML, SQLite). + * Schemas use .passthrough() to tolerate extra fields from future tool versions. + */ +import { z } from 'zod'; +import { ContentBlockSchema } from './content-blocks.js'; +import { TOOL_NAMES } from './tool-names.js'; + +// ── Helpers ───────────────────────────────────────────────────────────────── + +/** Content that can be a string or an array of blocks */ +const StringOrBlockArray = z.union([ + z.string(), + z.array(z.object({ type: z.string(), text: z.string().optional() }).passthrough()), +]); + +// ── Claude ────────────────────────────────────────────────────────────────── + +export const ClaudeMessageSchema = z + .object({ + type: z.string(), + uuid: z.string(), + timestamp: z.string(), + sessionId: z.string().optional(), + cwd: z.string().optional(), + gitBranch: z.string().optional(), + slug: z.string().optional(), + model: z.string().optional(), + isCompactSummary: z.boolean().optional(), + parentUuid: z.string().optional(), + message: z + .object({ + role: z.string().optional(), + content: z + .union([ + z.string(), + z.array(ContentBlockSchema.or(z.object({ type: z.string(), text: z.string().optional() }).passthrough())), + ]) + .optional(), + }) + .optional(), + }) + .passthrough(); + +export type ClaudeMessage = z.infer; + +// ── Codex ─────────────────────────────────────────────────────────────────── + +/** Codex 
messages are a discriminated union on the `type` field */ +export const CodexSessionMetaSchema = z + .object({ + timestamp: z.string(), + type: z.literal('session_meta'), + payload: z + .object({ + id: z.string().optional(), + cwd: z.string().optional(), + git: z + .object({ + branch: z.string().optional(), + repository_url: z.string().optional(), + }) + .optional(), + }) + .passthrough() + .optional(), + }) + .passthrough(); + +export const CodexEventMsgSchema = z + .object({ + timestamp: z.string(), + type: z.literal('event_msg'), + payload: z + .object({ + type: z.string().optional(), + role: z.string().optional(), + message: z.string().optional(), + content: z.array(z.object({ type: z.string(), text: z.string().optional() }).passthrough()).optional(), + input_tokens: z.number().optional(), + output_tokens: z.number().optional(), + }) + .passthrough() + .optional(), + message: z.string().optional(), + }) + .passthrough(); + +export const CodexResponseItemSchema = z + .object({ + timestamp: z.string(), + type: z.literal('response_item'), + payload: z + .object({ + type: z.string().optional(), + role: z.string().optional(), + name: z.string().optional(), + arguments: z.string().optional(), + call_id: z.string().optional(), + input: z.string().optional(), + output: z.unknown().optional(), + content: z.array(z.object({ type: z.string(), text: z.string().optional() }).passthrough()).optional(), + action: z + .object({ + query: z.string().optional(), + queries: z.array(z.string()).optional(), + }) + .passthrough() + .optional(), + }) + .passthrough() + .optional(), + }) + .passthrough(); + +export const CodexTurnContextSchema = z + .object({ + timestamp: z.string(), + type: z.literal('turn_context'), + payload: z + .object({ + model: z.string().optional(), + }) + .passthrough() + .optional(), + }) + .passthrough(); + +export const CodexMessageSchema = z.discriminatedUnion('type', [ + CodexSessionMetaSchema, + CodexEventMsgSchema, + CodexResponseItemSchema, + 
CodexTurnContextSchema, +]); + +export type CodexSessionMeta = z.infer; +export type CodexEventMsg = z.infer; +export type CodexResponseItem = z.infer; +export type CodexTurnContext = z.infer; +export type CodexMessage = z.infer; + +// ── Copilot ───────────────────────────────────────────────────────────────── + +export const CopilotWorkspaceSchema = z + .object({ + id: z.string(), + cwd: z.string(), + git_root: z.string().optional(), + repository: z.string().optional(), + branch: z.string().optional(), + summary: z.string().optional(), + summary_count: z.number().optional(), + created_at: z.string(), + updated_at: z.string(), + }) + .passthrough(); + +export const CopilotEventSchema = z + .object({ + type: z.string(), + id: z.string(), + timestamp: z.string(), + parentId: z.union([z.string(), z.null()]).optional(), + data: z + .object({ + sessionId: z.string().optional(), + selectedModel: z.string().optional(), + content: z.string().optional(), + transformedContent: z.string().optional(), + messageId: z.string().optional(), + toolRequests: z + .array( + z + .object({ + name: z.string(), + arguments: z.record(z.string(), z.unknown()).optional(), + }) + .passthrough(), + ) + .optional(), + context: z + .object({ + cwd: z.string().optional(), + gitRoot: z.string().optional(), + branch: z.string().optional(), + repository: z.string().optional(), + }) + .passthrough() + .optional(), + }) + .passthrough() + .optional(), + }) + .passthrough(); + +export type CopilotWorkspace = z.infer; +export type CopilotEvent = z.infer; + +// ── Gemini ────────────────────────────────────────────────────────────────── + +export const GeminiToolCallSchema = z + .object({ + name: z.string(), + args: z.record(z.string(), z.unknown()).optional(), + result: z + .array( + z + .object({ + functionResponse: z + .object({ + response: z + .object({ + output: z.string().optional(), + }) + .passthrough() + .optional(), + }) + .passthrough() + .optional(), + }) + .passthrough(), + ) + .optional(), 
+ status: z.string().optional(), + resultDisplay: z + .object({ + fileName: z.string().optional(), + filePath: z.string().optional(), + fileDiff: z.string().optional(), + originalContent: z.string().optional(), + newContent: z.string().optional(), + diffStat: z + .object({ + model_added_lines: z.number().optional(), + model_removed_lines: z.number().optional(), + }) + .passthrough() + .optional(), + isNewFile: z.boolean().optional(), + }) + .passthrough() + .optional(), + }) + .passthrough(); + +export const GeminiThoughtSchema = z + .object({ + subject: z.string().optional(), + description: z.string().optional(), + timestamp: z.string().optional(), + }) + .passthrough(); + +export const GeminiMessageSchema = z + .object({ + id: z.string(), + timestamp: z.string(), + type: z.string(), + content: z.union([ + z.string(), + z.array(z.object({ text: z.string().optional(), type: z.string().optional() }).passthrough()), + ]), + toolCalls: z.array(GeminiToolCallSchema).optional(), + thoughts: z.array(GeminiThoughtSchema).optional(), + model: z.string().optional(), + tokens: z + .object({ + input: z.number().optional(), + output: z.number().optional(), + cached: z.number().optional(), + thoughts: z.number().optional(), + tool: z.number().optional(), + total: z.number().optional(), + }) + .passthrough() + .optional(), + }) + .passthrough(); + +export const GeminiSessionSchema = z + .object({ + sessionId: z.string(), + projectHash: z.string(), + startTime: z.string(), + lastUpdated: z.string(), + messages: z.array(GeminiMessageSchema), + }) + .passthrough(); + +export type GeminiToolCall = z.infer; +export type GeminiThought = z.infer; +export type GeminiMessage = z.infer; +export type GeminiSession = z.infer; + +// ── OpenCode ──────────────────────────────────────────────────────────────── + +export const OpenCodeSessionSchema = z + .object({ + id: z.string(), + slug: z.string().optional(), + version: z.string().optional(), + projectID: z.string(), + directory: z.string(), 
+ title: z.string().optional(), + time: z.object({ + created: z.number(), + updated: z.number(), + }), + summary: z + .object({ + additions: z.number().optional(), + deletions: z.number().optional(), + files: z.number().optional(), + }) + .optional(), + }) + .passthrough(); + +export const OpenCodeProjectSchema = z + .object({ + id: z.string(), + worktree: z.string(), + vcs: z.string().optional(), + time: z + .object({ + created: z.number(), + updated: z.number(), + }) + .optional(), + }) + .passthrough(); + +export const OpenCodeMessageSchema = z + .object({ + id: z.string(), + sessionID: z.string(), + role: z.enum(['user', 'assistant']), + time: z.object({ + created: z.number(), + completed: z.number().optional(), + }), + summary: z.object({ title: z.string().optional() }).optional(), + path: z.object({ cwd: z.string().optional(), root: z.string().optional() }).optional(), + }) + .passthrough(); + +export const OpenCodePartSchema = z + .object({ + id: z.string(), + sessionID: z.string(), + messageID: z.string(), + type: z.string(), + text: z.string().optional(), + }) + .passthrough(); + +// SQLite row schemas +export const SqliteSessionRowSchema = z.object({ + id: z.string(), + project_id: z.string(), + slug: z.string(), + directory: z.string(), + title: z.string(), + version: z.string(), + summary_additions: z.number().nullable(), + summary_deletions: z.number().nullable(), + summary_files: z.number().nullable(), + time_created: z.number(), + time_updated: z.number(), +}); + +export const SqliteMessageRowSchema = z.object({ + id: z.string(), + session_id: z.string(), + time_created: z.number(), + data: z.string(), +}); + +export const SqlitePartRowSchema = z.object({ + id: z.string(), + message_id: z.string(), + session_id: z.string(), + time_created: z.number(), + data: z.string(), +}); + +export const SqliteProjectRowSchema = z.object({ + id: z.string(), + worktree: z.string(), +}); + +export type OpenCodeSession = z.infer; +export type OpenCodeProject = 
z.infer; +export type OpenCodeMessage = z.infer; +export type OpenCodePart = z.infer; +export type SqliteSessionRow = z.infer; +export type SqliteMessageRow = z.infer; +export type SqlitePartRow = z.infer; +export type SqliteProjectRow = z.infer; + +// ── Droid ─────────────────────────────────────────────────────────────────── + +export const DroidSessionStartSchema = z + .object({ + type: z.literal('session_start'), + id: z.string(), + title: z.string(), + sessionTitle: z.string(), + owner: z.string().optional(), + version: z.number().optional(), + cwd: z.string(), + isSessionTitleManuallySet: z.boolean().optional(), + sessionTitleAutoStage: z.string().optional(), + }) + .passthrough(); + +export const DroidMessageEventSchema = z + .object({ + type: z.literal('message'), + id: z.string(), + timestamp: z.string(), + parentId: z.string().optional(), + message: z.object({ + role: z.enum(['user', 'assistant']), + content: z.array( + ContentBlockSchema.or(z.object({ type: z.string(), text: z.string().optional() }).passthrough()), + ), + }), + }) + .passthrough(); + +export const DroidTodoStateSchema = z + .object({ + type: z.literal('todo_state'), + id: z.string(), + timestamp: z.string(), + todos: z.union([z.object({ todos: z.string() }).passthrough(), z.string()]), + messageIndex: z.number().optional(), + }) + .passthrough(); + +export const DroidCompactionStateSchema = z + .object({ + type: z.literal('compaction_state'), + id: z.string(), + timestamp: z.string(), + summaryText: z.string().optional(), + summaryTokens: z.number().optional(), + summaryKind: z.string().optional(), + anchorMessage: z.string().optional(), + removedCount: z.number().optional(), + systemInfo: z.unknown().optional(), + }) + .passthrough(); + +export const DroidEventSchema = z.discriminatedUnion('type', [ + DroidSessionStartSchema, + DroidMessageEventSchema, + DroidTodoStateSchema, + DroidCompactionStateSchema, +]); + +export const DroidSettingsSchema = z + .object({ + assistantActiveTimeMs: 
z.number().optional(), + model: z.string().optional(), + reasoningEffort: z.string().optional(), + interactionMode: z.string().optional(), + autonomyMode: z.string().optional(), + providerLock: z.string().optional(), + providerLockTimestamp: z.string().optional(), + apiProviderLock: z.string().optional(), + specModeReasoningEffort: z.string().optional(), + tokenUsage: z + .object({ + inputTokens: z.number().optional(), + outputTokens: z.number().optional(), + cacheCreationTokens: z.number().optional(), + cacheReadTokens: z.number().optional(), + thinkingTokens: z.number().optional(), + }) + .passthrough() + .optional(), + }) + .passthrough(); + +export type DroidSessionStart = z.infer; +export type DroidMessageEvent = z.infer; +export type DroidTodoState = z.infer; +export type DroidCompactionState = z.infer; +export type DroidEvent = z.infer; +export type DroidSettings = z.infer; + +// ── Kimi ──────────────────────────────────────────────────────────────────── + +export const KimiMetadataSchema = z + .object({ + session_id: z.string(), + title: z.string().optional(), + title_generated: z.boolean().optional(), + archived: z.boolean().optional(), + archived_at: z.union([z.number(), z.string(), z.null()]).optional(), + wire_mtime: z.number().nullable().optional(), + }) + .passthrough(); + +export const KimiMessageSchema = z + .object({ + role: z.string(), + content: z + .union([z.string(), z.array(z.object({ type: z.string(), text: z.string().optional() }).passthrough())]) + .optional(), + tool_calls: z + .array( + z + .object({ + type: z.literal('function'), + id: z.string(), + function: z.object({ + name: z.string(), + arguments: z.string(), + }), + }) + .passthrough(), + ) + .optional(), + tool_call_id: z.string().optional(), + id: z.number().optional(), + }) + .passthrough(); + +export type KimiMetadata = z.infer; +export type KimiMessage = z.infer; + +// ── Cursor ────────────────────────────────────────────────────────────────── + +export const 
CursorTranscriptLineSchema = z + .object({ + role: z.enum(['user', 'assistant']), + message: z.object({ + content: z.array( + ContentBlockSchema.or(z.object({ type: z.string(), text: z.string().optional() }).passthrough()), + ), + }), + }) + .passthrough(); + +export type CursorTranscriptLine = z.infer; + +// ── Qwen Code ────────────────────────────────────────────────────────────── + +export const QwenPartSchema = z + .object({ + text: z.string().optional(), + thought: z.boolean().optional(), + functionCall: z + .object({ + name: z.string(), + args: z.record(z.string(), z.unknown()).optional(), + }) + .passthrough() + .optional(), + functionResponse: z + .object({ + name: z.string(), + response: z + .object({ + output: z.string().optional(), + status: z.string().optional(), + }) + .passthrough() + .optional(), + }) + .passthrough() + .optional(), + }) + .passthrough(); + +export type QwenPart = z.infer; + +export const QwenContentSchema = z + .object({ + role: z.string().optional(), + parts: z.array(QwenPartSchema).optional(), + }) + .passthrough(); + +export type QwenContent = z.infer; + +export const QwenFileDiffSchema = z + .object({ + fileName: z.string().optional(), + fileDiff: z.string().optional(), + originalContent: z.union([z.string(), z.null()]).optional(), + diffStat: z + .object({ + model_added_lines: z.number().optional(), + model_removed_lines: z.number().optional(), + }) + .passthrough() + .optional(), + type: z.string().optional(), + }) + .passthrough(); + +export type QwenFileDiff = z.infer; + +export const QwenToolCallResultSchema = z + .object({ + displayName: z.string().optional(), + status: z.string().optional(), + resultDisplay: z.union([z.string(), QwenFileDiffSchema, z.record(z.string(), z.unknown())]).optional(), + }) + .passthrough(); + +export const QwenUsageMetadataSchema = z + .object({ + promptTokenCount: z.number().optional(), + candidatesTokenCount: z.number().optional(), + totalTokenCount: z.number().optional(), + 
cachedContentTokenCount: z.number().optional(), + thoughtsTokenCount: z.number().optional(), + }) + .passthrough(); + +export const QwenChatRecordSchema = z + .object({ + uuid: z.string(), + parentUuid: z.union([z.string(), z.null()]), + sessionId: z.string(), + timestamp: z.string(), + type: z.enum(['user', 'assistant', 'tool_result', 'system']), + subtype: z.string().optional(), + cwd: z.string(), + version: z.string().optional(), + gitBranch: z.string().optional(), + message: QwenContentSchema.optional(), + usageMetadata: QwenUsageMetadataSchema.optional(), + model: z.string().optional(), + toolCallResult: QwenToolCallResultSchema.optional(), + systemPayload: z.record(z.string(), z.unknown()).optional(), + }) + .passthrough(); + +export type QwenChatRecord = z.infer; + +// ── Serialized Session (Index JSONL) ──────────────────────────────────────── + +export const SerializedSessionSchema = z.object({ + id: z.string(), + source: z.enum(TOOL_NAMES), + cwd: z.string(), + repo: z.string().optional(), + branch: z.string().optional(), + summary: z.string().optional(), + lines: z.number(), + bytes: z.number(), + createdAt: z.string().transform((s) => new Date(s)), + updatedAt: z.string().transform((s) => new Date(s)), + originalPath: z.string(), + model: z.string().optional(), +}); diff --git a/src/types/tool-names.ts b/src/types/tool-names.ts new file mode 100644 index 0000000..35828e0 --- /dev/null +++ b/src/types/tool-names.ts @@ -0,0 +1,150 @@ +/** + * Canonical tool names and derived SessionSource type. + * Adding a new tool: add the name here, then the compiler surfaces every location that needs updating. 
+ */ + +/** Ordered list of all supported tool names — single source of truth */ +export const TOOL_NAMES = Object.freeze([ + 'claude', + 'codex', + 'copilot', + 'gemini', + 'opencode', + 'droid', + 'cursor', + 'amp', + 'kiro', + 'crush', + 'cline', + 'roo-code', + 'kilo-code', + 'antigravity', + 'kimi', + 'qwen-code', +] as const); + +/** Source CLI tool — derived from TOOL_NAMES, never defined manually */ +export type SessionSource = (typeof TOOL_NAMES)[number]; + +// ── Canonical Tool Name Sets ──────────────────────────────────────────────── +// Used by all parsers to classify tool invocations consistently. +// Each set contains all known aliases for a tool category across every CLI. + +/** Shell/command execution tools */ +export const SHELL_TOOLS: ReadonlySet = new Set([ + 'Bash', + 'bash', + 'terminal', + 'run_terminal_command', + 'run_shell_command', + 'Shell', + 'exec_command', + 'shell_command', + 'Execute', +]); + +/** File read tools */ +export const READ_TOOLS: ReadonlySet = new Set(['Read', 'ReadFile', 'read_file']); + +/** File write/create tools */ +export const WRITE_TOOLS: ReadonlySet = new Set(['Write', 'WriteFile', 'write_file', 'Create', 'create_file']); + +/** File edit/patch tools */ +export const EDIT_TOOLS: ReadonlySet = new Set([ + 'Edit', + 'EditFile', + 'edit_file', + 'edit', + 'apply_diff', + 'apply_patch', + 'ApplyPatch', + 'replace', +]); + +/** Search/grep tools */ +export const GREP_TOOLS: ReadonlySet = new Set([ + 'Grep', + 'grep', + 'grep_search', + 'codebase_search', + 'search_file_content', + 'SearchFiles', +]); + +/** Glob/directory listing tools */ +export const GLOB_TOOLS: ReadonlySet = new Set([ + 'Glob', + 'glob', + 'list_directory', + 'ListFiles', + 'file_search', + 'LS', + 'FindFiles', + 'ReadFolder', +]); + +/** Web search tools */ +export const SEARCH_TOOLS: ReadonlySet = new Set(['WebSearch', 'web_search', 'web_search_call']); + +/** Web fetch tools */ +export const FETCH_TOOLS: ReadonlySet = new Set(['WebFetch', 
'web_fetch']); + +/** Subagent/task tools */ +export const TASK_TOOLS: ReadonlySet = new Set(['Task', 'task']); + +/** Task output tools */ +export const TASK_OUTPUT_TOOLS: ReadonlySet = new Set(['TaskOutput']); + +/** User interaction tools */ +export const ASK_TOOLS: ReadonlySet = new Set(['AskUserQuestion', 'request_user_input']); + +/** Tools to skip — internal bookkeeping, no useful handoff context */ +export const SKIP_TOOLS: ReadonlySet = new Set([ + 'TaskStop', + 'TodoWrite', + 'todo_write', + 'SaveMemory', + 'save_memory', + 'Skill', + 'skill', + 'Lsp', + 'lsp', + 'update_plan', + 'view_image', +]); + +// ── Tool Sample Classification ────────────────────────────────────────────── + +/** Category for structured tool sample data — discriminant for StructuredToolSample union */ +export type ToolSampleCategory = + | 'shell' + | 'read' + | 'write' + | 'edit' + | 'grep' + | 'glob' + | 'search' + | 'fetch' + | 'task' + | 'ask' + | 'mcp'; + +/** + * Classify a raw tool invocation name into a ToolSampleCategory. + * Returns `undefined` for tools that should be skipped (internal bookkeeping). + * Returns `'mcp'` for unrecognized / MCP-namespaced tools. + */ +export function classifyToolName(name: string): ToolSampleCategory | undefined { + if (SKIP_TOOLS.has(name)) return undefined; + if (SHELL_TOOLS.has(name)) return 'shell'; + if (READ_TOOLS.has(name)) return 'read'; + if (WRITE_TOOLS.has(name)) return 'write'; + if (EDIT_TOOLS.has(name)) return 'edit'; + if (GREP_TOOLS.has(name)) return 'grep'; + if (GLOB_TOOLS.has(name)) return 'glob'; + if (SEARCH_TOOLS.has(name)) return 'search'; + if (FETCH_TOOLS.has(name)) return 'fetch'; + if (TASK_TOOLS.has(name) || TASK_OUTPUT_TOOLS.has(name)) return 'task'; + if (ASK_TOOLS.has(name)) return 'ask'; + return 'mcp'; +} diff --git a/src/utils/content.ts b/src/utils/content.ts new file mode 100644 index 0000000..a835963 --- /dev/null +++ b/src/utils/content.ts @@ -0,0 +1,64 @@ +/** + * Shared content extraction utilities. 
+ * Used across parsers that handle Anthropic-style messages with + * string | Array<{ type, text }> content formats. + */ + +/** + * Extract text from message content that can be a string or an array of blocks. + * Filters to text blocks and joins with newlines. + */ +export function extractTextFromBlocks(content: string | Array<{ type: string; text?: string }> | undefined): string { + if (!content) return ''; + if (typeof content === 'string') return content; + + return content + .filter((c) => c.type === 'text' && c.text) + .map((c) => c.text!) + .join('\n'); +} + +/** + * Check whether a text block contains system-injected content + * that should be excluded from handoff context. + * Common across Claude, Droid, Cursor, and Codex parsers. + */ +export function isSystemContent(text: string): boolean { + return ( + text.startsWith('') || + text.startsWith('') || + text.startsWith('') || + text.startsWith('') || + text.startsWith('# AGENTS.md') + ); +} + +/** + * Check whether a user message is "real" user input vs meta/system content. + * Filters out command-like messages, XML tags, and handoff summaries. + */ +export function isRealUserMessage(text: string): boolean { + if (!text) return false; + return !text.startsWith('<') && !text.startsWith('/') && !text.includes('Session Handoff'); +} + +/** + * Extract repo identifier from a git remote URL. + * Handles HTTPS (github.com/owner/repo.git) and SSH (git@github.com:owner/repo.git). + * Returns 'owner/repo' or empty string. + */ +export function extractRepoFromGitUrl(gitUrl: string): string { + if (!gitUrl) return ''; + const match = gitUrl.match(/[/:]([\w-]+)\/([\w.-]+?)(?:\.git)?$/); + return match ? `${match[1]}/${match[2]}` : ''; +} + +/** + * Extract clean user text from Cursor's tags. + * Returns inner text if tags are present, otherwise returns the original text. + */ +export function cleanUserQueryText(text: string): string { + const match = text.match(/\s*([\s\S]*?)\s*<\/user_query>/); + return match ? 
match[1].trim() : text; +} diff --git a/src/utils/diff.ts b/src/utils/diff.ts new file mode 100644 index 0000000..0b41fb2 --- /dev/null +++ b/src/utils/diff.ts @@ -0,0 +1,86 @@ +/** + * Minimal diff utilities for handoff context display. + * No external dependencies — formats old/new strings as unified diff notation. + */ + +interface DiffResult { + /** Formatted unified diff string */ + diff: string; + /** Number of lines truncated (0 if none) */ + truncated: number; +} + +/** + * Format new file content as a unified diff (all `+` lines). + * Used for Write tool calls that create new files. + */ +export function formatNewFileDiff(content: string, filePath: string, maxLines = 200): DiffResult { + const lines = content.split('\n'); + const header = `--- /dev/null\n+++ b/${filePath}`; + + const capped = lines.slice(0, maxLines); + const diffLines = capped.map((l) => `+${l}`); + const truncated = Math.max(0, lines.length - maxLines); + + let diff = `${header}\n${diffLines.join('\n')}`; + if (truncated > 0) { + diff += `\n+${truncated} lines truncated`; + } + + return { diff, truncated }; +} + +/** + * Format an edit as a unified diff from old_string → new_string. + * Since we have the exact replacement strings (not full files), + * we format them as a hunk with `-` and `+` lines. 
+ */ +export function formatEditDiff( + oldStr: string, + newStr: string, + filePath: string, + maxLines = 200, +): DiffResult { + const header = `--- a/${filePath}\n+++ b/${filePath}`; + const oldLines = oldStr.split('\n'); + const newLines = newStr.split('\n'); + + const diffLines: string[] = []; + for (const line of oldLines) { + diffLines.push(`-${line}`); + } + for (const line of newLines) { + diffLines.push(`+${line}`); + } + + const capped = diffLines.slice(0, maxLines); + const truncated = Math.max(0, diffLines.length - maxLines); + + let diff = `${header}\n${capped.join('\n')}`; + if (truncated > 0) { + diff += `\n+${truncated} lines truncated`; + } + + return { diff, truncated }; +} + +/** + * Extract the last N non-empty lines from command output. + */ +export function extractStdoutTail(output: string, lines = 5): string { + const allLines = output.split('\n').filter((l) => l.trim().length > 0); + return allLines.slice(-lines).join('\n'); +} + +/** + * Count added/removed lines from a unified diff string. 
+ */ +export function countDiffStats(diff: string): { added: number; removed: number } { + let added = 0; + let removed = 0; + for (const line of diff.split('\n')) { + if (line.startsWith('+') && !line.startsWith('+++')) added++; + else if (line.startsWith('-') && !line.startsWith('---')) removed++; + } + return { added, removed }; +} diff --git a/src/utils/forward-flags.ts b/src/utils/forward-flags.ts new file mode 100644 index 0000000..a040f7e --- /dev/null +++ b/src/utils/forward-flags.ts @@ -0,0 +1,349 @@ +import type { SessionSource } from '../types/index.js'; + +export type CanonicalFlagKey = + | 'model' + | 'yolo' + | 'force' + | 'allowAll' + | 'fullAuto' + | 'dangerouslyBypass' + | 'dangerouslySkipPermissions' + | 'sandbox' + | 'askForApproval' + | 'permissionMode' + | 'approvalMode' + | 'plan' + | 'mode' + | 'addDir' + | 'includeDirectories' + | 'allowedTools' + | 'disallowedTools' + | 'allowTool' + | 'denyTool' + | 'agent' + | 'debug' + | 'logLevel' + | 'mcpConfig' + | 'allowedMcpServerNames' + | 'additionalMcpConfig' + | 'approveMcps' + | 'cd' + | 'workspace' + | 'config'; + +export interface FlagOccurrence { + key: CanonicalFlagKey; + rawIndices: number[]; + value: string | boolean; + sourceFlag: string; +} + +export interface ParsedForwardFlags { + tokens: string[]; + occurrences: FlagOccurrence[]; +} + +export interface ForwardMapResult { + mappedArgs: string[]; + warnings?: string[]; +} + +export type ForwardFlagMapper = (context: ForwardFlagMapContext) => ForwardMapResult; + +export interface ForwardResolution { + mappedArgs: string[]; + passthroughArgs: string[]; + extraArgs: string[]; + warnings: string[]; + parsed: ParsedForwardFlags; + consumedIndices: Set; +} + +interface FlagSpec { + key: CanonicalFlagKey; + names: string[]; + valueMode: 'none' | 'required' | 'optional'; +} + +const FLAG_SPECS: FlagSpec[] = [ + { key: 'dangerouslyBypass', names: ['--dangerously-bypass-approvals-and-sandbox'], valueMode: 'none' }, + { key: 
'dangerouslySkipPermissions', names: ['--dangerously-skip-permissions'], valueMode: 'none' }, + { key: 'fullAuto', names: ['--full-auto'], valueMode: 'none' }, + { key: 'askForApproval', names: ['--ask-for-approval', '-a'], valueMode: 'required' }, + { key: 'approvalMode', names: ['--approval-mode'], valueMode: 'required' }, + { key: 'permissionMode', names: ['--permission-mode'], valueMode: 'required' }, + { key: 'allowedMcpServerNames', names: ['--allowed-mcp-server-names'], valueMode: 'required' }, + { key: 'additionalMcpConfig', names: ['--additional-mcp-config'], valueMode: 'required' }, + { key: 'includeDirectories', names: ['--include-directories'], valueMode: 'required' }, + { key: 'disallowedTools', names: ['--disallowed-tools', '--disallowedTools'], valueMode: 'required' }, + { key: 'allowedTools', names: ['--allowed-tools', '--allowedTools'], valueMode: 'required' }, + { key: 'allowTool', names: ['--allow-tool'], valueMode: 'required' }, + { key: 'denyTool', names: ['--deny-tool'], valueMode: 'required' }, + { key: 'approveMcps', names: ['--approve-mcps'], valueMode: 'none' }, + { key: 'addDir', names: ['--add-dir'], valueMode: 'required' }, + { key: 'agent', names: ['--agent'], valueMode: 'required' }, + { key: 'logLevel', names: ['--log-level'], valueMode: 'required' }, + { key: 'mcpConfig', names: ['--mcp-config'], valueMode: 'required' }, + { key: 'workspace', names: ['--workspace'], valueMode: 'required' }, + { key: 'sandbox', names: ['--sandbox', '-s'], valueMode: 'optional' }, + { key: 'model', names: ['--model', '-m'], valueMode: 'required' }, + { key: 'yolo', names: ['--yolo', '-y'], valueMode: 'none' }, + { key: 'allowAll', names: ['--allow-all'], valueMode: 'none' }, + { key: 'force', names: ['--force', '-f'], valueMode: 'none' }, + { key: 'debug', names: ['--debug', '-d'], valueMode: 'optional' }, + { key: 'plan', names: ['--plan'], valueMode: 'none' }, + { key: 'mode', names: ['--mode'], valueMode: 'required' }, + { key: 'cd', names: 
['--cd', '-C'], valueMode: 'required' }, + { key: 'config', names: ['--config', '-c'], valueMode: 'required' }, +]; + +function isOptionLike(value: string | undefined): value is string { + return typeof value === 'string' && value.startsWith('-'); +} + +function splitInlineValue(token: string, optionName: string): string | undefined { + const prefix = `${optionName}=`; + return token.startsWith(prefix) ? token.slice(prefix.length) : undefined; +} + +function matchSpecAt( + tokens: string[], + index: number, + spec: FlagSpec, +): { occurrence?: FlagOccurrence; nextIndex: number } | null { + const token = tokens[index]; + + for (const name of spec.names) { + const isExact = token === name; + const inlineValue = splitInlineValue(token, name); + if (!isExact && inlineValue === undefined) continue; + + if (spec.valueMode === 'none') { + return { + occurrence: { key: spec.key, rawIndices: [index], value: true, sourceFlag: name }, + nextIndex: index, + }; + } + + if (inlineValue !== undefined) { + return { + occurrence: { key: spec.key, rawIndices: [index], value: inlineValue, sourceFlag: name }, + nextIndex: index, + }; + } + + const maybeValue = tokens[index + 1]; + if (maybeValue !== undefined && !isOptionLike(maybeValue)) { + return { + occurrence: { key: spec.key, rawIndices: [index, index + 1], value: maybeValue, sourceFlag: name }, + nextIndex: index + 1, + }; + } + + if (spec.valueMode === 'optional') { + return { + occurrence: { key: spec.key, rawIndices: [index], value: true, sourceFlag: name }, + nextIndex: index, + }; + } + + // Required value missing: keep raw token untouched (do not consume). 
+ return { nextIndex: index }; + } + + return null; +} + +export function parseForwardFlags(tokens: string[]): ParsedForwardFlags { + const occurrences: FlagOccurrence[] = []; + + for (let index = 0; index < tokens.length; index += 1) { + let matched = false; + + for (const spec of FLAG_SPECS) { + const result = matchSpecAt(tokens, index, spec); + if (!result) continue; + + if (result.occurrence) { + occurrences.push(result.occurrence); + } + + index = result.nextIndex; + matched = true; + break; + } + + if (!matched) continue; + } + + return { tokens: [...tokens], occurrences }; +} + +function sortByPosition(a: FlagOccurrence, b: FlagOccurrence): number { + return a.rawIndices[0] - b.rawIndices[0]; +} + +function splitCsv(values: string[]): string[] { + return values + .flatMap((value) => value.split(',')) + .map((value) => value.trim()) + .filter(Boolean); +} + +function parseBooleanLike(value: string | boolean): boolean | undefined { + if (typeof value === 'boolean') return value; + const normalized = value.trim().toLowerCase(); + if (['1', 'true', 'yes', 'on', 'enabled'].includes(normalized)) return true; + if (['0', 'false', 'no', 'off', 'disabled'].includes(normalized)) return false; + return undefined; +} + +export class ForwardFlagMapContext { + private readonly consumed = new Set(); + + public readonly warnings: string[] = []; + + constructor(public readonly parsed: ParsedForwardFlags) {} + + public all(...keys: CanonicalFlagKey[]): FlagOccurrence[] { + return this.parsed.occurrences + .filter((occ) => keys.includes(occ.key) && occ.rawIndices.every((index) => !this.consumed.has(index))) + .sort(sortByPosition); + } + + public has(...keys: CanonicalFlagKey[]): boolean { + return this.all(...keys).length > 0; + } + + public latest(...keys: CanonicalFlagKey[]): FlagOccurrence | undefined { + const values = this.all(...keys); + return values.length > 0 ? 
values[values.length - 1] : undefined; + } + + public latestString(...keys: CanonicalFlagKey[]): string | undefined { + const latest = [...this.all(...keys)].reverse().find((occ) => typeof occ.value === 'string'); + return latest && typeof latest.value === 'string' ? latest.value : undefined; + } + + public allStrings(...keys: CanonicalFlagKey[]): string[] { + return this.all(...keys) + .map((occ) => (typeof occ.value === 'string' ? occ.value : undefined)) + .filter((value): value is string => value !== undefined); + } + + public allCsvStrings(...keys: CanonicalFlagKey[]): string[] { + return splitCsv(this.allStrings(...keys)); + } + + public consume(...occurrences: FlagOccurrence[]): void { + for (const occurrence of occurrences) { + for (const index of occurrence.rawIndices) { + this.consumed.add(index); + } + } + } + + public consumeKeys(...keys: CanonicalFlagKey[]): void { + this.consume(...this.all(...keys)); + } + + public consumeLatest(...keys: CanonicalFlagKey[]): FlagOccurrence | undefined { + const latest = this.latest(...keys); + if (latest) this.consume(latest); + return latest; + } + + public consumeAllStrings(...keys: CanonicalFlagKey[]): string[] { + const occurrences = this.all(...keys).filter((occ) => typeof occ.value === 'string'); + this.consume(...occurrences); + return occurrences.map((occ) => occ.value as string); + } + + public consumeAllCsvStrings(...keys: CanonicalFlagKey[]): string[] { + return splitCsv(this.consumeAllStrings(...keys)); + } + + public consumeAnyBoolean(...keys: CanonicalFlagKey[]): boolean { + const occurrences = this.all(...keys).filter((occ) => parseBooleanLike(occ.value) === true); + this.consume(...occurrences); + return occurrences.length > 0; + } + + public consumedIndices(): Set { + return new Set(this.consumed); + } + + public passthroughArgs(): string[] { + return this.parsed.tokens.filter((_, index) => !this.consumed.has(index)); + } + + public resolveWith(mapper: ForwardFlagMapper): ForwardResolution { + const 
result = mapper(this); + const mappedArgs = result.mappedArgs; + const passthroughArgs = this.passthroughArgs(); + + return { + mappedArgs, + passthroughArgs, + extraArgs: [...mappedArgs, ...passthroughArgs], + warnings: [...this.warnings, ...(result.warnings || [])], + parsed: this.parsed, + consumedIndices: this.consumedIndices(), + }; + } +} + +export function resolveForwardingArgs(rawTokens: string[] | undefined, mapper?: ForwardFlagMapper): ForwardResolution { + const tokens = rawTokens && rawTokens.length > 0 ? [...rawTokens] : []; + const parsed = parseForwardFlags(tokens); + + if (!mapper || tokens.length === 0) { + return { + mappedArgs: [], + passthroughArgs: tokens, + extraArgs: tokens, + warnings: [], + parsed, + consumedIndices: new Set(), + }; + } + + return new ForwardFlagMapContext(parsed).resolveWith(mapper); +} + +export function normalizeAgentSandbox(value: string | boolean | undefined): 'enabled' | 'disabled' | undefined { + if (value === undefined) return undefined; + if (typeof value === 'boolean') return value ? 
'enabled' : 'disabled'; + + const normalized = value.trim().toLowerCase(); + if (['enabled', 'on', 'true', '1', 'read-only', 'workspace-write'].includes(normalized)) return 'enabled'; + if (['disabled', 'off', 'false', '0', 'danger-full-access'].includes(normalized)) return 'disabled'; + return undefined; +} + +export function formatForwardArgs(args: string[]): string { + return args + .map((arg) => { + if (/^[A-Za-z0-9_./:=-]+$/.test(arg)) return arg; + return JSON.stringify(arg); + }) + .join(' '); +} + +export function gatherRawForwardArgs(extraArgs: string[] | undefined, tailArgs: string[] | undefined): string[] { + return [...(extraArgs || []), ...(tailArgs || [])]; +} + +export interface HandoffForwardingOptions { + rawArgs?: string[]; + tailArgs?: string[]; +} + +export function resolveTargetForwarding( + _target: SessionSource, + mapper: ForwardFlagMapper | undefined, + options?: HandoffForwardingOptions, +): ForwardResolution { + const rawTokens = gatherRawForwardArgs(options?.rawArgs, options?.tailArgs); + return resolveForwardingArgs(rawTokens, mapper); +} diff --git a/src/utils/fs-helpers.ts b/src/utils/fs-helpers.ts new file mode 100644 index 0000000..73d41a2 --- /dev/null +++ b/src/utils/fs-helpers.ts @@ -0,0 +1,89 @@ +/** + * Shared filesystem helpers used by multiple parsers. + */ +import * as fs from 'fs'; +import * as path from 'path'; +import { logger } from '../logger.js'; + +export interface FindFilesOptions { + /** Filter predicate — return true to include a file */ + match: (entry: fs.Dirent, fullPath: string) => boolean; + /** Recurse into subdirectories (default: true) */ + recursive?: boolean; + /** Maximum directory depth to recurse (default: Infinity) */ + maxDepth?: number; +} + +/** + * Walk a directory and collect files matching a predicate. + * Returns an empty array if the root doesn't exist. + * Silently skips directories that can't be read. 
+ */ +export function findFiles(root: string, options: FindFilesOptions): string[] { + const files: string[] = []; + + if (!fs.existsSync(root)) return files; + + const recursive = options.recursive ?? true; + const maxDepth = options.maxDepth ?? Infinity; + + const walk = (dir: string, depth: number): void => { + if (depth > maxDepth) return; + + try { + const entries = fs.readdirSync(dir, { withFileTypes: true }); + for (const entry of entries) { + const fullPath = path.join(dir, entry.name); + let isDir = entry.isDirectory(); + let isFile = entry.isFile(); + if (entry.isSymbolicLink()) { + try { + const stat = fs.statSync(fullPath); + isDir = stat.isDirectory(); + isFile = stat.isFile(); + } catch { + continue; // broken symlink — skip gracefully + } + } + if (isDir && recursive) { + walk(fullPath, depth + 1); + } else if (isFile && options.match(entry, fullPath)) { + files.push(fullPath); + } + } + } catch (err) { + logger.debug('findFiles: cannot read directory', dir, err); + } + }; + + walk(root, 0); + return files; +} + +/** + * List immediate subdirectories of a given path. + * Returns an empty array if the path doesn't exist. 
+ */ +export function listSubdirectories(dir: string): string[] { + if (!fs.existsSync(dir)) return []; + + try { + return fs + .readdirSync(dir, { withFileTypes: true }) + .filter((e) => { + if (e.isDirectory()) return true; + if (e.isSymbolicLink()) { + try { + return fs.statSync(path.join(dir, e.name)).isDirectory(); + } catch { + return false; // broken symlink — skip gracefully + } + } + return false; + }) + .map((e) => path.join(dir, e.name)); + } catch (err) { + logger.debug('listSubdirectories: cannot read directory', dir, err); + return []; + } +} diff --git a/src/utils/index.ts b/src/utils/index.ts index 26f3c1d..da0b861 100644 --- a/src/utils/index.ts +++ b/src/utils/index.ts @@ -1,28 +1,70 @@ +import { createHash } from 'crypto'; import * as fs from 'fs'; import * as path from 'path'; -import type { UnifiedSession, SessionSource, SessionContext } from '../types/index.js'; -import { parseCodexSessions, extractCodexContext } from '../parsers/codex.js'; -import { parseClaudeSessions, extractClaudeContext } from '../parsers/claude.js'; -import { parseCopilotSessions, extractCopilotContext } from '../parsers/copilot.js'; -import { parseGeminiSessions, extractGeminiContext } from '../parsers/gemini.js'; -import { parseOpenCodeSessions, extractOpenCodeContext } from '../parsers/opencode.js'; - -const CONTINUES_DIR = path.join(process.env.HOME || '~', '.continues'); +import { logger } from '../logger.js'; +import { adapters } from '../parsers/registry.js'; +import type { VerbosityConfig } from '../config/index.js'; +import type { SessionContext, SessionSource, UnifiedSession } from '../types/index.js'; +import { homeDir } from './parser-helpers.js'; + +const CONTINUES_DIR = path.join(homeDir(), '.continues'); const INDEX_FILE = path.join(CONTINUES_DIR, 'sessions.jsonl'); const CONTEXTS_DIR = path.join(CONTINUES_DIR, 'contexts'); // Cache TTL in milliseconds (5 minutes) const INDEX_TTL = 5 * 60 * 1000; +// Prefix for the env fingerprint line stored in the index 
file +const ENV_FINGERPRINT_PREFIX = '#env:'; + +/** + * Build a fingerprint of environment variables that affect parser storage paths. + * When any of these env vars change, the cached index must be rebuilt. + */ +function computeEnvFingerprint(): string { + const seen = new Set(); + const parts: string[] = []; + for (const adapter of Object.values(adapters)) { + if (adapter.envVar && !seen.has(adapter.envVar)) { + seen.add(adapter.envVar); + const val = process.env[adapter.envVar] || ''; + parts.push(`${adapter.envVar}=${val}`); + } + } + // Hash to avoid leaking user-specific paths in the on-disk cache + return createHash('sha256').update(parts.sort().join('|')).digest('hex'); +} + +/** + * Read the env fingerprint stored in the first line of the index file. + */ +function readStoredFingerprint(): string | null { + try { + const content = fs.readFileSync(INDEX_FILE, 'utf8'); + const firstLine = content.slice(0, content.indexOf('\n')); + if (firstLine.startsWith(ENV_FINGERPRINT_PREFIX)) { + return firstLine.slice(ENV_FINGERPRINT_PREFIX.length); + } + return null; + } catch (err) { + logger.debug('index: failed to read stored fingerprint', err); + return null; + } +} + /** * Ensure continues directories exist */ export function ensureDirectories(): void { - if (!fs.existsSync(CONTINUES_DIR)) { - fs.mkdirSync(CONTINUES_DIR, { recursive: true }); - } - if (!fs.existsSync(CONTEXTS_DIR)) { - fs.mkdirSync(CONTEXTS_DIR, { recursive: true }); + try { + if (!fs.existsSync(CONTINUES_DIR)) { + fs.mkdirSync(CONTINUES_DIR, { recursive: true }); + } + if (!fs.existsSync(CONTEXTS_DIR)) { + fs.mkdirSync(CONTEXTS_DIR, { recursive: true }); + } + } catch (err) { + logger.debug('index: failed to create directories', err); } } @@ -30,13 +72,23 @@ export function ensureDirectories(): void { * Check if index needs rebuilding */ export function indexNeedsRebuild(): boolean { - if (!fs.existsSync(INDEX_FILE)) { - return true; + try { + const stats = fs.statSync(INDEX_FILE); + const age 
= Date.now() - stats.mtime.getTime(); + if (age > INDEX_TTL) return true; + + // Rebuild if env vars affecting storage paths have changed + const stored = readStoredFingerprint(); + if (stored !== computeEnvFingerprint()) { + logger.debug('index: env fingerprint changed, rebuilding'); + return true; + } + + return false; + } catch (err) { + logger.debug('index: cache stale check failed', err); + return true; // File doesn't exist or can't be read } - - const stats = fs.statSync(INDEX_FILE); - const age = Date.now() - stats.mtime.getTime(); - return age > INDEX_TTL; } /** @@ -50,28 +102,28 @@ export async function buildIndex(force = false): Promise { return loadIndex(); } - // Parse all sessions from all sources in parallel - const [codexSessions, claudeSessions, copilotSessions, geminiSessions, opencodeSessions] = await Promise.all([ - parseCodexSessions(), - parseClaudeSessions(), - parseCopilotSessions(), - parseGeminiSessions(), - parseOpenCodeSessions(), - ]); + // Parse all sessions from all sources in parallel — use allSettled so one + // broken parser doesn't crash the entire CLI + const results = await Promise.allSettled(Object.values(adapters).map((a) => a.parseSessions())); - const allSessions = [...codexSessions, ...claudeSessions, ...copilotSessions, ...geminiSessions, ...opencodeSessions]; + const allSessions = results + .filter((r): r is PromiseFulfilledResult => r.status === 'fulfilled') + .flatMap((r) => r.value); // Sort by updated time (newest first) allSessions.sort((a, b) => b.updatedAt.getTime() - a.updatedAt.getTime()); - // Write to index file - const lines = allSessions.map(s => JSON.stringify({ - ...s, - createdAt: s.createdAt.toISOString(), - updatedAt: s.updatedAt.toISOString(), - })); + // Write to index file — first line is the env fingerprint + const lines = allSessions.map((s) => + JSON.stringify({ + ...s, + createdAt: s.createdAt.toISOString(), + updatedAt: s.updatedAt.toISOString(), + }), + ); - fs.writeFileSync(INDEX_FILE, 
lines.join('\n') + '\n'); + const fingerprint = `${ENV_FINGERPRINT_PREFIX}${computeEnvFingerprint()}`; + fs.writeFileSync(INDEX_FILE, fingerprint + '\n' + lines.join('\n') + '\n'); return allSessions; } @@ -80,21 +132,32 @@ export async function buildIndex(force = false): Promise { * Load sessions from the index file */ export function loadIndex(): UnifiedSession[] { - if (!fs.existsSync(INDEX_FILE)) { - return []; + try { + const content = fs.readFileSync(INDEX_FILE, 'utf8'); + const lines = content + .trim() + .split('\n') + .filter((l) => l && !l.startsWith(ENV_FINGERPRINT_PREFIX)); + + return lines.flatMap((line) => { + try { + const parsed = JSON.parse(line); + return [ + { + ...parsed, + createdAt: new Date(parsed.createdAt), + updatedAt: new Date(parsed.updatedAt), + } as UnifiedSession, + ]; + } catch (err) { + logger.debug('index: skipping corrupted line in cache', err); + return []; // Skip corrupted lines + } + }); + } catch (err) { + logger.debug('index: cannot read cache file', INDEX_FILE, err); + return []; // File doesn't exist or can't be read } - - const content = fs.readFileSync(INDEX_FILE, 'utf8'); - const lines = content.trim().split('\n').filter(l => l); - - return lines.map(line => { - const parsed = JSON.parse(line); - return { - ...parsed, - createdAt: new Date(parsed.createdAt), - updatedAt: new Date(parsed.updatedAt), - } as UnifiedSession; - }); } /** @@ -109,7 +172,7 @@ export async function getAllSessions(forceRebuild = false): Promise { const all = await getAllSessions(forceRebuild); - return all.filter(s => s.source === source); + return all.filter((s) => s.source === source); } /** @@ -117,27 +180,16 @@ export async function getSessionsBySource(source: SessionSource, forceRebuild = */ export async function findSession(id: string): Promise { const all = await getAllSessions(); - return all.find(s => s.id === id || s.id.startsWith(id)) || null; + return all.find((s) => s.id === id || s.id.startsWith(id)) || null; } /** * Extract 
context from a session based on its source */ -export async function extractContext(session: UnifiedSession): Promise { - switch (session.source) { - case 'codex': - return extractCodexContext(session); - case 'claude': - return extractClaudeContext(session); - case 'copilot': - return extractCopilotContext(session); - case 'gemini': - return extractGeminiContext(session); - case 'opencode': - return extractOpenCodeContext(session); - default: - throw new Error(`Unknown session source: ${session.source}`); - } +export async function extractContext(session: UnifiedSession, config?: VerbosityConfig): Promise { + const adapter = adapters[session.source]; + if (!adapter) throw new Error(`Unknown session source: ${session.source}`); + return adapter.extractContext(session, config); } /** @@ -145,10 +197,10 @@ export async function extractContext(session: UnifiedSession): Promise JSON.stringify({ - ...s, - createdAt: s.createdAt.toISOString(), - updatedAt: s.updatedAt.toISOString(), - })).join('\n'); + return sessions + .map((s) => + JSON.stringify({ + ...s, + createdAt: s.createdAt.toISOString(), + updatedAt: s.updatedAt.toISOString(), + }), + ) + .join('\n'); } diff --git a/src/utils/jsonl.ts b/src/utils/jsonl.ts new file mode 100644 index 0000000..a38880e --- /dev/null +++ b/src/utils/jsonl.ts @@ -0,0 +1,99 @@ +/** + * Shared JSONL reading utilities. + * Replaces 5+ identical readAllMessages() functions across parsers. + */ +import * as fs from 'fs'; +import * as readline from 'readline'; +import { logger } from '../logger.js'; + +/** + * Read an entire JSONL file into an array. + * Each line is JSON.parse'd; invalid lines are silently skipped. + * Returns an empty array if the file doesn't exist or can't be read. 
+ */ +export async function readJsonlFile(filePath: string): Promise { + if (!fs.existsSync(filePath)) return []; + + return new Promise((resolve) => { + const items: T[] = []; + const stream = fs.createReadStream(filePath, { encoding: 'utf8' }); + const rl = readline.createInterface({ input: stream, crlfDelay: Infinity }); + + rl.on('line', (line) => { + try { + items.push(JSON.parse(line)); + } catch (err) { + logger.debug('jsonl: skipping invalid line in', filePath, err); + } + }); + + rl.on('close', () => resolve(items)); + rl.on('error', () => resolve(items)); + }); +} + +/** + * Scan the first N lines of a JSONL file, calling `visitor` on each parsed line. + * The visitor returns 'continue' to keep reading or 'stop' to abort early. + * Useful for extracting metadata from session headers without reading the full file. + */ +export async function scanJsonlHead( + filePath: string, + maxLines: number, + visitor: (parsed: unknown, lineIndex: number) => 'continue' | 'stop', +): Promise { + if (!fs.existsSync(filePath)) return; + + return new Promise((resolve) => { + const stream = fs.createReadStream(filePath, { encoding: 'utf8' }); + const rl = readline.createInterface({ input: stream, crlfDelay: Infinity }); + let lineIndex = 0; + let stopped = false; + + rl.on('line', (line) => { + if (stopped || lineIndex >= maxLines) { + if (!stopped) { + stopped = true; + rl.close(); + stream.close(); + } + return; + } + + try { + const parsed = JSON.parse(line); + const action = visitor(parsed, lineIndex); + if (action === 'stop') { + stopped = true; + rl.close(); + stream.close(); + } + } catch (err) { + logger.debug('jsonl: skipping invalid line at index', lineIndex, 'in', filePath, err); + } + + lineIndex++; + }); + + rl.on('close', () => resolve()); + rl.on('error', () => resolve()); + }); +} + +/** + * Count lines in a file and return both count and file size in bytes. + * Used by multiple parsers for session metadata. 
+ */ +export async function getFileStats(filePath: string): Promise<{ lines: number; bytes: number }> { + const stats = fs.statSync(filePath); + + return new Promise((resolve) => { + let lines = 0; + const stream = fs.createReadStream(filePath, { encoding: 'utf8' }); + const rl = readline.createInterface({ input: stream, crlfDelay: Infinity }); + + rl.on('line', () => lines++); + rl.on('close', () => resolve({ lines, bytes: stats.size })); + rl.on('error', () => resolve({ lines: 0, bytes: stats.size })); + }); +} diff --git a/src/utils/markdown.ts b/src/utils/markdown.ts index 76a17e0..595ecc6 100644 --- a/src/utils/markdown.ts +++ b/src/utils/markdown.ts @@ -1,13 +1,102 @@ -import type { UnifiedSession, ConversationMessage, ToolUsageSummary, SessionNotes } from '../types/index.js'; - -/** Human-readable labels for each session source */ -export const SOURCE_LABELS: Record = { - claude: 'Claude Code', - copilot: 'GitHub Copilot CLI', - gemini: 'Gemini CLI', - codex: 'Codex CLI', - opencode: 'OpenCode', -}; +import { adapters } from '../parsers/registry.js'; +import * as os from 'os'; +import type { + ConversationMessage, + SessionNotes, + SubagentResult, + ReasoningStep, + StructuredToolSample, + ToolSample, + ToolUsageSummary, + UnifiedSession, +} from '../types/index.js'; +import { + SHELL_TOOLS, + READ_TOOLS, + WRITE_TOOLS, + EDIT_TOOLS, + GREP_TOOLS, + GLOB_TOOLS, + SEARCH_TOOLS, + FETCH_TOOLS, + TASK_TOOLS, + TASK_OUTPUT_TOOLS, + ASK_TOOLS, + classifyToolName, +} from '../types/tool-names.js'; +import type { VerbosityConfig } from '../config/index.js'; +import { getPreset } from '../config/index.js'; + +/** Replace home directory prefix with ~ and escape backticks for safe markdown inline code */ +const _home = os.homedir(); +export function safePath(p: string): string { + const tildified = p.startsWith(_home) ? 
'~' + p.slice(_home.length) : p; + return tildified.replace(/`/g, '\\`'); +} + +/** Human-readable labels for each session source — derived lazily from the adapter registry */ +let _sourceLabels: Record | null = null; +export function getSourceLabels(): Record { + if (!_sourceLabels) { + _sourceLabels = Object.fromEntries(Object.values(adapters).map((a) => [a.name, a.label])); + } + return _sourceLabels; +} + +// ── Display Caps ──────────────────────────────────────────────────────────── + +interface DisplayCaps { + shellDetailed: number; + shellStdoutLines: number; + writeEditDetailed: number; + writeEditDiffLines: number; + readEntries: number; + grepGlobSearchFetch: number; + mcpTaskAsk: number; +} + +/** Derive display caps from a VerbosityConfig — single source of truth for all limits */ +function capsFromConfig(config: VerbosityConfig): DisplayCaps { + return { + shellDetailed: config.shell.maxSamples, + shellStdoutLines: config.shell.stdoutLines, + writeEditDetailed: config.write.maxSamples, + writeEditDiffLines: config.write.diffLines, + readEntries: config.read.maxSamples, + grepGlobSearchFetch: config.grep.maxSamples, + mcpTaskAsk: config.mcp.maxSamplesPerNamespace, + }; +} + +// ── Category Ordering ─────────────────────────────────────────────────────── + +/** Build sort-order map from the canonical tool name sets — never goes stale */ +function buildCategoryOrder(): Record { + const order: Record = {}; + const mapping: [ReadonlySet, number][] = [ + [SHELL_TOOLS, 0], + [WRITE_TOOLS, 1], + [EDIT_TOOLS, 2], + [READ_TOOLS, 3], + [GREP_TOOLS, 4], + [GLOB_TOOLS, 5], + [SEARCH_TOOLS, 6], + [FETCH_TOOLS, 7], + [TASK_TOOLS, 8], + [TASK_OUTPUT_TOOLS, 8], + [ASK_TOOLS, 9], + ]; + for (const [set, priority] of mapping) { + for (const name of set) order[name] = priority; + } + return order; +} + +const CATEGORY_ORDER: Record = buildCategoryOrder(); + +function getCategoryOrder(name: string): number { + return CATEGORY_ORDER[name] ?? 
10; // MCP/unknown go last +} /** * Generate a markdown handoff document from any session source. @@ -20,8 +109,12 @@ export function generateHandoffMarkdown( pendingTasks: string[], toolSummaries: ToolUsageSummary[] = [], sessionNotes?: SessionNotes, + config: VerbosityConfig = getPreset('standard'), + mode: 'inline' | 'reference' = 'inline', ): string { - const sourceLabel = SOURCE_LABELS[session.source] || session.source; + const labels = getSourceLabels(); + const sourceLabel = labels[session.source] || session.source; + const caps = capsFromConfig(config); const lines: string[] = [ '# Session Handoff Context', @@ -36,6 +129,10 @@ export function generateHandoffMarkdown( `| **Working Directory** | \`${session.cwd}\` |`, ]; + if (session.originalPath) { + lines.push(`| **Session File** | \`${safePath(session.originalPath)}\` |`); + } + if (session.repo) { lines.push(`| **Repository** | ${session.repo}${session.branch ? ` @ \`${session.branch}\`` : ''} |`); } @@ -46,8 +143,22 @@ export function generateHandoffMarkdown( lines.push(`| **Model** | ${sessionNotes.model} |`); } lines.push(`| **Last Active** | ${session.updatedAt.toISOString().slice(0, 16).replace('T', ' ')} |`); - if (sessionNotes?.tokenUsage) { - lines.push(`| **Tokens Used** | ${sessionNotes.tokenUsage.input.toLocaleString()} in / ${sessionNotes.tokenUsage.output.toLocaleString()} out |`); + if (sessionNotes?.tokenUsage && (sessionNotes.tokenUsage.input > 0 || sessionNotes.tokenUsage.output > 0)) { + lines.push( + `| **Tokens Used** | ${sessionNotes.tokenUsage.input.toLocaleString()} in / ${sessionNotes.tokenUsage.output.toLocaleString()} out |`, + ); + } + if (sessionNotes?.cacheTokens && (sessionNotes.cacheTokens.read > 0 || sessionNotes.cacheTokens.creation > 0)) { + lines.push( + `| **Cache Tokens** | ${sessionNotes.cacheTokens.read.toLocaleString()} read / ${sessionNotes.cacheTokens.creation.toLocaleString()} created |`, + ); + } + if (sessionNotes?.thinkingTokens && sessionNotes.thinkingTokens 
> 0) { + lines.push(`| **Thinking Tokens** | ${sessionNotes.thinkingTokens.toLocaleString()} |`); + } + if (sessionNotes?.activeTimeMs) { + const mins = Math.round(sessionNotes.activeTimeMs / 60000); + lines.push(`| **Active Time** | ${mins} min |`); } lines.push(`| **Files Modified** | ${filesModified.length} |`); lines.push(`| **Messages** | ${messages.length} |`); @@ -62,38 +173,53 @@ export function generateHandoffMarkdown( lines.push(''); } + if (sessionNotes?.compactSummary) { + lines.push('## Session Context (Compacted)'); + lines.push(''); + lines.push(`> ${sessionNotes.compactSummary}`); + lines.push(''); + lines.push(''); + } + + // ── Category-aware Tool Activity section ── if (toolSummaries.length > 0) { lines.push('## Tool Activity'); lines.push(''); - const sortedTools = [...toolSummaries].sort((a, b) => b.count - a.count); - for (const tool of sortedTools) { - const sampleStr = tool.samples.map(s => `\`${s.summary}\``).join(' · '); - lines.push(`- **${tool.name}** (×${tool.count}): ${sampleStr}`); - } - lines.push(''); + lines.push(...renderToolActivity(toolSummaries, caps)); lines.push(''); } + // ── Subagent Results ── + if (sessionNotes?.subagentResults && sessionNotes.subagentResults.length > 0) { + lines.push(...renderSubagentResults(sessionNotes.subagentResults, config)); + } + if (sessionNotes?.reasoning && sessionNotes.reasoning.length > 0) { lines.push('## Key Decisions'); lines.push(''); - for (const thought of sessionNotes.reasoning.slice(0, 5)) { - lines.push(`- 💭 ${thought}`); + for (const thought of sessionNotes.reasoning.slice(0, config.thinking.maxHighlights)) { + lines.push(`- ${thought}`); } lines.push(''); lines.push(''); } - // Show last 10 messages for richer context - const recentMessages = messages.slice(-10); + // ── Reasoning Chain ── + if (sessionNotes?.reasoningSteps && sessionNotes.reasoningSteps.length > 0) { + lines.push(...renderReasoningChain(sessionNotes.reasoningSteps)); + } + + // Show recent messages for richer 
context + const recentMessages = messages.slice(-config.recentMessages); if (recentMessages.length > 0) { lines.push('## Recent Conversation'); lines.push(''); for (const msg of recentMessages) { - const role = msg.role === 'user' ? '👤 User' : '🤖 Assistant'; + const role = msg.role === 'user' ? 'User' : 'Assistant'; lines.push(`### ${role}`); lines.push(''); - lines.push(msg.content.slice(0, 500) + (msg.content.length > 500 ? '…' : '')); + const maxChars = config.maxMessageChars; + lines.push(msg.content.slice(0, maxChars) + (msg.content.length > maxChars ? '\u2026' : '')); lines.push(''); } lines.push(''); @@ -119,9 +245,532 @@ export function generateHandoffMarkdown( lines.push(''); } + if (session.originalPath) { + lines.push('## Session Origin'); + lines.push(''); + lines.push(`This session was extracted from **${labels[session.source] || session.source}** session data.`); + lines.push(`- **Session file**: \`${safePath(session.originalPath)}\``); + lines.push(`- **Session ID**: \`${session.id}\``); + if (session.cwd) { + lines.push(`- **Project directory**: \`${session.cwd}\``); + } + lines.push(''); + lines.push('> To access the raw session data, inspect the file path above.'); + lines.push(''); + } + lines.push('---'); lines.push(''); - lines.push('**You are continuing this session. Pick up exactly where it left off — review the conversation above, check pending tasks, and keep going.**'); + lines.push( + '**You are continuing this session. Pick up exactly where it left off — review the conversation above, check pending tasks, and keep going.**', + ); return lines.join('\n'); } + +// ── MCP Namespace Grouping ─────────────────────────────────────────────────── + +/** + * Group MCP tools sharing a `mcp____*` prefix into a single + * synthetic ToolUsageSummary. Non-namespaced tools pass through unchanged. 
+ */ +function groupMcpByNamespace(summaries: ToolUsageSummary[], mcpSampleCap: number): ToolUsageSummary[] { + const result: ToolUsageSummary[] = []; + const nsGroups = new Map(); + + for (const tool of summaries) { + const category = detectCategory(tool); + if (category !== 'mcp' || !tool.name.startsWith('mcp__')) { + result.push(tool); + continue; + } + // Extract namespace: mcp__github__list_issues → github + const parts = tool.name.split('__'); + if (parts.length < 3) { + result.push(tool); + continue; + } + const ns = parts[1]; + if (!nsGroups.has(ns)) nsGroups.set(ns, []); + nsGroups.get(ns)!.push(tool); + } + + // Merge groups with 2+ tools; leave singletons ungrouped + for (const [ns, tools] of nsGroups) { + if (tools.length === 1) { + result.push(tools[0]); + continue; + } + const totalCount = tools.reduce((s, t) => s + t.count, 0); + const totalErrors = tools.reduce((s, t) => s + (t.errorCount || 0), 0); + const mergedSamples: ToolSample[] = []; + for (const t of tools) { + for (const s of t.samples) { + if (mergedSamples.length < mcpSampleCap) mergedSamples.push(s); + } + } + result.push({ + name: `MCP: ${ns}`, + count: totalCount, + ...(totalErrors > 0 ? { errorCount: totalErrors } : {}), + samples: mergedSamples, + }); + } + + return result; +} + +// ── Category-Aware Rendering ──────────────────────────────────────────────── + +function renderToolActivity(toolSummaries: ToolUsageSummary[], caps: DisplayCaps): string[] { + // Group MCP tools by namespace (e.g. 
mcp__github__* → "MCP: github") + const grouped = groupMcpByNamespace(toolSummaries, caps.mcpTaskAsk); + const sorted = [...grouped].sort((a, b) => getCategoryOrder(a.name) - getCategoryOrder(b.name)); + const lines: string[] = []; + + for (const tool of sorted) { + const category = detectCategory(tool); + switch (category) { + case 'shell': + lines.push(...renderShellSection(tool, caps)); + break; + case 'write': + lines.push(...renderWriteSection(tool, caps)); + break; + case 'edit': + lines.push(...renderEditSection(tool, caps)); + break; + case 'read': + lines.push(...renderReadSection(tool, caps)); + break; + case 'grep': + lines.push(...renderGrepSection(tool, caps)); + break; + case 'glob': + lines.push(...renderGlobSection(tool, caps)); + break; + case 'search': + case 'fetch': + case 'task': + case 'ask': + case 'mcp': + lines.push(...renderCompactSection(tool, category, caps)); + break; + default: + lines.push(...renderFallbackSection(tool)); + } + lines.push(''); + } + + return lines; +} + +/** Detect the structural category of a ToolUsageSummary from its first sample's data */ +function detectCategory(tool: ToolUsageSummary): string { + const firstData = tool.samples[0]?.data; + if (firstData) return firstData.category; + // Fallback: use canonical classifier from tool-names.ts (never goes stale) + return classifyToolName(tool.name) || 'mcp'; +} + +// ── Shell Renderer ────────────────────────────────────────────────────────── + +function renderShellSection(tool: ToolUsageSummary, caps: DisplayCaps): string[] { + const errorStr = tool.errorCount ? `, ${tool.errorCount} errors` : ''; + const lines: string[] = [`### Shell (${tool.count} calls${errorStr})`, '']; + + const detailed = tool.samples.slice(0, caps.shellDetailed); + for (const sample of detailed) { + lines.push(...renderShellSample(sample, caps.shellStdoutLines)); + } + + const remaining = tool.count - detailed.length; + if (remaining > 0) { + const allOk = !tool.errorCount ? 
' (all exit 0)' : ''; + lines.push(`*...and ${remaining} more shell calls${allOk}*`); + lines.push(''); + } + + return lines; +} + +function renderShellSample(sample: ToolSample, maxStdoutLines: number): string[] { + const d = sample.data; + if (!d || d.category !== 'shell') { + return [`> \`${sample.summary}\``, '']; + } + + const lines: string[] = [`> \`$ ${d.command}\``]; + + if (d.exitCode !== undefined) { + const errorTag = d.errored ? ' **[ERROR]**' : ''; + lines.push(`> Exit: ${d.exitCode}${errorTag}`); + } + + if (d.stdoutTail) { + const tailLines = d.stdoutTail.split('\n').slice(0, maxStdoutLines); + lines.push('> ```'); + for (const tl of tailLines) { + lines.push(`> ${tl}`); + } + lines.push('> ```'); + } else if (d.errored && d.errorMessage) { + const errLines = d.errorMessage.split('\n').slice(0, maxStdoutLines); + lines.push('> ```'); + for (const el of errLines) { + lines.push(`> ${el}`); + } + lines.push('> ```'); + } + + lines.push(''); + return lines; +} + +// ── Write Renderer ────────────────────────────────────────────────────────── + +function renderWriteSection(tool: ToolUsageSummary, caps: DisplayCaps): string[] { + const errorStr = tool.errorCount ? `, ${tool.errorCount} errors` : ''; + const lines: string[] = [`### Write (${tool.count} calls${errorStr})`, '']; + + const detailed = tool.samples.slice(0, caps.writeEditDetailed); + const overflow: string[] = []; + + for (const sample of detailed) { + lines.push(...renderWriteSample(sample, caps.writeEditDiffLines)); + } + + // Overflow: list remaining files + for (const sample of tool.samples.slice(caps.writeEditDetailed)) { + const d = sample.data; + if (d && d.category === 'write') { + const stats = d.diffStats ? ` (+${d.diffStats.added} -${d.diffStats.removed})` : ''; + overflow.push(`\`${d.filePath}\`${stats}`); + } + } + + const remaining = tool.count - detailed.length; + if (remaining > 0) { + const fileList = overflow.length > 0 ? 
`: ${overflow.join(', ')}` : ''; + lines.push(`*...and ${remaining} more writes${fileList}*`); + lines.push(''); + } + + return lines; +} + +function renderWriteSample(sample: ToolSample, maxDiffLines: number): string[] { + const d = sample.data; + if (!d || d.category !== 'write') { + return [`> \`${sample.summary}\``, '']; + } + + const newTag = d.isNewFile ? ' (new file)' : ''; + const statsTag = d.diffStats ? ` (+${d.diffStats.added} lines)` : ''; + const lines: string[] = [`> **\`${d.filePath}\`**${newTag}${statsTag}`]; + + if (d.diff) { + const diffLines = d.diff.split('\n'); + // Skip the header lines (--- and +++) + const bodyLines = diffLines.filter((l) => !l.startsWith('---') && !l.startsWith('+++')); + const capped = bodyLines.slice(0, maxDiffLines); + + lines.push('> ```diff'); + for (const dl of capped) { + lines.push(`> ${dl}`); + } + lines.push('> ```'); + + const truncated = bodyLines.length - capped.length; + if (truncated > 0) { + lines.push(`> *+${truncated} lines truncated*`); + } + } + + if (d.errorMessage) { + const errLines = d.errorMessage.split('\n').slice(0, 3); + lines.push('> **Error:**'); + lines.push('> ```'); + for (const el of errLines) lines.push(`> ${el}`); + lines.push('> ```'); + } + + lines.push(''); + return lines; +} + +// ── Edit Renderer ─────────────────────────────────────────────────────────── + +function renderEditSection(tool: ToolUsageSummary, caps: DisplayCaps): string[] { + const errorStr = tool.errorCount ? `, ${tool.errorCount} errors` : ''; + const lines: string[] = [`### Edit (${tool.count} calls${errorStr})`, '']; + + const detailed = tool.samples.slice(0, caps.writeEditDetailed); + const overflow: string[] = []; + + for (const sample of detailed) { + lines.push(...renderEditSample(sample, caps.writeEditDiffLines)); + } + + for (const sample of tool.samples.slice(caps.writeEditDetailed)) { + const d = sample.data; + if (d && d.category === 'edit') { + const stats = d.diffStats ? 
` (+${d.diffStats.added} -${d.diffStats.removed})` : ''; + overflow.push(`\`${d.filePath}\`${stats}`); + } + } + + const remaining = tool.count - detailed.length; + if (remaining > 0) { + const fileList = overflow.length > 0 ? `: ${overflow.join(', ')}` : ''; + lines.push(`*...and ${remaining} more edits${fileList}*`); + lines.push(''); + } + + return lines; +} + +function renderEditSample(sample: ToolSample, maxDiffLines: number): string[] { + const d = sample.data; + if (!d || d.category !== 'edit') { + return [`> \`${sample.summary}\``, '']; + } + + const statsTag = d.diffStats ? ` (+${d.diffStats.added} -${d.diffStats.removed} lines)` : ''; + const lines: string[] = [`> **\`${d.filePath}\`**${statsTag}`]; + + if (d.diff) { + const diffLines = d.diff.split('\n'); + const bodyLines = diffLines.filter((l) => !l.startsWith('---') && !l.startsWith('+++')); + const capped = bodyLines.slice(0, maxDiffLines); + + lines.push('> ```diff'); + for (const dl of capped) { + lines.push(`> ${dl}`); + } + lines.push('> ```'); + + const truncated = bodyLines.length - capped.length; + if (truncated > 0) { + lines.push(`> *+${truncated} lines truncated*`); + } + } + + if (d.errorMessage) { + const errLines = d.errorMessage.split('\n').slice(0, 3); + lines.push('> **Error:**'); + lines.push('> ```'); + for (const el of errLines) lines.push(`> ${el}`); + lines.push('> ```'); + } + + lines.push(''); + return lines; +} + +// ── Read Renderer ─────────────────────────────────────────────────────────── + +function renderReadSection(tool: ToolUsageSummary, caps: DisplayCaps): string[] { + const errorStr = tool.errorCount ? `, ${tool.errorCount} errors` : ''; + const lines: string[] = [`### Read (${tool.count} calls${errorStr})`, '']; + + const shown = tool.samples.slice(0, caps.readEntries); + for (const sample of shown) { + const d = sample.data; + if (d && d.category === 'read') { + const range = + d.lineStart && d.lineEnd + ? ` (lines ${d.lineStart}-${d.lineEnd})` + : d.lineStart + ? 
` (from line ${d.lineStart})` + : ''; + lines.push(`- \`${d.filePath}\`${range}`); + } else { + lines.push(`- \`${sample.summary}\``); + } + } + + const remaining = tool.count - shown.length; + if (remaining > 0) { + lines.push(`- *...and ${remaining} more files read*`); + } + + lines.push(''); + return lines; +} + +// ── Grep Renderer ─────────────────────────────────────────────────────────── + +function renderGrepSection(tool: ToolUsageSummary, caps: DisplayCaps): string[] { + const errorStr = tool.errorCount ? `, ${tool.errorCount} errors` : ''; + const lines: string[] = [`### Grep (${tool.count} calls${errorStr})`, '']; + + const shown = tool.samples.slice(0, caps.grepGlobSearchFetch); + for (const sample of shown) { + const d = sample.data; + if (d && d.category === 'grep') { + const path = d.targetPath ? ` in \`${d.targetPath}\`` : ''; + const count = d.matchCount !== undefined ? ` — ${d.matchCount} matches` : ''; + lines.push(`- \`"${d.pattern}"\`${path}${count}`); + } else { + lines.push(`- \`${sample.summary}\``); + } + } + + const remaining = tool.count - shown.length; + if (remaining > 0) lines.push(`- *...and ${remaining} more grep searches*`); + lines.push(''); + return lines; +} + +// ── Glob Renderer ─────────────────────────────────────────────────────────── + +function renderGlobSection(tool: ToolUsageSummary, caps: DisplayCaps): string[] { + const errorStr = tool.errorCount ? `, ${tool.errorCount} errors` : ''; + const lines: string[] = [`### Glob (${tool.count} calls${errorStr})`, '']; + + const shown = tool.samples.slice(0, caps.grepGlobSearchFetch); + for (const sample of shown) { + const d = sample.data; + if (d && d.category === 'glob') { + const count = d.resultCount !== undefined ? 
` — ${d.resultCount} files` : ''; + lines.push(`- \`${d.pattern}\`${count}`); + } else { + lines.push(`- \`${sample.summary}\``); + } + } + + const remaining = tool.count - shown.length; + if (remaining > 0) lines.push(`- *...and ${remaining} more glob calls*`); + lines.push(''); + return lines; +} + +// ── Compact Renderer (Search, Fetch, Task, Ask, MCP) ──────────────────────── + +function renderCompactSection( + tool: ToolUsageSummary, + category: string, + caps: DisplayCaps, +): string[] { + const label = COMPACT_LABELS[category] || tool.name; + const errorStr = tool.errorCount ? `, ${tool.errorCount} errors` : ''; + const lines: string[] = [`### ${label} (${tool.count} calls${errorStr})`, '']; + const cap = ['search', 'fetch'].includes(category) ? caps.grepGlobSearchFetch : caps.mcpTaskAsk; + + const shown = tool.samples.slice(0, cap); + for (const sample of shown) { + lines.push(`- ${formatCompactSample(sample, category)}`); + } + + const remaining = tool.count - shown.length; + if (remaining > 0) lines.push(`- *...and ${remaining} more*`); + lines.push(''); + return lines; +} + +const COMPACT_LABELS: Record = { + search: 'Search', + fetch: 'Fetch', + task: 'Task', + ask: 'Ask', + mcp: 'MCP', +}; + +function formatCompactSample(sample: ToolSample, category: string): string { + const d = sample.data; + if (!d) return `\`${sample.summary}\``; + + switch (d.category) { + case 'search': { + const countStr = d.resultCount !== undefined ? ` — ${d.resultCount} results` : ''; + const preview = d.resultPreview ? ` "${d.resultPreview.slice(0, 60)}..."` : ''; + return `"${d.query}"${countStr}${preview}`; + } + case 'fetch': { + const preview = d.resultPreview ? ` — "${d.resultPreview}..."` : ''; + return `\`${d.url}\`${preview}`; + } + case 'task': { + const agentStr = d.agentType ? ` (type: \`${d.agentType}\`)` : ''; + const resultStr = d.resultSummary ? 
` — "${d.resultSummary}"` : ''; + return `"${d.description}"${agentStr}${resultStr}`; + } + case 'ask': + return `"${d.question}"`; + case 'mcp': { + const params = d.params ? `(${d.params})` : ''; + const resultStr = d.result ? ` — "${d.result}"` : ''; + return `\`${d.toolName}${params}\`${resultStr}`; + } + default: + return `\`${sample.summary}\``; + } +} + +// ── Subagent Results Renderer ──────────────────────────────────────────────── + +function renderSubagentResults(results: SubagentResult[], config: VerbosityConfig): string[] { + const lines: string[] = ['## Subagent Results', '']; + + for (const r of results) { + lines.push(`### ${r.description} (${r.taskId})`); + if (r.status === 'completed' && r.result) { + const maxChars = config.task.subagentResultChars; + const text = r.result.length > maxChars ? r.result.slice(0, maxChars) + '\u2026' : r.result; + // Render each line as a blockquote + for (const line of text.split('\n')) { + lines.push(`> ${line}`); + } + } else if (r.status === 'completed') { + lines.push('> ✅ Completed'); + } else { + // Non-completed: show status + lines.push(`> \u26a0\ufe0f ${capitalize(r.status)}`); + } + if (r.toolCallCount > 0) { + lines.push(`> Tools used: ${r.toolCallCount}`); + } + lines.push(''); + } + + lines.push(''); + return lines; +} + +function capitalize(s: string): string { + return s.charAt(0).toUpperCase() + s.slice(1); +} + +// ── Reasoning Chain Renderer ──────────────────────────────────────────────── + +function renderReasoningChain(steps: ReasoningStep[]): string[] { + const lines: string[] = ['## Reasoning Chain', '']; + + for (const step of steps) { + const label = `**${capitalize(step.purpose)}** (step ${step.stepNumber}/${step.totalSteps})`; + const thought = step.thought.length > 200 ? step.thought.slice(0, 200) + '\u2026' : step.thought; + let line = `${step.stepNumber}. 
${label}: ${thought}`; + if (step.nextAction) { + line += `\n \u2192 Next: ${step.nextAction}`; + } + lines.push(line); + } + + lines.push(''); + lines.push(''); + return lines; +} + +// ── Fallback Renderer ─────────────────────────────────────────────────────── + +function renderFallbackSection(tool: ToolUsageSummary): string[] { + const lines: string[] = [`### ${tool.name} (${tool.count} calls)`, '']; + for (const sample of tool.samples.slice(0, 5)) { + lines.push(`- \`${sample.summary}\``); + } + const remaining = tool.count - Math.min(tool.samples.length, 5); + if (remaining > 0) lines.push(`- *...and ${remaining} more*`); + lines.push(''); + return lines; +} diff --git a/src/utils/parser-helpers.ts b/src/utils/parser-helpers.ts new file mode 100644 index 0000000..3574a03 --- /dev/null +++ b/src/utils/parser-helpers.ts @@ -0,0 +1,65 @@ +import * as os from 'os'; +import type { ConversationMessage } from '../types/index.js'; +import { extractRepoFromGitUrl } from './content.js'; + +/** + * Clean and truncate text for use as a session summary. + * Collapses whitespace and newlines into a single line. + */ +export function cleanSummary(text: string, maxLen = 50): string { + return text.replace(/\n/g, ' ').replace(/\s+/g, ' ').trim().slice(0, maxLen); +} + +/** + * Extract a short repo identifier from a working directory path. + * Returns the last two path components joined with '/'. + */ +export function extractRepoFromCwd(cwd: string): string { + if (!cwd) return ''; + const parts = cwd.split('/').filter(Boolean); + if (parts.length >= 2) { + return parts.slice(-2).join('/'); + } + return parts[parts.length - 1] || ''; +} + +/** + * Extract a repo identifier from a git URL (preferred) or fall back to cwd-based derivation. + * Merges codex's extractRepoName + extractRepoFromCwd into one function. 
+ */ +export function extractRepo(opts: { gitUrl?: string; cwd?: string }): string { + if (opts.gitUrl) { + const fromUrl = extractRepoFromGitUrl(opts.gitUrl); + if (fromUrl) return fromUrl; + } + return extractRepoFromCwd(opts.cwd || ''); +} + +/** + * Get the user's home directory reliably. + * Preferred over `process.env.HOME || '~'` which doesn't expand on all platforms. + */ +export function homeDir(): string { + return os.homedir(); +} + +/** + * Trim messages to a balanced tail: keep the last `maxCount` messages + * but ensure at least one user message is included. + * Used by multiple parsers for the handoff conversation section. + */ +export function trimMessages(messages: ConversationMessage[], maxCount = 10): ConversationMessage[] { + const tail = messages.slice(-maxCount); + const hasUser = tail.some((m) => m.role === 'user'); + + if (hasUser || messages.length <= maxCount) return tail; + + // Include the last user message + everything after it, capped at maxCount + for (let i = messages.length - 1; i >= 0; i--) { + if (messages[i].role === 'user') { + return messages.slice(i, i + maxCount); + } + } + + return tail; +} diff --git a/src/utils/platform.ts b/src/utils/platform.ts new file mode 100644 index 0000000..7012e4d --- /dev/null +++ b/src/utils/platform.ts @@ -0,0 +1,17 @@ +/** + * Cross-platform helpers for spawning processes on Windows vs Unix. + * + * On Windows, npm-installed CLIs are `.cmd` shim files that require + * `shell: true` to execute. The `which` binary doesn't exist — use + * `where.exe` instead. + */ + +export const IS_WINDOWS = process.platform === 'win32'; + +/** `'where'` on Windows, `'which'` on Unix */ +export const WHICH_CMD = IS_WINDOWS ? 'where' : 'which'; + +/** Spread into `spawn`/`spawnSync` options to enable shell on Windows */ +export const SHELL_OPTION: { shell: boolean } | Record = IS_WINDOWS + ? 
{ shell: true } + : {}; diff --git a/src/utils/resume.ts b/src/utils/resume.ts index ad352d8..50b1106 100644 --- a/src/utils/resume.ts +++ b/src/utils/resume.ts @@ -1,42 +1,127 @@ -import { spawn } from 'child_process'; -import * as fs from 'fs'; -import * as path from 'path'; -import type { UnifiedSession, SessionSource, SessionContext } from '../types/index.js'; +import { spawn } from 'node:child_process'; +import * as fs from 'node:fs'; +import * as path from 'node:path'; +import type { VerbosityConfig } from '../config/index.js'; +import { getPreset, loadConfig } from '../config/index.js'; +import { ToolNotAvailableError } from '../errors.js'; +import { logger } from '../logger.js'; +import { ALL_TOOLS, adapters } from '../parsers/registry.js'; +import type { SessionContext, SessionSource, UnifiedSession } from '../types/index.js'; +import { + type ForwardResolution, + formatForwardArgs, + type HandoffForwardingOptions, + resolveTargetForwarding, +} from './forward-flags.js'; import { extractContext, saveContext } from './index.js'; -import { SOURCE_LABELS } from './markdown.js'; +import { getSourceLabels, safePath } from './markdown.js'; +import { IS_WINDOWS, WHICH_CMD } from './platform.js'; + +export interface HandoffContextOptions { + preset?: string; + configPath?: string; + chain?: boolean; +} + +export function getToolBinaryCandidates(tool: SessionSource): string[] { + const adapter = adapters[tool]; + if (!adapter) return []; + return [adapter.binaryName, ...(adapter.binaryFallbacks ?? [])]; +} /** - * Resume a session using native CLI commands + * Resolve mapped + passthrough forward args for cross-tool launches. 
*/ -export async function nativeResume(session: UnifiedSession): Promise { - const cwd = session.cwd; +export function resolveCrossToolForwarding( + target: SessionSource, + options?: HandoffForwardingOptions, +): ForwardResolution { + const adapter = adapters[target]; + return resolveTargetForwarding(target, adapter?.mapHandoffFlags, options); +} - switch (session.source) { - case 'codex': - await runCommand('codex', ['-c', `experimental_resume=${session.originalPath}`], cwd); - break; +function hasConfigOverride(args: string[], key: string): boolean { + const keyPrefix = `${key}=`; - case 'claude': - await runCommand('claude', ['--resume', session.id], cwd); - break; + for (let index = 0; index < args.length; index += 1) { + const token = args[index]; - case 'copilot': - await runCommand('copilot', ['--resume', session.id], cwd); - break; + if ((token === '-c' || token === '--config') && index + 1 < args.length) { + const value = args[index + 1]?.trim(); + if (value?.startsWith(keyPrefix)) return true; + index += 1; + continue; + } + + if (token.startsWith('-c=')) { + if (token.slice(3).trim().startsWith(keyPrefix)) return true; + } - case 'gemini': - // Gemini uses --continue to resume the last session in cwd - await runCommand('gemini', ['--continue'], cwd); - break; + if (token.startsWith('--config=')) { + if (token.slice('--config='.length).trim().startsWith(keyPrefix)) return true; + } + } + + return false; +} - case 'opencode': - // OpenCode uses --session to resume a specific session - await runCommand('opencode', ['--session', session.id], cwd); - break; +export function getDefaultHandoffInitArgs(target: SessionSource, forwardedArgs: string[] = []): string[] { + if (target !== 'codex') return []; - default: - throw new Error(`Unknown session source: ${session.source}`); + const defaults: string[] = []; + + if (!hasConfigOverride(forwardedArgs, 'model_reasoning_effort')) { + defaults.push('-c', 'model_reasoning_effort="high"'); + } + + if 
(!hasConfigOverride(forwardedArgs, 'model_reasoning_summary')) { + defaults.push('-c', 'model_reasoning_summary="detailed"'); + } + + if (!hasConfigOverride(forwardedArgs, 'model_supports_reasoning_summaries')) { + defaults.push('-c', 'model_supports_reasoning_summaries=true'); + } + + return defaults; +} + +function resolveHandoffConfig(options?: HandoffContextOptions): VerbosityConfig { + const loaded = loadConfig(options?.configPath); + + let config = loaded; + if (options?.preset) { + try { + config = getPreset(options.preset); + } catch { + // Keep loaded config when an invalid preset is provided. + } } + + if (options?.chain === false) { + config = { + ...config, + agents: { + ...config.agents, + claude: { + ...config.agents.claude, + chainCompactedHistory: false, + }, + }, + }; + } + + return config; +} + +/** + * Resume a session using native CLI commands + */ +export async function nativeResume(session: UnifiedSession): Promise { + const cwd = session.cwd || process.cwd(); + const adapter = adapters[session.source]; + if (!adapter) throw new Error(`Unknown session source: ${session.source}`); + const binaryName = await requireToolBinaryName(session.source); + await runCommand(binaryName, adapter.nativeResumeArgs(session), cwd); } /** @@ -46,47 +131,46 @@ export async function crossToolResume( session: UnifiedSession, target: SessionSource, mode: 'inline' | 'reference' = 'inline', + forwarding?: HandoffForwardingOptions, + contextOptions?: HandoffContextOptions, ): Promise { - const context = await extractContext(session); - const cwd = session.cwd; + const context = await extractContext(session, resolveHandoffConfig(contextOptions)); + const cwd = session.cwd || process.cwd(); // Always save handoff file to project directory (for sandboxed tools like Gemini) const localPath = path.join(cwd, '.continues-handoff.md'); - try { fs.writeFileSync(localPath, context.markdown); } catch { /* non-critical */ } + let handoffWritten = false; + try { + 
fs.writeFileSync(localPath, context.markdown); + handoffWritten = true; + } catch (err) { + logger.debug('resume: failed to write handoff file', localPath, err); + } // Also save to global directory as backup saveContext(context); - // Build prompt based on mode - const prompt = mode === 'inline' - ? buildInlinePrompt(context, session) - : buildReferencePrompt(session, localPath); - - // Each tool has different CLI syntax for accepting a prompt - switch (target) { - case 'codex': - await runCommand('codex', [prompt], cwd); - break; - - case 'claude': - await runCommand('claude', [prompt], cwd); - break; - - case 'copilot': - await runCommand('copilot', ['-i', prompt], cwd); - break; - - case 'gemini': - await runCommand('gemini', [prompt], cwd); - break; - - case 'opencode': - await runCommand('opencode', ['--prompt', prompt], cwd); - break; - - default: - throw new Error(`Unknown target: ${target}`); + // On Windows the prompt references .continues-handoff.md — the write must succeed + if (IS_WINDOWS && !handoffWritten) { + throw new Error( + `Failed to write handoff file to ${localPath}. Cross-tool resume on Windows requires this file. Check directory permissions.`, + ); } + + // Build prompt based on mode + const prompt = IS_WINDOWS + ? buildWindowsSafePrompt(session) + : mode === 'inline' + ? buildInlinePrompt(context, session) + : buildReferencePrompt(session); + + const adapter = adapters[target]; + if (!adapter) throw new Error(`Unknown target: ${target}`); + const binaryName = await requireToolBinaryName(target); + + const resolved = resolveCrossToolForwarding(target, forwarding); + const defaultInitArgs = getDefaultHandoffInitArgs(target, resolved.extraArgs); + await runCommand(binaryName, [...defaultInitArgs, ...resolved.extraArgs, ...adapter.crossToolArgs(prompt, cwd)], cwd); } /** @@ -94,10 +178,11 @@ export async function crossToolResume( * The LLM gets everything upfront — no file reading needed. 
*/ function buildInlinePrompt(context: SessionContext, session: UnifiedSession): string { - const sourceLabel = SOURCE_LABELS[session.source] || session.source; + const sourceLabel = getSourceLabels()[session.source] || session.source; // Simple intro — the handoff markdown already has the full table, conversation, and closing directive - const intro = `I'm continuing a coding session from **${sourceLabel}**. Here's the full context:\n\n---\n\n`; + const sessionFileRef = session.originalPath ? ` (original session: \`${safePath(session.originalPath)}\`)` : ''; + const intro = `I'm continuing a coding session from **${sourceLabel}**${sessionFileRef}. Here's the full context:\n\n---\n\n`; return intro + context.markdown; } @@ -106,8 +191,8 @@ function buildInlinePrompt(context: SessionContext, session: UnifiedSession): st * Build a compact reference prompt that points to the handoff file. * Used when --reference flag is passed (for very large sessions). */ -function buildReferencePrompt(session: UnifiedSession, filePath: string): string { - const sourceLabel = SOURCE_LABELS[session.source] || session.source; +function buildReferencePrompt(session: UnifiedSession): string { + const sourceLabel = getSourceLabels()[session.source] || session.source; return [ `# 🔄 Session Handoff`, @@ -118,17 +203,41 @@ function buildReferencePrompt(session: UnifiedSession, filePath: string): string `|--------|-------|`, `| Previous tool | ${sourceLabel} |`, `| Working directory | \`${session.cwd}\` |`, + session.originalPath ? `| Original session file | \`${safePath(session.originalPath)}\` |` : '', `| Context file | \`.continues-handoff.md\` |`, session.summary ? `| Last task | ${session.summary.slice(0, 80)} |` : '', ``, `Read \`.continues-handoff.md\` first, then continue the work.`, - ].filter(Boolean).join('\n'); + ] + .filter(Boolean) + .join('\n'); +} + +/** + * Build a single-line, cmd.exe-safe prompt for Windows cross-tool handoff. 
+ * + * On Windows, `spawn()` with `shell: true` passes args through `cmd.exe`, + * which treats embedded newlines as command separators and splits on shell + * metacharacters (`|`, `&`, `>`, `<`, `^`, `%`, `!`, backticks, `"`). + * Additionally, `cmd.exe` has an 8191-character command-line limit. + * + * Since `.continues-handoff.md` is already written to the project directory, + * this prompt simply instructs the target tool to read that file. + */ +export function buildWindowsSafePrompt(session: UnifiedSession): string { + return `Continuing a coding session from ${session.source}. Read the file .continues-handoff.md in the current directory for full context and continue where it left off.`; } /** * Resume a session - automatically chooses native or cross-tool */ -export async function resume(session: UnifiedSession, target?: SessionSource, mode: 'inline' | 'reference' = 'inline'): Promise { +export async function resume( + session: UnifiedSession, + target?: SessionSource, + mode: 'inline' | 'reference' = 'inline', + forwarding?: HandoffForwardingOptions, + contextOptions?: HandoffContextOptions, +): Promise { const actualTarget = target || session.source; if (actualTarget === session.source) { @@ -136,7 +245,7 @@ export async function resume(session: UnifiedSession, target?: SessionSource, mo await nativeResume(session); } else { // Different tool - use cross-tool injection - await crossToolResume(session, actualTarget, mode); + await crossToolResume(session, actualTarget, mode, forwarding, contextOptions); } } @@ -145,11 +254,14 @@ export async function resume(session: UnifiedSession, target?: SessionSource, mo */ function runCommand(command: string, args: string[], cwd: string, stdinData?: string): Promise { return new Promise((resolve, reject) => { - const child = spawn(command, args, { - cwd, - stdio: stdinData ? ['pipe', 'inherit', 'inherit'] : 'inherit', - shell: false, - }); + const stdio: import('node:child_process').StdioOptions = stdinData ? 
['pipe', 'inherit', 'inherit'] : 'inherit'; + + // On Windows, invoke cmd.exe explicitly to handle .cmd/.bat shims. + // Args stay in the array — no shell:true (avoids DEP0190), no string + // concatenation (avoids command-injection risk). + const child = IS_WINDOWS + ? spawn(process.env.ComSpec ?? 'cmd.exe', ['/c', command, ...args], { cwd, stdio }) + : spawn(command, args, { cwd, stdio }); if (stdinData && child.stdin) { child.stdin.write(stdinData); @@ -171,60 +283,69 @@ function runCommand(command: string, args: string[], cwd: string, stdinData?: st } /** - * Check if a CLI tool is available + * Check if a CLI tool is available by binary name */ -export async function isToolAvailable(tool: SessionSource): Promise { +async function isBinaryAvailable(binaryName: string): Promise { return new Promise((resolve) => { - const child = spawn('which', [tool], { stdio: 'ignore' }); + const child = spawn(WHICH_CMD, [binaryName], { stdio: 'ignore' }); child.on('close', (code) => resolve(code === 0)); child.on('error', () => resolve(false)); }); } +export async function resolveToolBinaryName( + tool: SessionSource, + isAvailable: (binaryName: string) => Promise = isBinaryAvailable, +): Promise { + for (const candidate of getToolBinaryCandidates(tool)) { + if (await isAvailable(candidate)) return candidate; + } + return null; +} + +async function requireToolBinaryName(tool: SessionSource): Promise { + const binaryName = await resolveToolBinaryName(tool); + if (binaryName) return binaryName; + + const adapter = adapters[tool]; + throw new ToolNotAvailableError(adapter?.label ?? 
tool); +} + /** * Get available tools */ export async function getAvailableTools(): Promise { - const tools: SessionSource[] = []; - - const [hasCodex, hasClaude, hasCopilot, hasGemini, hasOpencode] = await Promise.all([ - isToolAvailable('codex'), - isToolAvailable('claude'), - isToolAvailable('copilot'), - isToolAvailable('gemini'), - isToolAvailable('opencode'), - ]); - - if (hasCodex) tools.push('codex'); - if (hasClaude) tools.push('claude'); - if (hasCopilot) tools.push('copilot'); - if (hasGemini) tools.push('gemini'); - if (hasOpencode) tools.push('opencode'); - - return tools; + const checks = await Promise.allSettled( + ALL_TOOLS.map(async (name) => ({ + name, + ok: (await resolveToolBinaryName(name)) !== null, + })), + ); + + return checks + .filter( + (r): r is PromiseFulfilledResult<{ name: SessionSource; ok: boolean }> => r.status === 'fulfilled' && r.value.ok, + ) + .map((r) => r.value.name); } /** * Get resume command for display purposes */ -export function getResumeCommand(session: UnifiedSession, target?: SessionSource): string { +export function getResumeCommand( + session: UnifiedSession, + target?: SessionSource, + forwarding?: HandoffForwardingOptions, +): string { const actualTarget = target || session.source; if (actualTarget === session.source) { - switch (session.source) { - case 'codex': - return `codex -c experimental_resume="${session.originalPath}"`; - case 'claude': - return `claude --resume ${session.id}`; - case 'copilot': - return `copilot --resume ${session.id}`; - case 'gemini': - return `gemini --continue`; - case 'opencode': - return `opencode --session ${session.id}`; - } + return adapters[session.source].resumeCommandDisplay(session); } - // Cross-tool - return `continues resume ${session.id} --in ${actualTarget}`; + const resolved = resolveCrossToolForwarding(actualTarget, forwarding); + const defaultInitArgs = getDefaultHandoffInitArgs(actualTarget, resolved.extraArgs); + const suffixArgs = [...defaultInitArgs, 
...resolved.extraArgs]; + const suffix = suffixArgs.length > 0 ? ` ${formatForwardArgs(suffixArgs)}` : ''; + return `continues resume ${session.id} --in ${actualTarget}${suffix}`; } diff --git a/src/utils/slug.ts b/src/utils/slug.ts new file mode 100644 index 0000000..d878e83 --- /dev/null +++ b/src/utils/slug.ts @@ -0,0 +1,83 @@ +import * as fs from 'fs'; +import { IS_WINDOWS } from './platform.js'; + +/** + * Derive cwd from a slug directory name using recursive backtracking. + * Slugs replace `/` and `.` with `-` in the directory name, e.g.: + * "Users-evolution-Sites-localhost-dzcm-test" → "/Users/evolution/Sites/localhost/dzcm.test" + * + * At each dash, tries: path separator `/`, dot `.`, or literal `-`. + * Validates candidates with fs.existsSync(). Falls back to naive slash replacement. + */ +export function cwdFromSlug(slug: string): string { + const parts = slug.split('-'); + let best: string | null = null; + const isDriveSlug = parts.length > 0 && /^[A-Za-z]$/.test(parts[0] || ''); + + function candidatePaths(segments: string[]): string[] { + const unixPath = '/' + segments.join('/'); + if (segments.length > 0 && /^[A-Za-z]$/.test(segments[0] || '')) { + const drive = segments[0].toUpperCase(); + const rest = segments.slice(1).join('/'); + const winPath = rest ? `${drive}:/${rest}` : `${drive}:/`; + // On Windows prefer drive-letter paths; on Unix keep legacy order. + return IS_WINDOWS ? 
[winPath, unixPath] : [unixPath, winPath]; + } + return [unixPath]; + } + + function resolve(idx: number, segments: string[]): void { + if (best) return; // already found a match + + if (idx >= parts.length) { + for (const p of candidatePaths(segments)) { + if (fs.existsSync(p)) { + best = p; + break; + } + } + return; + } + + const part = parts[idx]; + + // Option 1: treat dash as path separator (new directory) + resolve(idx + 1, [...segments, part]); + if (best) return; + + if (segments.length > 0) { + const last = segments[segments.length - 1]; + const rest = segments.slice(0, -1); + + // Option 2: treat dash as dot (e.g. dzcm-test → dzcm.test) + resolve(idx + 1, [...rest, last + '.' + part]); + if (best) return; + + // Option 3: keep as literal dash (e.g. laravel-contentai) + resolve(idx + 1, [...rest, last + '-' + part]); + } + } + + resolve(0, []); + if (best) return best; + + if (isDriveSlug && IS_WINDOWS) { + const drive = parts[0].toUpperCase(); + const rest = parts.slice(1).join('/'); + return rest ? `${drive}:/${rest}` : `${drive}:/`; + } + + return '/' + slug.replace(/-/g, '/'); +} + +/** + * Check if a session's cwd matches or is a subdirectory of targetDir. + * Returns false for empty session cwds or root `/` target. + */ +export function matchesCwd(sessionCwd: string, targetDir: string): boolean { + if (!sessionCwd || !targetDir) return false; + const normTarget = targetDir.replace(/\/+$/, ''); + if (normTarget === '') return false; // guard against root '/' + const normSession = sessionCwd.replace(/\/+$/, ''); + return normSession === normTarget || normSession.startsWith(normTarget + '/'); +} diff --git a/src/utils/tool-extraction.ts b/src/utils/tool-extraction.ts new file mode 100644 index 0000000..74befc3 --- /dev/null +++ b/src/utils/tool-extraction.ts @@ -0,0 +1,431 @@ +/** + * Shared tool extraction for parsers using Anthropic-style content blocks + * (tool_use / tool_result). Used by Claude, Droid, and Cursor parsers. 
+ */ +import type { + AskSampleData, + EditSampleData, + FetchSampleData, + GlobSampleData, + GrepSampleData, + McpSampleData, + ReadSampleData, + ReasoningSampleData, + SearchSampleData, + ShellSampleData, + StructuredToolSample, + TaskSampleData, + ToolUsageSummary, + WriteSampleData, +} from '../types/index.js'; +import type { VerbosityConfig } from '../config/index.js'; +import { getPreset } from '../config/index.js'; +import { + ASK_TOOLS, + EDIT_TOOLS, + FETCH_TOOLS, + GLOB_TOOLS, + GREP_TOOLS, + READ_TOOLS, + SEARCH_TOOLS, + SHELL_TOOLS, + SKIP_TOOLS, + TASK_OUTPUT_TOOLS, + TASK_TOOLS, + WRITE_TOOLS, +} from '../types/tool-names.js'; +import { countDiffStats, extractStdoutTail, formatEditDiff, formatNewFileDiff } from './diff.js'; +import { + extractExitCode, + fetchSummary, + fileSummary, + globSummary, + grepSummary, + mcpSummary, + SummaryCollector, + searchSummary, + shellSummary, + subagentSummary, + truncate, + withResult, +} from './tool-summarizer.js'; + +/** Minimal tool_use block shape — works across Claude, Droid, Cursor */ +interface ToolUseItem { + type: 'tool_use'; + id?: string; + name: string; + input?: Record; +} + +/** Minimal tool_result block shape */ +interface ToolResultItem { + type: 'tool_result'; + tool_use_id?: string; + content?: string | Array<{ type: string; text?: string }>; + is_error?: boolean; +} + +/** A message with content blocks (Anthropic format) */ +export interface AnthropicMessage { + role?: string; + content: Array<{ type: string; [key: string]: unknown }>; +} + +/** Stored tool result with full text and error flag */ +interface ToolResultEntry { + text: string; + isError: boolean; +} + +/** + * Extract tool usage summaries and files modified from Anthropic-style messages. + * + * Works with any parser that uses tool_use / tool_result content blocks: + * Claude, Droid, and Cursor all share this pattern. + * + * Two-pass approach: + * 1. Collect all tool_result outputs by tool_use_id (with generous char limits) + * 2. 
Process tool_use blocks with matched results, constructing structured data + */ +export function extractAnthropicToolData( + messages: AnthropicMessage[], + config: VerbosityConfig = getPreset('standard'), +): { + summaries: ToolUsageSummary[]; + filesModified: string[]; +} { + const collector = new SummaryCollector(config); + const toolResultMap = new Map(); + + // Generous first-pass limit — category-specific limits are applied in the second pass + const firstPassMaxChars = Math.max( + config.shell.maxChars, + config.write.maxChars, + config.edit.maxChars, + config.mcp.resultChars, + config.task.subagentResultChars, + config.mcp.thinkingTools.maxReasoningChars, + ); + + // First pass: collect all tool_result blocks (generous limits for rich extraction) + for (const msg of messages) { + if (!Array.isArray(msg.content)) continue; + for (const item of msg.content) { + if (item.type !== 'tool_result') continue; + const tr = item as unknown as ToolResultItem; + if (!tr.tool_use_id) continue; + + let text = ''; + if (typeof tr.content === 'string') { + text = tr.content; + } else if (Array.isArray(tr.content)) { + text = tr.content.find((c) => c.type === 'text')?.text || ''; + } + if (text) { + toolResultMap.set(tr.tool_use_id, { + text: text.slice(0, firstPassMaxChars), + isError: tr.is_error === true, + }); + } + } + } + + // Second pass: process tool_use blocks with structured data extraction + for (const msg of messages) { + if (!Array.isArray(msg.content)) continue; + for (const item of msg.content) { + if (item.type !== 'tool_use') continue; + const tu = item as unknown as ToolUseItem; + const name = tu.name; + if (!name || SKIP_TOOLS.has(name)) continue; + + const input = tu.input || {}; + const entry = tu.id ? toolResultMap.get(tu.id) : undefined; + const result = entry?.text; + const isError = entry?.isError ?? 
false; + const fp = (input.file_path as string) || (input.path as string) || ''; + + if (SHELL_TOOLS.has(name)) { + const cmd = (input.command as string) || (input.cmd as string) || ''; + const exitCode = extractExitCode(result); + const errored = isError || (exitCode !== undefined && exitCode !== 0); + const stdoutTail = result ? extractStdoutTail(result, config.shell.stdoutLines) : undefined; + + const errorMessage = errored && result ? result.slice(0, config.shell.maxChars) : undefined; + const data: ShellSampleData = { + category: 'shell', + command: cmd, + ...(exitCode !== undefined ? { exitCode } : {}), + ...(stdoutTail ? { stdoutTail } : {}), + ...(errored ? { errored } : {}), + ...(errorMessage ? { errorMessage } : {}), + }; + collector.add('Bash', shellSummary(cmd, result), { data, isError: errored }); + } else if (READ_TOOLS.has(name)) { + const lineStart = (input.offset as number) || (input.start_line as number) || undefined; + const lineEnd = (input.limit as number) + ? (lineStart || 1) + (input.limit as number) - 1 + : (input.end_line as number) || undefined; + + const data: ReadSampleData = { + category: 'read', + filePath: fp, + ...(lineStart ? { lineStart } : {}), + ...(lineEnd ? { lineEnd } : {}), + }; + collector.add(name, withResult(fileSummary('read', fp), result?.slice(0, 80)), { + data, + filePath: fp, + }); + } else if (WRITE_TOOLS.has(name)) { + const content = (input.content as string) || ''; + let diff: string | undefined; + let diffStats: { added: number; removed: number } | undefined; + // Derive isNewFile from context: tool name hints, result text, or leave undefined + const isNewFile = ['Create', 'create_file'].includes(name) + ? true + : result && /\b(created|new file|overwr)/i.test(result) + ? /\b(created|new file)\b/i.test(result) + : undefined; + + if (content) { + const diffResult = formatNewFileDiff(content, fp, 200); + diff = diffResult.diff; + diffStats = countDiffStats(diff); + } + + const writeErrorMsg = isError && result ? 
result.slice(0, config.write.maxChars) : undefined; + const data: WriteSampleData = { + category: 'write', + filePath: fp, + ...(isNewFile !== undefined ? { isNewFile } : {}), + ...(diff ? { diff } : {}), + ...(diffStats ? { diffStats } : {}), + ...(writeErrorMsg ? { errorMessage: writeErrorMsg } : {}), + }; + collector.add(name, withResult(fileSummary('write', fp, diffStats, isNewFile ?? false), result?.slice(0, 80)), { + data, + filePath: fp, + isWrite: true, + isError, + }); + } else if (EDIT_TOOLS.has(name)) { + const oldStr = (input.old_string as string) || ''; + const newStr = (input.new_string as string) || ''; + let diff: string | undefined; + let diffStats: { added: number; removed: number } | undefined; + + if (oldStr || newStr) { + const diffResult = formatEditDiff(oldStr, newStr, fp, 200); + diff = diffResult.diff; + diffStats = countDiffStats(diff); + } + + const editErrorMsg = isError && result ? result.slice(0, config.edit.maxChars) : undefined; + const data: EditSampleData = { + category: 'edit', + filePath: fp, + ...(diff ? { diff } : {}), + ...(diffStats ? { diffStats } : {}), + ...(editErrorMsg ? { errorMessage: editErrorMsg } : {}), + }; + collector.add(name, withResult(fileSummary('edit', fp, diffStats), result?.slice(0, 80)), { + data, + filePath: fp, + isWrite: true, + isError, + }); + } else if (GREP_TOOLS.has(name)) { + const pattern = (input.pattern as string) || (input.query as string) || ''; + const targetPath = (input.path as string) || ''; + // Try to parse match count from result (e.g. "Found 5 files" or line count) + const matchCount = result ? parseMatchCount(result) : undefined; + + const data: GrepSampleData = { + category: 'grep', + pattern, + ...(targetPath ? { targetPath } : {}), + ...(matchCount !== undefined ? 
{ matchCount } : {}), + }; + collector.add('Grep', withResult(grepSummary(pattern, targetPath), result?.slice(0, 80)), { data }); + } else if (GLOB_TOOLS.has(name)) { + const pattern = (input.pattern as string) || (input.path as string) || ''; + const resultCount = result ? parseFileCount(result) : undefined; + + const data: GlobSampleData = { + category: 'glob', + pattern, + ...(resultCount !== undefined ? { resultCount } : {}), + }; + collector.add('Glob', withResult(globSummary(pattern), result?.slice(0, 80)), { data }); + } else if (FETCH_TOOLS.has(name)) { + const url = (input.url as string) || ''; + const data: FetchSampleData = { + category: 'fetch', + url, + ...(result ? { resultPreview: result.slice(0, 100) } : {}), + }; + collector.add('WebFetch', fetchSummary(url), { data }); + } else if (SEARCH_TOOLS.has(name)) { + const query = (input.query as string) || ''; + const resultCount = result ? parseMatchCount(result) : undefined; + const resultPreview = result ? result.slice(0, 100) : undefined; + const data: SearchSampleData = { + category: 'search', + query, + ...(resultCount !== undefined ? { resultCount } : {}), + ...(resultPreview ? { resultPreview } : {}), + }; + collector.add('WebSearch', searchSummary(query), { data }); + } else if (TASK_TOOLS.has(name)) { + const description = (input.description as string) || ''; + const agentType = (input.subagent_type as string) || undefined; + const data: TaskSampleData = { + category: 'task', + description, + ...(agentType ? { agentType } : {}), + }; + collector.add('Task', subagentSummary(description, agentType), { data }); + } else if (TASK_OUTPUT_TOOLS.has(name)) { + const description = (input.content as string) || (input.result as string) || ''; + const agentType = (input.subagent_type as string) || undefined; + const data: TaskSampleData = { + category: 'task', + description, + ...(agentType ? { agentType } : {}), + ...(result ? 
{ resultSummary: result.slice(0, 100) } : {}), + }; + collector.add('TaskOutput', subagentSummary(description, agentType), { data }); + } else if (ASK_TOOLS.has(name)) { + const question = truncate((input.question as string) || (input.prompt as string) || '', 80); + const data: AskSampleData = { category: 'ask', question }; + collector.add('AskUserQuestion', `ask: "${question}"`, { data }); + } else if (name.startsWith('mcp__') || name.includes('___') || name.includes('-')) { + // MCP tools — check for thinking/reasoning tools first + if (config.mcp.thinkingTools.extractReasoning && isThinkingTool(name)) { + const maxChars = config.mcp.thinkingTools.maxReasoningChars; + const thought = truncate((input.thought as string) || '', maxChars); + const outcome = truncate((input.outcome as string) || '', maxChars); + const rawNextAction = input.next_action; + const nextAction = typeof rawNextAction === 'string' + ? truncate(rawNextAction, maxChars) + : rawNextAction + ? truncate(JSON.stringify(rawNextAction), maxChars) + : undefined; + const stepNumber = typeof input.step_number === 'number' ? input.step_number : undefined; + + const data: ReasoningSampleData = { + category: 'reasoning', + toolName: name, + ...(stepNumber !== undefined ? { stepNumber } : {}), + ...(thought ? { thought } : {}), + ...(outcome ? { outcome } : {}), + ...(nextAction ? { nextAction } : {}), + }; + const label = `reasoning step${stepNumber ? ` #${stepNumber}` : ''}: ${truncate(thought || outcome || 'thinking', 60)}`; + collector.add(name, label, { data }); + } else { + const params = truncateParams(input, config.mcp.paramChars); + const data: McpSampleData = { + category: 'mcp', + toolName: name, + ...(params ? { params } : {}), + ...(result ? { result: result.slice(0, config.mcp.resultChars) } : {}), + }; + collector.add(name, mcpSummary(name, JSON.stringify(input).slice(0, config.mcp.paramChars), result?.slice(0, 80)), { data }); + + // Some MCP tools mutate local files (e.g. 
mcp__morph__edit_file). + // Track those paths in filesModified so handoff reflects all edits. + if (name === 'mcp__morph__edit_file') { + const mcpPath = (input.path as string) || (input.file_path as string) || ''; + if (mcpPath) { + collector.trackFile(mcpPath); + } + } + } + } else { + // Generic/unknown tool — treat as MCP-like + const params = truncateParams(input, config.mcp.paramChars); + const data: McpSampleData = { + category: 'mcp', + toolName: name, + ...(params ? { params } : {}), + ...(result ? { result: result.slice(0, config.mcp.resultChars) } : {}), + }; + collector.add(name, withResult(`${name}(${JSON.stringify(input).slice(0, config.mcp.paramChars)})`, result?.slice(0, 80)), { + data, + }); + } + } + } + + return { summaries: collector.getSummaries(), filesModified: collector.getFilesModified() }; +} + +// ── Helpers ───────────────────────────────────────────────────────────────── + +/** Check if a tool name indicates a thinking/reasoning tool */ +export function isThinkingTool(name: string): boolean { + return name.toLowerCase().includes('think'); +} + +/** Truncate each param value to maxChars and format as compact string */ +function truncateParams(input: Record, maxChars = 100): string { + const parts: string[] = []; + for (const [key, val] of Object.entries(input)) { + const str = typeof val === 'string' ? val : JSON.stringify(val) ?? 
''; + parts.push(`${key}=${truncate(str, maxChars)}`); + } + return parts.join(', '); +} + +/** Parse match count from grep result text — returns undefined if ambiguous */ +function parseMatchCount(result: string): number | undefined { + const m = + result.match(/(?:found|matched)\s+(\d+)/i) || + result.match(/(\d+)\s+(?:match|result|hit)/i); + if (m) return parseInt(m[1]); + return undefined; +} + +/** Parse file count from glob result text — returns undefined if ambiguous */ +function parseFileCount(result: string): number | undefined { + const m = + result.match(/(?:found|returned)\s+(\d+)/i) || + result.match(/(\d+)\s+(?:file|match|result|entr)/i); + if (m) return parseInt(m[1]); + return undefined; +} + +/** + * Extract thinking/reasoning highlights from Anthropic-style messages. + * Returns up to `limit` first-line summaries from thinking blocks. + * Shared by Claude, Droid, and Cursor parsers. + */ +export function extractThinkingHighlights( + messages: AnthropicMessage[], + maxHighlights?: number, + config: VerbosityConfig = getPreset('standard'), +): string[] { + const limit = maxHighlights ?? config.thinking.maxHighlights; + const reasoning: string[] = []; + + for (const msg of messages) { + if (reasoning.length >= limit) break; + if (!Array.isArray(msg.content)) continue; + + for (const item of msg.content) { + if (reasoning.length >= limit) break; + if (item.type !== 'thinking') continue; + + const text = (item as { thinking?: string; text?: string }).thinking || (item as { text?: string }).text || ''; + if (text.length > 20) { + const firstLine = text.split(/[.\n]/)[0]?.trim(); + if (firstLine) reasoning.push(truncate(firstLine, 200)); + } + } + } + + return reasoning; +} diff --git a/src/utils/tool-summarizer.ts b/src/utils/tool-summarizer.ts index 05c0379..c380d7b 100644 --- a/src/utils/tool-summarizer.ts +++ b/src/utils/tool-summarizer.ts @@ -1,9 +1,11 @@ /** * Shared tool call summarizer — formatting helpers + SummaryCollector. 
* Each parser normalizes its raw tool events and uses these utilities - * for consistent, concise summaries across all 5 CLIs. + * for consistent, concise summaries across all 7 CLIs. */ -import type { ToolSample, ToolUsageSummary } from '../types/index.js'; +import type { StructuredToolSample, ToolSample, ToolUsageSummary } from '../types/index.js'; +import type { VerbosityConfig } from '../config/index.js'; +import { getPreset } from '../config/index.js'; // ── Formatting Helpers ────────────────────────────────────────────────────── @@ -89,32 +91,80 @@ export function subagentSummary(desc: string, type?: string): string { // ── SummaryCollector ──────────────────────────────────────────────────────── +/** Build per-category sample limits from a VerbosityConfig */ +function buildCategoryLimits(config: VerbosityConfig): Record { + return { + // shell / bash + Bash: config.shell.maxSamples, + shell: config.shell.maxSamples, + // write / create + Write: config.write.maxSamples, + write: config.write.maxSamples, + // edit / patch + Edit: config.edit.maxSamples, + edit: config.edit.maxSamples, + // read + Read: config.read.maxSamples, + read: config.read.maxSamples, + // grep / glob / search / fetch + Grep: config.grep.maxSamples, + Glob: config.grep.maxSamples, + WebSearch: config.grep.maxSamples, + WebFetch: config.grep.maxSamples, + // mcp / task / ask + Task: config.mcp.maxSamplesPerNamespace, + TaskOutput: config.mcp.maxSamplesPerNamespace, + AskUserQuestion: config.mcp.maxSamplesPerNamespace, + }; +} + +const DEFAULT_SAMPLE_LIMIT = 5; + +/** Options for SummaryCollector.add() */ +export interface AddSampleOptions { + /** Structured data for rich rendering */ + data?: StructuredToolSample; + /** File path associated with this invocation */ + filePath?: string; + /** Whether this invocation modified the file */ + isWrite?: boolean; + /** Whether this invocation resulted in an error */ + isError?: boolean; +} + /** * Accumulates tool call summaries by category 
(tool name). - * Keeps up to `maxSamples` representative samples per category - * and tracks files modified. + * Keeps up to N representative samples per category (category-aware limits) + * and tracks files modified and error counts. */ export class SummaryCollector { - private data = new Map(); + private data = new Map(); private files = new Set(); - private maxSamples: number; + private categoryLimits: Record; - constructor(maxSamples = 3) { - this.maxSamples = maxSamples; + constructor(config?: VerbosityConfig) { + const resolved = config ?? getPreset('standard'); + this.categoryLimits = buildCategoryLimits(resolved); } - /** Add a tool invocation. Optionally tracks file modification. */ - add(category: string, summary: string, filePath?: string, isWrite?: boolean): void { + /** Add a tool invocation. Optionally tracks file modification and errors. */ + add(category: string, summary: string, opts?: AddSampleOptions): void { if (!this.data.has(category)) { - this.data.set(category, { count: 0, samples: [] }); + this.data.set(category, { count: 0, errorCount: 0, samples: [] }); } const entry = this.data.get(category)!; entry.count++; - if (entry.samples.length < this.maxSamples) { - entry.samples.push({ summary }); + if (opts?.isError) entry.errorCount++; + + const maxSamples = this.categoryLimits[category] ?? DEFAULT_SAMPLE_LIMIT; + if (entry.samples.length < maxSamples) { + const sample: ToolSample = { summary }; + if (opts?.data) sample.data = opts.data; + entry.samples.push(sample); } - if (isWrite && filePath) { - this.files.add(filePath); + + if (opts?.isWrite && opts?.filePath) { + this.files.add(opts.filePath); } } @@ -125,9 +175,10 @@ export class SummaryCollector { /** Get aggregated tool usage summaries */ getSummaries(): ToolUsageSummary[] { - return Array.from(this.data.entries()).map(([name, { count, samples }]) => ({ + return Array.from(this.data.entries()).map(([name, { count, errorCount, samples }]) => ({ name, count, + ...(errorCount > 0 ? 
{ errorCount } : {}), samples, })); } diff --git a/test-fixtures/amp/threads/test-thread-1.json b/test-fixtures/amp/threads/test-thread-1.json new file mode 100644 index 0000000..ff24446 --- /dev/null +++ b/test-fixtures/amp/threads/test-thread-1.json @@ -0,0 +1,92 @@ +{ + "id": "test-thread-1", + "title": "Create a login form component", + "created": 1740470400000, + "messages": [ + { + "role": "user", + "messageId": 1, + "content": [ + { "type": "text", "text": "Create a login form component with email and password fields" } + ] + }, + { + "role": "assistant", + "messageId": 2, + "content": [ + { "type": "text", "text": "I'll create a login form component with email and password fields, including form validation and error handling." }, + { "type": "tool_use", "text": "Writing src/components/LoginForm.tsx" } + ] + }, + { + "role": "user", + "messageId": 3, + "content": [ + { "type": "text", "text": "Add a 'Remember me' checkbox and a forgot password link" } + ] + }, + { + "role": "assistant", + "messageId": 4, + "content": [ + { "type": "text", "text": "I'll add a 'Remember me' checkbox and a 'Forgot password?' link below the form fields." }, + { "type": "tool_use", "text": "Updating src/components/LoginForm.tsx" } + ] + }, + { + "role": "user", + "messageId": 5, + "content": [ + { "type": "text", "text": "Now add loading state during form submission" } + ] + }, + { + "role": "assistant", + "messageId": 6, + "content": [ + { "type": "text", "text": "Done! The login form now shows a spinner during submission and disables the button. TODO: add rate limiting for failed attempts. Next step: integrate with the auth API." 
} + ] + } + ], + "usageLedger": { + "events": [ + { + "model": "claude-sonnet-4-20250514", + "credits": 5, + "tokens": { "input": 150, "output": 200 }, + "operationType": "conversation", + "fromMessageId": 1, + "toMessageId": 2 + }, + { + "model": "claude-sonnet-4-20250514", + "credits": 3, + "tokens": { "input": 0, "output": 15 }, + "operationType": "title-generation", + "fromMessageId": 1, + "toMessageId": 1 + }, + { + "model": "claude-sonnet-4-20250514", + "credits": 8, + "tokens": { "input": 350, "output": 450 }, + "operationType": "conversation", + "fromMessageId": 3, + "toMessageId": 4 + }, + { + "model": "claude-sonnet-4-20250514", + "credits": 6, + "tokens": { "input": 500, "output": 300 }, + "operationType": "conversation", + "fromMessageId": 5, + "toMessageId": 6 + } + ] + }, + "env": { + "initial": { + "tags": ["model:claude-sonnet-4-20250514", "org:anthropic"] + } + } +} diff --git a/test-fixtures/amp/threads/test-thread-edge.json b/test-fixtures/amp/threads/test-thread-edge.json new file mode 100644 index 0000000..0846301 --- /dev/null +++ b/test-fixtures/amp/threads/test-thread-edge.json @@ -0,0 +1,44 @@ +{ + "id": "test-thread-edge", + "created": 1740470500000, + "messages": [ + { + "role": "user", + "messageId": 1, + "content": [ + { "type": "text", "text": "Fix the null pointer bug in the checkout flow" } + ] + }, + { + "role": "assistant", + "messageId": 2, + "content": [] + }, + { + "role": "assistant", + "messageId": 3, + "content": [ + { "type": "text", "text": "" } + ] + }, + { + "role": "user", + "messageId": 4, + "content": [ + { "type": "text", "text": "Are you there?" } + ] + }, + { + "role": "assistant", + "messageId": 5, + "content": [ + { "type": "text", "text": "Sorry for the delay. I found the issue — the cart total is accessed before null checking." 
}, + { "type": "tool_use", "text": "Reading src/checkout.ts" }, + { "type": "provider", "provider": "anthropic" } + ] + } + ], + "usageLedger": { + "events": [] + } +} diff --git a/test-fixtures/antigravity/code_tracker/test-project/session-binary.jsonl b/test-fixtures/antigravity/code_tracker/test-project/session-binary.jsonl new file mode 100644 index 0000000..d46517f --- /dev/null +++ b/test-fixtures/antigravity/code_tracker/test-project/session-binary.jsonl @@ -0,0 +1,6 @@ +{"type": "user", "content": "Set up a WebSocket server with authentication", "timestamp": "2025-02-25T11:00:00Z"} + +{"type": "assistant", "content": "I will create a WebSocket server using the ws library with JWT-based authentication on the upgrade request.", "timestamp": "2025-02-25T11:00:08Z"} + +{"type": "user", "content": "Add heartbeat mechanism to detect stale connections", "timestamp": "2025-02-25T11:01:00Z"} +{"type": "assistant", "content": "Added a ping/pong heartbeat every 30 seconds. Connections that miss 2 consecutive pongs are terminated.", "timestamp": "2025-02-25T11:01:12Z"} diff --git a/test-fixtures/antigravity/code_tracker/test-project/session.jsonl b/test-fixtures/antigravity/code_tracker/test-project/session.jsonl new file mode 100644 index 0000000..a000e44 --- /dev/null +++ b/test-fixtures/antigravity/code_tracker/test-project/session.jsonl @@ -0,0 +1,6 @@ +{"type": "user", "content": "Implement a merge sort algorithm in TypeScript", "timestamp": "2025-02-25T10:00:00Z"} +{"type": "assistant", "content": "I'll implement a merge sort algorithm in TypeScript. The implementation will be generic, supporting any comparable type via a comparator function.", "timestamp": "2025-02-25T10:00:05Z"} +{"type": "user", "content": "Now add unit tests for edge cases: empty array, single element, already sorted, reverse sorted", "timestamp": "2025-02-25T10:01:00Z"} +{"type": "assistant", "content": "Here are the unit tests covering all edge cases:\n\n1. Empty array → returns []\n2. 
Single element → returns same\n3. Already sorted → maintains order\n4. Reverse sorted → correctly sorts\n5. Duplicate elements → handles correctly", "timestamp": "2025-02-25T10:01:10Z"} +{"type": "user", "content": "Add a benchmark comparing it against Array.sort()", "timestamp": "2025-02-25T10:02:00Z"} +{"type": "assistant", "content": "Benchmark results show merge sort is competitive for large arrays (>10K elements) but Array.sort() wins on smaller inputs due to lower overhead.", "timestamp": "2025-02-25T10:02:15Z"} diff --git a/test-fixtures/cline/tasks/test-task-1/ui_messages.json b/test-fixtures/cline/tasks/test-task-1/ui_messages.json new file mode 100644 index 0000000..2597944 --- /dev/null +++ b/test-fixtures/cline/tasks/test-task-1/ui_messages.json @@ -0,0 +1,65 @@ +[ + { + "ts": 1740470400000, + "type": "say", + "say": "text", + "text": "Create a React component for a dashboard with charts and KPI cards" + }, + { + "ts": 1740470401000, + "type": "say", + "say": "api_req_started", + "text": "{\"request\":\"POST /v1/messages\",\"tokensIn\":100,\"tokensOut\":0,\"cacheWrites\":50,\"cacheReads\":30,\"cost\":0.001}" + }, + { + "ts": 1740470402000, + "type": "say", + "say": "text", + "text": "I'll create a dashboard component with chart widgets and KPI summary cards...", + "partial": true + }, + { + "ts": 1740470403000, + "type": "say", + "say": "text", + "text": "I'll create a dashboard component with chart widgets and KPI summary cards. The layout will use a responsive CSS grid.", + "partial": true + }, + { + "ts": 1740470405000, + "type": "say", + "say": "api_req_started", + "text": "{\"request\":\"POST /v1/messages\",\"tokensIn\":250,\"tokensOut\":300,\"cacheWrites\":20,\"cacheReads\":80,\"cost\":0.002}" + }, + { + "ts": 1740470406000, + "type": "say", + "say": "user_feedback", + "text": "Also add a date range picker for filtering the data" + }, + { + "ts": 1740470407000, + "type": "say", + "say": "reasoning", + "text": "The user wants date filtering. 
I should add a date range picker component that controls the data fetched by the charts and KPI cards. I'll use a controlled component pattern." + }, + { + "ts": 1740470408000, + "type": "say", + "say": "api_req_started", + "text": "{\"request\":\"POST /v1/messages\",\"tokensIn\":400,\"tokensOut\":500,\"cost\":0.003}" + }, + { + "ts": 1740470409000, + "type": "say", + "say": "text", + "text": "I've added a DateRangePicker component that filters all dashboard data.", + "partial": true + }, + { + "ts": 1740470410000, + "type": "say", + "say": "completion_result", + "text": "Dashboard component created at src/Dashboard.tsx with charts, KPI cards, and date filtering.\n- [ ] Add export to CSV functionality\nNext step: connect to the real API endpoints" + } +] diff --git a/test-fixtures/kilo-code/tasks/test-task-1/ui_messages.json b/test-fixtures/kilo-code/tasks/test-task-1/ui_messages.json new file mode 100644 index 0000000..b725d17 --- /dev/null +++ b/test-fixtures/kilo-code/tasks/test-task-1/ui_messages.json @@ -0,0 +1,51 @@ +[ + { + "ts": 1740472000000, + "type": "say", + "say": "text", + "text": "Implement rate limiting middleware for our Express API" + }, + { + "ts": 1740472001000, + "type": "say", + "say": "api_req_started", + "text": "{\"request\":\"POST /v1/messages\",\"tokensIn\":90,\"tokensOut\":0,\"cost\":0}" + }, + { + "ts": 1740472002000, + "type": "say", + "say": "text", + "text": "I'll implement rate limiting using a sliding window algorithm with Redis as the backing store...", + "partial": true + }, + { + "ts": 1740472004000, + "type": "say", + "say": "api_req_started", + "text": "{\"request\":\"POST /v1/messages\",\"tokensIn\":200,\"tokensOut\":250,\"cost\":0.001}" + }, + { + "ts": 1740472005000, + "type": "say", + "say": "user_feedback", + "text": "Add separate limits for authenticated and anonymous users" + }, + { + "ts": 1740472006000, + "type": "say", + "say": "reasoning", + "text": "Need different rate limit tiers: anonymous users get 30 req/min, 
authenticated users get 100 req/min. I should check for the auth token in the middleware." + }, + { + "ts": 1740472007000, + "type": "say", + "say": "api_req_started", + "text": "{\"request\":\"POST /v1/messages\",\"tokensIn\":350,\"tokensOut\":400,\"cost\":0.002}" + }, + { + "ts": 1740472008000, + "type": "say", + "say": "completion_result", + "text": "Rate limiting middleware created at src/middleware/rateLimit.ts with tiered limits." + } +] diff --git a/test-fixtures/kiro/workspace-sessions/test-workspace/test-session-1.json b/test-fixtures/kiro/workspace-sessions/test-workspace/test-session-1.json new file mode 100644 index 0000000..969c268 --- /dev/null +++ b/test-fixtures/kiro/workspace-sessions/test-workspace/test-session-1.json @@ -0,0 +1,36 @@ +{ + "sessionId": "test-session-1", + "title": "Add dark mode toggle", + "workspacePath": "/Users/dev/my-app", + "selectedModel": "claude-sonnet-4-20250514", + "history": [ + { + "message": { + "role": "user", + "content": "Add a dark mode toggle to the app header", + "id": "msg-1" + } + }, + { + "message": { + "role": "assistant", + "content": "I'll add a dark mode toggle component to your app header. This will include a sun/moon icon that switches the theme using CSS custom properties.", + "id": "msg-2" + } + }, + { + "message": { + "role": "user", + "content": "Make sure it persists the preference in localStorage", + "id": "msg-3" + } + }, + { + "message": { + "role": "assistant", + "content": "Done! The dark mode preference now persists in localStorage under the key 'theme-preference'. 
On page load, it checks the stored value before applying the default system preference.", + "id": "msg-4" + } + } + ] +} diff --git a/test-fixtures/kiro/workspace-sessions/test-workspace/test-session-edge.json b/test-fixtures/kiro/workspace-sessions/test-workspace/test-session-edge.json new file mode 100644 index 0000000..4749ace --- /dev/null +++ b/test-fixtures/kiro/workspace-sessions/test-workspace/test-session-edge.json @@ -0,0 +1,38 @@ +{ + "sessionId": "test-session-edge", + "workspacePath": "/Users/dev/edge-project", + "history": [ + { + "message": { + "role": "user", + "content": [ + { "type": "text", "text": "Refactor the database connection pool" } + ], + "id": "msg-1" + } + }, + { + "message": { + "role": "assistant", + "content": [ + { "type": "text", "text": "I'll refactor the database connection pool to use a singleton pattern with configurable pool size." } + ], + "id": "msg-2" + } + }, + { + "message": { + "role": "user", + "content": "", + "id": "msg-3" + } + }, + { + "message": { + "role": "assistant", + "content": "The refactoring is complete. 
The pool now supports dynamic scaling between min and max connections.", + "id": "msg-4" + } + } + ] +} diff --git a/test-fixtures/roo-code/tasks/test-task-1/ui_messages.json b/test-fixtures/roo-code/tasks/test-task-1/ui_messages.json new file mode 100644 index 0000000..a308b37 --- /dev/null +++ b/test-fixtures/roo-code/tasks/test-task-1/ui_messages.json @@ -0,0 +1,52 @@ +[ + { + "ts": 1740471000000, + "type": "say", + "say": "text", + "text": "Set up a CI/CD pipeline with GitHub Actions for our Node.js project" + }, + { + "ts": 1740471001000, + "type": "say", + "say": "api_req_started", + "text": "{\"request\":\"POST /v1/messages\",\"tokensIn\":120,\"tokensOut\":0,\"cost\":0}" + }, + { + "ts": 1740471002000, + "type": "say", + "say": "text", + "text": "I'll create a GitHub Actions workflow with lint, test, and deploy stages...", + "partial": true + }, + { + "ts": 1740471003000, + "type": "say", + "say": "text", + "text": "I'll create a GitHub Actions workflow with lint, test, and deploy stages. The pipeline will run on push to main and on pull requests.", + "partial": true + }, + { + "ts": 1740471005000, + "type": "say", + "say": "api_req_started", + "text": "{\"request\":\"POST /v1/messages\",\"tokensIn\":300,\"tokensOut\":400,\"cacheWrites\":10,\"cacheReads\":50,\"cost\":0.002}" + }, + { + "ts": 1740471006000, + "type": "say", + "say": "user_feedback", + "text": "Add a matrix strategy to test on Node 18 and 20" + }, + { + "ts": 1740471007000, + "type": "say", + "say": "api_req_started", + "text": "{\"request\":\"POST /v1/messages\",\"tokensIn\":450,\"tokensOut\":350,\"cost\":0.003}" + }, + { + "ts": 1740471008000, + "type": "say", + "say": "completion_result", + "text": "CI/CD pipeline created at .github/workflows/ci.yml with matrix testing on Node 18 and 20." 
+ } +] diff --git a/test-fixtures/test-all.sh b/test-fixtures/test-all.sh new file mode 100755 index 0000000..f827794 --- /dev/null +++ b/test-fixtures/test-all.sh @@ -0,0 +1,158 @@ +#!/bin/bash +# ============================================================================ +# cli-continues Parser Integration Test Runner +# +# Symlinks test fixtures to the paths each parser expects, builds the project, +# runs the test harness, then cleans up — restoring any pre-existing data. +# +# Usage: bash test-fixtures/test-all.sh +# (run from the cli-continues root directory) +# ============================================================================ + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +CLI_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +HOME_DIR="$HOME" + +# Track what we've set up so cleanup runs even on error +CLEANUP_ACTIONS=() + +cleanup() { + echo "" + echo "─── Cleanup ───────────────────────────────────────────────────" + for action in "${CLEANUP_ACTIONS[@]:-}"; do + eval "$action" 2>/dev/null || true + done + echo "Cleanup complete." 
+} + +trap cleanup EXIT + +# Helper: create a symlink, backing up any existing target +# Usage: safe_symlink +safe_symlink() { + local src="$1" + local dst="$2" + local parent + parent="$(dirname "$dst")" + + mkdir -p "$parent" + + if [ -L "$dst" ]; then + # Existing symlink — save and remove + local backup="${dst}.test-backup-link" + local target + target="$(readlink "$dst")" + echo "$target" > "$backup" + rm -f "$dst" + CLEANUP_ACTIONS+=("rm -f '$dst'; if [ -f '${backup}' ]; then ln -sfn \"\$(cat '${backup}')\" '$dst'; rm -f '${backup}'; fi") + elif [ -d "$dst" ]; then + # Existing directory — rename + mv "$dst" "${dst}.test-backup" + CLEANUP_ACTIONS+=("rm -f '$dst'; if [ -d '${dst}.test-backup' ]; then mv '${dst}.test-backup' '$dst'; fi") + elif [ -f "$dst" ]; then + # Existing file — backup + cp "$dst" "${dst}.test-backup" + CLEANUP_ACTIONS+=("rm -f '$dst'; if [ -f '${dst}.test-backup' ]; then mv '${dst}.test-backup' '$dst'; fi") + else + # Nothing exists — just remove on cleanup + CLEANUP_ACTIONS+=("rm -f '$dst'") + fi + + ln -sfn "$src" "$dst" + echo " ✓ $(basename "$dst") → $(basename "$src")" +} + +# Helper: copy a file, backing up existing +safe_copy() { + local src="$1" + local dst="$2" + local parent + parent="$(dirname "$dst")" + + mkdir -p "$parent" + + if [ -f "$dst" ] && [ ! -L "$dst" ]; then + cp "$dst" "${dst}.test-backup" + CLEANUP_ACTIONS+=("rm -f '$dst'; if [ -f '${dst}.test-backup' ]; then mv '${dst}.test-backup' '$dst'; fi") + else + CLEANUP_ACTIONS+=("rm -f '$dst'") + fi + + cp "$src" "$dst" + echo " ✓ $(basename "$dst") (copied)" +} + +echo "╔══════════════════════════════════════════════════════════════╗" +echo "║ cli-continues Integration Test Runner ║" +echo "╚══════════════════════════════════════════════════════════════╝" +echo "" + +# ── Step 1: Build ──────────────────────────────────────────────────────────── + +echo "─── Build ─────────────────────────────────────────────────────" +cd "$CLI_DIR" +if ! 
npx tsc -b 2>&1; then + echo "❌ Build failed. Fix TypeScript errors before testing." + exit 1 +fi +echo " ✓ Build succeeded" +echo "" + +# ── Step 2: Symlink fixtures ──────────────────────────────────────────────── + +echo "─── Symlinking fixtures ──────────────────────────────────────" + +# AMP: ~/.local/share/amp/threads/ +safe_symlink "$SCRIPT_DIR/amp/threads" "$HOME_DIR/.local/share/amp/threads" + +# KIRO: ~/Library/Application Support/Kiro/workspace-sessions/ +safe_symlink "$SCRIPT_DIR/kiro/workspace-sessions" "$HOME_DIR/Library/Application Support/Kiro/workspace-sessions" + +# CRUSH: ~/.crush/crush.db (file copy — sqlite3 doesn't like symlinked DBs on all platforms) +safe_copy "$SCRIPT_DIR/crush/crush.db" "$HOME_DIR/.crush/crush.db" + +# CLINE: VS Code globalStorage +CLINE_DIR="$HOME_DIR/Library/Application Support/Code/User/globalStorage/saoudrizwan.claude-dev" +safe_symlink "$SCRIPT_DIR/cline/tasks" "$CLINE_DIR/tasks" + +# ROO CODE +ROO_DIR="$HOME_DIR/Library/Application Support/Code/User/globalStorage/rooveterinaryinc.roo-cline" +safe_symlink "$SCRIPT_DIR/roo-code/tasks" "$ROO_DIR/tasks" + +# KILO CODE +KILO_DIR="$HOME_DIR/Library/Application Support/Code/User/globalStorage/kilocode.kilo-code" +safe_symlink "$SCRIPT_DIR/kilo-code/tasks" "$KILO_DIR/tasks" + +# ANTIGRAVITY: ~/.gemini/antigravity/code_tracker/test-project +# Copy (not symlink) because listSubdirectories uses Dirent.isDirectory() +# which returns false for symlinks — only real directories are discovered. +ANTI_DIR="$HOME_DIR/.gemini/antigravity/code_tracker/test-project" +if [ -d "$ANTI_DIR" ] && [ ! 
-L "$ANTI_DIR" ]; then + mv "$ANTI_DIR" "${ANTI_DIR}.test-backup" + CLEANUP_ACTIONS+=("rm -rf '$ANTI_DIR'; if [ -d '${ANTI_DIR}.test-backup' ]; then mv '${ANTI_DIR}.test-backup' '$ANTI_DIR'; fi") +else + CLEANUP_ACTIONS+=("rm -rf '$ANTI_DIR'") +fi +cp -r "$SCRIPT_DIR/antigravity/code_tracker/test-project" "$ANTI_DIR" +echo " ✓ test-project (copied dir)" + +echo "" + +# ── Step 3: Run test harness ──────────────────────────────────────────────── + +echo "─── Running test harness ─────────────────────────────────────" +cd "$CLI_DIR" +TEST_EXIT=0 +node test-fixtures/test-harness.mjs || TEST_EXIT=$? + +echo "" +if [ $TEST_EXIT -eq 0 ]; then + echo "🎉 All integration tests passed!" +else + echo "⚠️ Some tests failed (exit code: $TEST_EXIT)" +fi + +# Cleanup runs via trap +exit $TEST_EXIT diff --git a/test-fixtures/test-harness.mjs b/test-fixtures/test-harness.mjs new file mode 100644 index 0000000..1ca96d3 --- /dev/null +++ b/test-fixtures/test-harness.mjs @@ -0,0 +1,336 @@ +#!/usr/bin/env node +/** + * Integration test harness for cli-continues parsers. + * + * Imports each parser's parse + extract functions from the compiled dist/ + * and validates they return expected data against the test fixtures. 
+ * + * Prerequisites: + * - Fixtures must be symlinked to expected paths (test-all.sh handles this) + * - Project must be built (`npx tsc -b`) + * + * Usage: node test-fixtures/test-harness.mjs + */ + +import { parseAmpSessions, extractAmpContext } from '../dist/parsers/amp.js'; +import { parseClineSessions, extractClineContext, parseRooCodeSessions, extractRooCodeContext, parseKiloCodeSessions, extractKiloCodeContext } from '../dist/parsers/cline.js'; +import { parseCrushSessions, extractCrushContext } from '../dist/parsers/crush.js'; +import { parseAntigravitySessions, extractAntigravityContext } from '../dist/parsers/antigravity.js'; +import { parseKiroSessions, extractKiroContext } from '../dist/parsers/kiro.js'; + +// ── Test Tracking ─────────────────────────────────────────────────────────── + +let passed = 0; +let failed = 0; +const failures = []; + +function assert(condition, label) { + if (condition) { + passed++; + console.log(` ✅ ${label}`); + } else { + failed++; + failures.push(label); + console.log(` ❌ ${label}`); + } +} + +function section(name) { + console.log(`\n━━━ ${name} ${'━'.repeat(Math.max(0, 60 - name.length))}`); +} + +// ── Amp Tests ─────────────────────────────────────────────────────────────── + +async function testAmp() { + section('Amp Parser'); + + const sessions = await parseAmpSessions(); + assert(sessions.length >= 1, `Found ${sessions.length} Amp session(s) (expected ≥1)`); + + if (sessions.length === 0) return; + + // Find our main test thread + const main = sessions.find(s => s.id === 'test-thread-1'); + assert(!!main, 'Found test-thread-1'); + + if (!main) return; + + assert(main.source === 'amp', `source = "${main.source}" (expected "amp")`); + assert(main.summary?.includes('login form'), `summary contains "login form": "${main.summary?.slice(0, 50)}"`); + assert(main.model === 'claude-sonnet-4-20250514', `model = "${main.model}"`); + assert(main.createdAt instanceof Date && !isNaN(main.createdAt.getTime()), 'createdAt is 
valid Date');
+  assert(main.lines > 0, `lines = ${main.lines} (>0)`);
+  assert(main.bytes > 0, `bytes = ${main.bytes} (>0)`);
+
+  // Extract context
+  const ctx = await extractAmpContext(main);
+  assert(ctx.recentMessages.length >= 2, `recentMessages.length = ${ctx.recentMessages.length} (≥2)`);
+  assert(ctx.recentMessages.some(m => m.role === 'user'), 'Has user messages');
+  assert(ctx.recentMessages.some(m => m.role === 'assistant'), 'Has assistant messages');
+
+  // Token usage from usageLedger (excludes title-generation)
+  assert(!!ctx.sessionNotes?.tokenUsage, 'Has tokenUsage');
+  if (ctx.sessionNotes?.tokenUsage) { // re-guard: assert() appears to tally rather than throw — TODO confirm
+    // 3 conversation events: 150+350+500 = 1000 input, 200+450+300 = 950 output
+    assert(ctx.sessionNotes.tokenUsage.input === 1000,
+      `tokenUsage.input = ${ctx.sessionNotes.tokenUsage.input} (expected 1000)`);
+    assert(ctx.sessionNotes.tokenUsage.output === 950,
+      `tokenUsage.output = ${ctx.sessionNotes.tokenUsage.output} (expected 950)`);
+  }
+
+  // Pending tasks from last assistant message
+  assert(ctx.pendingTasks.length >= 1, `pendingTasks.length = ${ctx.pendingTasks.length} (≥1)`);
+  assert(typeof ctx.markdown === 'string' && ctx.markdown.length > 0, 'markdown is non-empty');
+
+  // Edge case thread (empty content blocks, no usage events)
+  const edge = sessions.find(s => s.id === 'test-thread-edge');
+  assert(!!edge, 'Found test-thread-edge (edge case)');
+  if (edge) {
+    // Should have a summary from the first user message
+    assert(!!edge.summary, `Edge thread has summary: "${edge.summary?.slice(0, 40)}"`);
+  }
+}
+
+// ── Cline Tests ─────────────────────────────────────────────────────────────
+
+async function testCline() {
+  section('Cline Parser');
+
+  const sessions = await parseClineSessions();
+  // parseClineSessions returns ALL cline-family sessions (cline + roo-code + kilo-code)
+  const clineSessions = sessions.filter(s => s.source === 'cline');
+  assert(clineSessions.length >= 1, `Found ${clineSessions.length} Cline session(s) (expected ≥1)`);
+
+  if (clineSessions.length === 0) return; // fixture missing — skip the deeper checks
+
+  const main = clineSessions[0]; // first fixture session is the "main" one — ordering assumed stable, verify in parser
+  assert(main.source === 'cline', `source = "${main.source}"`);
+  assert(!!main.summary, `summary = "${main.summary?.slice(0, 50)}"`);
+  assert(main.createdAt instanceof Date, 'createdAt is valid Date');
+
+  // Extract context
+  const ctx = await extractClineContext(main);
+  assert(ctx.recentMessages.length >= 2, `recentMessages.length = ${ctx.recentMessages.length} (≥2)`);
+  assert(ctx.recentMessages.some(m => m.role === 'user'), 'Has user messages');
+
+  // Token usage from api_req_started events
+  assert(!!ctx.sessionNotes?.tokenUsage, 'Has tokenUsage');
+  if (ctx.sessionNotes?.tokenUsage) {
+    assert(ctx.sessionNotes.tokenUsage.input > 0, `tokenUsage.input = ${ctx.sessionNotes.tokenUsage.input} (>0)`);
+    assert(ctx.sessionNotes.tokenUsage.output > 0, `tokenUsage.output = ${ctx.sessionNotes.tokenUsage.output} (>0)`);
+  }
+
+  // Cache tokens
+  assert(!!ctx.sessionNotes?.cacheTokens, 'Has cacheTokens');
+
+  // Reasoning highlights
+  assert(Array.isArray(ctx.sessionNotes?.reasoning) && ctx.sessionNotes.reasoning.length >= 1,
+    `reasoning highlights = ${ctx.sessionNotes?.reasoning?.length ?? 0} (≥1)`);
+
+  // Pending tasks from completion_result
+  assert(ctx.pendingTasks.length >= 1, `pendingTasks.length = ${ctx.pendingTasks.length} (≥1)`);
+
+  assert(typeof ctx.markdown === 'string' && ctx.markdown.length > 0, 'markdown is non-empty');
+}
+
+// ── Roo Code Tests ──────────────────────────────────────────────────────────
+
+async function testRooCode() {
+  section('Roo Code Parser');
+
+  const sessions = await parseRooCodeSessions();
+  assert(sessions.length >= 1, `Found ${sessions.length} Roo Code session(s) (expected ≥1)`);
+
+  if (sessions.length === 0) return;
+
+  const main = sessions[0];
+  assert(main.source === 'roo-code', `source = "${main.source}"`);
+  assert(!!main.summary, `summary = "${main.summary?.slice(0, 50)}"`);
+
+  const ctx = await extractRooCodeContext(main);
+  assert(ctx.recentMessages.length >= 2, `recentMessages.length = ${ctx.recentMessages.length} (≥2)`);
+  assert(typeof ctx.markdown === 'string' && ctx.markdown.length > 0, 'markdown is non-empty');
+}
+
+// ── Kilo Code Tests ─────────────────────────────────────────────────────────
+
+async function testKiloCode() {
+  section('Kilo Code Parser');
+
+  const sessions = await parseKiloCodeSessions();
+  assert(sessions.length >= 1, `Found ${sessions.length} Kilo Code session(s) (expected ≥1)`);
+
+  if (sessions.length === 0) return;
+
+  const main = sessions[0];
+  assert(main.source === 'kilo-code', `source = "${main.source}"`);
+  assert(!!main.summary, `summary = "${main.summary?.slice(0, 50)}"`);
+
+  const ctx = await extractKiloCodeContext(main);
+  assert(ctx.recentMessages.length >= 2, `recentMessages.length = ${ctx.recentMessages.length} (≥2)`);
+  assert(typeof ctx.markdown === 'string' && ctx.markdown.length > 0, 'markdown is non-empty');
+}
+
+// ── Crush Tests ─────────────────────────────────────────────────────────────
+
+async function testCrush() {
+  section('Crush Parser');
+
+  const sessions = await parseCrushSessions();
+  assert(sessions.length >= 1, `Found ${sessions.length} Crush session(s) (expected ≥1)`);
+
+  if (sessions.length === 0) return;
+
+  // Should find test-session-1 (has title "Build API endpoint")
+  const main = sessions.find(s => s.id === 'test-session-1');
+  assert(!!main, 'Found test-session-1');
+
+  if (!main) return; // missing fixture — remaining checks would dereference undefined
+
+  assert(main.source === 'crush', `source = "${main.source}"`);
+  assert(main.summary?.includes('Build API endpoint') || main.summary?.includes('REST API'),
+    `summary = "${main.summary?.slice(0, 50)}"`);
+  assert(main.lines > 0, `lines = ${main.lines} (>0)`);
+
+  // Extract context
+  const ctx = await extractCrushContext(main);
+  assert(ctx.recentMessages.length >= 2, `recentMessages.length = ${ctx.recentMessages.length} (≥2)`);
+  assert(ctx.recentMessages.some(m => m.role === 'user'), 'Has user messages');
+  assert(ctx.recentMessages.some(m => m.role === 'assistant'), 'Has assistant messages');
+
+  // Token usage from sessions table
+  assert(!!ctx.sessionNotes?.tokenUsage, 'Has tokenUsage');
+  if (ctx.sessionNotes?.tokenUsage) {
+    assert(ctx.sessionNotes.tokenUsage.input === 1500,
+      `tokenUsage.input = ${ctx.sessionNotes.tokenUsage.input} (expected 1500)`);
+    assert(ctx.sessionNotes.tokenUsage.output === 2200,
+      `tokenUsage.output = ${ctx.sessionNotes.tokenUsage.output} (expected 2200)`);
+  }
+
+  // Model from message rows
+  assert(ctx.sessionNotes?.model === 'claude-sonnet-4',
+    `model = "${ctx.sessionNotes?.model}" (expected "claude-sonnet-4")`);
+
+  assert(typeof ctx.markdown === 'string' && ctx.markdown.length > 0, 'markdown is non-empty');
+
+  // Edge case: session with null title should use first user message
+  const edge = sessions.find(s => s.id === 'test-session-edge');
+  assert(!!edge, 'Found test-session-edge');
+  if (edge) {
+    assert(!!edge.summary, `Edge session has summary from first message: "${edge.summary?.slice(0, 40)}"`);
+  }
+
+  // Edge case: session with empty parts
+  const empty = sessions.find(s => s.id === 'test-session-empty');
+  assert(!!empty, 'Found test-session-empty');
+}
+
+// ── Kiro Tests ──────────────────────────────────────────────────────────────
+
+async function testKiro() {
+  section('Kiro Parser');
+
+  const sessions = await parseKiroSessions();
+  assert(sessions.length >= 1, `Found ${sessions.length} Kiro session(s) (expected ≥1)`);
+
+  if (sessions.length === 0) return;
+
+  const main = sessions.find(s => s.id === 'test-session-1');
+  assert(!!main, 'Found test-session-1');
+
+  if (!main) return;
+
+  assert(main.source === 'kiro', `source = "${main.source}"`);
+  assert(!!main.summary, `summary = "${main.summary?.slice(0, 50)}"`);
+  assert(main.model === 'claude-sonnet-4-20250514', `model = "${main.model}"`);
+
+  // Extract context
+  const ctx = await extractKiroContext(main);
+  assert(ctx.recentMessages.length >= 2, `recentMessages.length = ${ctx.recentMessages.length} (≥2)`);
+  assert(ctx.recentMessages.some(m => m.role === 'user'), 'Has user messages');
+  assert(ctx.recentMessages.some(m => m.role === 'assistant'), 'Has assistant messages');
+
+  assert(typeof ctx.markdown === 'string' && ctx.markdown.length > 0, 'markdown is non-empty');
+
+  // Edge case: test-session-edge has array content format
+  const edge = sessions.find(s => s.id === 'test-session-edge');
+  assert(!!edge, 'Found test-session-edge (array content format)');
+  if (edge) {
+    const edgeCtx = await extractKiroContext(edge);
+    assert(edgeCtx.recentMessages.length >= 2,
+      `Edge session recentMessages.length = ${edgeCtx.recentMessages.length} (≥2)`);
+  }
+}
+
+// ── Antigravity Tests ───────────────────────────────────────────────────────
+
+async function testAntigravity() {
+  section('Antigravity Parser');
+
+  const sessions = await parseAntigravitySessions();
+  assert(sessions.length >= 1, `Found ${sessions.length} Antigravity session(s) (expected ≥1)`);
+
+  if (sessions.length === 0) return;
+
+  // Should find at least the session.jsonl file
+  const main = sessions.find(s => s.id === 'session');
+  assert(!!main, 'Found session.jsonl');
+
+  if (!main) return;
+
+  assert(main.source === 'antigravity', `source = "${main.source}"`);
+  assert(!!main.summary, `summary = "${main.summary?.slice(0, 50)}"`);
+  assert(main.repo === 'test-project', `repo = "${main.repo}" (expected "test-project")`);
+  assert(main.lines > 0, `lines = ${main.lines} (>0)`);
+
+  // Extract context
+  const ctx = await extractAntigravityContext(main);
+  assert(ctx.recentMessages.length >= 2, `recentMessages.length = ${ctx.recentMessages.length} (≥2)`);
+  assert(ctx.recentMessages.some(m => m.role === 'user'), 'Has user messages');
+  assert(ctx.recentMessages.some(m => m.role === 'assistant'), 'Has assistant messages');
+
+  assert(typeof ctx.markdown === 'string' && ctx.markdown.length > 0, 'markdown is non-empty');
+
+  // Binary-prefix session should also parse (session-binary.jsonl has blank lines)
+  const binary = sessions.find(s => s.id === 'session-binary');
+  assert(!!binary, 'Found session-binary.jsonl (binary prefix test)');
+  if (binary) {
+    const bCtx = await extractAntigravityContext(binary);
+    assert(bCtx.recentMessages.length >= 2,
+      `Binary session recentMessages.length = ${bCtx.recentMessages.length} (≥2)`);
+  }
+}
+
+// ── Main ────────────────────────────────────────────────────────────────────
+
+async function main() {
+  console.log('╔══════════════════════════════════════════════════════════════╗');
+  console.log('║            cli-continues Parser Integration Tests            ║');
+  console.log('╚══════════════════════════════════════════════════════════════╝');
+
+  await testAmp();
+  await testCline();
+  await testRooCode();
+  await testKiloCode();
+  await testCrush();
+  await testKiro();
+  await testAntigravity();
+
+  console.log('\n══════════════════════════════════════════════════════════════');
+  console.log(`Results: ${passed} passed, ${failed} failed, ${passed + failed} total`);
+
+  if (failures.length > 0) {
+    console.log('\nFailed tests:');
+    for (const f of failures) {
+      console.log(`  ❌ ${f}`);
+    }
+  }
+
+  console.log('══════════════════════════════════════════════════════════════\n');
+  process.exit(failed > 0 ? 1 : 0); // exit 1 when any assertion failed, 0 when all passed
+}
+
+main().catch(err => {
+  console.error('Fatal error:', err);
+  process.exit(2); // exit 2 = harness crash, distinct from test failures (exit 1)
+});