diff --git a/package.json b/package.json
index b8baa4087..3139dabb0 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "firecrawl-cli",
-  "version": "1.16.2",
-  "description": "Command-line interface for Firecrawl. Scrape, crawl, and extract data from any website directly from your terminal.",
+  "version": "1.17.0",
+  "description": "Command-line interface for Firecrawl: scrape, crawl, search, extract, debug, and look up docs from your terminal.",
   "main": "dist/index.js",
   "bin": {
     "firecrawl": "dist/index.js"
@@ -42,7 +42,12 @@
     "markdown",
     "search",
     "web search",
-    "skill"
+    "skill",
+    "ai",
+    "ask",
+    "docs-search",
+    "support",
+    "debug"
   ],
   "author": "Firecrawl",
   "license": "ISC",
diff --git a/skills/firecrawl-ask/SKILL.md b/skills/firecrawl-ask/SKILL.md
new file mode 100644
index 000000000..e4c434706
--- /dev/null
+++ b/skills/firecrawl-ask/SKILL.md
@@ -0,0 +1,107 @@
+---
+name: firecrawl-ask
+description: |
+  Diagnose Firecrawl issues with the AI support agent. Use this skill whenever a Firecrawl operation (scrape/crawl/search/map/agent/browser/interact) fails, returns unexpected results, hits rate limits, or behaves in a way the user doesn't expect — especially after a regular `firecrawl <command>` call has already produced an error or wrong output. Triggers on "why is firecrawl…", "scrape returned empty", "crawl only got N pages but I expected more", "I'm getting a 4xx/5xx", "credits don't match", "what's wrong with this run", or any phrasing where the user is confused by Firecrawl behavior. Returns a 2-4 sentence diagnosis with machine-readable fix parameters and (when possible) a validated fix tested against the live API.
+allowed-tools:
+  - Bash(firecrawl ask *)
+  - Bash(firecrawl ask:*)
+  - Bash(npx firecrawl ask *)
+---
+
+# firecrawl ask
+
+AI support agent that investigates a failing Firecrawl call and returns a verified fix. Typical latency: 15–30 seconds.
+
+## When to use
+
+- A previous `firecrawl <command>` call **failed** or returned **unexpected results**.
+- The user is **confused** by Firecrawl behavior ("why does this site return empty markdown?", "why am I getting 429s?", "why did my crawl stop at 47 pages?").
+- You're about to give up on a scrape/crawl/etc. and would otherwise tell the user to file a support ticket — try `firecrawl ask` first.
+
+This is the **debugging** counterpart to [firecrawl-docs-search](../firecrawl-docs-search/SKILL.md): use docs-search for "how do I…" questions, use ask for "why didn't it work…" questions.
+
+## Quick start
+
+```bash
+# Diagnose a failed scrape
+firecrawl ask "my scrape of https://example.com returned empty markdown"
+
+# Pass the failing job's id so the agent can pull the right logs
+firecrawl ask "my crawl returned 3 pages but I expected 50" \
+  --job-id <job-id>
+
+# Add rationale (recommended for AI callers — 1-2 sentences on user intent)
+firecrawl ask "scrape times out at 30s on this URL" \
+  --rationale "User wants to scrape example.com to feed an AI agent; every attempt times out"
+```
+
+## Options
+
+| Option             | Description                                                                                               |
+| ------------------ | --------------------------------------------------------------------------------------------------------- |
+| `<question>`       | Required. Free-form description of what went wrong (1-8000 chars)                                          |
+| `-r, --rationale`  | 1-2 sentences on what the end user is trying to accomplish — recommended for AI callers (≤2000 chars)      |
+| `-j, --job-id`     | Firecrawl job id the failing call returned. Lets the agent pull logs/credit usage for that exact run       |
+| `--context <json>` | JSON-stringified object of free-form metadata (status code, formats requested, etc.) the agent considers   |
+| `-o, --output`     | Output file path (default: stdout)                                                                         |
+| `--json`           | Output as JSON                                                                                             |
+| `--pretty`         | Pretty-print JSON                                                                                          |
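+
+For example, to pass extra run metadata through `--context` (a sketch; the keys shown are illustrative, not a fixed schema):
+
+```bash
+firecrawl ask "scrape of https://example.com returned a 500" \
+  --context '{"statusCode": 500, "formats": ["markdown"], "retries": 2}'
+```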
+
+## What you get back
+
+By default, a human-readable summary:
+
+```
+Confidence: high
+Duration: 24.3s
+
+Answer:
+The default 30s timeout isn't enough for example.com — its content loads
+via JavaScript after a 5s render delay. Increase timeout to 45000 and set
+waitFor: 6000 so the page settles before extraction.
+
+Suggested fix parameters:
+{
+  "waitFor": 6000,
+  "timeout": 45000
+}
+
+Validation: success
+  validateScrape with waitFor=6000 returned 200 with 47kb of markdown
+```
+
+When `validation.tested == true` and `validation.result == "success"`, the agent literally tested the fix against the live API — apply the `fixParameters` directly and rerun.
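+
+A minimal sketch of that check (assuming the response was saved with `--json -o .firecrawl/debug.json` and `jq` is available):
+
+```bash
+if [ "$(jq -r '.data.validation.result // empty' .firecrawl/debug.json)" = "success" ]; then
+  jq '.data.fixParameters' .firecrawl/debug.json   # parameters to apply on the retry
+fi
+```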
+
+## Recipe — agent-friendly debugging loop
+
+```bash
+# 1. Run the actual operation and capture its output
+SCRAPE_OUTPUT=$(firecrawl scrape "https://example.com" --json --pretty -o .firecrawl/scrape.json) || RUN_FAILED=1
+
+# 2. If it failed or the output is suspicious, ask the agent
+if [ -n "$RUN_FAILED" ] || [ "$(jq -r '.data | length' .firecrawl/scrape.json 2>/dev/null)" = "null" ]; then
+  firecrawl ask "scrape of https://example.com returned empty/failed" \
+    --rationale "User wants to scrape example.com for downstream summarization" \
+    --json --pretty -o .firecrawl/debug.json
+
+  # 3. Apply the suggested fix and retry
+  WAIT_FOR=$(jq -r '.data.fixParameters.waitFor // empty' .firecrawl/debug.json)
+  TIMEOUT=$(jq -r '.data.fixParameters.timeout // empty' .firecrawl/debug.json)
+  if [ -n "$WAIT_FOR" ]; then
+    firecrawl scrape "https://example.com" --wait-for "$WAIT_FOR" --timeout "$TIMEOUT" -o .firecrawl/scrape-retry.md
+  fi
+fi
+```
+
+## Tips
+
+- **Pass `--job-id` whenever possible.** Tools like `debugJob`, `searchLogs`, and `getJob` auto-default to it on the agent side, so the diagnosis comes back with the actual run's logs (not a guess based on the URL).
+- The agent runs server-side and is **automatically scoped to your team** — it can only see your own jobs and account data. No need to share API keys or job IDs with anyone.
+- If `confidence: low` and `feedback` is non-null, the agent could not produce a usable answer — escalate to human support.
+- Don't loop more than 2–3 retries. Each call costs real agent compute (15–60s + downstream tool runs); use `firecrawl docs-search` for general questions instead.
+
+## See also
+
+- [firecrawl-docs-search](../firecrawl-docs-search/SKILL.md) — looking up Firecrawl documentation (the "how do I…" counterpart)
+- [firecrawl-scrape](../firecrawl-scrape/SKILL.md) — the most common upstream of an `ask` call
+- [firecrawl-cli](../firecrawl-cli/SKILL.md) — full CLI reference and workflow escalation
diff --git a/skills/firecrawl-cli/SKILL.md b/skills/firecrawl-cli/SKILL.md
index 8d7e6a7c7..697f7232b 100644
--- a/skills/firecrawl-cli/SKILL.md
+++ b/skills/firecrawl-cli/SKILL.md
@@ -63,6 +63,8 @@ Follow this escalation pattern:
 | Interact with a page | `scrape` + `interact` | Content requires clicks, form fills, pagination, or login |
 | Download a site to files | `download` | Save an entire site as local files |
 | Parse a local file | `parse` | File on disk (PDF, DOCX, XLSX, etc.) — not a URL |
+| Learn how Firecrawl works | `docs-search` | "How do I…" question about a Firecrawl feature/parameter |
+| Debug a failing run | `ask` | A scrape/crawl/etc. failed or returned unexpected results |
 
 For detailed command reference, run `firecrawl --help`.
 
@@ -87,6 +89,8 @@ For detailed command reference, run `firecrawl --help`.
 - **Clicks, forms, login, pagination, or post-scrape browser actions** -> [firecrawl-interact](../firecrawl-interact/SKILL.md)
 - **Downloading a site to local files** -> [firecrawl-download](../firecrawl-download/SKILL.md)
 - **Parsing a local file (PDF, DOCX, XLSX, HTML, etc.)** -> [firecrawl-parse](../firecrawl-parse/SKILL.md)
+- **Looking up how Firecrawl works ("how do I…" questions)** -> [firecrawl-docs-search](../firecrawl-docs-search/SKILL.md)
+- **Diagnosing a failing run ("why didn't it work…")** -> [firecrawl-ask](../firecrawl-ask/SKILL.md)
 - **Install, auth, or setup problems** -> [rules/install.md](rules/install.md)
 - **Output handling and safe file-reading patterns** -> [rules/security.md](rules/security.md)
 - **Integrating Firecrawl into an app, adding `FIRECRAWL_API_KEY` to `.env`, or choosing endpoint usage in product code** -> use the `firecrawl-build` skills (already installed alongside this CLI skill)
@@ -148,3 +152,23 @@ For interact, scrape multiple pages and interact with each independently using t
 firecrawl credit-usage
 firecrawl credit-usage --json --pretty -o .firecrawl/credits.json
 ```
+
+## Learning & Debugging
+
+Two AI-powered commands for understanding Firecrawl behavior. Both are scoped to the caller's team automatically.
+
+- **`firecrawl docs-search "<question>"`** — answers "how do I…" questions by searching Firecrawl's official documentation. Returns a docs-grounded answer with source citations. Use this **before** generic web search for any Firecrawl-specific question (parameters, endpoints, features, status codes). See [firecrawl-docs-search](../firecrawl-docs-search/SKILL.md).
+- **`firecrawl ask "<question>"`** — diagnoses a failing or unexpected run. The AI support agent inspects job logs, account state, and the docs, then returns a 2-4 sentence diagnosis plus machine-readable `fixParameters` (often validated against the live API). Use this **whenever** a `firecrawl <command>` call returns an error or unexpected output and you'd otherwise be stuck. See [firecrawl-ask](../firecrawl-ask/SKILL.md).
+
+Quick examples:
+
+```bash
+# Learn how a feature works
+firecrawl docs-search "how do I verify webhook signatures?"
+
+# Debug a failing scrape (pass the job id when you have it)
+firecrawl ask "scrape returned empty markdown" --job-id abc-123-def
+
+# Capture suggested fix and reapply
+firecrawl ask "crawl stopped at 47/100 pages" --json --pretty -o .firecrawl/debug.json
+```
diff --git a/skills/firecrawl-crawl/SKILL.md b/skills/firecrawl-crawl/SKILL.md
index ca6e6b5aa..fd0370fce 100644
--- a/skills/firecrawl-crawl/SKILL.md
+++ b/skills/firecrawl-crawl/SKILL.md
@@ -56,3 +56,4 @@ firecrawl crawl
 - [firecrawl-scrape](../firecrawl-scrape/SKILL.md) — scrape individual pages
 - [firecrawl-map](../firecrawl-map/SKILL.md) — discover URLs before deciding to crawl
 - [firecrawl-download](../firecrawl-download/SKILL.md) — download site to local files (uses map + scrape)
+- [firecrawl-ask](../firecrawl-ask/SKILL.md) — if a crawl stalls, returns fewer pages than expected, or fails, run `firecrawl ask --job-id <job-id>` for a diagnosis
diff --git a/skills/firecrawl-docs-search/SKILL.md b/skills/firecrawl-docs-search/SKILL.md
new file mode 100644
index 000000000..4782addc5
--- /dev/null
+++ b/skills/firecrawl-docs-search/SKILL.md
@@ -0,0 +1,74 @@
+---
+name: firecrawl-docs-search
+description: |
+  Look up answers in Firecrawl's official documentation. Use this skill whenever the user asks "how do I…" with Firecrawl, needs to know which parameter does what, or wants to learn how an endpoint behaves — e.g. "how do I verify webhook signatures", "what does waitFor do on /scrape", "which formats support change tracking", "how does crawl handle robots.txt", "what's the difference between scrape and parse". Returns a concise, docs-grounded answer with citations to the relevant pages. Prefer this over a generic web search whenever the question is specifically about Firecrawl behavior or configuration — answers come from current documentation, not stale training data.
+allowed-tools:
+  - Bash(firecrawl docs-search *)
+  - Bash(firecrawl docs-search:*)
+  - Bash(npx firecrawl docs-search *)
+---
+
+# firecrawl docs-search
+
+AI-powered docs lookup grounded in Firecrawl's public documentation. Returns a concise answer plus citations to the source pages.
+
+## When to use
+
+- "**How do I…**" questions about Firecrawl features, endpoints, parameters, or configuration.
+- Looking up specific behavior — what a parameter does, which formats are supported, what a status code means, how billing works, etc.
+- Onboarding to a Firecrawl feature you've never used (webhooks, change tracking, batch scrape, agent extraction, browser sessions, etc.).
+
+This is the **learning** counterpart to [firecrawl-ask](../firecrawl-ask/SKILL.md): use docs-search for "how do I…" questions, use ask for "why didn't it work…" questions.
+
+## Quick start
+
+```bash
+# Look up how a feature works
+firecrawl docs-search "how do I verify webhook signatures?"
+
+# Find the right parameter
+firecrawl docs-search "which scrape options bypass bot protection on Cloudflare-protected sites?"
+
+# Save the answer + sources to a file
+firecrawl docs-search "what's the difference between /scrape and /parse" \
+  --json --pretty -o .firecrawl/docs-q.json
+```
+
+## Options
+
+| Option         | Description                                                           |
+| -------------- | --------------------------------------------------------------------- |
+| `<question>`   | Required. Plain-English question about Firecrawl (1-8000 chars)       |
+| `-o, --output` | Output file path (default: stdout)                                    |
+| `--json`       | Output as JSON (machine-readable, includes evidence array)            |
+| `--pretty`     | Pretty-print JSON                                                     |
+
+## What you get back
+
+```
+Duration: 11.3s
+
+Answer:
+The signature is sent in the X-Firecrawl-Signature header as an HMAC-SHA256
+of the request body, base64-encoded. Verify by recomputing with your webhook
+secret and comparing constant-time. Reject the delivery if it doesn't match.
+
+Sources:
+  - webhooks/security.mdx#L1-L52 — Documents webhook signature verification
+  - webhooks/quickstart.mdx#L88-L102 — Includes a Node.js verification example
+```
+
+The JSON form (`--json --pretty`) carries an `evidence` array with `pathOrUrl` + `reason` for each source — useful when you need to follow up by scraping the actual docs page.
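+
+For example (a sketch; assumes `jq` is available and the answer was saved to `.firecrawl/docs-q.json` as above; entries in `pathOrUrl` may be doc paths rather than full URLs, so prefix them with the docs site base URL before scraping):
+
+```bash
+SOURCE=$(jq -r '.data.evidence[0].pathOrUrl // empty' .firecrawl/docs-q.json)
+[ -n "$SOURCE" ] && firecrawl scrape "$SOURCE" -o .firecrawl/docs-source.md
+```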
+
+## Tips
+
+- **Prefer docs-search over generic web search** for Firecrawl-specific questions. Public web search returns stale or third-party content; docs-search is grounded in the current docs.
+- Pair with [firecrawl-scrape](../firecrawl-scrape/SKILL.md) when you need the **full text** of a page the answer cites — the citations include `pathOrUrl` you can feed back into a scrape.
+- If the answer says the agent couldn't find anything (low confidence / empty evidence), the docs may be missing that topic — flag it with the user rather than fabricating an answer.
+- Don't use this for **debugging** an actual run — switch to `firecrawl ask` so the agent can investigate the failing job's logs.
+
+## See also
+
+- [firecrawl-ask](../firecrawl-ask/SKILL.md) — diagnose a failing run (the "why didn't it work" counterpart)
+- [firecrawl-scrape](../firecrawl-scrape/SKILL.md) — pull the full text of a docs page cited in the evidence
+- [firecrawl-cli](../firecrawl-cli/SKILL.md) — full CLI reference and workflow escalation
diff --git a/skills/firecrawl-scrape/SKILL.md b/skills/firecrawl-scrape/SKILL.md
index 86b8be1f9..bb3dc817f 100644
--- a/skills/firecrawl-scrape/SKILL.md
+++ b/skills/firecrawl-scrape/SKILL.md
@@ -66,3 +66,5 @@ firecrawl scrape "https://example.com/pricing" --query "What is the enterprise p
 - [firecrawl-search](../firecrawl-search/SKILL.md) — find pages when you don't have a URL
 - [firecrawl-interact](../firecrawl-interact/SKILL.md) — when scrape can't get the content, use `interact` to click, fill forms, etc.
 - [firecrawl-download](../firecrawl-download/SKILL.md) — bulk download an entire site to local files
+- [firecrawl-ask](../firecrawl-ask/SKILL.md) — when scrape fails or returns unexpected output, run `firecrawl ask` to get a diagnosis and suggested fix instead of guessing
+- [firecrawl-docs-search](../firecrawl-docs-search/SKILL.md) — look up how a scrape parameter works ("what does waitFor do?")
diff --git a/src/commands/support.ts b/src/commands/support.ts
new file mode 100644
index 000000000..46abba8c5
--- /dev/null
+++ b/src/commands/support.ts
@@ -0,0 +1,313 @@
+/**
+ * Support endpoint commands: `firecrawl ask` and `firecrawl docs-search`.
+ *
+ * Both wrap api.firecrawl.dev/v2/support/* — `ask` is for diagnosing issues
+ * with a Firecrawl run (the AI support agent investigates job logs, account
+ * state, etc.) and `docs-search` is for looking up answers in the public
+ * docs. The Firecrawl API key is the bearer; no extra config needed.
+ */
+ */ + +import * as fs from 'fs'; +import * as path from 'path'; + +import { getConfig, validateConfig } from '../utils/config'; + +const DEFAULT_API_URL = 'https://api.firecrawl.dev'; + +export interface SupportCommonOptions { + /** API key for Firecrawl */ + apiKey?: string; + /** API URL for Firecrawl */ + apiUrl?: string; + /** Output file path */ + output?: string; + /** Output as JSON format */ + json?: boolean; + /** Pretty print JSON output */ + pretty?: boolean; +} + +export interface AskOptions extends SupportCommonOptions { + /** 1-2 sentences on what the end user is trying to accomplish — recommended for AI callers */ + rationale?: string; + /** Optional Firecrawl job id the failing call was associated with */ + jobId?: string; + /** Free-form metadata (already-parsed JSON object) */ + context?: Record; +} + +export interface DocsSearchOptions extends SupportCommonOptions {} + +export type AskResponse = { + requestId?: string; + answer?: string; + confidence?: 'high' | 'medium' | 'low'; + fixParameters?: Record | null; + validation?: { + tested?: boolean; + result?: 'success' | 'failure' | 'skipped'; + evidence?: string; + } | null; + feedback?: { blockedBy?: string; attempted?: string[] } | null; + durationMs?: number; +}; + +export type DocsSearchResponse = { + requestId?: string; + answer?: string; + evidence?: Array<{ pathOrUrl?: string; reason?: string }>; + usage?: { inputTokens?: number; outputTokens?: number; totalTokens?: number }; + durationMs?: number; +}; + +export interface SupportResult { + success: boolean; + data?: T; + error?: string; +} + +function resolveCreds(opts: SupportCommonOptions): { + apiKey: string; + apiUrl: string; +} { + const config = getConfig(); + const apiKey = opts.apiKey || config.apiKey; + validateConfig(apiKey); + const apiUrl = opts.apiUrl || config.apiUrl || DEFAULT_API_URL; + return { apiKey: apiKey as string, apiUrl }; +} + +async function postJson( + url: string, + apiKey: string, + body: Record +): Promise> { + try { + const response = await fetch(url, { + method: 'POST', + headers: { + Authorization: `Bearer ${apiKey}`, + 'Content-Type': 'application/json', + }, + body: JSON.stringify(body), + }); + + const text = await response.text(); + let parsed: unknown; + try { + parsed = text ? JSON.parse(text) : null; + } catch { + parsed = null; + } + + if (!response.ok) { + let errMsg = `HTTP ${response.status} ${response.statusText}`; + if ( + parsed && + typeof parsed === 'object' && + 'error' in parsed && + typeof (parsed as { error?: unknown }).error === 'string' + ) { + errMsg = (parsed as { error: string }).error; + } + return { success: false, error: errMsg }; + } + + return { success: true, data: (parsed ?? {}) as T }; + } catch (err) { + return { + success: false, + error: err instanceof Error ? 
+
+export async function executeAsk(
+  question: string,
+  options: AskOptions = {}
+): Promise<SupportResult<AskResponse>> {
+  if (!question || !question.trim()) {
+    return { success: false, error: 'A question is required' };
+  }
+
+  const { apiKey, apiUrl } = resolveCreds(options);
+  const body: Record<string, unknown> = { question: question.trim() };
+  if (options.rationale && options.rationale.trim()) {
+    body.rationale = options.rationale.trim();
+  }
+  if (options.jobId && options.jobId.trim()) {
+    body.jobId = options.jobId.trim();
+  }
+  if (options.context && Object.keys(options.context).length > 0) {
+    body.context = options.context;
+  }
+
+  return postJson<AskResponse>(
+    `${apiUrl.replace(/\/$/, '')}/v2/support/ask`,
+    apiKey,
+    body
+  );
+}
+
+export async function executeDocsSearch(
+  question: string,
+  options: DocsSearchOptions = {}
+): Promise<SupportResult<DocsSearchResponse>> {
+  if (!question || !question.trim()) {
+    return { success: false, error: 'A question is required' };
+  }
+
+  const { apiKey, apiUrl } = resolveCreds(options);
+
+  return postJson<DocsSearchResponse>(
+    `${apiUrl.replace(/\/$/, '')}/v2/support/docs-search`,
+    apiKey,
+    { question: question.trim() }
+  );
+}
+
+function formatAskReadable(data: AskResponse): string {
+  const lines: string[] = [];
+
+  if (data.confidence) {
+    lines.push(`Confidence: ${data.confidence}`);
+  }
+  if (typeof data.durationMs === 'number') {
+    lines.push(`Duration: ${(data.durationMs / 1000).toFixed(1)}s`);
+  }
+  if (lines.length > 0) lines.push('');
+
+  if (data.answer) {
+    lines.push('Answer:');
+    lines.push(data.answer.trim());
+    lines.push('');
+  }
+
+  if (
+    data.fixParameters &&
+    typeof data.fixParameters === 'object' &&
+    Object.keys(data.fixParameters).length > 0
+  ) {
+    lines.push('Suggested fix parameters:');
+    try {
+      lines.push(JSON.stringify(data.fixParameters, null, 2));
+    } catch {
+      lines.push(String(data.fixParameters));
+    }
+    lines.push('');
+  }
+
+  if (data.validation && data.validation.tested) {
+    lines.push(`Validation: ${data.validation.result ?? 'unknown'}`);
+    if (data.validation.evidence) {
+      lines.push(`  ${data.validation.evidence}`);
+    }
+    lines.push('');
+  }
+
+  if (data.feedback && data.feedback.blockedBy) {
+    lines.push(`Stuck — blocked by: ${data.feedback.blockedBy}`);
+    if (data.feedback.attempted && data.feedback.attempted.length > 0) {
+      lines.push(`Tools attempted: ${data.feedback.attempted.join(', ')}`);
+    }
+    lines.push('');
+  }
+
+  if (data.requestId) {
+    lines.push(`Request id: ${data.requestId}`);
+  }
+
+  return lines.join('\n').replace(/\n+$/, '') + '\n';
+}
+
+function formatDocsSearchReadable(data: DocsSearchResponse): string {
+  const lines: string[] = [];
+
+  if (typeof data.durationMs === 'number') {
+    lines.push(`Duration: ${(data.durationMs / 1000).toFixed(1)}s`);
+    lines.push('');
+  }
+
+  if (data.answer) {
+    lines.push('Answer:');
+    lines.push(data.answer.trim());
+    lines.push('');
+  }
+
+  if (data.evidence && data.evidence.length > 0) {
+    lines.push('Sources:');
+    for (const item of data.evidence) {
+      if (item?.pathOrUrl) {
+        lines.push(
+          `  - ${item.pathOrUrl}${item.reason ? ` — ${item.reason}` : ''}`
+        );
+      }
+    }
+    lines.push('');
+  }
+
+  if (data.requestId) {
+    lines.push(`Request id: ${data.requestId}`);
+  }
+
+  return lines.join('\n').replace(/\n+$/, '') + '\n';
+}
+
+function writeOutput<T>(
+  result: SupportResult<T>,
+  options: SupportCommonOptions,
+  formatReadable: (data: T) => string
+): void {
+  if (!result.success) {
+    console.error('Error:', result.error);
+    process.exit(1);
+  }
+
+  if (!result.data) return;
+
+  let outputContent: string;
+  if (options.json) {
+    try {
+      outputContent = options.pretty
+        ? JSON.stringify({ success: true, data: result.data }, null, 2)
+        : JSON.stringify({ success: true, data: result.data });
+    } catch (err) {
+      outputContent = JSON.stringify({
+        error: 'Failed to serialize response',
+        message: err instanceof Error ? err.message : 'Unknown error',
+      });
+    }
+  } else {
+    outputContent = formatReadable(result.data);
+  }
+
+  if (options.output) {
+    const dir = path.dirname(options.output);
+    if (dir && !fs.existsSync(dir)) {
+      fs.mkdirSync(dir, { recursive: true });
+    }
+    fs.writeFileSync(options.output, outputContent, 'utf-8');
+    console.error(`Output written to: ${options.output}`);
+  } else {
+    if (!outputContent.endsWith('\n')) {
+      outputContent += '\n';
+    }
+    process.stdout.write(outputContent);
+  }
+}
+
+export async function handleAskCommand(
+  question: string,
+  options: AskOptions = {}
+): Promise<void> {
+  const result = await executeAsk(question, options);
+  writeOutput(result, options, formatAskReadable);
+}
+
+export async function handleDocsSearchCommand(
+  question: string,
+  options: DocsSearchOptions = {}
+): Promise<void> {
+  const result = await executeDocsSearch(question, options);
+  writeOutput(result, options, formatDocsSearchReadable);
+}
diff --git a/src/index.ts b/src/index.ts
index 4ea6442bf..be2bcf6bd 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -15,6 +15,7 @@ import {
 import { initializeConfig, updateConfig } from './utils/config';
 import { configure, viewConfig } from './commands/config';
 import { handleCreditUsageCommand } from './commands/credit-usage';
+import { handleAskCommand, handleDocsSearchCommand } from './commands/support';
 import { handleCrawlCommand } from './commands/crawl';
 import { handleMapCommand } from './commands/map';
 import { handleParseCommand } from './commands/parse';
@@ -70,6 +71,8 @@ const AUTH_REQUIRED_COMMANDS = [
   'browser',
   'interact',
   'credit-usage',
+  'ask',
+  'docs-search',
 ];
 
 const commandSet = new Set([]);
@@ -1756,6 +1759,81 @@ program
     await handleCreditUsageCommand(options);
   });
 
+program
+  .command('ask <question...>')
+  .description(
+    'Diagnose a Firecrawl issue with the AI support agent. Use when a scrape/crawl/search/etc. failed or returned unexpected results — the agent investigates job logs, account state, and the docs, then returns a fix.'
+  )
+ ) + .option( + '-r, --rationale ', + 'Recommended for AI callers — 1-2 sentences on what the end user is trying to accomplish' + ) + .option( + '-j, --job-id ', + 'Optional Firecrawl job id the failing call was associated with (helps the agent pull the right logs)' + ) + .option( + '--context ', + 'Free-form metadata as a JSON object string, passed to the agent verbatim' + ) + .option( + '-k, --api-key ', + 'Firecrawl API key (overrides global --api-key)' + ) + .option('--api-url ', 'API URL (overrides global --api-url)') + .option('-o, --output ', 'Output file path (default: stdout)') + .option('--json', 'Output as JSON format', false) + .option( + '--pretty', + 'Pretty print JSON output (only applies with --json)', + false + ) + .action(async (questionParts: string[], options) => { + let context: Record | undefined; + if (options.context) { + const parsed = parseJsonInput(options.context, '--context'); + if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) { + context = parsed as Record; + } else { + console.error('Error: --context must be a JSON object'); + process.exit(1); + } + } + const question = (questionParts || []).join(' ').trim(); + await handleAskCommand(question, { + apiKey: options.apiKey, + apiUrl: options.apiUrl, + output: options.output, + json: options.json, + pretty: options.pretty, + rationale: options.rationale, + jobId: options.jobId, + context, + }); + }); + +program + .command('docs-search ') + .description( + "Search Firecrawl's public documentation. Use when you need to learn how a Firecrawl endpoint, parameter, or feature works (e.g., 'how do I verify webhook signatures?'). Returns a docs-grounded answer with source citations." + ) + .option( + '-k, --api-key ', + 'Firecrawl API key (overrides global --api-key)' + ) + .option('--api-url ', 'API URL (overrides global --api-url)') + .option('-o, --output ', 'Output file path (default: stdout)') + .option('--json', 'Output as JSON format', false) + .option( + '--pretty', + 'Pretty print JSON output (only applies with --json)', + false + ) + .action(async (questionParts: string[], options) => { + const question = (questionParts || []).join(' ').trim(); + await handleDocsSearchCommand(question, options); + }); + program .command('version') .description('Display version information')