diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index c3f1463..96f1cd9 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "1.2.0"
+ ".": "1.3.0"
}
diff --git a/.stats.yml b/.stats.yml
index a0614e1..a13a02a 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 20
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/context-dev%2Fcontext.dev-56a21db16ac3a797f86daca01a2ea115a0365db4b2f9d8accdec4f4d3ee2eb83.yml
-openapi_spec_hash: bfcef090896da96023c4485a3d69e350
-config_hash: 38268bb88fc4dcbb8f2f94dd138b5910
+configured_endpoints: 21
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/context-dev%2Fcontext.dev-97cdb78dc0d72e9df643a89660f2b0c9687f12c6e4d93f7767f6cfc1b4f2e4c7.yml
+openapi_spec_hash: 92fc94fd8865fabe78c2667490ca3884
+config_hash: 682b89b02a20f5d1c13e2c91ecbcf5ce
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 36530d6..126641c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,13 @@
# Changelog
+## 1.3.0 (2026-04-04)
+
+Full Changelog: [v1.2.0...v1.3.0](https://github.com/context-dot-dev/context-typescript-sdk/compare/v1.2.0...v1.3.0)
+
+### Features
+
+* **api:** manual updates ([a10ea99](https://github.com/context-dot-dev/context-typescript-sdk/commit/a10ea991a4f2518d2105c1ad8932329a20db11f0))
+
## 1.2.0 (2026-04-03)
Full Changelog: [v1.1.0...v1.2.0](https://github.com/context-dot-dev/context-typescript-sdk/compare/v1.1.0...v1.2.0)
diff --git a/api.md b/api.md
index c024bc3..7876331 100644
--- a/api.md
+++ b/api.md
@@ -3,6 +3,7 @@
Types:
- WebScreenshotResponse
+- WebWebCrawlMdResponse
- WebWebScrapeHTMLResponse
- WebWebScrapeImagesResponse
- WebWebScrapeMdResponse
@@ -11,6 +12,7 @@ Types:
Methods:
- client.web.screenshot({ ...params }) -> WebScreenshotResponse
+- client.web.webCrawlMd({ ...params }) -> WebWebCrawlMdResponse
- client.web.webScrapeHTML({ ...params }) -> WebWebScrapeHTMLResponse
- client.web.webScrapeImages({ ...params }) -> WebWebScrapeImagesResponse
- client.web.webScrapeMd({ ...params }) -> WebWebScrapeMdResponse
diff --git a/package.json b/package.json
index fe7e18d..ad27d24 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "context.dev",
- "version": "1.2.0",
+ "version": "1.3.0",
"description": "The official TypeScript library for the Context Dev API",
"author": "Context Dev ",
"types": "dist/index.d.ts",
diff --git a/packages/mcp-server/manifest.json b/packages/mcp-server/manifest.json
index 8548a5d..3eac394 100644
--- a/packages/mcp-server/manifest.json
+++ b/packages/mcp-server/manifest.json
@@ -1,7 +1,7 @@
{
"dxt_version": "0.2",
"name": "context.dev-mcp",
- "version": "1.2.0",
+ "version": "1.3.0",
"description": "The official MCP Server for the Context Dev API",
"author": {
"name": "Context Dev",
diff --git a/packages/mcp-server/package.json b/packages/mcp-server/package.json
index 7e44006..e66e80f 100644
--- a/packages/mcp-server/package.json
+++ b/packages/mcp-server/package.json
@@ -1,6 +1,6 @@
{
"name": "context.dev-mcp",
- "version": "1.2.0",
+ "version": "1.3.0",
"description": "The official MCP Server for the Context Dev API",
"author": "Context Dev ",
"types": "dist/index.d.ts",
diff --git a/packages/mcp-server/src/code-tool-worker.ts b/packages/mcp-server/src/code-tool-worker.ts
index 13de10c..1b551e8 100644
--- a/packages/mcp-server/src/code-tool-worker.ts
+++ b/packages/mcp-server/src/code-tool-worker.ts
@@ -109,6 +109,7 @@ function getTSDiagnostics(code: string): string[] {
const fuse = new Fuse(
[
'client.web.screenshot',
+ 'client.web.webCrawlMd',
'client.web.webScrapeHTML',
'client.web.webScrapeImages',
'client.web.webScrapeMd',
diff --git a/packages/mcp-server/src/local-docs-search.ts b/packages/mcp-server/src/local-docs-search.ts
index e6ee66d..ef5eaa4 100644
--- a/packages/mcp-server/src/local-docs-search.ts
+++ b/packages/mcp-server/src/local-docs-search.ts
@@ -237,6 +237,52 @@ const EMBEDDED_METHODS: MethodEntry[] = [
},
},
},
+ {
+ name: 'web_crawl_md',
+ endpoint: '/web/crawl',
+ httpMethod: 'post',
+ summary: 'Crawl website and extract Markdown',
+ description:
+ 'Performs a crawl starting from a given URL, extracts page content as Markdown, and returns results for all crawled pages. Only follows links within the same domain as the starting URL. Costs 1 credit per successful page crawled.',
+ stainlessPath: '(resource) web > (method) web_crawl_md',
+ qualified: 'client.web.webCrawlMd',
+ params: [
+ 'url: string;',
+ 'followSubdomains?: boolean;',
+ 'includeImages?: boolean;',
+ 'includeLinks?: boolean;',
+ 'maxDepth?: number;',
+ 'maxPages?: number;',
+ 'shortenBase64Images?: boolean;',
+ 'urlRegex?: string;',
+ 'useMainContentOnly?: boolean;',
+ ],
+ response:
+ '{ metadata: { maxCrawlDepth: number; numFailed: number; numSucceeded: number; numUrls: number; }; results: { markdown: string; metadata: { crawlDepth: number; statusCode: number; success: boolean; title: string; url: string; }; }[]; }',
+ markdown:
+ "## web_crawl_md\n\n`client.web.webCrawlMd(url: string, followSubdomains?: boolean, includeImages?: boolean, includeLinks?: boolean, maxDepth?: number, maxPages?: number, shortenBase64Images?: boolean, urlRegex?: string, useMainContentOnly?: boolean): { metadata: object; results: object[]; }`\n\n**post** `/web/crawl`\n\nPerforms a crawl starting from a given URL, extracts page content as Markdown, and returns results for all crawled pages. Only follows links within the same domain as the starting URL. Costs 1 credit per successful page crawled.\n\n### Parameters\n\n- `url: string`\n The starting URL for the crawl (must include http:// or https:// protocol)\n\n- `followSubdomains?: boolean`\n When true, follow links on subdomains of the starting URL's domain (e.g. docs.example.com when starting from example.com). www and apex are always treated as equivalent.\n\n- `includeImages?: boolean`\n Include image references in the Markdown output\n\n- `includeLinks?: boolean`\n Preserve hyperlinks in the Markdown output\n\n- `maxDepth?: number`\n Maximum link depth from the starting URL (0 = only the starting page)\n\n- `maxPages?: number`\n Maximum number of pages to crawl. Hard cap: 500.\n\n- `shortenBase64Images?: boolean`\n Truncate base64-encoded image data in the Markdown output\n\n- `urlRegex?: string`\n Regex pattern. 
Only URLs matching this pattern will be followed and scraped.\n\n- `useMainContentOnly?: boolean`\n Extract only the main content, stripping headers, footers, sidebars, and navigation\n\n### Returns\n\n- `{ metadata: { maxCrawlDepth: number; numFailed: number; numSucceeded: number; numUrls: number; }; results: { markdown: string; metadata: { crawlDepth: number; statusCode: number; success: boolean; title: string; url: string; }; }[]; }`\n\n - `metadata: { maxCrawlDepth: number; numFailed: number; numSucceeded: number; numUrls: number; }`\n - `results: { markdown: string; metadata: { crawlDepth: number; statusCode: number; success: boolean; title: string; url: string; }; }[]`\n\n### Example\n\n```typescript\nimport ContextDev from 'context.dev';\n\nconst client = new ContextDev();\n\nconst response = await client.web.webCrawlMd({ url: 'https://example.com' });\n\nconsole.log(response);\n```",
+ perLanguage: {
+ http: {
+ example:
+ 'curl https://api.context.dev/v1/web/crawl \\\n -H \'Content-Type: application/json\' \\\n -H "Authorization: Bearer $CONTEXT_DEV_API_KEY" \\\n -d \'{\n "url": "https://example.com"\n }\'',
+ },
+ python: {
+ method: 'web.web_crawl_md',
+ example:
+ 'import os\nfrom context.dev import ContextDev\n\nclient = ContextDev(\n api_key=os.environ.get("CONTEXT_DEV_API_KEY"), # This is the default and can be omitted\n)\nresponse = client.web.web_crawl_md(\n url="https://example.com",\n)\nprint(response.metadata)',
+ },
+ ruby: {
+ method: 'web.web_crawl_md',
+ example:
+ 'require "context_dev"\n\ncontext_dev = ContextDev::Client.new(api_key: "My API Key")\n\nresponse = context_dev.web.web_crawl_md(url: "https://example.com")\n\nputs(response)',
+ },
+ typescript: {
+ method: 'client.web.webCrawlMd',
+ example:
+ "import ContextDev from 'context.dev';\n\nconst client = new ContextDev({\n apiKey: process.env['CONTEXT_DEV_API_KEY'], // This is the default and can be omitted\n});\n\nconst response = await client.web.webCrawlMd({ url: 'https://example.com' });\n\nconsole.log(response.metadata);",
+ },
+ },
+ },
{
name: 'extract_products',
endpoint: '/brand/ai/products',
diff --git a/packages/mcp-server/src/methods.ts b/packages/mcp-server/src/methods.ts
index 9a8c516..7744ca3 100644
--- a/packages/mcp-server/src/methods.ts
+++ b/packages/mcp-server/src/methods.ts
@@ -16,6 +16,12 @@ export const sdkMethods: SdkMethod[] = [
httpMethod: 'get',
httpPath: '/brand/screenshot',
},
+ {
+ clientCallName: 'client.web.webCrawlMd',
+ fullyQualifiedName: 'web.webCrawlMd',
+ httpMethod: 'post',
+ httpPath: '/web/crawl',
+ },
{
clientCallName: 'client.web.webScrapeHTML',
fullyQualifiedName: 'web.webScrapeHTML',
diff --git a/packages/mcp-server/src/server.ts b/packages/mcp-server/src/server.ts
index 2267fd9..ab2ddbb 100644
--- a/packages/mcp-server/src/server.ts
+++ b/packages/mcp-server/src/server.ts
@@ -28,7 +28,7 @@ export const newMcpServer = async ({
new McpServer(
{
name: 'context_dev_api',
- version: '1.2.0',
+ version: '1.3.0',
},
{
instructions: await getInstructions({ stainlessApiKey, customInstructionsPath }),
diff --git a/src/client.ts b/src/client.ts
index 6bd7f61..5525912 100644
--- a/src/client.ts
+++ b/src/client.ts
@@ -62,6 +62,8 @@ import {
Web,
WebScreenshotParams,
WebScreenshotResponse,
+ WebWebCrawlMdParams,
+ WebWebCrawlMdResponse,
WebWebScrapeHTMLParams,
WebWebScrapeHTMLResponse,
WebWebScrapeImagesParams,
@@ -792,11 +794,13 @@ export declare namespace ContextDev {
export {
Web as Web,
type WebScreenshotResponse as WebScreenshotResponse,
+ type WebWebCrawlMdResponse as WebWebCrawlMdResponse,
type WebWebScrapeHTMLResponse as WebWebScrapeHTMLResponse,
type WebWebScrapeImagesResponse as WebWebScrapeImagesResponse,
type WebWebScrapeMdResponse as WebWebScrapeMdResponse,
type WebWebScrapeSitemapResponse as WebWebScrapeSitemapResponse,
type WebScreenshotParams as WebScreenshotParams,
+ type WebWebCrawlMdParams as WebWebCrawlMdParams,
type WebWebScrapeHTMLParams as WebWebScrapeHTMLParams,
type WebWebScrapeImagesParams as WebWebScrapeImagesParams,
type WebWebScrapeMdParams as WebWebScrapeMdParams,
diff --git a/src/resources/index.ts b/src/resources/index.ts
index 8884bad..95a8754 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -44,11 +44,13 @@ export {
export {
Web,
type WebScreenshotResponse,
+ type WebWebCrawlMdResponse,
type WebWebScrapeHTMLResponse,
type WebWebScrapeImagesResponse,
type WebWebScrapeMdResponse,
type WebWebScrapeSitemapResponse,
type WebScreenshotParams,
+ type WebWebCrawlMdParams,
type WebWebScrapeHTMLParams,
type WebWebScrapeImagesParams,
type WebWebScrapeMdParams,
diff --git a/src/resources/web.ts b/src/resources/web.ts
index 7e4789d..0040ff3 100644
--- a/src/resources/web.ts
+++ b/src/resources/web.ts
@@ -15,6 +15,15 @@ export class Web extends APIResource {
return this._client.get('/brand/screenshot', { query, ...options });
}
+ /**
+ * Performs a crawl starting from a given URL, extracts page content as Markdown,
+ * and returns results for all crawled pages. Only follows links within the same
+ * domain as the starting URL. Costs 1 credit per successful page crawled.
+ */
+ webCrawlMd(body: WebWebCrawlMdParams, options?: RequestOptions): APIPromise<WebWebCrawlMdResponse> {
+ return this._client.post('/web/crawl', { body, ...options });
+ }
+
/**
* Scrapes the given URL and returns the raw HTML content of the page.
*/
@@ -85,6 +94,74 @@ export interface WebScreenshotResponse {
status?: string;
}
+export interface WebWebCrawlMdResponse {
+ metadata: WebWebCrawlMdResponse.Metadata;
+
+ results: Array<WebWebCrawlMdResponse.Result>;
+}
+
+export namespace WebWebCrawlMdResponse {
+ export interface Metadata {
+ /**
+ * Maximum crawl depth reached during the crawl
+ */
+ maxCrawlDepth: number;
+
+ /**
+ * Number of pages that failed to crawl
+ */
+ numFailed: number;
+
+ /**
+ * Number of pages successfully crawled
+ */
+ numSucceeded: number;
+
+ /**
+ * Total number of URLs crawled
+ */
+ numUrls: number;
+ }
+
+ export interface Result {
+ /**
+ * Extracted page content as Markdown (empty string on failure)
+ */
+ markdown: string;
+
+ metadata: Result.Metadata;
+ }
+
+ export namespace Result {
+ export interface Metadata {
+ /**
+ * Depth relative to the start URL. 0 = start URL, 1 = one link away.
+ */
+ crawlDepth: number;
+
+ /**
+ * HTTP status code of the response
+ */
+ statusCode: number;
+
+ /**
+ * true if the page was fetched and parsed successfully
+ */
+ success: boolean;
+
+ /**
+ * The page's content (empty string if unavailable)
+ */
+ title: string;
+
+ /**
+ * The URL that was fetched
+ */
+ url: string;
+ }
+ }
+}
+
export interface WebWebScrapeHTMLResponse {
/**
* Raw HTML content of the page
@@ -239,6 +316,56 @@ export interface WebScreenshotParams {
prioritize?: 'speed' | 'quality';
}
+export interface WebWebCrawlMdParams {
+ /**
+ * The starting URL for the crawl (must include http:// or https:// protocol)
+ */
+ url: string;
+
+ /**
+ * When true, follow links on subdomains of the starting URL's domain (e.g.
+ * docs.example.com when starting from example.com). www and apex are always
+ * treated as equivalent.
+ */
+ followSubdomains?: boolean;
+
+ /**
+ * Include image references in the Markdown output
+ */
+ includeImages?: boolean;
+
+ /**
+ * Preserve hyperlinks in the Markdown output
+ */
+ includeLinks?: boolean;
+
+ /**
+ * Maximum link depth from the starting URL (0 = only the starting page)
+ */
+ maxDepth?: number;
+
+ /**
+ * Maximum number of pages to crawl. Hard cap: 500.
+ */
+ maxPages?: number;
+
+ /**
+ * Truncate base64-encoded image data in the Markdown output
+ */
+ shortenBase64Images?: boolean;
+
+ /**
+ * Regex pattern. Only URLs matching this pattern will be followed and scraped.
+ */
+ urlRegex?: string;
+
+ /**
+ * Extract only the main content, stripping headers, footers, sidebars, and
+ * navigation
+ */
+ useMainContentOnly?: boolean;
+}
+
export interface WebWebScrapeHTMLParams {
/**
* Full URL to scrape (must include http:// or https:// protocol)
@@ -299,11 +426,13 @@ export interface WebWebScrapeSitemapParams {
export declare namespace Web {
export {
type WebScreenshotResponse as WebScreenshotResponse,
+ type WebWebCrawlMdResponse as WebWebCrawlMdResponse,
type WebWebScrapeHTMLResponse as WebWebScrapeHTMLResponse,
type WebWebScrapeImagesResponse as WebWebScrapeImagesResponse,
type WebWebScrapeMdResponse as WebWebScrapeMdResponse,
type WebWebScrapeSitemapResponse as WebWebScrapeSitemapResponse,
type WebScreenshotParams as WebScreenshotParams,
+ type WebWebCrawlMdParams as WebWebCrawlMdParams,
type WebWebScrapeHTMLParams as WebWebScrapeHTMLParams,
type WebWebScrapeImagesParams as WebWebScrapeImagesParams,
type WebWebScrapeMdParams as WebWebScrapeMdParams,
diff --git a/src/version.ts b/src/version.ts
index 54c8a47..39fa5bc 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '1.2.0'; // x-release-please-version
+export const VERSION = '1.3.0'; // x-release-please-version
diff --git a/tests/api-resources/web.test.ts b/tests/api-resources/web.test.ts
index 794f3e5..f4a8d7f 100644
--- a/tests/api-resources/web.test.ts
+++ b/tests/api-resources/web.test.ts
@@ -30,6 +30,33 @@ describe('resource web', () => {
});
});
+ // Mock server tests are disabled
+ test.skip('webCrawlMd: only required params', async () => {
+ const responsePromise = client.web.webCrawlMd({ url: 'https://example.com' });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ // Mock server tests are disabled
+ test.skip('webCrawlMd: required and optional params', async () => {
+ const response = await client.web.webCrawlMd({
+ url: 'https://example.com',
+ followSubdomains: true,
+ includeImages: true,
+ includeLinks: true,
+ maxDepth: 0,
+ maxPages: 1,
+ shortenBase64Images: true,
+ urlRegex: 'urlRegex',
+ useMainContentOnly: true,
+ });
+ });
+
// Mock server tests are disabled
test.skip('webScrapeHTML: only required params', async () => {
const responsePromise = client.web.webScrapeHTML({ url: 'https://example.com' });