diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index cf198b9..a09efe7 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.11.0"
+  ".": "1.12.0"
 }
diff --git a/.stats.yml b/.stats.yml
index dd3356f..d892d2f 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 22
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/context-dev/context.dev-449cbc8437530f305b8e3ed49465d5faf720505a77f539bc8078162139fbbb69.yml
-openapi_spec_hash: 8e1dd204746bcfaaf637d93b1d339eb2
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/context-dev/context.dev-3ab7c195646f05374a224a980cab48a6c9844ba7aedf9dba28c7fd8273b173f2.yml
+openapi_spec_hash: b10ee8536928665190a110738964fccb
 config_hash: ff13935d1231ad44cd3822ffe39904b4
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 47812e4..06d89de 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,13 @@
 # Changelog
 
+## 1.12.0 (2026-05-01)
+
+Full Changelog: [v1.11.0...v1.12.0](https://github.com/context-dot-dev/context-typescript-sdk/compare/v1.11.0...v1.12.0)
+
+### Features
+
+* **api:** api update ([d63b80e](https://github.com/context-dot-dev/context-typescript-sdk/commit/d63b80e2306ffd8757ab238c691688ab22a1f69f))
+
 ## 1.11.0 (2026-05-01)
 
 Full Changelog: [v1.10.0...v1.11.0](https://github.com/context-dot-dev/context-typescript-sdk/compare/v1.10.0...v1.11.0)
diff --git a/package.json b/package.json
index 22a81e2..e365140 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "context.dev",
-  "version": "1.11.0",
+  "version": "1.12.0",
   "description": "The official TypeScript library for the Context Dev API",
   "author": "Context Dev ",
   "types": "dist/index.d.ts",
diff --git a/packages/mcp-server/manifest.json b/packages/mcp-server/manifest.json
index e8aa01e..1ceafd2 100644
--- a/packages/mcp-server/manifest.json
+++ b/packages/mcp-server/manifest.json
@@ -1,7 +1,7 @@
 {
   "dxt_version": "0.2",
   "name": "context-dev-mcp",
-  "version": "1.11.0",
+  "version": "1.12.0",
   "description": "The official MCP Server for the Context Dev API",
   "author": {
     "name": "Context Dev",
diff --git a/packages/mcp-server/package.json b/packages/mcp-server/package.json
index 671eeeb..6d2aaf2 100644
--- a/packages/mcp-server/package.json
+++ b/packages/mcp-server/package.json
@@ -1,6 +1,6 @@
 {
   "name": "context-dev-mcp",
-  "version": "1.11.0",
+  "version": "1.12.0",
   "description": "The official MCP Server for the Context Dev API",
   "author": "Context Dev ",
   "types": "dist/index.d.ts",
diff --git a/packages/mcp-server/src/local-docs-search.ts b/packages/mcp-server/src/local-docs-search.ts
index 8fa51e2..58aed6f 100644
--- a/packages/mcp-server/src/local-docs-search.ts
+++ b/packages/mcp-server/src/local-docs-search.ts
@@ -58,10 +58,10 @@ const EMBEDDED_METHODS: MethodEntry[] = [
     description: 'Scrapes the given URL and returns the raw HTML content of the page.',
     stainlessPath: '(resource) web > (method) web_scrape_html',
     qualified: 'client.web.webScrapeHTML',
-    params: ['url: string;', 'maxAgeMs?: number;', 'parsePDF?: boolean;'],
+    params: ['url: string;', 'includeFrames?: boolean;', 'maxAgeMs?: number;', 'parsePDF?: boolean;'],
     response: '{ html: string; success: true; url: string; }',
     markdown:
-      "## web_scrape_html\n\n`client.web.webScrapeHTML(url: string, maxAgeMs?: number, parsePDF?: boolean): { html: string; success: true; url: string; }`\n\n**get** `/web/scrape/html`\n\nScrapes the given URL and returns the raw HTML content of the page.\n\n### Parameters\n\n- `url: string`\n Full URL to scrape (must include http:// or https:// protocol)\n\n- `maxAgeMs?: number`\n Return a cached result if a prior scrape for the same parameters exists and is younger than this many milliseconds. Defaults to 1 day (86400000 ms) when omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.\n\n- `parsePDF?: boolean`\n When true (default), PDF URLs are fetched and their text layer is extracted and returned wrapped in . When false, PDF URLs are skipped and a 400 WEBSITE_ACCESS_ERROR is returned.\n\n### Returns\n\n- `{ html: string; success: true; url: string; }`\n\n - `html: string`\n - `success: true`\n - `url: string`\n\n### Example\n\n```typescript\nimport ContextDev from 'context.dev';\n\nconst client = new ContextDev();\n\nconst response = await client.web.webScrapeHTML({ url: 'https://example.com' });\n\nconsole.log(response);\n```",
+      "## web_scrape_html\n\n`client.web.webScrapeHTML(url: string, includeFrames?: boolean, maxAgeMs?: number, parsePDF?: boolean): { html: string; success: true; url: string; }`\n\n**get** `/web/scrape/html`\n\nScrapes the given URL and returns the raw HTML content of the page.\n\n### Parameters\n\n- `url: string`\n Full URL to scrape (must include http:// or https:// protocol)\n\n- `includeFrames?: boolean`\n When true, iframes are rendered inline into the returned HTML.\n\n- `maxAgeMs?: number`\n Return a cached result if a prior scrape for the same parameters exists and is younger than this many milliseconds. Defaults to 1 day (86400000 ms) when omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.\n\n- `parsePDF?: boolean`\n When true (default), PDF URLs are fetched and their text layer is extracted and returned wrapped in . When false, PDF URLs are skipped and a 400 WEBSITE_ACCESS_ERROR is returned.\n\n### Returns\n\n- `{ html: string; success: true; url: string; }`\n\n - `html: string`\n - `success: true`\n - `url: string`\n\n### Example\n\n```typescript\nimport ContextDev from 'context.dev';\n\nconst client = new ContextDev();\n\nconst response = await client.web.webScrapeHTML({ url: 'https://example.com' });\n\nconsole.log(response);\n```",
     perLanguage: {
       typescript: {
         method: 'client.web.webScrapeHTML',
@@ -94,6 +94,7 @@ const EMBEDDED_METHODS: MethodEntry[] = [
     qualified: 'client.web.webScrapeMd',
     params: [
       'url: string;',
+      'includeFrames?: boolean;',
       'includeImages?: boolean;',
       'includeLinks?: boolean;',
       'maxAgeMs?: number;',
@@ -103,7 +104,7 @@ const EMBEDDED_METHODS: MethodEntry[] = [
     ],
     response: '{ markdown: string; success: true; url: string; }',
     markdown:
-      "## web_scrape_md\n\n`client.web.webScrapeMd(url: string, includeImages?: boolean, includeLinks?: boolean, maxAgeMs?: number, parsePDF?: boolean, shortenBase64Images?: boolean, useMainContentOnly?: boolean): { markdown: string; success: true; url: string; }`\n\n**get** `/web/scrape/markdown`\n\nScrapes the given URL into LLM usable Markdown.\n\n### Parameters\n\n- `url: string`\n Full URL to scrape into LLM usable Markdown (must include http:// or https:// protocol)\n\n- `includeImages?: boolean`\n Include image references in Markdown output\n\n- `includeLinks?: boolean`\n Preserve hyperlinks in Markdown output\n\n- `maxAgeMs?: number`\n Return a cached result if a prior scrape for the same parameters exists and is younger than this many milliseconds. Defaults to 1 day (86400000 ms) when omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.\n\n- `parsePDF?: boolean`\n When true (default), PDF URLs are fetched and their text layer is extracted and converted to Markdown. When false, PDF URLs are skipped and a 400 WEBSITE_ACCESS_ERROR is returned.\n\n- `shortenBase64Images?: boolean`\n Shorten base64-encoded image data in the Markdown output\n\n- `useMainContentOnly?: boolean`\n Extract only the main content of the page, excluding headers, footers, sidebars, and navigation\n\n### Returns\n\n- `{ markdown: string; success: true; url: string; }`\n\n - `markdown: string`\n - `success: true`\n - `url: string`\n\n### Example\n\n```typescript\nimport ContextDev from 'context.dev';\n\nconst client = new ContextDev();\n\nconst response = await client.web.webScrapeMd({ url: 'https://example.com' });\n\nconsole.log(response);\n```",
+      "## web_scrape_md\n\n`client.web.webScrapeMd(url: string, includeFrames?: boolean, includeImages?: boolean, includeLinks?: boolean, maxAgeMs?: number, parsePDF?: boolean, shortenBase64Images?: boolean, useMainContentOnly?: boolean): { markdown: string; success: true; url: string; }`\n\n**get** `/web/scrape/markdown`\n\nScrapes the given URL into LLM usable Markdown.\n\n### Parameters\n\n- `url: string`\n Full URL to scrape into LLM usable Markdown (must include http:// or https:// protocol)\n\n- `includeFrames?: boolean`\n When true, the contents of iframes are rendered to Markdown.\n\n- `includeImages?: boolean`\n Include image references in Markdown output\n\n- `includeLinks?: boolean`\n Preserve hyperlinks in Markdown output\n\n- `maxAgeMs?: number`\n Return a cached result if a prior scrape for the same parameters exists and is younger than this many milliseconds. Defaults to 1 day (86400000 ms) when omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.\n\n- `parsePDF?: boolean`\n When true (default), PDF URLs are fetched and their text layer is extracted and converted to Markdown. When false, PDF URLs are skipped and a 400 WEBSITE_ACCESS_ERROR is returned.\n\n- `shortenBase64Images?: boolean`\n Shorten base64-encoded image data in the Markdown output\n\n- `useMainContentOnly?: boolean`\n Extract only the main content of the page, excluding headers, footers, sidebars, and navigation\n\n### Returns\n\n- `{ markdown: string; success: true; url: string; }`\n\n - `markdown: string`\n - `success: true`\n - `url: string`\n\n### Example\n\n```typescript\nimport ContextDev from 'context.dev';\n\nconst client = new ContextDev();\n\nconst response = await client.web.webScrapeMd({ url: 'https://example.com' });\n\nconsole.log(response);\n```",
     perLanguage: {
       typescript: {
         method: 'client.web.webScrapeMd',
@@ -250,6 +251,7 @@ const EMBEDDED_METHODS: MethodEntry[] = [
     params: [
       'url: string;',
       'followSubdomains?: boolean;',
+      'includeFrames?: boolean;',
       'includeImages?: boolean;',
       'includeLinks?: boolean;',
       'maxAgeMs?: number;',
@@ -263,7 +265,7 @@ const EMBEDDED_METHODS: MethodEntry[] = [
     response:
      '{ metadata: { maxCrawlDepth: number; numFailed: number; numSkipped: number; numSucceeded: number; numUrls: number; }; results: { markdown: string; metadata: { crawlDepth: number; statusCode: number; success: boolean; title: string; url: string; }; }[]; }',
     markdown:
-      "## web_crawl_md\n\n`client.web.webCrawlMd(url: string, followSubdomains?: boolean, includeImages?: boolean, includeLinks?: boolean, maxAgeMs?: number, maxDepth?: number, maxPages?: number, parsePDF?: boolean, shortenBase64Images?: boolean, urlRegex?: string, useMainContentOnly?: boolean): { metadata: object; results: object[]; }`\n\n**post** `/web/crawl`\n\nPerforms a crawl starting from a given URL, extracts page content as Markdown, and returns results for all crawled pages.\n\n### Parameters\n\n- `url: string`\n The starting URL for the crawl (must include http:// or https:// protocol)\n\n- `followSubdomains?: boolean`\n When true, follow links on subdomains of the starting URL's domain (e.g. docs.example.com when starting from example.com). www and apex are always treated as equivalent.\n\n- `includeImages?: boolean`\n Include image references in the Markdown output\n\n- `includeLinks?: boolean`\n Preserve hyperlinks in the Markdown output\n\n- `maxAgeMs?: number`\n Return a cached result if a prior scrape for the same parameters exists and is younger than this many milliseconds. Defaults to 1 day (86400000 ms) when omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.\n\n- `maxDepth?: number`\n Maximum link depth from the starting URL (0 = only the starting page)\n\n- `maxPages?: number`\n Maximum number of pages to crawl. Hard cap: 500.\n\n- `parsePDF?: boolean`\n When true (default), PDF pages are fetched and their text layer is extracted and converted to Markdown alongside HTML pages. When false, PDF pages are skipped entirely (not included in results and not counted as failures).\n\n- `shortenBase64Images?: boolean`\n Truncate base64-encoded image data in the Markdown output\n\n- `urlRegex?: string`\n Regex pattern. Only URLs matching this pattern will be followed and scraped.\n\n- `useMainContentOnly?: boolean`\n Extract only the main content, stripping headers, footers, sidebars, and navigation\n\n### Returns\n\n- `{ metadata: { maxCrawlDepth: number; numFailed: number; numSkipped: number; numSucceeded: number; numUrls: number; }; results: { markdown: string; metadata: { crawlDepth: number; statusCode: number; success: boolean; title: string; url: string; }; }[]; }`\n\n - `metadata: { maxCrawlDepth: number; numFailed: number; numSkipped: number; numSucceeded: number; numUrls: number; }`\n - `results: { markdown: string; metadata: { crawlDepth: number; statusCode: number; success: boolean; title: string; url: string; }; }[]`\n\n### Example\n\n```typescript\nimport ContextDev from 'context.dev';\n\nconst client = new ContextDev();\n\nconst response = await client.web.webCrawlMd({ url: 'https://example.com' });\n\nconsole.log(response);\n```",
+      "## web_crawl_md\n\n`client.web.webCrawlMd(url: string, followSubdomains?: boolean, includeFrames?: boolean, includeImages?: boolean, includeLinks?: boolean, maxAgeMs?: number, maxDepth?: number, maxPages?: number, parsePDF?: boolean, shortenBase64Images?: boolean, urlRegex?: string, useMainContentOnly?: boolean): { metadata: object; results: object[]; }`\n\n**post** `/web/crawl`\n\nPerforms a crawl starting from a given URL, extracts page content as Markdown, and returns results for all crawled pages.\n\n### Parameters\n\n- `url: string`\n The starting URL for the crawl (must include http:// or https:// protocol)\n\n- `followSubdomains?: boolean`\n When true, follow links on subdomains of the starting URL's domain (e.g. docs.example.com when starting from example.com). www and apex are always treated as equivalent.\n\n- `includeFrames?: boolean`\n When true, the contents of iframes are rendered to Markdown for each crawled page.\n\n- `includeImages?: boolean`\n Include image references in the Markdown output\n\n- `includeLinks?: boolean`\n Preserve hyperlinks in the Markdown output\n\n- `maxAgeMs?: number`\n Return a cached result if a prior scrape for the same parameters exists and is younger than this many milliseconds. Defaults to 1 day (86400000 ms) when omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.\n\n- `maxDepth?: number`\n Maximum link depth from the starting URL (0 = only the starting page)\n\n- `maxPages?: number`\n Maximum number of pages to crawl. Hard cap: 500.\n\n- `parsePDF?: boolean`\n When true (default), PDF pages are fetched and their text layer is extracted and converted to Markdown alongside HTML pages. When false, PDF pages are skipped entirely (not included in results and not counted as failures).\n\n- `shortenBase64Images?: boolean`\n Truncate base64-encoded image data in the Markdown output\n\n- `urlRegex?: string`\n Regex pattern. Only URLs matching this pattern will be followed and scraped.\n\n- `useMainContentOnly?: boolean`\n Extract only the main content, stripping headers, footers, sidebars, and navigation\n\n### Returns\n\n- `{ metadata: { maxCrawlDepth: number; numFailed: number; numSkipped: number; numSucceeded: number; numUrls: number; }; results: { markdown: string; metadata: { crawlDepth: number; statusCode: number; success: boolean; title: string; url: string; }; }[]; }`\n\n - `metadata: { maxCrawlDepth: number; numFailed: number; numSkipped: number; numSucceeded: number; numUrls: number; }`\n - `results: { markdown: string; metadata: { crawlDepth: number; statusCode: number; success: boolean; title: string; url: string; }; }[]`\n\n### Example\n\n```typescript\nimport ContextDev from 'context.dev';\n\nconst client = new ContextDev();\n\nconst response = await client.web.webCrawlMd({ url: 'https://example.com' });\n\nconsole.log(response);\n```",
     perLanguage: {
       typescript: {
         method: 'client.web.webCrawlMd',
diff --git a/packages/mcp-server/src/server.ts b/packages/mcp-server/src/server.ts
index e6b1e47..78b0329 100644
--- a/packages/mcp-server/src/server.ts
+++ b/packages/mcp-server/src/server.ts
@@ -28,7 +28,7 @@ export const newMcpServer = async ({
   new McpServer(
     {
       name: 'context_dev_api',
-      version: '1.11.0',
+      version: '1.12.0',
     },
     {
       instructions: await getInstructions({ stainlessApiKey, customInstructionsPath }),
diff --git a/src/resources/web.ts b/src/resources/web.ts
index 8803ef4..0918187 100644
--- a/src/resources/web.ts
+++ b/src/resources/web.ts
@@ -1062,6 +1062,12 @@ export interface WebWebCrawlMdParams {
    */
  followSubdomains?: boolean;
 
+  /**
+   * When true, the contents of iframes are rendered to Markdown for each crawled
+   * page.
+   */
+  includeFrames?: boolean;
+
   /**
    * Include image references in the Markdown output
    */
   includeImages?: boolean;
@@ -1119,6 +1125,11 @@ export interface WebWebScrapeHTMLParams {
    */
   url: string;
 
+  /**
+   * When true, iframes are rendered inline into the returned HTML.
+   */
+  includeFrames?: boolean;
+
   /**
    * Return a cached result if a prior scrape for the same parameters exists and is
    * younger than this many milliseconds. Defaults to 1 day (86400000 ms) when
@@ -1148,6 +1159,11 @@ export interface WebWebScrapeMdParams {
    */
   url: string;
 
+  /**
+   * When true, the contents of iframes are rendered to Markdown.
+   */
+  includeFrames?: boolean;
+
   /**
    * Include image references in Markdown output
    */
diff --git a/src/version.ts b/src/version.ts
index c5ad6f9..f73650e 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '1.11.0'; // x-release-please-version
+export const VERSION = '1.12.0'; // x-release-please-version
diff --git a/tests/api-resources/web.test.ts b/tests/api-resources/web.test.ts
index 2724a77..7382421 100644
--- a/tests/api-resources/web.test.ts
+++ b/tests/api-resources/web.test.ts
@@ -108,6 +108,7 @@ describe('resource web', () => {
     const response = await client.web.webCrawlMd({
       url: 'https://example.com',
       followSubdomains: true,
+      includeFrames: true,
       includeImages: true,
       includeLinks: true,
       maxAgeMs: 0,
@@ -136,6 +137,7 @@
   test.skip('webScrapeHTML: required and optional params', async () => {
     const response = await client.web.webScrapeHTML({
       url: 'https://example.com',
+      includeFrames: true,
       maxAgeMs: 0,
       parsePDF: true,
     });
@@ -174,6 +176,7 @@
   test.skip('webScrapeMd: required and optional params', async () => {
     const response = await client.web.webScrapeMd({
       url: 'https://example.com',
+      includeFrames: true,
       includeImages: true,
       includeLinks: true,
       maxAgeMs: 0,
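Usage note (not part of the diff): a minimal sketch of how the new `includeFrames` option introduced in 1.12.0 might be exercised across the three affected methods. It assumes the default client construction shown in the embedded docs (`new ContextDev()`, with credentials resolved from the environment); the URL is a placeholder.

```typescript
import ContextDev from 'context.dev';

// Assumes default construction as in the embedded docs; any required
// credentials are expected to be picked up from the environment.
const client = new ContextDev();

async function main() {
  // New in 1.12.0: render iframe contents inline into the returned HTML.
  const htmlRes = await client.web.webScrapeHTML({
    url: 'https://example.com',
    includeFrames: true,
  });
  console.log(htmlRes.html.length);

  // Render iframe contents to Markdown; maxAgeMs: 0 forces a fresh scrape
  // instead of a cached result (cache defaults to 1 day, max 30 days).
  const mdRes = await client.web.webScrapeMd({
    url: 'https://example.com',
    includeFrames: true,
    maxAgeMs: 0,
  });
  console.log(mdRes.markdown);

  // Render iframe contents to Markdown for each crawled page.
  const crawlRes = await client.web.webCrawlMd({
    url: 'https://example.com',
    includeFrames: true,
    maxDepth: 1, // 0 = only the starting page
  });
  console.log(`${crawlRes.metadata.numSucceeded}/${crawlRes.metadata.numUrls} pages succeeded`);
}

main();
```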