diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index b5fcdb9..ba2c585 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "1.43.0"
+ ".": "1.44.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index bee881e..6eedefc 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 20
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/brand-dev%2Fbrand.dev-05a30711e18b0023520a660352d75595a050d1299bf0e3ee4a8cf55ded36aea2.yml
-openapi_spec_hash: 8d0e1115a7d864f27c55cec3255d1e77
+configured_endpoints: 16
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/brand-dev/brand.dev-ca091b4b323baadb654b7292374b8712b6de260f3c1f8f0944caf20795f608ad.yml
+openapi_spec_hash: 2bc6f537bfa055541621423722ea85f2
config_hash: 91cf2dcefb99c39eb9cd3e98e15d6011
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d135655..16c0589 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,51 @@
# Changelog
+## 1.44.0 (2026-05-01)
+
+Full Changelog: [v1.43.0...v1.44.0](https://github.com/context-dot-dev/deprecated-brand-python-sdk/compare/v1.43.0...v1.44.0)
+
+### Features
+
+* **api:** api update ([34da1c0](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/34da1c09d1e5883a6134fba8bef1a2622a841a3d))
+* **api:** api update ([5573ee7](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/5573ee7e42f5018a95e85e66460bfccc3d695a2a))
+* **api:** api update ([e4f1307](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/e4f1307a639a0c14467bd4c6af34eafe96b86033))
+* **api:** api update ([3e783d9](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/3e783d9ad471999f34ed2cbc92a58099f15ce9cb))
+* **api:** api update ([f9047c7](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/f9047c77841d0b1707cdeab18880d93dd2e6a115))
+* **api:** api update ([390e85c](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/390e85cad5c6199e1a3d03de72e77d54f0108234))
+* **api:** api update ([74f22a5](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/74f22a55a3d82b5adcc63d5f12218a162180ae95))
+* **api:** api update ([a04e938](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/a04e9389794725781408dc53d635226406d89cb2))
+* **api:** api update ([b4672ab](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/b4672ab67f11664113177977497c75e6c5ce82f8))
+* **api:** api update ([b494e66](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/b494e66bdee0f1c447ee587836aa74c7955da0aa))
+* **api:** api update ([af4c2fa](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/af4c2fa317c4a5cd2ef86e6562fe41ff7436daed))
+* **api:** api update ([9d4a64e](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/9d4a64e28b1321eda8463d8c86ec8a130c78c318))
+* **api:** api update ([f13cc2b](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/f13cc2be3ce7c3355168ba62bb3bd5ec5aaf4105))
+* **api:** api update ([93a9472](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/93a9472c495a1d34afa82ff7a6889e32ff706e3f))
+* **api:** api update ([bf48ceb](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/bf48ceb8693e734ab3da2b89e80f394d351215e6))
+* **api:** api update ([b5e8905](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/b5e8905b3e8554890bda8c7eb9e4255c363f1a81))
+* **api:** api update ([04fafa1](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/04fafa1b94a4047cc9fb3825ab84e6f517baefc4))
+* **api:** api update ([bd0f93f](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/bd0f93f4df85dc1e822ca6b59fb7ce594976ad49))
+* **api:** api update ([a388d39](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/a388d39c93b50ec075a553957081a45f64b73d39))
+* **api:** api update ([9213463](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/9213463afaf5d76d3b706a5aef49082c72409189))
+* support setting headers via env ([0ce56bb](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/0ce56bbd77bbb7170a90b838c9dff09616dea488))
+
+
+### Bug Fixes
+
+* **client:** preserve hardcoded query params when merging with user params ([a92d7b7](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/a92d7b7c66f52b259cbf31d938c8a70565190b5c))
+* ensure file data are only sent as 1 parameter ([9b72a51](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/9b72a51afe0d9992f3c52ee727c1ffc1ec76312f))
+* use correct field name format for multipart file arrays ([c74c369](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/c74c3699de55fd58f298fa9a9d378db7f677a344))
+
+
+### Performance Improvements
+
+* **client:** optimize file structure copying in multipart requests ([d7e4a18](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/d7e4a181dd0e9146f98bb51bb977300eac439c6e))
+
+
+### Chores
+
+* **internal:** more robust bootstrap script ([28007d9](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/28007d91dbe45b20fc33670466006d9934ac7c5a))
+* **internal:** reformat pyproject.toml ([3e37dca](https://github.com/context-dot-dev/deprecated-brand-python-sdk/commit/3e37dcaafaafaa677893794e66cec86afedb65ca))
+
## 1.43.0 (2026-04-03)
Full Changelog: [v1.42.0...v1.43.0](https://github.com/context-dot-dev/deprecated-brand-python-sdk/compare/v1.42.0...v1.43.0)
diff --git a/api.md b/api.md
index 8b5d705..7f3b993 100644
--- a/api.md
+++ b/api.md
@@ -8,7 +8,6 @@ from brand.dev.types import (
BrandAIProductResponse,
BrandAIProductsResponse,
BrandAIQueryResponse,
- BrandFontsResponse,
BrandIdentifyFromTransactionResponse,
BrandPrefetchResponse,
BrandPrefetchByEmailResponse,
@@ -16,10 +15,7 @@ from brand.dev.types import (
BrandRetrieveByIsinResponse,
BrandRetrieveByNameResponse,
BrandRetrieveByTickerResponse,
- BrandRetrieveNaicsResponse,
BrandRetrieveSimplifiedResponse,
- BrandScreenshotResponse,
- BrandStyleguideResponse,
BrandWebScrapeHTMLResponse,
BrandWebScrapeImagesResponse,
BrandWebScrapeMdResponse,
@@ -33,7 +29,6 @@ Methods:
- client.brand.ai_product(\*\*params) -> BrandAIProductResponse
- client.brand.ai_products(\*\*params) -> BrandAIProductsResponse
- client.brand.ai_query(\*\*params) -> BrandAIQueryResponse
-- client.brand.fonts(\*\*params) -> BrandFontsResponse
- client.brand.identify_from_transaction(\*\*params) -> BrandIdentifyFromTransactionResponse
- client.brand.prefetch(\*\*params) -> BrandPrefetchResponse
- client.brand.prefetch_by_email(\*\*params) -> BrandPrefetchByEmailResponse
@@ -41,10 +36,7 @@ Methods:
- client.brand.retrieve_by_isin(\*\*params) -> BrandRetrieveByIsinResponse
- client.brand.retrieve_by_name(\*\*params) -> BrandRetrieveByNameResponse
- client.brand.retrieve_by_ticker(\*\*params) -> BrandRetrieveByTickerResponse
-- client.brand.retrieve_naics(\*\*params) -> BrandRetrieveNaicsResponse
- client.brand.retrieve_simplified(\*\*params) -> BrandRetrieveSimplifiedResponse
-- client.brand.screenshot(\*\*params) -> BrandScreenshotResponse
-- client.brand.styleguide(\*\*params) -> BrandStyleguideResponse
- client.brand.web_scrape_html(\*\*params) -> BrandWebScrapeHTMLResponse
- client.brand.web_scrape_images(\*\*params) -> BrandWebScrapeImagesResponse
- client.brand.web_scrape_md(\*\*params) -> BrandWebScrapeMdResponse
diff --git a/pyproject.toml b/pyproject.toml
index 1ac8a24..8f59f08 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "brand.dev"
-version = "1.43.0"
+version = "1.44.0"
description = "The official Python library for the brand.dev API"
dynamic = ["readme"]
license = "Apache-2.0"
@@ -168,7 +168,7 @@ show_error_codes = true
#
# We also exclude our `tests` as mypy doesn't always infer
# types correctly and Pyright will still catch any type errors.
-exclude = ['src/brand/dev/_files.py', '_dev/.*.py', 'tests/.*']
+exclude = ["src/brand/dev/_files.py", "_dev/.*.py", "tests/.*"]
strict_equality = true
implicit_reexport = true
diff --git a/scripts/bootstrap b/scripts/bootstrap
index b430fee..fe8451e 100755
--- a/scripts/bootstrap
+++ b/scripts/bootstrap
@@ -4,7 +4,7 @@ set -e
cd "$(dirname "$0")/.."
-if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "$SKIP_BREW" != "1" ] && [ -t 0 ]; then
+if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "${SKIP_BREW:-}" != "1" ] && [ -t 0 ]; then
brew bundle check >/dev/null 2>&1 || {
echo -n "==> Install Homebrew dependencies? (y/N): "
read -r response
diff --git a/src/brand/dev/_base_client.py b/src/brand/dev/_base_client.py
index dc4af6b..ce73b2f 100644
--- a/src/brand/dev/_base_client.py
+++ b/src/brand/dev/_base_client.py
@@ -540,6 +540,10 @@ def _build_request(
files = cast(HttpxRequestFiles, ForceMultipartDict())
prepared_url = self._prepare_url(options.url)
+ # preserve hard-coded query params from the url
+ if params and prepared_url.query:
+ params = {**dict(prepared_url.params.items()), **params}
+ prepared_url = prepared_url.copy_with(raw_path=prepared_url.raw_path.split(b"?", 1)[0])
if "_" in prepared_url.host:
# work around https://github.com/encode/httpx/discussions/2880
kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")}
diff --git a/src/brand/dev/_client.py b/src/brand/dev/_client.py
index 5b46246..fa051a5 100644
--- a/src/brand/dev/_client.py
+++ b/src/brand/dev/_client.py
@@ -19,7 +19,11 @@
RequestOptions,
not_given,
)
-from ._utils import is_given, get_async_library
+from ._utils import (
+ is_given,
+ is_mapping_t,
+ get_async_library,
+)
from ._compat import cached_property
from ._version import __version__
from ._streaming import Stream as Stream, AsyncStream as AsyncStream
@@ -90,6 +94,15 @@ def __init__(
if base_url is None:
base_url = f"https://api.brand.dev/v1"
+ custom_headers_env = os.environ.get("BRAND_DEV_CUSTOM_HEADERS")
+ if custom_headers_env is not None:
+ parsed: dict[str, str] = {}
+ for line in custom_headers_env.split("\n"):
+ colon = line.find(":")
+ if colon >= 0:
+ parsed[line[:colon].strip()] = line[colon + 1 :].strip()
+ default_headers = {**parsed, **(default_headers if is_mapping_t(default_headers) else {})}
+
super().__init__(
version=__version__,
base_url=base_url,
@@ -264,6 +277,15 @@ def __init__(
if base_url is None:
base_url = f"https://api.brand.dev/v1"
+ custom_headers_env = os.environ.get("BRAND_DEV_CUSTOM_HEADERS")
+ if custom_headers_env is not None:
+ parsed: dict[str, str] = {}
+ for line in custom_headers_env.split("\n"):
+ colon = line.find(":")
+ if colon >= 0:
+ parsed[line[:colon].strip()] = line[colon + 1 :].strip()
+ default_headers = {**parsed, **(default_headers if is_mapping_t(default_headers) else {})}
+
super().__init__(
version=__version__,
base_url=base_url,
diff --git a/src/brand/dev/_files.py b/src/brand/dev/_files.py
index cc14c14..0fdce17 100644
--- a/src/brand/dev/_files.py
+++ b/src/brand/dev/_files.py
@@ -3,8 +3,8 @@
import io
import os
import pathlib
-from typing import overload
-from typing_extensions import TypeGuard
+from typing import Sequence, cast, overload
+from typing_extensions import TypeVar, TypeGuard
import anyio
@@ -17,7 +17,9 @@
HttpxFileContent,
HttpxRequestFiles,
)
-from ._utils import is_tuple_t, is_mapping_t, is_sequence_t
+from ._utils import is_list, is_mapping, is_tuple_t, is_mapping_t, is_sequence_t
+
+_T = TypeVar("_T")
def is_base64_file_input(obj: object) -> TypeGuard[Base64FileInput]:
@@ -121,3 +123,51 @@ async def async_read_file_content(file: FileContent) -> HttpxFileContent:
return await anyio.Path(file).read_bytes()
return file
+
+
+def deepcopy_with_paths(item: _T, paths: Sequence[Sequence[str]]) -> _T:
+ """Copy only the containers along the given paths.
+
+ Used to guard against mutation by extract_files without copying the entire structure.
+ Only dicts and lists that lie on a path are copied; everything else
+ is returned by reference.
+
+ For example, given paths=[["foo", "files", "file"]] and the structure:
+ {
+ "foo": {
+ "bar": {"baz": {}},
+ "files": {"file": <file object>}
+ }
+ }
+ The root dict, "foo", and "files" are copied (they lie on the path).
+ "bar" and "baz" are returned by reference (off the path).
+ """
+ return _deepcopy_with_paths(item, paths, 0)
+
+
+def _deepcopy_with_paths(item: _T, paths: Sequence[Sequence[str]], index: int) -> _T:
+ if not paths:
+ return item
+ if is_mapping(item):
+ key_to_paths: dict[str, list[Sequence[str]]] = {}
+ for path in paths:
+ if index < len(path):
+ key_to_paths.setdefault(path[index], []).append(path)
+
+ # if no path continues through this mapping, it won't be mutated and copying it is redundant
+ if not key_to_paths:
+ return item
+
+ result = dict(item)
+ for key, subpaths in key_to_paths.items():
+ if key in result:
+ result[key] = _deepcopy_with_paths(result[key], subpaths, index + 1)
+ return cast(_T, result)
+ if is_list(item):
+ array_paths = [path for path in paths if index < len(path) and path[index] == ""]
+
+ # if no path expects a list here, nothing will be mutated inside it - return by reference
+ if not array_paths:
+ return cast(_T, item)
+ return cast(_T, [_deepcopy_with_paths(entry, array_paths, index + 1) for entry in item])
+ return item
diff --git a/src/brand/dev/_qs.py b/src/brand/dev/_qs.py
index de8c99b..4127c19 100644
--- a/src/brand/dev/_qs.py
+++ b/src/brand/dev/_qs.py
@@ -2,17 +2,13 @@
from typing import Any, List, Tuple, Union, Mapping, TypeVar
from urllib.parse import parse_qs, urlencode
-from typing_extensions import Literal, get_args
+from typing_extensions import get_args
-from ._types import NotGiven, not_given
+from ._types import NotGiven, ArrayFormat, NestedFormat, not_given
from ._utils import flatten
_T = TypeVar("_T")
-
-ArrayFormat = Literal["comma", "repeat", "indices", "brackets"]
-NestedFormat = Literal["dots", "brackets"]
-
PrimitiveData = Union[str, int, float, bool, None]
# this should be Data = Union[PrimitiveData, "List[Data]", "Tuple[Data]", "Mapping[str, Data]"]
# https://github.com/microsoft/pyright/issues/3555
diff --git a/src/brand/dev/_types.py b/src/brand/dev/_types.py
index fce564e..eff6aae 100644
--- a/src/brand/dev/_types.py
+++ b/src/brand/dev/_types.py
@@ -47,6 +47,9 @@
ModelT = TypeVar("ModelT", bound=pydantic.BaseModel)
_T = TypeVar("_T")
+ArrayFormat = Literal["comma", "repeat", "indices", "brackets"]
+NestedFormat = Literal["dots", "brackets"]
+
# Approximates httpx internal ProxiesTypes and RequestFiles types
# while adding support for `PathLike` instances
diff --git a/src/brand/dev/_utils/__init__.py b/src/brand/dev/_utils/__init__.py
index 10cb66d..1c090e5 100644
--- a/src/brand/dev/_utils/__init__.py
+++ b/src/brand/dev/_utils/__init__.py
@@ -24,7 +24,6 @@
coerce_integer as coerce_integer,
file_from_path as file_from_path,
strip_not_given as strip_not_given,
- deepcopy_minimal as deepcopy_minimal,
get_async_library as get_async_library,
maybe_coerce_float as maybe_coerce_float,
get_required_header as get_required_header,
diff --git a/src/brand/dev/_utils/_utils.py b/src/brand/dev/_utils/_utils.py
index eec7f4a..199cd23 100644
--- a/src/brand/dev/_utils/_utils.py
+++ b/src/brand/dev/_utils/_utils.py
@@ -17,11 +17,11 @@
)
from pathlib import Path
from datetime import date, datetime
-from typing_extensions import TypeGuard
+from typing_extensions import TypeGuard, get_args
import sniffio
-from .._types import Omit, NotGiven, FileTypes, HeadersLike
+from .._types import Omit, NotGiven, FileTypes, ArrayFormat, HeadersLike
_T = TypeVar("_T")
_TupleT = TypeVar("_TupleT", bound=Tuple[object, ...])
@@ -40,25 +40,45 @@ def extract_files(
query: Mapping[str, object],
*,
paths: Sequence[Sequence[str]],
+ array_format: ArrayFormat = "brackets",
) -> list[tuple[str, FileTypes]]:
"""Recursively extract files from the given dictionary based on specified paths.
A path may look like this ['foo', 'files', '', 'data'].
+ ``array_format`` controls how ``""`` segments contribute to the emitted
+ field name. Supported values: ``"brackets"`` (``foo[]``), ``"repeat"`` and
+ ``"comma"`` (``foo``), ``"indices"`` (``foo[0]``, ``foo[1]``).
+
Note: this mutates the given dictionary.
"""
files: list[tuple[str, FileTypes]] = []
for path in paths:
- files.extend(_extract_items(query, path, index=0, flattened_key=None))
+ files.extend(_extract_items(query, path, index=0, flattened_key=None, array_format=array_format))
return files
+def _array_suffix(array_format: ArrayFormat, array_index: int) -> str:
+ if array_format == "brackets":
+ return "[]"
+ if array_format == "indices":
+ return f"[{array_index}]"
+ if array_format == "repeat" or array_format == "comma":
+ # Both repeat the bare field name for each file part; there is no
+ # meaningful way to comma-join binary parts.
+ return ""
+ raise NotImplementedError(
+ f"Unknown array_format value: {array_format}, choose from {', '.join(get_args(ArrayFormat))}"
+ )
+
+
def _extract_items(
obj: object,
path: Sequence[str],
*,
index: int,
flattened_key: str | None,
+ array_format: ArrayFormat,
) -> list[tuple[str, FileTypes]]:
try:
key = path[index]
@@ -75,9 +95,11 @@ def _extract_items(
if is_list(obj):
files: list[tuple[str, FileTypes]] = []
- for entry in obj:
- assert_is_file_content(entry, key=flattened_key + "[]" if flattened_key else "")
- files.append((flattened_key + "[]", cast(FileTypes, entry)))
+ for array_index, entry in enumerate(obj):
+ suffix = _array_suffix(array_format, array_index)
+ emitted_key = (flattened_key + suffix) if flattened_key else suffix
+ assert_is_file_content(entry, key=emitted_key)
+ files.append((emitted_key, cast(FileTypes, entry)))
return files
assert_is_file_content(obj, key=flattened_key)
@@ -86,8 +108,9 @@ def _extract_items(
index += 1
if is_dict(obj):
try:
- # We are at the last entry in the path so we must remove the field
- if (len(path)) == index:
+ # Remove the field if there are no more dict keys in the path,
+ # only "" traversal markers or end.
+ if all(p == "" for p in path[index:]):
item = obj.pop(key)
else:
item = obj[key]
@@ -105,6 +128,7 @@ def _extract_items(
path,
index=index,
flattened_key=flattened_key,
+ array_format=array_format,
)
elif is_list(obj):
if key != "":
@@ -116,9 +140,12 @@ def _extract_items(
item,
path,
index=index,
- flattened_key=flattened_key + "[]" if flattened_key is not None else "[]",
+ flattened_key=(
+ (flattened_key if flattened_key is not None else "") + _array_suffix(array_format, array_index)
+ ),
+ array_format=array_format,
)
- for item in obj
+ for array_index, item in enumerate(obj)
]
)
@@ -176,21 +203,6 @@ def is_iterable(obj: object) -> TypeGuard[Iterable[object]]:
return isinstance(obj, Iterable)
-def deepcopy_minimal(item: _T) -> _T:
- """Minimal reimplementation of copy.deepcopy() that will only copy certain object types:
-
- - mappings, e.g. `dict`
- - list
-
- This is done for performance reasons.
- """
- if is_mapping(item):
- return cast(_T, {k: deepcopy_minimal(v) for k, v in item.items()})
- if is_list(item):
- return cast(_T, [deepcopy_minimal(entry) for entry in item])
- return item
-
-
# copied from https://github.com/Rapptz/RoboDanny
def human_join(seq: Sequence[str], *, delim: str = ", ", final: str = "or") -> str:
size = len(seq)
diff --git a/src/brand/dev/_version.py b/src/brand/dev/_version.py
index 6c398c0..236a189 100644
--- a/src/brand/dev/_version.py
+++ b/src/brand/dev/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "brand.dev"
-__version__ = "1.43.0" # x-release-please-version
+__version__ = "1.44.0" # x-release-please-version
diff --git a/src/brand/dev/resources/brand.py b/src/brand/dev/resources/brand.py
index 856a490..aada18b 100644
--- a/src/brand/dev/resources/brand.py
+++ b/src/brand/dev/resources/brand.py
@@ -8,16 +8,12 @@
import httpx
from ..types import (
- brand_fonts_params,
brand_ai_query_params,
brand_prefetch_params,
brand_retrieve_params,
brand_ai_product_params,
- brand_screenshot_params,
- brand_styleguide_params,
brand_ai_products_params,
brand_web_scrape_md_params,
- brand_retrieve_naics_params,
brand_web_scrape_html_params,
brand_retrieve_by_isin_params,
brand_retrieve_by_name_params,
@@ -40,16 +36,12 @@
async_to_streamed_response_wrapper,
)
from .._base_client import make_request_options
-from ..types.brand_fonts_response import BrandFontsResponse
from ..types.brand_ai_query_response import BrandAIQueryResponse
from ..types.brand_prefetch_response import BrandPrefetchResponse
from ..types.brand_retrieve_response import BrandRetrieveResponse
from ..types.brand_ai_product_response import BrandAIProductResponse
-from ..types.brand_screenshot_response import BrandScreenshotResponse
-from ..types.brand_styleguide_response import BrandStyleguideResponse
from ..types.brand_ai_products_response import BrandAIProductsResponse
from ..types.brand_web_scrape_md_response import BrandWebScrapeMdResponse
-from ..types.brand_retrieve_naics_response import BrandRetrieveNaicsResponse
from ..types.brand_web_scrape_html_response import BrandWebScrapeHTMLResponse
from ..types.brand_retrieve_by_isin_response import BrandRetrieveByIsinResponse
from ..types.brand_retrieve_by_name_response import BrandRetrieveByNameResponse
@@ -89,61 +81,126 @@ def retrieve(
*,
domain: str,
force_language: Literal[
+ "afrikaans",
"albanian",
+ "amharic",
"arabic",
+ "armenian",
+ "assamese",
+ "aymara",
"azeri",
+ "basque",
+ "belarusian",
"bengali",
+ "bosnian",
"bulgarian",
+ "burmese",
"cantonese",
+ "catalan",
"cebuano",
+ "chinese",
+ "corsican",
"croatian",
"czech",
"danish",
"dutch",
"english",
+ "esperanto",
"estonian",
"farsi",
+ "fijian",
"finnish",
"french",
+ "galician",
+ "georgian",
"german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
"hausa",
"hawaiian",
+ "hebrew",
"hindi",
+ "hmong",
"hungarian",
"icelandic",
+ "igbo",
"indonesian",
+ "irish",
"italian",
+ "japanese",
+ "javanese",
+ "kannada",
"kazakh",
+ "khmer",
+ "kinyarwanda",
"korean",
+ "kurdish",
"kyrgyz",
+ "lao",
"latin",
"latvian",
+ "lingala",
"lithuanian",
+ "luxembourgish",
"macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
"mongolian",
"nepali",
"norwegian",
+ "odia",
+ "oromo",
"pashto",
"pidgin",
"polish",
"portuguese",
+ "punjabi",
+ "quechua",
"romanian",
"russian",
+ "samoan",
+ "scottish-gaelic",
"serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
"slovak",
"slovene",
"somali",
"spanish",
+ "sundanese",
"swahili",
"swedish",
"tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
"thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
"turkish",
+ "turkmen",
"ukrainian",
"urdu",
+ "uyghur",
"uzbek",
"vietnamese",
"welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
]
| Omit = omit,
max_speed: bool | Omit = omit,
@@ -163,8 +220,7 @@ def retrieve(
domain: Domain name to retrieve brand data for (e.g., 'example.com', 'google.com').
Cannot be used with name or ticker parameters.
- force_language: Optional parameter to force the language of the retrieved brand data. Works with
- all three lookup methods.
+ force_language: Optional parameter to force the language of the retrieved brand data.
max_speed: Optional parameter to optimize the API call for maximum speed. When set to true,
the API will skip time-consuming operations for faster response at the cost of
@@ -206,6 +262,7 @@ def ai_product(
self,
*,
url: str,
+ max_age_ms: int | Omit = omit,
timeout_ms: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -215,13 +272,16 @@ def ai_product(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BrandAIProductResponse:
"""
- Beta feature: Given a single URL, determines if it is a product detail page,
- classifies the platform/product type, and extracts the product information.
- Supports Amazon, TikTok Shop, Etsy, and generic ecommerce sites.
+ Given a single URL, determines if it is a product page and extracts the product
+ information.
Args:
url: The product page URL to extract product data from.
+ max_age_ms: Return a cached result if a prior scrape for the same parameters exists and is
+ younger than this many milliseconds. Defaults to 7 days (604800000 ms) when
+ omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.
+
timeout_ms: Optional timeout in milliseconds for the request. Maximum allowed value is
300000ms (5 minutes).
@@ -238,6 +298,7 @@ def ai_product(
body=maybe_transform(
{
"url": url,
+ "max_age_ms": max_age_ms,
"timeout_ms": timeout_ms,
},
brand_ai_product_params.BrandAIProductParams,
@@ -253,6 +314,7 @@ def ai_products(
self,
*,
domain: str,
+ max_age_ms: int | Omit = omit,
max_products: int | Omit = omit,
timeout_ms: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -262,15 +324,19 @@ def ai_products(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BrandAIProductsResponse:
- """Beta feature: Extract product information from a brand's website.
+ """Extract product information from a brand's website.
- We will
- analyze the website and return a list of products with details such as name,
- description, image, pricing, features, and more.
+ We will analyze the website
+ and return a list of products with details such as name, description, image,
+ pricing, features, and more.
Args:
domain: The domain name to analyze.
+ max_age_ms: Return a cached result if a prior scrape for the same parameters exists and is
+ younger than this many milliseconds. Defaults to 7 days (604800000 ms) when
+ omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.
+
max_products: Maximum number of products to extract.
timeout_ms: Optional timeout in milliseconds for the request. Maximum allowed value is
@@ -291,6 +357,7 @@ def ai_products(
self,
*,
direct_url: str,
+ max_age_ms: int | Omit = omit,
max_products: int | Omit = omit,
timeout_ms: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -300,16 +367,20 @@ def ai_products(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BrandAIProductsResponse:
- """Beta feature: Extract product information from a brand's website.
+ """Extract product information from a brand's website.
- We will
- analyze the website and return a list of products with details such as name,
- description, image, pricing, features, and more.
+ We will analyze the website
+ and return a list of products with details such as name, description, image,
+ pricing, features, and more.
Args:
direct_url: A specific URL to use directly as the starting point for extraction without
domain resolution.
+ max_age_ms: Return a cached result if a prior scrape for the same parameters exists and is
+ younger than this many milliseconds. Defaults to 7 days (604800000 ms) when
+ omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.
+
max_products: Maximum number of products to extract.
timeout_ms: Optional timeout in milliseconds for the request. Maximum allowed value is
@@ -330,6 +401,7 @@ def ai_products(
self,
*,
domain: str | Omit = omit,
+ max_age_ms: int | Omit = omit,
max_products: int | Omit = omit,
timeout_ms: int | Omit = omit,
direct_url: str | Omit = omit,
@@ -345,6 +417,7 @@ def ai_products(
body=maybe_transform(
{
"domain": domain,
+ "max_age_ms": max_age_ms,
"max_products": max_products,
"timeout_ms": timeout_ms,
"direct_url": direct_url,
@@ -413,56 +486,6 @@ def ai_query(
cast_to=BrandAIQueryResponse,
)
- def fonts(
- self,
- *,
- domain: str,
- timeout_ms: int | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> BrandFontsResponse:
- """
- Extract font information from a brand's website including font families, usage
- statistics, fallbacks, and element/word counts.
-
- Args:
- domain: Domain name to extract fonts from (e.g., 'example.com', 'google.com'). The
- domain will be automatically normalized and validated.
-
- timeout_ms: Optional timeout in milliseconds for the request. If the request takes longer
- than this value, it will be aborted with a 408 status code. Maximum allowed
- value is 300000ms (5 minutes).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/brand/fonts",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "domain": domain,
- "timeout_ms": timeout_ms,
- },
- brand_fonts_params.BrandFontsParams,
- ),
- ),
- cast_to=BrandFontsResponse,
- )
-
def identify_from_transaction(
self,
*,
@@ -711,61 +734,126 @@ def identify_from_transaction(
]
| Omit = omit,
force_language: Literal[
+ "afrikaans",
"albanian",
+ "amharic",
"arabic",
+ "armenian",
+ "assamese",
+ "aymara",
"azeri",
+ "basque",
+ "belarusian",
"bengali",
+ "bosnian",
"bulgarian",
+ "burmese",
"cantonese",
+ "catalan",
"cebuano",
+ "chinese",
+ "corsican",
"croatian",
"czech",
"danish",
"dutch",
"english",
+ "esperanto",
"estonian",
"farsi",
+ "fijian",
"finnish",
"french",
+ "galician",
+ "georgian",
"german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
"hausa",
"hawaiian",
+ "hebrew",
"hindi",
+ "hmong",
"hungarian",
"icelandic",
+ "igbo",
"indonesian",
+ "irish",
"italian",
+ "japanese",
+ "javanese",
+ "kannada",
"kazakh",
+ "khmer",
+ "kinyarwanda",
"korean",
+ "kurdish",
"kyrgyz",
+ "lao",
"latin",
"latvian",
+ "lingala",
"lithuanian",
+ "luxembourgish",
"macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
"mongolian",
"nepali",
"norwegian",
+ "odia",
+ "oromo",
"pashto",
"pidgin",
"polish",
"portuguese",
+ "punjabi",
+ "quechua",
"romanian",
"russian",
+ "samoan",
+ "scottish-gaelic",
"serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
"slovak",
"slovene",
"somali",
"spanish",
+ "sundanese",
"swahili",
"swedish",
"tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
"thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
"turkish",
+ "turkmen",
"ukrainian",
"urdu",
+ "uyghur",
"uzbek",
"vietnamese",
"welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
]
| Omit = omit,
high_confidence_only: bool | Omit = omit,
@@ -796,7 +884,6 @@ def identify_from_transaction(
high_confidence_only: When set to true, the API will perform an additional verification steps to
ensure the identified brand matches the transaction with high confidence.
- Defaults to false.
max_speed: Optional parameter to optimize the API call for maximum speed. When set to true,
the API will skip time-consuming operations for faster response at the cost of
@@ -858,9 +945,7 @@ def prefetch(
) -> BrandPrefetchResponse:
"""
Signal that you may fetch brand data for a particular domain soon to improve
- latency. This endpoint does not charge credits and is available for paid
- customers to optimize future requests. [You must be on a paid plan to use this
- endpoint]
+ latency.
Args:
domain: Domain name to prefetch brand data for
@@ -908,9 +993,7 @@ def prefetch_by_email(
Signal that you may fetch brand data for a particular domain soon to improve
latency. This endpoint accepts an email address, extracts the domain from it,
validates that it's not a disposable or free email provider, and queues the
- domain for prefetching. This endpoint does not charge credits and is available
- for paid customers to optimize future requests. [You must be on a paid plan to
- use this endpoint]
+ domain for prefetching.
Args:
email: Email address to prefetch brand data for. The domain will be extracted from the
@@ -949,61 +1032,126 @@ def retrieve_by_email(
*,
email: str,
force_language: Literal[
+ "afrikaans",
"albanian",
+ "amharic",
"arabic",
+ "armenian",
+ "assamese",
+ "aymara",
"azeri",
+ "basque",
+ "belarusian",
"bengali",
+ "bosnian",
"bulgarian",
+ "burmese",
"cantonese",
+ "catalan",
"cebuano",
+ "chinese",
+ "corsican",
"croatian",
"czech",
"danish",
"dutch",
"english",
+ "esperanto",
"estonian",
"farsi",
+ "fijian",
"finnish",
"french",
+ "galician",
+ "georgian",
"german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
"hausa",
"hawaiian",
+ "hebrew",
"hindi",
+ "hmong",
"hungarian",
"icelandic",
+ "igbo",
"indonesian",
+ "irish",
"italian",
+ "japanese",
+ "javanese",
+ "kannada",
"kazakh",
+ "khmer",
+ "kinyarwanda",
"korean",
+ "kurdish",
"kyrgyz",
+ "lao",
"latin",
"latvian",
+ "lingala",
"lithuanian",
+ "luxembourgish",
"macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
"mongolian",
"nepali",
"norwegian",
+ "odia",
+ "oromo",
"pashto",
"pidgin",
"polish",
"portuguese",
+ "punjabi",
+ "quechua",
"romanian",
"russian",
+ "samoan",
+ "scottish-gaelic",
"serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
"slovak",
"slovene",
"somali",
"spanish",
+ "sundanese",
"swahili",
"swedish",
"tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
"thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
"turkish",
+ "turkmen",
"ukrainian",
"urdu",
+ "uyghur",
"uzbek",
"vietnamese",
"welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
]
| Omit = omit,
max_speed: bool | Omit = omit,
@@ -1017,9 +1165,8 @@ def retrieve_by_email(
) -> BrandRetrieveByEmailResponse:
"""
Retrieve brand information using an email address while detecting disposable and
- free email addresses. This endpoint extracts the domain from the email address
- and returns brand data for that domain. Disposable and free email addresses
- (like gmail.com, yahoo.com) will throw a 422 error.
+ free email addresses. Disposable and free email addresses (like gmail.com,
+ yahoo.com) will throw a 422 error.
Args:
email: Email address to retrieve brand data for (e.g., 'contact@example.com'). The
@@ -1069,61 +1216,126 @@ def retrieve_by_isin(
*,
isin: str,
force_language: Literal[
+ "afrikaans",
"albanian",
+ "amharic",
"arabic",
+ "armenian",
+ "assamese",
+ "aymara",
"azeri",
+ "basque",
+ "belarusian",
"bengali",
+ "bosnian",
"bulgarian",
+ "burmese",
"cantonese",
+ "catalan",
"cebuano",
+ "chinese",
+ "corsican",
"croatian",
"czech",
"danish",
"dutch",
"english",
+ "esperanto",
"estonian",
"farsi",
+ "fijian",
"finnish",
"french",
+ "galician",
+ "georgian",
"german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
"hausa",
"hawaiian",
+ "hebrew",
"hindi",
+ "hmong",
"hungarian",
"icelandic",
+ "igbo",
"indonesian",
+ "irish",
"italian",
+ "japanese",
+ "javanese",
+ "kannada",
"kazakh",
+ "khmer",
+ "kinyarwanda",
"korean",
+ "kurdish",
"kyrgyz",
+ "lao",
"latin",
"latvian",
+ "lingala",
"lithuanian",
+ "luxembourgish",
"macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
"mongolian",
"nepali",
"norwegian",
+ "odia",
+ "oromo",
"pashto",
"pidgin",
"polish",
"portuguese",
+ "punjabi",
+ "quechua",
"romanian",
"russian",
+ "samoan",
+ "scottish-gaelic",
"serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
"slovak",
"slovene",
"somali",
"spanish",
+ "sundanese",
"swahili",
"swedish",
"tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
"thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
"turkish",
+ "turkmen",
"ukrainian",
"urdu",
+ "uyghur",
"uzbek",
"vietnamese",
"welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
]
| Omit = omit,
max_speed: bool | Omit = omit,
@@ -1137,8 +1349,7 @@ def retrieve_by_isin(
) -> BrandRetrieveByIsinResponse:
"""
Retrieve brand information using an ISIN (International Securities
- Identification Number). This endpoint looks up the company associated with the
- ISIN and returns its brand data.
+ Identification Number).
Args:
isin: ISIN (International Securities Identification Number) to retrieve brand data for
@@ -1430,61 +1641,126 @@ def retrieve_by_name(
]
| Omit = omit,
force_language: Literal[
+ "afrikaans",
"albanian",
+ "amharic",
"arabic",
+ "armenian",
+ "assamese",
+ "aymara",
"azeri",
+ "basque",
+ "belarusian",
"bengali",
+ "bosnian",
"bulgarian",
+ "burmese",
"cantonese",
+ "catalan",
"cebuano",
+ "chinese",
+ "corsican",
"croatian",
"czech",
"danish",
"dutch",
"english",
+ "esperanto",
"estonian",
"farsi",
+ "fijian",
"finnish",
"french",
+ "galician",
+ "georgian",
"german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
"hausa",
"hawaiian",
+ "hebrew",
"hindi",
+ "hmong",
"hungarian",
"icelandic",
+ "igbo",
"indonesian",
+ "irish",
"italian",
+ "japanese",
+ "javanese",
+ "kannada",
"kazakh",
+ "khmer",
+ "kinyarwanda",
"korean",
+ "kurdish",
"kyrgyz",
+ "lao",
"latin",
"latvian",
+ "lingala",
"lithuanian",
+ "luxembourgish",
"macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
"mongolian",
"nepali",
"norwegian",
+ "odia",
+ "oromo",
"pashto",
"pidgin",
"polish",
"portuguese",
+ "punjabi",
+ "quechua",
"romanian",
"russian",
+ "samoan",
+ "scottish-gaelic",
"serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
"slovak",
"slovene",
"somali",
"spanish",
+ "sundanese",
"swahili",
"swedish",
"tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
"thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
"turkish",
+ "turkmen",
"ukrainian",
"urdu",
+ "uyghur",
"uzbek",
"vietnamese",
"welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
]
| Omit = omit,
max_speed: bool | Omit = omit,
@@ -1496,17 +1772,15 @@ def retrieve_by_name(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BrandRetrieveByNameResponse:
- """Retrieve brand information using a company name.
-
- This endpoint searches for the
- company by name and returns its brand data.
+ """
+ Retrieve brand information using a company name.
Args:
name: Company name to retrieve brand data for (e.g., 'Apple Inc', 'Microsoft
Corporation'). Must be 3-30 characters.
- country_gl: Optional country code (GL parameter) to specify the country. This affects the
- geographic location used for search queries.
+ country_gl: Optional country code hint (GL parameter) to specify the country for the company
+ name.
force_language: Optional parameter to force the language of the retrieved brand data.
@@ -1552,61 +1826,126 @@ def retrieve_by_ticker(
*,
ticker: str,
force_language: Literal[
+ "afrikaans",
"albanian",
+ "amharic",
"arabic",
+ "armenian",
+ "assamese",
+ "aymara",
"azeri",
+ "basque",
+ "belarusian",
"bengali",
+ "bosnian",
"bulgarian",
+ "burmese",
"cantonese",
+ "catalan",
"cebuano",
+ "chinese",
+ "corsican",
"croatian",
"czech",
"danish",
"dutch",
"english",
+ "esperanto",
"estonian",
"farsi",
+ "fijian",
"finnish",
"french",
+ "galician",
+ "georgian",
"german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
"hausa",
"hawaiian",
+ "hebrew",
"hindi",
+ "hmong",
"hungarian",
"icelandic",
+ "igbo",
"indonesian",
+ "irish",
"italian",
+ "japanese",
+ "javanese",
+ "kannada",
"kazakh",
+ "khmer",
+ "kinyarwanda",
"korean",
+ "kurdish",
"kyrgyz",
+ "lao",
"latin",
"latvian",
+ "lingala",
"lithuanian",
+ "luxembourgish",
"macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
"mongolian",
"nepali",
"norwegian",
+ "odia",
+ "oromo",
"pashto",
"pidgin",
"polish",
"portuguese",
+ "punjabi",
+ "quechua",
"romanian",
"russian",
+ "samoan",
+ "scottish-gaelic",
"serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
"slovak",
"slovene",
"somali",
"spanish",
+ "sundanese",
"swahili",
"swedish",
"tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
"thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
"turkish",
+ "turkmen",
"ukrainian",
"urdu",
+ "uyghur",
"uzbek",
"vietnamese",
"welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
]
| Omit = omit,
max_speed: bool | Omit = omit,
@@ -1693,10 +2032,8 @@ def retrieve_by_ticker(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BrandRetrieveByTickerResponse:
- """Retrieve brand information using a stock ticker symbol.
-
- This endpoint looks up
- the company associated with the ticker and returns its brand data.
+ """
+ Retrieve brand information using a stock ticker symbol.
Args:
ticker: Stock ticker symbol to retrieve brand data for (e.g., 'AAPL', 'GOOGL', 'BRK.A').
@@ -1743,65 +2080,6 @@ def retrieve_by_ticker(
cast_to=BrandRetrieveByTickerResponse,
)
- def retrieve_naics(
- self,
- *,
- input: str,
- max_results: int | Omit = omit,
- min_results: int | Omit = omit,
- timeout_ms: int | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> BrandRetrieveNaicsResponse:
- """
- Endpoint to classify any brand into a 2022 NAICS code.
-
- Args:
- input: Brand domain or title to retrieve NAICS code for. If a valid domain is provided
- in `input`, it will be used for classification, otherwise, we will search for
- the brand using the provided title.
-
- max_results: Maximum number of NAICS codes to return. Must be between 1 and 10. Defaults
- to 5.
-
- min_results: Minimum number of NAICS codes to return. Must be at least 1. Defaults to 1.
-
- timeout_ms: Optional timeout in milliseconds for the request. If the request takes longer
- than this value, it will be aborted with a 408 status code. Maximum allowed
- value is 300000ms (5 minutes).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/brand/naics",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "input": input,
- "max_results": max_results,
- "min_results": min_results,
- "timeout_ms": timeout_ms,
- },
- brand_retrieve_naics_params.BrandRetrieveNaicsParams,
- ),
- ),
- cast_to=BrandRetrieveNaicsResponse,
- )
-
def retrieve_simplified(
self,
*,
@@ -1816,8 +2094,8 @@ def retrieve_simplified(
) -> BrandRetrieveSimplifiedResponse:
"""
Returns a simplified version of brand data containing only essential
- information: domain, title, colors, logos, and backdrops. This endpoint is
- optimized for faster responses and reduced data transfer.
+ information: domain, title, colors, logos, and backdrops. Optimized for faster
+ responses and reduced data transfer.
Args:
domain: Domain name to retrieve simplified brand data for
@@ -1852,133 +2130,13 @@ def retrieve_simplified(
cast_to=BrandRetrieveSimplifiedResponse,
)
- def screenshot(
- self,
- *,
- domain: str,
- full_screenshot: Literal["true", "false"] | Omit = omit,
- page: Literal["login", "signup", "blog", "careers", "pricing", "terms", "privacy", "contact"] | Omit = omit,
- prioritize: Literal["speed", "quality"] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> BrandScreenshotResponse:
- """Capture a screenshot of a website.
-
- Supports both viewport (standard browser
- view) and full-page screenshots. Can also screenshot specific page types (login,
- pricing, etc.) by using heuristics to find the appropriate URL. Returns a URL to
- the uploaded screenshot image hosted on our CDN.
-
- Args:
- domain: Domain name to take screenshot of (e.g., 'example.com', 'google.com'). The
- domain will be automatically normalized and validated.
-
- full_screenshot: Optional parameter to determine screenshot type. If 'true', takes a full page
- screenshot capturing all content. If 'false' or not provided, takes a viewport
- screenshot (standard browser view).
-
- page: Optional parameter to specify which page type to screenshot. If provided, the
- system will scrape the domain's links and use heuristics to find the most
- appropriate URL for the specified page type (30 supported languages). If not
- provided, screenshots the main domain landing page.
-
- prioritize: Optional parameter to prioritize screenshot capture. If 'speed', optimizes for
- faster capture with basic quality. If 'quality', optimizes for higher quality
- with longer wait times. Defaults to 'quality' if not provided.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/brand/screenshot",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "domain": domain,
- "full_screenshot": full_screenshot,
- "page": page,
- "prioritize": prioritize,
- },
- brand_screenshot_params.BrandScreenshotParams,
- ),
- ),
- cast_to=BrandScreenshotResponse,
- )
-
- def styleguide(
- self,
- *,
- direct_url: str | Omit = omit,
- domain: str | Omit = omit,
- timeout_ms: int | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> BrandStyleguideResponse:
- """
- Automatically extract comprehensive design system information from a brand's
- website including colors, typography, spacing, shadows, and UI components.
- Either 'domain' or 'directUrl' must be provided as a query parameter, but not
- both.
-
- Args:
- direct_url: A specific URL to fetch the styleguide from directly, bypassing domain
- resolution (e.g., 'https://example.com/design-system').
-
- domain: Domain name to extract styleguide from (e.g., 'example.com', 'google.com'). The
- domain will be automatically normalized and validated.
-
- timeout_ms: Optional timeout in milliseconds for the request. If the request takes longer
- than this value, it will be aborted with a 408 status code. Maximum allowed
- value is 300000ms (5 minutes).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/brand/styleguide",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "direct_url": direct_url,
- "domain": domain,
- "timeout_ms": timeout_ms,
- },
- brand_styleguide_params.BrandStyleguideParams,
- ),
- ),
- cast_to=BrandStyleguideResponse,
- )
-
def web_scrape_html(
self,
*,
url: str,
+ include_frames: bool | Omit = omit,
+ max_age_ms: int | Omit = omit,
+ parse_pdf: bool | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -1992,6 +2150,16 @@ def web_scrape_html(
Args:
url: Full URL to scrape (must include http:// or https:// protocol)
+ include_frames: When true, iframes are rendered inline into the returned HTML.
+
+ max_age_ms: Return a cached result if a prior scrape for the same parameters exists and is
+ younger than this many milliseconds. Defaults to 1 day (86400000 ms) when
+ omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.
+
+ parse_pdf: When true (default), PDF URLs are fetched and their text layer is extracted and
+ returned wrapped in …. When false, PDF URLs are skipped
+ and a 400 WEBSITE_ACCESS_ERROR is returned.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -2007,7 +2175,15 @@ def web_scrape_html(
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
- query=maybe_transform({"url": url}, brand_web_scrape_html_params.BrandWebScrapeHTMLParams),
+ query=maybe_transform(
+ {
+ "url": url,
+ "include_frames": include_frames,
+ "max_age_ms": max_age_ms,
+ "parse_pdf": parse_pdf,
+ },
+ brand_web_scrape_html_params.BrandWebScrapeHTMLParams,
+ ),
),
cast_to=BrandWebScrapeHTMLResponse,
)
@@ -2056,8 +2232,11 @@ def web_scrape_md(
self,
*,
url: str,
+ include_frames: bool | Omit = omit,
include_images: bool | Omit = omit,
include_links: bool | Omit = omit,
+ max_age_ms: int | Omit = omit,
+ parse_pdf: bool | Omit = omit,
shorten_base64_images: bool | Omit = omit,
use_main_content_only: bool | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -2068,17 +2247,26 @@ def web_scrape_md(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BrandWebScrapeMdResponse:
"""
- Scrapes the given URL, converts the HTML content to Markdown, and returns the
- result.
+ Scrapes the given URL into LLM usable Markdown.
Args:
- url: Full URL to scrape and convert to markdown (must include http:// or https://
+ url: Full URL to scrape into LLM usable Markdown (must include http:// or https://
protocol)
+ include_frames: When true, the contents of iframes are rendered to Markdown.
+
include_images: Include image references in Markdown output
include_links: Preserve hyperlinks in Markdown output
+ max_age_ms: Return a cached result if a prior scrape for the same parameters exists and is
+ younger than this many milliseconds. Defaults to 1 day (86400000 ms) when
+ omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.
+
+ parse_pdf: When true (default), PDF URLs are fetched and their text layer is extracted and
+ converted to Markdown. When false, PDF URLs are skipped and a 400
+ WEBSITE_ACCESS_ERROR is returned.
+
shorten_base64_images: Shorten base64-encoded image data in the Markdown output
use_main_content_only: Extract only the main content of the page, excluding headers, footers, sidebars,
@@ -2102,8 +2290,11 @@ def web_scrape_md(
query=maybe_transform(
{
"url": url,
+ "include_frames": include_frames,
"include_images": include_images,
"include_links": include_links,
+ "max_age_ms": max_age_ms,
+ "parse_pdf": parse_pdf,
"shorten_base64_images": shorten_base64_images,
"use_main_content_only": use_main_content_only,
},
@@ -2118,6 +2309,7 @@ def web_scrape_sitemap(
*,
domain: str,
max_links: int | Omit = omit,
+ url_regex: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -2126,17 +2318,17 @@ def web_scrape_sitemap(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BrandWebScrapeSitemapResponse:
"""
- Crawls the sitemap of the given domain and returns all discovered page URLs.
- Supports sitemap index files (recursive), parallel fetching with concurrency
- control, deduplication, and filters out non-page resources (images, PDFs, etc.).
+ Crawl an entire website's sitemap and return all discovered page URLs.
Args:
- domain: Domain name to crawl sitemaps for (e.g., 'example.com'). The domain will be
- automatically normalized and validated.
+ domain: Domain to build a sitemap for
max_links: Maximum number of links to return from the sitemap crawl. Defaults to 10,000.
Minimum is 1, maximum is 100,000.
+ url_regex: Optional RE2-compatible regex pattern. Only URLs matching this pattern are
+ returned and counted against maxLinks.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -2156,6 +2348,7 @@ def web_scrape_sitemap(
{
"domain": domain,
"max_links": max_links,
+ "url_regex": url_regex,
},
brand_web_scrape_sitemap_params.BrandWebScrapeSitemapParams,
),
@@ -2189,61 +2382,126 @@ async def retrieve(
*,
domain: str,
force_language: Literal[
+ "afrikaans",
"albanian",
+ "amharic",
"arabic",
+ "armenian",
+ "assamese",
+ "aymara",
"azeri",
+ "basque",
+ "belarusian",
"bengali",
+ "bosnian",
"bulgarian",
+ "burmese",
"cantonese",
+ "catalan",
"cebuano",
+ "chinese",
+ "corsican",
"croatian",
"czech",
"danish",
"dutch",
"english",
+ "esperanto",
"estonian",
"farsi",
+ "fijian",
"finnish",
"french",
+ "galician",
+ "georgian",
"german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
"hausa",
"hawaiian",
+ "hebrew",
"hindi",
+ "hmong",
"hungarian",
"icelandic",
+ "igbo",
"indonesian",
+ "irish",
"italian",
+ "japanese",
+ "javanese",
+ "kannada",
"kazakh",
+ "khmer",
+ "kinyarwanda",
"korean",
+ "kurdish",
"kyrgyz",
+ "lao",
"latin",
"latvian",
+ "lingala",
"lithuanian",
+ "luxembourgish",
"macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
"mongolian",
"nepali",
"norwegian",
+ "odia",
+ "oromo",
"pashto",
"pidgin",
"polish",
"portuguese",
+ "punjabi",
+ "quechua",
"romanian",
"russian",
+ "samoan",
+ "scottish-gaelic",
"serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
"slovak",
"slovene",
"somali",
"spanish",
+ "sundanese",
"swahili",
"swedish",
"tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
"thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
"turkish",
+ "turkmen",
"ukrainian",
"urdu",
+ "uyghur",
"uzbek",
"vietnamese",
"welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
]
| Omit = omit,
max_speed: bool | Omit = omit,
@@ -2263,8 +2521,7 @@ async def retrieve(
domain: Domain name to retrieve brand data for (e.g., 'example.com', 'google.com').
Cannot be used with name or ticker parameters.
- force_language: Optional parameter to force the language of the retrieved brand data. Works with
- all three lookup methods.
+ force_language: Optional parameter to force the language of the retrieved brand data.
max_speed: Optional parameter to optimize the API call for maximum speed. When set to true,
the API will skip time-consuming operations for faster response at the cost of
@@ -2306,6 +2563,7 @@ async def ai_product(
self,
*,
url: str,
+ max_age_ms: int | Omit = omit,
timeout_ms: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -2315,13 +2573,16 @@ async def ai_product(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BrandAIProductResponse:
"""
- Beta feature: Given a single URL, determines if it is a product detail page,
- classifies the platform/product type, and extracts the product information.
- Supports Amazon, TikTok Shop, Etsy, and generic ecommerce sites.
+ Given a single URL, determines if it is a product page and extracts the product
+ information.
Args:
url: The product page URL to extract product data from.
+ max_age_ms: Return a cached result if a prior scrape for the same parameters exists and is
+ younger than this many milliseconds. Defaults to 7 days (604800000 ms) when
+ omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.
+
timeout_ms: Optional timeout in milliseconds for the request. Maximum allowed value is
300000ms (5 minutes).
@@ -2338,6 +2599,7 @@ async def ai_product(
body=await async_maybe_transform(
{
"url": url,
+ "max_age_ms": max_age_ms,
"timeout_ms": timeout_ms,
},
brand_ai_product_params.BrandAIProductParams,
@@ -2353,6 +2615,7 @@ async def ai_products(
self,
*,
domain: str,
+ max_age_ms: int | Omit = omit,
max_products: int | Omit = omit,
timeout_ms: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -2362,15 +2625,19 @@ async def ai_products(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BrandAIProductsResponse:
- """Beta feature: Extract product information from a brand's website.
+ """Extract product information from a brand's website.
- We will
- analyze the website and return a list of products with details such as name,
- description, image, pricing, features, and more.
+ We will analyze the website
+ and return a list of products with details such as name, description, image,
+ pricing, features, and more.
Args:
domain: The domain name to analyze.
+ max_age_ms: Return a cached result if a prior scrape for the same parameters exists and is
+ younger than this many milliseconds. Defaults to 7 days (604800000 ms) when
+ omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.
+
max_products: Maximum number of products to extract.
timeout_ms: Optional timeout in milliseconds for the request. Maximum allowed value is
@@ -2391,6 +2658,7 @@ async def ai_products(
self,
*,
direct_url: str,
+ max_age_ms: int | Omit = omit,
max_products: int | Omit = omit,
timeout_ms: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -2400,16 +2668,20 @@ async def ai_products(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BrandAIProductsResponse:
- """Beta feature: Extract product information from a brand's website.
+ """Extract product information from a brand's website.
- We will
- analyze the website and return a list of products with details such as name,
- description, image, pricing, features, and more.
+ We will analyze the website
+ and return a list of products with details such as name, description, image,
+ pricing, features, and more.
Args:
direct_url: A specific URL to use directly as the starting point for extraction without
domain resolution.
+ max_age_ms: Return a cached result if a prior scrape for the same parameters exists and is
+ younger than this many milliseconds. Defaults to 7 days (604800000 ms) when
+ omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.
+
max_products: Maximum number of products to extract.
timeout_ms: Optional timeout in milliseconds for the request. Maximum allowed value is
@@ -2430,6 +2702,7 @@ async def ai_products(
self,
*,
domain: str | Omit = omit,
+ max_age_ms: int | Omit = omit,
max_products: int | Omit = omit,
timeout_ms: int | Omit = omit,
direct_url: str | Omit = omit,
@@ -2445,6 +2718,7 @@ async def ai_products(
body=await async_maybe_transform(
{
"domain": domain,
+ "max_age_ms": max_age_ms,
"max_products": max_products,
"timeout_ms": timeout_ms,
"direct_url": direct_url,
@@ -2513,56 +2787,6 @@ async def ai_query(
cast_to=BrandAIQueryResponse,
)
- async def fonts(
- self,
- *,
- domain: str,
- timeout_ms: int | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> BrandFontsResponse:
- """
- Extract font information from a brand's website including font families, usage
- statistics, fallbacks, and element/word counts.
-
- Args:
- domain: Domain name to extract fonts from (e.g., 'example.com', 'google.com'). The
- domain will be automatically normalized and validated.
-
- timeout_ms: Optional timeout in milliseconds for the request. If the request takes longer
- than this value, it will be aborted with a 408 status code. Maximum allowed
- value is 300000ms (5 minutes).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/brand/fonts",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "domain": domain,
- "timeout_ms": timeout_ms,
- },
- brand_fonts_params.BrandFontsParams,
- ),
- ),
- cast_to=BrandFontsResponse,
- )
-
async def identify_from_transaction(
self,
*,
@@ -2811,61 +3035,126 @@ async def identify_from_transaction(
]
| Omit = omit,
force_language: Literal[
+ "afrikaans",
"albanian",
+ "amharic",
"arabic",
+ "armenian",
+ "assamese",
+ "aymara",
"azeri",
+ "basque",
+ "belarusian",
"bengali",
+ "bosnian",
"bulgarian",
+ "burmese",
"cantonese",
+ "catalan",
"cebuano",
+ "chinese",
+ "corsican",
"croatian",
"czech",
"danish",
"dutch",
"english",
+ "esperanto",
"estonian",
"farsi",
+ "fijian",
"finnish",
"french",
+ "galician",
+ "georgian",
"german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
"hausa",
"hawaiian",
+ "hebrew",
"hindi",
+ "hmong",
"hungarian",
"icelandic",
+ "igbo",
"indonesian",
+ "irish",
"italian",
+ "japanese",
+ "javanese",
+ "kannada",
"kazakh",
+ "khmer",
+ "kinyarwanda",
"korean",
+ "kurdish",
"kyrgyz",
+ "lao",
"latin",
"latvian",
+ "lingala",
"lithuanian",
+ "luxembourgish",
"macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
"mongolian",
"nepali",
"norwegian",
+ "odia",
+ "oromo",
"pashto",
"pidgin",
"polish",
"portuguese",
+ "punjabi",
+ "quechua",
"romanian",
"russian",
+ "samoan",
+ "scottish-gaelic",
"serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
"slovak",
"slovene",
"somali",
"spanish",
+ "sundanese",
"swahili",
"swedish",
"tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
"thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
"turkish",
+ "turkmen",
"ukrainian",
"urdu",
+ "uyghur",
"uzbek",
"vietnamese",
"welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
]
| Omit = omit,
high_confidence_only: bool | Omit = omit,
@@ -2896,7 +3185,6 @@ async def identify_from_transaction(
high_confidence_only: When set to true, the API will perform an additional verification steps to
ensure the identified brand matches the transaction with high confidence.
- Defaults to false.
max_speed: Optional parameter to optimize the API call for maximum speed. When set to true,
the API will skip time-consuming operations for faster response at the cost of
@@ -2958,9 +3246,7 @@ async def prefetch(
) -> BrandPrefetchResponse:
"""
Signal that you may fetch brand data for a particular domain soon to improve
- latency. This endpoint does not charge credits and is available for paid
- customers to optimize future requests. [You must be on a paid plan to use this
- endpoint]
+ latency.
Args:
domain: Domain name to prefetch brand data for
@@ -3008,9 +3294,7 @@ async def prefetch_by_email(
Signal that you may fetch brand data for a particular domain soon to improve
latency. This endpoint accepts an email address, extracts the domain from it,
validates that it's not a disposable or free email provider, and queues the
- domain for prefetching. This endpoint does not charge credits and is available
- for paid customers to optimize future requests. [You must be on a paid plan to
- use this endpoint]
+ domain for prefetching.
Args:
email: Email address to prefetch brand data for. The domain will be extracted from the
@@ -3049,61 +3333,126 @@ async def retrieve_by_email(
*,
email: str,
force_language: Literal[
+ "afrikaans",
"albanian",
+ "amharic",
"arabic",
+ "armenian",
+ "assamese",
+ "aymara",
"azeri",
+ "basque",
+ "belarusian",
"bengali",
+ "bosnian",
"bulgarian",
+ "burmese",
"cantonese",
+ "catalan",
"cebuano",
+ "chinese",
+ "corsican",
"croatian",
"czech",
"danish",
"dutch",
"english",
+ "esperanto",
"estonian",
"farsi",
+ "fijian",
"finnish",
"french",
+ "galician",
+ "georgian",
"german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
"hausa",
"hawaiian",
+ "hebrew",
"hindi",
+ "hmong",
"hungarian",
"icelandic",
+ "igbo",
"indonesian",
+ "irish",
"italian",
+ "japanese",
+ "javanese",
+ "kannada",
"kazakh",
+ "khmer",
+ "kinyarwanda",
"korean",
+ "kurdish",
"kyrgyz",
+ "lao",
"latin",
"latvian",
+ "lingala",
"lithuanian",
+ "luxembourgish",
"macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
"mongolian",
"nepali",
"norwegian",
+ "odia",
+ "oromo",
"pashto",
"pidgin",
"polish",
"portuguese",
+ "punjabi",
+ "quechua",
"romanian",
"russian",
+ "samoan",
+ "scottish-gaelic",
"serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
"slovak",
"slovene",
"somali",
"spanish",
+ "sundanese",
"swahili",
"swedish",
"tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
"thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
"turkish",
+ "turkmen",
"ukrainian",
"urdu",
+ "uyghur",
"uzbek",
"vietnamese",
"welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
]
| Omit = omit,
max_speed: bool | Omit = omit,
@@ -3117,9 +3466,8 @@ async def retrieve_by_email(
) -> BrandRetrieveByEmailResponse:
"""
Retrieve brand information using an email address while detecting disposable and
- free email addresses. This endpoint extracts the domain from the email address
- and returns brand data for that domain. Disposable and free email addresses
- (like gmail.com, yahoo.com) will throw a 422 error.
+ free email addresses. Disposable and free email addresses (like gmail.com,
+ yahoo.com) will throw a 422 error.
Args:
email: Email address to retrieve brand data for (e.g., 'contact@example.com'). The
@@ -3169,61 +3517,126 @@ async def retrieve_by_isin(
*,
isin: str,
force_language: Literal[
+ "afrikaans",
"albanian",
+ "amharic",
"arabic",
+ "armenian",
+ "assamese",
+ "aymara",
"azeri",
+ "basque",
+ "belarusian",
"bengali",
+ "bosnian",
"bulgarian",
+ "burmese",
"cantonese",
+ "catalan",
"cebuano",
+ "chinese",
+ "corsican",
"croatian",
"czech",
"danish",
"dutch",
"english",
+ "esperanto",
"estonian",
"farsi",
+ "fijian",
"finnish",
"french",
+ "galician",
+ "georgian",
"german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
"hausa",
"hawaiian",
+ "hebrew",
"hindi",
+ "hmong",
"hungarian",
"icelandic",
+ "igbo",
"indonesian",
+ "irish",
"italian",
+ "japanese",
+ "javanese",
+ "kannada",
"kazakh",
+ "khmer",
+ "kinyarwanda",
"korean",
+ "kurdish",
"kyrgyz",
+ "lao",
"latin",
"latvian",
+ "lingala",
"lithuanian",
+ "luxembourgish",
"macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
"mongolian",
"nepali",
"norwegian",
+ "odia",
+ "oromo",
"pashto",
"pidgin",
"polish",
"portuguese",
+ "punjabi",
+ "quechua",
"romanian",
"russian",
+ "samoan",
+ "scottish-gaelic",
"serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
"slovak",
"slovene",
"somali",
"spanish",
+ "sundanese",
"swahili",
"swedish",
"tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
"thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
"turkish",
+ "turkmen",
"ukrainian",
"urdu",
+ "uyghur",
"uzbek",
"vietnamese",
"welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
]
| Omit = omit,
max_speed: bool | Omit = omit,
@@ -3237,8 +3650,7 @@ async def retrieve_by_isin(
) -> BrandRetrieveByIsinResponse:
"""
Retrieve brand information using an ISIN (International Securities
- Identification Number). This endpoint looks up the company associated with the
- ISIN and returns its brand data.
+ Identification Number).
Args:
isin: ISIN (International Securities Identification Number) to retrieve brand data for
@@ -3530,61 +3942,126 @@ async def retrieve_by_name(
]
| Omit = omit,
force_language: Literal[
+ "afrikaans",
"albanian",
+ "amharic",
"arabic",
+ "armenian",
+ "assamese",
+ "aymara",
"azeri",
+ "basque",
+ "belarusian",
"bengali",
+ "bosnian",
"bulgarian",
+ "burmese",
"cantonese",
+ "catalan",
"cebuano",
+ "chinese",
+ "corsican",
"croatian",
"czech",
"danish",
"dutch",
"english",
+ "esperanto",
"estonian",
"farsi",
+ "fijian",
"finnish",
"french",
+ "galician",
+ "georgian",
"german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
"hausa",
"hawaiian",
+ "hebrew",
"hindi",
+ "hmong",
"hungarian",
"icelandic",
+ "igbo",
"indonesian",
+ "irish",
"italian",
+ "japanese",
+ "javanese",
+ "kannada",
"kazakh",
+ "khmer",
+ "kinyarwanda",
"korean",
+ "kurdish",
"kyrgyz",
+ "lao",
"latin",
"latvian",
+ "lingala",
"lithuanian",
+ "luxembourgish",
"macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
"mongolian",
"nepali",
"norwegian",
+ "odia",
+ "oromo",
"pashto",
"pidgin",
"polish",
"portuguese",
+ "punjabi",
+ "quechua",
"romanian",
"russian",
+ "samoan",
+ "scottish-gaelic",
"serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
"slovak",
"slovene",
"somali",
"spanish",
+ "sundanese",
"swahili",
"swedish",
"tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
"thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
"turkish",
+ "turkmen",
"ukrainian",
"urdu",
+ "uyghur",
"uzbek",
"vietnamese",
"welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
]
| Omit = omit,
max_speed: bool | Omit = omit,
@@ -3596,17 +4073,15 @@ async def retrieve_by_name(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BrandRetrieveByNameResponse:
- """Retrieve brand information using a company name.
-
- This endpoint searches for the
- company by name and returns its brand data.
+ """
+ Retrieve brand information using a company name.
Args:
name: Company name to retrieve brand data for (e.g., 'Apple Inc', 'Microsoft
Corporation'). Must be 3-30 characters.
- country_gl: Optional country code (GL parameter) to specify the country. This affects the
- geographic location used for search queries.
+ country_gl: Optional country code hint (GL parameter) to specify the country for the company
+ name.
force_language: Optional parameter to force the language of the retrieved brand data.
@@ -3652,61 +4127,126 @@ async def retrieve_by_ticker(
*,
ticker: str,
force_language: Literal[
+ "afrikaans",
"albanian",
+ "amharic",
"arabic",
+ "armenian",
+ "assamese",
+ "aymara",
"azeri",
+ "basque",
+ "belarusian",
"bengali",
+ "bosnian",
"bulgarian",
+ "burmese",
"cantonese",
+ "catalan",
"cebuano",
+ "chinese",
+ "corsican",
"croatian",
"czech",
"danish",
"dutch",
"english",
+ "esperanto",
"estonian",
"farsi",
+ "fijian",
"finnish",
"french",
+ "galician",
+ "georgian",
"german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
"hausa",
"hawaiian",
+ "hebrew",
"hindi",
+ "hmong",
"hungarian",
"icelandic",
+ "igbo",
"indonesian",
+ "irish",
"italian",
+ "japanese",
+ "javanese",
+ "kannada",
"kazakh",
+ "khmer",
+ "kinyarwanda",
"korean",
+ "kurdish",
"kyrgyz",
+ "lao",
"latin",
"latvian",
+ "lingala",
"lithuanian",
+ "luxembourgish",
"macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
"mongolian",
"nepali",
"norwegian",
+ "odia",
+ "oromo",
"pashto",
"pidgin",
"polish",
"portuguese",
+ "punjabi",
+ "quechua",
"romanian",
"russian",
+ "samoan",
+ "scottish-gaelic",
"serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
"slovak",
"slovene",
"somali",
"spanish",
+ "sundanese",
"swahili",
"swedish",
"tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
"thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
"turkish",
+ "turkmen",
"ukrainian",
"urdu",
+ "uyghur",
"uzbek",
"vietnamese",
"welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
]
| Omit = omit,
max_speed: bool | Omit = omit,
@@ -3793,10 +4333,8 @@ async def retrieve_by_ticker(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BrandRetrieveByTickerResponse:
- """Retrieve brand information using a stock ticker symbol.
-
- This endpoint looks up
- the company associated with the ticker and returns its brand data.
+ """
+ Retrieve brand information using a stock ticker symbol.
Args:
ticker: Stock ticker symbol to retrieve brand data for (e.g., 'AAPL', 'GOOGL', 'BRK.A').
@@ -3843,65 +4381,6 @@ async def retrieve_by_ticker(
cast_to=BrandRetrieveByTickerResponse,
)
- async def retrieve_naics(
- self,
- *,
- input: str,
- max_results: int | Omit = omit,
- min_results: int | Omit = omit,
- timeout_ms: int | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> BrandRetrieveNaicsResponse:
- """
- Endpoint to classify any brand into a 2022 NAICS code.
-
- Args:
- input: Brand domain or title to retrieve NAICS code for. If a valid domain is provided
- in `input`, it will be used for classification, otherwise, we will search for
- the brand using the provided title.
-
- max_results: Maximum number of NAICS codes to return. Must be between 1 and 10. Defaults
- to 5.
-
- min_results: Minimum number of NAICS codes to return. Must be at least 1. Defaults to 1.
-
- timeout_ms: Optional timeout in milliseconds for the request. If the request takes longer
- than this value, it will be aborted with a 408 status code. Maximum allowed
- value is 300000ms (5 minutes).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/brand/naics",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "input": input,
- "max_results": max_results,
- "min_results": min_results,
- "timeout_ms": timeout_ms,
- },
- brand_retrieve_naics_params.BrandRetrieveNaicsParams,
- ),
- ),
- cast_to=BrandRetrieveNaicsResponse,
- )
-
async def retrieve_simplified(
self,
*,
@@ -3916,8 +4395,8 @@ async def retrieve_simplified(
) -> BrandRetrieveSimplifiedResponse:
"""
Returns a simplified version of brand data containing only essential
- information: domain, title, colors, logos, and backdrops. This endpoint is
- optimized for faster responses and reduced data transfer.
+ information: domain, title, colors, logos, and backdrops. Optimized for faster
+ responses and reduced data transfer.
Args:
domain: Domain name to retrieve simplified brand data for
@@ -3952,133 +4431,13 @@ async def retrieve_simplified(
cast_to=BrandRetrieveSimplifiedResponse,
)
- async def screenshot(
- self,
- *,
- domain: str,
- full_screenshot: Literal["true", "false"] | Omit = omit,
- page: Literal["login", "signup", "blog", "careers", "pricing", "terms", "privacy", "contact"] | Omit = omit,
- prioritize: Literal["speed", "quality"] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> BrandScreenshotResponse:
- """Capture a screenshot of a website.
-
- Supports both viewport (standard browser
- view) and full-page screenshots. Can also screenshot specific page types (login,
- pricing, etc.) by using heuristics to find the appropriate URL. Returns a URL to
- the uploaded screenshot image hosted on our CDN.
-
- Args:
- domain: Domain name to take screenshot of (e.g., 'example.com', 'google.com'). The
- domain will be automatically normalized and validated.
-
- full_screenshot: Optional parameter to determine screenshot type. If 'true', takes a full page
- screenshot capturing all content. If 'false' or not provided, takes a viewport
- screenshot (standard browser view).
-
- page: Optional parameter to specify which page type to screenshot. If provided, the
- system will scrape the domain's links and use heuristics to find the most
- appropriate URL for the specified page type (30 supported languages). If not
- provided, screenshots the main domain landing page.
-
- prioritize: Optional parameter to prioritize screenshot capture. If 'speed', optimizes for
- faster capture with basic quality. If 'quality', optimizes for higher quality
- with longer wait times. Defaults to 'quality' if not provided.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/brand/screenshot",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "domain": domain,
- "full_screenshot": full_screenshot,
- "page": page,
- "prioritize": prioritize,
- },
- brand_screenshot_params.BrandScreenshotParams,
- ),
- ),
- cast_to=BrandScreenshotResponse,
- )
-
- async def styleguide(
- self,
- *,
- direct_url: str | Omit = omit,
- domain: str | Omit = omit,
- timeout_ms: int | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> BrandStyleguideResponse:
- """
- Automatically extract comprehensive design system information from a brand's
- website including colors, typography, spacing, shadows, and UI components.
- Either 'domain' or 'directUrl' must be provided as a query parameter, but not
- both.
-
- Args:
- direct_url: A specific URL to fetch the styleguide from directly, bypassing domain
- resolution (e.g., 'https://example.com/design-system').
-
- domain: Domain name to extract styleguide from (e.g., 'example.com', 'google.com'). The
- domain will be automatically normalized and validated.
-
- timeout_ms: Optional timeout in milliseconds for the request. If the request takes longer
- than this value, it will be aborted with a 408 status code. Maximum allowed
- value is 300000ms (5 minutes).
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/brand/styleguide",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "direct_url": direct_url,
- "domain": domain,
- "timeout_ms": timeout_ms,
- },
- brand_styleguide_params.BrandStyleguideParams,
- ),
- ),
- cast_to=BrandStyleguideResponse,
- )
-
async def web_scrape_html(
self,
*,
url: str,
+ include_frames: bool | Omit = omit,
+ max_age_ms: int | Omit = omit,
+ parse_pdf: bool | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -4092,6 +4451,16 @@ async def web_scrape_html(
Args:
url: Full URL to scrape (must include http:// or https:// protocol)
+ include_frames: When true, iframes are rendered inline into the returned HTML.
+
+ max_age_ms: Return a cached result if a prior scrape for the same parameters exists and is
+ younger than this many milliseconds. Defaults to 1 day (86400000 ms) when
+ omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.
+
+ parse_pdf: When true (default), PDF URLs are fetched and their text layer is extracted and
+ returned wrapped in …. When false, PDF URLs are skipped
+ and a 400 WEBSITE_ACCESS_ERROR is returned.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -4107,7 +4476,15 @@ async def web_scrape_html(
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
- query=await async_maybe_transform({"url": url}, brand_web_scrape_html_params.BrandWebScrapeHTMLParams),
+ query=await async_maybe_transform(
+ {
+ "url": url,
+ "include_frames": include_frames,
+ "max_age_ms": max_age_ms,
+ "parse_pdf": parse_pdf,
+ },
+ brand_web_scrape_html_params.BrandWebScrapeHTMLParams,
+ ),
),
cast_to=BrandWebScrapeHTMLResponse,
)
@@ -4158,8 +4535,11 @@ async def web_scrape_md(
self,
*,
url: str,
+ include_frames: bool | Omit = omit,
include_images: bool | Omit = omit,
include_links: bool | Omit = omit,
+ max_age_ms: int | Omit = omit,
+ parse_pdf: bool | Omit = omit,
shorten_base64_images: bool | Omit = omit,
use_main_content_only: bool | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -4170,17 +4550,26 @@ async def web_scrape_md(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BrandWebScrapeMdResponse:
"""
- Scrapes the given URL, converts the HTML content to Markdown, and returns the
- result.
+ Scrapes the given URL into LLM usable Markdown.
Args:
- url: Full URL to scrape and convert to markdown (must include http:// or https://
+ url: Full URL to scrape into LLM usable Markdown (must include http:// or https://
protocol)
+ include_frames: When true, the contents of iframes are rendered to Markdown.
+
include_images: Include image references in Markdown output
include_links: Preserve hyperlinks in Markdown output
+ max_age_ms: Return a cached result if a prior scrape for the same parameters exists and is
+ younger than this many milliseconds. Defaults to 1 day (86400000 ms) when
+ omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.
+
+ parse_pdf: When true (default), PDF URLs are fetched and their text layer is extracted and
+ converted to Markdown. When false, PDF URLs are skipped and a 400
+ WEBSITE_ACCESS_ERROR is returned.
+
shorten_base64_images: Shorten base64-encoded image data in the Markdown output
use_main_content_only: Extract only the main content of the page, excluding headers, footers, sidebars,
@@ -4204,8 +4593,11 @@ async def web_scrape_md(
query=await async_maybe_transform(
{
"url": url,
+ "include_frames": include_frames,
"include_images": include_images,
"include_links": include_links,
+ "max_age_ms": max_age_ms,
+ "parse_pdf": parse_pdf,
"shorten_base64_images": shorten_base64_images,
"use_main_content_only": use_main_content_only,
},
@@ -4220,6 +4612,7 @@ async def web_scrape_sitemap(
*,
domain: str,
max_links: int | Omit = omit,
+ url_regex: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -4228,17 +4621,17 @@ async def web_scrape_sitemap(
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BrandWebScrapeSitemapResponse:
"""
- Crawls the sitemap of the given domain and returns all discovered page URLs.
- Supports sitemap index files (recursive), parallel fetching with concurrency
- control, deduplication, and filters out non-page resources (images, PDFs, etc.).
+ Crawl an entire website's sitemap and return all discovered page URLs.
Args:
- domain: Domain name to crawl sitemaps for (e.g., 'example.com'). The domain will be
- automatically normalized and validated.
+ domain: Domain to build a sitemap for
max_links: Maximum number of links to return from the sitemap crawl. Defaults to 10,000.
Minimum is 1, maximum is 100,000.
+ url_regex: Optional RE2-compatible regex pattern. Only URLs matching this pattern are
+ returned and counted against maxLinks.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -4258,6 +4651,7 @@ async def web_scrape_sitemap(
{
"domain": domain,
"max_links": max_links,
+ "url_regex": url_regex,
},
brand_web_scrape_sitemap_params.BrandWebScrapeSitemapParams,
),
@@ -4282,9 +4676,6 @@ def __init__(self, brand: BrandResource) -> None:
self.ai_query = to_raw_response_wrapper(
brand.ai_query,
)
- self.fonts = to_raw_response_wrapper(
- brand.fonts,
- )
self.identify_from_transaction = to_raw_response_wrapper(
brand.identify_from_transaction,
)
@@ -4306,18 +4697,9 @@ def __init__(self, brand: BrandResource) -> None:
self.retrieve_by_ticker = to_raw_response_wrapper(
brand.retrieve_by_ticker,
)
- self.retrieve_naics = to_raw_response_wrapper(
- brand.retrieve_naics,
- )
self.retrieve_simplified = to_raw_response_wrapper(
brand.retrieve_simplified,
)
- self.screenshot = to_raw_response_wrapper(
- brand.screenshot,
- )
- self.styleguide = to_raw_response_wrapper(
- brand.styleguide,
- )
self.web_scrape_html = to_raw_response_wrapper(
brand.web_scrape_html,
)
@@ -4348,9 +4730,6 @@ def __init__(self, brand: AsyncBrandResource) -> None:
self.ai_query = async_to_raw_response_wrapper(
brand.ai_query,
)
- self.fonts = async_to_raw_response_wrapper(
- brand.fonts,
- )
self.identify_from_transaction = async_to_raw_response_wrapper(
brand.identify_from_transaction,
)
@@ -4372,18 +4751,9 @@ def __init__(self, brand: AsyncBrandResource) -> None:
self.retrieve_by_ticker = async_to_raw_response_wrapper(
brand.retrieve_by_ticker,
)
- self.retrieve_naics = async_to_raw_response_wrapper(
- brand.retrieve_naics,
- )
self.retrieve_simplified = async_to_raw_response_wrapper(
brand.retrieve_simplified,
)
- self.screenshot = async_to_raw_response_wrapper(
- brand.screenshot,
- )
- self.styleguide = async_to_raw_response_wrapper(
- brand.styleguide,
- )
self.web_scrape_html = async_to_raw_response_wrapper(
brand.web_scrape_html,
)
@@ -4414,9 +4784,6 @@ def __init__(self, brand: BrandResource) -> None:
self.ai_query = to_streamed_response_wrapper(
brand.ai_query,
)
- self.fonts = to_streamed_response_wrapper(
- brand.fonts,
- )
self.identify_from_transaction = to_streamed_response_wrapper(
brand.identify_from_transaction,
)
@@ -4438,18 +4805,9 @@ def __init__(self, brand: BrandResource) -> None:
self.retrieve_by_ticker = to_streamed_response_wrapper(
brand.retrieve_by_ticker,
)
- self.retrieve_naics = to_streamed_response_wrapper(
- brand.retrieve_naics,
- )
self.retrieve_simplified = to_streamed_response_wrapper(
brand.retrieve_simplified,
)
- self.screenshot = to_streamed_response_wrapper(
- brand.screenshot,
- )
- self.styleguide = to_streamed_response_wrapper(
- brand.styleguide,
- )
self.web_scrape_html = to_streamed_response_wrapper(
brand.web_scrape_html,
)
@@ -4480,9 +4838,6 @@ def __init__(self, brand: AsyncBrandResource) -> None:
self.ai_query = async_to_streamed_response_wrapper(
brand.ai_query,
)
- self.fonts = async_to_streamed_response_wrapper(
- brand.fonts,
- )
self.identify_from_transaction = async_to_streamed_response_wrapper(
brand.identify_from_transaction,
)
@@ -4504,18 +4859,9 @@ def __init__(self, brand: AsyncBrandResource) -> None:
self.retrieve_by_ticker = async_to_streamed_response_wrapper(
brand.retrieve_by_ticker,
)
- self.retrieve_naics = async_to_streamed_response_wrapper(
- brand.retrieve_naics,
- )
self.retrieve_simplified = async_to_streamed_response_wrapper(
brand.retrieve_simplified,
)
- self.screenshot = async_to_streamed_response_wrapper(
- brand.screenshot,
- )
- self.styleguide = async_to_streamed_response_wrapper(
- brand.styleguide,
- )
self.web_scrape_html = async_to_streamed_response_wrapper(
brand.web_scrape_html,
)
diff --git a/src/brand/dev/types/__init__.py b/src/brand/dev/types/__init__.py
index 2b7129d..8bf4c53 100644
--- a/src/brand/dev/types/__init__.py
+++ b/src/brand/dev/types/__init__.py
@@ -2,8 +2,6 @@
from __future__ import annotations
-from .brand_fonts_params import BrandFontsParams as BrandFontsParams
-from .brand_fonts_response import BrandFontsResponse as BrandFontsResponse
from .brand_ai_query_params import BrandAIQueryParams as BrandAIQueryParams
from .brand_prefetch_params import BrandPrefetchParams as BrandPrefetchParams
from .brand_retrieve_params import BrandRetrieveParams as BrandRetrieveParams
@@ -11,20 +9,14 @@
from .brand_ai_query_response import BrandAIQueryResponse as BrandAIQueryResponse
from .brand_prefetch_response import BrandPrefetchResponse as BrandPrefetchResponse
from .brand_retrieve_response import BrandRetrieveResponse as BrandRetrieveResponse
-from .brand_screenshot_params import BrandScreenshotParams as BrandScreenshotParams
-from .brand_styleguide_params import BrandStyleguideParams as BrandStyleguideParams
from .brand_ai_products_params import BrandAIProductsParams as BrandAIProductsParams
from .brand_ai_product_response import BrandAIProductResponse as BrandAIProductResponse
-from .brand_screenshot_response import BrandScreenshotResponse as BrandScreenshotResponse
-from .brand_styleguide_response import BrandStyleguideResponse as BrandStyleguideResponse
from .brand_ai_products_response import BrandAIProductsResponse as BrandAIProductsResponse
from .brand_web_scrape_md_params import BrandWebScrapeMdParams as BrandWebScrapeMdParams
-from .brand_retrieve_naics_params import BrandRetrieveNaicsParams as BrandRetrieveNaicsParams
from .brand_web_scrape_html_params import BrandWebScrapeHTMLParams as BrandWebScrapeHTMLParams
from .brand_web_scrape_md_response import BrandWebScrapeMdResponse as BrandWebScrapeMdResponse
from .brand_retrieve_by_isin_params import BrandRetrieveByIsinParams as BrandRetrieveByIsinParams
from .brand_retrieve_by_name_params import BrandRetrieveByNameParams as BrandRetrieveByNameParams
-from .brand_retrieve_naics_response import BrandRetrieveNaicsResponse as BrandRetrieveNaicsResponse
from .brand_prefetch_by_email_params import BrandPrefetchByEmailParams as BrandPrefetchByEmailParams
from .brand_retrieve_by_email_params import BrandRetrieveByEmailParams as BrandRetrieveByEmailParams
from .brand_web_scrape_html_response import BrandWebScrapeHTMLResponse as BrandWebScrapeHTMLResponse
diff --git a/src/brand/dev/types/brand_ai_product_params.py b/src/brand/dev/types/brand_ai_product_params.py
index 17e62c9..bc65ee6 100644
--- a/src/brand/dev/types/brand_ai_product_params.py
+++ b/src/brand/dev/types/brand_ai_product_params.py
@@ -13,6 +13,13 @@ class BrandAIProductParams(TypedDict, total=False):
url: Required[str]
"""The product page URL to extract product data from."""
+ max_age_ms: Annotated[int, PropertyInfo(alias="maxAgeMs")]
+ """
+ Return a cached result if a prior scrape for the same parameters exists and is
+ younger than this many milliseconds. Defaults to 7 days (604800000 ms) when
+ omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.
+ """
+
timeout_ms: Annotated[int, PropertyInfo(alias="timeoutMS")]
"""Optional timeout in milliseconds for the request.
diff --git a/src/brand/dev/types/brand_ai_product_response.py b/src/brand/dev/types/brand_ai_product_response.py
index eb31de2..1880ca5 100644
--- a/src/brand/dev/types/brand_ai_product_response.py
+++ b/src/brand/dev/types/brand_ai_product_response.py
@@ -23,6 +23,9 @@ class Product(BaseModel):
name: str
"""Name of the product"""
+ sku: Optional[str] = None
+ """Stock Keeping Unit (product identifier). Null if no identifier is found."""
+
tags: List[str]
"""Tags associated with the product"""
diff --git a/src/brand/dev/types/brand_ai_products_params.py b/src/brand/dev/types/brand_ai_products_params.py
index 9a61efe..978a605 100644
--- a/src/brand/dev/types/brand_ai_products_params.py
+++ b/src/brand/dev/types/brand_ai_products_params.py
@@ -14,6 +14,13 @@ class ByDomain(TypedDict, total=False):
domain: Required[str]
"""The domain name to analyze."""
+ max_age_ms: Annotated[int, PropertyInfo(alias="maxAgeMs")]
+ """
+ Return a cached result if a prior scrape for the same parameters exists and is
+ younger than this many milliseconds. Defaults to 7 days (604800000 ms) when
+ omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.
+ """
+
max_products: Annotated[int, PropertyInfo(alias="maxProducts")]
"""Maximum number of products to extract."""
@@ -31,6 +38,13 @@ class ByDirectURL(TypedDict, total=False):
domain resolution.
"""
+ max_age_ms: Annotated[int, PropertyInfo(alias="maxAgeMs")]
+ """
+ Return a cached result if a prior scrape for the same parameters exists and is
+ younger than this many milliseconds. Defaults to 7 days (604800000 ms) when
+ omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.
+ """
+
max_products: Annotated[int, PropertyInfo(alias="maxProducts")]
"""Maximum number of products to extract."""
diff --git a/src/brand/dev/types/brand_ai_products_response.py b/src/brand/dev/types/brand_ai_products_response.py
index 4100a1a..a074795 100644
--- a/src/brand/dev/types/brand_ai_products_response.py
+++ b/src/brand/dev/types/brand_ai_products_response.py
@@ -21,6 +21,9 @@ class Product(BaseModel):
name: str
"""Name of the product"""
+ sku: Optional[str] = None
+ """Stock Keeping Unit (product identifier). Null if no identifier is found."""
+
tags: List[str]
"""Tags associated with the product"""
diff --git a/src/brand/dev/types/brand_fonts_params.py b/src/brand/dev/types/brand_fonts_params.py
deleted file mode 100644
index db13d2f..0000000
--- a/src/brand/dev/types/brand_fonts_params.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, Annotated, TypedDict
-
-from .._utils import PropertyInfo
-
-__all__ = ["BrandFontsParams"]
-
-
-class BrandFontsParams(TypedDict, total=False):
- domain: Required[str]
- """Domain name to extract fonts from (e.g., 'example.com', 'google.com').
-
- The domain will be automatically normalized and validated.
- """
-
- timeout_ms: Annotated[int, PropertyInfo(alias="timeoutMS")]
- """Optional timeout in milliseconds for the request.
-
- If the request takes longer than this value, it will be aborted with a 408
- status code. Maximum allowed value is 300000ms (5 minutes).
- """
diff --git a/src/brand/dev/types/brand_fonts_response.py b/src/brand/dev/types/brand_fonts_response.py
deleted file mode 100644
index 2721af9..0000000
--- a/src/brand/dev/types/brand_fonts_response.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-
-from .._models import BaseModel
-
-__all__ = ["BrandFontsResponse", "Font"]
-
-
-class Font(BaseModel):
- fallbacks: List[str]
- """Array of fallback font families"""
-
- font: str
- """Font family name"""
-
- num_elements: float
- """Number of elements using this font"""
-
- num_words: float
- """Number of words using this font"""
-
- percent_elements: float
- """Percentage of elements using this font"""
-
- percent_words: float
- """Percentage of words using this font"""
-
- uses: List[str]
- """Array of CSS selectors or element types where this font is used"""
-
-
-class BrandFontsResponse(BaseModel):
- code: int
- """HTTP status code, e.g., 200"""
-
- domain: str
- """The normalized domain that was processed"""
-
- fonts: List[Font]
- """Array of font usage information"""
-
- status: str
- """Status of the response, e.g., 'ok'"""
diff --git a/src/brand/dev/types/brand_identify_from_transaction_params.py b/src/brand/dev/types/brand_identify_from_transaction_params.py
index e04b1e5..e6ecadd 100644
--- a/src/brand/dev/types/brand_identify_from_transaction_params.py
+++ b/src/brand/dev/types/brand_identify_from_transaction_params.py
@@ -263,61 +263,126 @@ class BrandIdentifyFromTransactionParams(TypedDict, total=False):
"""
force_language: Literal[
+ "afrikaans",
"albanian",
+ "amharic",
"arabic",
+ "armenian",
+ "assamese",
+ "aymara",
"azeri",
+ "basque",
+ "belarusian",
"bengali",
+ "bosnian",
"bulgarian",
+ "burmese",
"cantonese",
+ "catalan",
"cebuano",
+ "chinese",
+ "corsican",
"croatian",
"czech",
"danish",
"dutch",
"english",
+ "esperanto",
"estonian",
"farsi",
+ "fijian",
"finnish",
"french",
+ "galician",
+ "georgian",
"german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
"hausa",
"hawaiian",
+ "hebrew",
"hindi",
+ "hmong",
"hungarian",
"icelandic",
+ "igbo",
"indonesian",
+ "irish",
"italian",
+ "japanese",
+ "javanese",
+ "kannada",
"kazakh",
+ "khmer",
+ "kinyarwanda",
"korean",
+ "kurdish",
"kyrgyz",
+ "lao",
"latin",
"latvian",
+ "lingala",
"lithuanian",
+ "luxembourgish",
"macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
"mongolian",
"nepali",
"norwegian",
+ "odia",
+ "oromo",
"pashto",
"pidgin",
"polish",
"portuguese",
+ "punjabi",
+ "quechua",
"romanian",
"russian",
+ "samoan",
+ "scottish-gaelic",
"serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
"slovak",
"slovene",
"somali",
"spanish",
+ "sundanese",
"swahili",
"swedish",
"tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
"thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
"turkish",
+ "turkmen",
"ukrainian",
"urdu",
+ "uyghur",
"uzbek",
"vietnamese",
"welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
]
"""Optional parameter to force the language of the retrieved brand data."""
@@ -325,7 +390,6 @@ class BrandIdentifyFromTransactionParams(TypedDict, total=False):
"""
When set to true, the API will perform an additional verification steps to
ensure the identified brand matches the transaction with high confidence.
- Defaults to false.
"""
max_speed: Annotated[bool, PropertyInfo(alias="maxSpeed")]
diff --git a/src/brand/dev/types/brand_identify_from_transaction_response.py b/src/brand/dev/types/brand_identify_from_transaction_response.py
index 5c1261d..5d499ea 100644
--- a/src/brand/dev/types/brand_identify_from_transaction_response.py
+++ b/src/brand/dev/types/brand_identify_from_transaction_response.py
@@ -415,8 +415,42 @@ class BrandLogo(BaseModel):
class BrandSocial(BaseModel):
- type: Optional[str] = None
- """Type of social media, e.g., 'facebook', 'twitter'"""
+ type: Optional[
+ Literal[
+ "x",
+ "facebook",
+ "instagram",
+ "linkedin",
+ "youtube",
+ "pinterest",
+ "tiktok",
+ "dribbble",
+ "github",
+ "behance",
+ "snapchat",
+ "whatsapp",
+ "telegram",
+ "line",
+ "discord",
+ "twitch",
+ "vimeo",
+ "imdb",
+ "tumblr",
+ "flickr",
+ "giphy",
+ "medium",
+ "spotify",
+ "soundcloud",
+ "tripadvisor",
+ "yelp",
+ "producthunt",
+ "reddit",
+ "crunchbase",
+ "appstore",
+ "playstore",
+ ]
+ ] = None
+ """Type of social media platform"""
url: Optional[str] = None
"""URL of the social media page"""
@@ -470,6 +504,136 @@ class Brand(BaseModel):
phone: Optional[str] = None
"""Company phone number"""
+ primary_language: Optional[
+ Literal[
+ "afrikaans",
+ "albanian",
+ "amharic",
+ "arabic",
+ "armenian",
+ "assamese",
+ "aymara",
+ "azeri",
+ "basque",
+ "belarusian",
+ "bengali",
+ "bosnian",
+ "bulgarian",
+ "burmese",
+ "cantonese",
+ "catalan",
+ "cebuano",
+ "chinese",
+ "corsican",
+ "croatian",
+ "czech",
+ "danish",
+ "dutch",
+ "english",
+ "esperanto",
+ "estonian",
+ "farsi",
+ "fijian",
+ "finnish",
+ "french",
+ "galician",
+ "georgian",
+ "german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
+ "hausa",
+ "hawaiian",
+ "hebrew",
+ "hindi",
+ "hmong",
+ "hungarian",
+ "icelandic",
+ "igbo",
+ "indonesian",
+ "irish",
+ "italian",
+ "japanese",
+ "javanese",
+ "kannada",
+ "kazakh",
+ "khmer",
+ "kinyarwanda",
+ "korean",
+ "kurdish",
+ "kyrgyz",
+ "lao",
+ "latin",
+ "latvian",
+ "lingala",
+ "lithuanian",
+ "luxembourgish",
+ "macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
+ "mongolian",
+ "nepali",
+ "norwegian",
+ "odia",
+ "oromo",
+ "pashto",
+ "pidgin",
+ "polish",
+ "portuguese",
+ "punjabi",
+ "quechua",
+ "romanian",
+ "russian",
+ "samoan",
+ "scottish-gaelic",
+ "serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
+ "slovak",
+ "slovene",
+ "somali",
+ "spanish",
+ "sundanese",
+ "swahili",
+ "swedish",
+ "tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
+ "thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
+ "turkish",
+ "turkmen",
+ "ukrainian",
+ "urdu",
+ "uyghur",
+ "uzbek",
+ "vietnamese",
+ "welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
+ ]
+ ] = None
+ """The primary language of the brand's website content.
+
+ Detected from the HTML lang tag, page content analysis, or social media
+ descriptions.
+ """
+
slogan: Optional[str] = None
"""The brand's slogan"""
diff --git a/src/brand/dev/types/brand_retrieve_by_email_params.py b/src/brand/dev/types/brand_retrieve_by_email_params.py
index 886c213..84949e7 100644
--- a/src/brand/dev/types/brand_retrieve_by_email_params.py
+++ b/src/brand/dev/types/brand_retrieve_by_email_params.py
@@ -18,61 +18,126 @@ class BrandRetrieveByEmailParams(TypedDict, total=False):
"""
force_language: Literal[
+ "afrikaans",
"albanian",
+ "amharic",
"arabic",
+ "armenian",
+ "assamese",
+ "aymara",
"azeri",
+ "basque",
+ "belarusian",
"bengali",
+ "bosnian",
"bulgarian",
+ "burmese",
"cantonese",
+ "catalan",
"cebuano",
+ "chinese",
+ "corsican",
"croatian",
"czech",
"danish",
"dutch",
"english",
+ "esperanto",
"estonian",
"farsi",
+ "fijian",
"finnish",
"french",
+ "galician",
+ "georgian",
"german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
"hausa",
"hawaiian",
+ "hebrew",
"hindi",
+ "hmong",
"hungarian",
"icelandic",
+ "igbo",
"indonesian",
+ "irish",
"italian",
+ "japanese",
+ "javanese",
+ "kannada",
"kazakh",
+ "khmer",
+ "kinyarwanda",
"korean",
+ "kurdish",
"kyrgyz",
+ "lao",
"latin",
"latvian",
+ "lingala",
"lithuanian",
+ "luxembourgish",
"macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
"mongolian",
"nepali",
"norwegian",
+ "odia",
+ "oromo",
"pashto",
"pidgin",
"polish",
"portuguese",
+ "punjabi",
+ "quechua",
"romanian",
"russian",
+ "samoan",
+ "scottish-gaelic",
"serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
"slovak",
"slovene",
"somali",
"spanish",
+ "sundanese",
"swahili",
"swedish",
"tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
"thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
"turkish",
+ "turkmen",
"ukrainian",
"urdu",
+ "uyghur",
"uzbek",
"vietnamese",
"welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
]
"""Optional parameter to force the language of the retrieved brand data."""
diff --git a/src/brand/dev/types/brand_retrieve_by_email_response.py b/src/brand/dev/types/brand_retrieve_by_email_response.py
index 922fa75..db6b862 100644
--- a/src/brand/dev/types/brand_retrieve_by_email_response.py
+++ b/src/brand/dev/types/brand_retrieve_by_email_response.py
@@ -415,8 +415,42 @@ class BrandLogo(BaseModel):
class BrandSocial(BaseModel):
- type: Optional[str] = None
- """Type of social media, e.g., 'facebook', 'twitter'"""
+ type: Optional[
+ Literal[
+ "x",
+ "facebook",
+ "instagram",
+ "linkedin",
+ "youtube",
+ "pinterest",
+ "tiktok",
+ "dribbble",
+ "github",
+ "behance",
+ "snapchat",
+ "whatsapp",
+ "telegram",
+ "line",
+ "discord",
+ "twitch",
+ "vimeo",
+ "imdb",
+ "tumblr",
+ "flickr",
+ "giphy",
+ "medium",
+ "spotify",
+ "soundcloud",
+ "tripadvisor",
+ "yelp",
+ "producthunt",
+ "reddit",
+ "crunchbase",
+ "appstore",
+ "playstore",
+ ]
+ ] = None
+ """Type of social media platform"""
url: Optional[str] = None
"""URL of the social media page"""
@@ -470,6 +504,136 @@ class Brand(BaseModel):
phone: Optional[str] = None
"""Company phone number"""
+ primary_language: Optional[
+ Literal[
+ "afrikaans",
+ "albanian",
+ "amharic",
+ "arabic",
+ "armenian",
+ "assamese",
+ "aymara",
+ "azeri",
+ "basque",
+ "belarusian",
+ "bengali",
+ "bosnian",
+ "bulgarian",
+ "burmese",
+ "cantonese",
+ "catalan",
+ "cebuano",
+ "chinese",
+ "corsican",
+ "croatian",
+ "czech",
+ "danish",
+ "dutch",
+ "english",
+ "esperanto",
+ "estonian",
+ "farsi",
+ "fijian",
+ "finnish",
+ "french",
+ "galician",
+ "georgian",
+ "german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
+ "hausa",
+ "hawaiian",
+ "hebrew",
+ "hindi",
+ "hmong",
+ "hungarian",
+ "icelandic",
+ "igbo",
+ "indonesian",
+ "irish",
+ "italian",
+ "japanese",
+ "javanese",
+ "kannada",
+ "kazakh",
+ "khmer",
+ "kinyarwanda",
+ "korean",
+ "kurdish",
+ "kyrgyz",
+ "lao",
+ "latin",
+ "latvian",
+ "lingala",
+ "lithuanian",
+ "luxembourgish",
+ "macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
+ "mongolian",
+ "nepali",
+ "norwegian",
+ "odia",
+ "oromo",
+ "pashto",
+ "pidgin",
+ "polish",
+ "portuguese",
+ "punjabi",
+ "quechua",
+ "romanian",
+ "russian",
+ "samoan",
+ "scottish-gaelic",
+ "serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
+ "slovak",
+ "slovene",
+ "somali",
+ "spanish",
+ "sundanese",
+ "swahili",
+ "swedish",
+ "tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
+ "thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
+ "turkish",
+ "turkmen",
+ "ukrainian",
+ "urdu",
+ "uyghur",
+ "uzbek",
+ "vietnamese",
+ "welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
+ ]
+ ] = None
+ """The primary language of the brand's website content.
+
+ Detected from the HTML lang tag, page content analysis, or social media
+ descriptions.
+ """
+
slogan: Optional[str] = None
"""The brand's slogan"""
diff --git a/src/brand/dev/types/brand_retrieve_by_isin_params.py b/src/brand/dev/types/brand_retrieve_by_isin_params.py
index db559fa..5be121e 100644
--- a/src/brand/dev/types/brand_retrieve_by_isin_params.py
+++ b/src/brand/dev/types/brand_retrieve_by_isin_params.py
@@ -18,61 +18,126 @@ class BrandRetrieveByIsinParams(TypedDict, total=False):
"""
force_language: Literal[
+ "afrikaans",
"albanian",
+ "amharic",
"arabic",
+ "armenian",
+ "assamese",
+ "aymara",
"azeri",
+ "basque",
+ "belarusian",
"bengali",
+ "bosnian",
"bulgarian",
+ "burmese",
"cantonese",
+ "catalan",
"cebuano",
+ "chinese",
+ "corsican",
"croatian",
"czech",
"danish",
"dutch",
"english",
+ "esperanto",
"estonian",
"farsi",
+ "fijian",
"finnish",
"french",
+ "galician",
+ "georgian",
"german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
"hausa",
"hawaiian",
+ "hebrew",
"hindi",
+ "hmong",
"hungarian",
"icelandic",
+ "igbo",
"indonesian",
+ "irish",
"italian",
+ "japanese",
+ "javanese",
+ "kannada",
"kazakh",
+ "khmer",
+ "kinyarwanda",
"korean",
+ "kurdish",
"kyrgyz",
+ "lao",
"latin",
"latvian",
+ "lingala",
"lithuanian",
+ "luxembourgish",
"macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
"mongolian",
"nepali",
"norwegian",
+ "odia",
+ "oromo",
"pashto",
"pidgin",
"polish",
"portuguese",
+ "punjabi",
+ "quechua",
"romanian",
"russian",
+ "samoan",
+ "scottish-gaelic",
"serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
"slovak",
"slovene",
"somali",
"spanish",
+ "sundanese",
"swahili",
"swedish",
"tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
"thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
"turkish",
+ "turkmen",
"ukrainian",
"urdu",
+ "uyghur",
"uzbek",
"vietnamese",
"welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
]
"""Optional parameter to force the language of the retrieved brand data."""
diff --git a/src/brand/dev/types/brand_retrieve_by_isin_response.py b/src/brand/dev/types/brand_retrieve_by_isin_response.py
index 21a860e..3d080e9 100644
--- a/src/brand/dev/types/brand_retrieve_by_isin_response.py
+++ b/src/brand/dev/types/brand_retrieve_by_isin_response.py
@@ -415,8 +415,42 @@ class BrandLogo(BaseModel):
class BrandSocial(BaseModel):
- type: Optional[str] = None
- """Type of social media, e.g., 'facebook', 'twitter'"""
+ type: Optional[
+ Literal[
+ "x",
+ "facebook",
+ "instagram",
+ "linkedin",
+ "youtube",
+ "pinterest",
+ "tiktok",
+ "dribbble",
+ "github",
+ "behance",
+ "snapchat",
+ "whatsapp",
+ "telegram",
+ "line",
+ "discord",
+ "twitch",
+ "vimeo",
+ "imdb",
+ "tumblr",
+ "flickr",
+ "giphy",
+ "medium",
+ "spotify",
+ "soundcloud",
+ "tripadvisor",
+ "yelp",
+ "producthunt",
+ "reddit",
+ "crunchbase",
+ "appstore",
+ "playstore",
+ ]
+ ] = None
+ """Type of social media platform"""
url: Optional[str] = None
"""URL of the social media page"""
@@ -470,6 +504,136 @@ class Brand(BaseModel):
phone: Optional[str] = None
"""Company phone number"""
+ primary_language: Optional[
+ Literal[
+ "afrikaans",
+ "albanian",
+ "amharic",
+ "arabic",
+ "armenian",
+ "assamese",
+ "aymara",
+ "azeri",
+ "basque",
+ "belarusian",
+ "bengali",
+ "bosnian",
+ "bulgarian",
+ "burmese",
+ "cantonese",
+ "catalan",
+ "cebuano",
+ "chinese",
+ "corsican",
+ "croatian",
+ "czech",
+ "danish",
+ "dutch",
+ "english",
+ "esperanto",
+ "estonian",
+ "farsi",
+ "fijian",
+ "finnish",
+ "french",
+ "galician",
+ "georgian",
+ "german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
+ "hausa",
+ "hawaiian",
+ "hebrew",
+ "hindi",
+ "hmong",
+ "hungarian",
+ "icelandic",
+ "igbo",
+ "indonesian",
+ "irish",
+ "italian",
+ "japanese",
+ "javanese",
+ "kannada",
+ "kazakh",
+ "khmer",
+ "kinyarwanda",
+ "korean",
+ "kurdish",
+ "kyrgyz",
+ "lao",
+ "latin",
+ "latvian",
+ "lingala",
+ "lithuanian",
+ "luxembourgish",
+ "macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
+ "mongolian",
+ "nepali",
+ "norwegian",
+ "odia",
+ "oromo",
+ "pashto",
+ "pidgin",
+ "polish",
+ "portuguese",
+ "punjabi",
+ "quechua",
+ "romanian",
+ "russian",
+ "samoan",
+ "scottish-gaelic",
+ "serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
+ "slovak",
+ "slovene",
+ "somali",
+ "spanish",
+ "sundanese",
+ "swahili",
+ "swedish",
+ "tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
+ "thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
+ "turkish",
+ "turkmen",
+ "ukrainian",
+ "urdu",
+ "uyghur",
+ "uzbek",
+ "vietnamese",
+ "welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
+ ]
+ ] = None
+ """The primary language of the brand's website content.
+
+ Detected from the HTML lang tag, page content analysis, or social media
+ descriptions.
+ """
+
slogan: Optional[str] = None
"""The brand's slogan"""
diff --git a/src/brand/dev/types/brand_retrieve_by_name_params.py b/src/brand/dev/types/brand_retrieve_by_name_params.py
index 847bdb3..d774223 100644
--- a/src/brand/dev/types/brand_retrieve_by_name_params.py
+++ b/src/brand/dev/types/brand_retrieve_by_name_params.py
@@ -257,67 +257,132 @@ class BrandRetrieveByNameParams(TypedDict, total=False):
"zm",
"zw",
]
- """Optional country code (GL parameter) to specify the country.
-
- This affects the geographic location used for search queries.
+ """
+ Optional country code hint (GL parameter) to specify the country for the company
+ name.
"""
force_language: Literal[
+ "afrikaans",
"albanian",
+ "amharic",
"arabic",
+ "armenian",
+ "assamese",
+ "aymara",
"azeri",
+ "basque",
+ "belarusian",
"bengali",
+ "bosnian",
"bulgarian",
+ "burmese",
"cantonese",
+ "catalan",
"cebuano",
+ "chinese",
+ "corsican",
"croatian",
"czech",
"danish",
"dutch",
"english",
+ "esperanto",
"estonian",
"farsi",
+ "fijian",
"finnish",
"french",
+ "galician",
+ "georgian",
"german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
"hausa",
"hawaiian",
+ "hebrew",
"hindi",
+ "hmong",
"hungarian",
"icelandic",
+ "igbo",
"indonesian",
+ "irish",
"italian",
+ "japanese",
+ "javanese",
+ "kannada",
"kazakh",
+ "khmer",
+ "kinyarwanda",
"korean",
+ "kurdish",
"kyrgyz",
+ "lao",
"latin",
"latvian",
+ "lingala",
"lithuanian",
+ "luxembourgish",
"macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
"mongolian",
"nepali",
"norwegian",
+ "odia",
+ "oromo",
"pashto",
"pidgin",
"polish",
"portuguese",
+ "punjabi",
+ "quechua",
"romanian",
"russian",
+ "samoan",
+ "scottish-gaelic",
"serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
"slovak",
"slovene",
"somali",
"spanish",
+ "sundanese",
"swahili",
"swedish",
"tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
"thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
"turkish",
+ "turkmen",
"ukrainian",
"urdu",
+ "uyghur",
"uzbek",
"vietnamese",
"welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
]
"""Optional parameter to force the language of the retrieved brand data."""
diff --git a/src/brand/dev/types/brand_retrieve_by_name_response.py b/src/brand/dev/types/brand_retrieve_by_name_response.py
index 1e462e7..9a5b18e 100644
--- a/src/brand/dev/types/brand_retrieve_by_name_response.py
+++ b/src/brand/dev/types/brand_retrieve_by_name_response.py
@@ -415,8 +415,42 @@ class BrandLogo(BaseModel):
class BrandSocial(BaseModel):
- type: Optional[str] = None
- """Type of social media, e.g., 'facebook', 'twitter'"""
+ type: Optional[
+ Literal[
+ "x",
+ "facebook",
+ "instagram",
+ "linkedin",
+ "youtube",
+ "pinterest",
+ "tiktok",
+ "dribbble",
+ "github",
+ "behance",
+ "snapchat",
+ "whatsapp",
+ "telegram",
+ "line",
+ "discord",
+ "twitch",
+ "vimeo",
+ "imdb",
+ "tumblr",
+ "flickr",
+ "giphy",
+ "medium",
+ "spotify",
+ "soundcloud",
+ "tripadvisor",
+ "yelp",
+ "producthunt",
+ "reddit",
+ "crunchbase",
+ "appstore",
+ "playstore",
+ ]
+ ] = None
+ """Type of social media platform"""
url: Optional[str] = None
"""URL of the social media page"""
@@ -470,6 +504,136 @@ class Brand(BaseModel):
phone: Optional[str] = None
"""Company phone number"""
+ primary_language: Optional[
+ Literal[
+ "afrikaans",
+ "albanian",
+ "amharic",
+ "arabic",
+ "armenian",
+ "assamese",
+ "aymara",
+ "azeri",
+ "basque",
+ "belarusian",
+ "bengali",
+ "bosnian",
+ "bulgarian",
+ "burmese",
+ "cantonese",
+ "catalan",
+ "cebuano",
+ "chinese",
+ "corsican",
+ "croatian",
+ "czech",
+ "danish",
+ "dutch",
+ "english",
+ "esperanto",
+ "estonian",
+ "farsi",
+ "fijian",
+ "finnish",
+ "french",
+ "galician",
+ "georgian",
+ "german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
+ "hausa",
+ "hawaiian",
+ "hebrew",
+ "hindi",
+ "hmong",
+ "hungarian",
+ "icelandic",
+ "igbo",
+ "indonesian",
+ "irish",
+ "italian",
+ "japanese",
+ "javanese",
+ "kannada",
+ "kazakh",
+ "khmer",
+ "kinyarwanda",
+ "korean",
+ "kurdish",
+ "kyrgyz",
+ "lao",
+ "latin",
+ "latvian",
+ "lingala",
+ "lithuanian",
+ "luxembourgish",
+ "macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
+ "mongolian",
+ "nepali",
+ "norwegian",
+ "odia",
+ "oromo",
+ "pashto",
+ "pidgin",
+ "polish",
+ "portuguese",
+ "punjabi",
+ "quechua",
+ "romanian",
+ "russian",
+ "samoan",
+ "scottish-gaelic",
+ "serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
+ "slovak",
+ "slovene",
+ "somali",
+ "spanish",
+ "sundanese",
+ "swahili",
+ "swedish",
+ "tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
+ "thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
+ "turkish",
+ "turkmen",
+ "ukrainian",
+ "urdu",
+ "uyghur",
+ "uzbek",
+ "vietnamese",
+ "welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
+ ]
+ ] = None
+ """The primary language of the brand's website content.
+
+ Detected from the HTML lang tag, page content analysis, or social media
+ descriptions.
+ """
+
slogan: Optional[str] = None
"""The brand's slogan"""
diff --git a/src/brand/dev/types/brand_retrieve_by_ticker_params.py b/src/brand/dev/types/brand_retrieve_by_ticker_params.py
index d328385..9d1124f 100644
--- a/src/brand/dev/types/brand_retrieve_by_ticker_params.py
+++ b/src/brand/dev/types/brand_retrieve_by_ticker_params.py
@@ -17,61 +17,126 @@ class BrandRetrieveByTickerParams(TypedDict, total=False):
"""
force_language: Literal[
+ "afrikaans",
"albanian",
+ "amharic",
"arabic",
+ "armenian",
+ "assamese",
+ "aymara",
"azeri",
+ "basque",
+ "belarusian",
"bengali",
+ "bosnian",
"bulgarian",
+ "burmese",
"cantonese",
+ "catalan",
"cebuano",
+ "chinese",
+ "corsican",
"croatian",
"czech",
"danish",
"dutch",
"english",
+ "esperanto",
"estonian",
"farsi",
+ "fijian",
"finnish",
"french",
+ "galician",
+ "georgian",
"german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
"hausa",
"hawaiian",
+ "hebrew",
"hindi",
+ "hmong",
"hungarian",
"icelandic",
+ "igbo",
"indonesian",
+ "irish",
"italian",
+ "japanese",
+ "javanese",
+ "kannada",
"kazakh",
+ "khmer",
+ "kinyarwanda",
"korean",
+ "kurdish",
"kyrgyz",
+ "lao",
"latin",
"latvian",
+ "lingala",
"lithuanian",
+ "luxembourgish",
"macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
"mongolian",
"nepali",
"norwegian",
+ "odia",
+ "oromo",
"pashto",
"pidgin",
"polish",
"portuguese",
+ "punjabi",
+ "quechua",
"romanian",
"russian",
+ "samoan",
+ "scottish-gaelic",
"serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
"slovak",
"slovene",
"somali",
"spanish",
+ "sundanese",
"swahili",
"swedish",
"tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
"thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
"turkish",
+ "turkmen",
"ukrainian",
"urdu",
+ "uyghur",
"uzbek",
"vietnamese",
"welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
]
"""Optional parameter to force the language of the retrieved brand data."""
diff --git a/src/brand/dev/types/brand_retrieve_by_ticker_response.py b/src/brand/dev/types/brand_retrieve_by_ticker_response.py
index 9815a65..964d7a4 100644
--- a/src/brand/dev/types/brand_retrieve_by_ticker_response.py
+++ b/src/brand/dev/types/brand_retrieve_by_ticker_response.py
@@ -415,8 +415,42 @@ class BrandLogo(BaseModel):
class BrandSocial(BaseModel):
- type: Optional[str] = None
- """Type of social media, e.g., 'facebook', 'twitter'"""
+ type: Optional[
+ Literal[
+ "x",
+ "facebook",
+ "instagram",
+ "linkedin",
+ "youtube",
+ "pinterest",
+ "tiktok",
+ "dribbble",
+ "github",
+ "behance",
+ "snapchat",
+ "whatsapp",
+ "telegram",
+ "line",
+ "discord",
+ "twitch",
+ "vimeo",
+ "imdb",
+ "tumblr",
+ "flickr",
+ "giphy",
+ "medium",
+ "spotify",
+ "soundcloud",
+ "tripadvisor",
+ "yelp",
+ "producthunt",
+ "reddit",
+ "crunchbase",
+ "appstore",
+ "playstore",
+ ]
+ ] = None
+ """Type of social media platform"""
url: Optional[str] = None
"""URL of the social media page"""
@@ -470,6 +504,136 @@ class Brand(BaseModel):
phone: Optional[str] = None
"""Company phone number"""
+ primary_language: Optional[
+ Literal[
+ "afrikaans",
+ "albanian",
+ "amharic",
+ "arabic",
+ "armenian",
+ "assamese",
+ "aymara",
+ "azeri",
+ "basque",
+ "belarusian",
+ "bengali",
+ "bosnian",
+ "bulgarian",
+ "burmese",
+ "cantonese",
+ "catalan",
+ "cebuano",
+ "chinese",
+ "corsican",
+ "croatian",
+ "czech",
+ "danish",
+ "dutch",
+ "english",
+ "esperanto",
+ "estonian",
+ "farsi",
+ "fijian",
+ "finnish",
+ "french",
+ "galician",
+ "georgian",
+ "german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
+ "hausa",
+ "hawaiian",
+ "hebrew",
+ "hindi",
+ "hmong",
+ "hungarian",
+ "icelandic",
+ "igbo",
+ "indonesian",
+ "irish",
+ "italian",
+ "japanese",
+ "javanese",
+ "kannada",
+ "kazakh",
+ "khmer",
+ "kinyarwanda",
+ "korean",
+ "kurdish",
+ "kyrgyz",
+ "lao",
+ "latin",
+ "latvian",
+ "lingala",
+ "lithuanian",
+ "luxembourgish",
+ "macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
+ "mongolian",
+ "nepali",
+ "norwegian",
+ "odia",
+ "oromo",
+ "pashto",
+ "pidgin",
+ "polish",
+ "portuguese",
+ "punjabi",
+ "quechua",
+ "romanian",
+ "russian",
+ "samoan",
+ "scottish-gaelic",
+ "serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
+ "slovak",
+ "slovene",
+ "somali",
+ "spanish",
+ "sundanese",
+ "swahili",
+ "swedish",
+ "tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
+ "thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
+ "turkish",
+ "turkmen",
+ "ukrainian",
+ "urdu",
+ "uyghur",
+ "uzbek",
+ "vietnamese",
+ "welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
+ ]
+ ] = None
+ """The primary language of the brand's website content.
+
+ Detected from the HTML lang tag, page content analysis, or social media
+ descriptions.
+ """
+
slogan: Optional[str] = None
"""The brand's slogan"""
diff --git a/src/brand/dev/types/brand_retrieve_naics_params.py b/src/brand/dev/types/brand_retrieve_naics_params.py
deleted file mode 100644
index 2803c13..0000000
--- a/src/brand/dev/types/brand_retrieve_naics_params.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, Annotated, TypedDict
-
-from .._utils import PropertyInfo
-
-__all__ = ["BrandRetrieveNaicsParams"]
-
-
-class BrandRetrieveNaicsParams(TypedDict, total=False):
- input: Required[str]
- """Brand domain or title to retrieve NAICS code for.
-
- If a valid domain is provided in `input`, it will be used for classification,
- otherwise, we will search for the brand using the provided title.
- """
-
- max_results: Annotated[int, PropertyInfo(alias="maxResults")]
- """Maximum number of NAICS codes to return.
-
- Must be between 1 and 10. Defaults to 5.
- """
-
- min_results: Annotated[int, PropertyInfo(alias="minResults")]
- """Minimum number of NAICS codes to return. Must be at least 1. Defaults to 1."""
-
- timeout_ms: Annotated[int, PropertyInfo(alias="timeoutMS")]
- """Optional timeout in milliseconds for the request.
-
- If the request takes longer than this value, it will be aborted with a 408
- status code. Maximum allowed value is 300000ms (5 minutes).
- """
diff --git a/src/brand/dev/types/brand_retrieve_naics_response.py b/src/brand/dev/types/brand_retrieve_naics_response.py
deleted file mode 100644
index a53da5e..0000000
--- a/src/brand/dev/types/brand_retrieve_naics_response.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["BrandRetrieveNaicsResponse", "Code"]
-
-
-class Code(BaseModel):
- code: str
- """NAICS code"""
-
- confidence: Literal["high", "medium", "low"]
- """Confidence level for how well this NAICS code matches the company description"""
-
- name: str
- """NAICS title"""
-
-
-class BrandRetrieveNaicsResponse(BaseModel):
- codes: Optional[List[Code]] = None
- """Array of NAICS codes and titles."""
-
- domain: Optional[str] = None
- """Domain found for the brand"""
-
- status: Optional[str] = None
- """Status of the response, e.g., 'ok'"""
-
- type: Optional[str] = None
- """Industry classification type, for naics api it will be `naics`"""
diff --git a/src/brand/dev/types/brand_retrieve_params.py b/src/brand/dev/types/brand_retrieve_params.py
index 03e102a..d25c4c0 100644
--- a/src/brand/dev/types/brand_retrieve_params.py
+++ b/src/brand/dev/types/brand_retrieve_params.py
@@ -17,66 +17,128 @@ class BrandRetrieveParams(TypedDict, total=False):
"""
force_language: Literal[
+ "afrikaans",
"albanian",
+ "amharic",
"arabic",
+ "armenian",
+ "assamese",
+ "aymara",
"azeri",
+ "basque",
+ "belarusian",
"bengali",
+ "bosnian",
"bulgarian",
+ "burmese",
"cantonese",
+ "catalan",
"cebuano",
+ "chinese",
+ "corsican",
"croatian",
"czech",
"danish",
"dutch",
"english",
+ "esperanto",
"estonian",
"farsi",
+ "fijian",
"finnish",
"french",
+ "galician",
+ "georgian",
"german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
"hausa",
"hawaiian",
+ "hebrew",
"hindi",
+ "hmong",
"hungarian",
"icelandic",
+ "igbo",
"indonesian",
+ "irish",
"italian",
+ "japanese",
+ "javanese",
+ "kannada",
"kazakh",
+ "khmer",
+ "kinyarwanda",
"korean",
+ "kurdish",
"kyrgyz",
+ "lao",
"latin",
"latvian",
+ "lingala",
"lithuanian",
+ "luxembourgish",
"macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
"mongolian",
"nepali",
"norwegian",
+ "odia",
+ "oromo",
"pashto",
"pidgin",
"polish",
"portuguese",
+ "punjabi",
+ "quechua",
"romanian",
"russian",
+ "samoan",
+ "scottish-gaelic",
"serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
"slovak",
"slovene",
"somali",
"spanish",
+ "sundanese",
"swahili",
"swedish",
"tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
"thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
"turkish",
+ "turkmen",
"ukrainian",
"urdu",
+ "uyghur",
"uzbek",
"vietnamese",
"welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
]
- """Optional parameter to force the language of the retrieved brand data.
-
- Works with all three lookup methods.
- """
+ """Optional parameter to force the language of the retrieved brand data."""
max_speed: Annotated[bool, PropertyInfo(alias="maxSpeed")]
"""Optional parameter to optimize the API call for maximum speed.
diff --git a/src/brand/dev/types/brand_retrieve_response.py b/src/brand/dev/types/brand_retrieve_response.py
index 28aea6f..d272dab 100644
--- a/src/brand/dev/types/brand_retrieve_response.py
+++ b/src/brand/dev/types/brand_retrieve_response.py
@@ -415,8 +415,42 @@ class BrandLogo(BaseModel):
class BrandSocial(BaseModel):
- type: Optional[str] = None
- """Type of social media, e.g., 'facebook', 'twitter'"""
+ type: Optional[
+ Literal[
+ "x",
+ "facebook",
+ "instagram",
+ "linkedin",
+ "youtube",
+ "pinterest",
+ "tiktok",
+ "dribbble",
+ "github",
+ "behance",
+ "snapchat",
+ "whatsapp",
+ "telegram",
+ "line",
+ "discord",
+ "twitch",
+ "vimeo",
+ "imdb",
+ "tumblr",
+ "flickr",
+ "giphy",
+ "medium",
+ "spotify",
+ "soundcloud",
+ "tripadvisor",
+ "yelp",
+ "producthunt",
+ "reddit",
+ "crunchbase",
+ "appstore",
+ "playstore",
+ ]
+ ] = None
+ """Type of social media platform"""
url: Optional[str] = None
"""URL of the social media page"""
@@ -470,6 +504,136 @@ class Brand(BaseModel):
phone: Optional[str] = None
"""Company phone number"""
+ primary_language: Optional[
+ Literal[
+ "afrikaans",
+ "albanian",
+ "amharic",
+ "arabic",
+ "armenian",
+ "assamese",
+ "aymara",
+ "azeri",
+ "basque",
+ "belarusian",
+ "bengali",
+ "bosnian",
+ "bulgarian",
+ "burmese",
+ "cantonese",
+ "catalan",
+ "cebuano",
+ "chinese",
+ "corsican",
+ "croatian",
+ "czech",
+ "danish",
+ "dutch",
+ "english",
+ "esperanto",
+ "estonian",
+ "farsi",
+ "fijian",
+ "finnish",
+ "french",
+ "galician",
+ "georgian",
+ "german",
+ "greek",
+ "guarani",
+ "gujarati",
+ "haitian-creole",
+ "hausa",
+ "hawaiian",
+ "hebrew",
+ "hindi",
+ "hmong",
+ "hungarian",
+ "icelandic",
+ "igbo",
+ "indonesian",
+ "irish",
+ "italian",
+ "japanese",
+ "javanese",
+ "kannada",
+ "kazakh",
+ "khmer",
+ "kinyarwanda",
+ "korean",
+ "kurdish",
+ "kyrgyz",
+ "lao",
+ "latin",
+ "latvian",
+ "lingala",
+ "lithuanian",
+ "luxembourgish",
+ "macedonian",
+ "malagasy",
+ "malay",
+ "malayalam",
+ "maltese",
+ "maori",
+ "marathi",
+ "mongolian",
+ "nepali",
+ "norwegian",
+ "odia",
+ "oromo",
+ "pashto",
+ "pidgin",
+ "polish",
+ "portuguese",
+ "punjabi",
+ "quechua",
+ "romanian",
+ "russian",
+ "samoan",
+ "scottish-gaelic",
+ "serbian",
+ "sesotho",
+ "shona",
+ "sindhi",
+ "sinhala",
+ "slovak",
+ "slovene",
+ "somali",
+ "spanish",
+ "sundanese",
+ "swahili",
+ "swedish",
+ "tagalog",
+ "tajik",
+ "tamil",
+ "tatar",
+ "telugu",
+ "thai",
+ "tibetan",
+ "tigrinya",
+ "tongan",
+ "tswana",
+ "turkish",
+ "turkmen",
+ "ukrainian",
+ "urdu",
+ "uyghur",
+ "uzbek",
+ "vietnamese",
+ "welsh",
+ "wolof",
+ "xhosa",
+ "yiddish",
+ "yoruba",
+ "zulu",
+ ]
+ ] = None
+ """The primary language of the brand's website content.
+
+ Detected from the HTML lang tag, page content analysis, or social media
+ descriptions.
+ """
+
slogan: Optional[str] = None
"""The brand's slogan"""
diff --git a/src/brand/dev/types/brand_screenshot_params.py b/src/brand/dev/types/brand_screenshot_params.py
deleted file mode 100644
index 4f26b1f..0000000
--- a/src/brand/dev/types/brand_screenshot_params.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, Required, Annotated, TypedDict
-
-from .._utils import PropertyInfo
-
-__all__ = ["BrandScreenshotParams"]
-
-
-class BrandScreenshotParams(TypedDict, total=False):
- domain: Required[str]
- """Domain name to take screenshot of (e.g., 'example.com', 'google.com').
-
- The domain will be automatically normalized and validated.
- """
-
- full_screenshot: Annotated[Literal["true", "false"], PropertyInfo(alias="fullScreenshot")]
- """Optional parameter to determine screenshot type.
-
- If 'true', takes a full page screenshot capturing all content. If 'false' or not
- provided, takes a viewport screenshot (standard browser view).
- """
-
- page: Literal["login", "signup", "blog", "careers", "pricing", "terms", "privacy", "contact"]
- """Optional parameter to specify which page type to screenshot.
-
- If provided, the system will scrape the domain's links and use heuristics to
- find the most appropriate URL for the specified page type (30 supported
- languages). If not provided, screenshots the main domain landing page.
- """
-
- prioritize: Literal["speed", "quality"]
- """Optional parameter to prioritize screenshot capture.
-
- If 'speed', optimizes for faster capture with basic quality. If 'quality',
- optimizes for higher quality with longer wait times. Defaults to 'quality' if
- not provided.
- """
diff --git a/src/brand/dev/types/brand_screenshot_response.py b/src/brand/dev/types/brand_screenshot_response.py
deleted file mode 100644
index c43ae74..0000000
--- a/src/brand/dev/types/brand_screenshot_response.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from pydantic import Field as FieldInfo
-
-from .._models import BaseModel
-
-__all__ = ["BrandScreenshotResponse"]
-
-
-class BrandScreenshotResponse(BaseModel):
- code: Optional[int] = None
- """HTTP status code"""
-
- domain: Optional[str] = None
- """The normalized domain that was processed"""
-
- screenshot: Optional[str] = None
- """Public URL of the uploaded screenshot image"""
-
- screenshot_type: Optional[Literal["viewport", "fullPage"]] = FieldInfo(alias="screenshotType", default=None)
- """Type of screenshot that was captured"""
-
- status: Optional[str] = None
- """Status of the response, e.g., 'ok'"""
diff --git a/src/brand/dev/types/brand_styleguide_params.py b/src/brand/dev/types/brand_styleguide_params.py
deleted file mode 100644
index 9c858b5..0000000
--- a/src/brand/dev/types/brand_styleguide_params.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Annotated, TypedDict
-
-from .._utils import PropertyInfo
-
-__all__ = ["BrandStyleguideParams"]
-
-
-class BrandStyleguideParams(TypedDict, total=False):
- direct_url: Annotated[str, PropertyInfo(alias="directUrl")]
- """
- A specific URL to fetch the styleguide from directly, bypassing domain
- resolution (e.g., 'https://example.com/design-system').
- """
-
- domain: str
- """Domain name to extract styleguide from (e.g., 'example.com', 'google.com').
-
- The domain will be automatically normalized and validated.
- """
-
- timeout_ms: Annotated[int, PropertyInfo(alias="timeoutMS")]
- """Optional timeout in milliseconds for the request.
-
- If the request takes longer than this value, it will be aborted with a 408
- status code. Maximum allowed value is 300000ms (5 minutes).
- """
diff --git a/src/brand/dev/types/brand_styleguide_response.py b/src/brand/dev/types/brand_styleguide_response.py
deleted file mode 100644
index edc2d29..0000000
--- a/src/brand/dev/types/brand_styleguide_response.py
+++ /dev/null
@@ -1,395 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from pydantic import Field as FieldInfo
-
-from .._models import BaseModel
-
-__all__ = [
- "BrandStyleguideResponse",
- "Styleguide",
- "StyleguideColors",
- "StyleguideComponents",
- "StyleguideComponentsButton",
- "StyleguideComponentsButtonLink",
- "StyleguideComponentsButtonPrimary",
- "StyleguideComponentsButtonSecondary",
- "StyleguideComponentsCard",
- "StyleguideElementSpacing",
- "StyleguideShadows",
- "StyleguideTypography",
- "StyleguideTypographyHeadings",
- "StyleguideTypographyHeadingsH1",
- "StyleguideTypographyHeadingsH2",
- "StyleguideTypographyHeadingsH3",
- "StyleguideTypographyHeadingsH4",
- "StyleguideTypographyP",
-]
-
-
-class StyleguideColors(BaseModel):
- """Primary colors used on the website"""
-
- accent: str
- """Accent color (hex format)"""
-
- background: str
- """Background color (hex format)"""
-
- text: str
- """Text color (hex format)"""
-
-
-class StyleguideComponentsButtonLink(BaseModel):
- background_color: str = FieldInfo(alias="backgroundColor")
-
- border_color: str = FieldInfo(alias="borderColor")
- """
- Border color as CSS hex (#RRGGBB or #RRGGBBAA when computed border-color has
- alpha)
- """
-
- border_radius: str = FieldInfo(alias="borderRadius")
-
- border_style: str = FieldInfo(alias="borderStyle")
-
- border_width: str = FieldInfo(alias="borderWidth")
-
- box_shadow: str = FieldInfo(alias="boxShadow")
- """Computed box-shadow (comma-separated layers when present)"""
-
- color: str
-
- css: str
- """Ready-to-use CSS declaration block for this component style"""
-
- font_size: str = FieldInfo(alias="fontSize")
-
- font_weight: float = FieldInfo(alias="fontWeight")
-
- min_height: str = FieldInfo(alias="minHeight")
- """Sampled minimum height of the button box (typically px)"""
-
- min_width: str = FieldInfo(alias="minWidth")
- """Sampled minimum width of the button box (typically px)"""
-
- padding: str
-
- text_decoration: str = FieldInfo(alias="textDecoration")
-
- font_fallbacks: Optional[List[str]] = FieldInfo(alias="fontFallbacks", default=None)
- """Full ordered font list from computed font-family"""
-
- font_family: Optional[str] = FieldInfo(alias="fontFamily", default=None)
- """Primary button typeface (first in fontFallbacks)"""
-
- text_decoration_color: Optional[str] = FieldInfo(alias="textDecorationColor", default=None)
- """Hex color of the underline when it differs from the text color"""
-
-
-class StyleguideComponentsButtonPrimary(BaseModel):
- background_color: str = FieldInfo(alias="backgroundColor")
-
- border_color: str = FieldInfo(alias="borderColor")
- """
- Border color as CSS hex (#RRGGBB or #RRGGBBAA when computed border-color has
- alpha)
- """
-
- border_radius: str = FieldInfo(alias="borderRadius")
-
- border_style: str = FieldInfo(alias="borderStyle")
-
- border_width: str = FieldInfo(alias="borderWidth")
-
- box_shadow: str = FieldInfo(alias="boxShadow")
- """Computed box-shadow (comma-separated layers when present)"""
-
- color: str
-
- css: str
- """Ready-to-use CSS declaration block for this component style"""
-
- font_size: str = FieldInfo(alias="fontSize")
-
- font_weight: float = FieldInfo(alias="fontWeight")
-
- min_height: str = FieldInfo(alias="minHeight")
- """Sampled minimum height of the button box (typically px)"""
-
- min_width: str = FieldInfo(alias="minWidth")
- """Sampled minimum width of the button box (typically px)"""
-
- padding: str
-
- text_decoration: str = FieldInfo(alias="textDecoration")
-
- font_fallbacks: Optional[List[str]] = FieldInfo(alias="fontFallbacks", default=None)
- """Full ordered font list from computed font-family"""
-
- font_family: Optional[str] = FieldInfo(alias="fontFamily", default=None)
- """Primary button typeface (first in fontFallbacks)"""
-
- text_decoration_color: Optional[str] = FieldInfo(alias="textDecorationColor", default=None)
- """Hex color of the underline when it differs from the text color"""
-
-
-class StyleguideComponentsButtonSecondary(BaseModel):
- background_color: str = FieldInfo(alias="backgroundColor")
-
- border_color: str = FieldInfo(alias="borderColor")
- """
- Border color as CSS hex (#RRGGBB or #RRGGBBAA when computed border-color has
- alpha)
- """
-
- border_radius: str = FieldInfo(alias="borderRadius")
-
- border_style: str = FieldInfo(alias="borderStyle")
-
- border_width: str = FieldInfo(alias="borderWidth")
-
- box_shadow: str = FieldInfo(alias="boxShadow")
- """Computed box-shadow (comma-separated layers when present)"""
-
- color: str
-
- css: str
- """Ready-to-use CSS declaration block for this component style"""
-
- font_size: str = FieldInfo(alias="fontSize")
-
- font_weight: float = FieldInfo(alias="fontWeight")
-
- min_height: str = FieldInfo(alias="minHeight")
- """Sampled minimum height of the button box (typically px)"""
-
- min_width: str = FieldInfo(alias="minWidth")
- """Sampled minimum width of the button box (typically px)"""
-
- padding: str
-
- text_decoration: str = FieldInfo(alias="textDecoration")
-
- font_fallbacks: Optional[List[str]] = FieldInfo(alias="fontFallbacks", default=None)
- """Full ordered font list from computed font-family"""
-
- font_family: Optional[str] = FieldInfo(alias="fontFamily", default=None)
- """Primary button typeface (first in fontFallbacks)"""
-
- text_decoration_color: Optional[str] = FieldInfo(alias="textDecorationColor", default=None)
- """Hex color of the underline when it differs from the text color"""
-
-
-class StyleguideComponentsButton(BaseModel):
- """Button component styles"""
-
- link: Optional[StyleguideComponentsButtonLink] = None
-
- primary: Optional[StyleguideComponentsButtonPrimary] = None
-
- secondary: Optional[StyleguideComponentsButtonSecondary] = None
-
-
-class StyleguideComponentsCard(BaseModel):
- """Card component style"""
-
- background_color: str = FieldInfo(alias="backgroundColor")
-
- border_color: str = FieldInfo(alias="borderColor")
- """
- Border color as CSS hex (#RRGGBB or #RRGGBBAA when computed border-color has
- alpha)
- """
-
- border_radius: str = FieldInfo(alias="borderRadius")
-
- border_style: str = FieldInfo(alias="borderStyle")
-
- border_width: str = FieldInfo(alias="borderWidth")
-
- box_shadow: str = FieldInfo(alias="boxShadow")
-
- css: str
- """Ready-to-use CSS declaration block for this component style"""
-
- padding: str
-
- text_color: str = FieldInfo(alias="textColor")
-
-
-class StyleguideComponents(BaseModel):
- """UI component styles"""
-
- button: StyleguideComponentsButton
- """Button component styles"""
-
- card: Optional[StyleguideComponentsCard] = None
- """Card component style"""
-
-
-class StyleguideElementSpacing(BaseModel):
- """Spacing system used on the website"""
-
- lg: str
-
- md: str
-
- sm: str
-
- xl: str
-
- xs: str
-
-
-class StyleguideShadows(BaseModel):
- """Shadow styles used on the website"""
-
- inner: str
-
- lg: str
-
- md: str
-
- sm: str
-
- xl: str
-
-
-class StyleguideTypographyHeadingsH1(BaseModel):
- font_fallbacks: List[str] = FieldInfo(alias="fontFallbacks")
- """Full ordered font list from resolved computed font-family"""
-
- font_family: str = FieldInfo(alias="fontFamily")
- """Primary face (first family in the computed stack)"""
-
- font_size: str = FieldInfo(alias="fontSize")
-
- font_weight: float = FieldInfo(alias="fontWeight")
-
- letter_spacing: str = FieldInfo(alias="letterSpacing")
-
- line_height: str = FieldInfo(alias="lineHeight")
-
-
-class StyleguideTypographyHeadingsH2(BaseModel):
- font_fallbacks: List[str] = FieldInfo(alias="fontFallbacks")
- """Full ordered font list from resolved computed font-family"""
-
- font_family: str = FieldInfo(alias="fontFamily")
- """Primary face (first family in the computed stack)"""
-
- font_size: str = FieldInfo(alias="fontSize")
-
- font_weight: float = FieldInfo(alias="fontWeight")
-
- letter_spacing: str = FieldInfo(alias="letterSpacing")
-
- line_height: str = FieldInfo(alias="lineHeight")
-
-
-class StyleguideTypographyHeadingsH3(BaseModel):
- font_fallbacks: List[str] = FieldInfo(alias="fontFallbacks")
- """Full ordered font list from resolved computed font-family"""
-
- font_family: str = FieldInfo(alias="fontFamily")
- """Primary face (first family in the computed stack)"""
-
- font_size: str = FieldInfo(alias="fontSize")
-
- font_weight: float = FieldInfo(alias="fontWeight")
-
- letter_spacing: str = FieldInfo(alias="letterSpacing")
-
- line_height: str = FieldInfo(alias="lineHeight")
-
-
-class StyleguideTypographyHeadingsH4(BaseModel):
- font_fallbacks: List[str] = FieldInfo(alias="fontFallbacks")
- """Full ordered font list from resolved computed font-family"""
-
- font_family: str = FieldInfo(alias="fontFamily")
- """Primary face (first family in the computed stack)"""
-
- font_size: str = FieldInfo(alias="fontSize")
-
- font_weight: float = FieldInfo(alias="fontWeight")
-
- letter_spacing: str = FieldInfo(alias="letterSpacing")
-
- line_height: str = FieldInfo(alias="lineHeight")
-
-
-class StyleguideTypographyHeadings(BaseModel):
- """Heading styles"""
-
- h1: Optional[StyleguideTypographyHeadingsH1] = None
-
- h2: Optional[StyleguideTypographyHeadingsH2] = None
-
- h3: Optional[StyleguideTypographyHeadingsH3] = None
-
- h4: Optional[StyleguideTypographyHeadingsH4] = None
-
-
-class StyleguideTypographyP(BaseModel):
- font_fallbacks: List[str] = FieldInfo(alias="fontFallbacks")
- """Full ordered font list from resolved computed font-family"""
-
- font_family: str = FieldInfo(alias="fontFamily")
- """Primary face (first family in the computed stack)"""
-
- font_size: str = FieldInfo(alias="fontSize")
-
- font_weight: float = FieldInfo(alias="fontWeight")
-
- letter_spacing: str = FieldInfo(alias="letterSpacing")
-
- line_height: str = FieldInfo(alias="lineHeight")
-
-
-class StyleguideTypography(BaseModel):
- """Typography styles used on the website"""
-
- headings: StyleguideTypographyHeadings
- """Heading styles"""
-
- p: Optional[StyleguideTypographyP] = None
-
-
-class Styleguide(BaseModel):
- """Comprehensive styleguide data extracted from the website"""
-
- colors: StyleguideColors
- """Primary colors used on the website"""
-
- components: StyleguideComponents
- """UI component styles"""
-
- element_spacing: StyleguideElementSpacing = FieldInfo(alias="elementSpacing")
- """Spacing system used on the website"""
-
- mode: Literal["light", "dark"]
- """The primary color mode of the website design"""
-
- shadows: StyleguideShadows
- """Shadow styles used on the website"""
-
- typography: StyleguideTypography
- """Typography styles used on the website"""
-
-
-class BrandStyleguideResponse(BaseModel):
- code: Optional[int] = None
- """HTTP status code"""
-
- domain: Optional[str] = None
- """The normalized domain that was processed"""
-
- status: Optional[str] = None
- """Status of the response, e.g., 'ok'"""
-
- styleguide: Optional[Styleguide] = None
- """Comprehensive styleguide data extracted from the website"""
diff --git a/src/brand/dev/types/brand_web_scrape_html_params.py b/src/brand/dev/types/brand_web_scrape_html_params.py
index 86246f4..60b8907 100644
--- a/src/brand/dev/types/brand_web_scrape_html_params.py
+++ b/src/brand/dev/types/brand_web_scrape_html_params.py
@@ -2,7 +2,9 @@
from __future__ import annotations
-from typing_extensions import Required, TypedDict
+from typing_extensions import Required, Annotated, TypedDict
+
+from .._utils import PropertyInfo
__all__ = ["BrandWebScrapeHTMLParams"]
@@ -10,3 +12,20 @@
class BrandWebScrapeHTMLParams(TypedDict, total=False):
url: Required[str]
"""Full URL to scrape (must include http:// or https:// protocol)"""
+
+ include_frames: Annotated[bool, PropertyInfo(alias="includeFrames")]
+ """When true, iframes are rendered inline into the returned HTML."""
+
+ max_age_ms: Annotated[int, PropertyInfo(alias="maxAgeMs")]
+ """
+ Return a cached result if a prior scrape for the same parameters exists and is
+ younger than this many milliseconds. Defaults to 1 day (86400000 ms) when
+ omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.
+ """
+
+ parse_pdf: Annotated[bool, PropertyInfo(alias="parsePDF")]
+ """
+ When true (default), PDF URLs are fetched and their text layer is extracted and
+ returned wrapped in …. When false, PDF URLs are skipped
+ and a 400 WEBSITE_ACCESS_ERROR is returned.
+ """
diff --git a/src/brand/dev/types/brand_web_scrape_md_params.py b/src/brand/dev/types/brand_web_scrape_md_params.py
index 6bf905e..1550f88 100644
--- a/src/brand/dev/types/brand_web_scrape_md_params.py
+++ b/src/brand/dev/types/brand_web_scrape_md_params.py
@@ -12,16 +12,33 @@
class BrandWebScrapeMdParams(TypedDict, total=False):
url: Required[str]
"""
- Full URL to scrape and convert to markdown (must include http:// or https://
+ Full URL to scrape into LLM usable Markdown (must include http:// or https://
protocol)
"""
+ include_frames: Annotated[bool, PropertyInfo(alias="includeFrames")]
+ """When true, the contents of iframes are rendered to Markdown."""
+
include_images: Annotated[bool, PropertyInfo(alias="includeImages")]
"""Include image references in Markdown output"""
include_links: Annotated[bool, PropertyInfo(alias="includeLinks")]
"""Preserve hyperlinks in Markdown output"""
+ max_age_ms: Annotated[int, PropertyInfo(alias="maxAgeMs")]
+ """
+ Return a cached result if a prior scrape for the same parameters exists and is
+ younger than this many milliseconds. Defaults to 1 day (86400000 ms) when
+ omitted. Max is 30 days (2592000000 ms). Set to 0 to always scrape fresh.
+ """
+
+ parse_pdf: Annotated[bool, PropertyInfo(alias="parsePDF")]
+ """
+ When true (default), PDF URLs are fetched and their text layer is extracted and
+ converted to Markdown. When false, PDF URLs are skipped and a 400
+ WEBSITE_ACCESS_ERROR is returned.
+ """
+
shorten_base64_images: Annotated[bool, PropertyInfo(alias="shortenBase64Images")]
"""Shorten base64-encoded image data in the Markdown output"""
diff --git a/src/brand/dev/types/brand_web_scrape_sitemap_params.py b/src/brand/dev/types/brand_web_scrape_sitemap_params.py
index e675dae..0a3be07 100644
--- a/src/brand/dev/types/brand_web_scrape_sitemap_params.py
+++ b/src/brand/dev/types/brand_web_scrape_sitemap_params.py
@@ -11,13 +11,16 @@
class BrandWebScrapeSitemapParams(TypedDict, total=False):
domain: Required[str]
- """Domain name to crawl sitemaps for (e.g., 'example.com').
-
- The domain will be automatically normalized and validated.
- """
+ """Domain to build a sitemap for"""
max_links: Annotated[int, PropertyInfo(alias="maxLinks")]
"""Maximum number of links to return from the sitemap crawl.
Defaults to 10,000. Minimum is 1, maximum is 100,000.
"""
+
+ url_regex: Annotated[str, PropertyInfo(alias="urlRegex")]
+ """Optional RE2-compatible regex pattern.
+
+ Only URLs matching this pattern are returned and counted against maxLinks.
+ """
diff --git a/tests/api_resources/test_brand.py b/tests/api_resources/test_brand.py
index 5587585..c8673b0 100644
--- a/tests/api_resources/test_brand.py
+++ b/tests/api_resources/test_brand.py
@@ -10,16 +10,12 @@
from brand.dev import BrandDev, AsyncBrandDev
from tests.utils import assert_matches_type
from brand.dev.types import (
- BrandFontsResponse,
BrandAIQueryResponse,
BrandPrefetchResponse,
BrandRetrieveResponse,
BrandAIProductResponse,
BrandAIProductsResponse,
- BrandScreenshotResponse,
- BrandStyleguideResponse,
BrandWebScrapeMdResponse,
- BrandRetrieveNaicsResponse,
BrandWebScrapeHTMLResponse,
BrandRetrieveByIsinResponse,
BrandRetrieveByNameResponse,
@@ -51,7 +47,7 @@ def test_method_retrieve(self, client: BrandDev) -> None:
def test_method_retrieve_with_all_params(self, client: BrandDev) -> None:
brand = client.brand.retrieve(
domain="domain",
- force_language="albanian",
+ force_language="afrikaans",
max_speed=True,
timeout_ms=1000,
)
@@ -96,6 +92,7 @@ def test_method_ai_product(self, client: BrandDev) -> None:
def test_method_ai_product_with_all_params(self, client: BrandDev) -> None:
brand = client.brand.ai_product(
url="https://example.com",
+ max_age_ms=0,
timeout_ms=1000,
)
assert_matches_type(BrandAIProductResponse, brand, path=["response"])
@@ -139,6 +136,7 @@ def test_method_ai_products_overload_1(self, client: BrandDev) -> None:
def test_method_ai_products_with_all_params_overload_1(self, client: BrandDev) -> None:
brand = client.brand.ai_products(
domain="domain",
+ max_age_ms=0,
max_products=1,
timeout_ms=1000,
)
@@ -183,6 +181,7 @@ def test_method_ai_products_overload_2(self, client: BrandDev) -> None:
def test_method_ai_products_with_all_params_overload_2(self, client: BrandDev) -> None:
brand = client.brand.ai_products(
direct_url="https://example.com",
+ max_age_ms=0,
max_products=1,
timeout_ms=1000,
)
@@ -305,49 +304,6 @@ def test_streaming_response_ai_query(self, client: BrandDev) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- def test_method_fonts(self, client: BrandDev) -> None:
- brand = client.brand.fonts(
- domain="domain",
- )
- assert_matches_type(BrandFontsResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- def test_method_fonts_with_all_params(self, client: BrandDev) -> None:
- brand = client.brand.fonts(
- domain="domain",
- timeout_ms=1000,
- )
- assert_matches_type(BrandFontsResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- def test_raw_response_fonts(self, client: BrandDev) -> None:
- response = client.brand.with_raw_response.fonts(
- domain="domain",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- brand = response.parse()
- assert_matches_type(BrandFontsResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- def test_streaming_response_fonts(self, client: BrandDev) -> None:
- with client.brand.with_streaming_response.fonts(
- domain="domain",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- brand = response.parse()
- assert_matches_type(BrandFontsResponse, brand, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
@pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
def test_method_identify_from_transaction(self, client: BrandDev) -> None:
@@ -363,7 +319,7 @@ def test_method_identify_from_transaction_with_all_params(self, client: BrandDev
transaction_info="transaction_info",
city="city",
country_gl="ad",
- force_language="albanian",
+ force_language="afrikaans",
high_confidence_only=True,
max_speed=True,
mcc="mcc",
@@ -497,7 +453,7 @@ def test_method_retrieve_by_email(self, client: BrandDev) -> None:
def test_method_retrieve_by_email_with_all_params(self, client: BrandDev) -> None:
brand = client.brand.retrieve_by_email(
email="dev@stainless.com",
- force_language="albanian",
+ force_language="afrikaans",
max_speed=True,
timeout_ms=1000,
)
@@ -542,7 +498,7 @@ def test_method_retrieve_by_isin(self, client: BrandDev) -> None:
def test_method_retrieve_by_isin_with_all_params(self, client: BrandDev) -> None:
brand = client.brand.retrieve_by_isin(
isin="SE60513A9993",
- force_language="albanian",
+ force_language="afrikaans",
max_speed=True,
timeout_ms=1000,
)
@@ -588,7 +544,7 @@ def test_method_retrieve_by_name_with_all_params(self, client: BrandDev) -> None
brand = client.brand.retrieve_by_name(
name="xxx",
country_gl="ad",
- force_language="albanian",
+ force_language="afrikaans",
max_speed=True,
timeout_ms=1000,
)
@@ -633,7 +589,7 @@ def test_method_retrieve_by_ticker(self, client: BrandDev) -> None:
def test_method_retrieve_by_ticker_with_all_params(self, client: BrandDev) -> None:
brand = client.brand.retrieve_by_ticker(
ticker="ticker",
- force_language="albanian",
+ force_language="afrikaans",
max_speed=True,
ticker_exchange="AMEX",
timeout_ms=1000,
@@ -666,51 +622,6 @@ def test_streaming_response_retrieve_by_ticker(self, client: BrandDev) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- def test_method_retrieve_naics(self, client: BrandDev) -> None:
- brand = client.brand.retrieve_naics(
- input="input",
- )
- assert_matches_type(BrandRetrieveNaicsResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- def test_method_retrieve_naics_with_all_params(self, client: BrandDev) -> None:
- brand = client.brand.retrieve_naics(
- input="input",
- max_results=1,
- min_results=1,
- timeout_ms=1000,
- )
- assert_matches_type(BrandRetrieveNaicsResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- def test_raw_response_retrieve_naics(self, client: BrandDev) -> None:
- response = client.brand.with_raw_response.retrieve_naics(
- input="input",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- brand = response.parse()
- assert_matches_type(BrandRetrieveNaicsResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- def test_streaming_response_retrieve_naics(self, client: BrandDev) -> None:
- with client.brand.with_streaming_response.retrieve_naics(
- input="input",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- brand = response.parse()
- assert_matches_type(BrandRetrieveNaicsResponse, brand, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
@pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
def test_method_retrieve_simplified(self, client: BrandDev) -> None:
@@ -756,92 +667,20 @@ def test_streaming_response_retrieve_simplified(self, client: BrandDev) -> None:
@pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_method_screenshot(self, client: BrandDev) -> None:
- brand = client.brand.screenshot(
- domain="domain",
- )
- assert_matches_type(BrandScreenshotResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- def test_method_screenshot_with_all_params(self, client: BrandDev) -> None:
- brand = client.brand.screenshot(
- domain="domain",
- full_screenshot="true",
- page="login",
- prioritize="speed",
- )
- assert_matches_type(BrandScreenshotResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- def test_raw_response_screenshot(self, client: BrandDev) -> None:
- response = client.brand.with_raw_response.screenshot(
- domain="domain",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- brand = response.parse()
- assert_matches_type(BrandScreenshotResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- def test_streaming_response_screenshot(self, client: BrandDev) -> None:
- with client.brand.with_streaming_response.screenshot(
- domain="domain",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- brand = response.parse()
- assert_matches_type(BrandScreenshotResponse, brand, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- def test_method_styleguide(self, client: BrandDev) -> None:
- brand = client.brand.styleguide()
- assert_matches_type(BrandStyleguideResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- def test_method_styleguide_with_all_params(self, client: BrandDev) -> None:
- brand = client.brand.styleguide(
- direct_url="https://example.com",
- domain="domain",
- timeout_ms=1000,
+ def test_method_web_scrape_html(self, client: BrandDev) -> None:
+ brand = client.brand.web_scrape_html(
+ url="https://example.com",
)
- assert_matches_type(BrandStyleguideResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- def test_raw_response_styleguide(self, client: BrandDev) -> None:
- response = client.brand.with_raw_response.styleguide()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- brand = response.parse()
- assert_matches_type(BrandStyleguideResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- def test_streaming_response_styleguide(self, client: BrandDev) -> None:
- with client.brand.with_streaming_response.styleguide() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- brand = response.parse()
- assert_matches_type(BrandStyleguideResponse, brand, path=["response"])
-
- assert cast(Any, response.is_closed) is True
+ assert_matches_type(BrandWebScrapeHTMLResponse, brand, path=["response"])
@pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- def test_method_web_scrape_html(self, client: BrandDev) -> None:
+ def test_method_web_scrape_html_with_all_params(self, client: BrandDev) -> None:
brand = client.brand.web_scrape_html(
url="https://example.com",
+ include_frames=True,
+ max_age_ms=0,
+ parse_pdf=True,
)
assert_matches_type(BrandWebScrapeHTMLResponse, brand, path=["response"])
@@ -918,8 +757,11 @@ def test_method_web_scrape_md(self, client: BrandDev) -> None:
def test_method_web_scrape_md_with_all_params(self, client: BrandDev) -> None:
brand = client.brand.web_scrape_md(
url="https://example.com",
+ include_frames=True,
include_images=True,
include_links=True,
+ max_age_ms=0,
+ parse_pdf=True,
shorten_base64_images=True,
use_main_content_only=True,
)
@@ -965,6 +807,7 @@ def test_method_web_scrape_sitemap_with_all_params(self, client: BrandDev) -> No
brand = client.brand.web_scrape_sitemap(
domain="domain",
max_links=1,
+ url_regex="^https?://[^/]+/blog/",
)
assert_matches_type(BrandWebScrapeSitemapResponse, brand, path=["response"])
@@ -1013,7 +856,7 @@ async def test_method_retrieve(self, async_client: AsyncBrandDev) -> None:
async def test_method_retrieve_with_all_params(self, async_client: AsyncBrandDev) -> None:
brand = await async_client.brand.retrieve(
domain="domain",
- force_language="albanian",
+ force_language="afrikaans",
max_speed=True,
timeout_ms=1000,
)
@@ -1058,6 +901,7 @@ async def test_method_ai_product(self, async_client: AsyncBrandDev) -> None:
async def test_method_ai_product_with_all_params(self, async_client: AsyncBrandDev) -> None:
brand = await async_client.brand.ai_product(
url="https://example.com",
+ max_age_ms=0,
timeout_ms=1000,
)
assert_matches_type(BrandAIProductResponse, brand, path=["response"])
@@ -1101,6 +945,7 @@ async def test_method_ai_products_overload_1(self, async_client: AsyncBrandDev)
async def test_method_ai_products_with_all_params_overload_1(self, async_client: AsyncBrandDev) -> None:
brand = await async_client.brand.ai_products(
domain="domain",
+ max_age_ms=0,
max_products=1,
timeout_ms=1000,
)
@@ -1145,6 +990,7 @@ async def test_method_ai_products_overload_2(self, async_client: AsyncBrandDev)
async def test_method_ai_products_with_all_params_overload_2(self, async_client: AsyncBrandDev) -> None:
brand = await async_client.brand.ai_products(
direct_url="https://example.com",
+ max_age_ms=0,
max_products=1,
timeout_ms=1000,
)
@@ -1267,49 +1113,6 @@ async def test_streaming_response_ai_query(self, async_client: AsyncBrandDev) ->
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- async def test_method_fonts(self, async_client: AsyncBrandDev) -> None:
- brand = await async_client.brand.fonts(
- domain="domain",
- )
- assert_matches_type(BrandFontsResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- async def test_method_fonts_with_all_params(self, async_client: AsyncBrandDev) -> None:
- brand = await async_client.brand.fonts(
- domain="domain",
- timeout_ms=1000,
- )
- assert_matches_type(BrandFontsResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- async def test_raw_response_fonts(self, async_client: AsyncBrandDev) -> None:
- response = await async_client.brand.with_raw_response.fonts(
- domain="domain",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- brand = await response.parse()
- assert_matches_type(BrandFontsResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- async def test_streaming_response_fonts(self, async_client: AsyncBrandDev) -> None:
- async with async_client.brand.with_streaming_response.fonts(
- domain="domain",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- brand = await response.parse()
- assert_matches_type(BrandFontsResponse, brand, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
@pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
async def test_method_identify_from_transaction(self, async_client: AsyncBrandDev) -> None:
@@ -1325,7 +1128,7 @@ async def test_method_identify_from_transaction_with_all_params(self, async_clie
transaction_info="transaction_info",
city="city",
country_gl="ad",
- force_language="albanian",
+ force_language="afrikaans",
high_confidence_only=True,
max_speed=True,
mcc="mcc",
@@ -1459,7 +1262,7 @@ async def test_method_retrieve_by_email(self, async_client: AsyncBrandDev) -> No
async def test_method_retrieve_by_email_with_all_params(self, async_client: AsyncBrandDev) -> None:
brand = await async_client.brand.retrieve_by_email(
email="dev@stainless.com",
- force_language="albanian",
+ force_language="afrikaans",
max_speed=True,
timeout_ms=1000,
)
@@ -1504,7 +1307,7 @@ async def test_method_retrieve_by_isin(self, async_client: AsyncBrandDev) -> Non
async def test_method_retrieve_by_isin_with_all_params(self, async_client: AsyncBrandDev) -> None:
brand = await async_client.brand.retrieve_by_isin(
isin="SE60513A9993",
- force_language="albanian",
+ force_language="afrikaans",
max_speed=True,
timeout_ms=1000,
)
@@ -1550,7 +1353,7 @@ async def test_method_retrieve_by_name_with_all_params(self, async_client: Async
brand = await async_client.brand.retrieve_by_name(
name="xxx",
country_gl="ad",
- force_language="albanian",
+ force_language="afrikaans",
max_speed=True,
timeout_ms=1000,
)
@@ -1595,7 +1398,7 @@ async def test_method_retrieve_by_ticker(self, async_client: AsyncBrandDev) -> N
async def test_method_retrieve_by_ticker_with_all_params(self, async_client: AsyncBrandDev) -> None:
brand = await async_client.brand.retrieve_by_ticker(
ticker="ticker",
- force_language="albanian",
+ force_language="afrikaans",
max_speed=True,
ticker_exchange="AMEX",
timeout_ms=1000,
@@ -1628,51 +1431,6 @@ async def test_streaming_response_retrieve_by_ticker(self, async_client: AsyncBr
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- async def test_method_retrieve_naics(self, async_client: AsyncBrandDev) -> None:
- brand = await async_client.brand.retrieve_naics(
- input="input",
- )
- assert_matches_type(BrandRetrieveNaicsResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- async def test_method_retrieve_naics_with_all_params(self, async_client: AsyncBrandDev) -> None:
- brand = await async_client.brand.retrieve_naics(
- input="input",
- max_results=1,
- min_results=1,
- timeout_ms=1000,
- )
- assert_matches_type(BrandRetrieveNaicsResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- async def test_raw_response_retrieve_naics(self, async_client: AsyncBrandDev) -> None:
- response = await async_client.brand.with_raw_response.retrieve_naics(
- input="input",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- brand = await response.parse()
- assert_matches_type(BrandRetrieveNaicsResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- async def test_streaming_response_retrieve_naics(self, async_client: AsyncBrandDev) -> None:
- async with async_client.brand.with_streaming_response.retrieve_naics(
- input="input",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- brand = await response.parse()
- assert_matches_type(BrandRetrieveNaicsResponse, brand, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
@pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
async def test_method_retrieve_simplified(self, async_client: AsyncBrandDev) -> None:
@@ -1718,92 +1476,20 @@ async def test_streaming_response_retrieve_simplified(self, async_client: AsyncB
@pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_method_screenshot(self, async_client: AsyncBrandDev) -> None:
- brand = await async_client.brand.screenshot(
- domain="domain",
- )
- assert_matches_type(BrandScreenshotResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- async def test_method_screenshot_with_all_params(self, async_client: AsyncBrandDev) -> None:
- brand = await async_client.brand.screenshot(
- domain="domain",
- full_screenshot="true",
- page="login",
- prioritize="speed",
- )
- assert_matches_type(BrandScreenshotResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- async def test_raw_response_screenshot(self, async_client: AsyncBrandDev) -> None:
- response = await async_client.brand.with_raw_response.screenshot(
- domain="domain",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- brand = await response.parse()
- assert_matches_type(BrandScreenshotResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- async def test_streaming_response_screenshot(self, async_client: AsyncBrandDev) -> None:
- async with async_client.brand.with_streaming_response.screenshot(
- domain="domain",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- brand = await response.parse()
- assert_matches_type(BrandScreenshotResponse, brand, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- async def test_method_styleguide(self, async_client: AsyncBrandDev) -> None:
- brand = await async_client.brand.styleguide()
- assert_matches_type(BrandStyleguideResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- async def test_method_styleguide_with_all_params(self, async_client: AsyncBrandDev) -> None:
- brand = await async_client.brand.styleguide(
- direct_url="https://example.com",
- domain="domain",
- timeout_ms=1000,
+ async def test_method_web_scrape_html(self, async_client: AsyncBrandDev) -> None:
+ brand = await async_client.brand.web_scrape_html(
+ url="https://example.com",
)
- assert_matches_type(BrandStyleguideResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- async def test_raw_response_styleguide(self, async_client: AsyncBrandDev) -> None:
- response = await async_client.brand.with_raw_response.styleguide()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- brand = await response.parse()
- assert_matches_type(BrandStyleguideResponse, brand, path=["response"])
-
- @pytest.mark.skip(reason="Mock server tests are disabled")
- @parametrize
- async def test_streaming_response_styleguide(self, async_client: AsyncBrandDev) -> None:
- async with async_client.brand.with_streaming_response.styleguide() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- brand = await response.parse()
- assert_matches_type(BrandStyleguideResponse, brand, path=["response"])
-
- assert cast(Any, response.is_closed) is True
+ assert_matches_type(BrandWebScrapeHTMLResponse, brand, path=["response"])
@pytest.mark.skip(reason="Mock server tests are disabled")
@parametrize
- async def test_method_web_scrape_html(self, async_client: AsyncBrandDev) -> None:
+ async def test_method_web_scrape_html_with_all_params(self, async_client: AsyncBrandDev) -> None:
brand = await async_client.brand.web_scrape_html(
url="https://example.com",
+ include_frames=True,
+ max_age_ms=0,
+ parse_pdf=True,
)
assert_matches_type(BrandWebScrapeHTMLResponse, brand, path=["response"])
@@ -1880,8 +1566,11 @@ async def test_method_web_scrape_md(self, async_client: AsyncBrandDev) -> None:
async def test_method_web_scrape_md_with_all_params(self, async_client: AsyncBrandDev) -> None:
brand = await async_client.brand.web_scrape_md(
url="https://example.com",
+ include_frames=True,
include_images=True,
include_links=True,
+ max_age_ms=0,
+ parse_pdf=True,
shorten_base64_images=True,
use_main_content_only=True,
)
@@ -1927,6 +1616,7 @@ async def test_method_web_scrape_sitemap_with_all_params(self, async_client: Asy
brand = await async_client.brand.web_scrape_sitemap(
domain="domain",
max_links=1,
+ url_regex="^https?://[^/]+/blog/",
)
assert_matches_type(BrandWebScrapeSitemapResponse, brand, path=["response"])
diff --git a/tests/test_client.py b/tests/test_client.py
index 9a1bb3d..5622393 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -429,6 +429,30 @@ def test_default_query_option(self) -> None:
client.close()
+ def test_hardcoded_query_params_in_url(self, client: BrandDev) -> None:
+ request = client._build_request(FinalRequestOptions(method="get", url="/foo?beta=true"))
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true"}
+
+ request = client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/foo?beta=true",
+ params={"limit": "10", "page": "abc"},
+ )
+ )
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true", "limit": "10", "page": "abc"}
+
+ request = client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/files/a%2Fb?beta=true",
+ params={"limit": "10"},
+ )
+ )
+ assert request.url.raw_path == b"/files/a%2Fb?beta=true&limit=10"
+
def test_request_extra_json(self, client: BrandDev) -> None:
request = client._build_request(
FinalRequestOptions(
@@ -1324,6 +1348,30 @@ async def test_default_query_option(self) -> None:
await client.close()
+ async def test_hardcoded_query_params_in_url(self, async_client: AsyncBrandDev) -> None:
+ request = async_client._build_request(FinalRequestOptions(method="get", url="/foo?beta=true"))
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true"}
+
+ request = async_client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/foo?beta=true",
+ params={"limit": "10", "page": "abc"},
+ )
+ )
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true", "limit": "10", "page": "abc"}
+
+ request = async_client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/files/a%2Fb?beta=true",
+ params={"limit": "10"},
+ )
+ )
+ assert request.url.raw_path == b"/files/a%2Fb?beta=true&limit=10"
+
def test_request_extra_json(self, client: BrandDev) -> None:
request = client._build_request(
FinalRequestOptions(
diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py
deleted file mode 100644
index 937bf72..0000000
--- a/tests/test_deepcopy.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from brand.dev._utils import deepcopy_minimal
-
-
-def assert_different_identities(obj1: object, obj2: object) -> None:
- assert obj1 == obj2
- assert id(obj1) != id(obj2)
-
-
-def test_simple_dict() -> None:
- obj1 = {"foo": "bar"}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
-
-
-def test_nested_dict() -> None:
- obj1 = {"foo": {"bar": True}}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert_different_identities(obj1["foo"], obj2["foo"])
-
-
-def test_complex_nested_dict() -> None:
- obj1 = {"foo": {"bar": [{"hello": "world"}]}}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert_different_identities(obj1["foo"], obj2["foo"])
- assert_different_identities(obj1["foo"]["bar"], obj2["foo"]["bar"])
- assert_different_identities(obj1["foo"]["bar"][0], obj2["foo"]["bar"][0])
-
-
-def test_simple_list() -> None:
- obj1 = ["a", "b", "c"]
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
-
-
-def test_nested_list() -> None:
- obj1 = ["a", [1, 2, 3]]
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert_different_identities(obj1[1], obj2[1])
-
-
-class MyObject: ...
-
-
-def test_ignores_other_types() -> None:
- # custom classes
- my_obj = MyObject()
- obj1 = {"foo": my_obj}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert obj1["foo"] is my_obj
-
- # tuples
- obj3 = ("a", "b")
- obj4 = deepcopy_minimal(obj3)
- assert obj3 is obj4
diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py
index 72d0a8b..ba150ec 100644
--- a/tests/test_extract_files.py
+++ b/tests/test_extract_files.py
@@ -4,7 +4,7 @@
import pytest
-from brand.dev._types import FileTypes
+from brand.dev._types import FileTypes, ArrayFormat
from brand.dev._utils import extract_files
@@ -35,6 +35,12 @@ def test_multiple_files() -> None:
assert query == {"documents": [{}, {}]}
+def test_top_level_file_array() -> None:
+ query = {"files": [b"file one", b"file two"], "title": "hello"}
+ assert extract_files(query, paths=[["files", ""]]) == [("files[]", b"file one"), ("files[]", b"file two")]
+ assert query == {"title": "hello"}
+
+
@pytest.mark.parametrize(
"query,paths,expected",
[
@@ -62,3 +68,24 @@ def test_ignores_incorrect_paths(
expected: list[tuple[str, FileTypes]],
) -> None:
assert extract_files(query, paths=paths) == expected
+
+
+@pytest.mark.parametrize(
+ "array_format,expected_top_level,expected_nested",
+ [
+ ("brackets", [("files[]", b"a"), ("files[]", b"b")], [("items[][file]", b"a"), ("items[][file]", b"b")]),
+ ("repeat", [("files", b"a"), ("files", b"b")], [("items[file]", b"a"), ("items[file]", b"b")]),
+ ("comma", [("files", b"a"), ("files", b"b")], [("items[file]", b"a"), ("items[file]", b"b")]),
+ ("indices", [("files[0]", b"a"), ("files[1]", b"b")], [("items[0][file]", b"a"), ("items[1][file]", b"b")]),
+ ],
+)
+def test_array_format_controls_file_field_names(
+ array_format: ArrayFormat,
+ expected_top_level: list[tuple[str, FileTypes]],
+ expected_nested: list[tuple[str, FileTypes]],
+) -> None:
+ top_level = {"files": [b"a", b"b"]}
+ assert extract_files(top_level, paths=[["files", ""]], array_format=array_format) == expected_top_level
+
+ nested = {"items": [{"file": b"a"}, {"file": b"b"}]}
+ assert extract_files(nested, paths=[["items", "", "file"]], array_format=array_format) == expected_nested
diff --git a/tests/test_files.py b/tests/test_files.py
index 9af98b8..62632e1 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -4,7 +4,8 @@
import pytest
from dirty_equals import IsDict, IsList, IsBytes, IsTuple
-from brand.dev._files import to_httpx_files, async_to_httpx_files
+from brand.dev._files import to_httpx_files, deepcopy_with_paths, async_to_httpx_files
+from brand.dev._utils import extract_files
readme_path = Path(__file__).parent.parent.joinpath("README.md")
@@ -49,3 +50,99 @@ def test_string_not_allowed() -> None:
"file": "foo", # type: ignore
}
)
+
+
+def assert_different_identities(obj1: object, obj2: object) -> None:
+ assert obj1 == obj2
+ assert obj1 is not obj2
+
+
+class TestDeepcopyWithPaths:
+ def test_copies_top_level_dict(self) -> None:
+ original = {"file": b"data", "other": "value"}
+ result = deepcopy_with_paths(original, [["file"]])
+ assert_different_identities(result, original)
+
+ def test_file_value_is_same_reference(self) -> None:
+ file_bytes = b"contents"
+ original = {"file": file_bytes}
+ result = deepcopy_with_paths(original, [["file"]])
+ assert_different_identities(result, original)
+ assert result["file"] is file_bytes
+
+ def test_list_popped_wholesale(self) -> None:
+ files = [b"f1", b"f2"]
+ original = {"files": files, "title": "t"}
+ result = deepcopy_with_paths(original, [["files", ""]])
+ assert_different_identities(result, original)
+ result_files = result["files"]
+ assert isinstance(result_files, list)
+ assert_different_identities(result_files, files)
+
+ def test_nested_array_path_copies_list_and_elements(self) -> None:
+ elem1 = {"file": b"f1", "extra": 1}
+ elem2 = {"file": b"f2", "extra": 2}
+ original = {"items": [elem1, elem2]}
+ result = deepcopy_with_paths(original, [["items", "", "file"]])
+ assert_different_identities(result, original)
+ result_items = result["items"]
+ assert isinstance(result_items, list)
+ assert_different_identities(result_items, original["items"])
+ assert_different_identities(result_items[0], elem1)
+ assert_different_identities(result_items[1], elem2)
+
+ def test_empty_paths_returns_same_object(self) -> None:
+ original = {"foo": "bar"}
+ result = deepcopy_with_paths(original, [])
+ assert result is original
+
+ def test_multiple_paths(self) -> None:
+ f1 = b"file1"
+ f2 = b"file2"
+ original = {"a": f1, "b": f2, "c": "unchanged"}
+ result = deepcopy_with_paths(original, [["a"], ["b"]])
+ assert_different_identities(result, original)
+ assert result["a"] is f1
+ assert result["b"] is f2
+ assert result["c"] is original["c"]
+
+ def test_extract_files_does_not_mutate_original_top_level(self) -> None:
+ file_bytes = b"contents"
+ original = {"file": file_bytes, "other": "value"}
+
+ copied = deepcopy_with_paths(original, [["file"]])
+ extracted = extract_files(copied, paths=[["file"]])
+
+ assert extracted == [("file", file_bytes)]
+ assert original == {"file": file_bytes, "other": "value"}
+ assert copied == {"other": "value"}
+
+ def test_extract_files_does_not_mutate_original_nested_array_path(self) -> None:
+ file1 = b"f1"
+ file2 = b"f2"
+ original = {
+ "items": [
+ {"file": file1, "extra": 1},
+ {"file": file2, "extra": 2},
+ ],
+ "title": "example",
+ }
+
+ copied = deepcopy_with_paths(original, [["items", "", "file"]])
+ extracted = extract_files(copied, paths=[["items", "", "file"]])
+
+ assert [entry for _, entry in extracted] == [file1, file2]
+ assert original == {
+ "items": [
+ {"file": file1, "extra": 1},
+ {"file": file2, "extra": 2},
+ ],
+ "title": "example",
+ }
+ assert copied == {
+ "items": [
+ {"extra": 1},
+ {"extra": 2},
+ ],
+ "title": "example",
+ }