From 2d5498ef82ff0f7109fe8c92e6825c1ac5d6028f Mon Sep 17 00:00:00 2001
From: "mintlify[bot]" <109931778+mintlify[bot]@users.noreply.github.com>
Date: Mon, 20 Apr 2026 00:18:14 +0000
Subject: [PATCH] docs: fix discrepancies between docs and source code
- Fix incorrect error name property in errors.mdx (said 'CoreAIError' for all classes)
- Fix wrong OTel attribute name (gen_ai.system -> gen_ai.provider.name) and add missing span attributes
- Add missing metadata parameter to all API reference pages and type definitions
- Document Langfuse usage detail key names
- Add Anthropic adaptive thinking effort mapping table
- Document both valid cache TTL values ('5m' and '1h') for Anthropic
Generated-By: mintlify-agent
---
docs/api/core/embed.mdx | 4 ++++
docs/api/core/errors.mdx | 2 +-
docs/api/core/generate-image.mdx | 4 ++++
docs/api/core/generate-object.mdx | 4 ++++
docs/api/core/generate.mdx | 4 ++++
docs/api/core/stream-object.mdx | 4 ++++
docs/api/core/stream.mdx | 4 ++++
docs/api/core/types.mdx | 3 +++
docs/api/providers/anthropic.mdx | 12 +++++++++++-
docs/concepts/configuration.mdx | 1 +
docs/observability/langfuse.mdx | 2 +-
docs/observability/opentelemetry.mdx | 15 ++++++++++++---
12 files changed, 53 insertions(+), 6 deletions(-)
diff --git a/docs/api/core/embed.mdx b/docs/api/core/embed.mdx
index dd1e2d1..54c455c 100644
--- a/docs/api/core/embed.mdx
+++ b/docs/api/core/embed.mdx
@@ -34,6 +34,10 @@ export type EmbedParams = EmbedOptions & {
Optional dimension size for the output embeddings. Not all models support this parameter.
+
+ Custom metadata to attach to the request. Forwarded to observability middleware (OpenTelemetry, Langfuse).
+
+
Provider-specific options, namespaced by provider name (e.g. `{ openai: { encodingFormat: 'float' } }`).
diff --git a/docs/api/core/errors.mdx b/docs/api/core/errors.mdx
index 4775c61..0e5042f 100644
--- a/docs/api/core/errors.mdx
+++ b/docs/api/core/errors.mdx
@@ -68,7 +68,7 @@ export class ValidationError extends CoreAIError {
- Error name, always `'CoreAIError'`.
+ Error name, matching the class (e.g. `'ValidationError'`, `'ProviderError'`).
### Example
diff --git a/docs/api/core/generate-image.mdx b/docs/api/core/generate-image.mdx
index 6f2eade..802dc97 100644
--- a/docs/api/core/generate-image.mdx
+++ b/docs/api/core/generate-image.mdx
@@ -43,6 +43,10 @@ export type GenerateImageParams = ImageGenerateOptions & {
- `'1024x1792'` (portrait)
+
+ Custom metadata to attach to the request. Forwarded to observability middleware (OpenTelemetry, Langfuse).
+
+
Provider-specific options, namespaced by provider name (e.g. `{ openai: { quality: 'hd' } }`).
diff --git a/docs/api/core/generate-object.mdx b/docs/api/core/generate-object.mdx
index 7b97b90..b9292b5 100644
--- a/docs/api/core/generate-object.mdx
+++ b/docs/api/core/generate-object.mdx
@@ -65,6 +65,10 @@ export type GenerateObjectParams =
+
+ Custom metadata to attach to the request. Forwarded to observability middleware (OpenTelemetry, Langfuse).
+
+
Provider-specific options, namespaced by provider name.
diff --git a/docs/api/core/generate.mdx b/docs/api/core/generate.mdx
index c068853..fbad441 100644
--- a/docs/api/core/generate.mdx
+++ b/docs/api/core/generate.mdx
@@ -64,6 +64,10 @@ export type GenerateParams = GenerateOptions & {
- `{ type: 'tool', toolName: string }` - Force specific tool
+
+ Custom metadata to attach to the request. Forwarded to observability middleware (OpenTelemetry, Langfuse).
+
+
Provider-specific options, namespaced by provider name (e.g. `{ openai: { user: '...' } }`).
diff --git a/docs/api/core/stream-object.mdx b/docs/api/core/stream-object.mdx
index 59ad194..f9b9b2f 100644
--- a/docs/api/core/stream-object.mdx
+++ b/docs/api/core/stream-object.mdx
@@ -59,6 +59,10 @@ export type StreamObjectParams =
Configuration for extended thinking/reasoning capabilities.
+
+ Custom metadata to attach to the request. Forwarded to observability middleware (OpenTelemetry, Langfuse).
+
+
Provider-specific options, namespaced by provider name.
diff --git a/docs/api/core/stream.mdx b/docs/api/core/stream.mdx
index a7f3cfd..769a998 100644
--- a/docs/api/core/stream.mdx
+++ b/docs/api/core/stream.mdx
@@ -60,6 +60,10 @@ export type StreamParams = GenerateOptions & {
Controls how the model uses tools: `'auto'`, `'none'`, `'required'`, or `{ type: 'tool', toolName: string }`.
+
+ Custom metadata to attach to the request. Forwarded to observability middleware (OpenTelemetry, Langfuse).
+
+
Provider-specific options, namespaced by provider name.
diff --git a/docs/api/core/types.mdx b/docs/api/core/types.mdx
index b4f6abf..7b1a4c5 100644
--- a/docs/api/core/types.mdx
+++ b/docs/api/core/types.mdx
@@ -261,6 +261,7 @@ type BaseGenerateOptions = {
maxTokens?: number;
topP?: number;
reasoning?: ReasoningConfig;
+ metadata?: Record<string, unknown>;
providerOptions?: GenerateProviderOptions;
signal?: AbortSignal;
};
@@ -428,6 +429,7 @@ type EmbeddingUsage = {
type EmbedOptions = {
input: string | string[];
dimensions?: number;
+ metadata?: Record<string, unknown>;
providerOptions?: EmbedProviderOptions;
};
```
@@ -450,6 +452,7 @@ type ImageGenerateOptions = {
prompt: string;
n?: number;
size?: string;
+ metadata?: Record<string, unknown>;
providerOptions?: ImageProviderOptions;
};
```
diff --git a/docs/api/providers/anthropic.mdx b/docs/api/providers/anthropic.mdx
index bea2282..8793e79 100644
--- a/docs/api/providers/anthropic.mdx
+++ b/docs/api/providers/anthropic.mdx
@@ -217,6 +217,16 @@ const result = await generate({
});
```
+Effort mapping for adaptive thinking:
+
+| `ReasoningEffort` | Anthropic effort |
+|---|---|
+| `'minimal'` | `'low'` |
+| `'low'` | `'low'` |
+| `'medium'` | `'medium'` |
+| `'high'` | `'high'` |
+| `'max'` | `'max'` (`claude-opus-4-6` only) |
+
Only `claude-opus-4-6` supports `'max'` effort directly. On `claude-sonnet-4-6`, `'max'` is clamped to `'high'`.
@@ -325,7 +335,7 @@ console.log(result.usage.inputTokenDetails.cacheReadTokens);
console.log(result.usage.inputTokenDetails.cacheWriteTokens);
```
-You can request a 1-hour cache TTL when you expect follow-up requests to arrive later than Anthropic's default 5-minute cache window:
+Valid TTL values are `'5m'` (the default) and `'1h'`. Request a longer TTL when you expect follow-up requests to arrive later than Anthropic's default 5-minute cache window:
```typescript
const result = await generate({
diff --git a/docs/concepts/configuration.mdx b/docs/concepts/configuration.mdx
index 3bc620e..51075ff 100644
--- a/docs/concepts/configuration.mdx
+++ b/docs/concepts/configuration.mdx
@@ -18,6 +18,7 @@ type BaseGenerateOptions = {
maxTokens?: number;
topP?: number;
reasoning?: ReasoningConfig;
+ metadata?: Record<string, unknown>;
providerOptions?: GenerateProviderOptions;
signal?: AbortSignal;
};
diff --git a/docs/observability/langfuse.mdx b/docs/observability/langfuse.mdx
index 5582180..50cc6a3 100644
--- a/docs/observability/langfuse.mdx
+++ b/docs/observability/langfuse.mdx
@@ -125,7 +125,7 @@ Each Langfuse observation includes:
| `modelParameters` | Temperature, max tokens, top-p (when set) |
| `input` | Input messages (when `recordContent` is `true`) |
| `output` | Response content (when `recordContent` is `true`) |
-| `usageDetails` | Input tokens, output tokens, cache tokens, reasoning tokens |
+| `usageDetails` | Token usage with keys: `input`, `output`, `cache_read_input`, `cache_creation_input`, `reasoning_output` (when applicable) |
| `metadata` | Custom metadata from the options |
| `level` | Set to `ERROR` on failures |
| `statusMessage` | Error message on failures |
diff --git a/docs/observability/opentelemetry.mdx b/docs/observability/opentelemetry.mdx
index 94a10b9..b50160b 100644
--- a/docs/observability/opentelemetry.mdx
+++ b/docs/observability/opentelemetry.mdx
@@ -115,15 +115,24 @@ Spans follow [OpenTelemetry GenAI semantic conventions](https://opentelemetry.io
| Attribute | Description |
| --- | --- |
-| `gen_ai.system` | Provider name |
+| `gen_ai.provider.name` | Provider name |
| `gen_ai.request.model` | Model ID |
+| `gen_ai.operation.name` | Operation type (`chat`, `embeddings`, or `image_generation`) |
+| `gen_ai.output.type` | Output type (`text`, `json`, or `image`) |
| `gen_ai.request.temperature` | Temperature setting |
| `gen_ai.request.max_tokens` | Max tokens setting |
| `gen_ai.request.top_p` | Top-p setting |
-| `gen_ai.response.finish_reason` | Why generation stopped |
+| `gen_ai.request.schema_name` | Schema name (for `generateObject`/`streamObject`) |
+| `gen_ai.response.finish_reasons` | Array of finish reasons |
| `gen_ai.usage.input_tokens` | Input token count |
| `gen_ai.usage.output_tokens` | Output token count |
+| `gen_ai.usage.cache_read.input_tokens` | Cached input tokens read |
+| `gen_ai.usage.cache_creation.input_tokens` | Tokens written to cache |
+| `core_ai.function_id` | Function ID from `metadata.functionId` |
+| `core_ai.metadata.*` | Custom metadata fields (from the `metadata` option) |
-When `recordContent` is enabled, input messages and output content are recorded as additional attributes.
+Span names follow the pattern `{operation} {modelId}`, for example `chat gpt-5-mini`, `embeddings text-embedding-3-small`, or `image_generation gpt-image-1`.
+
+When `recordContent` is enabled, input messages, tool definitions, and output content are recorded as additional attributes (`gen_ai.system_instructions`, `gen_ai.input.messages`, `gen_ai.output.messages`, `gen_ai.tool.definitions`, `input.value`, `output.value`).
Errors are recorded with `error.type` and the span status is set to `ERROR`.