From cf3d77c71ce2bddaab3cfed0edf44f0900504ce6 Mon Sep 17 00:00:00 2001 From: "mintlify[bot]" <109931778+mintlify[bot]@users.noreply.github.com> Date: Mon, 13 Apr 2026 00:15:58 +0000 Subject: [PATCH] Standardize heading case and fix documentation issues - Convert all headings to sentence case per style guide - Fix misplaced Properties section in errors.mdx (moved under CoreAIError) - Fix retry logic that incorrectly threw on 429 rate limit errors - Add Google GenAI and Mistral provider tabs to embeddings guide - Add Google GenAI tab to multi-modal provider support section - Add missing ToolCall type import in streaming tool examples - Fix inconsistent heading case in embed.mdx API reference Generated-By: mintlify-agent --- docs/api/core/define-tool.mdx | 6 ++-- docs/api/core/embed.mdx | 2 +- docs/api/core/errors.mdx | 50 +++++++++++++-------------- docs/api/core/generate-image.mdx | 2 +- docs/concepts/error-handling.mdx | 18 +++++----- docs/concepts/messages.mdx | 34 +++++++++--------- docs/concepts/models.mdx | 34 +++++++++--------- docs/concepts/providers.mdx | 32 ++++++++--------- docs/guides/chat-completion.mdx | 14 ++++---- docs/guides/embeddings.mdx | 55 +++++++++++++++++++++--------- docs/guides/image-generation.mdx | 26 +++++++------- docs/guides/multi-modal.mdx | 53 +++++++++++++++++++++------- docs/guides/streaming.mdx | 17 ++++----- docs/guides/structured-outputs.mdx | 20 +++++------ docs/guides/tool-calling.mdx | 23 +++++++------ 15 files changed, 218 insertions(+), 168 deletions(-) diff --git a/docs/api/core/define-tool.mdx b/docs/api/core/define-tool.mdx index 5220a76..231958d 100644 --- a/docs/api/core/define-tool.mdx +++ b/docs/api/core/define-tool.mdx @@ -310,7 +310,7 @@ const result4 = await generate({ }); ``` -## Type Safety +## Type safety The tool definition is fully type-safe: @@ -335,7 +335,7 @@ const handler = async (args: ToolParams) => { }; ``` -## Best Practices +## Best practices Use descriptive tool names that clearly indicate their purpose. Follow the `snake_case` convention. @@ -353,7 +353,7 @@ const handler = async (args: ToolParams) => { Always validate tool results before passing them back to the model. Handle errors gracefully. -## Common Patterns +## Common patterns ### 1. Database Queries diff --git a/docs/api/core/embed.mdx b/docs/api/core/embed.mdx index dd1e2d1..4845578 100644 --- a/docs/api/core/embed.mdx +++ b/docs/api/core/embed.mdx @@ -8,7 +8,7 @@ icon: 'vector-square' The `embed()` function generates vector embeddings for text input using embedding models. Embeddings are useful for semantic search, clustering, recommendations, and other AI tasks that require numerical representations of text. -## Function Signature +## Function signature ```typescript export async function embed( diff --git a/docs/api/core/errors.mdx b/docs/api/core/errors.mdx index 4775c61..4c4c7ef 100644 --- a/docs/api/core/errors.mdx +++ b/docs/api/core/errors.mdx @@ -8,7 +8,7 @@ icon: 'triangle-exclamation' core-ai provides a hierarchy of error classes for handling different types of failures when working with language models. All errors extend from `CoreAIError` and provide structured error information. -## Error Hierarchy +## Error hierarchy ``` CoreAIError @@ -40,19 +40,6 @@ export class CoreAIError extends Error { } ``` -## ValidationError - -Thrown when core-ai rejects invalid caller input or local request configuration before calling a provider. 
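As a minimal usage sketch (assuming the error classes are exported from `@core-ai/core-ai` alongside `generate`, and that `model` is any chat model created earlier), a `ValidationError` can be caught before any provider request is made:

```typescript
import { generate, ValidationError } from '@core-ai/core-ai';

try {
  // An empty messages array is one plausible trigger for local validation
  await generate({ model, messages: [] });
} catch (error) {
  if (error instanceof ValidationError) {
    console.error('Rejected before reaching the provider:', error.message);
  }
}
```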
- -```typescript -export class ValidationError extends CoreAIError { - constructor(message: string, cause?: unknown, provider?: string) { - super(message, cause, provider); - this.name = 'ValidationError'; - } -} -``` - ### Properties @@ -68,7 +55,7 @@ export class ValidationError extends CoreAIError { - Error name, always `'CoreAIError'`. + Error name, always `'CoreAIError'` for the base class. Subclasses override this with their own name (e.g., `'ValidationError'`, `'ProviderError'`). ### Example @@ -89,6 +76,19 @@ try { } ``` +## ValidationError + +Thrown when core-ai rejects invalid caller input or local request configuration before calling a provider. + +```typescript +export class ValidationError extends CoreAIError { + constructor(message: string, cause?: unknown, provider?: string) { + super(message, cause, provider); + this.name = 'ValidationError'; + } +} +``` + ## AbortedError Thrown when an operation is cancelled via an `AbortSignal`. @@ -389,9 +389,9 @@ try { } ``` -## Error Handling Patterns +## Error handling patterns -### Comprehensive Error Handling +### Comprehensive error handling ```typescript import { @@ -449,7 +449,7 @@ try { } ``` -### Retry Logic with Error Handling +### Retry logic with error handling ```typescript async function generateWithRetry( @@ -465,12 +465,12 @@ async function generateWithRetry( lastError = error as Error; if (error instanceof ProviderError) { - // Don't retry client errors (4xx) - if (error.statusCode && error.statusCode >= 400 && error.statusCode < 500) { + // Don't retry client errors (4xx) except rate limits (429) + if (error.statusCode && error.statusCode >= 400 && error.statusCode < 500 && error.statusCode !== 429) { throw error; } - // Retry server errors (5xx) and rate limits + // Retry rate limits (429) and server errors (5xx) if (error.statusCode === 429 || (error.statusCode && error.statusCode >= 500)) { const delay = Math.pow(2, i) * 1000; // Exponential backoff console.log(`Retrying in ${delay}ms...`); @@ -503,7 +503,7 @@ const result = await generateWithRetry(() => ); ``` -### Logging and Monitoring +### Logging and monitoring ```typescript function logError(error: unknown): void { @@ -537,7 +537,7 @@ function logError(error: unknown): void { } ``` -### Graceful Degradation +### Graceful degradation ```typescript async function generateWithFallback( @@ -570,7 +570,7 @@ async function generateWithFallback( } ``` -## Best Practices +## Best practices Always check for specific error types before general ones. Use `instanceof` checks in order from most specific to least specific. @@ -592,7 +592,7 @@ async function generateWithFallback( Log error details including provider, status codes, and causes for debugging and monitoring. 
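For example, inside a `catch` block (a sketch only: `logger` stands in for whatever structured logger you use, and it assumes the `provider` and `cause` values passed to the constructor are exposed as properties alongside `statusCode`):

```typescript
if (error instanceof ProviderError) {
  // logger is a hypothetical structured logger; swap in console.warn or your own
  logger.warn('provider call failed', {
    provider: error.provider,
    statusCode: error.statusCode,
    cause: error.cause,
  });
}
```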
-## Common HTTP Status Codes +## Common HTTP status codes - `400` - Bad Request (invalid parameters) - `401` - Unauthorized (invalid API key) diff --git a/docs/api/core/generate-image.mdx b/docs/api/core/generate-image.mdx index 6f2eade..97e5b3f 100644 --- a/docs/api/core/generate-image.mdx +++ b/docs/api/core/generate-image.mdx @@ -228,7 +228,7 @@ console.log('Landscape:', landscape.images[0].url); console.log('Square:', square.images[0].url); ``` -## Model Support +## Model support Different providers expose image generation models through `imageModel()`: diff --git a/docs/concepts/error-handling.mdx b/docs/concepts/error-handling.mdx index de155be..85b4684 100644 --- a/docs/concepts/error-handling.mdx +++ b/docs/concepts/error-handling.mdx @@ -7,7 +7,7 @@ description: CoreAIError, ProviderError, and structured output error types in co core-ai provides a hierarchy of error types that help you handle failures gracefully. For the complete API surface, see the [errors reference](/api/core/errors). -## Error Hierarchy +## Error hierarchy ```typescript Error @@ -181,7 +181,7 @@ try { - Provider API errors - Timeout errors -## Structured Output Errors +## Structured output errors Errors specific to structured output generation (`generateObject` and `streamObject`). @@ -313,7 +313,7 @@ try { - Values that don't match schema constraints - Invalid enum values -## Complete Error Handling Example +## Complete error handling example ```typescript import { @@ -390,9 +390,9 @@ async function generateUserProfile(prompt: string) { } ``` -## Retry Strategies +## Retry strategies -### Exponential Backoff +### Exponential backoff ```typescript async function generateWithRetry( @@ -417,7 +417,7 @@ async function generateWithRetry( } ``` -### Circuit Breaker +### Circuit breaker ```typescript class CircuitBreaker { @@ -466,7 +466,7 @@ const result = await breaker.execute(() => ); ``` -## Validation Best Practices +## Validation best practices **Validate inputs before sending:** Check for empty messages, validate file sizes, and ensure proper content types before making API calls to avoid unnecessary errors. @@ -488,7 +488,7 @@ function validateMessages(messages: Message[]): void { } ``` -## Logging Errors +## Logging errors **Log error details for debugging:** Include provider, model, status codes, and raw output when available to help diagnose issues. @@ -534,7 +534,7 @@ try { } ``` -## Next Steps +## Next steps - Learn about [Providers](/concepts/providers) for provider-specific behavior - Explore [Configuration](/concepts/configuration) for controlling generation diff --git a/docs/concepts/messages.mdx b/docs/concepts/messages.mdx index 3a24529..669be89 100644 --- a/docs/concepts/messages.mdx +++ b/docs/concepts/messages.mdx @@ -7,7 +7,7 @@ description: Understanding message types, content parts, and multi-modal inputs Messages are the fundamental building blocks of conversations in core-ai. They represent the dialogue between users, the assistant, and tools. -## Message Types +## Message types core-ai supports four message types: @@ -19,7 +19,7 @@ type Message = | ToolResultMessage; ``` -### System Message +### System message System messages set the context and behavior for the assistant. @@ -39,7 +39,7 @@ const systemMessage: SystemMessage = { }; ``` -### User Message +### User message User messages represent input from the user. They can be simple text or multi-modal content with images and files. 
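For instance, a single user message can combine a text part with an image part (assuming the `UserMessage` type is exported from `@core-ai/core-ai`; the part shapes match the multi-modal guide):

```typescript
import type { UserMessage } from '@core-ai/core-ai';

const question: UserMessage = {
  role: 'user',
  content: [
    { type: 'text', text: 'What landmark is shown in this photo?' },
    {
      type: 'image',
      source: { type: 'url', url: 'https://example.com/photo.jpg' },
    },
  ],
};
```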
@@ -77,7 +77,7 @@ const userMessage: UserMessage = { }; ``` -### Assistant Message +### Assistant message Assistant messages contain the model's responses, including text, reasoning, and tool calls. @@ -108,7 +108,7 @@ const assistantMessage: AssistantMessage = { Assistant messages use a `parts` array to support multiple content types (text, reasoning, tool calls) in a single message. -### Tool Result Message +### Tool result message Tool result messages provide the results of tool calls back to the model. @@ -131,7 +131,7 @@ const toolResult: ToolResultMessage = { }; ``` -## User Content Parts +## User content parts User messages can include multiple types of content: @@ -139,7 +139,7 @@ User messages can include multiple types of content: type UserContentPart = TextPart | ImagePart | FilePart; ``` -### Text Part +### Text part Simple text content: @@ -159,7 +159,7 @@ const textPart: TextPart = { }; ``` -### Image Part +### Image part Images can be provided as URLs or base64-encoded data: @@ -202,7 +202,7 @@ const base64Image: ImagePart = { }; ``` -### File Part +### File part Files can be attached with mime type information: @@ -235,7 +235,7 @@ const filePart: FilePart = { Not all providers support all content types. For example, Anthropic accepts PDF file parts only. Check the provider pages for model-specific input limits. -## Assistant Content Parts +## Assistant content parts Assistant messages can contain text, reasoning, and tool calls: @@ -246,7 +246,7 @@ type AssistantContentPart = | ToolCallPart; ``` -### Text Part +### Text part Regular text responses: @@ -257,7 +257,7 @@ type AssistantTextPart = { }; ``` -### Reasoning Part +### Reasoning part Extended thinking and reasoning content from the model: @@ -289,7 +289,7 @@ if (result.reasoning) { } ``` -### Tool Call Part +### Tool call part Requests to call external tools: @@ -306,7 +306,7 @@ type ToolCall = { }; ``` -## Multi-Turn Conversations +## Multi-turn conversations Build conversations by passing message history: @@ -339,7 +339,7 @@ const result2 = await generate({ model, messages }); For message helpers like `resultToMessage()` and `assistantMessage()`, see the [utilities reference](/api/core/utilities). -## Tool Call Flow +## Tool call flow Here's a complete example of handling tool calls: @@ -382,7 +382,7 @@ if (result1.toolCalls.length > 0) { } ``` -## Best Practices +## Best practices **Keep system messages concise:** System messages set the tone but shouldn't contain too much information. For large context, consider using user messages with retrieved content. @@ -396,7 +396,7 @@ if (result1.toolCalls.length > 0) { **Multi-modal order matters:** When combining text and images, place the text part first to provide context for what you're asking about the image. -## Next Steps +## Next steps - Configure generation with [Configuration](/concepts/configuration) options - Learn about [Error Handling](/concepts/error-handling) diff --git a/docs/concepts/models.mdx b/docs/concepts/models.mdx index 6383559..02bc2ab 100644 --- a/docs/concepts/models.mdx +++ b/docs/concepts/models.mdx @@ -11,7 +11,7 @@ core-ai supports three types of models, each designed for specific tasks: - **Embedding Models**: Convert text into vector representations for semantic search - **Image Models**: Generate images from text prompts -## Chat Models +## Chat models Chat models are the most versatile, supporting text generation, conversations, tool calling, and structured output. 
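Because every provider exposes the same `ChatModel` interface, application code can stay provider-agnostic. A small sketch (assuming the `ChatModel` type is exported from `@core-ai/core-ai`):

```typescript
import { generate } from '@core-ai/core-ai';
import type { ChatModel } from '@core-ai/core-ai';

// Works the same whether the model came from OpenAI, Anthropic, Google GenAI, or Mistral
async function summarize(model: ChatModel, text: string): Promise<string> {
  const result = await generate({
    model,
    messages: [
      { role: 'user', content: [{ type: 'text', text: `Summarize in one sentence: ${text}` }] },
    ],
  });
  return result.content;
}
```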
@@ -32,7 +32,7 @@ type ChatModel = { }; ``` -### Basic Text Generation +### Basic text generation ```typescript import { createOpenAI } from '@core-ai/openai'; @@ -52,7 +52,7 @@ console.log(result.content); // "Quantum computing uses quantum bits that can exist in multiple states..." ``` -### Streaming Responses +### Streaming responses ```typescript import { stream } from '@core-ai/core-ai'; @@ -71,7 +71,7 @@ for await (const event of response) { } ``` -### Structured Output +### Structured output Generate type-safe structured data using Zod schemas: @@ -98,7 +98,7 @@ console.log(result.object); // { name: "Alice Smith", age: 28, hobbies: ["reading", "hiking"] } ``` -### Tool Calling +### Tool calling Extend model capabilities with function tools: @@ -141,7 +141,7 @@ if (result.toolCalls.length > 0) { See the [types reference](/api/core/types) for the full `GenerateResult`, `StreamEvent`, and `FinishReason` type definitions. -## Embedding Models +## Embedding models Embedding models convert text into numerical vectors for semantic similarity and search. @@ -155,7 +155,7 @@ type EmbeddingModel = { }; ``` -### Basic Usage +### Basic usage ```typescript import { embed } from '@core-ai/core-ai'; @@ -173,7 +173,7 @@ console.log(result.embeddings[0].length); // 1536 (dimensions) ``` -### Batch Embedding +### Batch embedding ```typescript const result = await embed({ @@ -189,7 +189,7 @@ console.log(result.embeddings.length); // 3 ``` -### Custom Dimensions +### Custom dimensions ```typescript const result = await embed({ @@ -199,7 +199,7 @@ const result = await embed({ }); ``` -### Embed Result +### Embed result ```typescript type EmbedResult = { @@ -212,7 +212,7 @@ type EmbeddingUsage = { }; ``` -## Image Models +## Image models Image models generate images from text descriptions. @@ -226,7 +226,7 @@ type ImageModel = { }; ``` -### Basic Usage +### Basic usage ```typescript import { generateImage } from '@core-ai/core-ai'; @@ -244,7 +244,7 @@ console.log(result.images[0]); // { base64: "...", revisedPrompt: "..." } ``` -### Generate Options +### Generate options ```typescript type ImageGenerateOptions = { @@ -255,7 +255,7 @@ type ImageGenerateOptions = { }; ``` -### Multiple Images +### Multiple images ```typescript const result = await generateImage({ @@ -269,7 +269,7 @@ console.log(result.images.length); // 4 ``` -### Image Result +### Image result ```typescript type ImageGenerateResult = { @@ -287,7 +287,7 @@ type GeneratedImage = { Different providers may return images as URLs, base64 data, or both. Check the provider documentation for specific behavior. -## Model Properties +## Model properties All models expose two readonly properties: @@ -300,7 +300,7 @@ console.log(model.modelId); // "gpt-5-mini" These properties are useful for logging, debugging, and tracking which models are used in your application. -## Next Steps +## Next steps - Learn about [Messages](/concepts/messages) to structure conversations - Configure models with [Configuration](/concepts/configuration) options diff --git a/docs/concepts/providers.mdx b/docs/concepts/providers.mdx index 3cb609b..f49fab1 100644 --- a/docs/concepts/providers.mdx +++ b/docs/concepts/providers.mdx @@ -7,7 +7,7 @@ description: Learn how providers work in core-ai and how to use OpenAI, Anthropi Providers are the bridge between core-ai and LLM services like OpenAI, Anthropic, Google GenAI, and Mistral. 
Each provider implements a unified interface that abstracts away provider-specific details, allowing you to switch between providers with minimal code changes. -## Provider Interface +## Provider interface All providers implement methods to create model instances: @@ -27,7 +27,7 @@ type Provider = { OpenAI is one of the most popular providers, offering chat, embedding, and image generation models. -### Creating a Provider +### Creating a provider ```typescript import { createOpenAI } from '@core-ai/openai'; @@ -38,7 +38,7 @@ const openai = createOpenAI({ }); ``` -### Provider Options +### Provider options ```typescript type OpenAIProviderOptions = { @@ -48,7 +48,7 @@ type OpenAIProviderOptions = { }; ``` -### Getting Models +### Getting models ```typescript const gpt5 = openai.chatModel('gpt-5-mini'); @@ -64,7 +64,7 @@ const imageModel = openai.imageModel('gpt-image-1'); Anthropic provides powerful chat models like Claude, with advanced capabilities for extended thinking and reasoning. -### Creating a Provider +### Creating a provider ```typescript import { createAnthropic } from '@core-ai/anthropic'; @@ -75,7 +75,7 @@ const anthropic = createAnthropic({ }); ``` -### Provider Options +### Provider options ```typescript type AnthropicProviderOptions = { @@ -86,7 +86,7 @@ type AnthropicProviderOptions = { }; ``` -### Getting Models +### Getting models ```typescript const claude = anthropic.chatModel('claude-sonnet-4-6'); @@ -101,7 +101,7 @@ const claudeOpus = anthropic.chatModel('claude-opus-4-6'); Google GenAI provides access to Gemini models for chat, embeddings, and image generation. -### Creating a Provider +### Creating a provider ```typescript import { createGoogleGenAI } from '@core-ai/google-genai'; @@ -113,7 +113,7 @@ const google = createGoogleGenAI({ }); ``` -### Provider Options +### Provider options ```typescript type GoogleGenAIProviderOptions = { @@ -124,7 +124,7 @@ type GoogleGenAIProviderOptions = { }; ``` -### Getting Models +### Getting models ```typescript const gemini = google.chatModel('gemini-3.1-pro'); @@ -136,7 +136,7 @@ const imagen = google.imageModel('imagen-3.0'); Mistral provides efficient open-source and proprietary models for chat and embeddings. 
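As a quick preview of the sections below, a Mistral embedding model plugs straight into `embed()` (a sketch; see the embeddings guide for details):

```typescript
import { embed } from '@core-ai/core-ai';
import { createMistral } from '@core-ai/mistral';

const mistral = createMistral({ apiKey: process.env.MISTRAL_API_KEY });

const result = await embed({
  model: mistral.embeddingModel('mistral-embed'),
  input: 'Efficient open-source embeddings',
});

console.log('Dimensions:', result.embeddings[0]?.length);
```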
-### Creating a Provider +### Creating a provider ```typescript import { createMistral } from '@core-ai/mistral'; @@ -147,7 +147,7 @@ const mistral = createMistral({ }); ``` -### Provider Options +### Provider options ```typescript type MistralProviderOptions = { @@ -157,7 +157,7 @@ type MistralProviderOptions = { }; ``` -### Getting Models +### Getting models ```typescript // Chat models @@ -169,7 +169,7 @@ const mistralSmall = mistral.chatModel('mistral-small'); const embeddings = mistral.embeddingModel('mistral-embed'); ``` -## Using Custom Clients +## Using custom clients All providers support bringing your own client instance, which is useful for advanced configuration: @@ -187,7 +187,7 @@ const customClient = new OpenAI({ const openai = createOpenAI({ client: customClient }); ``` -## Provider Comparison +## Provider comparison | Provider | Chat | Embeddings | Images | Special Features | |----------|------|------------|--------|------------------| @@ -196,7 +196,7 @@ const openai = createOpenAI({ client: customClient }); | Google GenAI | ✓ | ✓ | ✓ | Gemini models with multimodal support | | Mistral | ✓ | ✓ | ✗ | Efficient open-source options | -## Next Steps +## Next steps - Learn about [Models](/concepts/models) to understand different model types - Explore [Messages](/concepts/messages) to see how to structure conversations diff --git a/docs/guides/chat-completion.mdx b/docs/guides/chat-completion.mdx index ce12624..20d6fdc 100644 --- a/docs/guides/chat-completion.mdx +++ b/docs/guides/chat-completion.mdx @@ -5,7 +5,7 @@ description: Generate text responses using the generate() function The `generate()` function provides synchronous chat completion, returning a complete response from the language model. -## Basic Usage +## Basic usage Generate a simple chat completion: @@ -34,7 +34,7 @@ console.log('Response:', result.content); console.log('Usage:', result.usage); ``` -## Using Different Providers +## Using different providers core-ai supports multiple providers with the same API: @@ -139,7 +139,7 @@ const result = await generate({ `temperature`, `maxTokens`, and `topP` are top-level options. `stopSequences`, `frequencyPenalty`, and `presencePenalty` are provider-specific and passed via `providerOptions`. Options like `stopSequences` and `frequencyPenalty` are available with `createOpenAICompat` (Chat Completions API). The default `createOpenAI` (Responses API) supports a different set of options — see [Configuration](/concepts/configuration) for details. -## Response Structure +## Response structure The `generate()` function returns a `GenerateResult` object: @@ -154,7 +154,7 @@ type GenerateResult = { }; ``` -### Understanding Token Usage +### Understanding token usage ```typescript const result = await generate({ model, messages }); @@ -195,7 +195,7 @@ const secondResponse = await generate({ model, messages }); console.log(secondResponse.content); ``` -## Error Handling +## Error handling Handle errors gracefully: @@ -217,7 +217,7 @@ try { } ``` -## Best Practices +## Best practices @@ -279,7 +279,7 @@ try { -## Next Steps +## Next steps diff --git a/docs/guides/embeddings.mdx b/docs/guides/embeddings.mdx index 01ddea7..11ed490 100644 --- a/docs/guides/embeddings.mdx +++ b/docs/guides/embeddings.mdx @@ -5,7 +5,7 @@ description: Generate vector embeddings using embed() for semantic search and si Embeddings convert text into numerical vectors that capture semantic meaning, enabling similarity search, clustering, and classification. 
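A common way to compare the vectors that `embed()` returns is cosine similarity. A minimal implementation looks like this (a sketch; the guide's own similarity helpers may differ):

```typescript
// Cosine similarity between two equal-length embedding vectors (1 = same direction)
function cosineSimilarity(a: number[], b: number[]): number {
  let dot = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}
```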
-## Basic Usage +## Basic usage Generate embeddings for text: @@ -30,7 +30,7 @@ console.log('First vector preview:', result.embeddings[0]?.slice(0, 8)); console.log('Usage:', result.usage); ``` -## Single vs Batch Embedding +## Single vs batch embedding Embed single or multiple texts: @@ -66,7 +66,7 @@ Embed single or multiple texts: -## Embedding Response +## Embedding response The `embed()` function returns an `EmbedResult`: @@ -81,7 +81,7 @@ type EmbeddingUsage = { }; ``` -## Configuring Dimensions +## Configuring dimensions Some models support custom dimensions: @@ -99,7 +99,7 @@ console.log('Dimensions:', result.embeddings[0].length); // 256 Not all embedding models support custom dimensions. Check your provider's documentation. -## Similarity Search +## Similarity search Calculate similarity between vectors: @@ -146,7 +146,7 @@ similarities.forEach(({ document, similarity }) => { }); ``` -## Semantic Search Example +## Semantic search example Build a simple semantic search system: @@ -214,7 +214,7 @@ const results = await search.search('typed JavaScript', 2); console.log('Search results:', results); ``` -## Clustering Documents +## Clustering documents Group similar documents together: @@ -303,7 +303,7 @@ clusters.forEach((cluster, i) => { }); ``` -## Using Different Providers +## Using different providers @@ -322,20 +322,41 @@ clusters.forEach((cluster, i) => { }); ``` - - Check your provider's documentation for embedding model support: + + ```typescript + import { embed } from '@core-ai/core-ai'; + import { createGoogleGenAI } from '@core-ai/google-genai'; + const google = createGoogleGenAI({ + apiKey: process.env.GOOGLE_API_KEY + }); + const model = google.embeddingModel('text-embedding-004'); + + const result = await embed({ + model, + input: 'Your text here', + }); + ``` + + ```typescript - // Example pattern - const provider = createProvider({ apiKey }); - const model = provider.embeddingModel('model-name'); + import { embed } from '@core-ai/core-ai'; + import { createMistral } from '@core-ai/mistral'; + + const mistral = createMistral({ + apiKey: process.env.MISTRAL_API_KEY + }); + const model = mistral.embeddingModel('mistral-embed'); - const result = await embed({ model, input: 'text' }); + const result = await embed({ + model, + input: 'Your text here', + }); ``` -## Integration with Vector Databases +## Integration with vector databases Store embeddings in vector databases: @@ -422,7 +443,7 @@ Store embeddings in vector databases: -## Best Practices +## Best practices @@ -510,7 +531,7 @@ Store embeddings in vector databases: -## Next Steps +## Next steps diff --git a/docs/guides/image-generation.mdx b/docs/guides/image-generation.mdx index dea5c82..6cc73dc 100644 --- a/docs/guides/image-generation.mdx +++ b/docs/guides/image-generation.mdx @@ -5,7 +5,7 @@ description: Generate images using generateImage() with AI image models Generate images from text prompts using the `generateImage()` function with support for various AI image models. 
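Providers may return a generated image as a URL, base64 data, or both, so persisting results usually needs to handle either form. A small Node sketch (the `saveImage` helper is illustrative, not part of the library):

```typescript
import { writeFile } from 'node:fs/promises';

// Accepts whichever fields the provider returned for a generated image
async function saveImage(image: { base64?: string; url?: string }, path: string): Promise<void> {
  if (image.base64) {
    await writeFile(path, Buffer.from(image.base64, 'base64'));
  } else if (image.url) {
    const response = await fetch(image.url);
    await writeFile(path, Buffer.from(await response.arrayBuffer()));
  }
}
```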
-## Basic Usage +## Basic usage Generate an image from a text prompt: @@ -38,7 +38,7 @@ for (const [index, image] of result.images.entries()) { } ``` -## Configuration Options +## Configuration options Customize image generation: @@ -60,7 +60,7 @@ const result = await generateImage({ - `1024x1792` (portrait) -## Image Response Format +## Image response format The `generateImage()` function returns: @@ -76,7 +76,7 @@ type GeneratedImage = { }; ``` -## Multiple Images +## Multiple images Generate multiple variations: @@ -98,7 +98,7 @@ result.images.forEach((image, index) => { }); ``` -## Handling Image Data +## Handling image data @@ -190,7 +190,7 @@ result.images.forEach((image, index) => { -## Revised Prompts +## Revised prompts Some providers modify your prompt for better results: @@ -209,7 +209,7 @@ if (image.revisedPrompt) { } ``` -## Image Sizes +## Image sizes Different models support different sizes: @@ -243,7 +243,7 @@ Different models support different sizes: -## Prompt Engineering Tips +## Prompt engineering tips Write effective prompts for better results: @@ -303,7 +303,7 @@ Write effective prompts for better results: -## Error Handling +## Error handling Handle image generation errors: @@ -337,7 +337,7 @@ try { } ``` -## Provider-Specific Options +## Provider-specific options Use provider-specific features: @@ -360,7 +360,7 @@ const result = await generateImage({ Provider options are namespaced by provider name and validated by each provider adapter. Check the [provider docs](/api/providers/openai) for available options. -## Practical Examples +## Practical examples @@ -432,7 +432,7 @@ const result = await generateImage({ -## Best Practices +## Best practices @@ -486,7 +486,7 @@ const result = await generateImage({ -## Next Steps +## Next steps diff --git a/docs/guides/multi-modal.mdx b/docs/guides/multi-modal.mdx index 03e703d..a104d50 100644 --- a/docs/guides/multi-modal.mdx +++ b/docs/guides/multi-modal.mdx @@ -5,7 +5,7 @@ description: Work with images, files, and multi-part messages in chat conversati core-ai supports multi-modal inputs, allowing you to include images, files, and text in the same message. -## Images in Messages +## Images in messages Include images in user messages: @@ -38,7 +38,7 @@ const result = await generate({ console.log('Model description:', result.content); ``` -## Image Sources +## Image sources Images can be provided via URL or base64: @@ -96,7 +96,7 @@ Images can be provided via URL or base64: -## Content Part Types +## Content part types User messages can contain multiple content parts: @@ -123,7 +123,7 @@ type FilePart = { }; ``` -## Multiple Images +## Multiple images Include multiple images in one message: @@ -151,7 +151,7 @@ const result = await generate({ console.log('Comparison:', result.content); ``` -## Text and Images Together +## Text and images together Mix text and images in any order: @@ -174,7 +174,7 @@ const result = await generate({ }); ``` -## File Attachments +## File attachments Include files in messages: @@ -210,7 +210,7 @@ console.log('Summary:', result.content); File support varies by provider and model. Check your provider's documentation for supported file types. 
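A rough pre-flight check can help here (a sketch: the allow-list below is only an example, and per the messages guide Anthropic currently accepts PDF file parts only):

```typescript
import { extname } from 'node:path';

// Illustrative allow-list; consult your provider's docs for the real set
const supportedMimeTypes: Record<string, string> = {
  '.pdf': 'application/pdf',
};

function mimeTypeFor(filePath: string): string | undefined {
  return supportedMimeTypes[extname(filePath).toLowerCase()];
}
```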
-## Common Use Cases +## Common use cases @@ -338,7 +338,7 @@ console.log('Summary:', result.content); -## Multi-Modal with Streaming +## Multi-modal with streaming Stream responses for multi-modal inputs: @@ -368,7 +368,7 @@ for await (const event of result) { } ``` -## Reading Images from Disk +## Reading images from disk Load and encode local images: @@ -415,7 +415,7 @@ const description = await analyzeLocalImage('./photo.jpg'); console.log(description); ``` -## Multi-Modal Conversations +## Multi-modal conversations Build conversations with images: @@ -451,7 +451,7 @@ const secondResponse = await generate({ model, messages }); console.log('Second response:', secondResponse.content); ``` -## Provider Support +## Provider support Multi-modal support varies by provider: @@ -491,6 +491,33 @@ Multi-modal support varies by provider: }); const model = anthropic.chatModel('claude-haiku-4-5'); // Supports vision + const result = await generate({ + model, + messages: [ + { + role: 'user', + content: [ + { type: 'text', text: 'What is in this image?' }, + { + type: 'image', + source: { type: 'url', url: imageUrl }, + }, + ], + }, + ], + }); + ``` + + + ```typescript + import { generate } from '@core-ai/core-ai'; + import { createGoogleGenAI } from '@core-ai/google-genai'; + + const google = createGoogleGenAI({ + apiKey: process.env.GOOGLE_API_KEY, + }); + const model = google.chatModel('gemini-3.1-pro'); // Supports vision + const result = await generate({ model, messages: [ @@ -514,7 +541,7 @@ Multi-modal support varies by provider: Check your provider's documentation for which models support vision and other multi-modal capabilities. -## Best Practices +## Best practices @@ -582,7 +609,7 @@ Multi-modal support varies by provider: -## Next Steps +## Next steps diff --git a/docs/guides/streaming.mdx b/docs/guides/streaming.mdx index e7afa0a..3ee4d29 100644 --- a/docs/guides/streaming.mdx +++ b/docs/guides/streaming.mdx @@ -5,7 +5,7 @@ description: Stream responses in real-time using stream() with async iteration The `stream()` function enables real-time streaming of model responses, providing a better user experience for long-form content. 
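A bare-bones sketch of the iteration loop (assuming, as in the examples below, that `stream()` resolves to an async iterable of `StreamEvent` values and that `model` is any chat model):

```typescript
import { stream } from '@core-ai/core-ai';

const result = await stream({
  model,
  messages: [
    { role: 'user', content: [{ type: 'text', text: 'Write a haiku about the sea.' }] },
  ],
});

for await (const event of result) {
  // Other event types carry the streamed content; the finish event closes the stream
  if (event.type === 'finish') {
    console.log('Finish reason:', event.finishReason);
    console.log('Usage:', event.usage);
  }
}
```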
-## Basic Usage +## Basic usage Stream a chat completion with async iteration: @@ -63,7 +63,7 @@ type StreamEvent = | { type: 'finish'; finishReason: FinishReason; usage: ChatUsage }; ``` -## Handling Different Event Types +## Handling different event types Process different event types for rich streaming experiences: @@ -161,7 +161,7 @@ for await (const event of result) { } ``` -## UI Integration Examples +## UI integration examples @@ -260,12 +260,13 @@ for await (const event of result) { -## Streaming with Tools +## Streaming with tools Handle tool calls during streaming: ```typescript import { stream, defineTool } from '@core-ai/core-ai'; +import type { ToolCall } from '@core-ai/core-ai'; import { z } from 'zod'; const weatherTool = defineTool({ @@ -306,7 +307,7 @@ if (response.finishReason === 'tool-calls') { } ``` -## Abort Streaming +## Abort streaming Cancel streaming with AbortController: @@ -337,7 +338,7 @@ try { } ``` -## Error Handling +## Error handling Handle errors during streaming: @@ -366,7 +367,7 @@ try { } ``` -## Best Practices +## Best practices @@ -423,7 +424,7 @@ try { -## Next Steps +## Next steps diff --git a/docs/guides/structured-outputs.mdx b/docs/guides/structured-outputs.mdx index 03d869d..b82f55a 100644 --- a/docs/guides/structured-outputs.mdx +++ b/docs/guides/structured-outputs.mdx @@ -5,7 +5,7 @@ description: Generate type-safe JSON with generateObject() and streamObject() us core-ai provides strongly-typed structured outputs using Zod schemas, ensuring your responses match expected formats with full TypeScript type inference. -## Generate Structured Objects +## Generate structured objects Use `generateObject()` to get validated JSON responses: @@ -42,7 +42,7 @@ console.log('Temperature:', result.object.temperatureC); console.log('Summary:', result.object.summary); ``` -## Stream Structured Objects +## Stream structured objects Stream JSON as it's being generated: @@ -100,7 +100,7 @@ type ObjectStreamEvent = The `object` event is emitted once, when the full object payload has been validated. Use `object-delta` events for progressive UI updates while JSON is still streaming. 
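In practice the loop usually distinguishes the two (a sketch: it assumes `result` came from `streamObject()` as in the UI-updates example below, and leaves out the event payload fields, which are listed in the `ObjectStreamEvent` type above):

```typescript
for await (const event of result) {
  if (event.type === 'object-delta') {
    // Still streaming: refresh the partial rendering here
  } else if (event.type === 'object') {
    // Emitted once the complete payload has passed schema validation
  }
}

// The fully validated object is also available after streaming completes
const response = await result.result;
console.log(response.object);
```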
-## Complex Schema Examples +## Complex schema examples @@ -226,7 +226,7 @@ type ObjectStreamEvent = -## Schema Descriptions +## Schema descriptions Add descriptions to help the model understand your schema: @@ -252,7 +252,7 @@ const result = await generateObject({ }); ``` -## Type Inference +## Type inference TypeScript automatically infers types from Zod schemas: @@ -278,7 +278,7 @@ const result = await generateObject({ const user: User = result.object; ``` -## Error Handling +## Error handling Handle validation and parsing errors: @@ -310,7 +310,7 @@ try { } ``` -## Streaming with UI Updates +## Streaming with UI updates Update your UI as the object is built: @@ -350,7 +350,7 @@ const response = await result.result; console.log('Complete recipe:', response.object); ``` -## Configuration Options +## Configuration options Customize structured output generation: @@ -366,7 +366,7 @@ const result = await generateObject({ }); ``` -## Best Practices +## Best practices @@ -433,7 +433,7 @@ const result = await generateObject({ -## Next Steps +## Next steps diff --git a/docs/guides/tool-calling.mdx b/docs/guides/tool-calling.mdx index f3bd65c..980838c 100644 --- a/docs/guides/tool-calling.mdx +++ b/docs/guides/tool-calling.mdx @@ -5,7 +5,7 @@ description: Let models use external tools and functions with defineTool() Tool calling enables language models to interact with external functions and APIs, extending their capabilities beyond text generation. -## Define a Tool +## Define a tool Create tools using `defineTool()` with Zod schemas: @@ -23,7 +23,7 @@ const weatherTool = defineTool({ }); ``` -## Basic Tool Usage +## Basic tool usage Implement a complete tool calling flow: @@ -112,7 +112,7 @@ const secondResult = await generate({ console.log('Final response:', secondResult.content); ``` -## Tool Choice Strategies +## Tool choice strategies Control when and how tools are used: @@ -150,7 +150,7 @@ const result = await generate({ }); ``` -## Multiple Tools +## Multiple tools Provide multiple tools for the model to choose from: @@ -206,7 +206,7 @@ if (result.finishReason === 'tool-calls') { } ``` -## Handling Tool Calls +## Handling tool calls Best practices for executing tool calls: @@ -297,12 +297,13 @@ if (result.finishReason === 'tool-calls') { } ``` -## Tool Calling with Streaming +## Tool calling with streaming Handle tool calls during streaming: ```typescript import { stream, defineTool } from '@core-ai/core-ai'; +import type { ToolCall } from '@core-ai/core-ai'; import { z } from 'zod'; const calculatorTool = defineTool({ @@ -350,7 +351,7 @@ if (response.finishReason === 'tool-calls') { } ``` -## Advanced Tool Examples +## Advanced tool examples @@ -427,7 +428,7 @@ if (response.finishReason === 'tool-calls') { -## Tool Result Messages +## Tool result messages Structure tool results properly: @@ -450,7 +451,7 @@ const errorMessage: ToolResultMessage = { }; ``` -## Helper: Convert Result to Message +## Helper: convert result to message Use `resultToMessage()` to simplify conversation building: @@ -480,7 +481,7 @@ if (firstResult.finishReason === 'tool-calls') { } ``` -## Best Practices +## Best practices @@ -559,7 +560,7 @@ if (firstResult.finishReason === 'tool-calls') { -## Next Steps +## Next steps