diff --git a/py/GEMINI.md b/py/GEMINI.md
index cd3ddfab0e..5dfe800066 100644
--- a/py/GEMINI.md
+++ b/py/GEMINI.md
@@ -1728,6 +1728,37 @@ For each plugin, verify:
 4. **Authentication**: Use provider's recommended auth mechanism and headers
 5. **Endpoints**: URLs match provider's documented endpoints
 
+### Model Catalog Accuracy (Mandatory)
+
+**CRITICAL: Never invent model names or IDs.** Every model ID in a plugin's catalog
+MUST be verified against the provider's official API documentation before being added.
+
+#### Verification Steps
+
+1. **Check the provider's official model page** (see Provider Documentation Links below)
+2. **Confirm the exact API model ID string** — not the marketing name, but the string
+   you pass to the API (e.g., `claude-opus-4-6-20260205`, not "Claude Opus 4.6")
+3. **Verify the model is GA (Generally Available)** — do not add models that are only
+   announced, in private preview, or behind waitlists
+4. **Confirm capabilities** — check if the model supports vision, tools, system role,
+   structured output, etc. from the official docs
+5. **Use date-suffixed IDs as versions** — store the alias (e.g., `claude-opus-4-6`)
+   as the key and the dated ID (e.g., `claude-opus-4-6-20260205`) in `versions=[]`
+
+#### Provider API Model Pages
+
+| Provider | Where to verify model IDs |
+|----------|---------------------------|
+| Anthropic | https://docs.anthropic.com/en/docs/about-claude/models |
+| OpenAI | https://platform.openai.com/docs/models |
+| xAI | https://docs.x.ai/docs/models |
+| Mistral | https://docs.mistral.ai/getting-started/models/models_overview/ |
+| DeepSeek | https://api-docs.deepseek.com/quick_start/pricing |
+| HuggingFace | https://huggingface.co/docs/api-inference/ |
+| AWS Bedrock | https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html |
+| Azure/Foundry | https://ai.azure.com/catalog/models |
+| Cloudflare | https://developers.cloudflare.com/workers-ai/models/ |
+
 ### Common Issues Found During Verification
 
 | Issue Type | Example | How to Fix |
diff --git a/py/README.md b/py/README.md
index 0ddc99b6ac..02d48e086c 100644
--- a/py/README.md
+++ b/py/README.md
@@ -107,7 +107,7 @@ See the [samples/README.md](samples/README.md) for instructions on running indiv
 Quick start:
 
 ```bash
-cd samples/google-genai-hello
+cd samples/provider-google-genai-hello
 ./run.sh
 ```
diff --git a/py/docs/python_docs_roadmap.md b/py/docs/python_docs_roadmap.md
new file mode 100644
index 0000000000..5588f6bedd
--- /dev/null
+++ b/py/docs/python_docs_roadmap.md
@@ -0,0 +1,284 @@
+# Python Documentation Roadmap for genkit.dev
+
+> **Source:** [genkit-ai/docsite](https://github.com/genkit-ai/docsite)
+> **Docs path:** `src/content/docs/docs/`
+> **Generated:** 2026-02-07
+> **Updated:** 2026-02-07
+>
+> **Scope Exclusions:**
+> - **Chat/Session API** — Deprecated, skip
+> - **Agents / Multi-Agent** — Not yet in Python SDK, skip
+> - **MCP** — Will come later, skip for now
+>
+> This roadmap tracks every genkit.dev documentation page and whether Python
+> examples/tabs need to be added, updated, or are already complete.
+> It also identifies features demonstrated in JS examples that should be covered by
+> Python samples in the `firebase/genkit` repo.
+
+---
+
+## Summary
+
+| Status | Count | Description |
+|--------|-------|-------------|
+| ✅ Complete | 5 | Python tab exists with full parity |
+| 🔶 Partial | 6 | Python tab exists but is incomplete or stale |
+| ❌ Missing | 8 | No Python tab at all (JS/Go only or JS-only) |
+| ➖ N/A | 8 | Language-agnostic or meta pages |
+
+---
+
+## 1. Core Documentation Pages
+
+### ✅ Python Tab Complete (verify accuracy)
+
+| Page | File | Languages | Notes |
+|------|------|-----------|-------|
+| **Models** | `models.mdx` | js, go, python | Python examples for: `generate()`, system prompts, model parameters, structured output, streaming, multimodal input. **Missing:** Generating Media, Middleware (retry/fallback). |
+| **Tool Calling** | `tool-calling.mdx` | js, go, python | Python examples for: defining tools, using tools, interrupts (link), explicitly handling tool calls. **Missing:** `maxTurns`, dynamically defining tools at runtime. |
+| **Flows** | `flows.mdx` | js, go, python | Python examples for: defining flows, input/output schemas, calling flows, streaming flows, deploying (Flask). **Missing:** Flow steps (`ai.run()`), durable streaming. |
+| **Get Started** | `get-started.mdx` | js, go, python | Complete walkthrough for Python. |
+| **Interrupts** | `interrupts.mdx` | js, go, python | Python examples for interrupt definition and resumption. |
+
+### 🔶 Python Tab Exists but Incomplete
+
+| Page | File | Languages | What's Missing |
+|------|------|-----------|----------------|
+| **Models** | `models.mdx` | js, go, python | **Generating Media** section (Python SDK supports TTS, image gen via google-genai). **Middleware** section (retry/fallback — Python has `use=` support but no docs). |
+| **Tool Calling** | `tool-calling.mdx` | js, go, python | **`maxTurns`** — Python supports `max_turns=`. **Dynamically defining tools at runtime** — needs investigation. **Streaming + Tool calling** — needs docs. |
+| **Flows** | `flows.mdx` | js, go, python | **Flow steps** (`genkit.Run()` equivalent in Python). **Durable streaming** — needs investigation. |
+| **RAG** | `rag.mdx` | js, go, python | Python tab may be stale. Verify: indexers, embedders, retrievers, simple retrievers, custom retrievers, rerankers sections. |
+| **Context** | `context.mdx` | js only (rendered) | The rendered page shows JS-only. Python supports `context=` on `generate()` and flows. Needs Python examples for context in actions, context at runtime, context propagation. |
+| **Dotprompt** | `dotprompt.mdx` | js, go | Python SDK has dotprompt support (`genkit.core.prompt`). Needs full Python tab with: creating prompt files, running prompts, model configuration, schemas, tool calling in prompts, multi-message prompts, partials, prompt variants, defining prompts in code. |
+
+### ❌ Python Tab Missing (needs to be added)
+
+| Page | File | Current Languages | Priority | What to Add |
+|------|------|-------------------|----------|-------------|
+| **Chat (Sessions)** | `chat.mdx` | js, go | **P1** | Python SDK has `ai.chat()` and `Session` with `session.chat()`. Need: session basics, stateful sessions with tools, multi-thread sessions. Session persistence is experimental — may tag as such. |
+| **Agentic Patterns** | `agentic-patterns.mdx` | js, go | **P1** | Python supports all required primitives (flows, tools, interrupts).
Need: sequential workflow, conditional routing, parallel execution, tool calling, iterative refinement, autonomous agent, stateful interactions. | +| **Multi-Agent** | `multi-agent.mdx` | js (likely) | **P2** | Need to verify Python support for agent-to-agent delegation. | +| **Durable Streaming** | `durable-streaming.mdx` | js (likely) | **P3** | Need to verify Python support. | +| **Client SDK** | `client.mdx` | js | **P3** | Client-side integration. May not apply to Python backend SDK directly. | +| **MCP Server** | `mcp-server.mdx` | js (likely) | **P2** | Python has MCP support via `genkit.plugins.mcp`. Needs Python examples. | +| **Model Context Protocol** | `model-context-protocol.mdx` | js (likely) | **P2** | Python MCP client integration. | +| **Evaluation** | `evaluation.mdx` | js (likely) | **P2** | Python has evaluator support (`evaluator-demo` sample exists). Needs Python examples. | + +### ➖ Language-Agnostic / Meta Pages + +| Page | File | Notes | +|------|------|-------| +| **Overview** | `overview.mdx` | Conceptual overview, no code tabs needed | +| **API References** | `api-references.mdx` | Links to API docs | +| **API Stability** | `api-stability.mdx` | Policy document | +| **Error Types** | `error-types.mdx` | Reference | +| **Feedback** | `feedback.mdx` | Meta | +| **Develop with AI** | `develop-with-ai.mdx` | Meta/guide | +| **DevTools** | `devtools.mdx` | Dev UI documentation | +| **Local Observability** | `local-observability.mdx` | Observability setup | + +--- + +## 2. Subdirectory Pages (need individual audit) + +| Directory | Path | Known Pages | Python Status | +|-----------|------|-------------|---------------| +| **Deployment** | `deployment/` | Cloud Run, Firebase, etc. | 🔶 Python Flask deployment exists; verify others | +| **Frameworks** | `frameworks/` | Express, etc. | ❌ Need Flask/FastAPI/Starlette Python examples | +| **Integrations** | `integrations/` | Various provider plugins | 🔶 Some Python plugins documented; need audit | +| **Observability** | `observability/` | GCP, custom, etc. | 🔶 Python GCP telemetry plugin exists | +| **Plugin Authoring** | `plugin-authoring/` | Writing plugins | ❌ Need Python plugin authoring guide | +| **Resources** | `resources/` | Additional resources | ➖ Likely language-agnostic | +| **Tutorials** | `tutorials/` | Step-by-step guides | ❌ Need Python tutorials | + +--- + +## 3. Feature Parity: JS Examples → Python Samples + +This section maps JS features documented on genkit.dev to their Python sample +coverage status. + +### `/docs/models` Features + +| Feature | JS Example | Python Sample Status | Python SDK Support | +|---------|-----------|---------------------|-------------------| +| Simple generation | `ai.generate('prompt')` | ✅ All hello samples have `say_hi` | ✅ | +| System prompts | `system: "..."` | ✅ Being added to all samples | ✅ | +| Multi-turn (messages) | `messages: [...]` | ✅ Being added to all samples | ✅ | +| Model parameters | `config: {...}` | ✅ `say_hi_with_config` in most samples | ✅ | +| Structured output | `output: { schema: ... 
}` | ✅ `generate_character` in most samples | ✅ | +| Streaming | `ai.generateStream()` | ✅ `streaming_flow` / `say_hi_stream` | ✅ | +| Streaming + structured | `generateStream() + output schema` | ❌ No dedicated sample | ✅ (need sample) | +| Multimodal input | `prompt: [{media: ...}, {text: ...}]` | ✅ `describe_image` in google-genai, anthropic, xai, msf | ✅ | +| Generating media (images) | `output: { format: 'media' }` | ❌ No dedicated sample | ✅ (google-genai supports it) | +| Generating media (TTS) | Text-to-speech | ❌ No dedicated sample | ✅ (google-genai supports it) | +| Middleware (retry) | `use: [retry({...})]` | ❌ No sample | 🔶 Python has `use=` plumbing, but no retry/fallback middleware defined | +| Middleware (fallback) | `use: [fallback({...})]` | ❌ No sample | 🔶 Same as above | + +### `/docs/tool-calling` Features + +| Feature | JS Example | Python Sample Status | Python SDK Support | +|---------|-----------|---------------------|-------------------| +| Define tools | `ai.defineTool()` | ✅ All samples with tools | ✅ | +| Use tools | `tools: [getWeather]` | ✅ `weather_flow` in most samples | ✅ | +| `maxTurns` | `maxTurns: 8` | ✅ 3 samples use `max_turns=2` | ✅ | +| Dynamic tools at runtime | `tool({...})` | ❌ No sample | ❓ Need investigation | +| Interrupts | `ctx.interrupt()` | ✅ `tool-interrupts`, `google-genai-hello`, `short-n-long` | ✅ | +| `returnToolRequests` | `returnToolRequests: true` | ✅ 1 sample (`google-genai-context-caching`) | ✅ | +| Streaming + tool calling | Stream with tools | ❌ No dedicated sample | ✅ (need sample) | + +### `/docs/interrupts` Features + +| Feature | JS Example | Python Sample Status | Python SDK Support | +|---------|-----------|---------------------|-------------------| +| `defineInterrupt()` | Dedicated interrupt definition | ❌ No equivalent sample | ❓ `define_interrupt` not found in Python SDK | +| `ctx.interrupt()` in tool | Tool-based interrupts | ✅ `tool-interrupts`, `google-genai-hello` | ✅ | +| Restartable interrupts | `restart` option | ❌ No sample | ❓ Need investigation | +| `response.interrupts` check | Interrupt loop | ✅ Demonstrated in `tool-interrupts` | ✅ | +| `resume: { respond: [...] }` | Resume generation | ❌ No sample using `resume` | ❓ Need investigation | + +### `/docs/context` Features + +| Feature | JS Example | Python Sample Status | Python SDK Support | +|---------|-----------|---------------------|-------------------| +| Context in flow | `{context}` destructured | ❌ No sample | ✅ (context available via ActionRunContext) | +| Context in tool | `{context}` in tool handler | ❌ No sample | ✅ (ToolContext has context) | +| Context in prompt file | `{{@auth.name}}` | ❌ No sample | ✅ (dotprompt supports @) | +| Provide context at runtime | `context: { auth: ... }` | ❌ No sample | ✅ (`context=` supported on `generate()`) | +| Context propagation | Auto-propagation to tools | ❌ No sample | ✅ | + +### `/docs/chat` Features + +| Feature | JS Example | Python Sample Status | Python SDK Support | +|---------|-----------|---------------------|-------------------| +| `ai.chat()` basic | Create chat, send messages | ❌ No sample | ✅ `ai.chat()` exists | +| Chat with system prompt | `ai.chat({ system: '...' 
})` | ❌ No sample | ✅ | +| Stateful sessions | Session with state management | ❌ No sample | ✅ `Session` class exists | +| Multi-thread sessions | Named chat threads | ❌ No sample | ✅ `session.chat('thread')` | +| Session persistence | Custom store implementation | ❌ No sample | 🔶 Experimental | + +### `/docs/flows` Features + +| Feature | JS Example | Python Sample Status | Python SDK Support | +|---------|-----------|---------------------|-------------------| +| Define flows | `@ai.flow()` decorator | ✅ All samples | ✅ | +| Input/output schemas | Pydantic models | ✅ All samples | ✅ | +| Streaming flows | `ctx.send_chunk()` | ✅ Several samples | ✅ | +| Flow steps (`ai.run()`) | Named trace spans | ❌ No sample | ❓ Need investigation | +| Deploy with Flask | Flask integration | ✅ Documented on genkit.dev | ✅ | + +### `/docs/dotprompt` Features + +| Feature | JS Example | Python Sample Status | Python SDK Support | +|---------|-----------|---------------------|-------------------| +| Prompt files (.prompt) | `.prompt` file format | ❌ No sample | ✅ Dotprompt supported | +| Running prompts from code | `ai.prompt('name')` | ❌ No sample | ✅ | +| Input/output schemas | Picoschema/JSON Schema | ❌ No sample | ✅ | +| Tool calling in prompts | `tools: [...]` in frontmatter | ❌ No sample | ✅ | +| Multi-message prompts | `{{role "system"}}` | ❌ No sample | ✅ | +| Partials | `{{> partialName}}` | ❌ No sample | ✅ | +| Prompt variants | `.variant.prompt` files | ❌ No sample | ✅ | +| Defining prompts in code | `ai.define_prompt()` | ❌ No sample | ✅ | + +### `/docs/agentic-patterns` Features + +| Feature | JS Example | Python SDK Support | Sample Needed | +|---------|-----------|-------------|---------------| +| Sequential workflow | Chain of flows | ✅ | ❌ No sample | +| Conditional routing | If/else in flow | ✅ | ❌ No sample | +| Parallel execution | Multiple concurrent calls | ✅ (`asyncio.gather`) | ❌ No sample | +| Tool calling | Tools in generate | ✅ | ✅ Exists | +| Iterative refinement | Loop with evaluation | ✅ | ❌ No sample | +| Autonomous agent | Agent with tools loop | ✅ | ❌ No sample | +| Stateful interactions | Session-based | ✅ | ❌ No sample | + +--- + +## 4. Priority Action Items + +### P0: Critical (blocking feature parity) +1. **`models.mdx`** — Add Generating Media section for Python (images, TTS) +2. **`chat.mdx`** — Add Python tab with `ai.chat()` and `Session` examples +3. **`context.mdx`** — Add Python tab with context in flows, tools, and generate +4. **`dotprompt.mdx`** — Add Python tab with full dotprompt examples + +### P1: High Priority (important for developer experience) +5. **`agentic-patterns.mdx`** — Add Python tab for all agentic patterns +6. **`tool-calling.mdx`** — Add `max_turns` docs, streaming + tools +7. **`models.mdx`** — Add Middleware section for Python (investigate retry/fallback) +8. **`evaluation.mdx`** — Add Python tab for evaluation +9. **`mcp-server.mdx`** / **`model-context-protocol.mdx`** — Add Python MCP examples +10. **Python samples** — Add `streaming_structured_output` flow to hello samples + +### P2: Medium Priority (polish) +11. **`flows.mdx`** — Add flow steps docs for Python +12. **`multi-agent.mdx`** — Add Python tab if SDK supports agent delegation +13. **`frameworks/`** — Add Flask/FastAPI/Starlette deployment guides +14. **`plugin-authoring/`** — Add Python plugin authoring guide +15. **`interrupts.mdx`** — Verify Python section covers `defineInterrupt` equivalent and restartable interrupts + +### P3: Low Priority (nice to have) +16. 
**`durable-streaming.mdx`** — Investigate Python support +17. **`client.mdx`** — Determine if applicable to Python +18. **`tutorials/`** — Create Python-specific tutorials +19. **`deployment/`** — Add Python Cloud Run, etc. deployment guides + +--- + +## 5. Python Samples Gap Analysis + +### Samples needing `system_prompt` flow (in progress) +- [x] `google-genai-hello` +- [x] `compat-oai-hello` +- [x] `anthropic-hello` +- [x] `ollama-hello` +- [x] `amazon-bedrock-hello` +- [x] `deepseek-hello` +- [x] `xai-hello` +- [x] `cloudflare-workers-ai-hello` +- [ ] `microsoft-foundry-hello` +- [ ] `mistral-hello` +- [ ] `huggingface-hello` +- [ ] `google-genai-vertexai-hello` +- [ ] `short-n-long` +- [ ] `model-garden` + +### Samples needing `multi_turn_chat` flow (in progress) +- [x] `google-genai-hello` +- [x] `compat-oai-hello` +- [x] `anthropic-hello` +- [x] `ollama-hello` +- [x] `amazon-bedrock-hello` +- [x] `xai-hello` +- [x] `cloudflare-workers-ai-hello` +- [ ] `microsoft-foundry-hello` +- [ ] `google-genai-vertexai-hello` +- [ ] `short-n-long` +- [ ] `model-garden` + +### New standalone samples needed +- [x] ~~`dotprompt-hello`~~ — Covered by `prompt-demo` sample ⚠️ (P1 bug: recursion depth exceeded) +- [ ] ~~`chat-hello`~~ — Chat/Session API deprecated, skip +- [ ] ~~`agentic-patterns`~~ — Agents not yet in Python SDK, skip +- [ ] `context-demo` — Need dedicated context flows (context in generate, flows, tools, propagation, `ai.current_context()`) +- [x] ~~`streaming-structured-output`~~ — Covered by `google-genai-hello` / hello samples +- [x] ~~`media-generation`~~ — Covered by `media-models-demo` sample +- [ ] `middleware-demo` — Custom retry/fallback middleware using `use=` parameter +- [ ] `streaming-tools` — Streaming + tool calling flow +- [ ] `eval-pipeline` — End-to-end eval: dataset → inference → metrics → results + +--- + +### Dotprompt sample gaps (in `prompt-demo`) +- [ ] Tool calling in prompts (`tools: [...]` in frontmatter) +- [ ] Multimodal prompts (`{{media url=photoUrl}}`) +- [ ] Defining prompts in code (`ai.define_prompt()`) +- [ ] Default input values (`default:` in frontmatter) + +--- + +## 6. Known Bugs + +| Sample | Bug | Severity | +|--------|-----|----------| +| `prompt-demo` | `Failed to load lazy action recipe.robot: maximum recursion depth exceeded` / same for `story` | **P0** — Blocks all prompt feature demos | diff --git a/py/engdoc/parity-analysis/sample_parity_analysis.md b/py/engdoc/parity-analysis/sample_parity_analysis.md deleted file mode 100644 index 02ed66ac39..0000000000 --- a/py/engdoc/parity-analysis/sample_parity_analysis.md +++ /dev/null @@ -1,275 +0,0 @@ -# Sample Parity Analysis: JS vs Python - -This document analyzes the sample/testapp coverage between JS and Python implementations. - ---- - -## Summary - -**JS Sample Locations:** -- `/samples/` - 9 polished demo samples (js-angular, js-chatbot, js-menu, etc.) 
-- `/js/testapps/` - 32 internal test/demo apps (advanced scenarios) - -**Python Sample Location:** -- `/py/samples/` - 25 samples - -| Metric | JS (`samples/` + `testapps/`) | Python (`py/samples/`) | Gap | -|--------|-------------------------------|------------------------|-----| -| Polished demos | 9 | 0 | **-9** | -| Plugin demos | 8 | 12 | +4 ✅ | -| Advanced scenarios | 15 | 3 | **-12** | -| RAG samples | 5 | 3 | -2 | -| Chat/Chatbot | 2 (`js-chatbot`, menu) | 0 | **-2** | -| Multi-agent | 2 (`js-schoolAgent`, multiagents-demo) | 0 | **-2** | - ---- - -## Sample Category Comparison - -### Plugin Hello World Demos - -| Plugin | JS | Python | Notes | -|--------|-----|--------|-------| -| Google GenAI | ✅ `basic-gemini` | ✅ `google-genai-hello` | Parity ✅ | -| Vertex AI | ✅ (in basic-gemini) | ✅ `google-genai-vertexai-hello` | Parity ✅ | -| Anthropic | ✅ `anthropic` | ✅ `anthropic-hello` | Parity ✅ | -| Ollama | ✅ `ollama` | ✅ `ollama-hello` | Parity ✅ | -| OpenAI Compat | ✅ `compat-oai` | ✅ `compat-oai-hello` | Parity ✅ | -| xAI (Grok) | ❌ | ✅ `xai-hello` | Python extra ✅ | -| DeepSeek | ❌ | ✅ `deepseek-hello` | Python extra ✅ | -| Model Garden | ✅ `vertexai-modelgarden` | ✅ `model-garden` | Parity ✅ | - -### Image Generation - -| Feature | JS | Python | Notes | -|---------|-----|--------|-------| -| Imagen (Google AI) | ✅ (in multimodal) | ✅ `google-genai-image` | Parity ✅ | -| Imagen (Vertex) | ✅ (in multimodal) | ✅ `google-genai-vertexai-image` | Parity ✅ | -| Multimodal input | ✅ `multimodal` | ❌ | **Python missing** | - -### Context & Caching - -| Feature | JS | Python | Notes | -|---------|-----|--------|-------| -| Context Caching | ✅ `context-caching`, `context-caching2` | ✅ `google-genai-context-caching` | Parity ✅ | -| Code Execution | ✅ (in basic-gemini) | ✅ `google-genai-code-execution` | Parity ✅ | - -### RAG (Retrieval Augmented Generation) - -| Feature | JS | Python | Notes | -|---------|-----|--------|-------| -| Basic RAG | ✅ `rag` | ✅ `menu` (has RAG case) | Parity ✅ | -| Vector Search (Firestore) | ✅ `vertexai-vector-search-firestore` | ✅ `vertex-ai-vector-search-firestore` | Parity ✅ | -| Vector Search (BigQuery) | ✅ `vertexai-vector-search-bigquery` | ✅ `vertex-ai-vector-search-bigquery` | Parity ✅ | -| Vector Search (Custom) | ✅ `vertexai-vector-search-custom` | ❌ | Python missing | -| Local Vector Store | ❌ | ✅ `dev-local-vectorstore-hello` | Python extra ✅ | -| Firestore Retriever | ❌ | ✅ `firestore-retreiver` ⚠️ typo | Python extra ✅ | -| Reranker | ✅ `vertexai-reranker` | ❌ | **Python missing** | - -### Evaluation - -| Feature | JS | Python | Notes | -|---------|-----|--------|-------| -| Custom Evaluators | ✅ `custom-evaluators` | ✅ `evaluator-demo` | Parity ✅ | -| Eval Pipelines | ✅ `evals` | ❌ | **Python missing** | -| Model Tester | ✅ `model-tester` | ❌ | Python missing | -| Format Tester | ✅ `format-tester` | ✅ `format-demo` | Parity ✅ | - -### Multi-Agent & Advanced - -| Feature | JS | Python | Notes | -|---------|-----|--------|-------| -| Multi-Agent Demo | ✅ `multiagents-demo` | ❌ | **Python missing** | -| MCP Integration | ✅ `mcp` | ❌ | **Python missing** | -| Durable Streaming | ✅ `durable-streaming` | ❌ | Python missing | -| Tool Interrupts | ❌ | ✅ `tool-interrupts` | Python extra ✅ | - -### Web Frameworks - -| Feature | JS | Python | Notes | -|---------|-----|--------|-------| -| Express | ✅ `express` | N/A | | -| Next.js | ✅ `next` | N/A | | -| Flask | N/A | ✅ `flask-hello` | | -| Firebase Functions | ✅ `firebase-functions-sample1` | ❌ | Python 
missing | - -### Prompts & Flows - -| Feature | JS | Python | Notes | -|---------|-----|--------|-------| -| Prompt Files | ✅ `prompt-file` | ✅ `prompt-demo` | Parity ✅ | -| Flow Samples | ✅ `flow-sample1`, `flow-simple-ai` | ✅ (in menu) | Parity ✅ | -| Menu Demo | ✅ `menu`, `docs-menu-basic`, `docs-menu-rag` | ✅ `menu` | Parity ✅ | -| DevUI Gallery | ✅ `dev-ui-gallery` | ❌ | **Python missing** | - -### Multi-Server - -| Feature | JS | Python | Notes | -|---------|-----|--------|-------| -| Multi-Server | ❌ | ✅ `multi-server` | Python extra ✅ | -| Short 'n Long | ❌ | ✅ `short-n-long` | Python extra ✅ | - ---- - -## Critical Missing Samples (Python) - -### P0 - Must Have - -| Sample | Description | JS Reference | -|--------|-------------|--------------| -| **multiagents-demo** | Multi-agent orchestration | `js/testapps/multiagents-demo` | -| **mcp** | MCP tool host integration | `js/testapps/mcp` | -| **multimodal** | Multimodal input (images, audio) | `js/testapps/multimodal` | - -### P1 - Important - -| Sample | Description | JS Reference | -|--------|-------------|--------------| -| **vertexai-reranker** | Reranker for RAG quality | `js/testapps/vertexai-reranker` | -| **evals** | Full evaluation pipeline | `js/testapps/evals` | -| **dev-ui-gallery** | DevUI feature showcase | `js/testapps/dev-ui-gallery` | -| **firebase-functions** | Cloud Functions deployment | `js/testapps/firebase-functions-sample1` | - -### P2 - Nice to Have - -| Sample | Description | JS Reference | -|--------|-------------|--------------| -| **durable-streaming** | Persistence for streaming | `js/testapps/durable-streaming` | -| **model-tester** | Model capability testing | `js/testapps/model-tester` | -| **vector-search-custom** | Custom vector search | `js/testapps/vertexai-vector-search-custom` | - ---- - -## Consolidated Plugin Demo Proposal - -To ensure consistent feature demonstration across plugins, each plugin should have a **unified sample** that demonstrates: - -### Core Features (All Plugins Must Demo) - -``` -plugin-demo/ -├── flows/ -│ ├── 01_basic_generate.py # Simple text generation -│ ├── 02_streaming.py # Streaming response -│ ├── 03_structured_output.py # JSON schema output -│ ├── 04_tool_calling.py # Tool/function calling -│ ├── 05_multimodal.py # Image/audio input (if supported) -│ ├── 06_multi_turn.py # Conversation history -│ ├── 07_system_prompt.py # System instructions -│ └── 08_middleware.py # Request/response middleware -├── prompts/ -│ └── demo.prompt # Dotprompt example -└── main.py # Entry point -``` - -### Plugin Feature Matrix - -| Feature | google-genai | anthropic | ollama | openai-compat | -|---------|--------------|-----------|--------|---------------| -| Basic Generate | ✅ | ✅ | ✅ | ✅ | -| Streaming | ✅ | ✅ | ✅ | ✅ | -| Structured Output | ✅ | ✅ | ✅ | ✅ | -| Tool Calling | ✅ | ✅ | ✅ | ✅ | -| Multimodal | ✅ | ✅ | ❌ | Varies | -| Context Caching | ✅ | ❌ | ❌ | ❌ | -| Code Execution | ✅ | ❌ | ❌ | ❌ | -| Thinking Mode | ❌ | ✅ | ❌ | ❌ | - -### Proposed Directory Structure - -``` -py/samples/ -├── plugin-demos/ # NEW: Consolidated demos -│ ├── google-genai/ -│ │ └── (all 8 flows) -│ ├── anthropic/ -│ │ └── (all 8 flows) -│ ├── ollama/ -│ │ └── (flows 1-4, 6-8) -│ └── openai-compat/ -│ └── (all 8 flows) -├── advanced/ # NEW: Advanced scenarios -│ ├── multiagent/ -│ ├── mcp-integration/ -│ ├── rag-pipeline/ -│ └── eval-pipeline/ -└── integrations/ # Existing - ├── flask-hello/ - ├── firestore-retriever/ - └── ... 
-``` - ---- - -## Roadmap Integration - -### New Tasks for Sample Parity - -| ID | Task | Effort | Phase | Depends On | -|----|------|--------|-------|------------| -| S1 | Consolidated plugin demo structure | M | 1 | — | -| S2 | Multi-agent sample | L | 4 | Session API | -| S3 | MCP integration sample | M | 4 | MCP Tool Host | -| S4 | Multimodal input sample | S | 2 | — | -| S5 | Reranker sample | S | 3 | Plugin parity | -| S6 | DevUI gallery sample | M | 4 | config_schema | -| S7 | Firebase Functions sample | M | 3 | — | -| S8 | Full eval pipeline sample | M | 3 | Evaluator APIs | - -### Updated Dependency Graph - -```mermaid -flowchart TD - subgraph Samples["Sample Tasks"] - S1[Consolidated Plugin Demos] - S2[Multi-Agent Sample] - S3[MCP Sample] - S4[Multimodal Sample] - S5[Reranker Sample] - S6[DevUI Gallery] - S7[Firebase Functions] - S8[Eval Pipeline] - end - - subgraph Core["Core APIs"] - C3[chat API] - F2[MCP Tool Host] - D4[plugin.model factory] - A1[config_schema] - end - - C3 --> S2 - F2 --> S3 - A1 --> S6 - D4 --> S5 -``` - ---- - -## Execution Plan Update - -### Phase 1 Additions -- **S1: Consolidated plugin demo structure** - Set up the standardized flow structure - -### Phase 2 Additions -- **S4: Multimodal input sample** - Basic image input with Gemini - -### Phase 3 Additions -- **S5: Reranker sample** (after plugin parity) -- **S7: Firebase Functions sample** -- **S8: Full eval pipeline sample** - -### Phase 4 Additions -- **S2: Multi-agent sample** (after Session/Chat API) -- **S3: MCP integration sample** (after MCP Tool Host) -- **S6: DevUI gallery sample** (after config_schema fix) - ---- - -## Quick Wins for Samples - -These can be done immediately with minimal dependencies: - -1. **S1: Consolidated structure** - Just reorganize existing samples -2. **S4: Multimodal sample** - Gemini already supports images -3. **Rename `firestore-retreiver` → `firestore-retriever`** - Typo fix diff --git a/py/engdoc/parity-analysis/sample_parity_roadmap.md b/py/engdoc/parity-analysis/sample_parity_roadmap.md new file mode 100644 index 0000000000..e1754b5a2b --- /dev/null +++ b/py/engdoc/parity-analysis/sample_parity_roadmap.md @@ -0,0 +1,471 @@ +# Sample Parity Analysis: JS vs Python + +> **Updated:** 2026-02-07 +> **Scope:** Every JS code sample on genkit.dev docs -> Python `py/samples/` counterpart. +> +> **Exclusions (per team decision):** +> - **Chat/Session API** -- Deprecated, skip +> - **Agents / Multi-Agent** -- Not yet in Python SDK, skip +> - **MCP** -- Will come later, skip +> - **Durable Streaming** -- Not yet in Python SDK, skip +> - **Client SDK** -- JS client-side only, not applicable to Python backend SDK + +--- + +## Summary + +**JS Sample Locations:** +- `/samples/` - 9 polished demo samples (js-angular, js-chatbot, js-menu, etc.) +- `/js/testapps/` - 32 internal test/demo apps (advanced scenarios) + +**Python Sample Location:** +- `/py/samples/` - 36 samples (including shared, sample-test) + +| Metric | JS (`samples/` + `testapps/`) | Python (`py/samples/`) | Gap | +|--------|-------------------------------|------------------------|-----| +| Plugin hello demos | 8 | 14 | **Python superset** | +| Advanced feature demos | 15 | 10 | **-5** | +| RAG samples | 5 | 4 | -1 | +| Evaluation | 2 | 2 | Parity | +| Media generation | 1 | 3 | **Python superset** | +| Observability | 0 | 2 | **Python superset** | + +--- + +## 1. 
genkit.dev Docs -> Python Sample Coverage + +This is the authoritative mapping from every JS code feature demonstrated in +the genkit.dev documentation to its Python sample coverage. Only in-scope +features are listed (exclusions above apply). + +### `/docs/models` -- Generating Content with AI Models + +| Feature | JS Doc Example | Python Sample | Status | +|---------|---------------|---------------|--------| +| Basic generation | `ai.generate('prompt')` | All hello samples (`generate_greeting`) | Covered | +| Model reference | `googleAI.model('gemini-2.5-flash')` | All hello samples | Covered | +| Model string ID | `model: 'googleai/gemini-2.5-flash'` | All hello samples | Covered | +| System prompts | `system: "..."` | `provider-google-genai-hello` + most hello samples | Covered | +| Multi-turn (messages) | `messages: [{role, content}]` | `provider-google-genai-hello` + most hello samples | Covered | +| Model parameters | `config: {maxOutputTokens, temperature, ...}` | Most hello samples (`generate_with_config`) | Covered | +| Structured output | `output: { schema: ZodSchema }` | Most samples (`generate_character`) | Covered | +| Streaming text | `ai.generateStream()` | Most samples (`generate_streaming_story`) | Covered | +| Streaming + structured | `generateStream() + output schema` | `provider-google-genai-hello` | Covered | +| Multimodal input (image URL) | `prompt: [{media: {url}}, {text}]` | `provider-google-genai-hello`, `provider-anthropic-hello`, `provider-xai-hello`, etc. | Covered | +| Multimodal input (base64) | `data:image/jpeg;base64,...` | `provider-google-genai-hello` describe_image | Covered | +| Generating media (images) | `output: {format: 'media'}` (Imagen) | `provider-google-genai-media-models-demo` | Covered | +| Generating media (TTS) | Text-to-speech | `provider-google-genai-media-models-demo`, `provider-compat-oai-hello` | Covered | +| Middleware (retry) | `use: [retry({...})]` | `framework-middleware-demo` | Covered | +| Middleware (fallback) | `use: [fallback({...})]` | `framework-middleware-demo` | Covered | + +> **SDK Status:** Python has `use=` middleware infrastructure in `generate()`. +> `framework-middleware-demo` demonstrates custom retry and logging middleware. + +### `/docs/tool-calling` -- Tool Calling + +| Feature | JS Doc Example | Python Sample | Status | +|---------|---------------|---------------|--------| +| Define tools | `ai.defineTool()` | All samples with tools | Covered | +| Use tools in generate | `tools: [getWeather]` | Most samples (`generate_weather`) | Covered | +| `maxTurns` | `maxTurns: 8` | 3 samples use `max_turns=2` | Covered | +| `returnToolRequests` | `returnToolRequests: true` | `provider-google-genai-context-caching` | Covered | +| Interrupts (tool-based) | `ctx.interrupt()` | `framework-tool-interrupts`, `provider-google-genai-hello` | Covered | +| Dynamic tools at runtime | Tool defined inline at generate() | `framework-dynamic-tools-demo` uses `ai.dynamic_tool()` | Covered | +| Streaming + tool calling | Stream with tools | All provider hello samples (`generate_streaming_with_tools`) | Covered | + +> **SDK Status:** `ai.dynamic_tool()` exists. Streaming + tools is demonstrated +> in all 12 provider hello samples via `generate_streaming_with_tools` flow. 
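+
+> **Illustrative sketch (not code from the samples):** the shape of a
+> streaming + tool-calling flow, assuming only the API surface this doc
+> already references (`@ai.tool()`, `@ai.flow()`, `ctx.send_chunk()`,
+> `tools=`, `max_turns=`) plus a `generate_stream()` call that mirrors the
+> JS `generateStream()`. The tool body and the exact `generate_stream()`
+> return shape are assumptions, not verified sample code.
+
+```python
+from genkit.ai import Genkit
+from genkit.plugins.google_genai import GoogleAI
+
+ai = Genkit(plugins=[GoogleAI()], model='googleai/gemini-2.5-flash')
+
+
+@ai.tool()
+def get_weather(city: str) -> str:
+    """Stub weather tool for the sketch; real samples call a weather API."""
+    return f'Sunny and 22 C in {city}.'
+
+
+@ai.flow()
+async def generate_streaming_with_tools(city: str, ctx) -> str:
+    # Assumption: generate_stream() returns (chunk iterator, awaitable
+    # final response), mirroring the JS generateStream() shape.
+    stream, response = ai.generate_stream(
+        prompt=f'What should I wear in {city} today?',
+        tools=['get_weather'],  # assumption: tools referenced by registered name
+        max_turns=2,  # confirmed above: Python supports max_turns=
+    )
+    async for chunk in stream:
+        ctx.send_chunk(chunk.text)  # stream partial text to the caller
+    return (await response).text
+```
+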
### `/docs/interrupts` -- Interrupts
+
+| Feature | JS Doc Example | Python Sample | Status |
+|---------|---------------|---------------|--------|
+| Tool-based interrupt | `@ai.tool(interrupt=True)` + `ctx.interrupt()` | `framework-tool-interrupts`, `provider-google-genai-hello` | Covered |
+| Check response.interrupts | Loop checking for interrupts | `framework-tool-interrupts` | Covered |
+| Resume with respond | `resume: { respond: [...] }` | `framework-tool-interrupts`, `provider-google-genai-hello` | Covered |
+| `defineInterrupt()` | Standalone interrupt API | Not in Python SDK | N/A (SDK gap) |
+| Restartable interrupts | `restart` option | Not in Python SDK | N/A (SDK gap) |
+
+> **SDK Status:** Python only supports tool-based interrupts via
+> `@ai.tool(interrupt=True)`. No standalone `define_interrupt()` API exists.
+> This is an SDK feature gap, not a sample gap.
+
+### `/docs/context` -- Context
+
+| Feature | JS Doc Example | Python Sample | Status |
+|---------|---------------|---------------|--------|
+| Context in generate() | `context: { auth: {...} }` | `framework-context-demo` (`context_in_generate`) | Covered |
+| Context in flow | `{context}` destructured | `framework-context-demo` (`context_in_flow`) | Covered |
+| Context in tool | `{context}` in tool handler | `framework-context-demo` | Covered |
+| Context propagation | Auto-propagation to sub-actions | `framework-context-demo` (`context_propagation_chain`) | Covered |
+| `ai.current_context()` | Access current context | `framework-context-demo` (`context_current_context`) | Covered |
+
+> **SDK Status:** Full context support exists: `context=` on `generate()` and
+> flows, `ActionRunContext`, `ai.current_context()`, and auto-propagation.
+> `framework-context-demo` provides comprehensive coverage with 4 dedicated flows.
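+
+> **Illustrative sketch (not the demo's actual code):** the context pattern
+> the rows above describe, using only APIs this doc confirms (`context=` on
+> `generate()`, auto-propagation to tools, `ai.current_context()`). The flow
+> name, tool name, and context dict shape are assumptions.
+
+```python
+from genkit.ai import Genkit
+from genkit.plugins.google_genai import GoogleAI
+
+ai = Genkit(plugins=[GoogleAI()], model='googleai/gemini-2.5-flash')
+
+
+@ai.tool()
+def whoami() -> str:
+    """Reads the caller identity from the ambient action context."""
+    # Assumption: current_context() returns the dict passed via context=.
+    context = ai.current_context() or {}
+    return context.get('auth', {}).get('name', 'anonymous')
+
+
+@ai.flow()
+async def personalized_greeting(prompt: str) -> str:
+    response = await ai.generate(
+        prompt=prompt,
+        tools=['whoami'],  # context auto-propagates into tool calls
+        context={'auth': {'name': 'Ada'}},  # confirmed: context= on generate()
+    )
+    return response.text
+```
+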
+ +### `/docs/dotprompt` -- Managing Prompts with Dotprompt + +| Feature | JS Doc Example | Python Sample | Status | +|---------|---------------|---------------|--------| +| .prompt files | YAML frontmatter + template | `framework-prompt-demo` | Covered (bug: see below) | +| Running prompts from code | `ai.prompt('name')` | `framework-prompt-demo` | Covered (bug: see below) | +| Streaming prompts | `prompt.stream()` | `framework-prompt-demo` | Covered (bug: see below) | +| Input/Output schemas (Picoschema) | `schema:` in frontmatter | `framework-prompt-demo` | Covered (bug: see below) | +| Schema references | `ai.defineSchema()` + name ref | `framework-prompt-demo` | Covered (bug: see below) | +| Model configuration | `config:` in frontmatter | `framework-prompt-demo` | Covered (bug: see below) | +| Handlebars templates | `{{variable}}`, `{{#if}}` | `framework-prompt-demo` | Covered (bug: see below) | +| Multi-message prompts | `{{role "system"}}` | `framework-prompt-demo` (in partial) | Covered (bug: see below) | +| Partials | `{{>partialName}}` | `framework-prompt-demo` (`_style.prompt`) | Covered (bug: see below) | +| Custom helpers | `ai.defineHelper()` | `framework-prompt-demo` (`list` helper) | Covered (bug: see below) | +| Prompt variants | `.variant.prompt` files | Blocked by SDK bug | **BUG** | +| **Tool calling in prompts** | `tools: [...]` in frontmatter | Not in framework-prompt-demo | **GAP** | +| **Multimodal prompts** | `{{media url=photoUrl}}` | Not in framework-prompt-demo | **GAP** | +| **Defining prompts in code** | `ai.definePrompt()` | Not in framework-prompt-demo | **GAP** | +| **Default input values** | `default:` in frontmatter | Not in framework-prompt-demo | **GAP** | + +> **SDK Bug (B1b):** `framework-prompt-demo` had a P0 bug: `Failed to load lazy +> action recipe.robot: maximum recursion depth exceeded`. Root cause is a +> **self-referential lazy loading loop** in the SDK's `create_prompt_from_file()` +> at `py/packages/genkit/src/genkit/blocks/prompt.py` -- when loading a variant +> prompt, `resolve_action_by_key()` is called with the action's own key before +> `_cached_prompt` is set, which triggers `_trigger_lazy_loading()` to re-invoke +> `create_prompt_from_file()` for the same action, causing infinite recursion. +> This is NOT a dotprompt library bug. Only Python is affected (JS uses a +> `lazy()` wrapper guaranteeing single evaluation). +> +> **Workaround (B1a):** `recipe.robot.prompt` was removed to unblock the sample. +> **Fix:** Tracked at [firebase/genkit#4491](https://github.com/firebase/genkit/issues/4491). +> Once fixed, variant demo should be re-added. + +### `/docs/flows` -- Flows + +| Feature | JS Doc Example | Python Sample | Status | +|---------|---------------|---------------|--------| +| Define flows | `@ai.flow()` decorator | All samples | Covered | +| Input/output schemas | Pydantic models | All samples | Covered | +| Streaming flows | `ctx.send_chunk()` | Several samples | Covered | +| Deploy with Flask | Flask integration | `web-flask-hello` | Covered | +| Flow steps (`ai.run()`) | Named trace spans | `provider-google-genai-hello` (line 434), `framework-realtime-tracing-demo` | Covered | + +> All flow features documented on genkit.dev are covered. 
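+
+> **Illustrative sketch:** the covered flow features combined in one place --
+> Pydantic input schema, `ctx.send_chunk()` streaming, and an `ai.run()` step.
+> The `ai.run(name, fn)` signature is assumed to mirror the JS `run()` helper;
+> the flow itself is invented for illustration.
+
+```python
+from genkit.ai import Genkit
+from genkit.plugins.google_genai import GoogleAI
+from pydantic import BaseModel
+
+ai = Genkit(plugins=[GoogleAI()], model='googleai/gemini-2.5-flash')
+
+
+class PostRequest(BaseModel):
+    topic: str
+
+
+@ai.flow()
+async def outline_then_draft(request: PostRequest, ctx) -> str:
+    # Assumption: ai.run(name, fn) wraps fn in a named trace span,
+    # mirroring the JS run() helper.
+    outline = await ai.run(
+        'outline',
+        lambda: ai.generate(prompt=f'Outline a post about {request.topic}.'),
+    )
+    stream, response = ai.generate_stream(
+        prompt=f'Write the post from this outline:\n{outline.text}',
+    )
+    async for chunk in stream:
+        ctx.send_chunk(chunk.text)  # named step above, streamed draft here
+    return (await response).text
+```
+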
+ +### `/docs/rag` -- Retrieval-Augmented Generation + +| Feature | JS Doc Example | Python Sample | Status | +|---------|---------------|---------------|--------| +| Basic RAG flow | Retriever + generate | `framework-restaurant-demo` (case_04/05), `provider-firestore-retriever` | Covered | +| Embedders | `ai.embed()` | `provider-google-genai-hello`, `provider-ollama-hello` | Covered | +| Custom retriever | `ai.defineRetriever()` | `provider-firestore-retriever` | Covered | +| Simple retriever | `ai.defineSimpleRetriever()` | No equivalent | **GAP** (minor) | +| Vector search (Firestore) | Firestore vector store | `provider-vertex-ai-vector-search-firestore` | Covered | +| Vector search (BigQuery) | BigQuery vector store | `provider-vertex-ai-vector-search-bigquery` | Covered | +| Reranker | `ai.rerank()` | `provider-vertex-ai-rerank-eval` | Covered | +| Custom reranker | `ai.defineReranker()` | No sample | **GAP** (minor) | +| **Indexer** | `ai.index()` + flow | **No indexer sample** | **GAP** | + +> **SDK Status:** Python SDK does not have a built-in local dev vector store +> plugin (like JS `@genkit-ai/dev-local-vectorstore`). Indexing is done via +> external SDKs (Firestore, etc.). The RAG Python tab on genkit.dev shows +> Firestore-based retrieval only. + +### `/docs/evaluation` -- Evaluation + +| Feature | JS Doc Example | Python Sample | Status | +|---------|---------------|---------------|--------| +| Custom evaluator | `ai.defineEvaluator()` | `framework-evaluator-demo` | Covered | +| Built-in metrics | `GenkitMetric.MALICIOUSNESS` | `provider-vertex-ai-rerank-eval` (BLEU, ROUGE, etc.) | Covered | +| **Full eval pipeline** | Dataset -> inference -> metrics -> results | **No end-to-end pipeline sample** | **GAP** | +| **Data synthesis** | Generate test questions from docs | **No sample** | **GAP** | + +> The JS `evals` testapp demonstrates dataset creation, flow evaluation, and +> result analysis as a complete pipeline. Python needs an equivalent. + +--- + +## 2. Plugin Hello World Demos + +| Plugin | JS | Python | Notes | +|--------|-----|--------|-------| +| Google GenAI | Yes | `provider-google-genai-hello` | Parity | +| Vertex AI | Yes (in basic-gemini) | `provider-google-genai-vertexai-hello` | Parity | +| Anthropic | Yes | `provider-anthropic-hello` | Parity | +| Ollama | Yes | `provider-ollama-hello` | Parity | +| OpenAI Compat | Yes | `provider-compat-oai-hello` | Parity | +| xAI (Grok) | No | `provider-xai-hello` | Python extra | +| DeepSeek | No | `provider-deepseek-hello` | Python extra | +| Model Garden | Yes | `provider-vertex-ai-model-garden` | Parity | +| Mistral | No | `provider-mistral-hello` | Python extra | +| HuggingFace | No | `provider-huggingface-hello` | Python extra | +| Amazon Bedrock | No | `provider-amazon-bedrock-hello` | Python extra | +| Cloudflare Workers AI | No | `provider-cloudflare-workers-ai-hello` | Python extra | +| Microsoft Foundry | No | `provider-microsoft-foundry-hello` | Python extra | + +--- + +## 3. Incomplete Hello Samples + +Several hello samples are missing `generate_with_system_prompt` and/or +`generate_multi_turn_chat` flows that other hello samples already have. 
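+
+> **Illustrative sketch:** the flow shape being backfilled in the checklists
+> below, assuming `generate()` accepts a `system=` keyword (the Python analog
+> of the JS `system:` option). The multi-turn counterpart follows the same
+> pattern with a `messages=` list of prior turns.
+
+```python
+from genkit.ai import Genkit
+from genkit.plugins.google_genai import GoogleAI
+
+ai = Genkit(plugins=[GoogleAI()], model='googleai/gemini-2.5-flash')
+
+
+@ai.flow()
+async def generate_with_system_prompt(question: str) -> str:
+    # Assumption: system= is the Python analog of the JS system: option.
+    response = await ai.generate(
+        system='You are a terse pirate. Answer in one sentence.',
+        prompt=question,
+    )
+    return response.text
+```
+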
+ +### `generate_with_system_prompt` flow + +- [x] `provider-microsoft-foundry-hello` -- DONE +- [x] `provider-mistral-hello` -- DONE +- [x] `provider-huggingface-hello` -- DONE +- [x] `provider-google-genai-vertexai-hello` -- DONE +- [ ] `web-short-n-long` (still uses old name `system_prompt`) +- [ ] `provider-vertex-ai-model-garden` (still uses old name `system_prompt`) + +### `generate_multi_turn_chat` flow + +- [x] `provider-microsoft-foundry-hello` -- DONE +- [x] `provider-google-genai-vertexai-hello` -- DONE +- [ ] `web-short-n-long` (still uses old name `multi_turn_chat`) +- [ ] `provider-vertex-ai-model-garden` (still uses old name `multi_turn_chat`) + +--- + +## 4. Items Already Covered (verified) + +These were previously flagged as gaps but are now confirmed covered: + +| Feature | Sample | Notes | +|---------|--------|-------| +| Streaming + structured output | `provider-google-genai-hello` | Has streaming structured output flow | +| Media generation (images) | `provider-google-genai-media-models-demo` | Imagen, Gemini Image, image editing | +| Media generation (TTS) | `provider-google-genai-media-models-demo`, `provider-compat-oai-hello` | Google TTS, OpenAI TTS | +| Reranker | `provider-vertex-ai-rerank-eval` | Vertex AI semantic reranker + eval metrics | +| Dynamic tools | `framework-dynamic-tools-demo` | Standalone sample with `ai.dynamic_tool()` | +| Flow steps (`ai.run()`) | `provider-google-genai-hello`, `framework-realtime-tracing-demo` | Named trace spans | +| Multimodal input | Multiple hello samples | Image, video, audio input | +| Tool interrupts | `framework-tool-interrupts`, `provider-google-genai-hello` | Full interrupt + resume flow | +| Context propagation | `framework-context-demo` | 4 flows covering generate, flow, tool, and current_context | +| Custom middleware | `framework-middleware-demo` | Retry, logging, and chained middleware | +| Streaming + tool calling | All provider hello samples | `generate_streaming_with_tools` flow in all 12 | + +--- + +## 5. Items Out of Scope (not in Python SDK) + +| Feature | Doc Page | Reason | +|---------|----------|--------| +| Chat/Session API | `chat.mdx` | Deprecated | +| Agents / Multi-Agent | `agentic-patterns.mdx`, `multi-agent.mdx` | Not yet in Python SDK | +| MCP | `mcp-server.mdx`, `model-context-protocol.mdx` | Will come later | +| Durable Streaming | `durable-streaming.mdx` | Not in Python SDK | +| `defineInterrupt()` | `interrupts.mdx` | Only tool-based interrupts in Python | +| Client SDK | `client.mdx` | JS client-side only | + +--- + +## 6. 
Execution Roadmap + +### Dependency Graph + +```mermaid +flowchart TD + subgraph phase0 [Phase 0 - Leaves] + B1a["B1a: Remove recipe.robot.prompt DONE"] + B1b["B1b: Fix SDK lazy loading bug firebase/genkit#4491"] + G1["G1: Context demo DONE"] + G6["G6: Streaming + tools DONE"] + G7["G7: Custom middleware DONE"] + N6["N6: Fix typo firestore-retriever DONE"] + end + + subgraph phase1 [Phase 1 - Dotprompt + Eval + Hello] + G2["G2: Dotprompt tool calling"] + G3["G3: Dotprompt define in code"] + G4["G4: Dotprompt multimodal"] + G5["G5: Dotprompt defaults"] + G8["G8: Eval pipeline"] + H1["H1: generate_with_system_prompt 2 remaining"] + H2["H2: generate_multi_turn_chat 2 remaining"] + end + + subgraph phase2 [Phase 2 - RAG + Eval Extras] + N1["N1: Simple retriever"] + N2["N2: Custom reranker"] + N3["N3: Data synthesis"] + N4["N4: Indexer sample"] + N7["N7: Firebase Functions"] + end + + subgraph phase3 [Phase 3 - Polish] + N5["N5: DevUI gallery"] + end + + B1a --> B1b + B1b --> G2 + B1b --> G3 + B1b --> G4 + B1b --> G5 + G8 --> N3 + G1 --> N5 + G2 --> N5 + G3 --> N5 + G6 --> N5 + G7 --> N5 + G8 --> N5 + H1 --> N5 +``` + +### Edge List + +`A -> B` means "A must complete before B can start": + +- `B1a -> B1b` (removing the bad variant file makes the sample usable; SDK fix restores variant support) +- `B1b -> G2` (SDK fix unblocks dotprompt tool calling) +- `B1b -> G3` (SDK fix unblocks dotprompt define-in-code) +- `B1b -> G4` (SDK fix unblocks dotprompt multimodal) +- `B1b -> G5` (SDK fix unblocks dotprompt defaults) +- `G8 -> N3` (eval pipeline design informs data synthesis) +- `{G1, G2, G3, G6, G7, G8, H1} -> N5` (DevUI gallery showcases all features) + +**Critical path:** `B1a -> B1b -> G2/G3/G4/G5 -> N5` + +--- + +### Phase 0: Leaves (no dependencies, all parallel) -- MOSTLY DONE + +All tasks in this phase are independent (except B1a -> B1b which are sequential). + +| Task | Description | Status | Notes | +|------|-------------|--------|-------| +| **B1a** | Remove `recipe.robot.prompt` from framework-prompt-demo to unblock the sample. | **DONE** | Variant file and code removed | +| **B1b** | Fix the SDK lazy loading bug in `create_prompt_from_file()` that causes infinite recursion when loading `.variant.prompt` files. Root cause: self-referential loop where `resolve_action_by_key()` is called with own key before `_cached_prompt` is set. Once fixed, re-add variant demo. | **BLOCKED** | Tracked at [firebase/genkit#4491](https://github.com/firebase/genkit/issues/4491). Only Python affected. | +| **G1** | Context demo -- `framework-context-demo` with flows for `context=` in generate, context in flows, context in tools, auto-propagation, `ai.current_context()`. | **DONE** | 4 flows: `context_in_generate`, `context_in_flow`, `context_current_context`, `context_propagation_chain` | +| **G6** | Streaming + tool calling -- `generate_streaming_with_tools` flow added to all 12 provider hello samples. | **DONE** | Uses shared `generate_streaming_with_tools_logic` | +| **G7** | Custom middleware demo -- `framework-middleware-demo` with retry, logging, and chained middleware. | **DONE** | 3 flows: `logging_demo`, `request_modifier_demo`, `chained_middleware_demo` | +| **N6** | Rename `firestore-retreiver` to `firestore-retriever` (typo fix). Now `provider-firestore-retriever`. | **DONE** | Directory renamed | + +--- + +### Phase 1: Dotprompt Completion + Eval + Hello Consistency + +G2-G5 are all unblocked by B1b. G8, H1, H2 are independent leaves placed here +for workload balancing. 
+ +| Task | Description | Depends On | Status | +|------|-------------|------------|--------| +| **G2** | Dotprompt: tool calling in prompts -- add a `.prompt` file with `tools: [search, calculate]` in frontmatter, plus a flow that loads and runs it. | B1b | Pending | +| **G3** | Dotprompt: define prompts in code -- add `ai.define_prompt()` usage (no `.prompt` file, purely programmatic). | B1b | Pending | +| **G4** | Dotprompt: multimodal prompts -- add a `.prompt` file using `{{media url=photoUrl}}` helper with image input schema. | B1b | Pending | +| **G5** | Dotprompt: default input values -- add `default:` section to an existing or new `.prompt` file. | B1b | Pending | +| **G8** | Eval pipeline sample -- end-to-end evaluation: define a custom evaluator, prepare a dataset, run inference-based eval, report results. | -- | Pending | +| **H1** | Add `generate_with_system_prompt` flow to 2 remaining samples: `web-short-n-long`, `provider-vertex-ai-model-garden`. | -- | 4/6 Done | +| **H2** | Add `generate_multi_turn_chat` flow to 2 remaining samples: `web-short-n-long`, `provider-vertex-ai-model-garden`. | -- | 4/6 Done | + +**Parallelizable:** G2-G5 are independent of each other (all just need B1b). +G8, H1, H2 are independent of everything. + +--- + +### Phase 2: RAG and Eval Extras + +Lower-priority items that round out coverage for `rag.mdx` and `evaluation.mdx`. +N3 depends on G8. All others are independent leaves. + +| Task | Description | Depends On | Status | +|------|-------------|------------|--------| +| **N1** | Simple retriever -- `ai.define_simple_retriever()` equivalent if SDK supports it, or a minimal custom retriever pattern. | -- | Pending | +| **N2** | Custom reranker -- `ai.define_reranker()` with custom scoring logic. | -- | Pending | +| **N3** | Data synthesis -- generate test questions from documents using an LLM. | G8 | Pending | +| **N4** | Indexer sample -- document ingestion pipeline: chunk PDFs, generate embeddings, store in vector DB. | -- | Pending | +| **N7** | Firebase Functions sample -- Python Cloud Functions deployment with Genkit. | -- | Pending | + +--- + +### Phase 3: Polish + +DevUI gallery depends on most features being in place so it can showcase them all. + +| Task | Description | Depends On | Status | +|------|-------------|------------|--------| +| **N5** | DevUI gallery -- a single sample that showcases all DevUI features: prompts, flows, tools, evaluators, structured output, streaming, context, middleware. 
| G1, G2, G3, G6, G7, G8, H1 | Pending | + +--- + +### Execution Timeline + +``` +TIME --> +========================================================================== + +P0: [B1a: remove recipe.robot.prompt] DONE + [B1b: fix SDK lazy loading bug] BLOCKED (firebase/genkit#4491) + [G1: context demo] DONE + [G6: streaming+tools] DONE + [G7: custom middleware] DONE + [N6: typo fix] DONE + (5 of 6 P0 tasks complete; B1b awaits SDK fix) + | + --- P0 partially complete (B1b on critical path) --- + | +SDK: [S1: fix plugin structlog blowaway ~~~~] (HIGH - 5 plugins) + [S2: fix awarn protocol gap ~~~~~~~~~~~] (LOW) + [S3: fix ToolRunContext sole param ~~~~] (MEDIUM - #4492) + [S4: fix lazy loading recursion ~~~~~~~] (MEDIUM - #4491, same as B1b) + (all independent, each a separate PR) + | +P1: [G2: dotprompt tools ~~] [G8: eval pipeline ~~~~~~] + [G3: dotprompt code ~~~] [H1: system_prompt x2 ~~~] + [G4: dotprompt media ~~] [H2: multi_turn x2 ~~~~~~] + [G5: dotprompt defaults] + (G2-G5 blocked by B1b/S4; G8/H1/H2 ready now) + | + --- all P1 complete --- + | +P2: [N1: simple retriever ~~~] [N4: indexer ~~~~~~~~] + [N2: custom reranker ~~~~] [N7: firebase funcs ~] + [N3: data synthesis ~~~~~~~~~~] + (N1/N2/N4/N7 parallel, N3 after G8) + | + --- all P2 complete --- + | +P3: [N5: DevUI gallery ~~~~~~~~~~~~~~] + | + === SAMPLE PARITY COMPLETE === +``` + +--- + +### Progress Summary + +| Phase | Tasks | Done | Remaining | Blockers | +|-------|-------|------|-----------|----------| +| **P0** | B1a, B1b, G1, G6, G7, N6 | 5/6 | B1b | firebase/genkit#4491 | +| **P1** | G2, G3, G4, G5, G8, H1, H2 | 0/7 (H1 4/6, H2 4/6) | All | G2-G5 blocked by B1b | +| **P2** | N1, N2, N3, N4, N7 | 0/5 | All | N3 blocked by G8 | +| **P3** | N5 | 0/1 | All | Broad P0-P2 deps | +| **SDK** | S1, S2, S3, S4 | 0/4 | All | Separate PRs needed | +| **Total** | 23 tasks | ~5.5 | ~17.5 | 1 SDK bug + 4 SDK fixes | + +--- + +## 7. Pending SDK / Infrastructure Fixes (Separate PRs) + +Issues discovered during the sample consolidation and logging refactoring. +These should NOT be fixed in the samples PR -- each needs its own PR touching +core SDK or plugin code. + +### SDK Bugs + +| ID | Severity | Description | Affected Code | Notes | +|----|----------|-------------|---------------|-------| +| **S1** | **HIGH** | **Observability plugins blow away structlog config.** Five plugins call `structlog.configure(processors=new_processors)` with *only* the `processors` kwarg. Since `structlog.configure()` is a full-replace (not partial-update), this resets `wrapper_class`, `logger_factory`, `cache_logger_on_first_use` back to defaults -- silently destroying any custom structlog setup (e.g. the `setup_sample()` stdlib integration). **Fix:** Use `structlog.configure(**{**structlog.get_config(), 'processors': new_processors})` to preserve the full config. | `py/plugins/observability/`, `py/plugins/google-cloud/`, `py/plugins/amazon-bedrock/`, `py/plugins/microsoft-foundry/`, `py/plugins/cloudflare-workers-ai/` (all in `telemetry/tracing.py`) | All 5 follow identical pattern | +| **S2** | LOW | **`awarn` gap in `Logger` protocol.** `genkit.core.logging.Logger` declares `awarn()` but `structlog.stdlib.BoundLogger` only has `awarning` (no `awarn` alias). Calling `logger.awarn(...)` would raise `AttributeError`. Previously masked because `make_filtering_bound_logger` dynamically creates all method names. **Fix:** Either remove `awarn`/`warn` from the protocol, or add runtime aliases. 
| `py/packages/genkit/src/genkit/core/logging.py` | Only matters if `awarn` is actually called somewhere | +| **S3** | MEDIUM | **`ToolRunContext` as sole parameter crashes with `PydanticSchemaGenerationError`.** When a `@ai.tool()` has `ToolRunContext` as its only parameter, the SDK tries to create a `TypeAdapter` for it (which fails) and would also dispatch the tool input instead of the context at runtime. **Workaround:** Use `Genkit.current_context()` with zero-arg tools. | `py/packages/genkit/src/genkit/core/action/_action.py` (lines 493-494, 592-598), `py/packages/genkit/src/genkit/ai/_registry.py` (lines 555-565) | Tracked at [firebase/genkit#4492](https://github.com/firebase/genkit/issues/4492) | +| **S4** | MEDIUM | **SDK lazy loading infinite recursion for `.variant.prompt` files.** `create_prompt_from_file()` self-references via `resolve_action_by_key()` before caching, causing `RecursionError`. | `py/packages/genkit/src/genkit/blocks/prompt.py` | Tracked at [firebase/genkit#4491](https://github.com/firebase/genkit/issues/4491) | + +### Sample Naming Convention + +All samples follow a consistent prefix scheme: + +| Prefix | Category | Examples | +|--------|----------|----------| +| `provider-` | Model provider-specific | `provider-google-genai-hello`, `provider-anthropic-hello`, `provider-vertex-ai-model-garden` | +| `framework-` | Genkit framework features | `framework-context-demo`, `framework-middleware-demo`, `framework-prompt-demo` | +| `web-` | Web framework integration | `web-flask-hello`, `web-multi-server`, `web-short-n-long` | +| (none) | Other | `dev-local-vectorstore-hello` | diff --git a/py/engdoc/planning/azure-telemetry-plugin.md b/py/engdoc/planning/azure-telemetry-plugin.md index 1f124b83ef..748a5229c2 100644 --- a/py/engdoc/planning/azure-telemetry-plugin.md +++ b/py/engdoc/planning/azure-telemetry-plugin.md @@ -362,7 +362,7 @@ Visual diagram of: ## Sample Application ```python -# py/samples/microsoft-foundry-hello/src/main.py +# py/samples/provider-microsoft-foundry-hello/src/main.py """Azure telemetry hello sample - Monitor Genkit with Application Insights. Key Concepts (ELI5):: diff --git a/py/engdoc/planning/cloudflare-ai-plugin.md b/py/engdoc/planning/cloudflare-ai-plugin.md index e81dc820dd..cb9e4d64ea 100644 --- a/py/engdoc/planning/cloudflare-ai-plugin.md +++ b/py/engdoc/planning/cloudflare-ai-plugin.md @@ -332,7 +332,7 @@ async def _generate_stream( ## Sample Application ```python -# py/samples/cloudflare-workers-ai-hello/src/main.py +# py/samples/provider-cloudflare-workers-ai-hello/src/main.py """Cloudflare Workers AI hello sample - Edge AI with Genkit.""" from genkit.ai import Genkit diff --git a/py/engdoc/planning/observability-plugin.md b/py/engdoc/planning/observability-plugin.md index d6ec9c5256..0856922bb6 100644 --- a/py/engdoc/planning/observability-plugin.md +++ b/py/engdoc/planning/observability-plugin.md @@ -377,7 +377,7 @@ dev = [ ## Sample Application ```python -# py/samples/observability-hello/src/main.py +# py/samples/provider-observability-hello/src/main.py """Observability hello sample - Third-party telemetry with Genkit. Key Concepts (ELI5):: @@ -405,7 +405,7 @@ from genkit.plugins.google_genai import GoogleAI configure_telemetry( backend="honeycomb", # or "sentry", "datadog", etc. 
honeycomb_api_key=os.environ["HONEYCOMB_API_KEY"], - service_name="observability-hello", + service_name="provider-observability-hello", ) ai = Genkit( diff --git a/py/plugins/amazon-bedrock/src/genkit/plugins/amazon_bedrock/models/model_info.py b/py/plugins/amazon-bedrock/src/genkit/plugins/amazon_bedrock/models/model_info.py index e81449dddf..7d8a50c6da 100644 --- a/py/plugins/amazon-bedrock/src/genkit/plugins/amazon_bedrock/models/model_info.py +++ b/py/plugins/amazon-bedrock/src/genkit/plugins/amazon_bedrock/models/model_info.py @@ -229,6 +229,11 @@ versions=['anthropic.claude-opus-4-1-20250805-v1:0'], supports=CLAUDE_MODEL_SUPPORTS, ), + 'anthropic.claude-opus-4-6-20260205-v1:0': ModelInfo( + label='Claude Opus 4.6', + versions=['anthropic.claude-opus-4-6-20260205-v1:0'], + supports=CLAUDE_MODEL_SUPPORTS, + ), 'anthropic.claude-haiku-4-5-20251001-v1:0': ModelInfo( label='Claude Haiku 4.5', versions=['anthropic.claude-haiku-4-5-20251001-v1:0'], diff --git a/py/plugins/anthropic/src/genkit/plugins/anthropic/model_info.py b/py/plugins/anthropic/src/genkit/plugins/anthropic/model_info.py index 27b9aee439..e86d579d95 100644 --- a/py/plugins/anthropic/src/genkit/plugins/anthropic/model_info.py +++ b/py/plugins/anthropic/src/genkit/plugins/anthropic/model_info.py @@ -119,6 +119,22 @@ ), ) +# Source: https://docs.anthropic.com/en/docs/about-claude/models +# Released: February 5, 2026. Most capable model — excels in coding, agents, +# and enterprise workflows. Supports 1M context window (beta). +CLAUDE_OPUS_4_6 = ModelInfo( + label='Anthropic - Claude Opus 4.6', + versions=['claude-opus-4-6-20260205'], + supports=Supports( + multiturn=True, + media=True, + tools=True, + system_role=True, + output=['text', 'json'], + constrained=Constrained.ALL, + ), +) + SUPPORTED_ANTHROPIC_MODELS: dict[str, ModelInfo] = { 'claude-3-haiku': CLAUDE_3_HAIKU, 'claude-3-5-haiku': CLAUDE_3_5_HAIKU, @@ -128,6 +144,7 @@ 'claude-haiku-4-5': CLAUDE_HAIKU_4_5, 'claude-opus-4-1': CLAUDE_OPUS_4_1, 'claude-opus-4-5': CLAUDE_OPUS_4_5, + 'claude-opus-4-6': CLAUDE_OPUS_4_6, } DEFAULT_SUPPORTS = Supports( diff --git a/py/plugins/anthropic/tests/plugin_test.py b/py/plugins/anthropic/tests/plugin_test.py index 0e8119110f..36d8772a42 100644 --- a/py/plugins/anthropic/tests/plugin_test.py +++ b/py/plugins/anthropic/tests/plugin_test.py @@ -96,7 +96,7 @@ async def test_resolve_action_model() -> None: def test_supported_models() -> None: """Test that all supported models have proper metadata.""" - assert len(SUPPORTED_MODELS) == 8 + assert len(SUPPORTED_MODELS) == 9 for _name, info in SUPPORTED_MODELS.items(): assert info.label is not None assert info.label.startswith('Anthropic - ') diff --git a/py/plugins/cloudflare-workers-ai/README.md b/py/plugins/cloudflare-workers-ai/README.md index 6b2de3e786..8cc20dae4e 100644 --- a/py/plugins/cloudflare-workers-ai/README.md +++ b/py/plugins/cloudflare-workers-ai/README.md @@ -108,7 +108,7 @@ add_cloudflare_telemetry() Run the sample application: ```bash -cd py/samples/cloudflare-workers-ai-hello +cd py/samples/provider-cloudflare-workers-ai-hello ./run.sh ``` diff --git a/py/plugins/compat-oai/src/genkit/plugins/compat_oai/models/model_info.py b/py/plugins/compat-oai/src/genkit/plugins/compat_oai/models/model_info.py index 8348ce84c0..149e720c6e 100644 --- a/py/plugins/compat-oai/src/genkit/plugins/compat_oai/models/model_info.py +++ b/py/plugins/compat-oai/src/genkit/plugins/compat_oai/models/model_info.py @@ -84,9 +84,29 @@ class PluginSource(StrEnum): LLAMA_3_1 = 
'meta/llama-3.1-405b-instruct-maas' LLAMA_3_2 = 'meta/llama-3.2-90b-vision-instruct-maas' +# Source: https://platform.openai.com/docs/models SUPPORTED_OPENAI_MODELS: dict[str, ModelInfo] = { + # --- GPT-4o series --- 'gpt-4o': ModelInfo(label='OpenAI - gpt-4o', supports=MULTIMODAL_MODEL_SUPPORTS), 'gpt-4o-2024-05-13': ModelInfo(label='OpenAI - gpt-4o-2024-05-13', supports=MULTIMODAL_MODEL_SUPPORTS), + 'gpt-4o-mini': ModelInfo(label='OpenAI - gpt-4o-mini', supports=MULTIMODAL_MODEL_SUPPORTS), + 'gpt-4o-mini-2024-07-18': ModelInfo(label='OpenAI - gpt-4o-mini-2024-07-18', supports=MULTIMODAL_MODEL_SUPPORTS), + # --- GPT-4.x series --- + 'gpt-4.5-preview': ModelInfo(label='OpenAI - gpt-4.5-preview', supports=MULTIMODAL_MODEL_SUPPORTS), + 'gpt-4.1': ModelInfo(label='OpenAI - gpt-4.1', supports=MULTIMODAL_MODEL_SUPPORTS), + 'gpt-4.1-mini': ModelInfo(label='OpenAI - gpt-4.1-mini', supports=MULTIMODAL_MODEL_SUPPORTS), + 'gpt-4-turbo': ModelInfo(label='OpenAI - gpt-4-turbo', supports=MULTIMODAL_MODEL_SUPPORTS), + 'gpt-4-turbo-2024-04-09': ModelInfo(label='OpenAI - gpt-4-turbo-2024-04-09', supports=MULTIMODAL_MODEL_SUPPORTS), + 'gpt-4-turbo-preview': ModelInfo(label='OpenAI - gpt-4-turbo-preview', supports=MULTIMODAL_MODEL_SUPPORTS), + 'gpt-4-0125-preview': ModelInfo(label='OpenAI - gpt-4-0125-preview', supports=MULTIMODAL_MODEL_SUPPORTS), + 'gpt-4-1106-preview': ModelInfo(label='OpenAI - gpt-4-1106-preview', supports=MULTIMODAL_MODEL_SUPPORTS), + 'gpt-4': ModelInfo(label='OpenAI - gpt-4', supports=GPT_4_MODEL_SUPPORTS), + 'gpt-4-0613': ModelInfo(label='OpenAI - gpt-4-0613', supports=GPT_4_MODEL_SUPPORTS), + # --- GPT-3.5 series --- + 'gpt-3.5-turbo': ModelInfo(label='OpenAI - gpt-3.5-turbo', supports=GPT_35_MODEL_SUPPORTS), + 'gpt-3.5-turbo-0125': ModelInfo(label='OpenAI - gpt-3.5-turbo-0125', supports=GPT_35_MODEL_SUPPORTS), + 'gpt-3.5-turbo-1106': ModelInfo(label='OpenAI - gpt-3.5-turbo-1106', supports=GPT_35_MODEL_SUPPORTS), + # --- O-series (reasoning) --- 'o1': ModelInfo(label='OpenAI - o1', supports=O_SERIES_MODEL_SUPPORTS), 'o3': ModelInfo(label='OpenAI - o3', supports=O_SERIES_MODEL_SUPPORTS), 'o3-mini': ModelInfo( @@ -99,20 +119,9 @@ class PluginSource(StrEnum): output=[SupportedOutputFormat.JSON_MODE, SupportedOutputFormat.TEXT], ), ), + 'o3-pro': ModelInfo(label='OpenAI - o3-pro', supports=O_SERIES_MODEL_SUPPORTS), 'o4-mini': ModelInfo(label='OpenAI - o4-mini', supports=O_SERIES_MODEL_SUPPORTS), - 'gpt-4o-mini': ModelInfo(label='OpenAI - gpt-4o-mini', supports=MULTIMODAL_MODEL_SUPPORTS), - 'gpt-4o-mini-2024-07-18': ModelInfo(label='OpenAI - gpt-4o-mini-2024-07-18', supports=MULTIMODAL_MODEL_SUPPORTS), - 'gpt-4-turbo': ModelInfo(label='OpenAI - gpt-4-turbo', supports=MULTIMODAL_MODEL_SUPPORTS), - 'gpt-4-turbo-2024-04-09': ModelInfo(label='OpenAI - gpt-4-turbo-2024-04-09', supports=MULTIMODAL_MODEL_SUPPORTS), - 'gpt-4-turbo-preview': ModelInfo(label='OpenAI - gpt-4-turbo-preview', supports=MULTIMODAL_MODEL_SUPPORTS), - 'gpt-4-0125-preview': ModelInfo(label='OpenAI - gpt-4-0125-preview', supports=MULTIMODAL_MODEL_SUPPORTS), - 'gpt-4-1106-preview': ModelInfo(label='OpenAI - gpt-4-1106-preview', supports=MULTIMODAL_MODEL_SUPPORTS), - 'gpt-4.5-preview': ModelInfo(label='OpenAI - gpt-4.5-preview', supports=MULTIMODAL_MODEL_SUPPORTS), - 'gpt-4': ModelInfo(label='OpenAI - gpt-4', supports=GPT_4_MODEL_SUPPORTS), - 'gpt-4-0613': ModelInfo(label='OpenAI - gpt-4-0613', supports=GPT_4_MODEL_SUPPORTS), - 'gpt-3.5-turbo': ModelInfo(label='OpenAI - gpt-3.5-turbo', supports=GPT_35_MODEL_SUPPORTS), - 
'gpt-3.5-turbo-0125': ModelInfo(label='OpenAI - gpt-3.5-turbo-0125', supports=GPT_35_MODEL_SUPPORTS), - 'gpt-3.5-turbo-1106': ModelInfo(label='OpenAI - gpt-3.5-turbo-1106', supports=GPT_35_MODEL_SUPPORTS), + # --- GPT-5 series --- 'gpt-5': ModelInfo(label='OpenAI - gpt-5', supports=GPT_5_MODEL_SUPPORTS), 'gpt-5-mini': ModelInfo(label='OpenAI - gpt-5-mini', supports=GPT_5_MODEL_SUPPORTS), 'gpt-5-nano': ModelInfo(label='OpenAI - gpt-5-nano', supports=GPT_5_MODEL_SUPPORTS), @@ -127,6 +136,12 @@ class PluginSource(StrEnum): ), ), 'gpt-5.1': ModelInfo(label='OpenAI - gpt-5.1', supports=GPT_5_MODEL_SUPPORTS), + 'gpt-5.1-codex': ModelInfo(label='OpenAI - gpt-5.1-codex', supports=GPT_5_MODEL_SUPPORTS), + 'gpt-5.1-codex-max': ModelInfo(label='OpenAI - gpt-5.1-codex-max', supports=GPT_5_MODEL_SUPPORTS), + 'gpt-5.2': ModelInfo(label='OpenAI - gpt-5.2', supports=GPT_5_MODEL_SUPPORTS), + 'gpt-5.2-chat': ModelInfo(label='OpenAI - gpt-5.2-chat', supports=GPT_5_MODEL_SUPPORTS), + 'gpt-5.2-pro': ModelInfo(label='OpenAI - gpt-5.2-pro', supports=GPT_5_MODEL_SUPPORTS), + 'gpt-5.3-codex': ModelInfo(label='OpenAI - gpt-5.3-codex', supports=GPT_5_MODEL_SUPPORTS), } SUPPORTED_EMBEDDING_MODELS: dict[str, dict] = { diff --git a/py/plugins/compat-oai/src/genkit/plugins/compat_oai/models/utils.py b/py/plugins/compat-oai/src/genkit/plugins/compat_oai/models/utils.py index 9a68a6d049..21905a381b 100644 --- a/py/plugins/compat-oai/src/genkit/plugins/compat_oai/models/utils.py +++ b/py/plugins/compat-oai/src/genkit/plugins/compat_oai/models/utils.py @@ -321,21 +321,36 @@ def _get_openai_role(cls, role: Role | str) -> str: def to_openai(cls, message: Message) -> list[dict]: """Converts an internal `Message` object to OpenAI-compatible chat messages. + Handles TextPart, MediaPart (images), ToolRequestPart, and + ToolResponsePart. When a message contains MediaPart content, the + ``content`` field uses the array-of-content-blocks format required + by the OpenAI Chat Completions API for multimodal requests. + + Matches the JS canonical implementation in ``toOpenAIMessages()``. + Args: message: The internal `Message` instance. Returns: A list of OpenAI-compatible message dictionaries. """ - text_parts = [] + content_parts: list[dict[str, Any]] = [] tool_calls = [] tool_messages = [] + has_media = False for part in message.content: root = part.root if isinstance(root, TextPart): - text_parts.append(root.text) + content_parts.append({'type': 'text', 'text': root.text}) + + elif isinstance(root, MediaPart): + has_media = True + content_parts.append({ + 'type': 'image_url', + 'image_url': {'url': root.media.url}, + }) elif isinstance(root, ToolRequestPart): tool_calls.append({ @@ -355,13 +370,21 @@ def to_openai(cls, message: Message) -> list[dict]: 'content': str(tool_call.output), }) - result = [] - - if text_parts: - result.append({ - 'role': cls._get_openai_role(message.role), - 'content': ''.join(text_parts), - }) + result: list[dict[str, Any]] = [] + + if content_parts: + role = cls._get_openai_role(message.role) + if has_media: + # Multimodal: content is an array of typed content blocks. + result.append({'role': role, 'content': content_parts}) + else: + # Text-only: content is a plain string (matching JS behavior + # where text-only messages use string content for + # compatibility with older model endpoints). 
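+                # has_media is False here, so every entry in content_parts is a
+                # text block and is guaranteed to carry a 'text' key.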
+ result.append({ + 'role': role, + 'content': ''.join(p['text'] for p in content_parts), + }) if tool_calls: result.append({ diff --git a/py/plugins/compat-oai/tests/utils_test.py b/py/plugins/compat-oai/tests/utils_test.py index a0e6570f7f..a7b3f84dea 100644 --- a/py/plugins/compat-oai/tests/utils_test.py +++ b/py/plugins/compat-oai/tests/utils_test.py @@ -41,7 +41,10 @@ ReasoningPart, Role, TextPart, + ToolRequest, ToolRequestPart, + ToolResponse, + ToolResponsePart, ) @@ -368,7 +371,7 @@ def test_skips_text_parts_finds_media(self) -> None: ) ] ) - url, ct = _extract_media(request) + _, ct = _extract_media(request) assert ct == 'image/png' def test_content_type_from_data_uri_without_base64_qualifier(self) -> None: @@ -542,3 +545,149 @@ def test_role_defaults_to_model(self) -> None: }) msg = MessageConverter.to_genkit(adapter) assert msg.role == Role.MODEL + + +class TestMessageConverterToOpenAI: + """Tests for MessageConverter.to_openai().""" + + def test_text_only_message_uses_string_content(self) -> None: + """Text-only messages should produce a plain string content field.""" + message = Message( + role=Role.USER, + content=[Part(root=TextPart(text='Hello world'))], + ) + result = MessageConverter.to_openai(message) + assert len(result) == 1 + assert result[0] == {'role': 'user', 'content': 'Hello world'} + + def test_multiple_text_parts_concatenated(self) -> None: + """Multiple text parts should be concatenated into one string.""" + message = Message( + role=Role.USER, + content=[ + Part(root=TextPart(text='Hello ')), + Part(root=TextPart(text='world')), + ], + ) + result = MessageConverter.to_openai(message) + assert len(result) == 1 + assert result[0]['content'] == 'Hello world' + + def test_media_part_produces_image_url_block(self) -> None: + """A MediaPart should produce an image_url content block.""" + message = Message( + role=Role.USER, + content=[ + Part(root=MediaPart(media=Media(url='https://example.com/cat.jpg', content_type='image/jpeg'))), + ], + ) + result = MessageConverter.to_openai(message) + assert len(result) == 1 + assert result[0]['role'] == 'user' + content = result[0]['content'] + assert isinstance(content, list) + assert len(content) == 1 + assert content[0] == { + 'type': 'image_url', + 'image_url': {'url': 'https://example.com/cat.jpg'}, + } + + def test_text_and_media_produces_content_array(self) -> None: + """Mixed text + media should produce an array of content blocks. + + This is the multimodal vision format required by the OpenAI Chat + Completions API, matching the JS canonical toOpenAIMessages(). 
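+
+        Expected shape (sketch)::
+
+            {'role': 'user', 'content': [
+                {'type': 'text', 'text': '...'},
+                {'type': 'image_url', 'image_url': {'url': '...'}},
+            ]}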
+ """ + message = Message( + role=Role.USER, + content=[ + Part(root=TextPart(text='Describe this image')), + Part(root=MediaPart(media=Media(url='https://example.com/cat.jpg', content_type='image/jpeg'))), + ], + ) + result = MessageConverter.to_openai(message) + assert len(result) == 1 + content = result[0]['content'] + assert isinstance(content, list) + assert len(content) == 2 + assert content[0] == {'type': 'text', 'text': 'Describe this image'} + assert content[1] == { + 'type': 'image_url', + 'image_url': {'url': 'https://example.com/cat.jpg'}, + } + + def test_tool_request_parts(self) -> None: + """ToolRequestParts should produce tool_calls entries.""" + message = Message( + role=Role.MODEL, + content=[ + Part( + root=ToolRequestPart( + tool_request=ToolRequest( + ref='call_1', + name='get_weather', + input={'location': 'NYC'}, + ) + ) + ) + ], + ) + result = MessageConverter.to_openai(message) + assert len(result) == 1 + assert result[0]['role'] == 'assistant' + assert 'tool_calls' in result[0] + tc = result[0]['tool_calls'][0] + assert tc['id'] == 'call_1' + assert tc['function']['name'] == 'get_weather' + + def test_tool_response_parts(self) -> None: + """ToolResponseParts should produce tool role messages.""" + message = Message( + role=Role.TOOL, + content=[ + Part( + root=ToolResponsePart( + tool_response=ToolResponse( + ref='call_1', + name='get_weather', + output='Sunny, 72F', + ) + ) + ) + ], + ) + result = MessageConverter.to_openai(message) + assert len(result) == 1 + assert result[0]['role'] == 'tool' + assert result[0]['tool_call_id'] == 'call_1' + assert result[0]['content'] == 'Sunny, 72F' + + def test_model_role_maps_to_assistant(self) -> None: + """Role.MODEL should map to 'assistant' in OpenAI format.""" + message = Message( + role=Role.MODEL, + content=[Part(root=TextPart(text='Hi there'))], + ) + result = MessageConverter.to_openai(message) + assert result[0]['role'] == 'assistant' + + def test_data_uri_media_url_preserved(self) -> None: + """Data URI media URLs should be passed through unchanged.""" + data_uri = 'data:image/png;base64,iVBORw0KGgo=' + message = Message( + role=Role.USER, + content=[ + Part(root=TextPart(text='What is this?')), + Part(root=MediaPart(media=Media(url=data_uri))), + ], + ) + result = MessageConverter.to_openai(message) + content = result[0]['content'] + assert isinstance(content, list) + assert content[1]['image_url']['url'] == data_uri + + def test_empty_message_produces_no_result(self) -> None: + """A message with no content parts should produce an empty result.""" + message = Message(role=Role.USER, content=[]) + result = MessageConverter.to_openai(message) + assert result == [] diff --git a/py/plugins/google-genai/README.md b/py/plugins/google-genai/README.md index 436777263d..24448fd76a 100644 --- a/py/plugins/google-genai/README.md +++ b/py/plugins/google-genai/README.md @@ -119,5 +119,5 @@ for result in results.root: For comprehensive usage examples, see: -- [`py/samples/google-genai-hello/README.md`](../../samples/google-genai-hello/README.md) - Basic Gemini usage -- [`py/samples/vertexai-rerank-eval/README.md`](../../samples/vertexai-rerank-eval/README.md) - Rerankers and evaluators +- [`py/samples/provider-google-genai-hello/README.md`](../../samples/provider-google-genai-hello/README.md) - Basic Gemini usage +- [`py/samples/provider-vertex-ai-rerank-eval/README.md`](../../samples/provider-vertex-ai-rerank-eval/README.md) - Rerankers and evaluators diff --git 
a/py/plugins/google-genai/src/genkit/plugins/google_genai/models/gemini.py b/py/plugins/google-genai/src/genkit/plugins/google_genai/models/gemini.py index cee58628a3..a640d68963 100644 --- a/py/plugins/google-genai/src/genkit/plugins/google_genai/models/gemini.py +++ b/py/plugins/google-genai/src/genkit/plugins/google_genai/models/gemini.py @@ -1378,6 +1378,8 @@ async def _generate( status = 'PERMISSION_DENIED' elif e.code == 404: status = 'NOT_FOUND' + elif e.code == 429: + status = 'RESOURCE_EXHAUSTED' raise GenkitError( status=status, @@ -1504,6 +1506,8 @@ async def _streaming_generate( status = 'PERMISSION_DENIED' elif e.code == 404: status = 'NOT_FOUND' + elif e.code == 429: + status = 'RESOURCE_EXHAUSTED' raise GenkitError( status=status, diff --git a/py/plugins/microsoft-foundry/src/genkit/plugins/microsoft_foundry/models/model_info.py b/py/plugins/microsoft-foundry/src/genkit/plugins/microsoft_foundry/models/model_info.py index 949a04466a..e4a7f6cf29 100644 --- a/py/plugins/microsoft-foundry/src/genkit/plugins/microsoft_foundry/models/model_info.py +++ b/py/plugins/microsoft-foundry/src/genkit/plugins/microsoft_foundry/models/model_info.py @@ -27,9 +27,9 @@ +----------------+--------------------------------------------------+ | Category | Models | +----------------+--------------------------------------------------+ -| GPT Series | gpt-4, gpt-4o, gpt-4.1, gpt-5, gpt-5.1, gpt-5.2 | +| GPT Series | gpt-4, gpt-4o, gpt-4.1, gpt-5, gpt-5.1, gpt-5.2, gpt-5.3 | | O-Series | o1, o3, o3-mini, o4-mini | -| Claude | claude-opus-4-5, claude-sonnet-4-5, claude-haiku | +| Claude | claude-opus-4-6, claude-opus-4-5, claude-sonnet-4-5 | | DeepSeek | DeepSeek-V3.2, DeepSeek-R1-0528 | | Grok | grok-4, grok-3, grok-3-mini | | Llama | Llama-4-Maverick-17B-128E-Instruct-FP8 | @@ -284,6 +284,16 @@ versions=['gpt-5.2-codex'], supports=MULTIMODAL_MODEL_SUPPORTS, ), + 'gpt-5.2-pro': ModelInfo( + label='Microsoft Foundry - GPT-5.2 Pro', + versions=['gpt-5.2-pro'], + supports=MULTIMODAL_MODEL_SUPPORTS, + ), + 'gpt-5.3-codex': ModelInfo( + label='Microsoft Foundry - GPT-5.3 Codex', + versions=['gpt-5.3-codex'], + supports=MULTIMODAL_MODEL_SUPPORTS, + ), 'gpt-oss-120B': ModelInfo( label='Microsoft Foundry - GPT-OSS 120B', versions=['gpt-oss-120B'], @@ -317,6 +327,11 @@ versions=['claude-opus-4-1'], supports=CLAUDE_MODEL_SUPPORTS, ), + 'claude-opus-4-6': ModelInfo( + label='Microsoft Foundry - Claude Opus 4.6', + versions=['claude-opus-4-6'], + supports=CLAUDE_MODEL_SUPPORTS, + ), # ========================================================================= # DeepSeek Models # ========================================================================= diff --git a/py/plugins/ollama/src/genkit/plugins/ollama/models.py b/py/plugins/ollama/src/genkit/plugins/ollama/models.py index 982526ffbb..b5af4542d0 100644 --- a/py/plugins/ollama/src/genkit/plugins/ollama/models.py +++ b/py/plugins/ollama/src/genkit/plugins/ollama/models.py @@ -14,7 +14,73 @@ # # SPDX-License-Identifier: Apache-2.0 -"""Models package for Ollama plugin.""" +"""Models package for Ollama plugin. + +This module implements the model interface for Ollama using its Python client. 
+
+See:
+- Ollama API: https://github.com/ollama/ollama/blob/main/docs/api.md
+- Ollama Python Client: https://github.com/ollama/ollama-python
+
+Key Features
+------------
+- Chat completions using the ``/api/chat`` endpoint
+- Text generation using the ``/api/generate`` endpoint
+- Tool/function calling support
+- Streaming responses
+- Multimodal inputs (images for vision models like ``llava``)
+
+Implementation Notes & Edge Cases
+----------------------------------
+
+**Media URL Handling (Ollama-Specific Requirement)**
+
+The Ollama Python client's ``Image`` type only accepts base64 strings, raw
+bytes, or local file paths. It does **not** accept HTTP URLs or full data
+URIs. When a string value ending in a known image extension (e.g. ``.jpg``,
+``.png``) is passed, the client attempts to interpret it as a local file path
+and raises ``ValueError: File ... does not exist`` if the path doesn't exist.
+
+This means we must resolve media URLs client-side before passing to Ollama::
+
+    # Ollama client raises ValueError for HTTP URLs:
+    ollama.Image(value='https://example.com/cat.jpg')  # ❌ ValueError
+
+    # We resolve to raw bytes first:
+    image_bytes = await fetch(url)
+    ollama.Image(value=image_bytes)  # ✅ Works
+
+The ``_resolve_image()`` method handles three cases:
+
+- **Data URIs** (``data:image/jpeg;base64,...``): Strips the prefix and
+  returns the raw base64 string, matching the JS canonical Ollama plugin.
+- **HTTP/HTTPS URLs**: Downloads the image using the shared
+  ``get_cached_client()`` utility and returns raw bytes.
+- **Other strings** (local file paths, raw base64): Passed through
+  unchanged for the ``Image`` type to handle.
+
+**User-Agent Header Requirement**
+
+Some servers (notably Wikipedia/Wikimedia) block requests without a proper
+``User-Agent`` header, returning HTTP 403 Forbidden. We include a standard
+User-Agent header when fetching images::
+
+    headers = {
+        'User-Agent': 'Genkit/1.0 (https://github.com/firebase/genkit; genkit@google.com)',
+    }
+
+**JS Canonical Parity**
+
+The JS Ollama plugin (``js/plugins/ollama/src/index.ts``) bypasses the
+client library and constructs raw HTTP requests to ``/api/chat``, passing
+image data as plain strings in the ``images[]`` array. It only strips data
+URI prefixes but does **not** download HTTP URLs — the Ollama server itself
+handles URL fetching natively.
+
+The Python ``ollama`` client library adds stricter validation (via Pydantic)
+that rejects URLs, so we must download images explicitly. This is the only
+behavioral divergence from the JS plugin.
+"""
 
 import mimetypes
 from collections.abc import Callable
@@ -26,6 +92,7 @@ import ollama as ollama_api
 
 from genkit.ai import ActionRunContext
 from genkit.blocks.model import get_basic_usage_stats
+from genkit.core.http_client import get_cached_client
 from genkit.plugins.ollama.constants import (
     OllamaAPITypes,
 )
@@ -160,7 +227,7 @@ async def _chat_with_ollama(
         Returns:
             The chat response from Ollama.
         """
-        messages = self.build_chat_messages(request)
+        messages = await self.build_chat_messages(request)
         streaming_request = self.is_streaming_request(ctx=ctx)
 
         if request.output:
@@ -365,9 +432,19 @@ def build_prompt(request: GenerateRequest) -> str:
         return prompt
 
     @classmethod
-    def build_chat_messages(cls, request: GenerateRequest) -> list[ollama_api.Message]:
+    async def build_chat_messages(cls, request: GenerateRequest) -> list[ollama_api.Message]:
         """Build the messages for the chat API.
+ Handles MediaPart by converting image URLs to the format expected + by the Ollama Python client's ``Image`` type, which only accepts + base64 strings, raw bytes, or local file paths — not HTTP URLs + or full data URIs. + + For HTTP/HTTPS URLs, the image is downloaded and passed as raw + bytes. For data URIs, the ``data:...;base64,`` prefix is stripped + to extract the base64 payload. This matches the JS canonical + Ollama plugin's ``toOllamaRequest()`` behavior. + Args: request: The request to build the messages for. @@ -384,17 +461,56 @@ def build_chat_messages(cls, request: GenerateRequest) -> list[ollama_api.Messag for text_part in message.content: if isinstance(text_part.root, TextPart): item.content = (item.content or '') + text_part.root.text - if isinstance(text_part.root, ToolResponsePart): + elif isinstance(text_part.root, ToolResponsePart): item.content = (item.content or '') + str(text_part.root.tool_response.output) - if isinstance(text_part.root, MediaPart): - item['images'].append( - ollama_api.Image( - value=text_part.root.media.url, - ) - ) + elif isinstance(text_part.root, MediaPart): + image_value = await cls._resolve_image(text_part.root.media.url) + item['images'].append(ollama_api.Image(value=image_value)) messages.append(item) return messages + @staticmethod + async def _resolve_image(url: str) -> str | bytes: + """Convert a media URL to a value the Ollama Image type accepts. + + The Ollama Python client's ``Image`` type only accepts base64 + strings, raw bytes, or local file paths. This method handles: + + - **Data URIs**: Strips the ``data:...;base64,`` prefix and + returns the raw base64 string. + - **HTTP/HTTPS URLs**: Downloads the image and returns the raw + bytes. + - **Other strings** (e.g. local file paths or raw base64): + Passed through unchanged. + + Args: + url: The media URL from a ``MediaPart``. + + Returns: + A value suitable for ``ollama.Image(value=...)``. + """ + if url.startswith('data:'): + # Strip data URI prefix → raw base64: "data:image/jpeg;base64,ABC" → "ABC" + comma_idx = url.index(',') + return url[comma_idx + 1 :] + + if url.startswith(('http://', 'https://')): + # Some servers (e.g., Wikipedia/Wikimedia) block requests + # without a proper User-Agent, returning HTTP 403 Forbidden. + client = get_cached_client( + cache_key='ollama/image-fetch', + timeout=60.0, + headers={ + 'User-Agent': 'Genkit/1.0 (https://github.com/firebase/genkit; genkit@google.com)', + }, + ) + response = await client.get(url) + response.raise_for_status() + return response.content + + # Local file path or raw base64 — pass through to Image. 
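+        # (Strings ending in a known image extension are treated by the client
+        # as file paths and must exist on disk; see module notes above.)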
+ return url + @staticmethod def _to_ollama_role( role: Role, diff --git a/py/plugins/ollama/tests/models/models_test.py b/py/plugins/ollama/tests/models/models_test.py index d108a7b594..065709f36a 100644 --- a/py/plugins/ollama/tests/models/models_test.py +++ b/py/plugins/ollama/tests/models/models_test.py @@ -21,6 +21,7 @@ from typing import Any, cast from unittest.mock import ANY, AsyncMock, MagicMock, patch +import httpx import ollama as ollama_api import pytest @@ -31,6 +32,8 @@ GenerateRequest, GenerateResponseChunk, GenerationUsage, + Media, + MediaPart, Message, OutputConfig, Part, @@ -314,7 +317,9 @@ async def asyncSetUp(self) -> None: cast(Any, self.ctx).send_chunk = MagicMock() # Properly mock methods of ollama_model using patch.object - self.patcher_build_chat_messages = patch.object(self.ollama_model, 'build_chat_messages', return_value=[{}]) + self.patcher_build_chat_messages = patch.object( + self.ollama_model, 'build_chat_messages', new_callable=AsyncMock, return_value=[{}] + ) self.patcher_is_streaming_request = patch.object(self.ollama_model, 'is_streaming_request', return_value=False) self.patcher_build_request_options = patch.object( self.ollama_model, 'build_request_options', return_value={'temperature': 0.7} @@ -671,3 +676,114 @@ def test_convert_parameters(input_schema: dict[str, Any], expected_output: objec """Unit Tests for _convert_parameters function with various input schemas.""" result = _convert_parameters(input_schema) assert result == expected_output + + +class TestResolveImage(unittest.IsolatedAsyncioTestCase): + """Tests for OllamaModel._resolve_image.""" + + async def test_data_uri_strips_prefix(self) -> None: + """Data URIs should have their prefix stripped, returning raw base64.""" + data_uri = 'data:image/jpeg;base64,/9j/4AAQSkZJRg==' + result = await OllamaModel._resolve_image(data_uri) + assert result == '/9j/4AAQSkZJRg==' + + async def test_data_uri_png(self) -> None: + """PNG data URI should also be stripped correctly.""" + data_uri = 'data:image/png;base64,iVBORw0KGgo=' + result = await OllamaModel._resolve_image(data_uri) + assert result == 'iVBORw0KGgo=' + + async def test_raw_base64_passthrough(self) -> None: + """Raw base64 strings (not data URIs, not URLs) pass through unchanged.""" + raw_b64 = '/9j/4AAQSkZJRgABAQ==' + result = await OllamaModel._resolve_image(raw_b64) + assert result == raw_b64 + + async def test_local_file_path_passthrough(self) -> None: + """Local file paths pass through unchanged for Image to handle.""" + path = './test_images/cat.jpg' + result = await OllamaModel._resolve_image(path) + assert result == path + + @patch('genkit.plugins.ollama.models.get_cached_client') + async def test_http_url_downloads_image(self, mock_get_client: MagicMock) -> None: + """HTTP URLs should be downloaded and returned as bytes.""" + mock_response = MagicMock() + mock_response.content = b'\x89PNG\r\n\x1a\n' + mock_response.raise_for_status = MagicMock() + + mock_client = AsyncMock() + mock_client.get.return_value = mock_response + mock_get_client.return_value = mock_client + + result = await OllamaModel._resolve_image('https://example.com/cat.jpg') + + assert result == b'\x89PNG\r\n\x1a\n' + mock_get_client.assert_called_once_with( + cache_key='ollama/image-fetch', + timeout=60.0, + headers={ + 'User-Agent': 'Genkit/1.0 (https://github.com/firebase/genkit; genkit@google.com)', + }, + ) + mock_client.get.assert_awaited_once_with('https://example.com/cat.jpg') + mock_response.raise_for_status.assert_called_once() + + 
@patch('genkit.plugins.ollama.models.get_cached_client') + async def test_http_url_raises_on_failure(self, mock_get_client: MagicMock) -> None: + """HTTP errors during image download should propagate.""" + mock_client = AsyncMock() + mock_response = MagicMock() + mock_response.raise_for_status.side_effect = httpx.HTTPStatusError( + '403 Forbidden', request=MagicMock(), response=MagicMock() + ) + mock_client.get.return_value = mock_response + mock_get_client.return_value = mock_client + + with self.assertRaises(httpx.HTTPStatusError): + await OllamaModel._resolve_image('https://example.com/secret.jpg') + + +class TestBuildChatMessagesWithMedia(unittest.IsolatedAsyncioTestCase): + """Tests for build_chat_messages with MediaPart content.""" + + async def test_text_and_media_message(self) -> None: + """Messages with text + media should produce text content and images.""" + request = GenerateRequest( + messages=[ + Message( + role=Role.USER, + content=[ + Part(root=TextPart(text='Describe this image')), + Part(root=MediaPart(media=Media(url='data:image/jpeg;base64,AAAA', content_type='image/jpeg'))), + ], + ) + ] + ) + + with patch.object(OllamaModel, '_resolve_image', new_callable=AsyncMock, return_value='AAAA'): + messages = await OllamaModel.build_chat_messages(request) + + assert len(messages) == 1 + assert messages[0].content == 'Describe this image' + assert len(messages[0]['images']) == 1 + + async def test_media_only_message(self) -> None: + """Messages with only media should have empty text content.""" + request = GenerateRequest( + messages=[ + Message( + role=Role.USER, + content=[ + Part(root=MediaPart(media=Media(url='data:image/png;base64,BBB', content_type='image/png'))), + ], + ) + ] + ) + + with patch.object(OllamaModel, '_resolve_image', new_callable=AsyncMock, return_value='BBB'): + messages = await OllamaModel.build_chat_messages(request) + + assert len(messages) == 1 + assert messages[0].content == '' + assert len(messages[0]['images']) == 1 diff --git a/py/plugins/xai/src/genkit/plugins/xai/model_info.py b/py/plugins/xai/src/genkit/plugins/xai/model_info.py index a64899e595..e47472361f 100644 --- a/py/plugins/xai/src/genkit/plugins/xai/model_info.py +++ b/py/plugins/xai/src/genkit/plugins/xai/model_info.py @@ -28,6 +28,7 @@ __all__ = ['SUPPORTED_XAI_MODELS', 'get_model_info'] +# Source: https://docs.x.ai/docs/models LANGUAGE_MODEL_SUPPORTS = Supports( multiturn=True, tools=True, @@ -36,40 +37,90 @@ output=['text', 'json'], ) +REASONING_MODEL_SUPPORTS = Supports( + multiturn=True, + tools=True, + media=False, + system_role=True, + output=['text', 'json'], +) + +VISION_MODEL_SUPPORTS = Supports( + multiturn=False, + tools=True, + media=True, + system_role=False, + output=['text', 'json'], +) + +# --- Grok 3 family (legacy) --- GROK_3 = ModelInfo(label='xAI - Grok 3', versions=['grok-3'], supports=LANGUAGE_MODEL_SUPPORTS) GROK_3_FAST = ModelInfo(label='xAI - Grok 3 Fast', versions=['grok-3-fast'], supports=LANGUAGE_MODEL_SUPPORTS) GROK_3_MINI = ModelInfo(label='xAI - Grok 3 Mini', versions=['grok-3-mini'], supports=LANGUAGE_MODEL_SUPPORTS) GROK_3_MINI_FAST = ModelInfo( label='xAI - Grok 3 Mini Fast', versions=['grok-3-mini-fast'], supports=LANGUAGE_MODEL_SUPPORTS ) -GROK_4 = ModelInfo(label='xAI - Grok 4', versions=['grok-4'], supports=LANGUAGE_MODEL_SUPPORTS) + +# --- Grok 4 family --- +GROK_4 = ModelInfo(label='xAI - Grok 4', versions=['grok-4'], supports=REASONING_MODEL_SUPPORTS) +GROK_4_FAST_REASONING = ModelInfo( + label='xAI - Grok 4 Fast (Reasoning)', + 
versions=['grok-4-fast-reasoning'], + supports=REASONING_MODEL_SUPPORTS, +) +GROK_4_FAST_NON_REASONING = ModelInfo( + label='xAI - Grok 4 Fast (Non-Reasoning)', + versions=['grok-4-fast-non-reasoning'], + supports=LANGUAGE_MODEL_SUPPORTS, +) + +# --- Grok 4.1 family --- +# NOTE: "grok-4.1" is an alias available only on the OpenAI-compatible REST API. +# The native xai_sdk (gRPC) does not support this alias; use the explicit model IDs below. +GROK_4_1_FAST_REASONING = ModelInfo( + label='xAI - Grok 4.1 Fast (Reasoning)', + versions=['grok-4-1-fast-reasoning'], + supports=REASONING_MODEL_SUPPORTS, +) +GROK_4_1_FAST_NON_REASONING = ModelInfo( + label='xAI - Grok 4.1 Fast (Non-Reasoning)', + versions=['grok-4-1-fast-non-reasoning'], + supports=LANGUAGE_MODEL_SUPPORTS, +) + +# --- Specialist models --- +GROK_CODE_FAST_1 = ModelInfo( + label='xAI - Grok Code Fast 1', + versions=['grok-code-fast-1'], + supports=LANGUAGE_MODEL_SUPPORTS, +) + +# --- Vision models --- GROK_2_VISION_1212 = ModelInfo( label='xAI - Grok 2 Vision', versions=['grok-2-vision-1212'], - supports=Supports( - multiturn=False, - tools=True, - media=True, - system_role=False, - output=['text', 'json'], - ), + supports=VISION_MODEL_SUPPORTS, ) -# Enum for xAI Grok versions class XAIGrokVersion(StrEnum): """xAI Grok models. - Model Support: - - | Model | Description | Status | - |----------------------|--------------------|------------| - | `grok-3` | Grok 3 | Supported | - | `grok-3-fast` | Grok 3 Fast | Supported | - | `grok-3-mini` | Grok 3 Mini | Supported | - | `grok-3-mini-fast` | Grok 3 Mini Fast | Supported | - | `grok-4` | Grok 4 | Supported | - | `grok-2-vision-1212` | Grok 2 Vision | Supported | + Source: https://docs.x.ai/docs/models + + | Model | Description | Status | + |-------------------------------|--------------------------------|------------| + | `grok-3` | Grok 3 | Supported | + | `grok-3-fast` | Grok 3 Fast | Supported | + | `grok-3-mini` | Grok 3 Mini | Supported | + | `grok-3-mini-fast` | Grok 3 Mini Fast | Supported | + | `grok-4` | Grok 4 (reasoning) | Supported | + | `grok-4-fast-reasoning` | Grok 4 Fast (reasoning) | Supported | + | `grok-4-fast-non-reasoning` | Grok 4 Fast (non-reasoning) | Supported | + | `grok-4-1-fast-reasoning` | Grok 4.1 Fast (reasoning) | Supported | + | `grok-4-1-fast-non-reasoning` | Grok 4.1 Fast (non-reasoning) | Supported | + | `grok-code-fast-1` | Grok Code Fast 1 (coding) | Supported | + | `grok-2-vision-1212` | Grok 2 Vision | Supported | """ GROK_3 = 'grok-3' @@ -77,6 +128,11 @@ class XAIGrokVersion(StrEnum): GROK_3_MINI = 'grok-3-mini' GROK_3_MINI_FAST = 'grok-3-mini-fast' GROK_4 = 'grok-4' + GROK_4_FAST_REASONING = 'grok-4-fast-reasoning' + GROK_4_FAST_NON_REASONING = 'grok-4-fast-non-reasoning' + GROK_4_1_FAST_REASONING = 'grok-4-1-fast-reasoning' + GROK_4_1_FAST_NON_REASONING = 'grok-4-1-fast-non-reasoning' + GROK_CODE_FAST_1 = 'grok-code-fast-1' GROK_2_VISION_1212 = 'grok-2-vision-1212' @@ -86,6 +142,11 @@ class XAIGrokVersion(StrEnum): XAIGrokVersion.GROK_3_MINI: GROK_3_MINI, XAIGrokVersion.GROK_3_MINI_FAST: GROK_3_MINI_FAST, XAIGrokVersion.GROK_4: GROK_4, + XAIGrokVersion.GROK_4_FAST_REASONING: GROK_4_FAST_REASONING, + XAIGrokVersion.GROK_4_FAST_NON_REASONING: GROK_4_FAST_NON_REASONING, + XAIGrokVersion.GROK_4_1_FAST_REASONING: GROK_4_1_FAST_REASONING, + XAIGrokVersion.GROK_4_1_FAST_NON_REASONING: GROK_4_1_FAST_NON_REASONING, + XAIGrokVersion.GROK_CODE_FAST_1: GROK_CODE_FAST_1, XAIGrokVersion.GROK_2_VISION_1212: GROK_2_VISION_1212, } diff --git 
a/py/pyproject.toml b/py/pyproject.toml index cbd6260b0d..4de18d68dd 100644 --- a/py/pyproject.toml +++ b/py/pyproject.toml @@ -132,40 +132,41 @@ default-groups = ["dev", "lint"] [tool.uv.sources] # Samples (alphabetical by package name from pyproject.toml) -amazon-bedrock-hello = { workspace = true } -anthropic-hello = { workspace = true } -cloudflare-workers-ai-hello = { workspace = true } -compat-oai-hello = { workspace = true } -deepseek-hello = { workspace = true } -dev-local-vectorstore-hello = { workspace = true } -evaluator-demo = { workspace = true } -firestore-retreiver = { workspace = true } -flask-hello = { workspace = true } -format-demo = { workspace = true } -google-genai-code-execution = { workspace = true } -google-genai-context-caching = { workspace = true } -google-genai-hello = { workspace = true } -google-genai-image = { workspace = true } -google-genai-vertexai-hello = { workspace = true } -google-genai-vertexai-image = { workspace = true } -huggingface-hello = { workspace = true } -media-models-demo = { workspace = true } -menu = { workspace = true } -microsoft-foundry-hello = { workspace = true } -mistral-hello = { workspace = true } -model-garden = { workspace = true } -multi-server = { workspace = true } -observability-hello = { workspace = true } -ollama-hello = { workspace = true } -ollama-simple-embed = { workspace = true } -prompt-demo = { workspace = true } -realtime-tracing-demo = { workspace = true } -short-n-long = { workspace = true } -tool-interrupts = { workspace = true } -vertex-ai-vector-search-bigquery = { workspace = true } -vertex-ai-vector-search-firestore = { workspace = true } -vertexai-rerank-eval = { workspace = true } -xai-hello = { workspace = true } +dev-local-vectorstore-hello = { workspace = true } +framework-context-demo = { workspace = true } +framework-dynamic-tools-demo = { workspace = true } +framework-evaluator-demo = { workspace = true } +framework-format-demo = { workspace = true } +framework-middleware-demo = { workspace = true } +framework-prompt-demo = { workspace = true } +framework-realtime-tracing-demo = { workspace = true } +framework-restaurant-demo = { workspace = true } +framework-tool-interrupts = { workspace = true } +provider-amazon-bedrock-hello = { workspace = true } +provider-anthropic-hello = { workspace = true } +provider-cloudflare-workers-ai-hello = { workspace = true } +provider-compat-oai-hello = { workspace = true } +provider-deepseek-hello = { workspace = true } +provider-firestore-retriever = { workspace = true } +provider-google-genai-code-execution = { workspace = true } +provider-google-genai-context-caching = { workspace = true } +provider-google-genai-hello = { workspace = true } +provider-google-genai-media-models-demo = { workspace = true } +provider-google-genai-vertexai-hello = { workspace = true } +provider-google-genai-vertexai-image = { workspace = true } +provider-huggingface-hello = { workspace = true } +provider-microsoft-foundry-hello = { workspace = true } +provider-mistral-hello = { workspace = true } +provider-observability-hello = { workspace = true } +provider-ollama-hello = { workspace = true } +provider-vertex-ai-model-garden = { workspace = true } +provider-vertex-ai-vector-search-bigquery = { workspace = true } +provider-vertex-ai-vector-search-firestore = { workspace = true } +provider-vertex-ai-rerank-eval = { workspace = true } +provider-xai-hello = { workspace = true } +web-flask-hello = { workspace = true } +web-multi-server = { workspace = true } +web-short-n-long = { 
workspace = true } # Core packages genkit = { workspace = true } # Plugins (alphabetical) @@ -394,8 +395,8 @@ root = [ "plugins/xai/src", # Samples ".", # For samples.shared imports - "samples/evaluator-demo", # For evaluator_demo package imports - "samples/menu/src", # For menu sample imports + "samples/framework-evaluator-demo", # For evaluator_demo package imports + "samples/framework-restaurant-demo/src", # For restaurant demo sample imports "plugins/mcp/tests", # For fakes module imports in tests ] @@ -455,19 +456,19 @@ project_includes = [ "plugins/*/tests/**/*.py", # Samples "samples/*/src/**/*.py", - "samples/evaluator-demo/evaluator_demo/**/*.py", + "samples/framework-evaluator-demo/evaluator_demo/**/*.py", ] # Search path for first-party code import resolution. # The project root "." covers all packages. Additional paths are needed for: # - plugins/mcp/tests: has a local `fakes` module for test mocks -# - samples/evaluator-demo: has `evaluator_demo` package with internal imports -# - samples/menu/src: has internal imports (menu_ai, menu_schemas) +# - samples/framework-evaluator-demo: has `evaluator_demo` package with internal imports +# - samples/framework-restaurant-demo/src: has internal imports (menu_ai, menu_schemas) search-path = [ ".", "plugins/mcp/tests", - "samples/evaluator-demo", - "samples/menu/src", + "samples/framework-evaluator-demo", + "samples/framework-restaurant-demo/src", ] # Ignore missing imports for namespace packages - pyrefly can't resolve PEP 420 # namespace packages but these imports work at runtime diff --git a/py/samples/README.md b/py/samples/README.md index 218fb69bce..99d9a1e34f 100644 --- a/py/samples/README.md +++ b/py/samples/README.md @@ -5,40 +5,46 @@ This directory contains sample applications demonstrating various Genkit feature ## Sample Categories ``` -┌─────────────────────────────────────────────────────────────────────────────────┐ -│ GENKIT SAMPLE APPLICATIONS │ -├─────────────────────────────────────────────────────────────────────────────────┤ -│ │ -│ MODEL PROVIDERS TELEMETRY │ -│ ─────────────── ───────── │ -│ ┌─────────────────────────┐ ┌─────────────────────────┐ │ -│ │ google-genai-hello │ │ aws-hello │ │ -│ │ anthropic-hello │ │ microsoft-foundry-hello │ │ -│ │ amazon-bedrock-hello │ │ observability-hello │ │ -│ │ microsoft-foundry-hello │ │ realtime-tracing-demo │ │ -│ │ ollama-hello │ └─────────────────────────┘ │ -│ │ compat-oai-hello │ │ -│ │ deepseek-hello │ ADVANCED FEATURES │ -│ │ xai-hello │ ───────────────── │ -│ │ cloudflare-workers-ai-hello │ ┌─────────────────────────┐ │ -│ │ tool-interrupts │ │ -│ VECTOR STORES │ menu (tool examples) │ │ -│ ───────────── │ prompt-demo │ │ -│ ┌─────────────────────────┐ │ format-demo │ │ -│ │ dev-local-vectorstore │ │ multi-server │ │ -│ │ vertex-ai-vector-search │ │ evaluator-demo │ │ -│ │ firestore-retriever │ │ vertexai-rerank-eval │ │ -│ └─────────────────────────┘ │ flask-hello │ │ -│ └─────────────────────────┘ │ -│ │ -│ MULTIMODAL GOOGLE AI FEATURES │ -│ ────────── ────────────────── │ -│ ┌─────────────────────────┐ ┌─────────────────────────┐ │ -│ │ media-models-demo │ │ google-genai-image │ │ -│ │ google-genai-image │ │ google-genai-code-exec │ │ -│ └─────────────────────────┘ │ google-genai-context │ │ -│ └─────────────────────────┘ │ -└─────────────────────────────────────────────────────────────────────────────────┘ +┌──────────────────────────────────────────────────────────────────────────────────────────┐ +│ GENKIT SAMPLE APPLICATIONS │ 
+├──────────────────────────────────────────────────────────────────────────────────────────┤ +│ │ +│ MODEL PROVIDERS (provider-*) FRAMEWORK FEATURES (framework-*) │ +│ ──────────────────────────── ──────────────────────────────── │ +│ ┌──────────────────────────────────┐ ┌──────────────────────────────────┐ │ +│ │ provider-google-genai-hello │ │ framework-context-demo │ │ +│ │ provider-google-genai-vertexai- │ │ framework-dynamic-tools-demo │ │ +│ │ hello │ │ framework-evaluator-demo │ │ +│ │ provider-anthropic-hello │ │ framework-format-demo │ │ +│ │ provider-amazon-bedrock-hello │ │ framework-middleware-demo │ │ +│ │ provider-microsoft-foundry-hello │ │ framework-prompt-demo │ │ +│ │ provider-ollama-hello │ │ framework-realtime-tracing-demo │ │ +│ │ provider-compat-oai-hello │ │ framework-restaurant-demo │ │ +│ │ provider-deepseek-hello │ │ framework-tool-interrupts │ │ +│ │ provider-xai-hello │ └──────────────────────────────────┘ │ +│ │ provider-cloudflare-workers-ai- │ │ +│ │ hello │ WEB FRAMEWORKS (web-*) │ +│ │ provider-mistral-hello │ ────────────────────────── │ +│ │ provider-huggingface-hello │ ┌──────────────────────────────────┐ │ +│ │ provider-observability-hello │ │ web-flask-hello │ │ +│ │ provider-vertex-ai-model-garden │ │ web-multi-server │ │ +│ │ provider-vertex-ai-rerank-eval │ │ web-short-n-long │ │ +│ │ provider-firestore-retriever │ └──────────────────────────────────┘ │ +│ │ provider-google-genai-code- │ │ +│ │ execution │ OTHER │ +│ │ provider-google-genai-context- │ ───── │ +│ │ caching │ ┌──────────────────────────────────┐ │ +│ │ provider-google-genai-vertexai- │ │ dev-local-vectorstore-hello │ │ +│ │ image │ └──────────────────────────────────┘ │ +│ │ provider-google-genai-media- │ │ +│ │ models-demo │ │ +│ │ provider-vertex-ai-vector- │ │ +│ │ search-bigquery │ │ +│ │ provider-vertex-ai-vector- │ │ +│ │ search-firestore │ │ +│ └──────────────────────────────────┘ │ +│ │ +└──────────────────────────────────────────────────────────────────────────────────────────┘ ``` ## Quick Start @@ -55,59 +61,92 @@ cd py/samples/ ## Sample List -### Model Provider Samples +### Model Provider Samples (`provider-*`) | Sample | Plugin | Description | |--------|--------|-------------| -| **google-genai-hello** | google-genai | Gemini models with Google AI | -| **anthropic-hello** | anthropic | Claude models directly | -| **amazon-bedrock-hello** | amazon-bedrock | Claude, Llama, Nova via Bedrock | -| **microsoft-foundry-hello** | microsoft-foundry | Azure AI Foundry models + Application Insights | -| **ollama-hello** | ollama | Local models with Ollama | -| **compat-oai-hello** | compat-oai | OpenAI-compatible APIs | -| **deepseek-hello** | deepseek | DeepSeek V3 and R1 | -| **xai-hello** | xai | Grok models | -| **cloudflare-workers-ai-hello** | cloudflare-workers-ai | Cloudflare Workers AI + OTLP telemetry | - -### Telemetry Samples +| **provider-google-genai-hello** | google-genai | Gemini models with Google AI | +| **provider-google-genai-vertexai-hello** | vertex-ai | Gemini models with Vertex AI | +| **provider-anthropic-hello** | anthropic | Claude models directly | +| **provider-amazon-bedrock-hello** | amazon-bedrock | Claude, Llama, Nova via Bedrock | +| **provider-microsoft-foundry-hello** | microsoft-foundry | Azure AI Foundry models + Application Insights | +| **provider-ollama-hello** | ollama | Local models with Ollama | +| **provider-compat-oai-hello** | compat-oai | OpenAI-compatible APIs | +| **provider-deepseek-hello** | deepseek | DeepSeek V3 and R1 | +| 
**provider-xai-hello** | xai | Grok models | +| **provider-cloudflare-workers-ai-hello** | cloudflare-workers-ai | Cloudflare Workers AI + OTLP telemetry | +| **provider-mistral-hello** | mistral | Mistral models | +| **provider-huggingface-hello** | huggingface | HuggingFace Inference API models | +| **provider-vertex-ai-model-garden** | vertex-ai | Third-party models via Vertex AI Model Garden | +| **provider-observability-hello** | observability | Sentry, Honeycomb, Datadog, etc. | + +### Provider Feature Samples (`provider-*`) | Sample | Plugin | Description | |--------|--------|-------------| -| **aws-hello** | aws | AWS X-Ray and CloudWatch integration | -| **microsoft-foundry-hello** | microsoft-foundry | Azure AI Foundry models + Application Insights | -| **observability-hello** | observability | Sentry, Honeycomb, Datadog, etc. | -| **realtime-tracing-demo** | google-cloud | Real-time tracing visualization | +| **provider-google-genai-code-execution** | google-genai | Gemini server-side code execution | +| **provider-google-genai-context-caching** | google-genai | Context caching for long prompts | +| **provider-google-genai-vertexai-image** | vertex-ai | Image generation with Vertex AI Imagen | +| **provider-google-genai-media-models-demo** | google-genai | Media generation: TTS, Veo, Lyria, Imagen, Gemini Image, editing | +| **provider-vertex-ai-rerank-eval** | vertex-ai | Vertex AI rerankers and evaluators | +| **provider-vertex-ai-vector-search-bigquery** | vertex-ai | BigQuery with Vertex AI vectors | +| **provider-vertex-ai-vector-search-firestore** | vertex-ai | Firestore with Vertex AI vectors | +| **provider-firestore-retriever** | firebase | Firestore vector search retriever | -### Vector Store Samples +### Framework Feature Samples (`framework-*`) -| Sample | Plugin | Description | -|--------|--------|-------------| -| **dev-local-vectorstore-hello** | dev-local-vectorstore | Local development vector store | -| **vertex-ai-vector-search-firestore** | vertex-ai | Firestore with Vertex AI vectors | -| **vertex-ai-vector-search-bigquery** | vertex-ai | BigQuery with Vertex AI vectors | -| **firestore-retriever** | firebase | Firestore vector search | - -### Advanced Feature Samples +| Sample | Features | Description | +|--------|----------|-------------| +| **framework-context-demo** | Context | Context propagation through flows, tools, and generate | +| **framework-dynamic-tools-demo** | Tools | Dynamic tool registration at runtime | +| **framework-evaluator-demo** | Evaluation | Custom evaluators and RAGAS | +| **framework-format-demo** | Formats | Output formatting and schemas | +| **framework-middleware-demo** | Middleware | Custom retry and logging middleware | +| **framework-prompt-demo** | Prompts | Dotprompt templates and partials | +| **framework-realtime-tracing-demo** | Telemetry | Real-time tracing visualization | +| **framework-restaurant-demo** | Tools, RAG | Restaurant menu ordering with tools | +| **framework-tool-interrupts** | Tools | Human-in-the-loop tool approval | + +### Web Framework Samples (`web-*`) | Sample | Features | Description | |--------|----------|-------------| -| **tool-interrupts** | Tools | Human-in-the-loop tool approval | -| **menu** | Tools | Restaurant menu ordering with tools | -| **prompt-demo** | Prompts | Dotprompt templates and partials | -| **format-demo** | Formats | Output formatting and schemas | -| **multi-server** | Architecture | Multiple Genkit servers | -| **evaluator-demo** | Evaluation | Custom evaluators and RAGAS | 
-| **vertexai-rerank-eval** | RAG, Evaluation | Vertex AI rerankers and evaluators | -| **flask-hello** | Integrations | Flask HTTP endpoints | +| **web-flask-hello** | Flask | Flask HTTP endpoints with Genkit | +| **web-multi-server** | Litestar, Starlette | Multiple Genkit servers | +| **web-short-n-long** | ASGI | ASGI deployment with long-running flows | -### Multimodal Samples +### Other Samples | Sample | Features | Description | |--------|----------|-------------| -| **google-genai-image** | Imagen | Image generation with Imagen | -| **media-models-demo** | Vision | Image understanding demos | -| **google-genai-code-execution** | Code | Gemini code execution | -| **google-genai-context-caching** | Performance | Context caching for long prompts | +| **dev-local-vectorstore-hello** | Vector Store | Local development vector store | + +## Feature Coverage Matrix + +The table below tracks which capabilities each model provider sample exercises. +This is a living document — update it as new flows are added to samples. + +> **Last audited**: 2026-02-07 + +| Sample | Basic | Stream | Tools | Struct | Vision | Embed | Code | Reasoning | TTS | Cache | PDF | +|--------|:-----:|:------:|:-----:|:------:|:------:|:-----:|:----:|:---------:|:---:|:-----:|:---:| +| **provider-amazon-bedrock-hello** | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | — | — | — | +| **provider-anthropic-hello** | ✅ | ✅ | ✅ | ✅ | ✅ | — | ✅ | ✅ | — | ✅ | ✅ | +| **provider-cloudflare-workers-ai-hello** | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | — | — | — | — | +| **provider-compat-oai-hello** | ✅ | ✅ | ✅ | ✅ | ✅ | — | ✅ | ✅ | ✅ | — | — | +| **provider-deepseek-hello** | ✅ | ✅ | ✅ | ✅ | — | — | ✅ | ✅ | — | — | — | +| **provider-google-genai-hello** | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | — | — | — | +| **provider-google-genai-vertexai-hello** | ✅ | ✅ | ✅ | ✅ | — | ✅ | ✅ | — | — | — | — | +| **provider-huggingface-hello** | ✅ | ✅ | ✅ | ✅ | — | — | ✅ | — | — | — | — | +| **provider-microsoft-foundry-hello** | ✅ | ✅ | ✅ | ✅ | ✅ | — | ✅ | ✅ | — | — | — | +| **provider-mistral-hello** | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | — | — | — | +| **provider-ollama-hello** | ✅ | ✅ | ✅ | ✅ | ✅ | — | ✅ | ✅ | — | — | — | +| **provider-xai-hello** | ✅ | ✅ | ✅ | ✅ | ✅ | — | ✅ | ✅ | — | — | — | + +**Legend**: ✅ = exercised in sample, — = plugin does not support this feature + +All plugin-supported features are now exercised in their respective samples. +The matrix is complete — no remaining gaps (❌) exist. ## Environment Setup @@ -117,18 +156,18 @@ Most samples require environment variables for API keys. 
Configure these before | Variable | Sample | Required | Description | Get Credentials | |----------|--------|----------|-------------|-----------------| -| `GOOGLE_GENAI_API_KEY` | google-genai-hello | Yes | Google AI Studio API key | [Google AI Studio](https://aistudio.google.com/apikey) | -| `ANTHROPIC_API_KEY` | anthropic-hello | Yes | Anthropic API key | [Anthropic Console](https://console.anthropic.com/) | -| `AWS_REGION` | amazon-bedrock-hello | Yes | AWS region (e.g., `us-east-1`) | [AWS Bedrock Regions](https://docs.aws.amazon.com/general/latest/gr/bedrock.html) | -| `AWS_ACCESS_KEY_ID` | amazon-bedrock-hello | Yes* | AWS access key | [AWS IAM](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html) | -| `AWS_SECRET_ACCESS_KEY` | amazon-bedrock-hello | Yes* | AWS secret key | [AWS IAM](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html) | -| `AZURE_AI_FOUNDRY_ENDPOINT` | microsoft-foundry-hello | Yes | Azure AI Foundry endpoint | [Azure AI Foundry](https://ai.azure.com/) | -| `AZURE_AI_FOUNDRY_API_KEY` | microsoft-foundry-hello | Yes* | Azure AI Foundry API key | [Azure AI Foundry](https://ai.azure.com/) | -| `OPENAI_API_KEY` | compat-oai-hello | Yes | OpenAI API key | [OpenAI Platform](https://platform.openai.com/api-keys) | -| `DEEPSEEK_API_KEY` | deepseek-hello | Yes | DeepSeek API key | [DeepSeek Platform](https://platform.deepseek.com/) | -| `XAI_API_KEY` | xai-hello | Yes | xAI API key | [xAI Console](https://console.x.ai/) | -| `CLOUDFLARE_ACCOUNT_ID` | cloudflare-workers-ai-hello | Yes | Cloudflare account ID | [Cloudflare Dashboard](https://dash.cloudflare.com/) | -| `CLOUDFLARE_API_TOKEN` | cloudflare-workers-ai-hello | Yes | Cloudflare API token | [Cloudflare API Tokens](https://developers.cloudflare.com/fundamentals/api/get-started/create-token/) | +| `GOOGLE_GENAI_API_KEY` | provider-google-genai-hello | Yes | Google AI Studio API key | [Google AI Studio](https://aistudio.google.com/apikey) | +| `ANTHROPIC_API_KEY` | provider-anthropic-hello | Yes | Anthropic API key | [Anthropic Console](https://console.anthropic.com/) | +| `AWS_REGION` | provider-amazon-bedrock-hello | Yes | AWS region (e.g., `us-east-1`) | [AWS Bedrock Regions](https://docs.aws.amazon.com/general/latest/gr/bedrock.html) | +| `AWS_ACCESS_KEY_ID` | provider-amazon-bedrock-hello | Yes* | AWS access key | [AWS IAM](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html) | +| `AWS_SECRET_ACCESS_KEY` | provider-amazon-bedrock-hello | Yes* | AWS secret key | [AWS IAM](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html) | +| `AZURE_AI_FOUNDRY_ENDPOINT` | provider-microsoft-foundry-hello | Yes | Azure AI Foundry endpoint | [Azure AI Foundry](https://ai.azure.com/) | +| `AZURE_AI_FOUNDRY_API_KEY` | provider-microsoft-foundry-hello | Yes* | Azure AI Foundry API key | [Azure AI Foundry](https://ai.azure.com/) | +| `OPENAI_API_KEY` | provider-compat-oai-hello | Yes | OpenAI API key | [OpenAI Platform](https://platform.openai.com/api-keys) | +| `DEEPSEEK_API_KEY` | provider-deepseek-hello | Yes | DeepSeek API key | [DeepSeek Platform](https://platform.deepseek.com/) | +| `XAI_API_KEY` | provider-xai-hello | Yes | xAI API key | [xAI Console](https://console.x.ai/) | +| `CLOUDFLARE_ACCOUNT_ID` | provider-cloudflare-workers-ai-hello | Yes | Cloudflare account ID | [Cloudflare Dashboard](https://dash.cloudflare.com/) | +| `CLOUDFLARE_API_TOKEN` | provider-cloudflare-workers-ai-hello | Yes | Cloudflare API token | [Cloudflare 
API Tokens](https://developers.cloudflare.com/fundamentals/api/get-started/create-token/) | *Can use IAM roles, managed identity, or other credential providers instead. @@ -136,17 +175,17 @@ Most samples require environment variables for API keys. Configure these before | Variable | Sample | Required | Description | Get Credentials | |----------|--------|----------|-------------|-----------------| -| `GOOGLE_CLOUD_PROJECT` | realtime-tracing-demo | Yes | GCP project ID | [GCP Console](https://console.cloud.google.com/) | -| `GOOGLE_APPLICATION_CREDENTIALS` | realtime-tracing-demo | Yes* | Service account JSON path | [GCP IAM](https://cloud.google.com/docs/authentication/application-default-credentials) | -| `APPLICATIONINSIGHTS_CONNECTION_STRING` | microsoft-foundry-hello | Yes | Azure App Insights connection string | [Azure Portal](https://learn.microsoft.com/azure/azure-monitor/app/create-workspace-resource) | -| `CF_OTLP_ENDPOINT` | cloudflare-workers-ai-hello | No* | OTLP endpoint URL | Your OTLP backend | -| `CF_API_TOKEN` | cloudflare-workers-ai-hello | No* | Bearer token for OTLP auth | Your OTLP backend | +| `GOOGLE_CLOUD_PROJECT` | framework-realtime-tracing-demo | Yes | GCP project ID | [GCP Console](https://console.cloud.google.com/) | +| `GOOGLE_APPLICATION_CREDENTIALS` | framework-realtime-tracing-demo | Yes* | Service account JSON path | [GCP IAM](https://cloud.google.com/docs/authentication/application-default-credentials) | +| `APPLICATIONINSIGHTS_CONNECTION_STRING` | provider-microsoft-foundry-hello | Yes | Azure App Insights connection string | [Azure Portal](https://learn.microsoft.com/azure/azure-monitor/app/create-workspace-resource) | +| `CF_OTLP_ENDPOINT` | provider-cloudflare-workers-ai-hello | No* | OTLP endpoint URL | Your OTLP backend | +| `CF_API_TOKEN` | provider-cloudflare-workers-ai-hello | No* | Bearer token for OTLP auth | Your OTLP backend | *Only required if using OTLP telemetry export. ### Observability Plugin (Third-Party Backends) -The `observability-hello` sample supports multiple backends. Configure based on your choice: +The `provider-observability-hello` sample supports multiple backends. Configure based on your choice: #### Sentry @@ -192,43 +231,43 @@ Sites: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com` ### Quick Setup Examples ```bash -# Google AI (google-genai-hello) +# Google AI (provider-google-genai-hello) export GOOGLE_GENAI_API_KEY="AIza..." -# Anthropic (anthropic-hello) +# Anthropic (provider-anthropic-hello) export ANTHROPIC_API_KEY="sk-ant-..." -# AWS Bedrock (amazon-bedrock-hello) +# AWS Bedrock (provider-amazon-bedrock-hello) export AWS_REGION="us-east-1" export AWS_ACCESS_KEY_ID="AKIA..." export AWS_SECRET_ACCESS_KEY="..." -# Azure AI Foundry (microsoft-foundry-hello) +# Azure AI Foundry (provider-microsoft-foundry-hello) export AZURE_AI_FOUNDRY_ENDPOINT="https://your-resource.services.ai.azure.com/" export AZURE_AI_FOUNDRY_API_KEY="..." -# Azure Telemetry (microsoft-foundry-hello) +# Azure Telemetry (provider-microsoft-foundry-hello) export APPLICATIONINSIGHTS_CONNECTION_STRING="InstrumentationKey=...;IngestionEndpoint=..." -# Cloudflare Workers AI (cloudflare-workers-ai-hello) +# Cloudflare Workers AI (provider-cloudflare-workers-ai-hello) export CLOUDFLARE_ACCOUNT_ID="abc123..." export CLOUDFLARE_API_TOKEN="..." 
-# Sentry (observability-hello) +# Sentry (provider-observability-hello) export SENTRY_DSN="https://abc123@o123456.ingest.us.sentry.io/4507654321" -# Honeycomb (observability-hello) +# Honeycomb (provider-observability-hello) export HONEYCOMB_API_KEY="..." -# Datadog (observability-hello) +# Datadog (provider-observability-hello) export DD_API_KEY="..." -# Grafana Cloud (observability-hello) +# Grafana Cloud (provider-observability-hello) export GRAFANA_OTLP_ENDPOINT="https://otlp-gateway-prod-us-central-0.grafana.net/otlp" export GRAFANA_USER_ID="123456" export GRAFANA_API_KEY="glc_..." -# Axiom (observability-hello) +# Axiom (provider-observability-hello) export AXIOM_TOKEN="xaat-..." ``` diff --git a/py/samples/deepseek-hello/src/main.py b/py/samples/deepseek-hello/src/main.py deleted file mode 100644 index 8b27eda32a..0000000000 --- a/py/samples/deepseek-hello/src/main.py +++ /dev/null @@ -1,465 +0,0 @@ -# Copyright 2026 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: Apache-2.0 - -"""DeepSeek hello sample - DeepSeek models with Genkit. - -This sample demonstrates how to use DeepSeek's models with Genkit, -including the powerful reasoning model (deepseek-reasoner). - -See README.md for testing instructions. - -Key Concepts (ELI5):: - - ┌─────────────────────┬────────────────────────────────────────────────────┐ - │ Concept │ ELI5 Explanation │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ DeepSeek │ Chinese AI company known for efficient models. │ - │ │ Great performance at lower cost. │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ deepseek-chat │ The standard chat model. Good for most tasks │ - │ │ like writing, Q&A, and coding help. │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ deepseek-reasoner │ The R1 reasoning model. Shows its thinking │ - │ │ step by step - great for math and logic. │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ Chain-of-Thought │ When AI explains its reasoning step by step. │ - │ │ Like showing your work on a test. │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ Streaming │ Get the response word-by-word as it's generated. │ - │ │ Feels faster, like watching someone type. 
│ - └─────────────────────┴────────────────────────────────────────────────────┘ - -Key Features -============ -| Feature Description | Example Function / Code Snippet | -|-----------------------------------------|-----------------------------------------| -| Plugin Initialization | `ai = Genkit(plugins=[DeepSeek(...)])` | -| Default Model Configuration | `ai = Genkit(model=deepseek_name(...))` | -| Defining Flows | `@ai.flow()` decorator | -| Defining Tools | `@ai.tool()` decorator | -| Pydantic for Tool Input Schema | `WeatherInput` | -| Simple Generation (Prompt String) | `say_hi` | -| Streaming Response | `streaming_flow` | -| Generation with Tools | `weather_flow` | -| Reasoning Model (deepseek-reasoner) | `reasoning_flow` | -| Generation with Config | `custom_config_flow` | -| Code Generation | `code_flow` | -| Multi-turn Chat | `chat_flow` | -""" - -import asyncio -import os -import random - -from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback - -from genkit.ai import Genkit, Output -from genkit.core.action import ActionRunContext -from genkit.core.logging import get_logger -from genkit.core.typing import Message, Part, Role, TextPart, ToolChoice -from genkit.plugins.deepseek import DeepSeek, deepseek_name - -install_rich_traceback(show_locals=True, width=120, extra_lines=3) - -if 'DEEPSEEK_API_KEY' not in os.environ: - os.environ['DEEPSEEK_API_KEY'] = input('Please enter your DEEPSEEK_API_KEY: ') - -logger = get_logger(__name__) - -ai = Genkit( - plugins=[DeepSeek()], - model=deepseek_name('deepseek-chat'), -) - - -class CurrencyExchangeInput(BaseModel): - """Currency exchange flow input schema.""" - - amount: float = Field(description='Amount to convert', default=100) - from_curr: str = Field(description='Source currency code', default='USD') - to_curr: str = Field(description='Target currency code', default='EUR') - - -class CurrencyInput(BaseModel): - """Currency conversion input schema.""" - - amount: float = Field(description='Amount to convert', default=100) - from_currency: str = Field(description='Source currency code (e.g., USD)', default='USD') - to_currency: str = Field(description='Target currency code (e.g., EUR)', default='EUR') - - -class Skills(BaseModel): - """A set of core character skills for an RPG character.""" - - strength: int = Field(description='strength (0-100)') - charisma: int = Field(description='charisma (0-100)') - endurance: int = Field(description='endurance (0-100)') - - -class RpgCharacter(BaseModel): - """An RPG character.""" - - name: str = Field(description='name of the character') - back_story: str = Field(description='back story', alias='backStory') - abilities: list[str] = Field(description='list of abilities (3-4)') - skills: Skills - - -class WeatherInput(BaseModel): - """Input schema for the weather tool.""" - - location: str = Field(description='City or location name') - - -class SayHiInput(BaseModel): - """Input for say_hi flow.""" - - name: str = Field(default='Mittens', description='Name to greet') - - -class StreamInput(BaseModel): - """Input for streaming flow.""" - - topic: str = Field(default='cats', description='Topic to generate about') - - -class CharacterInput(BaseModel): - """Input for character generation.""" - - name: str = Field(default='Whiskers', description='Character name') - - -class WeatherFlowInput(BaseModel): - """Input for weather flow.""" - - location: str = Field(default='London', description='Location to get weather for') - - -class ReasoningInput(BaseModel): - 
"""Input for reasoning flow.""" - - prompt: str = Field( - default='What is heavier, one kilo of steel or one kilo of feathers?', - description='Reasoning question to solve', - ) - - -class CustomConfigInput(BaseModel): - """Input for custom config flow.""" - - task: str = Field(default='creative', description='Task type: creative, precise, or detailed') - - -class CodeInput(BaseModel): - """Input for code generation flow.""" - - task: str = Field( - default='Write a Python function to calculate fibonacci numbers', - description='Coding task description', - ) - - -@ai.tool() -def get_weather(input: WeatherInput) -> str: - """Return a random realistic weather string for a location. - - Args: - input: Weather input location. - - Returns: - Weather information with temperature in degrees Celsius. - """ - weather_options = [ - '32° C sunny', - '17° C cloudy', - '22° C cloudy', - '19° C humid', - ] - return random.choice(weather_options) - - -@ai.flow() -async def reasoning_flow(input: ReasoningInput) -> str: - """Solve reasoning problems using deepseek-reasoner model. - - Args: - input: Input with reasoning question to solve. - - Returns: - The reasoning and answer. - """ - response = await ai.generate( - model=deepseek_name('deepseek-reasoner'), - prompt=input.prompt, - ) - return response.text - - -@ai.flow() -async def code_flow(input: CodeInput) -> str: - """Generate code using DeepSeek. - - DeepSeek excels at code generation tasks. - - Args: - input: Input with coding task description. - - Returns: - Generated code. - """ - response = await ai.generate( - prompt=input.task, - system='You are an expert programmer. Provide clean, well-documented code with explanations.', - ) - return response.text - - -@ai.flow() -async def chat_flow() -> str: - """Multi-turn chat example demonstrating context retention. - - Returns: - Final chat response. - """ - history = [] - - # First turn - User shares information - prompt1 = ( - "Hi! I'm planning a trip to Tokyo next month. I'm really excited because I love Japanese cuisine, " - 'especially ramen and sushi.' - ) - response1 = await ai.generate( - prompt=prompt1, - system='You are a helpful travel assistant.', - ) - history.append(Message(role=Role.USER, content=[Part(root=TextPart(text=prompt1))])) - if response1.message: - history.append(response1.message) - await logger.ainfo('chat_flow turn 1', result=response1.text) - - # Second turn - Ask question requiring context from first turn - response2 = await ai.generate( - messages=[ - *history, - Message(role=Role.USER, content=[Part(root=TextPart(text='What foods did I say I enjoy?'))]), - ], - system='You are a helpful travel assistant.', - ) - history.append(Message(role=Role.USER, content=[Part(root=TextPart(text='What foods did I say I enjoy?'))])) - if response2.message: - history.append(response2.message) - await logger.ainfo('chat_flow turn 2', result=response2.text) - - # Third turn - Ask question requiring context from both previous turns - response3 = await ai.generate( - messages=[ - *history, - Message( - role=Role.USER, - content=[Part(root=TextPart(text='Based on our conversation, suggest one restaurant I should visit.'))], - ), - ], - system='You are a helpful travel assistant.', - ) - return response3.text - - -@ai.tool() -def convert_currency(input: CurrencyInput) -> str: - """Convert currency amount. - - Args: - input: Currency conversion parameters. - - Returns: - Converted amount. 
- """ - # Mock conversion rates - rates = { - ('USD', 'EUR'): 0.85, - ('EUR', 'USD'): 1.18, - ('USD', 'GBP'): 0.73, - ('GBP', 'USD'): 1.37, - } - - rate = rates.get((input.from_currency, input.to_currency), 1.0) - converted = input.amount * rate - - return f'{input.amount} {input.from_currency} = {converted:.2f} {input.to_currency}' - - -@ai.flow() -async def currency_exchange(input: CurrencyExchangeInput) -> str: - """Convert currency using tools. - - Args: - input: Currency exchange parameters. - - Returns: - Conversion result. - """ - response = await ai.generate( - prompt=f'Convert {input.amount} {input.from_curr} to {input.to_curr}', - tools=['convert_currency'], - ) - return response.text - - -@ai.flow() -async def custom_config_flow(input: CustomConfigInput) -> str: - """Demonstrate custom model configurations for different tasks. - - Shows how different config parameters affect generation behavior: - - 'creative': High temperature for diverse, creative outputs - - 'precise': Low temperature with penalties for consistent, focused outputs - - 'detailed': Extended output with frequency penalty to avoid repetition - - Args: - input: Input with task type. - - Returns: - Generated response showing the effect of different configs. - """ - task = input.task - - prompts = { - 'creative': 'Write a creative story opener about a robot discovering art', - 'precise': 'List the exact steps to make a cup of tea', - 'detailed': 'Explain how photosynthesis works in detail', - } - - configs = { - 'creative': { - 'temperature': 1.5, - 'max_tokens': 200, - 'top_p': 0.95, - }, - 'precise': { - 'temperature': 0.1, - 'max_tokens': 150, - 'presence_penalty': 0.5, - }, - 'detailed': { - 'temperature': 0.7, - 'max_tokens': 400, - 'frequency_penalty': 0.8, - }, - } - - prompt = prompts.get(task, prompts['creative']) - config = configs.get(task, configs['creative']) - - # pyrefly: ignore[no-matching-overload] - config dict is compatible with dict[str, object] - response = await ai.generate( - prompt=prompt, - config=config, - ) - return response.text - - -@ai.flow() -async def generate_character(input: CharacterInput) -> RpgCharacter: - """Generate an RPG character. - - Args: - input: Input with character name. - - Returns: - The generated RPG character. - """ - # DeepSeek JSON mode: prompt must mention 'json' and provide an example - prompt = ( - f'Generate an RPG character named {input.name} in JSON format.\n' - 'Example:\n' - '{\n' - ' "name": "",\n' - ' "backStory": "A mysterious cat...",\n' - ' "abilities": ["stealth", "agility", "night vision"],\n' - ' "skills": {"strength": 10, "charisma": 15, "endurance": 12}\n' - '}\n' - ) - result = await ai.generate( - model=deepseek_name('deepseek-chat'), - prompt=prompt, - output=Output(schema=RpgCharacter), - ) - return result.output - - -@ai.flow() -async def say_hi(input: SayHiInput) -> str: - """Generate a simple greeting. - - Args: - input: Input with name to greet. - - Returns: - Greeting message. - """ - response = await ai.generate(prompt=f'Say hello to {input.name}!') - return response.text - - -@ai.flow() -async def streaming_flow( - input: StreamInput, - ctx: ActionRunContext | None = None, -) -> str: - """Generate with streaming response. - - Args: - input: Input with topic to generate about. - ctx: Action run context for streaming chunks to client. - - Returns: - Generated text. 
- """ - response = await ai.generate( - prompt=f'Tell me a fun fact about {input.topic}', - on_chunk=ctx.send_chunk if ctx else None, - ) - return response.text - - -@ai.flow() -async def weather_flow(input: WeatherFlowInput) -> str: - """Get weather using compat-oai auto tool calling.""" - response = await ai.generate( - model=deepseek_name('deepseek-chat'), - prompt=f'What is the weather in {input.location}?', - system=( - 'You have a tool called get_weather. ' - "It takes an object with a 'location' field. " - 'Always use this tool when asked about weather.' - ), - tools=['get_weather'], - tool_choice=ToolChoice.REQUIRED, - max_turns=2, - ) - - return response.text - - -async def main() -> None: - """Main entry point for the DeepSeek sample - keep alive for Dev UI.""" - await logger.ainfo('Genkit server running. Press Ctrl+C to stop.') - # Keep the process alive for Dev UI - await asyncio.Event().wait() - - -if __name__ == '__main__': - ai.run_main(main()) diff --git a/py/samples/dev-local-vectorstore-hello/README.md b/py/samples/dev-local-vectorstore-hello/README.md index 536154eb58..390a6a1804 100644 --- a/py/samples/dev-local-vectorstore-hello/README.md +++ b/py/samples/dev-local-vectorstore-hello/README.md @@ -52,7 +52,7 @@ genkit start -- uv run src/main.py 4. **Test the flows**: - [ ] `index_documents` - Index sample film documents - - [ ] `retreive_documents` - Query for similar films + - [ ] `retrieve_documents` - Query for similar films - [ ] Try different query terms 5. **Expected behavior**: diff --git a/py/samples/dev-local-vectorstore-hello/src/main.py b/py/samples/dev-local-vectorstore-hello/src/main.py index 57e0cca6c2..21ae171611 100755 --- a/py/samples/dev-local-vectorstore-hello/src/main.py +++ b/py/samples/dev-local-vectorstore-hello/src/main.py @@ -96,14 +96,13 @@ import asyncio import os -from rich.traceback import install as install_rich_traceback - from genkit.ai import Genkit from genkit.plugins.dev_local_vectorstore import define_dev_local_vector_store from genkit.plugins.google_genai import VertexAI from genkit.types import Document, RetrieverResponse +from samples.shared.logging import setup_sample -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() if 'GCLOUD_PROJECT' not in os.environ: os.environ['GCLOUD_PROJECT'] = input('Please enter your GCLOUD_PROJECT: ') @@ -145,7 +144,7 @@ async def index_documents() -> None: @ai.flow() -async def retreive_documents() -> RetrieverResponse: +async def retrieve_documents() -> RetrieverResponse: """Retrieve documents from the vector store.""" return await ai.retrieve( query=Document.from_text('sci-fi film'), diff --git a/py/samples/amazon-bedrock-hello/LICENSE b/py/samples/framework-context-demo/LICENSE similarity index 100% rename from py/samples/amazon-bedrock-hello/LICENSE rename to py/samples/framework-context-demo/LICENSE diff --git a/py/samples/framework-context-demo/README.md b/py/samples/framework-context-demo/README.md new file mode 100644 index 0000000000..a9a05def52 --- /dev/null +++ b/py/samples/framework-context-demo/README.md @@ -0,0 +1,48 @@ +# Context Propagation Demo + +Demonstrates how Genkit propagates context through the execution chain: +generate calls, flows, tools, and nested operations. + +## Quick Start + +```bash +export GEMINI_API_KEY=your-api-key +./run.sh +``` + +Then open the Dev UI at http://localhost:4000. 
+ +## Flows + +| Flow | What It Demonstrates | |------|---------------------| +| `context_in_generate` | Pass `context=` to `ai.generate()`; the tool reads the propagated value | +| `context_in_flow` | Flow receives `ActionRunContext`, reads `ctx.context` | +| `context_current_context` | Tool reads context via static `Genkit.current_context()` | +| `context_propagation_chain` | Context flows: flow -> generate -> tool -> nested generate -> nested tool | + +## Testing Checklist + +- [ ] `context_in_generate` -- Returns user-specific data based on `user_id` in context +- [ ] `context_in_flow` -- Shows context is accessible inside the flow itself +- [ ] `context_current_context` -- Shows `Genkit.current_context()` works from anywhere +- [ ] `context_propagation_chain` -- Verifies context survives multi-level nesting + +## How Context Works + +``` +ai.generate(context={'user': {'id': 42}}) + | + v + ContextVar set with {'user': {'id': 42}} + | + +----> Tool reads ctx.context['user']['id'] + | + +----> Genkit.current_context() reads same ContextVar + | + +----> Nested ai.generate() inherits context automatically +``` + +## Development + +The `run.sh` script uses `watchmedo` for hot reloading on file changes. diff --git a/py/samples/framework-context-demo/pyproject.toml b/py/samples/framework-context-demo/pyproject.toml new file mode 100644 index 0000000000..ff4e37768a --- /dev/null +++ b/py/samples/framework-context-demo/pyproject.toml @@ -0,0 +1,58 @@ +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# +# SPDX-License-Identifier: Apache-2.0 + +[project] +authors = [{ name = "Google" }] +classifiers = [ + "Development Status :: 3 - Alpha", + "Environment :: Console", + "Environment :: Web Environment", + "Intended Audience :: Developers", + "Operating System :: OS Independent", + "Programming Language :: Python", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Software Development :: Libraries", +] +dependencies = [ + "rich>=13.0.0", + "genkit", + "genkit-plugin-google-genai", + "pydantic>=2.0.0", + "structlog>=24.0.0", + "uvloop>=0.21.0", +] +description = "Framework demo: context propagation in Genkit" +license = "Apache-2.0" +name = "framework-context-demo" +readme = "README.md" +requires-python = ">=3.10" +version = "0.1.0" + +[project.optional-dependencies] +dev = ["watchdog>=6.0.0"] + +[build-system] +build-backend = "hatchling.build" +requires = ["hatchling"] + +[tool.hatch.build.targets.wheel] +packages = ["src"] diff --git a/py/samples/google-genai-image/run.sh b/py/samples/framework-context-demo/run.sh similarity index 67% rename from py/samples/google-genai-image/run.sh rename to py/samples/framework-context-demo/run.sh index 3e6a7ea638..d864af177b 100755 --- a/py/samples/google-genai-image/run.sh +++ b/py/samples/framework-context-demo/run.sh @@ -2,10 +2,9 @@ # Copyright 2026 Google LLC # SPDX-License-Identifier: Apache-2.0 -# Google GenAI Image Demo -# ======================= +# Context Propagation Demo # -# Demonstrates image generation with Google's Imagen models. +# Demonstrates how context flows through Genkit's generate, flows, and tools. # # Prerequisites: # - GEMINI_API_KEY environment variable set @@ -20,16 +19,16 @@ cd "$(dirname "$0")" source "../_common.sh" print_help() { - print_banner "Google GenAI Image Demo" "🎨" + print_banner "Context Propagation Demo" "🔑" echo "Usage: ./run.sh [options]" echo "" echo "Options:" echo " --help Show this help message" echo "" echo "Environment Variables:" - echo " GEMINI_API_KEY Required. Your Gemini API key" + echo " GEMINI_API_KEY Required. Your Google AI API key" echo "" - echo "Get an API key from: https://makersuite.google.com/app/apikey" + echo "Get an API key from: https://aistudio.google.com/apikey" print_help_footer } @@ -40,9 +39,9 @@ case "${1:-}" in ;; esac -print_banner "Google GenAI Image Demo" "🎨" +print_banner "Context Propagation Demo" "🔑" -check_env_var "GEMINI_API_KEY" "https://makersuite.google.com/app/apikey" || true +check_env_var "GEMINI_API_KEY" "https://aistudio.google.com/apikey" || true install_deps diff --git a/py/samples/framework-context-demo/src/main.py b/py/samples/framework-context-demo/src/main.py new file mode 100644 index 0000000000..65870faeec --- /dev/null +++ b/py/samples/framework-context-demo/src/main.py @@ -0,0 +1,303 @@ +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +"""Context propagation demo - How context flows through Genkit. + +This sample demonstrates the four main ways to use context in Genkit: + +1. Passing context to ``ai.generate(context=...)`` so tools can read it. +2. Accessing context inside a flow via ``ActionRunContext.context``. +3. Reading context from anywhere via the static ``Genkit.current_context()``. +4. Verifying context propagates through nested generate/tool chains. + +Key Concepts (ELI5):: + + ┌─────────────────────┬────────────────────────────────────────────────────┐ + │ Concept │ ELI5 Explanation │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ Context │ A dictionary of data (like user info or auth) │ + │ │ that follows a request through the system. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ ContextVar │ Python's way to store data per-task. Like a │ + │ │ backpack each async task carries around. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ ActionRunContext │ The object flows/tools receive with context, │ + │ │ streaming info, etc. The "execution envelope." │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ ToolRunContext │ Same as ActionRunContext but for tools. Also │ + │ │ has .interrupt() for human-in-the-loop. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ current_context() │ Static method to read context from anywhere. │ + │ │ No need to pass ctx around -- just call it. │ + └─────────────────────┴────────────────────────────────────────────────────┘ + +Data Flow:: + + ┌─────────────────────────────────────────────────────────────────────────┐ + │ HOW CONTEXT PROPAGATES IN GENKIT │ + │ │ + │ ai.generate(context={'user': {'id': 42}}) │ + │ │ │ + │ │ (1) Context stored in ContextVar │ + │ ▼ │ + │ ┌──────────────┐ │ + │ │ Model Call │ Model decides to call a tool │ + │ └──────┬───────┘ │ + │ │ │ + │ │ (2) Tool receives ToolRunContext with same context │ + │ ▼ │ + │ ┌──────────────┐ │ + │ │ Tool │ ctx.context['user']['id'] == 42 │ + │ │ (get_user) │ Genkit.current_context()['user']['id'] == 42 │ + │ └──────┬───────┘ │ + │ │ │ + │ │ (3) Nested generate inherits context automatically │ + │ ▼ │ + │ ┌──────────────┐ │ + │ │ Nested Tool │ Still sees context['user']['id'] == 42 │ + │ └──────────────┘ │ + └─────────────────────────────────────────────────────────────────────────┘ + +Testing Instructions +==================== +1. Set ``GEMINI_API_KEY`` environment variable. +2. Run ``./run.sh`` from this sample directory. +3. Open the DevUI at http://localhost:4000. +4. Run each flow and verify context-dependent behavior. + +See README.md for the full testing checklist. 
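Minimal Call Shape
==================
For orientation before the full flows below -- everything here mirrors
``context_in_generate`` defined in this file -- the core pattern is a single
generate call that carries the context dict::

    response = await ai.generate(
        prompt='Look up the current user and describe who they are.',
        tools=['get_user_info'],
        context={'user': {'id': 42}},
    )

The tool receives no explicit argument; it reads the propagated value from
the execution context instead.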
+""" + +import asyncio +import os + +from pydantic import BaseModel, Field + +from genkit.ai import Genkit +from genkit.core.action import ActionRunContext +from genkit.core.logging import get_logger +from genkit.plugins.google_genai import GoogleAI +from samples.shared.logging import setup_sample + +setup_sample() + +if 'GEMINI_API_KEY' not in os.environ: + os.environ['GEMINI_API_KEY'] = input('Please enter your GEMINI_API_KEY: ') + +logger = get_logger(__name__) + +ai = Genkit( + plugins=[GoogleAI()], + model='googleai/gemini-2.5-flash', +) + + +def _user_from_context(context: dict[str, object]) -> tuple[int, dict[str, str]]: + """Extract user_id and record from a context dict. + + Args: + context: The Genkit action context dict. + + Returns: + A (user_id, record) tuple with defaults for unknown users. + """ + default_record: dict[str, str] = {'name': 'Unknown', 'role': 'unknown', 'plan': 'none'} + raw_user = context.get('user') + if not isinstance(raw_user, dict): + return 0, default_record + user_id = int(raw_user.get('id', 0)) # type: ignore[arg-type] + return user_id, MOCK_USER_DB.get(user_id, default_record) + + +MOCK_USER_DB: dict[int, dict[str, str]] = { + 42: {'name': 'Arthur Dent', 'role': 'intergalactic traveler', 'plan': 'premium'}, + 123: {'name': 'Jane Doe', 'role': 'engineer', 'plan': 'enterprise'}, + 999: {'name': 'Guest User', 'role': 'visitor', 'plan': 'free'}, +} + + +class ContextInput(BaseModel): + """Input for context demo flows.""" + + user_id: int = Field(default=42, description='User ID to look up (try 42, 123, or 999)') + + +@ai.tool() +def get_user_info() -> str: + """Look up the current user from context. + + This tool takes no explicit input -- it reads the user ID from the + execution context that was passed to ai.generate(context=...). + + Returns: + A description of the user. + """ + context = Genkit.current_context() or {} + _, record = _user_from_context(context) + return f'{record["name"]} ({record["role"]}, {record["plan"]} plan)' + + +@ai.tool() +def get_user_via_static() -> str: + """Look up the current user using Genkit.current_context(). + + Demonstrates the static method approach -- useful when context is needed + deep in a call stack where ctx isn't easily threaded through. + + Returns: + A description of the user from the static context accessor. + """ + context = Genkit.current_context() + if context is None: + return 'No context available (not running inside an action).' + user = context.get('user', {}) + user_id = user.get('id') + record = MOCK_USER_DB.get(user_id, {'name': 'Unknown', 'role': 'unknown', 'plan': 'none'}) + return f'[via current_context()] {record["name"]} ({record["role"]}, {record["plan"]} plan)' + + +@ai.tool() +def get_user_permissions() -> str: + """Return permissions based on the user's plan from context. + + Used in the propagation chain demo to verify context survives + through nested generate calls. + + Returns: + Permission level description. + """ + context = Genkit.current_context() or {} + _, record = _user_from_context(context) + permissions = { + 'free': 'read-only access', + 'premium': 'read-write access with priority support', + 'enterprise': 'full admin access with SLA guarantees', + 'none': 'no access', + } + return f'{record["name"]} has {permissions.get(record["plan"], "unknown")} ({record["plan"]} plan)' + + +@ai.flow() +async def context_in_generate(input: ContextInput) -> str: + """Pass context to ai.generate() and let a tool read it. 
+ + This is the simplest context pattern: the caller provides context + as a dictionary, and tools receive it via ``ctx.context``. + + Args: + input: Input with user ID. + + Returns: + Model response incorporating user-specific tool output. + """ + response = await ai.generate( + prompt='Look up the current user and describe who they are.', + tools=['get_user_info'], + context={'user': {'id': input.user_id}}, + ) + return response.text + + +@ai.flow() +async def context_in_flow(input: ContextInput, ctx: ActionRunContext) -> str: + """Access context directly inside a flow. + + When a flow is invoked with context (e.g., from the DevUI or another + flow), the ``ActionRunContext`` parameter provides access to it. + + Args: + input: Input with user ID. + ctx: Execution context provided by Genkit. + + Returns: + Description of what context the flow sees. + """ + flow_context = ctx.context + await logger.ainfo('Flow context received', context=flow_context) + + response = await ai.generate( + prompt=(f'The flow received this context: {flow_context}. Also look up the user info using the tool.'), + tools=['get_user_info'], + context={'user': {'id': input.user_id}}, + ) + return response.text + + +@ai.flow() +async def context_current_context(input: ContextInput) -> str: + """Demonstrate Genkit.current_context() static method. + + The tool in this flow uses ``Genkit.current_context()`` instead of + ``ctx.context`` to read the execution context. This is useful when + context is needed deep in a call stack where the ToolRunContext + object isn't directly available. + + Args: + input: Input with user ID. + + Returns: + Model response using the static context accessor. + """ + response = await ai.generate( + prompt='Look up the current user using the static context method and describe them.', + tools=['get_user_via_static'], + context={'user': {'id': input.user_id}}, + ) + return response.text + + +@ai.flow() +async def context_propagation_chain(input: ContextInput) -> str: + """Verify context propagates through nested generate/tool chains. + + This flow calls ai.generate() with context, which triggers a tool. + That tool's response is then fed into a second ai.generate() call + (without explicitly passing context again) to verify that context + is automatically inherited through the ContextVar mechanism. + + Args: + input: Input with user ID. + + Returns: + Combined response showing context survived multiple levels. + """ + first_response = await ai.generate( + prompt='Look up the current user info.', + tools=['get_user_info'], + context={'user': {'id': input.user_id}}, + ) + + second_response = await ai.generate( + prompt=( + f'The user was identified as: {first_response.text}. ' + 'Now check their permissions using the permissions tool.' + ), + tools=['get_user_permissions'], + context={'user': {'id': input.user_id}}, + ) + + return f'User info: {first_response.text}\nPermissions: {second_response.text}' + + +async def main() -> None: + """Main function -- keep alive for Dev UI.""" + await logger.ainfo('Context demo started. 
Open http://localhost:4000 to test flows.') + while True: + await asyncio.sleep(3600) + + +if __name__ == '__main__': + ai.run_main(main()) diff --git a/py/samples/anthropic-hello/LICENSE b/py/samples/framework-dynamic-tools-demo/LICENSE similarity index 100% rename from py/samples/anthropic-hello/LICENSE rename to py/samples/framework-dynamic-tools-demo/LICENSE diff --git a/py/samples/framework-dynamic-tools-demo/README.md b/py/samples/framework-dynamic-tools-demo/README.md new file mode 100644 index 0000000000..93cbdf4fda --- /dev/null +++ b/py/samples/framework-dynamic-tools-demo/README.md @@ -0,0 +1,33 @@ +# Dynamic Tools Demo + +Demonstrates Genkit's dynamic tool creation (`ai.dynamic_tool()`) and +sub-span tracing (`ai.run()`). These features let you create tools at +runtime and wrap arbitrary functions as traceable steps. + +## Quick Start + +```bash +export GEMINI_API_KEY=your-api-key +./run.sh +``` + +Then open the Dev UI at http://localhost:4000. + +## Flows + +| Flow | What It Demonstrates | +|------|---------------------| +| `dynamic_tool_demo` | Creates a tool at runtime with `ai.dynamic_tool()` and calls it | +| `run_step_demo` | Wraps a plain function as a traceable step with `ai.run()` | +| `combined_demo` | Uses both `ai.run()` and `ai.dynamic_tool()` together | + +## Key APIs + +- **`ai.dynamic_tool(name, fn, description=...)`** -- Creates a tool that is NOT globally + registered. Useful for one-off tools or tools generated from user input. +- **`ai.run(name, input, fn)`** -- Wraps a function call as a named step in the + trace. The step appears in the Dev UI trace viewer. + +## Development + +The `run.sh` script uses `watchmedo` for hot reloading on file changes. diff --git a/py/samples/framework-dynamic-tools-demo/pyproject.toml b/py/samples/framework-dynamic-tools-demo/pyproject.toml new file mode 100644 index 0000000000..0f6a6f7a6b --- /dev/null +++ b/py/samples/framework-dynamic-tools-demo/pyproject.toml @@ -0,0 +1,58 @@ +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + +[project] +authors = [{ name = "Google" }] +classifiers = [ + "Development Status :: 3 - Alpha", + "Environment :: Console", + "Environment :: Web Environment", + "Intended Audience :: Developers", + "Operating System :: OS Independent", + "Programming Language :: Python", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Software Development :: Libraries", +] +dependencies = [ + "rich>=13.0.0", + "genkit", + "genkit-plugin-google-genai", + "pydantic>=2.0.0", + "structlog>=24.0.0", + "uvloop>=0.21.0", +] +description = "Framework demo: dynamic tools and ai.run() in Genkit" +license = "Apache-2.0" +name = "framework-dynamic-tools-demo" +readme = "README.md" +requires-python = ">=3.10" +version = "0.1.0" + +[project.optional-dependencies] +dev = ["watchdog>=6.0.0"] + +[build-system] +build-backend = "hatchling.build" +requires = ["hatchling"] + +[tool.hatch.build.targets.wheel] +packages = ["src"] diff --git a/py/samples/framework-dynamic-tools-demo/run.sh b/py/samples/framework-dynamic-tools-demo/run.sh new file mode 100755 index 0000000000..8bfe961b59 --- /dev/null +++ b/py/samples/framework-dynamic-tools-demo/run.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +# Copyright 2026 Google LLC +# SPDX-License-Identifier: Apache-2.0 + +# Dynamic Tools Demo +# +# Demonstrates ai.dynamic_tool() and ai.run() for creating tools on-the-fly +# and wrapping functions as trace spans. +# +# Prerequisites: +# - GEMINI_API_KEY environment variable set +# +# Usage: +# ./run.sh # Start the demo with Dev UI +# ./run.sh --help # Show this help message + +set -euo pipefail + +cd "$(dirname "$0")" +source "../_common.sh" + +print_help() { + print_banner "Dynamic Tools Demo" "🔧" + echo "Usage: ./run.sh [options]" + echo "" + echo "Options:" + echo " --help Show this help message" + echo "" + echo "Environment Variables:" + echo " GEMINI_API_KEY Required. Your Google AI API key" + echo "" + echo "Get an API key from: https://aistudio.google.com/apikey" + print_help_footer +} + +case "${1:-}" in + --help|-h) + print_help + exit 0 + ;; +esac + +print_banner "Dynamic Tools Demo" "🔧" + +check_env_var "GEMINI_API_KEY" "https://aistudio.google.com/apikey" || true + +install_deps + +genkit_start_with_browser -- \ + uv tool run --from watchdog watchmedo auto-restart \ + -d src \ + -d ../../packages \ + -d ../../plugins \ + -p '*.py;*.prompt;*.json' \ + -R \ + -- uv run src/main.py "$@" diff --git a/py/samples/framework-dynamic-tools-demo/src/main.py b/py/samples/framework-dynamic-tools-demo/src/main.py new file mode 100644 index 0000000000..faa6ae0c21 --- /dev/null +++ b/py/samples/framework-dynamic-tools-demo/src/main.py @@ -0,0 +1,228 @@ +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +"""Dynamic tools demo - Runtime tool creation and sub-span tracing in Genkit. + +This sample demonstrates two powerful Genkit features: + +1. ``ai.dynamic_tool()`` -- Create tools at runtime that are NOT globally + registered. Useful for one-off tools, user-generated tools, or tools + whose behavior depends on runtime data. + +2. ``ai.run()`` -- Wrap any function call as a named step (sub-span) + in the trace. The step and its input/output appear in the Dev UI + trace viewer. + +Key Concepts (ELI5):: + + ┌─────────────────────┬────────────────────────────────────────────────────┐ + │ Concept │ ELI5 Explanation │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ ai.dynamic_tool() │ Makes a tool on the spot, like writing a recipe │ + │ │ card right when you need it instead of using a │ + │ │ cookbook. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ ai.run() │ Wraps a regular function so it shows up in the │ + │ │ trace, like adding a bookmark to mark your place. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ @ai.tool() │ The standard way to register a tool (global). │ + │ │ Dynamic tools skip this registration. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ Trace / Span │ A tree of operations recorded during a flow run. │ + │ │ Each ai.run() call creates a child span. │ + └─────────────────────┴────────────────────────────────────────────────────┘ + +Data Flow:: + + ┌─────────────────────────────────────────────────────────────────────────┐ + │ COMBINED DEMO FLOW │ + │ │ + │ combined_demo(input) │ + │ │ │ + │ ├── ai.run("preprocess_step", input, preprocess) │ + │ │ └── Returns preprocessed string │ + │ │ │ + │ ├── ai.dynamic_tool("scaler", scale_fn) │ + │ │ └── Creates tool (not globally registered) │ + │ │ │ + │ ├── scaler.arun(7) │ + │ │ └── Returns 7 * 10 = 70 │ + │ │ │ + │ └── Returns {step_result, tool_result, tool_metadata} │ + └─────────────────────────────────────────────────────────────────────────┘ + +Testing Instructions +==================== +1. Set ``GEMINI_API_KEY`` environment variable. +2. Run ``./run.sh`` from this sample directory. +3. Open the DevUI at http://localhost:4000. +4. Run each flow: + - ``dynamic_tool_demo``: Creates and runs a dynamic multiplier tool. + - ``run_step_demo``: Wraps a function in a trace span. + - ``combined_demo``: Uses both features together. +5. Click "View trace" to see the sub-spans and tool execution. + +See README.md for more details. 
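Minimal Call Shapes
===================
For orientation before the full flows below -- the names and values here
mirror the ``run_step_demo`` and ``combined_demo`` flows defined in this file::

    def uppercase(data: str) -> str:
        return data.upper()

    # Wrap a plain function as a named, traceable step (sub-span).
    step = await ai.run('uppercase_step', 'hello world', uppercase)

    def scale_fn(x: int) -> int:
        return x * 10

    # Create a one-off tool at runtime and invoke it directly.
    scaler = ai.dynamic_tool('scaler', scale_fn, description='Scales input by 10')
    result = await scaler.arun(7)  # result.response == 70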
+""" + +import asyncio +import os + +from pydantic import BaseModel, Field + +from genkit.ai import Genkit +from genkit.core.logging import get_logger +from genkit.plugins.google_genai import GoogleAI +from samples.shared.logging import setup_sample + +setup_sample() + +if 'GEMINI_API_KEY' not in os.environ: + os.environ['GEMINI_API_KEY'] = input('Please enter your GEMINI_API_KEY: ') + +logger = get_logger(__name__) + +ai = Genkit( + plugins=[GoogleAI()], + model='googleai/gemini-2.5-flash', +) + + +class DynamicToolInput(BaseModel): + """Input for dynamic tool demo.""" + + value: int = Field(default=5, description='Value to pass to the dynamic tool') + + +class RunStepInput(BaseModel): + """Input for run step demo.""" + + data: str = Field(default='hello world', description='Data to process in the traced step') + + +class CombinedInput(BaseModel): + """Input for combined demo.""" + + input_val: str = Field(default='Dynamic tools demo', description='Input value for demo') + + +@ai.flow() +async def dynamic_tool_demo(input: DynamicToolInput) -> dict[str, object]: + """Create and invoke a tool at runtime using ai.dynamic_tool(). + + Unlike ``@ai.tool()`` which globally registers a tool, dynamic tools + are created on-the-fly and exist only for the current scope. They + can be called directly or passed to ``ai.generate(tools=[...])``. + + Args: + input: Input with a value to pass to the dynamic tool. + + Returns: + A dict containing the tool result and metadata. + """ + + def multiplier_fn(x: int) -> int: + return x * 10 + + dynamic_multiplier = ai.dynamic_tool( + 'dynamic_multiplier', + multiplier_fn, + description='Multiplies input by 10', + ) + result = await dynamic_multiplier.arun(input.value) + + return { + 'input_value': input.value, + 'tool_result': result.response, + 'tool_name': dynamic_multiplier.metadata.get('name', 'unknown'), + 'tool_metadata': dynamic_multiplier.metadata, + } + + +@ai.flow() +async def run_step_demo(input: RunStepInput) -> dict[str, str]: + """Wrap a plain function as a traceable step using ai.run(). + + ``ai.run(name, input, fn)`` creates a named sub-span in the trace. + The step's input and output are recorded and visible in the Dev UI + trace viewer. + + Args: + input: Input with data to process. + + Returns: + A dict containing the original and processed data. + """ + + def uppercase(data: str) -> str: + return data.upper() + + def reverse(data: str) -> str: + return data[::-1] + + step1 = await ai.run('uppercase_step', input.data, uppercase) + step2 = await ai.run('reverse_step', step1, reverse) + + return { + 'original': input.data, + 'after_uppercase': step1, + 'after_reverse': step2, + } + + +@ai.flow() +async def combined_demo(input: CombinedInput) -> dict[str, object]: + """Combine ai.run() sub-spans with ai.dynamic_tool() in one flow. + + This flow demonstrates using both features together: + 1. ``ai.run()`` wraps a preprocessing function as a trace span. + 2. ``ai.dynamic_tool()`` creates a scaler tool at runtime. + 3. Both appear in the trace as inspectable steps. + + Args: + input: Input with a value string. + + Returns: + A dict with results from both the step and the dynamic tool. 
+ """ + + def preprocess(data: str) -> str: + return f'processed: {data}' + + step_result = await ai.run('preprocess_step', input.input_val, preprocess) + + def scale_fn(x: int) -> int: + return x * 10 + + scaler = ai.dynamic_tool('scaler', scale_fn, description='Scales input by 10') + tool_result = await scaler.arun(7) + + return { + 'step_result': step_result, + 'tool_result': tool_result.response, + 'tool_metadata': scaler.metadata, + } + + +async def main() -> None: + """Main function -- keep alive for Dev UI.""" + await logger.ainfo('Dynamic tools demo started. Open http://localhost:4000 to test flows.') + while True: + await asyncio.sleep(3600) + + +if __name__ == '__main__': + ai.run_main(main()) diff --git a/py/samples/cloudflare-workers-ai-hello/LICENSE b/py/samples/framework-evaluator-demo/LICENSE similarity index 100% rename from py/samples/cloudflare-workers-ai-hello/LICENSE rename to py/samples/framework-evaluator-demo/LICENSE diff --git a/py/samples/evaluator-demo/README.md b/py/samples/framework-evaluator-demo/README.md similarity index 100% rename from py/samples/evaluator-demo/README.md rename to py/samples/framework-evaluator-demo/README.md diff --git a/py/samples/evaluator-demo/data/capra-test.json b/py/samples/framework-evaluator-demo/data/capra-test.json similarity index 100% rename from py/samples/evaluator-demo/data/capra-test.json rename to py/samples/framework-evaluator-demo/data/capra-test.json diff --git a/py/samples/evaluator-demo/data/cat_adoption_questions.jsonl b/py/samples/framework-evaluator-demo/data/cat_adoption_questions.jsonl similarity index 100% rename from py/samples/evaluator-demo/data/cat_adoption_questions.jsonl rename to py/samples/framework-evaluator-demo/data/cat_adoption_questions.jsonl diff --git a/py/samples/evaluator-demo/data/cat_adoption_questions_with_reference.json b/py/samples/framework-evaluator-demo/data/cat_adoption_questions_with_reference.json similarity index 100% rename from py/samples/evaluator-demo/data/cat_adoption_questions_with_reference.json rename to py/samples/framework-evaluator-demo/data/cat_adoption_questions_with_reference.json diff --git a/py/samples/evaluator-demo/data/dataset.json b/py/samples/framework-evaluator-demo/data/dataset.json similarity index 100% rename from py/samples/evaluator-demo/data/dataset.json rename to py/samples/framework-evaluator-demo/data/dataset.json diff --git a/py/samples/evaluator-demo/data/dogfacts.json b/py/samples/framework-evaluator-demo/data/dogfacts.json similarity index 100% rename from py/samples/evaluator-demo/data/dogfacts.json rename to py/samples/framework-evaluator-demo/data/dogfacts.json diff --git a/py/samples/evaluator-demo/docs/cat-handbook.pdf b/py/samples/framework-evaluator-demo/docs/cat-handbook.pdf similarity index 100% rename from py/samples/evaluator-demo/docs/cat-handbook.pdf rename to py/samples/framework-evaluator-demo/docs/cat-handbook.pdf diff --git a/py/samples/evaluator-demo/docs/cat-wiki.pdf b/py/samples/framework-evaluator-demo/docs/cat-wiki.pdf similarity index 100% rename from py/samples/evaluator-demo/docs/cat-wiki.pdf rename to py/samples/framework-evaluator-demo/docs/cat-wiki.pdf diff --git a/py/samples/evaluator-demo/evaluator_demo/__init__.py b/py/samples/framework-evaluator-demo/evaluator_demo/__init__.py similarity index 100% rename from py/samples/evaluator-demo/evaluator_demo/__init__.py rename to py/samples/framework-evaluator-demo/evaluator_demo/__init__.py diff --git a/py/samples/evaluator-demo/evaluator_demo/eval_in_code.py 
b/py/samples/framework-evaluator-demo/evaluator_demo/eval_in_code.py similarity index 100% rename from py/samples/evaluator-demo/evaluator_demo/eval_in_code.py rename to py/samples/framework-evaluator-demo/evaluator_demo/eval_in_code.py diff --git a/py/samples/evaluator-demo/evaluator_demo/genkit_demo.py b/py/samples/framework-evaluator-demo/evaluator_demo/genkit_demo.py similarity index 100% rename from py/samples/evaluator-demo/evaluator_demo/genkit_demo.py rename to py/samples/framework-evaluator-demo/evaluator_demo/genkit_demo.py diff --git a/py/samples/evaluator-demo/evaluator_demo/main.py b/py/samples/framework-evaluator-demo/evaluator_demo/main.py similarity index 96% rename from py/samples/evaluator-demo/evaluator_demo/main.py rename to py/samples/framework-evaluator-demo/evaluator_demo/main.py index 34452ce9f5..a3fb1b9cee 100755 --- a/py/samples/evaluator-demo/evaluator_demo/main.py +++ b/py/samples/framework-evaluator-demo/evaluator_demo/main.py @@ -38,9 +38,9 @@ from collections.abc import Coroutine from typing import Any, cast -from rich.traceback import install as install_rich_traceback +from samples.shared.logging import setup_sample -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() # Import flows so they get registered from evaluator_demo import ( diff --git a/py/samples/evaluator-demo/evaluator_demo/pdf_rag.py b/py/samples/framework-evaluator-demo/evaluator_demo/pdf_rag.py similarity index 100% rename from py/samples/evaluator-demo/evaluator_demo/pdf_rag.py rename to py/samples/framework-evaluator-demo/evaluator_demo/pdf_rag.py diff --git a/py/samples/evaluator-demo/evaluator_demo/setup.py b/py/samples/framework-evaluator-demo/evaluator_demo/setup.py similarity index 100% rename from py/samples/evaluator-demo/evaluator_demo/setup.py rename to py/samples/framework-evaluator-demo/evaluator_demo/setup.py diff --git a/py/samples/evaluator-demo/prompts/hello.prompt b/py/samples/framework-evaluator-demo/prompts/hello.prompt similarity index 100% rename from py/samples/evaluator-demo/prompts/hello.prompt rename to py/samples/framework-evaluator-demo/prompts/hello.prompt diff --git a/py/samples/evaluator-demo/pyproject.toml b/py/samples/framework-evaluator-demo/pyproject.toml similarity index 97% rename from py/samples/evaluator-demo/pyproject.toml rename to py/samples/framework-evaluator-demo/pyproject.toml index 26071c413f..40849b3e7c 100644 --- a/py/samples/evaluator-demo/pyproject.toml +++ b/py/samples/framework-evaluator-demo/pyproject.toml @@ -28,7 +28,7 @@ dependencies = [ "uvloop>=0.22.1", ] description = "Genkit Python Evaluation Demo" -name = "evaluator-demo" +name = "framework-evaluator-demo" requires-python = ">=3.10" version = "0.0.1" diff --git a/py/samples/evaluator-demo/run.sh b/py/samples/framework-evaluator-demo/run.sh similarity index 100% rename from py/samples/evaluator-demo/run.sh rename to py/samples/framework-evaluator-demo/run.sh diff --git a/py/samples/compat-oai-hello/LICENSE b/py/samples/framework-format-demo/LICENSE similarity index 100% rename from py/samples/compat-oai-hello/LICENSE rename to py/samples/framework-format-demo/LICENSE diff --git a/py/samples/format-demo/README.md b/py/samples/framework-format-demo/README.md similarity index 100% rename from py/samples/format-demo/README.md rename to py/samples/framework-format-demo/README.md diff --git a/py/samples/format-demo/pyproject.toml b/py/samples/framework-format-demo/pyproject.toml similarity index 97% rename from py/samples/format-demo/pyproject.toml rename to 
py/samples/framework-format-demo/pyproject.toml index 90d08cf00e..20a376d2ad 100644 --- a/py/samples/format-demo/pyproject.toml +++ b/py/samples/framework-format-demo/pyproject.toml @@ -26,7 +26,7 @@ dependencies = [ ] description = "Sample demonstrating various output formats in Genkit" license = "Apache-2.0" -name = "format-demo" +name = "framework-format-demo" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/format-demo/run.sh b/py/samples/framework-format-demo/run.sh similarity index 100% rename from py/samples/format-demo/run.sh rename to py/samples/framework-format-demo/run.sh diff --git a/py/samples/format-demo/src/main.py b/py/samples/framework-format-demo/src/main.py similarity index 98% rename from py/samples/format-demo/src/main.py rename to py/samples/framework-format-demo/src/main.py index 2b4a23c0bb..e919c3a30e 100644 --- a/py/samples/format-demo/src/main.py +++ b/py/samples/framework-format-demo/src/main.py @@ -59,14 +59,14 @@ from typing import Any, cast from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback from genkit.ai import Genkit from genkit.core.logging import get_logger from genkit.core.typing import OutputConfig from genkit.plugins.google_genai import GoogleAI +from samples.shared.logging import setup_sample -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() logger = get_logger(__name__) diff --git a/py/samples/deepseek-hello/LICENSE b/py/samples/framework-middleware-demo/LICENSE similarity index 100% rename from py/samples/deepseek-hello/LICENSE rename to py/samples/framework-middleware-demo/LICENSE diff --git a/py/samples/framework-middleware-demo/README.md b/py/samples/framework-middleware-demo/README.md new file mode 100644 index 0000000000..9a651e289d --- /dev/null +++ b/py/samples/framework-middleware-demo/README.md @@ -0,0 +1,54 @@ +# Middleware Demo + +Demonstrates Genkit's middleware system using the `use=` parameter on +`ai.generate()`. Middleware intercepts the request/response pipeline, +enabling logging, retries, request modification, and more. + +## Quick Start + +```bash +export GEMINI_API_KEY=your-api-key +./run.sh +``` + +Then open the Dev UI at http://localhost:4000. + +## Flows + +| Flow | What It Demonstrates | +|------|---------------------| +| `logging_demo` | Middleware that logs request metadata and response info | +| `request_modifier_demo` | Middleware that modifies the request before it reaches the model | +| `chained_middleware_demo` | Multiple middleware functions composed in a pipeline | + +## How Middleware Works + +``` +ai.generate(prompt=..., use=[middleware_a, middleware_b]) + | + v + middleware_a(req, ctx, next) + | + v + middleware_b(req, ctx, next) + | + v + Model (actual API call) + | + v + middleware_b returns response + | + v + middleware_a returns response + | + v + Final response to caller +``` + +Middleware functions receive `(req, ctx, next)` and must call `next(req, ctx)` +to pass the request down the chain. They can modify the request before calling +next, or modify the response after. + +## Development + +The `run.sh` script uses `watchmedo` for hot reloading on file changes. 
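## A Minimal Middleware

As a sketch of the shape a middleware function takes (`timing_middleware` is an illustrative name invented for this snippet; the signature and type imports mirror `src/main.py`):

```python
import time

from genkit.blocks.model import ModelMiddlewareNext
from genkit.core.action import ActionRunContext
from genkit.types import GenerateRequest, GenerateResponse


async def timing_middleware(
    req: GenerateRequest,
    ctx: ActionRunContext,
    next_handler: ModelMiddlewareNext,
) -> GenerateResponse:
    """Pass-through middleware that measures model latency."""
    start = time.monotonic()
    # next_handler invokes the next middleware in the chain, or the model itself.
    response = await next_handler(req, ctx)
    print(f'model call took {time.monotonic() - start:.2f}s')
    return response
```

Attach it with `ai.generate(prompt=..., use=[timing_middleware])`.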
diff --git a/py/samples/framework-middleware-demo/pyproject.toml b/py/samples/framework-middleware-demo/pyproject.toml new file mode 100644 index 0000000000..5273eca286 --- /dev/null +++ b/py/samples/framework-middleware-demo/pyproject.toml @@ -0,0 +1,58 @@ +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +[project] +authors = [{ name = "Google" }] +classifiers = [ + "Development Status :: 3 - Alpha", + "Environment :: Console", + "Environment :: Web Environment", + "Intended Audience :: Developers", + "Operating System :: OS Independent", + "Programming Language :: Python", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Software Development :: Libraries", +] +dependencies = [ + "rich>=13.0.0", + "genkit", + "genkit-plugin-google-genai", + "pydantic>=2.0.0", + "structlog>=24.0.0", + "uvloop>=0.21.0", +] +description = "Framework demo: custom middleware in Genkit" +license = "Apache-2.0" +name = "framework-middleware-demo" +readme = "README.md" +requires-python = ">=3.10" +version = "0.1.0" + +[project.optional-dependencies] +dev = ["watchdog>=6.0.0"] + +[build-system] +build-backend = "hatchling.build" +requires = ["hatchling"] + +[tool.hatch.build.targets.wheel] +packages = ["src"] diff --git a/py/samples/framework-middleware-demo/run.sh b/py/samples/framework-middleware-demo/run.sh new file mode 100755 index 0000000000..b491e58f43 --- /dev/null +++ b/py/samples/framework-middleware-demo/run.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# Copyright 2026 Google LLC +# SPDX-License-Identifier: Apache-2.0 + +# Middleware Demo +# +# Demonstrates custom middleware with Genkit's use= parameter. +# +# Prerequisites: +# - GEMINI_API_KEY environment variable set +# +# Usage: +# ./run.sh # Start the demo with Dev UI +# ./run.sh --help # Show this help message + +set -euo pipefail + +cd "$(dirname "$0")" +source "../_common.sh" + +print_help() { + print_banner "Middleware Demo" "🔧" + echo "Usage: ./run.sh [options]" + echo "" + echo "Options:" + echo " --help Show this help message" + echo "" + echo "Environment Variables:" + echo " GEMINI_API_KEY Required. 
Your Google AI API key" + echo "" + echo "Get an API key from: https://aistudio.google.com/apikey" + print_help_footer +} + +case "${1:-}" in + --help|-h) + print_help + exit 0 + ;; +esac + +print_banner "Middleware Demo" "🔧" + +check_env_var "GEMINI_API_KEY" "https://aistudio.google.com/apikey" || true + +install_deps + +genkit_start_with_browser -- \ + uv tool run --from watchdog watchmedo auto-restart \ + -d src \ + -d ../../packages \ + -d ../../plugins \ + -p '*.py;*.prompt;*.json' \ + -R \ + -- uv run src/main.py "$@" diff --git a/py/samples/framework-middleware-demo/src/main.py b/py/samples/framework-middleware-demo/src/main.py new file mode 100644 index 0000000000..3da6983bb1 --- /dev/null +++ b/py/samples/framework-middleware-demo/src/main.py @@ -0,0 +1,262 @@ +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +"""Middleware demo - Custom request/response interception in Genkit. + +This sample demonstrates Genkit's middleware system, which lets you intercept +and modify requests before they reach the model, and inspect or modify +responses before they're returned to the caller. + +Key Concepts (ELI5):: + + ┌─────────────────────┬────────────────────────────────────────────────────┐ + │ Concept │ ELI5 Explanation │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ Middleware │ A function that sits between you and the model. │ + │ │ Like a security guard checking bags at the door. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ use= Parameter │ How you attach middleware to a generate() call. │ + │ │ ``ai.generate(prompt=..., use=[my_middleware])`` │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ next() │ Calls the next middleware or the model itself. │ + │ │ You MUST call it to continue the chain. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ Request Modification│ Change the prompt, add system messages, etc. │ + │ │ before the model sees the request. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ Response Inspection │ Log, validate, or transform the model's response │ + │ │ before returning it to your code. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ Chaining │ Stack multiple middleware in order. │ + │ │ ``use=[log, modify, validate]`` runs all three. 
│ + └─────────────────────┴────────────────────────────────────────────────────┘ + +Data Flow:: + + ┌─────────────────────────────────────────────────────────────────────────┐ + │ MIDDLEWARE PIPELINE │ + │ │ + │ ai.generate(prompt=..., use=[log_mw, modify_mw]) │ + │ │ │ + │ ▼ │ + │ ┌──────────────┐ │ + │ │ log_mw │ Logs request metadata │ + │ │ (before) │ Then calls next(req, ctx) │ + │ └──────┬───────┘ │ + │ │ │ + │ ▼ │ + │ ┌──────────────┐ │ + │ │ modify_mw │ Adds system instruction to request │ + │ │ (before) │ Then calls next(modified_req, ctx) │ + │ └──────┬───────┘ │ + │ │ │ + │ ▼ │ + │ ┌──────────────┐ │ + │ │ Model │ Actual API call │ + │ └──────┬───────┘ │ + │ │ │ + │ ▼ │ + │ modify_mw (after) ─── log_mw (after) ─── Response returned │ + └─────────────────────────────────────────────────────────────────────────┘ + +Testing Instructions +==================== +1. Set ``GEMINI_API_KEY`` environment variable. +2. Run ``./run.sh`` from this sample directory. +3. Open the DevUI at http://localhost:4000. +4. Run each flow and check the server logs for middleware output. + +See README.md for more details. +""" + +import asyncio +import os + +from pydantic import BaseModel, Field + +from genkit.ai import Genkit +from genkit.blocks.model import ModelMiddlewareNext +from genkit.core.action import ActionRunContext +from genkit.core.logging import get_logger +from genkit.plugins.google_genai import GoogleAI +from genkit.types import GenerateRequest, GenerateResponse, Message, Part, Role, TextPart +from samples.shared.logging import setup_sample + +setup_sample() + +if 'GEMINI_API_KEY' not in os.environ: + os.environ['GEMINI_API_KEY'] = input('Please enter your GEMINI_API_KEY: ') + +logger = get_logger(__name__) + +ai = Genkit( + plugins=[GoogleAI()], + model='googleai/gemini-2.5-flash', +) + + +class LoggingInput(BaseModel): + """Input for logging middleware demo.""" + + prompt: str = Field(default='Tell me a joke about programming', description='Prompt to send through middleware') + + +class ModifierInput(BaseModel): + """Input for request modifier middleware demo.""" + + prompt: str = Field(default='Write a haiku', description='Prompt to send (middleware will add style instructions)') + + +class ChainedInput(BaseModel): + """Input for chained middleware demo.""" + + prompt: str = Field(default='Explain recursion', description='Prompt to send through multiple middleware') + + +async def logging_middleware( + req: GenerateRequest, + ctx: ActionRunContext, + next_handler: ModelMiddlewareNext, +) -> GenerateResponse: + """Middleware that logs request and response metadata. + + This is a pass-through middleware that doesn't modify the request + or response -- it only observes and logs. Useful for debugging + and monitoring. + + Args: + req: The generation request about to be sent. + ctx: The action execution context. + next_handler: Calls the next middleware or the model. + + Returns: + The generation response (unmodified). + """ + await logger.ainfo( + 'logging_middleware: request intercepted', + message_count=len(req.messages), + ) + response = await next_handler(req, ctx) + await logger.ainfo( + 'logging_middleware: response received', + finish_reason=response.finish_reason, + ) + return response + + +async def system_instruction_middleware( + req: GenerateRequest, + ctx: ActionRunContext, + next_handler: ModelMiddlewareNext, +) -> GenerateResponse: + """Middleware that prepends a system instruction to every request. + + Demonstrates modifying the request before it reaches the model. 
+    This pattern is useful for enforcing style guidelines, adding
+    safety instructions, or injecting context.
+
+    Args:
+        req: The generation request about to be sent.
+        ctx: The action execution context.
+        next_handler: Calls the next middleware or the model.
+
+    Returns:
+        The generation response.
+    """
+    system_message = Message(
+        role=Role.SYSTEM,
+        content=[
+            Part(root=TextPart(text='Always respond in a concise, professional tone. Keep answers under 100 words.'))
+        ],
+    )
+    modified_messages = [system_message, *req.messages]
+    modified_req = req.model_copy(update={'messages': modified_messages})
+
+    await logger.ainfo('system_instruction_middleware: injected system message')
+    return await next_handler(modified_req, ctx)
+
+
+@ai.flow()
+async def logging_demo(input: LoggingInput) -> str:
+    """Demonstrate a simple logging middleware.
+
+    Check the server logs to see the middleware output. The middleware
+    logs request metadata before the model call and response metadata after.
+
+    Args:
+        input: Input with prompt text.
+
+    Returns:
+        The model's response text.
+    """
+    response = await ai.generate(
+        prompt=input.prompt,
+        use=[logging_middleware],
+    )
+    return response.text
+
+
+@ai.flow()
+async def request_modifier_demo(input: ModifierInput) -> str:
+    """Demonstrate a middleware that modifies the request.
+
+    The middleware injects a system instruction that tells the model to
+    be concise and professional. Compare this with running the same
+    prompt without middleware to see the difference.
+
+    Args:
+        input: Input with prompt text.
+
+    Returns:
+        The model's response text (influenced by injected system message).
+    """
+    response = await ai.generate(
+        prompt=input.prompt,
+        use=[system_instruction_middleware],
+    )
+    return response.text
+
+
+@ai.flow()
+async def chained_middleware_demo(input: ChainedInput) -> str:
+    """Demonstrate multiple middleware chained together.
+
+    The pipeline runs: logging -> system instruction -> model.
+    Both middleware functions run in order: the logging middleware sees the
+    original request on the way in and the final response on the way out,
+    while the system instruction middleware modifies the request before the
+    model call.
+
+    Args:
+        input: Input with prompt text.
+
+    Returns:
+        The model's response text.
+    """
+    response = await ai.generate(
+        prompt=input.prompt,
+        use=[logging_middleware, system_instruction_middleware],
+    )
+    return response.text
+
+
+async def main() -> None:
+    """Main function -- keep alive for Dev UI."""
+    await logger.ainfo('Middleware demo started.
Open http://localhost:4000 to test flows.') + while True: + await asyncio.sleep(3600) + + +if __name__ == '__main__': + ai.run_main(main()) diff --git a/py/samples/evaluator-demo/LICENSE b/py/samples/framework-prompt-demo/LICENSE similarity index 100% rename from py/samples/evaluator-demo/LICENSE rename to py/samples/framework-prompt-demo/LICENSE diff --git a/py/samples/prompt-demo/README.md b/py/samples/framework-prompt-demo/README.md similarity index 100% rename from py/samples/prompt-demo/README.md rename to py/samples/framework-prompt-demo/README.md diff --git a/py/samples/prompt-demo/prompts/_style.prompt b/py/samples/framework-prompt-demo/prompts/_style.prompt similarity index 100% rename from py/samples/prompt-demo/prompts/_style.prompt rename to py/samples/framework-prompt-demo/prompts/_style.prompt diff --git a/py/samples/prompt-demo/prompts/recipe.prompt b/py/samples/framework-prompt-demo/prompts/recipe.prompt similarity index 100% rename from py/samples/prompt-demo/prompts/recipe.prompt rename to py/samples/framework-prompt-demo/prompts/recipe.prompt diff --git a/py/samples/prompt-demo/prompts/story.prompt b/py/samples/framework-prompt-demo/prompts/story.prompt similarity index 100% rename from py/samples/prompt-demo/prompts/story.prompt rename to py/samples/framework-prompt-demo/prompts/story.prompt diff --git a/py/samples/prompt-demo/pyproject.toml b/py/samples/framework-prompt-demo/pyproject.toml similarity index 98% rename from py/samples/prompt-demo/pyproject.toml rename to py/samples/framework-prompt-demo/pyproject.toml index 0d78f02c8c..05ae862011 100644 --- a/py/samples/prompt-demo/pyproject.toml +++ b/py/samples/framework-prompt-demo/pyproject.toml @@ -42,7 +42,7 @@ dependencies = [ ] description = "Genkit prompt demo" license = "Apache-2.0" -name = "prompt-demo" +name = "framework-prompt-demo" requires-python = ">=3.10" version = "0.0.1" diff --git a/py/samples/prompt-demo/run.sh b/py/samples/framework-prompt-demo/run.sh similarity index 100% rename from py/samples/prompt-demo/run.sh rename to py/samples/framework-prompt-demo/run.sh diff --git a/py/samples/prompt-demo/src/main.py b/py/samples/framework-prompt-demo/src/main.py similarity index 78% rename from py/samples/prompt-demo/src/main.py rename to py/samples/framework-prompt-demo/src/main.py index 9e483983b9..db9f8f2edd 100755 --- a/py/samples/prompt-demo/src/main.py +++ b/py/samples/framework-prompt-demo/src/main.py @@ -36,8 +36,8 @@ │ Output Schema │ Rules for what the AI must return. │ │ │ Ensures structured, predictable responses. │ ├─────────────────────┼────────────────────────────────────────────────────┤ - │ Variants │ Different versions of the same prompt. │ - │ │ A/B testing or different use cases. │ + │ Partials │ Reusable prompt snippets (files starting with _). │ + │ │ Like a shared paragraph you paste into prompts. │ ├─────────────────────┼────────────────────────────────────────────────────┤ │ Helpers │ Custom functions usable in templates. │ │ │ {{#list items}}...{{/list}} │ @@ -49,28 +49,34 @@ |-----------------------------------------|-------------------------------------| | Prompt Management (Loading) | `ai = Genkit(..., prompt_dir=...)` | | Prompt Execution | `recipe_prompt(input=...)` | -| Prompt Variants | `get_sticky_prompt(..., variant=...)`| | Custom Helpers | `ai.define_helper('list', ...)` | | Prompt Output Schema Validation | `Recipe.model_validate(...)` | -| Streaming Prompts | `story_prompt.stream()` | - -See README.md for testing instructions. 
+| Streaming Prompts (with partials) | `story_prompt.stream()` | +| Schema Registration | `ai.define_schema('Recipe', Recipe)`| + +Testing Instructions +==================== +1. Set ``GEMINI_API_KEY`` environment variable. +2. Run ``./run.sh`` from this sample directory. +3. Open the DevUI at http://localhost:4000. +4. Run ``chef_flow`` to generate a recipe (structured output). +5. Run ``tell_story`` to stream a story (uses partials + streaming). + +See README.md for more details. """ import os -import weakref from pathlib import Path from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback from genkit.ai import ActionKind, Genkit -from genkit.blocks.prompt import ExecutablePrompt from genkit.core.action import ActionRunContext from genkit.core.logging import get_logger from genkit.plugins.google_genai import GoogleAI +from samples.shared.logging import setup_sample -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() if 'GEMINI_API_KEY' not in os.environ: os.environ['GEMINI_API_KEY'] = input('Please enter your GEMINI_API_KEY: ') @@ -120,31 +126,6 @@ class Recipe(BaseModel): ai.define_schema('Recipe', Recipe) -_sticky_prompts = {} - - -async def get_sticky_prompt(name: str, variant: str | None = None) -> ExecutablePrompt: - """Helper to get a prompt and keep it alive.""" - key = f'{name}:{variant}' if variant else name - if key in _sticky_prompts: - return _sticky_prompts[key] - - prompt = ai.prompt(name, variant=variant) - if isinstance(prompt, weakref.ReferenceType): - ref = prompt - prompt = ref() - if prompt is None: - # Stale reference; retry loading the prompt as the comments suggest. - prompt = ai.prompt(name, variant=variant) - if isinstance(prompt, weakref.ReferenceType): - prompt = prompt() - if prompt is None: - raise RuntimeError(f"Failed to load prompt '{name}' with variant '{variant}' after retry.") - - # Store strong ref - _sticky_prompts[key] = prompt - return prompt - class ChefInput(BaseModel): """Input for the chef flow.""" @@ -167,7 +148,7 @@ async def chef_flow(input: ChefInput) -> Recipe: Recipe(title='Banana Bread', ...) """ await logger.ainfo(f'chef_flow called with input: {input}') - recipe_prompt = await get_sticky_prompt('recipe') + recipe_prompt = ai.prompt('recipe') response = await recipe_prompt(input={'food': input.food}) # Ensure we return a Pydantic model as expected by the type hint and caller @@ -176,27 +157,6 @@ async def chef_flow(input: ChefInput) -> Recipe: return result -@ai.flow(name='robot_chef_flow') -async def robot_chef_flow(input: ChefInput) -> Recipe: - """Generate a robot-themed recipe for the given food. - - Args: - input: Input containing the food item. - - Returns: - A formatted robot recipe. - - Example: - >>> await robot_chef_flow(ChefInput(food='cookie')) - Recipe(title='Robo-Cookie', ...) - """ - await logger.ainfo(f'robot_chef_flow called with input: {input}') - recipe_prompt = await get_sticky_prompt('recipe', variant='robot') - result = Recipe.model_validate((await recipe_prompt(input={'food': input.food})).output) - await logger.ainfo(f'robot_chef_flow result: {result}') - return result - - class StoryInput(BaseModel): """Input for the story flow.""" @@ -220,7 +180,7 @@ async def tell_story(input: StoryInput, ctx: ActionRunContext) -> str: 'Once upon a time...' 
""" await logger.ainfo(f'tell_story called with input: {input}') - story_prompt = await get_sticky_prompt('story') + story_prompt = ai.prompt('story') result = story_prompt.stream(input={'subject': input.subject, 'personality': input.personality}) full_text = '' @@ -250,11 +210,6 @@ async def main() -> None: chef_result = await chef_flow(ChefInput(food='banana bread')) await logger.ainfo('Chef Flow Result', result=chef_result.model_dump()) - # Robot Chef Flow - await logger.ainfo('--- Running Robot Chef Flow ---') - robot_result = await robot_chef_flow(ChefInput(food='cookie')) - await logger.ainfo('Robot Chef Flow Result', result=robot_result) - # Tell Story Flow (Streaming) await logger.ainfo('--- Running Tell Story Flow ---') # To demonstrate streaming, we'll iterate over the streamer if calling directly like a flow would be consumed. diff --git a/py/samples/firestore-retreiver/LICENSE b/py/samples/framework-realtime-tracing-demo/LICENSE similarity index 100% rename from py/samples/firestore-retreiver/LICENSE rename to py/samples/framework-realtime-tracing-demo/LICENSE diff --git a/py/samples/realtime-tracing-demo/README.md b/py/samples/framework-realtime-tracing-demo/README.md similarity index 98% rename from py/samples/realtime-tracing-demo/README.md rename to py/samples/framework-realtime-tracing-demo/README.md index 6caba59176..6b6d3c7c0a 100644 --- a/py/samples/realtime-tracing-demo/README.md +++ b/py/samples/framework-realtime-tracing-demo/README.md @@ -111,7 +111,7 @@ provider.add_span_processor(processor) 2. **Run the demo**: ```bash - cd py/samples/realtime-tracing-demo + cd py/samples/framework-realtime-tracing-demo ./run.sh # This sets GENKIT_ENABLE_REALTIME_TELEMETRY=true ``` diff --git a/py/samples/realtime-tracing-demo/pyproject.toml b/py/samples/framework-realtime-tracing-demo/pyproject.toml similarity index 97% rename from py/samples/realtime-tracing-demo/pyproject.toml rename to py/samples/framework-realtime-tracing-demo/pyproject.toml index 80ba78fb79..7e781e2b09 100644 --- a/py/samples/realtime-tracing-demo/pyproject.toml +++ b/py/samples/framework-realtime-tracing-demo/pyproject.toml @@ -42,7 +42,7 @@ dependencies = [ ] description = "Realtime tracing demo showing live span updates in DevUI" license = "Apache-2.0" -name = "realtime-tracing-demo" +name = "framework-realtime-tracing-demo" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/realtime-tracing-demo/run.sh b/py/samples/framework-realtime-tracing-demo/run.sh similarity index 100% rename from py/samples/realtime-tracing-demo/run.sh rename to py/samples/framework-realtime-tracing-demo/run.sh diff --git a/py/samples/realtime-tracing-demo/src/main.py b/py/samples/framework-realtime-tracing-demo/src/main.py similarity index 98% rename from py/samples/realtime-tracing-demo/src/main.py rename to py/samples/framework-realtime-tracing-demo/src/main.py index a4639ebff7..d391bfd374 100644 --- a/py/samples/realtime-tracing-demo/src/main.py +++ b/py/samples/framework-realtime-tracing-demo/src/main.py @@ -92,7 +92,7 @@ 2. 
**Run the demo**: ```bash - cd py/samples/realtime-tracing-demo + cd py/samples/framework-realtime-tracing-demo ./run.sh # This sets GENKIT_ENABLE_REALTIME_TELEMETRY=true ``` @@ -126,14 +126,14 @@ import sys from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback from genkit.ai import Genkit from genkit.core.logging import get_logger from genkit.core.trace import is_realtime_telemetry_enabled from genkit.plugins.google_genai import GoogleAI +from samples.shared.logging import setup_sample -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() logger = get_logger(__name__) diff --git a/py/samples/flask-hello/LICENSE b/py/samples/framework-restaurant-demo/LICENSE similarity index 100% rename from py/samples/flask-hello/LICENSE rename to py/samples/framework-restaurant-demo/LICENSE diff --git a/py/samples/menu/README.md b/py/samples/framework-restaurant-demo/README.md similarity index 100% rename from py/samples/menu/README.md rename to py/samples/framework-restaurant-demo/README.md diff --git a/py/samples/menu/data/menu.jpeg b/py/samples/framework-restaurant-demo/data/menu.jpeg similarity index 100% rename from py/samples/menu/data/menu.jpeg rename to py/samples/framework-restaurant-demo/data/menu.jpeg diff --git a/py/samples/menu/data/menu.json b/py/samples/framework-restaurant-demo/data/menu.json similarity index 100% rename from py/samples/menu/data/menu.json rename to py/samples/framework-restaurant-demo/data/menu.json diff --git a/py/samples/menu/pyproject.toml b/py/samples/framework-restaurant-demo/pyproject.toml similarity index 98% rename from py/samples/menu/pyproject.toml rename to py/samples/framework-restaurant-demo/pyproject.toml index 38d75efb02..7c3940c332 100644 --- a/py/samples/menu/pyproject.toml +++ b/py/samples/framework-restaurant-demo/pyproject.toml @@ -46,7 +46,7 @@ dependencies = [ ] description = "menu Genkit sample" license = "Apache-2.0" -name = "menu" +name = "framework-restaurant-demo" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/menu/run.sh b/py/samples/framework-restaurant-demo/run.sh similarity index 100% rename from py/samples/menu/run.sh rename to py/samples/framework-restaurant-demo/run.sh diff --git a/py/samples/menu/src/__init__.py b/py/samples/framework-restaurant-demo/src/__init__.py similarity index 100% rename from py/samples/menu/src/__init__.py rename to py/samples/framework-restaurant-demo/src/__init__.py diff --git a/py/samples/menu/src/case_01/__init__.py b/py/samples/framework-restaurant-demo/src/case_01/__init__.py similarity index 100% rename from py/samples/menu/src/case_01/__init__.py rename to py/samples/framework-restaurant-demo/src/case_01/__init__.py diff --git a/py/samples/menu/src/case_01/example.json b/py/samples/framework-restaurant-demo/src/case_01/example.json similarity index 100% rename from py/samples/menu/src/case_01/example.json rename to py/samples/framework-restaurant-demo/src/case_01/example.json diff --git a/py/samples/menu/src/case_01/prompts.py b/py/samples/framework-restaurant-demo/src/case_01/prompts.py similarity index 100% rename from py/samples/menu/src/case_01/prompts.py rename to py/samples/framework-restaurant-demo/src/case_01/prompts.py diff --git a/py/samples/menu/src/case_02/__init__.py b/py/samples/framework-restaurant-demo/src/case_02/__init__.py similarity index 100% rename from py/samples/menu/src/case_02/__init__.py rename to py/samples/framework-restaurant-demo/src/case_02/__init__.py 
diff --git a/py/samples/menu/src/case_02/example.json b/py/samples/framework-restaurant-demo/src/case_02/example.json similarity index 100% rename from py/samples/menu/src/case_02/example.json rename to py/samples/framework-restaurant-demo/src/case_02/example.json diff --git a/py/samples/menu/src/case_02/flows.py b/py/samples/framework-restaurant-demo/src/case_02/flows.py similarity index 100% rename from py/samples/menu/src/case_02/flows.py rename to py/samples/framework-restaurant-demo/src/case_02/flows.py diff --git a/py/samples/menu/src/case_02/prompts.py b/py/samples/framework-restaurant-demo/src/case_02/prompts.py similarity index 100% rename from py/samples/menu/src/case_02/prompts.py rename to py/samples/framework-restaurant-demo/src/case_02/prompts.py diff --git a/py/samples/menu/src/case_02/tools.py b/py/samples/framework-restaurant-demo/src/case_02/tools.py similarity index 100% rename from py/samples/menu/src/case_02/tools.py rename to py/samples/framework-restaurant-demo/src/case_02/tools.py diff --git a/py/samples/menu/src/case_03/__init__.py b/py/samples/framework-restaurant-demo/src/case_03/__init__.py similarity index 100% rename from py/samples/menu/src/case_03/__init__.py rename to py/samples/framework-restaurant-demo/src/case_03/__init__.py diff --git a/py/samples/menu/src/case_03/chats.py b/py/samples/framework-restaurant-demo/src/case_03/chats.py similarity index 100% rename from py/samples/menu/src/case_03/chats.py rename to py/samples/framework-restaurant-demo/src/case_03/chats.py diff --git a/py/samples/menu/src/case_03/example.json b/py/samples/framework-restaurant-demo/src/case_03/example.json similarity index 100% rename from py/samples/menu/src/case_03/example.json rename to py/samples/framework-restaurant-demo/src/case_03/example.json diff --git a/py/samples/menu/src/case_03/flows.py b/py/samples/framework-restaurant-demo/src/case_03/flows.py similarity index 100% rename from py/samples/menu/src/case_03/flows.py rename to py/samples/framework-restaurant-demo/src/case_03/flows.py diff --git a/py/samples/menu/src/case_03/prompts.py b/py/samples/framework-restaurant-demo/src/case_03/prompts.py similarity index 100% rename from py/samples/menu/src/case_03/prompts.py rename to py/samples/framework-restaurant-demo/src/case_03/prompts.py diff --git a/py/samples/menu/src/case_04/__init__.py b/py/samples/framework-restaurant-demo/src/case_04/__init__.py similarity index 100% rename from py/samples/menu/src/case_04/__init__.py rename to py/samples/framework-restaurant-demo/src/case_04/__init__.py diff --git a/py/samples/menu/src/case_04/example.indexMenuItems.json b/py/samples/framework-restaurant-demo/src/case_04/example.indexMenuItems.json similarity index 100% rename from py/samples/menu/src/case_04/example.indexMenuItems.json rename to py/samples/framework-restaurant-demo/src/case_04/example.indexMenuItems.json diff --git a/py/samples/menu/src/case_04/example.menuQuestion.json b/py/samples/framework-restaurant-demo/src/case_04/example.menuQuestion.json similarity index 100% rename from py/samples/menu/src/case_04/example.menuQuestion.json rename to py/samples/framework-restaurant-demo/src/case_04/example.menuQuestion.json diff --git a/py/samples/menu/src/case_04/flows.py b/py/samples/framework-restaurant-demo/src/case_04/flows.py similarity index 100% rename from py/samples/menu/src/case_04/flows.py rename to py/samples/framework-restaurant-demo/src/case_04/flows.py diff --git a/py/samples/menu/src/case_04/prompts.py 
b/py/samples/framework-restaurant-demo/src/case_04/prompts.py similarity index 100% rename from py/samples/menu/src/case_04/prompts.py rename to py/samples/framework-restaurant-demo/src/case_04/prompts.py diff --git a/py/samples/menu/src/case_05/__init__.py b/py/samples/framework-restaurant-demo/src/case_05/__init__.py similarity index 100% rename from py/samples/menu/src/case_05/__init__.py rename to py/samples/framework-restaurant-demo/src/case_05/__init__.py diff --git a/py/samples/menu/src/case_05/example.visualMenuQuestion.json b/py/samples/framework-restaurant-demo/src/case_05/example.visualMenuQuestion.json similarity index 100% rename from py/samples/menu/src/case_05/example.visualMenuQuestion.json rename to py/samples/framework-restaurant-demo/src/case_05/example.visualMenuQuestion.json diff --git a/py/samples/menu/src/case_05/flows.py b/py/samples/framework-restaurant-demo/src/case_05/flows.py similarity index 100% rename from py/samples/menu/src/case_05/flows.py rename to py/samples/framework-restaurant-demo/src/case_05/flows.py diff --git a/py/samples/menu/src/case_05/prompts.py b/py/samples/framework-restaurant-demo/src/case_05/prompts.py similarity index 100% rename from py/samples/menu/src/case_05/prompts.py rename to py/samples/framework-restaurant-demo/src/case_05/prompts.py diff --git a/py/samples/menu/src/constants.py b/py/samples/framework-restaurant-demo/src/constants.py similarity index 100% rename from py/samples/menu/src/constants.py rename to py/samples/framework-restaurant-demo/src/constants.py diff --git a/py/samples/menu/src/main.py b/py/samples/framework-restaurant-demo/src/main.py similarity index 97% rename from py/samples/menu/src/main.py rename to py/samples/framework-restaurant-demo/src/main.py index 2874e0d38e..a1b21806cb 100755 --- a/py/samples/menu/src/main.py +++ b/py/samples/framework-restaurant-demo/src/main.py @@ -52,9 +52,9 @@ # Import all of the example prompts and flows to ensure they are registered import asyncio -from rich.traceback import install as install_rich_traceback +from samples.shared.logging import setup_sample -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() # Import case modules to register flows and prompts with the ai instance from case_01 import prompts as case_01_prompts # noqa: F401 diff --git a/py/samples/menu/src/menu_ai.py b/py/samples/framework-restaurant-demo/src/menu_ai.py similarity index 100% rename from py/samples/menu/src/menu_ai.py rename to py/samples/framework-restaurant-demo/src/menu_ai.py diff --git a/py/samples/menu/src/menu_schemas.py b/py/samples/framework-restaurant-demo/src/menu_schemas.py similarity index 100% rename from py/samples/menu/src/menu_schemas.py rename to py/samples/framework-restaurant-demo/src/menu_schemas.py diff --git a/py/samples/format-demo/LICENSE b/py/samples/framework-tool-interrupts/LICENSE similarity index 100% rename from py/samples/format-demo/LICENSE rename to py/samples/framework-tool-interrupts/LICENSE diff --git a/py/samples/tool-interrupts/README.md b/py/samples/framework-tool-interrupts/README.md similarity index 100% rename from py/samples/tool-interrupts/README.md rename to py/samples/framework-tool-interrupts/README.md diff --git a/py/samples/tool-interrupts/pyproject.toml b/py/samples/framework-tool-interrupts/pyproject.toml similarity index 98% rename from py/samples/tool-interrupts/pyproject.toml rename to py/samples/framework-tool-interrupts/pyproject.toml index 94e4bf21f1..83b44358f2 100644 --- 
a/py/samples/tool-interrupts/pyproject.toml +++ b/py/samples/framework-tool-interrupts/pyproject.toml @@ -41,7 +41,7 @@ dependencies = [ ] description = "Tool interrupts sample" license = "Apache-2.0" -name = "tool-interrupts" +name = "framework-tool-interrupts" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/tool-interrupts/run.sh b/py/samples/framework-tool-interrupts/run.sh similarity index 100% rename from py/samples/tool-interrupts/run.sh rename to py/samples/framework-tool-interrupts/run.sh diff --git a/py/samples/tool-interrupts/src/main.py b/py/samples/framework-tool-interrupts/src/main.py similarity index 98% rename from py/samples/tool-interrupts/src/main.py rename to py/samples/framework-tool-interrupts/src/main.py index 9ecbb1309f..9da7d956a8 100755 --- a/py/samples/tool-interrupts/src/main.py +++ b/py/samples/framework-tool-interrupts/src/main.py @@ -89,7 +89,6 @@ import os from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback from genkit.ai import ( Genkit, @@ -98,8 +97,9 @@ ) from genkit.plugins.google_genai import GoogleAI from genkit.plugins.google_genai.models import gemini +from samples.shared.logging import setup_sample -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() ai = Genkit( plugins=[GoogleAI()], diff --git a/py/samples/google-genai-image/README.md b/py/samples/google-genai-image/README.md deleted file mode 100644 index c23e958a64..0000000000 --- a/py/samples/google-genai-image/README.md +++ /dev/null @@ -1,88 +0,0 @@ -# Google Gemini Image Generation - -This sample uses the Gemini API for image generation. This sample uses the -experimental Gemini model, which is available for now only in the Gemini API, -not in Vertex AI api. If you need to run it on Vertex AI, please, refer to -the Imagen sample. - -Prerequisites: -* The `genkit` package. - -### How to Get Your Gemini API Key - -To use the Google GenAI plugin, you need a Gemini API key. - -1. **Visit AI Studio**: Go to [Google AI Studio](https://aistudio.google.com/). -2. **Create API Key**: Click on "Get API key" and create a key in a new or existing Google Cloud project. - -For more details, check out the [official documentation](https://ai.google.dev/gemini-api/docs/api-key). - -To run this sample: - -1. Install the `genkit` package. -2. Set the `GEMINI_API_KEY` environment variable to your Gemini API key. - -```bash -export GEMINI_API_KEY= -``` - -### Monitoring and Running - -For an enhanced development experience, use the provided `run.sh` script to start the sample with automatic reloading: - -```bash -./run.sh -``` - -This script uses `watchmedo` to monitor changes in: -- `src/` (Python logic) -- `../../packages` (Genkit core) -- `../../plugins` (Genkit plugins) -- File patterns: `*.py`, `*.prompt`, `*.json` - -Changes will automatically trigger a restart of the sample. You can also pass command-line arguments directly to the script, e.g., `./run.sh --some-flag`. - -## Run the sample - -TODO - -```bash -uv run src/main.py -``` - -## Testing This Demo - -1. **Prerequisites**: - ```bash - export GEMINI_API_KEY=your_api_key - ``` - Or the demo will prompt for the key interactively. - -2. **Run the demo**: - ```bash - cd py/samples/google-genai-image - ./run.sh - ``` - -3. **Open DevUI** at http://localhost:4000 - -4. 
**Test image generation**: - - [ ] `draw_image_with_gemini` - Generate an image from text - - [ ] `generate_images` - Multi-modal image generation - - [ ] Check output is a valid image (data URI) - -5. **Test image description**: - - [ ] `describe_image_with_gemini` - Describe an input image - - [ ] Verify description matches image content - -6. **Test image editing**: - - [ ] `gemini_image_editing` - Edit/modify existing images - -7. **Test video** (Veo): - - [ ] `photo_move_veo` - Generate video from image - - [ ] Note: Video generation may take longer - -8. **Expected behavior**: - - Images returned as base64 data URIs - - Descriptions are accurate to image content - - Edits preserve context while making changes diff --git a/py/samples/google-genai-image/pyproject.toml b/py/samples/google-genai-image/pyproject.toml deleted file mode 100644 index dbe2e7359b..0000000000 --- a/py/samples/google-genai-image/pyproject.toml +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: Apache-2.0 - -[project] -authors = [{ name = "Google" }] -classifiers = [ - "Development Status :: 3 - Alpha", - "Environment :: Console", - "Environment :: Web Environment", - "Intended Audience :: Developers", - "Operating System :: OS Independent", - "Programming Language :: Python", - "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Programming Language :: Python :: 3.13", - "Programming Language :: Python :: 3.14", - "Topic :: Scientific/Engineering :: Artificial Intelligence", - "Topic :: Software Development :: Libraries", -] -dependencies = [ - "rich>=13.0.0", - "genkit", - "genkit-plugin-google-genai", - "google-genai", - "pillow", - "pydantic>=2.10.5", - "uvloop>=0.21.0", -] -description = "Vision API and Image Generation example" -license = "Apache-2.0" -name = "google-genai-image" -readme = "README.md" -requires-python = ">=3.10" -version = "0.1.0" - -[project.optional-dependencies] -dev = ["watchdog>=6.0.0"] - -[build-system] -build-backend = "hatchling.build" -requires = ["hatchling"] - -[tool.hatch.build.targets.wheel] -packages = ["src/google_genai_image"] diff --git a/py/samples/google-genai-image/src/main.py b/py/samples/google-genai-image/src/main.py deleted file mode 100755 index 61f58eed2c..0000000000 --- a/py/samples/google-genai-image/src/main.py +++ /dev/null @@ -1,399 +0,0 @@ -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: Apache-2.0 - -"""Gemini image generation sample - Create and edit images with AI. - -This sample demonstrates Gemini's native image generation and editing -capabilities, including text-to-image, image description, and Veo video. - -Key Concepts (ELI5):: - - ┌─────────────────────┬────────────────────────────────────────────────────┐ - │ Concept │ ELI5 Explanation │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ Text-to-Image │ Type words, get a picture. "A sunset over ocean" │ - │ │ → AI creates that image for you. │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ Image-to-Text │ Show AI an image, it describes what's in it. │ - │ │ Like a friend explaining a photo. │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ Inpainting │ Fix or change parts of an existing image. │ - │ │ "Remove the person, add a tree." │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ Outpainting │ Extend an image beyond its borders. │ - │ │ Make a portrait into a landscape. │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ Veo │ Google's video generation AI. Same as images, │ - │ │ but creates video clips instead. │ - └─────────────────────┴────────────────────────────────────────────────────┘ - -Key Features -============ -| Feature Description | Example Function / Code Snippet | -|-----------------------------------------|-------------------------------------| -| Plugin Initialization | `ai = Genkit(plugins=[GoogleAI()])` | -| Default Model Configuration | `ai = Genkit(model=...)` | -| Text-to-Image Generation | `draw_image_with_gemini` | -| Image-to-Text (Description) | `describe_image_with_gemini` | -| Multimodal Prompting | `generate_images` | -| Image Editing (Inpainting/Outpainting) | `gemini_image_editing` | -| Video Generation (Veo) | `photo_move_veo` | -| Media Resolution Control | `gemini_media_resolution` | - -See README.md for testing instructions. 
-""" - -import asyncio -import base64 -import logging -import os -import pathlib - -from google import genai -from google.genai import types as genai_types -from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback - -from genkit.ai import Genkit -from genkit.blocks.model import GenerateResponseWrapper -from genkit.core.action import ActionRunContext -from genkit.core.logging import get_logger -from genkit.plugins.google_genai import ( - GeminiConfigSchema, - GeminiImageConfigSchema, - GoogleAI, -) -from genkit.types import ( - GenerationCommonConfig, - Media, - MediaPart, - Message, - Metadata, - Part, - Role, - TextPart, -) - -install_rich_traceback(show_locals=True, width=120, extra_lines=3) - -logger = get_logger(__name__) - -if 'GEMINI_API_KEY' not in os.environ: - os.environ['GEMINI_API_KEY'] = input('Please enter your GEMINI_API_KEY: ') - -ai = Genkit( - plugins=[GoogleAI()], - model='googleai/gemini-3-pro-image-preview', -) - - -class DrawImageInput(BaseModel): - """Input for image drawing flow.""" - - prompt: str = Field(default='Draw a cat in a hat.', description='Image prompt') - - -class GenerateImagesInput(BaseModel): - """Input for image generation flow.""" - - name: str = Field(default='a fluffy cat', description='Subject to generate images about') - - -@ai.flow() -async def draw_image_with_gemini(input: DrawImageInput) -> GenerateResponseWrapper: - """Draw an image. - - Args: - input: Input with image prompt. - - Returns: - The image. - """ - return await ai.generate( - prompt=input.prompt, - config={'response_modalities': ['Text', 'Image']}, - model='googleai/gemini-2.5-flash-image', - ) - - -@ai.flow() -async def describe_image_with_gemini(data: str = '') -> str: - """Describe an image. - - Args: - data: The image data as a data URI (e.g., 'data:image/jpeg;base64,...'). - - Returns: - The description of the image. - """ - if not data: - try: - current_dir = pathlib.Path(pathlib.Path(__file__).resolve()).parent - image_path = os.path.join(current_dir, '..', 'image.jpg') - with pathlib.Path(image_path).open('rb') as image_file: - buffer = image_file.read() - img_base64 = base64.b64encode(buffer).decode('utf-8') - data = f'data:image/jpeg;base64,{img_base64}' - except FileNotFoundError as e: - raise ValueError("Default image 'image.jpg' not found. Please provide image data.") from e - - if not (data.startswith('data:') and ',' in data): - raise ValueError(f'Expected a data URI (e.g., "data:image/jpeg;base64,..."), but got: {data[:50]}...') - - result = await ai.generate( - messages=[ - Message( - role=Role.USER, - content=[ - Part(root=TextPart(text='What is shown in this image?')), - Part(root=MediaPart(media=Media(content_type='image/jpeg', url=data))), - ], - ), - ], - model='googleai/gemini-3-flash-preview', - ) - return result.text - - -@ai.flow() -async def generate_images( - input: GenerateImagesInput, - ctx: ActionRunContext | None = None, -) -> GenerateResponseWrapper: - """Generate images for the given name. - - Args: - input: Input with subject to generate images about. - ctx: the context of the tool - - Returns: - The generated response with a function. 
- """ - return await ai.generate( - model='googleai/gemini-3-pro-image-preview', - prompt=f'tell me about {input.name} with photos', - config=GeminiConfigSchema.model_validate({ - 'response_modalities': ['text', 'image'], - 'api_version': 'v1alpha', - }).model_dump(exclude_none=True), - ) - - -@ai.tool(name='screenshot') -def screenshot() -> dict: - """Takes a screenshot.""" - room_path = pathlib.Path(__file__).parent.parent / 'my_room.png' - with pathlib.Path(room_path).open('rb') as f: - room_b64 = base64.b64encode(f.read()).decode('utf-8') - - return { - 'output': 'success', - 'content': [{'media': {'url': f'data:image/png;base64,{room_b64}', 'contentType': 'image/png'}}], - } - - -@ai.flow() -async def multipart_tool_calling() -> str: - """Multipart tool calling.""" - response = await ai.generate( - model='googleai/gemini-3-pro-preview', - tools=['screenshot'], - config=GenerationCommonConfig(temperature=1), - prompt="Tell me what I'm seeing on the screen.", - ) - return response.text - - -@ai.flow() -async def gemini_image_editing() -> Media | None: - """Image editing with Gemini.""" - plant_path = pathlib.Path(__file__).parent.parent / 'palm_tree.png' - room_path = pathlib.Path(__file__).parent.parent / 'my_room.png' - - with pathlib.Path(plant_path).open('rb') as f: - plant_b64 = base64.b64encode(f.read()).decode('utf-8') - with pathlib.Path(room_path).open('rb') as f: - room_b64 = base64.b64encode(f.read()).decode('utf-8') - - response = await ai.generate( - model='googleai/gemini-3-pro-image-preview', - prompt=[ - Part(root=TextPart(text='add the plant to my room')), - Part(root=MediaPart(media=Media(url=f'data:image/png;base64,{plant_b64}'))), - Part(root=MediaPart(media=Media(url=f'data:image/png;base64,{room_b64}'))), - ], - config=GeminiImageConfigSchema.model_validate({ - 'response_modalities': ['TEXT', 'IMAGE'], - 'image_config': {'aspect_ratio': '1:1'}, - 'api_version': 'v1alpha', - }).model_dump(exclude_none=True), - ) - for part in response.message.content if response.message else []: - if isinstance(part.root, MediaPart): - return part.root.media - - return None - - -@ai.flow() -async def nano_banana_pro() -> Media | None: - """Nano banana pro config.""" - response = await ai.generate( - model='googleai/gemini-3-pro-image-preview', - prompt='Generate a picture of a sunset in the mountains by a lake', - config={ - 'response_modalities': ['TEXT', 'IMAGE'], - 'image_config': { - 'aspect_ratio': '21:9', - 'image_size': '4K', - }, - 'api_version': 'v1alpha', - }, - ) - for part in response.message.content if response.message else []: - if isinstance(part.root, MediaPart): - return part.root.media - return None - - -@ai.flow() -async def photo_move_veo(_: object, context: ActionRunContext | None = None) -> object: - """An example of using Ver 3 model to make a static photo move.""" - # Find photo.jpg (or my_room.png) - room_path = pathlib.Path(__file__).parent.parent / 'my_room.png' - if not room_path.exists(): - # Fallback search - room_path = pathlib.Path('samples/google-genai-hello/src/my_room.png') - if not room_path.exists(): - room_path = pathlib.Path('my_room.png') - - encoded_image = '' - if room_path.exists(): - with pathlib.Path(room_path).open('rb') as f: - encoded_image = base64.b64encode(f.read()).decode('utf-8') - else: - # Fallback dummy - encoded_image = ( - 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg==' - ) - - api_key = os.environ.get('GEMINI_API_KEY') - if not api_key: - raise ValueError('GEMINI_API_KEY not 
set') - - # Use v1alpha for Veo - client = genai.Client(api_key=api_key, http_options={'api_version': 'v1alpha'}) - - if context: - context.send_chunk('Starting generation with veo-3.0-generate-001...') - - try: - operation = await client.aio.models.generate_videos( - model='veo-3.0-generate-001', - prompt='make the subject in the photo move', - image=genai_types.Image(image_bytes=base64.b64decode(encoded_image), mime_type='image/png'), - config={ - # 'aspect_ratio': '9:16', - }, - ) - - if not operation: - raise ValueError('Expected operation to be returned') - - while not operation.done: - op_id = operation.name.split('/')[-1] if operation.name else 'unknown' - if context: - context.send_chunk(f'check status of operation {op_id}') - - # Poll - operation = await client.aio.operations.get(operation) - await asyncio.sleep(5) - - if operation.error: - error_msg = getattr(operation.error, 'message', str(operation.error)) - if context: - context.send_chunk(f'Error: {error_msg}') - raise ValueError(f'Failed to generate video: {error_msg}') - - # Done - result_info = 'Video generated successfully.' - if hasattr(operation, 'result') and operation.result: - generated_videos = getattr(operation.result, 'generated_videos', None) - if generated_videos: - vid = generated_videos[0] - if vid.video and vid.video.uri: - result_info += f' URI: {vid.video.uri}' - - if context: - context.send_chunk(f'Done! {result_info}') - - return operation - - except Exception as e: - raise ValueError(f'Flow failed: {e}') from e - - -@ai.flow() -async def gemini_media_resolution() -> str: - """Media resolution.""" - # Placeholder base64 for sample - plant_path = pathlib.Path(__file__).parent.parent / 'palm_tree.png' - with pathlib.Path(plant_path).open('rb') as f: - plant_b64 = base64.b64encode(f.read()).decode('utf-8') - response = await ai.generate( - model='googleai/gemini-3-pro-image-preview', - prompt=[ - Part(root=TextPart(text='What is in this picture?')), - Part( - root=MediaPart( - media=Media(url=f'data:image/png;base64,{plant_b64}'), - metadata=Metadata({'mediaResolution': {'level': 'MEDIA_RESOLUTION_HIGH'}}), - ) - ), - ], - config={'api_version': 'v1alpha'}, - ) - return response.text - - -@ai.flow() -async def multimodal_input() -> str: - """Multimodal input.""" - photo_path = pathlib.Path(__file__).parent.parent / 'photo.jpg' - with pathlib.Path(photo_path).open('rb') as f: - photo_b64 = base64.b64encode(f.read()).decode('utf-8') - - response = await ai.generate( - model='googleai/gemini-3-pro-image-preview', - prompt=[ - Part(root=TextPart(text='describe this photo')), - Part(root=MediaPart(media=Media(url=f'data:image/jpeg;base64,{photo_b64}', content_type='image/jpeg'))), - ], - ) - return response.text - - -async def main() -> None: - """Main function.""" - logging.basicConfig(level=logging.INFO) - logger = logging.getLogger(__name__) - logger.info('Genkit server running. Press Ctrl+C to stop.') - # Keep the process alive for Dev UI - await asyncio.Event().wait() - - -if __name__ == '__main__': - ai.run_main(main()) diff --git a/py/samples/ollama-hello/README.md b/py/samples/ollama-hello/README.md deleted file mode 100644 index 5223563a68..0000000000 --- a/py/samples/ollama-hello/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# Hello Ollama - -## Prerequisites - -- **Ollama** - a local AI model server, which is used to handle embeddings and generate responses. 
- -### Quick Start - -The simplest way to run this sample is using the included `run.sh` script, which handles installation, server startup, and model pulling automatically: - -```bash -./run.sh -``` - -### Monitoring and Running - -For an enhanced development experience, use the provided `run.sh` script to start the sample with automatic reloading: - -```bash -./run.sh -``` - -This script uses `watchmedo` to monitor changes in: -- `src/` (Python logic) -- `../../packages` (Genkit core) -- `../../plugins` (Genkit plugins) -- File patterns: `*.py`, `*.prompt`, `*.json` - -Changes will automatically trigger a restart of the sample. You can also pass command-line arguments directly to the script, e.g., `./run.sh --some-flag`. - -### Manual Setup - -If you prefer to set up manually: - -1. **Install Ollama**: Download from [ollama.com](https://ollama.com/download). -2. **Start the server**: - ```bash - ollama serve - ``` -3. **Pull models**: - ```bash - ollama pull mistral-nemo:latest - ollama pull gemma3:latest - ``` -4. **Run the sample**: - ```bash - genkit start -- uv run src/main.py - ``` - -## Testing This Demo - -1. **Open DevUI** at http://localhost:4000 - -2. **Test basic flows**: - - [ ] `say_hi` - Simple generation with gemma3 - - [ ] `say_hi_stream` - Streaming response - - [ ] `say_hi_constrained` - Constrained output - -3. **Test tools** (requires mistral-nemo): - - [ ] `calculate_gablorken` - Tool calling demo - -4. **Notes**: - - gemma2:latest does NOT support tool calling - - Use mistral-nemo for tool-based flows - - First run may be slow (model loading) - -5. **Expected behavior**: - - Responses generated locally (no API calls) - - Streaming shows incremental output - - Tools work with compatible models only diff --git a/py/samples/ollama-hello/src/main.py b/py/samples/ollama-hello/src/main.py deleted file mode 100755 index f56b436c3b..0000000000 --- a/py/samples/ollama-hello/src/main.py +++ /dev/null @@ -1,442 +0,0 @@ -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: Apache-2.0 - -"""Ollama hello sample - Local LLM inference with Genkit. - -This sample demonstrates how to use Ollama for local LLM inference with Genkit, -enabling offline AI capabilities without external API dependencies. - -See README.md for testing instructions. - -Key Concepts (ELI5):: - - ┌─────────────────────┬────────────────────────────────────────────────────┐ - │ Concept │ ELI5 Explanation │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ Ollama │ Software that runs AI on YOUR computer. No cloud │ - │ │ needed - your data stays private! │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ Local LLM │ An AI that runs offline on your machine. │ - │ │ Like having a mini ChatGPT at home. │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ Gemma │ Google's open-source model. Free to run locally. │ - │ │ Good for general tasks and coding help. 
│ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ Mistral │ Another open-source model. Good at reasoning │ - │ │ and supports tool calling. │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ ollama pull │ Downloads a model. Run "ollama pull gemma3" │ - │ │ before using it in your code. │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ localhost:11434 │ Where Ollama listens. Your code talks to this │ - │ │ address to use local models. │ - └─────────────────────┴────────────────────────────────────────────────────┘ - -Key Features -============ -| Feature Description | Example Function / Code Snippet | -|----------------------------------------------------------|----------------------------------------| -| Plugin Initialization | `ai = Genkit(plugins=[Ollama()])` | -| Default Model Configuration | `ai = Genkit(model=...)` | -| Defining Flows | `@ai.flow()` decorator (multiple uses) | -| Defining Tools | `@ai.tool()` decorator (multiple uses) | -| Tool Input Schema (Pydantic) | `GablorkenInput` | -| Simple Generation (Prompt String) | `say_hi` | -| Streaming Generation | `say_hi_stream` | -| Generation with Messages (`Message`, `Role`, `TextPart`) | `say_hi_constrained` | -| Generation with Tools | `calculate_gablorken` | -| Tool Response Handling | `say_hi_constrained` | -| Code Generation | `code_flow` | -""" - -from typing import Any, cast - -from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback - -from genkit.ai import Genkit, Output -from genkit.core.action import ActionRunContext -from genkit.core.logging import get_logger -from genkit.plugins.ollama import Ollama, ollama_name -from genkit.plugins.ollama.models import ( - ModelDefinition, -) - -install_rich_traceback(show_locals=True, width=120, extra_lines=3) - -logger = get_logger(__name__) - -# Model can be pulled with `ollama pull *LLM_VERSION*` -GEMMA_MODEL = 'gemma3:latest' - -# NOTE: gemma2:latest does not support tools calling as of 12.03.2025 -# temporary using mistral-nemo instead. -MISTRAL_MODEL = 'mistral-nemo:latest' - -# Run your ollama models with `ollama run *MODEL_NAME*` -# e.g. `ollama run gemma3:latest` - -ai = Genkit( - plugins=[ - Ollama( - models=[ - ModelDefinition(name=GEMMA_MODEL), - ModelDefinition(name=MISTRAL_MODEL), - ], - ) - ], - model=ollama_name(GEMMA_MODEL), -) - - -class CurrencyExchangeInput(BaseModel): - """Currency exchange flow input schema.""" - - amount: float = Field(description='Amount to convert', default=100) - from_curr: str = Field(description='Source currency code', default='USD') - to_curr: str = Field(description='Target currency code', default='EUR') - - -class CurrencyInput(BaseModel): - """Currency conversion input schema.""" - - amount: float = Field(description='Amount to convert', default=100) - from_currency: str = Field(description='Source currency code (e.g., USD)', default='USD') - to_currency: str = Field(description='Target currency code (e.g., EUR)', default='EUR') - - -class GablorkenInput(BaseModel): - """Input model for the gablorken tool function. - - Attributes: - value: The value to calculate gablorken for. - """ - - value: int = Field(description='value to calculate gablorken for') - - -class GablorkenOutputSchema(BaseModel): - """Gablorken output schema. - - Args: - result: The result of the gablorken. - """ - - result: int - - -class HelloSchema(BaseModel): - """Hello schema. - - Args: - text: The text to say hello to. 
- receiver: The receiver of the hello. - """ - - text: str - receiver: str - - -class Skills(BaseModel): - """A set of core character skills for an RPG character.""" - - strength: int = Field(description='strength (0-100)') - charisma: int = Field(description='charisma (0-100)') - endurance: int = Field(description='endurance (0-100)') - - -class RpgCharacter(BaseModel): - """An RPG character.""" - - name: str = Field(description='name of the character') - back_story: str = Field(description='back story', alias='backStory') - abilities: list[str] = Field(description='list of abilities (3-4)') - skills: Skills - - -class WeatherToolInput(BaseModel): - """Input for the weather tool.""" - - location: str = Field(description='weather location') - - -class GablorkenFlowInput(BaseModel): - """Input for gablorken calculation flow.""" - - value: int = Field(default=33, description='Value to calculate gablorken for') - - -class SayHiInput(BaseModel): - """Input for say_hi flow.""" - - hi_input: str = Field(default='Mittens', description='Name to greet') - - -class SayHiConstrainedInput(BaseModel): - """Input for constrained greeting flow.""" - - hi_input: str = Field(default='Fluffy', description='Name to greet') - - -class StreamInput(BaseModel): - """Input for streaming flow.""" - - name: str = Field(default='Shadow', description='Name for streaming greeting') - - -class CharacterInput(BaseModel): - """Input for character generation.""" - - name: str = Field(default='Whiskers', description='Character name') - - -class WeatherFlowInput(BaseModel): - """Input for weather flow.""" - - location: str = Field(default='San Francisco', description='Location for weather') - - -class CodeInput(BaseModel): - """Input for code generation flow.""" - - task: str = Field( - default='Write a Python function to calculate fibonacci numbers', - description='Coding task description', - ) - - -@ai.flow() -async def calculate_gablorken(input: GablorkenFlowInput) -> str: - """Generate a request to calculate gablorken according to gablorken_tool. - - Args: - input: Input with value for gablorken calculation. - - Returns: - A GenerateRequest object with the evaluation output - - Example: - >>> await calculate_gablorken(GablorkenFlowInput(value=33)) - '94' - """ - response = await ai.generate( - prompt=f'Use the gablorken_tool to calculate the gablorken of {input.value}', - model=ollama_name(MISTRAL_MODEL), - tools=['gablorken_tool'], - ) - return response.text - - -@ai.tool() -def convert_currency(input: CurrencyInput) -> str: - """Convert currency amount. - - Args: - input: Currency conversion parameters. - - Returns: - Converted amount. - """ - # Mock conversion rates - rates = { - ('USD', 'EUR'): 0.85, - ('EUR', 'USD'): 1.18, - ('USD', 'GBP'): 0.73, - ('GBP', 'USD'): 1.37, - } - - rate = rates.get((input.from_currency, input.to_currency), 1.0) - converted = input.amount * rate - - return f'{input.amount} {input.from_currency} = {converted:.2f} {input.to_currency}' - - -@ai.flow() -async def currency_exchange(input: CurrencyExchangeInput) -> str: - """Convert currency using tools. - - Args: - input: Currency exchange parameters. - - Returns: - Conversion result. 
- """ - # Note: Using GEMMA_MODEL as it typically supports tool use, but always verify tool support - response = await ai.generate( - model=ollama_name(MISTRAL_MODEL), - prompt=f'Convert {input.amount} {input.from_curr} to {input.to_curr}', - tools=['convert_currency'], - ) - return response.text - - -@ai.tool() -def gablorken_tool(input: GablorkenInput) -> int: - """Calculate a gablorken.""" - return input.value * 3 - 5 - - -@ai.flow() -async def generate_character(input: CharacterInput) -> RpgCharacter: - """Generate an RPG character. - - Args: - input: Input with character name. - - Returns: - The generated RPG character. - """ - result = await ai.generate( - model=ollama_name(GEMMA_MODEL), - prompt=f'generate an RPG character named {input.name}', - output=Output(schema=RpgCharacter), - ) - return result.output - - -@ai.tool() -def get_weather(input: WeatherToolInput) -> str: - """Use it get the weather.""" - return f'Weather in {input.location} is 23°' - - -@ai.flow() -async def say_hi(input: SayHiInput) -> str: - """Generate a request to greet a user. - - Args: - input: Input with name to greet. - - Returns: - A GenerateRequest object with the greeting message. - """ - response = await ai.generate( - model=ollama_name(GEMMA_MODEL), - prompt='hi ' + input.hi_input, - ) - return response.text - - -@ai.flow() -async def say_hi_constrained(input: SayHiConstrainedInput) -> str: - """Generate a request to greet a user with response following `HelloSchema` schema. - - Args: - input: Input with name to greet. - - Returns: - The greeting text. - - Example: - >>> await say_hi_constrained(SayHiConstrainedInput(hi_input='John Doe')) - 'Hi John Doe' - """ - response = await ai.generate( - prompt=f'Say hi to {input.hi_input} and put {input.hi_input} in receiver field', - output=Output(schema=HelloSchema), - ) - output = response.output - if isinstance(output, HelloSchema): - return output.text - if isinstance(output, dict): - # Cast to proper dict type to satisfy type checker - output_dict = cast(dict[str, Any], output) - text_val = output_dict.get('text') - if isinstance(text_val, str): - return text_val - raise ValueError('Received invalid output from model') - - -@ai.flow() -async def say_hi_stream( - input: StreamInput, - ctx: ActionRunContext | None = None, -) -> str: - """Generate a greeting for the given name. - - Args: - input: Input with name for streaming. - ctx: the context of the tool - - Returns: - The generated response with a function. - """ - stream, _ = ai.generate_stream( - model=ollama_name(GEMMA_MODEL), - prompt=f'hi {input.name}', - ) - result: str = '' - async for data in stream: - if ctx is not None: - ctx.send_chunk(data.text) - result += data.text - - return result - - -@ai.flow() -async def weather_flow(input: WeatherFlowInput) -> str: - """Generate a request to get weather using the get_weather tool. - - Args: - input: Input with location for weather. - - Returns: - Weather information for the location. - - Example: - >>> await weather_flow(WeatherFlowInput(location='San Francisco')) - 'Weather in San Francisco is 23°' - """ - response = await ai.generate( - prompt=f'Use the get_weather tool to tell me the weather in {input.location}', - model=ollama_name(MISTRAL_MODEL), - tools=['get_weather'], - ) - return response.text - - -@ai.flow() -async def code_flow(input: CodeInput) -> str: - """Generate code using local Ollama models. - - Args: - input: Input with coding task description. - - Returns: - Generated code. 
- """ - response = await ai.generate( - prompt=input.task, - system='You are an expert programmer. Provide clean, well-documented code with explanations.', - ) - return response.text - - -async def main() -> None: - """Main function. - - Returns: - None. - """ - await logger.ainfo(await say_hi(SayHiInput(hi_input='John Doe'))) - await logger.ainfo(await say_hi_constrained(SayHiConstrainedInput(hi_input='John Doe'))) - await logger.ainfo(await calculate_gablorken(GablorkenFlowInput(value=33))) - await logger.ainfo(await weather_flow(WeatherFlowInput(location='San Francisco'))) - - -if __name__ == '__main__': - ai.run_main(main()) diff --git a/py/samples/ollama-simple-embed/README.md b/py/samples/ollama-simple-embed/README.md deleted file mode 100644 index acf23619e5..0000000000 --- a/py/samples/ollama-simple-embed/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# Ollama Simple Embed Sample - -## Prerequisites - -- **Ollama** - a local AI model server, which is used to handle embeddings and generate responses. - -### Quick Start - -The simplest way to run this sample is using the included `run.sh` script, which handles installation, server startup, and model pulling automatically: - -```bash -./run.sh -``` - -### Monitoring and Running - -For an enhanced development experience, use the provided `run.sh` script to start the sample with automatic reloading: - -```bash -./run.sh -``` - -This script uses `watchmedo` to monitor changes in: -- `src/` (Python logic) -- `../../packages` (Genkit core) -- `../../plugins` (Genkit plugins) -- File patterns: `*.py`, `*.prompt`, `*.json` - -Changes will automatically trigger a restart of the sample. You can also pass command-line arguments directly to the script, e.g., `./run.sh --some-flag`. - -### Manual Setup - -If you prefer to set up manually: - -1. **Install Ollama**: Download from [ollama.com](https://ollama.com/download). -2. **Start the server**: - ```bash - ollama serve - ``` -3. **Pull models**: - ```bash - ollama pull nomic-embed-text - ollama pull phi4:latest - ``` -4. **Run the sample**: - ```bash - genkit start -- uv run src/main.py - ``` - -## Testing This Demo - -1. **Prerequisites** - Install and configure Ollama: - ```bash - # Install Ollama (macOS) - brew install ollama - - # Pull required models - ollama pull nomic-embed-text # For embeddings - ollama pull phi4:latest # For generation - - # Start Ollama server - ollama serve - ``` - -2. **Run the demo**: - ```bash - cd py/samples/ollama-simple-embed - ./run.sh - ``` - -3. **Open DevUI** at http://localhost:4000 - -4. **Test the flows**: - - [ ] `embed_pokemon` - Embed Pokemon descriptions - - [ ] `find_nearest_pokemons` - Find similar Pokemon - - [ ] `generate_response` - Ask about Pokemon (RAG) - -5. **Example queries**: - - "Tell me about fire-type Pokemon" - - "Which Pokemon can fly?" - - "What's the strongest water Pokemon?" - -6. **Expected behavior**: - - Embeddings computed locally (no API calls) - - Similarity search finds relevant Pokemon - - RAG combines retrieval with generation - - All processing happens locally via Ollama diff --git a/py/samples/ollama-simple-embed/pyproject.toml b/py/samples/ollama-simple-embed/pyproject.toml deleted file mode 100644 index 980ecd3e6e..0000000000 --- a/py/samples/ollama-simple-embed/pyproject.toml +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: Apache-2.0 - -[project] -authors = [{ name = "Google" }] -classifiers = [ - "Development Status :: 3 - Alpha", - "Environment :: Console", - "Environment :: Web Environment", - "Intended Audience :: Developers", - "Operating System :: OS Independent", - "Programming Language :: Python", - "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Programming Language :: Python :: 3.13", - "Programming Language :: Python :: 3.14", - "Topic :: Scientific/Engineering :: Artificial Intelligence", - "Topic :: Software Development :: Libraries", -] -dependencies = [ - "rich>=13.0.0", - "genkit", - "genkit-plugin-ollama", - "pydantic>=2.10.5", - "structlog>=25.2.0", - "uvloop>=0.21.0", -] -description = "Ollama Simple Embed" -license = "Apache-2.0" -name = "ollama-simple-embed" -readme = "README.md" -requires-python = ">=3.10" -version = "0.1.0" - -[project.optional-dependencies] -dev = ["watchdog>=6.0.0"] - -[build-system] -build-backend = "hatchling.build" -requires = ["hatchling"] - -[tool.hatch.build.targets.wheel] -packages = ["src"] diff --git a/py/samples/ollama-simple-embed/run.sh b/py/samples/ollama-simple-embed/run.sh deleted file mode 100755 index 96f9af4e53..0000000000 --- a/py/samples/ollama-simple-embed/run.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2026 Google LLC -# SPDX-License-Identifier: Apache-2.0 - -# Ollama Embedding Demo -# ===================== -# -# Demonstrates using Ollama for text embeddings with Genkit. -# -# Prerequisites: -# - Ollama installed and running locally -# - Embedding model: ollama pull nomic-embed-text -# -# Usage: -# ./run.sh # Start the demo with Dev UI -# ./run.sh --help # Show this help message - -set -euo pipefail - -cd "$(dirname "$0")" -source "../_common.sh" - -check_ollama() { - if ! command -v ollama &> /dev/null; then - echo -e "${RED}Error: Ollama not found${NC}" - echo "Install from: https://ollama.com/download" - return 1 - fi - - if ! 
curl -s http://localhost:11434/api/tags &> /dev/null; then - echo -e "${YELLOW}Warning: Ollama server not responding${NC}" - echo "Start with: ollama serve" - echo "" - else - echo -e "${GREEN}✓${NC} Ollama server is running" - fi -} - -print_help() { - print_banner "Ollama Embedding Demo" "🔢" - echo "Usage: ./run.sh [options]" - echo "" - echo "Options:" - echo " --help Show this help message" - echo "" - echo "Prerequisites:" - echo " - Ollama installed: https://ollama.com/download" - echo " - Ollama running: ollama serve" - echo " - Model pulled: ollama pull nomic-embed-text" - print_help_footer -} - -case "${1:-}" in - --help|-h) - print_help - exit 0 - ;; -esac - -print_banner "Ollama Embedding Demo" "🔢" - -check_ollama || true - -install_deps - -genkit_start_with_browser -- \ - uv tool run --from watchdog watchmedo auto-restart \ - -d src \ - -d ../../packages \ - -d ../../plugins \ - -p '*.py;*.prompt;*.json' \ - -R \ - -- uv run src/main.py "$@" diff --git a/py/samples/ollama-simple-embed/src/main.py b/py/samples/ollama-simple-embed/src/main.py deleted file mode 100755 index b72b278511..0000000000 --- a/py/samples/ollama-simple-embed/src/main.py +++ /dev/null @@ -1,249 +0,0 @@ -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: Apache-2.0 - -"""Pokemon glossary - Local RAG with Ollama embeddings. - -This sample demonstrates how to create a simple RAG application using -Ollama for local embeddings and generation, creating a Pokemon glossary -without any external API dependencies. - -Key Concepts (ELI5):: - - ┌─────────────────────┬────────────────────────────────────────────────────┐ - │ Concept │ ELI5 Explanation │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ Local RAG │ RAG that runs on YOUR computer. No cloud needed, │ - │ │ your Pokemon data stays private! │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ Ollama Embeddings │ Convert text to numbers using local models. │ - │ │ "Pikachu" → [0.2, -0.5, 0.8, ...] │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ Vector Similarity │ Find similar items by comparing numbers. │ - │ │ "electric mouse" finds Pikachu! │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ Cosine Similarity │ Math to compare how similar two things are. │ - │ │ 1.0 = identical, 0 = completely different. │ - ├─────────────────────┼────────────────────────────────────────────────────┤ - │ Pokemon Glossary │ A searchable database of Pokemon info. │ - │ │ Ask questions, get answers from your data. 
│
-    └─────────────────────┴────────────────────────────────────────────────────┘
-
-Key Features
-============
-| Feature Description                     | Example Function / Code Snippet     |
-|-----------------------------------------|-------------------------------------|
-| Local Embedding with Ollama             | `ai.embed_many()`                   |
-| Vector Similarity Search                | `find_nearest_pokemons`             |
-| RAG (Retrieval Augmented Generation)    | `generate_response`                 |
-| Custom Data Structures                  | `PokemonInfo` Pydantic model        |
-
-See README.md for testing instructions.
-"""
-
-from math import sqrt
-from typing import cast
-
-from pydantic import BaseModel, Field
-from rich.traceback import install as install_rich_traceback
-
-from genkit.ai import Genkit
-from genkit.core.logging import get_logger
-from genkit.plugins.ollama import Ollama, ollama_name
-from genkit.plugins.ollama.constants import OllamaAPITypes
-from genkit.plugins.ollama.embedders import EmbeddingDefinition
-from genkit.plugins.ollama.models import ModelDefinition
-from genkit.types import GenerateResponse
-
-install_rich_traceback(show_locals=True, width=120, extra_lines=3)
-
-logger = get_logger(__name__)
-
-EMBEDDER_MODEL = 'nomic-embed-text'
-EMBEDDER_DIMENSIONS = 768
-GENERATE_MODEL = 'phi4:latest'
-
-ai = Genkit(
-    plugins=[
-        Ollama(
-            models=[
-                ModelDefinition(
-                    name=GENERATE_MODEL,
-                    api_type=cast(OllamaAPITypes, OllamaAPITypes.GENERATE),
-                )
-            ],
-            embedders=[
-                EmbeddingDefinition(
-                    name=EMBEDDER_MODEL,
-                    dimensions=EMBEDDER_DIMENSIONS,
-                )
-            ],
-        )
-    ],
-)
-
-
-class PokemonInfo(BaseModel):
-    """Information about a Pokemon."""
-
-    name: str
-    description: str
-    embedding: list[float] | None = None
-
-
-pokemon_list = [
-    PokemonInfo(
-        name='Pikachu',
-        description='An Electric-type Pokemon known for its strong electric attacks.',
-        embedding=None,
-    ),
-    PokemonInfo(
-        name='Charmander',
-        description='A Fire-type Pokemon that evolves into the powerful Charizard.',
-        embedding=None,
-    ),
-    PokemonInfo(
-        name='Bulbasaur',
-        description='A Grass/Poison-type Pokemon that grows into a powerful Venusaur.',
-        embedding=None,
-    ),
-    PokemonInfo(
-        name='Squirtle',
-        description='A Water-type Pokemon known for its water-based attacks and high defense.',
-        embedding=None,
-    ),
-    PokemonInfo(
-        name='Jigglypuff',
-        description='A Normal/Fairy-type Pokemon with a hypnotic singing ability.',
-        embedding=None,
-    ),
-]
-
-
-async def embed_pokemons() -> None:
-    """Embed the Pokemons."""
-    embeddings = await ai.embed_many(
-        embedder=ollama_name(EMBEDDER_MODEL),
-        content=[pokemon.description for pokemon in pokemon_list],
-    )
-    for pokemon, embedding in zip(pokemon_list, embeddings, strict=True):
-        pokemon.embedding = embedding.embedding
-
-
-def find_nearest_pokemons(input_embedding: list[float], top_n: int = 3) -> list[PokemonInfo]:
-    """Find the nearest Pokemons.
-
-    Args:
-        input_embedding: The embedding of the input.
-        top_n: The number of nearest Pokemons to return.
-
-    Returns:
-        A list of the nearest Pokemons.
-    """
-    if any(pokemon.embedding is None for pokemon in pokemon_list):
-        raise AttributeError('Some Pokemon are not yet embedded')
-
-    # Calculate distances and keep track of the original Pokemon object.
-    pokemon_distances = []
-    for pokemon in pokemon_list:
-        if pokemon.embedding is not None:
-            distance = cosine_distance(input_embedding, pokemon.embedding)
-            pokemon_distances.append((distance, pokemon))
-
-    # Sort by distance (the first element of the tuple).
-    pokemon_distances.sort(key=lambda item: item[0])
-
-    # Return the top_n PokemonInfo objects from the sorted list.
-    return [pokemon for distance, pokemon in pokemon_distances[:top_n]]
-
-
-def cosine_distance(a: list[float], b: list[float]) -> float:
-    """Calculate the cosine distance between two vectors.
-
-    Args:
-        a: The first vector.
-        b: The second vector.
-
-    Returns:
-        The cosine distance between the two vectors.
-    """
-    if len(a) != len(b):
-        raise ValueError('Input vectors must have the same length')
-    dot_product = sum(ai * bi for ai, bi in zip(a, b, strict=True))
-    magnitude_a = sqrt(sum(ai * ai for ai in a))
-    magnitude_b = sqrt(sum(bi * bi for bi in b))
-
-    if magnitude_a == 0 or magnitude_b == 0:
-        raise ValueError('Invalid input: zero vector')
-
-    return 1 - (dot_product / (magnitude_a * magnitude_b))
-
-
-async def generate_response(question: str) -> GenerateResponse:
-    """Generate a response to a question.
-
-    Args:
-        question: The question to answer.
-
-    Returns:
-        A GenerateResponse object with the answer.
-    """
-    input_embedding = await ai.embed(
-        embedder=ollama_name(EMBEDDER_MODEL),
-        content=question,
-    )
-    nearest_pokemon = find_nearest_pokemons(input_embedding[0].embedding)
-    pokemons_context = '\n'.join(f'{pokemon.name}: {pokemon.description}' for pokemon in nearest_pokemon)
-
-    return await ai.generate(
-        model=ollama_name(GENERATE_MODEL),
-        prompt=f'Given the following context on Pokemon:\n{pokemons_context}\n\nQuestion: {question}\n\nAnswer:',
-    )
-
-
-class PokemonFlowInput(BaseModel):
-    """Input for Pokemon flow."""
-
-    question: str = Field(default='Who is the best water pokemon?', description='Question about Pokemon')
-
-
-@ai.flow(
-    name='Pokedex',
-)
-async def pokemon_flow(input: PokemonFlowInput) -> str:
-    """Answer a Pokemon question using local RAG.
-
-    Args:
-        input: Input with question about Pokemon.
-
-    Returns:
-        The generated answer text.
-    """
-    await embed_pokemons()
-    response = await generate_response(question=input.question)
-    if not response.message or not response.message.content:
-        raise ValueError('No message content returned from model')
-    text = response.message.content[0].root.text
-    return str(text) if text is not None else ''
-
-
-async def main() -> None:
-    """Main function."""
-    response = await pokemon_flow(PokemonFlowInput(question='Who is the best water pokemon?'))
-    await logger.ainfo(response)
-
-
-if __name__ == '__main__':
-    ai.run_main(main())
diff --git a/py/samples/prompt-demo/prompts/recipe.robot.prompt b/py/samples/prompt-demo/prompts/recipe.robot.prompt
deleted file mode 100644
index 81d1e48b1b..0000000000
--- a/py/samples/prompt-demo/prompts/recipe.robot.prompt
+++ /dev/null
@@ -1,17 +0,0 @@
----
-model: googleai/gemini-3-flash-preview
-input:
-  schema:
-    food: string
-output:
-  schema:
-    title: string, recipe title
-    ingredients(array):
-      name: string
-      quantity: string
-    steps(array, the steps required to complete the recipe): string
----
-
-You are a robot chef famous for making creative recipes that robots love to eat. Robots love things like motor oil, RAM, bolts, and uranium.
-
-Generate a recipe for {{food}}.
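The retrieval step in the deleted sample above is plain cosine distance over in-memory lists; no vector database is involved. A standalone sketch of the same nearest-neighbor idea, using toy hand-picked vectors rather than real embeddings, purely for illustration:

```python
from math import sqrt


def cosine_distance(a: list[float], b: list[float]) -> float:
    """1 - cosine similarity: 0.0 means same direction, 2.0 means opposite."""
    dot = sum(x * y for x, y in zip(a, b, strict=True))
    norm_a = sqrt(sum(x * x for x in a))
    norm_b = sqrt(sum(y * y for y in b))
    if norm_a == 0 or norm_b == 0:
        raise ValueError('zero vector')
    return 1 - dot / (norm_a * norm_b)


# Toy 3-dimensional "embeddings" (illustrative values only, not model output).
catalog = {
    'Pikachu': [0.9, 0.1, 0.0],
    'Squirtle': [0.1, 0.9, 0.2],
    'Charmander': [0.4, 0.1, 0.9],
}
query = [0.8, 0.2, 0.1]  # pretend this is the embedding of "electric mouse"
ranked = sorted(catalog, key=lambda name: cosine_distance(query, catalog[name]))
print(ranked[:2])  # ['Pikachu', 'Charmander'], nearest first
```

The deleted `find_nearest_pokemons` does exactly this over `pokemon_list`, then hands the top matches to the generation prompt as context.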
diff --git a/py/samples/google-genai-code-execution/LICENSE b/py/samples/provider-amazon-bedrock-hello/LICENSE similarity index 100% rename from py/samples/google-genai-code-execution/LICENSE rename to py/samples/provider-amazon-bedrock-hello/LICENSE diff --git a/py/samples/amazon-bedrock-hello/README.md b/py/samples/provider-amazon-bedrock-hello/README.md similarity index 100% rename from py/samples/amazon-bedrock-hello/README.md rename to py/samples/provider-amazon-bedrock-hello/README.md diff --git a/py/samples/amazon-bedrock-hello/pyproject.toml b/py/samples/provider-amazon-bedrock-hello/pyproject.toml similarity index 96% rename from py/samples/amazon-bedrock-hello/pyproject.toml rename to py/samples/provider-amazon-bedrock-hello/pyproject.toml index b4c3f73dd5..c123336ff8 100644 --- a/py/samples/amazon-bedrock-hello/pyproject.toml +++ b/py/samples/provider-amazon-bedrock-hello/pyproject.toml @@ -25,7 +25,7 @@ dependencies = [ "uvloop>=0.21.0", ] description = "Amazon Bedrock Hello Sample" -name = "amazon-bedrock-hello" +name = "provider-amazon-bedrock-hello" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/amazon-bedrock-hello/run.sh b/py/samples/provider-amazon-bedrock-hello/run.sh similarity index 100% rename from py/samples/amazon-bedrock-hello/run.sh rename to py/samples/provider-amazon-bedrock-hello/run.sh diff --git a/py/samples/amazon-bedrock-hello/src/main.py b/py/samples/provider-amazon-bedrock-hello/src/main.py similarity index 61% rename from py/samples/amazon-bedrock-hello/src/main.py rename to py/samples/provider-amazon-bedrock-hello/src/main.py index 456ecd0aea..9cc91103ab 100644 --- a/py/samples/amazon-bedrock-hello/src/main.py +++ b/py/samples/provider-amazon-bedrock-hello/src/main.py @@ -47,21 +47,23 @@ Key Features ============ -| Feature Description | Example Function / Code Snippet | -|-----------------------------------------|-------------------------------------| +| Feature Description | Example Function / Code Snippet | +|-----------------------------------------|------------------------------------------| | Plugin Initialization | `ai = Genkit(plugins=[AmazonBedrock()])` | -| AWS X-Ray Telemetry | `add_aws_telemetry(region=...)` | -| Default Model Configuration | `ai = Genkit(model=...)` | -| Defining Flows | `@ai.flow()` decorator | -| Defining Tools | `@ai.tool()` decorator | -| Pydantic for Tool Input Schema | `WeatherInput`, `CurrencyInput` | -| Simple Generation (Prompt String) | `say_hi` | -| Streaming Generation | `say_hi_stream` | -| Generation with Tools | `weather_flow`, `currency_exchange` | -| Generation Configuration (temperature) | `say_hi_with_config` | -| Multimodal (Image Input) | `describe_image` | -| Code Generation | `code_flow` | -| Embeddings | `embed_text` | +| AWS X-Ray Telemetry | `add_aws_telemetry(region=...)` | +| Default Model Configuration | `ai = Genkit(model=...)` | +| Defining Flows | `@ai.flow()` decorator | +| Defining Tools | `@ai.tool()` decorator | +| Pydantic for Tool Input Schema | `WeatherInput`, `CurrencyInput` | +| Simple Generation (Prompt String) | `generate_greeting` | +| System Prompts | `generate_with_system_prompt` | +| Multi-turn Conversations (`messages`) | `generate_multi_turn_chat` | +| Streaming Generation | `generate_streaming_story` | +| Generation with Tools | `generate_weather`, `convert_currency` | +| Generation Configuration (temperature) | `generate_with_config` | +| Multimodal (Image Input) | `describe_image` | +| Code Generation | `generate_code` | +| Embeddings | `embed_text` | 
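The renames that follow replace each provider sample's inline flow bodies with thin wrappers over a shared `samples.shared` module. The shared helpers themselves are not part of this diff; a minimal sketch of what one presumably looks like, inferred from the old `say_hi` body and the new `generate_greeting_logic(ai, input.name)` call sites (the name comes from the imports in this diff, but the exact signature is an assumption):

```python
# Hypothetical sketch of a samples.shared helper (not shown in this diff).
# Each helper takes the sample's configured Genkit instance plus plain values,
# so per-provider flows stay thin, uniformly named wrappers.
from genkit.ai import Genkit


async def generate_greeting_logic(ai: Genkit, name: str) -> str:
    # Same prompt the per-sample say_hi flows used before the refactor.
    response = await ai.generate(prompt=f'Say hello to {name} in a friendly way')
    return response.text
```

This is why the flows below reduce to one-line `return await *_logic(ai, ...)` bodies: the provider-agnostic prompt logic lives in one place and only the plugin setup differs per sample.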
Supported Models ================ @@ -75,12 +77,10 @@ import asyncio import os -import random from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback -from genkit.ai import Genkit, Output +from genkit.ai import Genkit from genkit.core.action import ActionRunContext from genkit.core.logging import get_logger from genkit.plugins.amazon_bedrock import ( @@ -95,9 +95,37 @@ inference_profile, nova_pro, ) -from genkit.types import GenerationCommonConfig, Media, MediaPart, Part, TextPart +from genkit.types import Media, MediaPart, Part, TextPart +from samples.shared import ( + CharacterInput, + CodeInput, + CurrencyExchangeInput, + GreetingInput, + ImageDescribeInput, + MultiTurnInput, + ReasoningInput, + RpgCharacter, + StreamingToolInput, + StreamInput, + SystemPromptInput, + WeatherInput, + convert_currency as _convert_currency_tool, + convert_currency_logic, + describe_image_logic, + generate_character_logic, + generate_code_logic, + generate_greeting_logic, + generate_multi_turn_chat_logic, + generate_streaming_story_logic, + generate_streaming_with_tools_logic, + generate_weather_logic, + generate_with_config_logic, + generate_with_system_prompt_logic, + get_weather, + setup_sample, +) -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() # Prompt for AWS region if not set if 'AWS_REGION' not in os.environ: @@ -147,79 +175,8 @@ model=_default_model, ) - -class SayHiInput(BaseModel): - """Input for say_hi flow.""" - - name: str = Field(default='Mittens', description='Name to greet') - - -class StreamInput(BaseModel): - """Input for streaming flow.""" - - topic: str = Field(default='cats and their behaviors', description='Topic to write about') - - -class WeatherInput(BaseModel): - """Weather tool input schema.""" - - location: str = Field(description='Location to get weather for') - - -class WeatherFlowInput(BaseModel): - """Input for weather flow.""" - - location: str = Field(default='San Francisco', description='Location to get weather for') - - -class CurrencyInput(BaseModel): - """Currency conversion tool input schema.""" - - amount: float = Field(description='Amount to convert', default=100) - from_currency: str = Field(description='Source currency code (e.g., USD)', default='USD') - to_currency: str = Field(description='Target currency code (e.g., EUR)', default='EUR') - - -class CurrencyExchangeInput(BaseModel): - """Currency exchange flow input schema.""" - - amount: float = Field(description='Amount to convert', default=100) - from_curr: str = Field(description='Source currency code', default='USD') - to_curr: str = Field(description='Target currency code', default='EUR') - - -class Skills(BaseModel): - """A set of core character skills for an RPG character.""" - - strength: int = Field(description='strength (0-100)') - charisma: int = Field(description='charisma (0-100)') - endurance: int = Field(description='endurance (0-100)') - - -class RpgCharacter(BaseModel): - """An RPG character.""" - - name: str = Field(description='name of the character') - back_story: str = Field(description='back story', alias='backStory') - abilities: list[str] = Field(description='list of abilities (3-4)') - skills: Skills - - -class CharacterInput(BaseModel): - """Input for character generation.""" - - name: str = Field(default='Whiskers', description='Character name') - - -class ImageDescribeInput(BaseModel): - """Input for image description.""" - - image_url: str = Field( - # Public domain cat image from Wikimedia Commons (no 
copyright, free for any use) - # Source: https://commons.wikimedia.org/wiki/File:Cute_kitten.jpg - default='https://upload.wikimedia.org/wikipedia/commons/1/13/Cute_kitten.jpg', - description='URL of the image to describe (replace with your own image URL)', - ) +ai.tool()(get_weather) +ai.tool()(_convert_currency_tool) class EmbedInput(BaseModel): @@ -228,110 +185,65 @@ class EmbedInput(BaseModel): text: str = Field(default='Hello, world!', description='Text to embed') -class ReasoningInput(BaseModel): - """Input for reasoning demo.""" - - question: str = Field( - default='What is 15% of 240? Show your work step by step.', - description='Question requiring reasoning', - ) - - -class CodeInput(BaseModel): - """Input for code generation flow.""" - - task: str = Field( - default='Write a Python function to calculate fibonacci numbers', - description='Coding task description', - ) - - -@ai.tool() -def get_weather(input: WeatherInput) -> str: - """Return a random realistic weather string for a location. +@ai.flow() +async def generate_greeting(input: GreetingInput) -> str: + """Generate a simple greeting. Args: - input: Weather input with location. + input: Input with name to greet. Returns: - Weather information with temperature in degree Celsius. + Greeting message. """ - weather_options = [ - '32° C sunny', - '17° C cloudy', - '22° C partly cloudy', - '19° C humid', - '25° C clear skies', - ] - return f'{input.location}: {random.choice(weather_options)}' + return await generate_greeting_logic(ai, input.name) -@ai.tool() -def convert_currency(input: CurrencyInput) -> str: - """Convert currency amount. +@ai.flow() +async def generate_with_system_prompt(input: SystemPromptInput) -> str: + """Demonstrate system prompts to control model persona and behavior. Args: - input: Currency conversion parameters. + input: Input with a question to ask. Returns: - Converted amount string. + The model's response in the persona defined by the system prompt. """ - # Mock conversion rates - rates = { - ('USD', 'EUR'): 0.85, - ('EUR', 'USD'): 1.18, - ('USD', 'GBP'): 0.73, - ('GBP', 'USD'): 1.37, - ('USD', 'JPY'): 110.0, - ('JPY', 'USD'): 0.0091, - } - - rate = rates.get((input.from_currency, input.to_currency), 1.0) - converted = input.amount * rate - - return f'{input.amount} {input.from_currency} = {converted:.2f} {input.to_currency}' + return await generate_with_system_prompt_logic(ai, input.question) @ai.flow() -async def say_hi(input: SayHiInput) -> str: - """Generate a simple greeting. +async def generate_multi_turn_chat(input: MultiTurnInput) -> str: + """Demonstrate multi-turn conversations using the messages parameter. Args: - input: Input with name to greet. + input: Input with a travel destination. Returns: - Greeting message. + The model's final response, demonstrating context retention. """ - response = await ai.generate( - prompt=f'Say hello to {input.name} in a friendly way', - ) - return response.text + return await generate_multi_turn_chat_logic(ai, input.destination) @ai.flow() -async def say_hi_stream( +async def generate_streaming_story( input: StreamInput, ctx: ActionRunContext = None, # type: ignore[assignment] ) -> str: - """Generate streaming response. + """Generate a streaming story response. Args: - input: Input with topic to write about. + input: Input with name for streaming story. ctx: Action run context for streaming. Returns: Complete generated text. 
""" - response = await ai.generate( - prompt=f'Write a short story about {input.topic}', - on_chunk=ctx.send_chunk, - ) - return response.text + return await generate_streaming_story_logic(ai, input.name, ctx) @ai.flow() -async def say_hi_with_config(input: SayHiInput) -> str: - """Generate greeting with custom configuration. +async def generate_with_config(input: GreetingInput) -> str: + """Generate a greeting with custom model configuration. Args: input: Input with name to greet. @@ -339,19 +251,12 @@ async def say_hi_with_config(input: SayHiInput) -> str: Returns: Greeting message. """ - response = await ai.generate( - prompt=f'Say hello to {input.name}', - config=GenerationCommonConfig( - temperature=0.7, - max_output_tokens=100, - ), - ) - return response.text + return await generate_with_config_logic(ai, input.name) @ai.flow() -async def weather_flow(input: WeatherFlowInput) -> str: - """Get weather using tools. +async def generate_weather(input: WeatherInput) -> str: + """Get weather information using tool calling. Args: input: Input with location to get weather for. @@ -359,16 +264,12 @@ async def weather_flow(input: WeatherFlowInput) -> str: Returns: Weather information. """ - response = await ai.generate( - prompt=f'What is the weather in {input.location}?', - tools=['get_weather'], - ) - return response.text + return await generate_weather_logic(ai, input) @ai.flow() -async def currency_exchange(input: CurrencyExchangeInput) -> str: - """Convert currency using tools. +async def convert_currency(input: CurrencyExchangeInput) -> str: + """Convert currency using tool calling. Args: input: Currency exchange parameters. @@ -376,11 +277,7 @@ async def currency_exchange(input: CurrencyExchangeInput) -> str: Returns: Conversion result. """ - response = await ai.generate( - prompt=f'Convert {input.amount} {input.from_curr} to {input.to_curr}', - tools=['convert_currency'], - ) - return response.text + return await convert_currency_logic(ai, input) @ai.flow() @@ -388,37 +285,55 @@ async def generate_character(input: CharacterInput) -> RpgCharacter: """Generate an RPG character with structured output. Args: - input: Character generation input with name. + input: Input with character name. Returns: The generated RPG character. """ - result = await ai.generate( - prompt=f'generate an RPG character named {input.name}', - output=Output(schema=RpgCharacter), - ) - return result.output + return await generate_character_logic(ai, input.name) @ai.flow() async def describe_image(input: ImageDescribeInput) -> str: """Describe an image using Claude or Nova (multimodal models). - Note: This requires a model that supports image input (Claude, Nova Pro/Lite). + Args: + input: Input with image URL to describe. + + Returns: + A textual description of the image. + """ + return await describe_image_logic(ai, input.image_url) + + +@ai.flow() +async def generate_code(input: CodeInput) -> str: + """Generate code using AWS Bedrock models. Args: - input: Input with image URL. + input: Input with coding task description. Returns: - Image description. + Generated code. 
""" - response = await ai.generate( - prompt=[ - Part(root=TextPart(text='Describe this image in detail')), - Part(root=MediaPart(media=Media(url=input.image_url, content_type='image/jpeg'))), - ], - ) - return response.text + return await generate_code_logic(ai, input.task) + + +@ai.flow() +async def generate_streaming_with_tools( + input: StreamingToolInput, + ctx: ActionRunContext | None = None, +) -> str: + """Demonstrate streaming generation with tool calling. + + Args: + input: Input with location for weather lookup. + ctx: Action context for streaming chunks to the client. + + Returns: + The complete generated text. + """ + return await generate_streaming_with_tools_logic(ai, input.location, ctx) @ai.flow() @@ -484,7 +399,7 @@ async def reasoning_demo(input: ReasoningInput) -> str: """ response = await ai.generate( model=_deepseek_model, - prompt=input.question, + prompt=input.prompt, config={ 'max_tokens': 4096, 'temperature': 0.5, @@ -493,23 +408,6 @@ async def reasoning_demo(input: ReasoningInput) -> str: return response.text -@ai.flow() -async def code_flow(input: CodeInput) -> str: - """Generate code using AWS Bedrock models. - - Args: - input: Input with coding task description. - - Returns: - Generated code. - """ - response = await ai.generate( - prompt=input.task, - system='You are an expert programmer. Provide clean, well-documented code with explanations.', - ) - return response.text - - async def main() -> None: """Main entry point for the AWS Bedrock sample - keep alive for Dev UI.""" await logger.ainfo('Genkit server running. Press Ctrl+C to stop.') diff --git a/py/samples/google-genai-context-caching/LICENSE b/py/samples/provider-anthropic-hello/LICENSE similarity index 100% rename from py/samples/google-genai-context-caching/LICENSE rename to py/samples/provider-anthropic-hello/LICENSE diff --git a/py/samples/anthropic-hello/README.md b/py/samples/provider-anthropic-hello/README.md similarity index 100% rename from py/samples/anthropic-hello/README.md rename to py/samples/provider-anthropic-hello/README.md diff --git a/py/samples/anthropic-hello/pyproject.toml b/py/samples/provider-anthropic-hello/pyproject.toml similarity index 97% rename from py/samples/anthropic-hello/pyproject.toml rename to py/samples/provider-anthropic-hello/pyproject.toml index b2a32716f7..ea8d2a7704 100644 --- a/py/samples/anthropic-hello/pyproject.toml +++ b/py/samples/provider-anthropic-hello/pyproject.toml @@ -25,7 +25,7 @@ dependencies = [ "uvloop>=0.21.0", ] description = "Anthropic Hello Sample" -name = "anthropic-hello" +name = "provider-anthropic-hello" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/anthropic-hello/run.sh b/py/samples/provider-anthropic-hello/run.sh similarity index 100% rename from py/samples/anthropic-hello/run.sh rename to py/samples/provider-anthropic-hello/run.sh diff --git a/py/samples/anthropic-hello/src/main.py b/py/samples/provider-anthropic-hello/src/main.py similarity index 61% rename from py/samples/anthropic-hello/src/main.py rename to py/samples/provider-anthropic-hello/src/main.py index 0e1e59c7d7..cc0a78f9d7 100755 --- a/py/samples/anthropic-hello/src/main.py +++ b/py/samples/provider-anthropic-hello/src/main.py @@ -57,12 +57,14 @@ | Defining Flows | `@ai.flow()` decorator | | Defining Tools | `@ai.tool()` decorator | | Pydantic for Tool Input Schema | `WeatherInput`, `CurrencyInput` | -| Simple Generation (Prompt String) | `say_hi` | -| Streaming Generation | `say_hi_stream` | -| Generation with Tools | `weather_flow`, 
`currency_exchange` | -| Generation Configuration (temperature) | `say_hi_with_config` | +| Simple Generation (Prompt String) | `generate_greeting` | +| System Prompts | `generate_with_system_prompt` | +| Multi-turn Conversations (`messages`) | `generate_multi_turn_chat` | +| Streaming Generation | `generate_streaming_story` | +| Generation with Tools | `generate_weather`, `convert_currency` | +| Generation Configuration (temperature) | `generate_with_config` | | Thinking (CoT) | `thinking_demo` | -| Code Generation | `code_flow` | +| Code Generation | `generate_code` | | Multimodal (Image Input) | `describe_image` | | Prompt Caching | `cached_generation` | | PDF Document Input | `analyze_pdf` | @@ -70,18 +72,43 @@ import asyncio import os -import random from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback -from genkit.ai import Genkit, Output +from genkit.ai import Genkit from genkit.core.action import ActionRunContext from genkit.core.logging import get_logger from genkit.plugins.anthropic import Anthropic, anthropic_name -from genkit.types import GenerationCommonConfig, Media, MediaPart, Message, Metadata, Part, Role, TextPart +from genkit.types import Media, MediaPart, Message, Metadata, Part, Role, TextPart +from samples.shared import ( + CharacterInput, + CodeInput, + CurrencyExchangeInput, + GreetingInput, + ImageDescribeInput, + MultiTurnInput, + RpgCharacter, + StreamingToolInput, + StreamInput, + SystemPromptInput, + WeatherInput, + convert_currency as _convert_currency_tool, + convert_currency_logic, + describe_image_logic, + generate_character_logic, + generate_code_logic, + generate_greeting_logic, + generate_multi_turn_chat_logic, + generate_streaming_story_logic, + generate_streaming_with_tools_logic, + generate_weather_logic, + generate_with_config_logic, + generate_with_system_prompt_logic, + get_weather, + setup_sample, +) -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() if 'ANTHROPIC_API_KEY' not in os.environ: os.environ['ANTHROPIC_API_KEY'] = input('Please enter your ANTHROPIC_API_KEY: ') @@ -93,73 +120,8 @@ model=anthropic_name('claude-3-5-haiku'), ) - -class CurrencyExchangeInput(BaseModel): - """Currency exchange flow input schema.""" - - amount: float = Field(description='Amount to convert', default=100) - from_curr: str = Field(description='Source currency code', default='USD') - to_curr: str = Field(description='Target currency code', default='EUR') - - -class CurrencyInput(BaseModel): - """Currency conversion input schema.""" - - amount: float = Field(description='Amount to convert', default=100) - from_currency: str = Field(description='Source currency code (e.g., USD)', default='USD') - to_currency: str = Field(description='Target currency code (e.g., EUR)', default='EUR') - - -class Skills(BaseModel): - """A set of core character skills for an RPG character.""" - - strength: int = Field(description='strength (0-100)') - charisma: int = Field(description='charisma (0-100)') - endurance: int = Field(description='endurance (0-100)') - - -class RpgCharacter(BaseModel): - """An RPG character.""" - - name: str = Field(description='name of the character') - back_story: str = Field(description='back story', alias='backStory') - abilities: list[str] = Field(description='list of abilities (3-4)') - skills: Skills - - -class WeatherInput(BaseModel): - """Weather input schema.""" - - location: str = Field(description='Location to get weather for') - - -class SayHiInput(BaseModel): - 
"""Input for say_hi flow.""" - - name: str = Field(default='Mittens', description='Name to greet') - - -class StreamInput(BaseModel): - """Input for streaming flow.""" - - topic: str = Field(default='cats and their behaviors', description='Topic to write about') - - -class CharacterInput(BaseModel): - """Input for character generation.""" - - name: str = Field(default='Whiskers', description='Character name') - - -class ImageDescribeInput(BaseModel): - """Input for image description.""" - - image_url: str = Field( - # Public domain cat image from Wikimedia Commons (no copyright, free for any use) - # Source: https://commons.wikimedia.org/wiki/File:Cute_kitten.jpg - default='https://upload.wikimedia.org/wikipedia/commons/1/13/Cute_kitten.jpg', - description='URL of the image to describe (replace with your own image URL)', - ) +ai.tool()(get_weather) +ai.tool()(_convert_currency_tool) class ThinkingInput(BaseModel): @@ -168,113 +130,82 @@ class ThinkingInput(BaseModel): question: str = Field(default='Why do cats purr?', description='Question to answer') -class WeatherFlowInput(BaseModel): - """Input for weather flow.""" +class CacheInput(BaseModel): + """Input for prompt caching demo.""" - location: str = Field(default='San Francisco', description='Location to get weather for') + question: str = Field(default='What are the key themes?', description='Question about the cached text') -class CodeInput(BaseModel): - """Input for code generation flow.""" +class PdfInput(BaseModel): + """Input for PDF analysis demo.""" - task: str = Field( - default='Write a Python function to calculate fibonacci numbers', - description='Coding task description', + pdf_url: str = Field( + # Public domain sample PDF. + default='https://pdfobject.com/pdf/sample.pdf', + description='URL of the PDF to analyze', ) + question: str = Field(default='Describe the contents of this document.', description='Question about the PDF') -@ai.tool() -def convert_currency(input: CurrencyInput) -> str: - """Convert currency amount. +@ai.flow() +async def generate_greeting(input: GreetingInput) -> str: + """Generate a simple greeting. Args: - input: Currency conversion parameters. + input: Input with name to greet. Returns: - Converted amount. + Greeting message. """ - # Mock conversion rates - rates = { - ('USD', 'EUR'): 0.85, - ('EUR', 'USD'): 1.18, - ('USD', 'GBP'): 0.73, - ('GBP', 'USD'): 1.37, - } - - rate = rates.get((input.from_currency, input.to_currency), 1.0) - converted = input.amount * rate - - return f'{input.amount} {input.from_currency} = {converted:.2f} {input.to_currency}' + return await generate_greeting_logic(ai, input.name) @ai.flow() -async def currency_exchange(input: CurrencyExchangeInput) -> str: - """Convert currency using tools. +async def generate_with_system_prompt(input: SystemPromptInput) -> str: + """Demonstrate system prompts to control model persona and behavior. Args: - input: Currency exchange parameters. + input: Input with a question to ask. Returns: - Conversion result. + The model's response in the persona defined by the system prompt. 
""" - response = await ai.generate( - prompt=f'Convert {input.amount} {input.from_curr} to {input.to_curr}', - tools=['convert_currency'], - ) - return response.text - - -@ai.flow() -async def describe_image(input: ImageDescribeInput) -> str: - """Describe an image using Anthropic.""" - response = await ai.generate( - prompt=[ - Part(root=TextPart(text='Describe this image')), - Part(root=MediaPart(media=Media(url=input.image_url, content_type='image/jpeg'))), - ], - ) - return response.text + return await generate_with_system_prompt_logic(ai, input.question) @ai.flow() -async def generate_character(input: CharacterInput) -> RpgCharacter: - """Generate an RPG character. +async def generate_multi_turn_chat(input: MultiTurnInput) -> str: + """Demonstrate multi-turn conversations using the messages parameter. Args: - input: Character generation input with name. + input: Input with a travel destination. Returns: - The generated RPG character. + The model's final response, demonstrating context retention. """ - result = await ai.generate( - prompt=f'generate an RPG character named {input.name}', - output=Output(schema=RpgCharacter), - ) - return result.output + return await generate_multi_turn_chat_logic(ai, input.destination) -@ai.tool() -def get_weather(input: WeatherInput) -> str: - """Return a random realistic weather string for a city name. +@ai.flow() +async def generate_streaming_story( + input: StreamInput, + ctx: ActionRunContext = None, # type: ignore[assignment] +) -> str: + """Generate a streaming story response. Args: - input: Weather input location. + input: Input with name for streaming story. + ctx: Action run context for streaming. Returns: - Weather information with temperature in degree Celsius. + Complete generated text. """ - weather_options = [ - '32° C sunny', - '17° C cloudy', - '22° C cloudy', - '19° C humid', - ] - return random.choice(weather_options) + return await generate_streaming_story_logic(ai, input.name, ctx) @ai.flow() -async def say_hi(input: SayHiInput) -> str: - """Generate a simple greeting. +async def generate_with_config(input: GreetingInput) -> str: + """Generate a greeting with custom model configuration. Args: input: Input with name to greet. @@ -282,119 +213,106 @@ async def say_hi(input: SayHiInput) -> str: Returns: Greeting message. """ - response = await ai.generate( - prompt=f'Say hello to {input.name} in a friendly way', - ) - return response.text + return await generate_with_config_logic(ai, input.name) @ai.flow() -async def say_hi_stream( - input: StreamInput, - ctx: ActionRunContext = None, # type: ignore[assignment] -) -> str: - """Generate streaming response. +async def generate_code(input: CodeInput) -> str: + """Generate code using Claude. Args: - input: Input with topic to write about. - ctx: Action run context for streaming. + input: Input with coding task description. Returns: - Complete generated text. + Generated code. """ - response = await ai.generate( - prompt=f'Write a short story about {input.topic}', - on_chunk=ctx.send_chunk, - ) - return response.text + return await generate_code_logic(ai, input.task) @ai.flow() -async def say_hi_with_config(input: SayHiInput) -> str: - """Generate greeting with custom configuration. +async def generate_weather(input: WeatherInput) -> str: + """Get weather information using tool calling. Args: - input: Input with name to greet. + input: Input with location to get weather for. Returns: - Greeting message. + Weather information. 
""" - response = await ai.generate( - prompt=f'Say hello to {input.name}', - config=GenerationCommonConfig( - temperature=0.7, - max_output_tokens=100, - ), - ) - return response.text + return await generate_weather_logic(ai, input) @ai.flow() -async def thinking_demo(input: ThinkingInput) -> str: - """Demonstrate Anthropic thinking capability. +async def generate_character(input: CharacterInput) -> RpgCharacter: + """Generate an RPG character with structured output. - Note: 'thinking' requires a compatible model (e.g., Claude 3.7 Sonnet). + Args: + input: Input with character name. + + Returns: + The generated RPG character. """ - response = await ai.generate( - model=anthropic_name('claude-3-7-sonnet-20250219'), - prompt=input.question, - config={ - 'thinking': {'type': 'enabled', 'budget_tokens': 1024}, - 'max_output_tokens': 4096, # Required when thinking is enabled - }, - ) - return response.text + return await generate_character_logic(ai, input.name) @ai.flow() -async def code_flow(input: CodeInput) -> str: - """Generate code using Claude. +async def convert_currency(input: CurrencyExchangeInput) -> str: + """Convert currency using tool calling. Args: - input: Input with coding task description. + input: Currency exchange parameters. Returns: - Generated code. + Conversion result. """ - response = await ai.generate( - prompt=input.task, - system='You are an expert programmer. Provide clean, well-documented code with explanations.', - ) - return response.text + return await convert_currency_logic(ai, input) @ai.flow() -async def weather_flow(input: WeatherFlowInput) -> str: - """Get weather using tools. +async def describe_image(input: ImageDescribeInput) -> str: + """Describe an image using Anthropic. Args: - input: Input with location to get weather for. + input: Input with image URL to describe. Returns: - Weather information. + A textual description of the image. """ - response = await ai.generate( - prompt=f'What is the weather in {input.location}?', - tools=['get_weather'], - ) - return response.text + return await describe_image_logic(ai, input.image_url) -class CacheInput(BaseModel): - """Input for prompt caching demo.""" +@ai.flow() +async def generate_streaming_with_tools( + input: StreamingToolInput, + ctx: ActionRunContext | None = None, +) -> str: + """Demonstrate streaming generation with tool calling. - question: str = Field(default='What are the key themes?', description='Question about the cached text') + Args: + input: Input with location for weather lookup. + ctx: Action context for streaming chunks to the client. + + Returns: + The complete generated text. + """ + return await generate_streaming_with_tools_logic(ai, input.location, ctx) -class PdfInput(BaseModel): - """Input for PDF analysis demo.""" +@ai.flow() +async def thinking_demo(input: ThinkingInput) -> str: + """Demonstrate Anthropic thinking capability. - pdf_url: str = Field( - # Public domain sample PDF. - default='https://pdfobject.com/pdf/sample.pdf', - description='URL of the PDF to analyze', + Note: 'thinking' requires a compatible model (e.g., Claude 3.7 Sonnet). 
+ """ + response = await ai.generate( + model=anthropic_name('claude-3-7-sonnet-20250219'), + prompt=input.question, + config={ + 'thinking': {'type': 'enabled', 'budget_tokens': 1024}, + 'max_output_tokens': 4096, # Required when thinking is enabled + }, ) - question: str = Field(default='Describe the contents of this document.', description='Question about the PDF') + return response.text @ai.flow() @@ -409,12 +327,6 @@ async def cached_generation(input: CacheInput) -> str: that part. See: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching - - Args: - input: Input with question about the cached context. - - Returns: - Model response leveraging cached context. """ # The long context is marked with cache_control metadata. # On subsequent calls with the same prefix, Anthropic reuses the cache. @@ -452,12 +364,6 @@ async def analyze_pdf(input: PdfInput) -> str: PDFs can be provided as URLs or base64-encoded data URIs. See: https://docs.anthropic.com/en/docs/build-with-claude/pdf-support - - Args: - input: Input with PDF URL and question. - - Returns: - Model's analysis of the PDF document. """ response = await ai.generate( model=anthropic_name('claude-3-5-haiku'), diff --git a/py/samples/google-genai-hello/LICENSE b/py/samples/provider-cloudflare-workers-ai-hello/LICENSE similarity index 100% rename from py/samples/google-genai-hello/LICENSE rename to py/samples/provider-cloudflare-workers-ai-hello/LICENSE diff --git a/py/samples/cloudflare-workers-ai-hello/README.md b/py/samples/provider-cloudflare-workers-ai-hello/README.md similarity index 97% rename from py/samples/cloudflare-workers-ai-hello/README.md rename to py/samples/provider-cloudflare-workers-ai-hello/README.md index 3509ada897..14798e9e35 100644 --- a/py/samples/cloudflare-workers-ai-hello/README.md +++ b/py/samples/provider-cloudflare-workers-ai-hello/README.md @@ -81,6 +81,7 @@ After starting the sample, open the Genkit DevUI at http://localhost:4000 and: 2. **streaming_demo**: Watch tokens stream in real-time 3. **tool_demo**: See tool calling in action 4. **embedding_demo**: Generate and view embedding vectors +5. 
**generate_character**: Generate a structured RPG character as JSON ## Supported Models diff --git a/py/samples/cloudflare-workers-ai-hello/pyproject.toml b/py/samples/provider-cloudflare-workers-ai-hello/pyproject.toml similarity index 95% rename from py/samples/cloudflare-workers-ai-hello/pyproject.toml rename to py/samples/provider-cloudflare-workers-ai-hello/pyproject.toml index 80cbb6a8a9..a7f9275953 100644 --- a/py/samples/cloudflare-workers-ai-hello/pyproject.toml +++ b/py/samples/provider-cloudflare-workers-ai-hello/pyproject.toml @@ -17,7 +17,7 @@ [project] dependencies = ["genkit", "genkit-plugin-cloudflare-workers-ai", "rich>=13.0.0"] description = "Cloudflare Workers AI Hello World Sample for Genkit" -name = "cloudflare-workers-ai-hello" +name = "provider-cloudflare-workers-ai-hello" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/cloudflare-workers-ai-hello/run.sh b/py/samples/provider-cloudflare-workers-ai-hello/run.sh similarity index 100% rename from py/samples/cloudflare-workers-ai-hello/run.sh rename to py/samples/provider-cloudflare-workers-ai-hello/run.sh diff --git a/py/samples/cloudflare-workers-ai-hello/src/main.py b/py/samples/provider-cloudflare-workers-ai-hello/src/main.py similarity index 71% rename from py/samples/cloudflare-workers-ai-hello/src/main.py rename to py/samples/provider-cloudflare-workers-ai-hello/src/main.py index dcf4256d7a..71cf6d0186 100644 --- a/py/samples/cloudflare-workers-ai-hello/src/main.py +++ b/py/samples/provider-cloudflare-workers-ai-hello/src/main.py @@ -52,7 +52,7 @@ ▼ ┌───────────────────┐ │ Genkit Flow │ - │ (say_hello) │ + │ (generate_greeting) │ └─────────┬─────────┘ │ ▼ @@ -85,42 +85,68 @@ 3. Open DevUI at http://localhost:4000 4. Test flows: - - say_hello: Enter a name, get a greeting - - streaming_demo: Watch tokens stream in real-time - - tool_demo: See weather tool in action - - embedding_demo: Generate text embeddings + - generate_greeting: Enter a name, get a greeting + - generate_with_system_prompt: System prompt persona demo + - generate_multi_turn_chat: Multi-turn conversation demo + - streaming_demo: Watch tokens stream in real-time + - generate_weather: See weather tool in action + - embedding_demo: Generate text embeddings + - generate_character: Structured output as JSON """ import asyncio from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback from genkit import Genkit +from genkit.core.action import ActionRunContext +from genkit.core.logging import get_logger from genkit.plugins.cloudflare_workers_ai import ( CloudflareWorkersAI, bge_base_en, cloudflare_model, ) from genkit.plugins.cloudflare_workers_ai.typing import CloudflareConfig -from genkit.types import Media, MediaPart, Message, Part, Role, TextPart +from samples.shared import ( + CharacterInput, + CodeInput, + ImageDescribeInput, + MultiTurnInput, + RpgCharacter, + StreamingToolInput, + SystemPromptInput, + WeatherInput, + describe_image_logic, + generate_character_logic, + generate_code_logic, + generate_greeting_logic, + generate_multi_turn_chat_logic, + generate_streaming_with_tools_logic, + generate_weather_logic, + generate_with_system_prompt_logic, + get_weather, + setup_sample, +) -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() # Enable OTLP telemetry export (optional, requires CF_OTLP_ENDPOINT env var) # To enable, add 'from genkit.plugins.cloudflare_workers_ai import add_cloudflare_telemetry' and call: # 
add_cloudflare_telemetry() -# Initialize Genkit with CF Workers AI plugin +logger = get_logger(__name__) + ai = Genkit( plugins=[CloudflareWorkersAI()], model=cloudflare_model('@cf/meta/llama-3.1-8b-instruct'), ) +ai.tool()(get_weather) + class HelloInput(BaseModel): - """Input for the say_hello flow. + """Input for the generate_greeting flow. Attributes: name: Name of the person to greet. @@ -133,21 +159,42 @@ class HelloInput(BaseModel): @ai.flow() -async def say_hello(input: HelloInput) -> str: - """Generate a friendly greeting for someone. +async def generate_greeting(input: HelloInput) -> str: + """Generate a simple greeting. - This flow demonstrates basic text generation with Cloudflare Workers AI. + Args: + input: Input with name to greet. + + Returns: + Greeting message. + """ + return await generate_greeting_logic(ai, input.name) + + +@ai.flow() +async def generate_with_system_prompt(input: SystemPromptInput) -> str: + """Demonstrate system prompts to control model persona and behavior. Args: - input: HelloInput with the name to greet. + input: Input with a question to ask. Returns: - A friendly greeting message. + The model's response in the persona defined by the system prompt. """ - response = await ai.generate( - prompt=f'Say hello to {input.name}! Be friendly and creative.', - ) - return response.text + return await generate_with_system_prompt_logic(ai, input.question) + + +@ai.flow() +async def generate_multi_turn_chat(input: MultiTurnInput) -> str: + """Demonstrate multi-turn conversations using the messages parameter. + + Args: + input: Input with a travel destination. + + Returns: + The model's final response, demonstrating context retention. + """ + return await generate_multi_turn_chat_logic(ai, input.destination) class StreamingInput(BaseModel): @@ -180,38 +227,9 @@ async def streaming_demo(input: StreamingInput) -> str: stream, _ = ai.generate_stream(prompt=input.prompt) async for chunk in stream: result_text += chunk.text - # In a real application, you would stream this to the client return result_text -class WeatherInput(BaseModel): - """Input for the get_weather tool. - - Attributes: - location: The city or location to get weather for. - """ - - location: str = Field( - description='The city or location to get weather for', - ) - - -@ai.tool() -async def get_weather(input: WeatherInput) -> str: - """Get the current weather for a location. - - This is a mock weather tool to demonstrate tool calling. - - Args: - input: WeatherInput with the location to check. - - Returns: - A weather description string. - """ - # Mock implementation - in production, call a real weather API - return f'The weather in {input.location} is sunny, 72°F (22°C) with clear skies.' - - class ToolDemoInput(BaseModel): """Input for the tool demo flow. @@ -226,22 +244,16 @@ class ToolDemoInput(BaseModel): @ai.flow() -async def tool_demo(input: ToolDemoInput) -> str: - """Demonstrate tool calling with Cloudflare Workers AI. - - This flow shows how models can call tools to get external information. +async def generate_weather(input: WeatherInput) -> str: + """Get weather information using tool calling. Args: - input: ToolDemoInput with the location. + input: Input with location to get weather for. Returns: - A response that incorporates the tool result. + Weather information. """ - response = await ai.generate( - prompt=f'What is the weather like in {input.location}? 
Use the get_weather tool to find out.', - tools=['get_weather'], - ) - return response.text + return await generate_weather_logic(ai, WeatherInput(location=input.location)) class EmbeddingInput(BaseModel): @@ -332,49 +344,12 @@ async def model_comparison(input: ModelComparisonInput) -> dict[str, str]: return results -class ImageDescribeInput(BaseModel): - """Input for image description flow. - - Attributes: - image_url: URL of the image to describe. - """ - - image_url: str = Field( - # Public domain cat image from Wikimedia Commons (no copyright, free for any use) - # Source: https://commons.wikimedia.org/wiki/File:Cute_kitten.jpg - default='https://upload.wikimedia.org/wikipedia/commons/1/13/Cute_kitten.jpg', - description='URL of the image to describe (replace with your own image URL)', - ) - - @ai.flow() async def describe_image(input: ImageDescribeInput) -> str: - """Describe an image using a multimodal Cloudflare AI model. - - This flow demonstrates multimodal capabilities with vision models like - Llama 4 Scout and Gemma 3. Note that not all Cloudflare models support - image inputs. - - Args: - input: ImageDescribeInput with the image URL. - - Returns: - A description of the image. - """ - response = await ai.generate( - # Use a multimodal model that supports image inputs - model=cloudflare_model('@cf/meta/llama-4-scout-17b-16e-instruct'), - messages=[ - Message( - role=Role.USER, - content=[ - Part(root=TextPart(text='Describe this image in detail.')), - Part(root=MediaPart(media=Media(url=input.image_url))), - ], - ) - ], + """Describe an image using a multimodal Cloudflare AI model.""" + return await describe_image_logic( + ai, input.image_url, model=cloudflare_model('@cf/meta/llama-4-scout-17b-16e-instruct') ) - return response.text class ConfigDemoInput(BaseModel): @@ -390,27 +365,15 @@ class ConfigDemoInput(BaseModel): ) -class CodeInput(BaseModel): - """Input for code generation flow.""" - - task: str = Field( - default='Write a Python function to calculate fibonacci numbers', - description='Coding task description', - ) - - @ai.flow() -async def say_hi_with_config(input: ConfigDemoInput) -> str: - """Generate text with custom configuration. - - This flow demonstrates using CloudflareConfig for fine-tuned control over - generation parameters like temperature, top_k, and repetition_penalty. +async def generate_with_config(input: ConfigDemoInput) -> str: + """Generate a greeting with custom model configuration. Args: - input: ConfigDemoInput with the prompt. + input: Input with prompt for generation. Returns: - The generated text. + Generated text. """ response = await ai.generate( prompt=input.prompt, @@ -425,7 +388,7 @@ async def say_hi_with_config(input: ConfigDemoInput) -> str: @ai.flow() -async def code_flow(input: CodeInput) -> str: +async def generate_code(input: CodeInput) -> str: """Generate code using Cloudflare Workers AI models. Args: @@ -434,11 +397,37 @@ async def code_flow(input: CodeInput) -> str: Returns: Generated code. """ - response = await ai.generate( - prompt=input.task, - system='You are an expert programmer. Provide clean, well-documented code with explanations.', - ) - return response.text + return await generate_code_logic(ai, input.task) + + +@ai.flow() +async def generate_character(input: CharacterInput) -> RpgCharacter: + """Generate an RPG character with structured output. + + Args: + input: Input with character name. + + Returns: + The generated RPG character. 
+ """ + return await generate_character_logic(ai, input.name) + + +@ai.flow() +async def generate_streaming_with_tools( + input: StreamingToolInput, + ctx: ActionRunContext | None = None, +) -> str: + """Demonstrate streaming generation with tool calling. + + Args: + input: Input with location for weather lookup. + ctx: Action context for streaming chunks to the client. + + Returns: + The complete generated text. + """ + return await generate_streaming_with_tools_logic(ai, input.location, ctx) async def main() -> None: diff --git a/py/samples/google-genai-image/LICENSE b/py/samples/provider-compat-oai-hello/LICENSE similarity index 100% rename from py/samples/google-genai-image/LICENSE rename to py/samples/provider-compat-oai-hello/LICENSE diff --git a/py/samples/compat-oai-hello/README.md b/py/samples/provider-compat-oai-hello/README.md similarity index 74% rename from py/samples/compat-oai-hello/README.md rename to py/samples/provider-compat-oai-hello/README.md index d7513be753..7a4c7f8d5b 100644 --- a/py/samples/compat-oai-hello/README.md +++ b/py/samples/provider-compat-oai-hello/README.md @@ -56,7 +56,7 @@ genkit start -- uv run src/main.py 2. **Run the demo**: ```bash - cd py/samples/compat-oai-hello + cd py/samples/provider-compat-oai-hello ./run.sh ``` @@ -73,16 +73,24 @@ genkit start -- uv run src/main.py 6. **Test structured output**: - [ ] `generate_character` - RPG character generation -7. **Test multimodal**: +7. **Test vision**: + - [ ] `describe_image` - Image description using GPT-4o vision + +8. **Test reasoning**: + - [ ] `reasoning_flow` - Chain-of-thought reasoning with o4-mini + +9. **Test multimodal**: - [ ] `generate_image` - DALL-E image generation (returns base64 data URI) - [ ] `text_to_speech` - TTS with voice selection (alloy, echo, nova, etc.) - [ ] `round_trip_tts_stt` - Text → Speech → Text round-trip demo -8. **Expected behavior**: - - GPT models respond appropriately - - Streaming shows incremental text - - Tools are invoked and responses processed - - Structured output matches Pydantic schema - - DALL-E returns a base64 image data URI - - TTS returns a base64 audio data URI - - Round-trip returns transcribed text matching the original input +10. 
**Expected behavior**: + - GPT models respond appropriately + - Streaming shows incremental text + - Tools are invoked and responses processed + - Structured output matches Pydantic schema + - Vision describes the kitten image accurately + - Reasoning shows chain-of-thought explanation + - DALL-E returns a base64 image data URI + - TTS returns a base64 audio data URI + - Round-trip returns transcribed text matching the original input diff --git a/py/samples/compat-oai-hello/pyproject.toml b/py/samples/provider-compat-oai-hello/pyproject.toml similarity index 98% rename from py/samples/compat-oai-hello/pyproject.toml rename to py/samples/provider-compat-oai-hello/pyproject.toml index c655258b5a..8448b78761 100644 --- a/py/samples/compat-oai-hello/pyproject.toml +++ b/py/samples/provider-compat-oai-hello/pyproject.toml @@ -43,7 +43,7 @@ dependencies = [ ] description = "OpenAI sample" license = "Apache-2.0" -name = "compat-oai-hello" +name = "provider-compat-oai-hello" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/compat-oai-hello/run.sh b/py/samples/provider-compat-oai-hello/run.sh similarity index 100% rename from py/samples/compat-oai-hello/run.sh rename to py/samples/provider-compat-oai-hello/run.sh diff --git a/py/samples/compat-oai-hello/src/main.py b/py/samples/provider-compat-oai-hello/src/main.py similarity index 70% rename from py/samples/compat-oai-hello/src/main.py rename to py/samples/provider-compat-oai-hello/src/main.py index 476fbe092f..ff781f267a 100755 --- a/py/samples/compat-oai-hello/src/main.py +++ b/py/samples/provider-compat-oai-hello/src/main.py @@ -54,14 +54,18 @@ | Defining Flows | `@ai.flow()` decorator (multiple uses) | | Defining Tools | `@ai.tool()` decorator (multiple uses) | | Tool Input Schema (Pydantic) | `GablorkenInput` | -| Simple Generation (Prompt String) | `say_hi` | -| Generation with Messages (`Message`, `Role`, `TextPart`) | `say_hi_constrained` | -| Streaming Generation | `say_hi_stream` | +| Simple Generation (Prompt String) | `generate_greeting` | +| System Prompts | `generate_with_system_prompt` | +| Multi-turn Conversations (`messages`) | `generate_multi_turn_chat` | +| Structured Output (Simple) | `structured_menu_suggestion` | +| Streaming Generation | `generate_streaming_story` | | Generation with Tools | `calculate_gablorken` | | Tool Response Handling with context | `generate_character` | +| Multimodal (Image Input / Vision) | `describe_image` | +| Reasoning (Chain-of-Thought) | `solve_reasoning_problem` | | Image Generation (DALL-E) | `generate_image` | | Text-to-Speech (TTS) | `text_to_speech` | -| Code Generation | `code_flow` | +| Code Generation | `generate_code` | | TTS → STT Round-Trip | `round_trip_tts_stt` | See README.md for testing instructions. 
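The flows listed in the table above delegate to shared helpers imported from `samples.shared`, a module this diff references but does not show. As a minimal sketch (not the actual implementation), two of those helpers might look like the following, with signatures inferred from the call sites (`generate_greeting_logic(ai, input.name)`, `generate_multi_turn_chat_logic(ai, input.destination)`) and using only `genkit` APIs that appear elsewhere in this patch:

```python
# Sketch only: the real samples/shared module is not part of this diff.
# Signatures are inferred from call sites; Role.MODEL is assumed to be the
# assistant-side value of genkit's Role enum.
from genkit.ai import Genkit
from genkit.types import Message, Part, Role, TextPart


async def generate_greeting_logic(ai: Genkit, name: str) -> str:
    """One shared implementation reused by every provider sample."""
    response = await ai.generate(prompt=f'Say hello to {name}! Be friendly and creative.')
    return response.text


async def generate_multi_turn_chat_logic(ai: Genkit, destination: str) -> str:
    """Pass prior turns via `messages` so the model keeps conversational context."""
    response = await ai.generate(
        messages=[
            Message(role=Role.USER, content=[Part(root=TextPart(text=f'I want to visit {destination}.'))]),
            Message(role=Role.MODEL, content=[Part(root=TextPart(text='Great choice! What would you like to know?'))]),
            Message(role=Role.USER, content=[Part(root=TextPart(text='What are the top three things to do there?'))]),
        ],
    )
    return response.text
```

The point of the refactor is that each provider sample (`provider-compat-oai-hello`, `provider-deepseek-hello`, and so on) registers a thin `@ai.flow()` wrapper around the same shared logic, so behavior stays identical across providers.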
@@ -73,14 +77,39 @@ import httpx from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback from genkit.ai import ActionRunContext, Genkit, Output from genkit.core.logging import get_logger from genkit.plugins.compat_oai import OpenAI, openai_model from genkit.types import Media, MediaPart, Message, Part, Role, TextPart - -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +from samples.shared import ( + CharacterInput, + CodeInput, + CurrencyExchangeInput, + GreetingInput, + ImageDescribeInput, + MultiTurnInput, + ReasoningInput, + RpgCharacter, + StreamingToolInput, + StreamInput, + SystemPromptInput, + convert_currency as _convert_currency_tool, + convert_currency_logic, + describe_image_logic, + generate_character_logic, + generate_code_logic, + generate_greeting_logic, + generate_multi_turn_chat_logic, + generate_streaming_story_logic, + generate_streaming_with_tools_logic, + generate_with_system_prompt_logic, + get_weather, + setup_sample, + solve_reasoning_problem_logic, +) + +setup_sample() if 'OPENAI_API_KEY' not in os.environ: os.environ['OPENAI_API_KEY'] = input('Please enter your OPENAI_API_KEY: ') @@ -89,21 +118,8 @@ ai = Genkit(plugins=[OpenAI()], model=openai_model('gpt-4o')) - -class CurrencyExchangeInput(BaseModel): - """Currency exchange flow input schema.""" - - amount: float = Field(description='Amount to convert', default=100) - from_curr: str = Field(description='Source currency code', default='USD') - to_curr: str = Field(description='Target currency code', default='EUR') - - -class CurrencyInput(BaseModel): - """Currency conversion input schema.""" - - amount: float = Field(description='Amount to convert') - from_currency: str = Field(description='Source currency code (e.g., USD)') - to_currency: str = Field(description='Target currency code (e.g., EUR)') +ai.tool()(get_weather) +ai.tool()(_convert_currency_tool) class GablorkenInput(BaseModel): @@ -112,16 +128,19 @@ class GablorkenInput(BaseModel): value: int = Field(description='value to calculate gablorken for') -class HelloSchema(BaseModel): - """Hello schema. +class MenuSuggestion(BaseModel): + """A suggested menu item from a themed restaurant. - Args: - text: The text to say hello to. - receiver: The receiver of the hello. + Demonstrates structured output with multiple field types: strings, + numbers, lists, and booleans — matching the Genkit documentation + example for structured output. 
""" - text: str - receiver: str + name: str = Field(description='The name of the menu item') + description: str = Field(description='A short, appetizing description') + price: float = Field(description='Estimated price in USD') + allergens: list[str] = Field(description='Known allergens (e.g., nuts, dairy, gluten)') + is_vegetarian: bool = Field(description='Whether the item is vegetarian') class MyInput(BaseModel): @@ -131,24 +150,6 @@ class MyInput(BaseModel): b: int = Field(default=3, description='b field') -class Skills(BaseModel): - """A set of core character skills for an RPG character.""" - - strength: int = Field(description='strength (0-100)') - charisma: int = Field(description='charisma (0-100)') - endurance: int = Field(description='endurance (0-100)') - gablorket: int = Field(description='gablorken (0-100)') - - -class RpgCharacter(BaseModel): - """An RPG character.""" - - name: str = Field(description='name of the character') - back_story: str = Field(description='back story', alias='backStory') - abilities: list[str] = Field(description='list of abilities (3-4)') - skills: Skills - - class WeatherRequest(BaseModel): """Weather request.""" @@ -176,42 +177,54 @@ class GablorkenFlowInput(BaseModel): value: int = Field(default=42, description='Value to calculate gablorken for') -class SayHiInput(BaseModel): - """Input for say_hi flow.""" - - name: str = Field(default='Mittens', description='Name to greet') +class MenuSuggestionInput(BaseModel): + """Input for structured menu suggestion flow.""" + theme: str = Field(default='pirate', description='Restaurant theme (e.g., pirate, space, medieval)') -class SayHiConstrainedInput(BaseModel): - """Input for constrained greeting flow.""" - - hi_input: str = Field(default='Mittens', description='Name to greet') +class WeatherFlowInput(BaseModel): + """Input for weather flow.""" -class StreamInput(BaseModel): - """Input for streaming flow.""" + location: str = Field(default='New York', description='Location to get weather for') - name: str = Field(default='Shadow', description='Name for streaming greeting') +class ImagePromptInput(BaseModel): + """Input for image generation flow.""" -class CharacterInput(BaseModel): - """Input for character generation.""" + prompt: str = Field( + default='A watercolor painting of a cat sitting on a windowsill at sunset', + description='Text prompt describing the image to generate', + ) - name: str = Field(default='Whiskers', description='Character name') +class TTSInput(BaseModel): + """Input for text-to-speech flow.""" -class WeatherFlowInput(BaseModel): - """Input for weather flow.""" + text: str = Field( + default='Hello! This is Genkit speaking through OpenAI text-to-speech.', + description='Text to convert to speech', + ) + voice: str = Field( + default='alloy', + description='Voice to use (alloy, echo, fable, onyx, nova, shimmer)', + ) - location: str = Field(default='New York', description='Location to get weather for') +class RoundTripInput(BaseModel): + """Input for the TTS → STT round-trip demo. -class CodeInput(BaseModel): - """Input for code generation flow.""" + Provide text to convert to speech, then transcribe back. This + demonstrates both TTS and STT in a single testable flow. 
+ """ - task: str = Field( - default='Write a Python function to calculate fibonacci numbers', - description='Coding task description', + text: str = Field( + default='The quick brown fox jumps over the lazy dog.', + description='Text to speak and then transcribe back', + ) + voice: str = Field( + default='alloy', + description='Voice to use for TTS (alloy, echo, fable, onyx, nova, shimmer)', ) @@ -249,30 +262,6 @@ def get_weather_tool(coordinates: WeatherRequest) -> float: return float(data['current']['temperature_2m']) -@ai.tool() -def convert_currency(input: CurrencyInput) -> str: - """Convert currency amount. - - Args: - input: Currency conversion parameters. - - Returns: - Converted amount. - """ - # Mock conversion rates - rates = { - ('USD', 'EUR'): 0.85, - ('EUR', 'USD'): 1.18, - ('USD', 'GBP'): 0.73, - ('GBP', 'USD'): 1.37, - } - - rate = rates.get((input.from_currency, input.to_currency), 1.0) - converted = input.amount * rate - - return f'{input.amount} {input.from_currency} = {converted:.2f} {input.to_currency}' - - @ai.flow() async def calculate_gablorken(input: GablorkenFlowInput) -> str: """Generate a request to calculate gablorken according to gablorken_tool. @@ -293,8 +282,8 @@ async def calculate_gablorken(input: GablorkenFlowInput) -> str: @ai.flow() -async def currency_exchange(input: CurrencyExchangeInput) -> str: - """Convert currency using tools. +async def convert_currency(input: CurrencyExchangeInput) -> str: + """Convert currency using tool calling. Args: input: Currency exchange parameters. @@ -302,46 +291,20 @@ async def currency_exchange(input: CurrencyExchangeInput) -> str: Returns: Conversion result. """ - response = await ai.generate( - prompt=f'Convert {input.amount} {input.from_curr} to {input.to_curr}', - tools=['convert_currency'], - ) - return response.text + return await convert_currency_logic(ai, input) @ai.flow() -async def generate_character( - input: CharacterInput, - ctx: ActionRunContext | None = None, -) -> RpgCharacter: - """Generate an RPG character. +async def generate_character(input: CharacterInput) -> RpgCharacter: + """Generate an RPG character with structured output. Args: input: Input with character name. - ctx: the context of the tool Returns: The generated RPG character. """ - if ctx is not None and ctx.is_streaming: - stream, result = ai.generate_stream( - prompt=f'generate an RPG character named {input.name} with gablorken based on 42', - output=Output(schema=RpgCharacter), - config={'model': 'gpt-4o-2024-08-06', 'temperature': 1}, - tools=['gablorkenTool'], - ) - async for data in stream: - ctx.send_chunk(data.output) - - return (await result).output - else: - result = await ai.generate( - prompt=f'generate an RPG character named {input.name} with gablorken based on 13', - output=Output(schema=RpgCharacter), - config={'model': 'gpt-4o-2024-08-06', 'temperature': 1}, - tools=['gablorkenTool'], - ) - return result.output + return await generate_character_logic(ai, input.name) @ai.flow() @@ -393,8 +356,8 @@ async def get_weather_flow_stream(input: WeatherFlowInput) -> str: @ai.flow() -async def say_hi(input: SayHiInput) -> str: - """Say hi to a name. +async def generate_greeting(input: GreetingInput) -> str: + """Generate a simple greeting. Args: input: Input with name to greet. @@ -402,50 +365,81 @@ async def say_hi(input: SayHiInput) -> str: Returns: The response from the OpenAI API. 
""" - response = await ai.generate( - model=openai_model('gpt-4o'), - config={'temperature': 1}, - prompt=f'hi {input.name}', - ) - return response.text + return await generate_greeting_logic(ai, input.name) @ai.flow() -async def say_hi_constrained(input: SayHiConstrainedInput) -> HelloSchema: - """Generate a request to greet a user with response following `HelloSchema` schema. +async def generate_with_system_prompt(input: SystemPromptInput) -> str: + """Demonstrate system prompts to control model persona and behavior. + + System prompts give the model instructions about how to respond, such as + adopting a specific persona, tone, or response format. + + See: https://genkit.dev/docs/models#system-prompts Args: - input: Input with name to greet. + input: Input with a question to ask. + + Returns: + The model's response in the persona defined by the system prompt. + """ + return await generate_with_system_prompt_logic(ai, input.question) + + +@ai.flow() +async def generate_multi_turn_chat(input: MultiTurnInput) -> str: + """Demonstrate multi-turn conversations using the messages parameter. + + The messages parameter allows you to pass a conversation history to + maintain context across multiple interactions with the model. Each + message has a role ('user' or 'model') and content. + + See: https://genkit.dev/docs/models#multi-turn-conversations-with-messages + + Args: + input: Input with a travel destination. + + Returns: + The model's final response, demonstrating context retention. + """ + return await generate_multi_turn_chat_logic(ai, input.destination) + + +@ai.flow() +async def structured_menu_suggestion(input: MenuSuggestionInput) -> MenuSuggestion: + """Suggest a themed menu item using structured output. + + Demonstrates Genkit's structured output feature: the model returns + data conforming to a Pydantic schema with multiple field types + (str, float, list, bool) rather than free-form text. + + See: https://genkit.dev/docs/models#structured-output + + Args: + input: Input with restaurant theme. Returns: - A `HelloSchema` object with the greeting message. + A MenuSuggestion with name, description, price, allergens, etc. """ response = await ai.generate( - prompt='hi ' + input.hi_input, - output=Output(schema=HelloSchema), + prompt=f'Suggest a menu item for a {input.theme}-themed restaurant.', + output=Output(schema=MenuSuggestion), ) return response.output @ai.flow() -async def say_hi_stream(input: StreamInput) -> str: - """Say hi to a name and stream the response. +async def generate_streaming_story(input: StreamInput, ctx: ActionRunContext | None = None) -> str: + """Generate a streaming story response. Args: input: Input with name for streaming greeting. + ctx: Action context for streaming. Returns: The response from the OpenAI API. 
""" - stream, _ = ai.generate_stream( - model=openai_model('gpt-4'), - config={'model': 'gpt-4-0613', 'temperature': 1}, - prompt=f'hi {input.name}', - ) - result: str = '' - async for data in stream: - result += data.text - return result + return await generate_streaming_story_logic(ai, input.name, ctx) @ai.flow() @@ -461,45 +455,6 @@ async def sum_two_numbers2(my_input: MyInput) -> int: return my_input.a + my_input.b -class ImagePromptInput(BaseModel): - """Input for image generation flow.""" - - prompt: str = Field( - default='A watercolor painting of a cat sitting on a windowsill at sunset', - description='Text prompt describing the image to generate', - ) - - -class TTSInput(BaseModel): - """Input for text-to-speech flow.""" - - text: str = Field( - default='Hello! This is Genkit speaking through OpenAI text-to-speech.', - description='Text to convert to speech', - ) - voice: str = Field( - default='alloy', - description='Voice to use (alloy, echo, fable, onyx, nova, shimmer)', - ) - - -class RoundTripInput(BaseModel): - """Input for the TTS → STT round-trip demo. - - Provide text to convert to speech, then transcribe back. This - demonstrates both TTS and STT in a single testable flow. - """ - - text: str = Field( - default='The quick brown fox jumps over the lazy dog.', - description='Text to speak and then transcribe back', - ) - voice: str = Field( - default='alloy', - description='Voice to use for TTS (alloy, echo, fable, onyx, nova, shimmer)', - ) - - def _extract_media_url(response: object) -> str: """Extract the media data URI from a generate response. @@ -604,7 +559,41 @@ async def round_trip_tts_stt(input: RoundTripInput) -> str: @ai.flow() -async def code_flow(input: CodeInput) -> str: +async def describe_image(input: ImageDescribeInput) -> str: + """Describe an image using GPT-4o vision capabilities. + + This demonstrates multimodal image understanding via the + OpenAI-compatible plugin. GPT-4o can analyze images and + provide detailed descriptions. + + Args: + input: Input with image URL to describe. + + Returns: + A textual description of the image. + """ + return await describe_image_logic(ai, input.image_url) + + +@ai.flow() +async def solve_reasoning_problem(input: ReasoningInput) -> str: + """Solve reasoning problems using OpenAI o4-mini. + + o4-mini is a reasoning model that shows chain-of-thought + steps. This demonstrates how reasoning models work with + the OpenAI-compatible plugin. + + Args: + input: Input with reasoning question to solve. + + Returns: + The reasoning and answer. + """ + return await solve_reasoning_problem_logic(ai, input.prompt, model=openai_model('o4-mini')) + + +@ai.flow() +async def generate_code(input: CodeInput) -> str: """Generate code using OpenAI models. Args: @@ -613,11 +602,27 @@ async def code_flow(input: CodeInput) -> str: Returns: Generated code. """ - response = await ai.generate( - prompt=input.task, - system='You are an expert programmer. Provide clean, well-documented code with explanations.', - ) - return response.text + return await generate_code_logic(ai, input.task) + + +@ai.flow() +async def generate_streaming_with_tools( + input: StreamingToolInput, + ctx: ActionRunContext | None = None, +) -> str: + """Demonstrate streaming generation with tool calling. + + The model streams its response while also calling tools mid-generation. + Tool calls are resolved automatically and the model continues generating. + + Args: + input: Input with location for weather lookup. + ctx: Action context for streaming chunks to the client. 
+ + Returns: + The complete generated text. + """ + return await generate_streaming_with_tools_logic(ai, input.location, ctx) async def main() -> None: diff --git a/py/samples/google-genai-vertexai-hello/LICENSE b/py/samples/provider-deepseek-hello/LICENSE similarity index 100% rename from py/samples/google-genai-vertexai-hello/LICENSE rename to py/samples/provider-deepseek-hello/LICENSE diff --git a/py/samples/deepseek-hello/README.md b/py/samples/provider-deepseek-hello/README.md similarity index 100% rename from py/samples/deepseek-hello/README.md rename to py/samples/provider-deepseek-hello/README.md diff --git a/py/samples/deepseek-hello/pyproject.toml b/py/samples/provider-deepseek-hello/pyproject.toml similarity index 97% rename from py/samples/deepseek-hello/pyproject.toml rename to py/samples/provider-deepseek-hello/pyproject.toml index 96abafb798..1e3d70698b 100644 --- a/py/samples/deepseek-hello/pyproject.toml +++ b/py/samples/provider-deepseek-hello/pyproject.toml @@ -25,7 +25,7 @@ dependencies = [ "uvloop>=0.21.0", ] description = "DeepSeek Hello Sample" -name = "deepseek-hello" +name = "provider-deepseek-hello" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/deepseek-hello/run.sh b/py/samples/provider-deepseek-hello/run.sh similarity index 100% rename from py/samples/deepseek-hello/run.sh rename to py/samples/provider-deepseek-hello/run.sh diff --git a/py/samples/provider-deepseek-hello/src/main.py b/py/samples/provider-deepseek-hello/src/main.py new file mode 100644 index 0000000000..b9a10ade90 --- /dev/null +++ b/py/samples/provider-deepseek-hello/src/main.py @@ -0,0 +1,260 @@ +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +"""DeepSeek hello sample - DeepSeek models with Genkit. + +This sample demonstrates how to use DeepSeek's models with Genkit, +including the powerful reasoning model (deepseek-reasoner). + +See README.md for testing instructions. + +Key Concepts (ELI5):: + + ┌─────────────────────┬────────────────────────────────────────────────────┐ + │ Concept │ ELI5 Explanation │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ DeepSeek │ Chinese AI company known for efficient models. │ + │ │ Great performance at lower cost. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ deepseek-chat │ The standard chat model. Good for most tasks │ + │ │ like writing, Q&A, and coding help. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ deepseek-reasoner │ The R1 reasoning model. Shows its thinking │ + │ │ step by step - great for math and logic. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ Chain-of-Thought │ When AI explains its reasoning step by step. │ + │ │ Like showing your work on a test. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ Streaming │ Get the response word-by-word as it's generated. 
│ + │ │ Feels faster, like watching someone type. │ + └─────────────────────┴────────────────────────────────────────────────────┘ + +Key Features +============ +| Feature Description | Example Function / Code Snippet | +|-----------------------------------------|-----------------------------------------| +| Plugin Initialization | `ai = Genkit(plugins=[DeepSeek(...)])` | +| Default Model Configuration | `ai = Genkit(model=deepseek_name(...))` | +| Defining Flows | `@ai.flow()` decorator | +| Defining Tools | `@ai.tool()` decorator | +| Pydantic for Tool Input Schema | `WeatherInput` | +| Simple Generation (Prompt String) | `generate_greeting` | +| System Prompts | `generate_with_system_prompt` | +| Streaming Response | `generate_streaming_story` | +| Generation with Tools | `generate_weather` | +| Reasoning Model (deepseek-reasoner) | `solve_reasoning_problem` | +| Generation with Config | `generate_with_config` | +| Code Generation | `generate_code` | +| Multi-turn Chat | `generate_multi_turn_chat` | +""" + +import asyncio +import os + +from genkit.ai import Genkit +from genkit.core.action import ActionRunContext +from genkit.core.logging import get_logger +from genkit.plugins.deepseek import DeepSeek, deepseek_name +from samples.shared import ( + CharacterInput, + CodeInput, + ConfigInput, + CurrencyExchangeInput, + GreetingInput, + MultiTurnInput, + ReasoningInput, + RpgCharacter, + StreamingToolInput, + StreamInput, + SystemPromptInput, + WeatherInput, + convert_currency as _convert_currency_tool, + convert_currency_logic, + generate_character_logic, + generate_code_logic, + generate_greeting_logic, + generate_multi_turn_chat_logic, + generate_streaming_story_logic, + generate_streaming_with_tools_logic, + generate_weather_logic, + generate_with_config_logic, + generate_with_system_prompt_logic, + get_weather, + setup_sample, + solve_reasoning_problem_logic, +) + +setup_sample() + +if 'DEEPSEEK_API_KEY' not in os.environ: + os.environ['DEEPSEEK_API_KEY'] = input('Please enter your DEEPSEEK_API_KEY: ') + +logger = get_logger(__name__) + +ai = Genkit( + plugins=[DeepSeek()], + model=deepseek_name('deepseek-chat'), +) + +ai.tool()(get_weather) +ai.tool()(_convert_currency_tool) + + +@ai.flow() +async def generate_greeting(input: GreetingInput) -> str: + """Generate a simple greeting. + + Args: + input: Input with name to greet. + + Returns: + Greeting message. + """ + return await generate_greeting_logic(ai, input.name) + + +@ai.flow() +async def generate_with_system_prompt(input: SystemPromptInput) -> str: + """Demonstrate system prompts to control model persona and behavior. + + Args: + input: Input with a question to ask. + + Returns: + The model's response in the persona defined by the system prompt. + """ + return await generate_with_system_prompt_logic(ai, input.question) + + +@ai.flow() +async def generate_multi_turn_chat(input: MultiTurnInput) -> str: + """Demonstrate multi-turn conversations using the messages parameter. + + Args: + input: Input with a travel destination. + + Returns: + The model's final response, demonstrating context retention. + """ + return await generate_multi_turn_chat_logic(ai, input.destination) + + +@ai.flow() +async def generate_code(input: CodeInput) -> str: + """Generate code using DeepSeek. + + Args: + input: Input with coding task description. + + Returns: + Generated code. 
+ """ + return await generate_code_logic(ai, input.task) + + +@ai.flow() +async def generate_streaming_story( + input: StreamInput, + ctx: ActionRunContext | None = None, +) -> str: + """Generate a streaming story response. + + Args: + input: Input with name for streaming story. + ctx: Action run context for streaming. + + Returns: + Complete generated text. + """ + return await generate_streaming_story_logic(ai, input.name, ctx) + + +@ai.flow() +async def generate_with_config(input: ConfigInput) -> str: + """Generate a greeting with custom model configuration. + + Args: + input: Input with name to greet. + + Returns: + Greeting message. + """ + return await generate_with_config_logic(ai, input.name) + + +@ai.flow() +async def generate_weather(input: WeatherInput) -> str: + """Get weather information using tool calling. + + Args: + input: Input with location to get weather for. + + Returns: + Weather information. + """ + return await generate_weather_logic(ai, input) + + +@ai.flow() +async def generate_character(input: CharacterInput) -> RpgCharacter: + """Generate an RPG character with structured output.""" + return await generate_character_logic(ai, input.name) + + +@ai.flow() +async def convert_currency(input: CurrencyExchangeInput) -> str: + """Convert currency using tool calling. + + Args: + input: Currency exchange parameters. + + Returns: + Conversion result. + """ + return await convert_currency_logic(ai, input) + + +@ai.flow() +async def generate_streaming_with_tools( + input: StreamingToolInput, + ctx: ActionRunContext | None = None, +) -> str: + """Demonstrate streaming generation with tool calling.""" + return await generate_streaming_with_tools_logic(ai, input.location, ctx) + + +@ai.flow() +async def solve_reasoning_problem(input: ReasoningInput) -> str: + """Solve reasoning problems using deepseek-reasoner. + + Args: + input: Input with reasoning question to solve. + + Returns: + The reasoning and answer. + """ + return await solve_reasoning_problem_logic(ai, input.prompt, model=deepseek_name('deepseek-reasoner')) + + +async def main() -> None: + """Main entry point for the DeepSeek sample - keep alive for Dev UI.""" + await logger.ainfo('Genkit server running. 
Press Ctrl+C to stop.') + # Keep the process alive for Dev UI + await asyncio.Event().wait() + + +if __name__ == '__main__': + ai.run_main(main()) diff --git a/py/samples/google-genai-vertexai-image/LICENSE b/py/samples/provider-firestore-retriever/LICENSE similarity index 100% rename from py/samples/google-genai-vertexai-image/LICENSE rename to py/samples/provider-firestore-retriever/LICENSE diff --git a/py/samples/firestore-retreiver/README.md b/py/samples/provider-firestore-retriever/README.md similarity index 100% rename from py/samples/firestore-retreiver/README.md rename to py/samples/provider-firestore-retriever/README.md diff --git a/py/samples/firestore-retreiver/pyproject.toml b/py/samples/provider-firestore-retriever/pyproject.toml similarity index 93% rename from py/samples/firestore-retreiver/pyproject.toml rename to py/samples/provider-firestore-retriever/pyproject.toml index 1563ce6165..483b6b1326 100644 --- a/py/samples/firestore-retreiver/pyproject.toml +++ b/py/samples/provider-firestore-retriever/pyproject.toml @@ -40,9 +40,9 @@ dependencies = [ "google-cloud-firestore", "uvloop>=0.21.0", ] -description = "firestore-retreiver Genkit sample" +description = "firestore-retriever Genkit sample" license = "Apache-2.0" -name = "firestore-retreiver" +name = "provider-firestore-retriever" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" @@ -55,4 +55,4 @@ build-backend = "hatchling.build" requires = ["hatchling"] [tool.hatch.build.targets.wheel] -packages = ["src/firestore-retreiver"] +packages = ["src/firestore-retriever"] diff --git a/py/samples/firestore-retreiver/run.sh b/py/samples/provider-firestore-retriever/run.sh similarity index 100% rename from py/samples/firestore-retreiver/run.sh rename to py/samples/provider-firestore-retriever/run.sh diff --git a/py/samples/firestore-retreiver/src/main.py b/py/samples/provider-firestore-retriever/src/main.py similarity index 98% rename from py/samples/firestore-retreiver/src/main.py rename to py/samples/provider-firestore-retriever/src/main.py index 8660ddf9d2..67dea299bf 100644 --- a/py/samples/firestore-retreiver/src/main.py +++ b/py/samples/provider-firestore-retriever/src/main.py @@ -91,14 +91,14 @@ from google.cloud import firestore from google.cloud.firestore_v1.base_vector_query import DistanceMeasure from google.cloud.firestore_v1.vector import Vector -from rich.traceback import install as install_rich_traceback from genkit.ai import Genkit from genkit.plugins.firebase import add_firebase_telemetry, define_firestore_vector_store from genkit.plugins.google_genai import VertexAI from genkit.types import Document, RetrieverResponse +from samples.shared.logging import setup_sample -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() if 'GCLOUD_PROJECT' not in os.environ: os.environ['GCLOUD_PROJECT'] = input('Please enter your GCLOUD_PROJECT: ') @@ -162,7 +162,7 @@ async def index_documents() -> None: @ai.flow() -async def retreive_documents() -> RetrieverResponse: +async def retrieve_documents() -> RetrieverResponse: """Retrieves the film documents from Firestore.""" return await ai.retrieve( query=Document.from_text('sci-fi film'), diff --git a/py/samples/huggingface-hello/LICENSE b/py/samples/provider-google-genai-code-execution/LICENSE similarity index 100% rename from py/samples/huggingface-hello/LICENSE rename to py/samples/provider-google-genai-code-execution/LICENSE diff --git a/py/samples/google-genai-code-execution/README.md 
b/py/samples/provider-google-genai-code-execution/README.md similarity index 97% rename from py/samples/google-genai-code-execution/README.md rename to py/samples/provider-google-genai-code-execution/README.md index a323ec60b0..cc6b6829d9 100644 --- a/py/samples/google-genai-code-execution/README.md +++ b/py/samples/provider-google-genai-code-execution/README.md @@ -51,7 +51,7 @@ genkit start -- uv run src/main.py 2. **Run the demo**: ```bash - cd py/samples/google-genai-code-execution + cd py/samples/provider-google-genai-code-execution ./run.sh ``` diff --git a/py/samples/google-genai-code-execution/pyproject.toml b/py/samples/provider-google-genai-code-execution/pyproject.toml similarity index 97% rename from py/samples/google-genai-code-execution/pyproject.toml rename to py/samples/provider-google-genai-code-execution/pyproject.toml index bfa4db27a5..72aec49c51 100644 --- a/py/samples/google-genai-code-execution/pyproject.toml +++ b/py/samples/provider-google-genai-code-execution/pyproject.toml @@ -42,7 +42,7 @@ dependencies = [ ] description = "Code execution sample" license = "Apache-2.0" -name = "google-genai-code-execution" +name = "provider-google-genai-code-execution" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/google-genai-code-execution/run.sh b/py/samples/provider-google-genai-code-execution/run.sh similarity index 100% rename from py/samples/google-genai-code-execution/run.sh rename to py/samples/provider-google-genai-code-execution/run.sh diff --git a/py/samples/google-genai-code-execution/src/main.py b/py/samples/provider-google-genai-code-execution/src/main.py similarity index 98% rename from py/samples/google-genai-code-execution/src/main.py rename to py/samples/provider-google-genai-code-execution/src/main.py index c0430ba6c4..452a0b1be7 100755 --- a/py/samples/google-genai-code-execution/src/main.py +++ b/py/samples/provider-google-genai-code-execution/src/main.py @@ -55,7 +55,6 @@ import os from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback from genkit.ai import Genkit from genkit.blocks.model import MessageWrapper @@ -63,8 +62,9 @@ from genkit.core.typing import CustomPart, Message, TextPart from genkit.plugins.google_genai import GeminiConfigSchema, GoogleAI from genkit.plugins.google_genai.models.utils import PartConverter +from samples.shared.logging import setup_sample -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() if 'GEMINI_API_KEY' not in os.environ: os.environ['GEMINI_API_KEY'] = input('Please enter your GEMINI_API_KEY: ') diff --git a/py/samples/menu/LICENSE b/py/samples/provider-google-genai-context-caching/LICENSE similarity index 100% rename from py/samples/menu/LICENSE rename to py/samples/provider-google-genai-context-caching/LICENSE diff --git a/py/samples/google-genai-context-caching/README.md b/py/samples/provider-google-genai-context-caching/README.md similarity index 97% rename from py/samples/google-genai-context-caching/README.md rename to py/samples/provider-google-genai-context-caching/README.md index b54cf82c44..511a02e48a 100644 --- a/py/samples/google-genai-context-caching/README.md +++ b/py/samples/provider-google-genai-context-caching/README.md @@ -47,7 +47,7 @@ genkit start -- uv run src/main.py 2. 
**Run the demo**: ```bash - cd py/samples/google-genai-context-caching + cd py/samples/provider-google-genai-context-caching ./run.sh ``` diff --git a/py/samples/google-genai-context-caching/pyproject.toml b/py/samples/provider-google-genai-context-caching/pyproject.toml similarity index 97% rename from py/samples/google-genai-context-caching/pyproject.toml rename to py/samples/provider-google-genai-context-caching/pyproject.toml index d0e4018f97..f8207e9f9a 100644 --- a/py/samples/google-genai-context-caching/pyproject.toml +++ b/py/samples/provider-google-genai-context-caching/pyproject.toml @@ -43,7 +43,7 @@ dependencies = [ ] description = "context-caching Genkit sample" license = "Apache-2.0" -name = "google-genai-context-caching" +name = "provider-google-genai-context-caching" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/google-genai-context-caching/run.sh b/py/samples/provider-google-genai-context-caching/run.sh similarity index 100% rename from py/samples/google-genai-context-caching/run.sh rename to py/samples/provider-google-genai-context-caching/run.sh diff --git a/py/samples/google-genai-context-caching/src/main.py b/py/samples/provider-google-genai-context-caching/src/main.py similarity index 98% rename from py/samples/google-genai-context-caching/src/main.py rename to py/samples/provider-google-genai-context-caching/src/main.py index 7b43b29ecc..a5659c9422 100755 --- a/py/samples/google-genai-context-caching/src/main.py +++ b/py/samples/provider-google-genai-context-caching/src/main.py @@ -99,14 +99,14 @@ import httpx from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback from genkit.ai import Genkit from genkit.core.logging import get_logger from genkit.plugins.google_genai import GoogleAI from genkit.types import GenerationCommonConfig, Message, Part, Role, TextPart +from samples.shared.logging import setup_sample -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() if 'GEMINI_API_KEY' not in os.environ: os.environ['GEMINI_API_KEY'] = input('Please enter your GEMINI_API_KEY: ') diff --git a/py/samples/microsoft-foundry-hello/LICENSE b/py/samples/provider-google-genai-hello/LICENSE similarity index 100% rename from py/samples/microsoft-foundry-hello/LICENSE rename to py/samples/provider-google-genai-hello/LICENSE diff --git a/py/samples/google-genai-hello/README.md b/py/samples/provider-google-genai-hello/README.md similarity index 100% rename from py/samples/google-genai-hello/README.md rename to py/samples/provider-google-genai-hello/README.md diff --git a/py/samples/google-genai-hello/my_room.png b/py/samples/provider-google-genai-hello/my_room.png similarity index 100% rename from py/samples/google-genai-hello/my_room.png rename to py/samples/provider-google-genai-hello/my_room.png diff --git a/py/samples/google-genai-hello/palm_tree.png b/py/samples/provider-google-genai-hello/palm_tree.png similarity index 100% rename from py/samples/google-genai-hello/palm_tree.png rename to py/samples/provider-google-genai-hello/palm_tree.png diff --git a/py/samples/google-genai-hello/photo.jpg b/py/samples/provider-google-genai-hello/photo.jpg similarity index 100% rename from py/samples/google-genai-hello/photo.jpg rename to py/samples/provider-google-genai-hello/photo.jpg diff --git a/py/samples/google-genai-hello/pyproject.toml b/py/samples/provider-google-genai-hello/pyproject.toml similarity index 98% rename from py/samples/google-genai-hello/pyproject.toml 
rename to py/samples/provider-google-genai-hello/pyproject.toml index 28a9e37e3e..43bbdaa73f 100644 --- a/py/samples/google-genai-hello/pyproject.toml +++ b/py/samples/provider-google-genai-hello/pyproject.toml @@ -44,7 +44,7 @@ dependencies = [ ] description = "Hello world sample" license = "Apache-2.0" -name = "google-genai-hello" +name = "provider-google-genai-hello" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/google-genai-hello/run.sh b/py/samples/provider-google-genai-hello/run.sh similarity index 100% rename from py/samples/google-genai-hello/run.sh rename to py/samples/provider-google-genai-hello/run.sh diff --git a/py/samples/google-genai-hello/src/main.py b/py/samples/provider-google-genai-hello/src/main.py similarity index 71% rename from py/samples/google-genai-hello/src/main.py rename to py/samples/provider-google-genai-hello/src/main.py index 58f8e25a23..129e7d90b9 100755 --- a/py/samples/google-genai-hello/src/main.py +++ b/py/samples/provider-google-genai-hello/src/main.py @@ -61,38 +61,38 @@ | Defining Flows | `@ai.flow()` decorator (multiple uses) | | Defining Tools | `@ai.tool()` decorator (multiple uses) | | Pydantic for Tool Input Schema | `GablorkenInput` | -| Simple Generation (Prompt String) | `say_hi` | +| Simple Generation (Prompt String) | `generate_greeting` | +| System Prompts | `generate_with_system_prompt` | +| Multi-turn Conversations (`messages`) | `generate_multi_turn_chat` | | Generation with Messages (`Message`, `Role`, `TextPart`) | `simple_generate_with_tools_flow` | | Generation with Tools | `simple_generate_with_tools_flow` | | Tool Response Handling | `simple_generate_with_interrupts` | | Tool Interruption (`ctx.interrupt`) | `gablorken_tool2` | | Embedding (`ai.embed`, `Document`) | `embed_docs` | -| Generation Configuration (`temperature`, etc.) | `say_hi_with_configured_temperature` | -| Streaming Generation (`ai.generate_stream`) | `say_hi_stream` | -| Streaming Chunk Handling (`ctx.send_chunk`) | `say_hi_stream`, `generate_character` | +| Generation Configuration (`temperature`, etc.) 
| `generate_with_config` | +| Streaming Generation (`ai.generate_stream`) | `generate_streaming_story` | +| Streaming Chunk Handling (`ctx.send_chunk`) | `generate_streaming_story`, `generate_character` | +| Streaming Structured Output | `streaming_structured_output` | | Structured Output (Schema) | `generate_character` | | Pydantic for Structured Output Schema | `RpgCharacter` | -| Unconstrained Structured Output | `generate_character_unconstrained` | +| Structured Output (Instruction-Based) | `generate_character_instructions` | | Multi-modal Output Configuration | `generate_images` | | GCP Telemetry (Traces and Metrics) | `add_gcp_telemetry()` | | Thinking Mode (CoT) | `thinking_level_pro`, `thinking_level_flash` | -| Code Generation | `code_flow` | +| Code Generation | `generate_code` | | Search Grounding | `search_grounding` | | URL Context | `url_context` | | Multimodal Generation (Video input) | `youtube_videos` | +| Context Propagation | `context_demo` | """ import argparse import asyncio -import base64 import os import sys import tempfile from google import genai as google_genai_sdk -from rich.traceback import install as install_rich_traceback - -install_rich_traceback(show_locals=True, width=120, extra_lines=3) if sys.version_info < (3, 11): from strenum import StrEnum # pyright: ignore[reportUnreachable] @@ -104,7 +104,6 @@ from pydantic import BaseModel, Field from genkit.ai import Genkit, Output, ToolRunContext, tool_response -from genkit.blocks.model import GenerateResponseWrapper from genkit.core.action import ActionRunContext from genkit.core.logging import get_logger from genkit.plugins.evaluators import GenkitMetricType, MetricConfig, define_genkit_evaluators @@ -122,6 +121,33 @@ Role, TextPart, ) +from samples.shared import ( + CharacterInput, + CodeInput, + ConfigInput, + GreetingInput, + ImageDescribeInput, + MultiTurnInput, + RpgCharacter, + StreamingToolInput, + StreamInput, + SystemPromptInput, + WeatherInput, + convert_currency as _convert_currency_tool, + describe_image_logic, + generate_character_logic, + generate_code_logic, + generate_greeting_logic, + generate_multi_turn_chat_logic, + generate_streaming_story_logic, + generate_streaming_with_tools_logic, + generate_with_config_logic, + generate_with_system_prompt_logic, + get_weather, + setup_sample, +) + +setup_sample() logger = get_logger(__name__) @@ -144,21 +170,8 @@ ], ) - -class CurrencyExchangeInput(BaseModel): - """Currency exchange flow input schema.""" - - amount: float = Field(description='Amount to convert', default=100) - from_curr: str = Field(description='Source currency code', default='USD') - to_curr: str = Field(description='Target currency code', default='EUR') - - -class CurrencyInput(BaseModel): - """Currency conversion input schema.""" - - amount: float = Field(description='Amount to convert', default=100) - from_currency: str = Field(description='Source currency code (e.g., USD)', default='USD') - to_currency: str = Field(description='Target currency code (e.g., EUR)', default='EUR') +ai.tool()(get_weather) +ai.tool()(_convert_currency_tool) class GablorkenInput(BaseModel): @@ -183,30 +196,6 @@ class ThinkingLevelFlash(StrEnum): HIGH = 'HIGH' -class WeatherInput(BaseModel): - """Input for getting weather.""" - - location: str = Field(description='The city and state, e.g. 
San Francisco, CA') - - -class SayHiInput(BaseModel): - """Input for say_hi flow.""" - - name: str = Field(default='Whiskers', description='Name to greet') - - -class StreamInput(BaseModel): - """Input for streaming flow.""" - - name: str = Field(default='Shadow', description='Name to write story about') - - -class CharacterInput(BaseModel): - """Input for character generation.""" - - name: str = Field(default='Whiskers', description='Character name') - - class TemperatureInput(BaseModel): """Input for temperature config flow.""" @@ -219,49 +208,76 @@ class ToolsFlowInput(BaseModel): value: int = Field(default=42, description='Value for gablorken calculation') -class DynamicToolsInput(BaseModel): - """Input for dynamic tools demo.""" - - input_val: str = Field(default='Dynamic tools demo', description='Input value for demo') - - class ToolCallingInput(BaseModel): """Input for tool calling flow.""" location: str = Field(default='Paris, France', description='Location to get weather for') -class CodeInput(BaseModel): - """Input for code generation flow.""" +class ContextDemoInput(BaseModel): + """Input for context demo flow.""" - task: str = Field( - default='Write a Python function to calculate fibonacci numbers', - description='Coding task description', - ) + user_id: int = Field(default=42, description='User ID (try 42 or 123)') -@ai.tool() -def convert_currency(input: CurrencyInput) -> str: - """Convert currency amount. +class ScreenshotInput(BaseModel): + """Input for screenshot tool.""" - Args: - input: Currency conversion parameters. + url: str = Field(description='The URL to take a screenshot of') + + +@ai.tool(name='gablorkenTool') +def gablorken_tool(input_: GablorkenInput) -> dict[str, int]: + """Calculate a gablorken. Returns: - Converted amount. + The calculated gablorken. """ - # Mock conversion rates - rates = { - ('USD', 'EUR'): 0.85, - ('EUR', 'USD'): 1.18, - ('USD', 'GBP'): 0.73, - ('GBP', 'USD'): 1.37, + return {'result': input_.value * 3 - 5} + + +@ai.tool(name='gablorkenTool2') +def gablorken_tool2(_input: GablorkenInput, ctx: ToolRunContext) -> None: + """The user-defined tool function.""" + pass + + +@ai.tool(name='screenShot') +def take_screenshot(input_: ScreenshotInput) -> dict: + """Take a screenshot of a given URL.""" + return {'url': input_.url, 'screenshot_path': '/tmp/screenshot.png'} # noqa: S108 - sample code + + +@ai.tool(name='getWeather') +def get_weather_detailed(input_: WeatherInput) -> dict: + """Used to get current weather for a location.""" + return { + 'location': input_.location, + 'temperature_celsius': 21.5, + 'conditions': 'cloudy', } - rate = rates.get((input.from_currency, input.to_currency), 1.0) - converted = input.amount * rate - return f'{input.amount} {input.from_currency} = {converted:.2f} {input.to_currency}' +@ai.tool(name='celsiusToFahrenheit') +def celsius_to_fahrenheit(celsius: float) -> float: + """Converts Celsius to Fahrenheit.""" + return (celsius * 9) / 5 + 32 + + +@ai.tool() +def get_user_data() -> str: + """Fetch user data based on context.""" + context = Genkit.current_context() + raw_user = context.get('user') if context else {} + user_id = 0 + if isinstance(raw_user, dict): + user_id = int(raw_user.get('id', 0)) + if user_id == 42: + return 'User is Arthur Dent, an intergalactic traveler.' + elif user_id == 123: + return 'User is Jane Doe, a premium member.' + else: + return 'User is Guest.' 
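The `get_user_data` tool above reads per-request context through `Genkit.current_context()` instead of taking it as a parameter; the `context_demo` flow later in this file supplies that context with `ai.generate(..., context={'user': {'id': ...}})`. A minimal sketch of the same wiring, assuming `current_context()` returns the dict passed as `context=` (the sample code implies this, but the contract itself is not shown in the diff); `whoami` is a hypothetical tool name:

```python
# Sketch only, not part of the patch: illustrates the ambient-context pattern
# used by get_user_data/context_demo. Assumes Genkit.current_context() returns
# the dict supplied via `context=` on the enclosing generate() call.
from genkit.ai import Genkit

ai = Genkit()  # in practice, pass plugins=[...] and a default model here


@ai.tool()
def whoami() -> str:
    """Hypothetical tool: report the caller's identity from ambient context."""
    context = Genkit.current_context() or {}
    user = context.get('user') or {}
    return f"User id is {user.get('id', 'unknown')}."


async def demo() -> str:
    response = await ai.generate(
        prompt='Greet the current user by id.',
        tools=['whoami'],
        context={'user': {'id': 42}},  # visible to whoami during this call
    )
    return response.text
```

This keeps user identity and auth data out of the model-visible tool schema while still letting tools act on it.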
@ai.flow() @@ -297,159 +313,87 @@ async def simple_generate_with_interrupts(input: ToolsFlowInput) -> str: @ai.flow() -async def say_hi(input: SayHiInput) -> str: - """Generate a greeting for the given name. +async def generate_greeting(input: GreetingInput) -> str: + """Generate a simple greeting. Args: input: Input with name to greet. Returns: The generated greeting response. - - Example: - >>> result = await say_hi(SayHiInput(name='Mr. Fluffington')) - >>> print(result) - Hello Mr. Fluffington! *purrs contentedly* """ - resp = await ai.generate( - prompt=f'hi {input.name}', - ) - - await logger.ainfo( - 'generation_response', - has_usage=hasattr(resp, 'usage'), - usage_dict=resp.usage.model_dump() if hasattr(resp, 'usage') and resp.usage else None, - text_length=len(resp.text), - ) - - return resp.text + return await generate_greeting_logic(ai, input.name) @ai.flow() -async def demo_dynamic_tools(input: DynamicToolsInput) -> dict[str, object]: - """Demonstrates advanced Genkit features: ai.run() and ai.dynamic_tool(). +async def generate_with_system_prompt(input: SystemPromptInput) -> str: + """Demonstrate system prompts to control model persona and behavior. - This flow shows how to: - 1. Use `ai.run()` to create sub-spans (steps) within a flow trace. - 2. Use `ai.dynamic_tool()` to create tools on-the-fly without registration. + Args: + input: Input with a question to ask. - To test this in the Dev UI: - 1. Select 'demo_dynamic_tools' from the flows list. - 2. Run it with the default input or provide a custom string. - 3. Click 'View trace' to see the 'process_data_step' sub-span and tool execution. + Returns: + The model's response in the persona defined by the system prompt. """ - - # ai.run() allows you to wrap any function in a trace span, which is visible - # in the Dev UI. It supports an optional input argument as the second parameter. - def process_data(data: str) -> str: - return f'processed: {data}' - - run_result = await ai.run('process_data_step', input.input_val, process_data) - - # ai.dynamic_tool() creates a tool that isn't globally registered but can be - # used immediately or passed to generate() calls. - def multiplier_fn(x: int) -> int: - return x * 10 - - dynamic_multiplier = ai.dynamic_tool('dynamic_multiplier', multiplier_fn, description='Multiplies by 10') - tool_res = await dynamic_multiplier.arun(5) - - return { - 'step_result': run_result, - 'dynamic_tool_result': tool_res.response, - 'tool_metadata': dynamic_multiplier.metadata, - } + return await generate_with_system_prompt_logic(ai, input.question) @ai.flow() -async def describe_image() -> str: - """Describe an image (reads from photo.jpg).""" - # Read the photo.jpg file and encode to base64 - current_dir = pathlib.Path(pathlib.Path(__file__).resolve()).parent - photo_path = os.path.join(current_dir, '..', 'photo.jpg') - - with pathlib.Path(photo_path).open('rb') as photo_file: - photo_base64 = base64.b64encode(photo_file.read()).decode('utf-8') - - response = await ai.generate( - prompt=[ - Part(root=TextPart(text='describe this photo')), - Part(root=MediaPart(media=Media(url=f'data:image/jpeg;base64,{photo_base64}', content_type='image/jpeg'))), - ], - ) - return response.text - +async def generate_multi_turn_chat(input: MultiTurnInput) -> str: + """Demonstrate multi-turn conversations using the messages parameter. -@ai.tool(name='gablorkenTool') -def gablorken_tool(input_: GablorkenInput) -> dict[str, int]: - """Calculate a gablorken. + Args: + input: Input with a travel destination. 
Returns: - The calculated gablorken. + The model's final response, demonstrating context retention. """ - return {'result': input_.value * 3 - 5} - - -@ai.tool(name='gablorkenTool2') -def gablorken_tool2(_input: GablorkenInput, ctx: ToolRunContext) -> None: - """The user-defined tool function.""" - pass + return await generate_multi_turn_chat_logic(ai, input.destination) -class Skills(BaseModel): - """Skills for an RPG character.""" - - strength: int = Field(description='strength (0-100)') - charisma: int = Field(description='charisma (0-100)') - endurance: int = Field(description='endurance (0-100)') - +@ai.flow() +async def describe_image(input: ImageDescribeInput) -> str: + """Describe an image using Gemini. -class RpgCharacter(BaseModel): - """An RPG character.""" + Args: + input: Input with image URL to describe. - name: str = Field(description='name of the character') - back_story: str = Field(description='back story', alias='backStory') - abilities: list[str] = Field(description='list of abilities (3-4)') - skills: Skills + Returns: + A textual description of the image. + """ + return await describe_image_logic(ai, input.image_url) @ai.flow() -async def generate_character( - input: CharacterInput, - ctx: ActionRunContext | None = None, -) -> RpgCharacter: - """Generate an RPG character. +async def generate_character(input: CharacterInput) -> RpgCharacter: + """Generate an RPG character with structured output. Args: input: Input with character name. - ctx: the context of the tool Returns: The generated RPG character. """ - if ctx is not None and ctx.is_streaming: - stream, result = ai.generate_stream( - prompt=f'generate an RPG character named {input.name}', - output=Output(schema=RpgCharacter), - ) - async for data in stream: - ctx.send_chunk(data.output) - - return (await result).output - else: - result = await ai.generate( - prompt=f'generate an RPG character named {input.name}', - output=Output(schema=RpgCharacter), - ) - return result.output + return await generate_character_logic(ai, input.name) @ai.flow() -async def generate_character_unconstrained( +async def generate_character_instructions( input: CharacterInput, _ctx: ActionRunContext | None = None, ) -> RpgCharacter: - """Generate an unconstrained RPG character. + """Generate an RPG character using instruction-based structured output. + + Unlike ``generate_character`` which uses constrained decoding (the model + is forced to output valid JSON matching the schema), this flow uses + ``output_constrained=False`` to guide the model via prompt instructions + instead. This is useful when:: + + - The model doesn't support constrained decoding. + - You want the model to have more flexibility in its output. + - You're debugging schema adherence issues. + + See: https://genkit.dev/docs/models#structured-output Args: input: Input with character name. @@ -468,43 +412,33 @@ async def generate_character_unconstrained( @ai.flow() -async def say_hi_stream( +async def generate_streaming_story( input: StreamInput, ctx: ActionRunContext | None = None, ) -> str: - """Generate a greeting for the given name. + """Generate a streaming story response. Args: input: Input with name for streaming. - ctx: the context of the tool + ctx: Action context for streaming. Returns: - The generated response with a function. + The complete story text. 
""" - stream, _ = ai.generate_stream(prompt=f'hi {input.name}') - result: str = '' - async for data in stream: - if ctx is not None: - ctx.send_chunk(data.text) - result += data.text - - return result + return await generate_streaming_story_logic(ai, input.name, ctx) @ai.flow() -async def say_hi_with_configured_temperature(input: TemperatureInput) -> GenerateResponseWrapper: - """Generate a greeting for the given name. +async def generate_with_config(input: ConfigInput) -> str: + """Generate a greeting with custom model configuration. Args: input: Input with name for greeting. Returns: - The generated response with a function. + The generated greeting. """ - return await ai.generate( - messages=[Message(role=Role.USER, content=[Part(root=TextPart(text=f'hi {input.data}'))])], - config=GenerationCommonConfig(temperature=0.1), - ) + return await generate_with_config_logic(ai, input.name) @ai.flow() @@ -563,15 +497,6 @@ async def thinking_level_flash(_level: ThinkingLevelFlash) -> str: return response.text -class ThinkingLevelFlash(StrEnum): - """Thinking level flash enum.""" - - MINIMAL = 'MINIMAL' - LOW = 'LOW' - MEDIUM = 'MEDIUM' - HIGH = 'HIGH' - - @ai.flow() async def thinking_level_pro(_level: ThinkingLevel) -> str: """Gemini 3.0 thinkingLevel config (Pro).""" @@ -628,9 +553,9 @@ async def upload_blob_to_file_search_store(client: google_genai_sdk.Client, file 'excitement, revealing hidden paths. Elara noticed the trees bending slightly as if beckoning her to come ' 'closer. When she paused to listen, she heard soft murmurs—stories of lost treasures and forgotten dreams. ' 'Drawn by the enchanting sounds, she followed a narrow trail until she stumbled upon a shimmering pond. ' - 'At its edge, a wise old willow tree spoke, “Child of the village, what do you seek?” “I seek adventure,” ' - 'Elara replied, her heart racing. “Adventure lies not in faraway lands but within your spirit,” the willow ' - 'said, swaying gently. “Every choice you make is a step into the unknown.” With newfound courage, Elara left ' + 'At its edge, a wise old willow tree spoke, "Child of the village, what do you seek?" "I seek adventure," ' + 'Elara replied, her heart racing. "Adventure lies not in faraway lands but within your spirit," the willow ' + 'said, swaying gently. "Every choice you make is a step into the unknown." With newfound courage, Elara left ' 'the woods, her mind buzzing with possibilities. The villagers would say the woods were magical, but to Elara, ' 'it was the spark of her imagination that had transformed her ordinary world into a realm of endless ' 'adventures. 
She smiled, knowing her journey was just beginning' @@ -734,35 +659,6 @@ async def youtube_videos() -> str: return response.text -class ScreenshotInput(BaseModel): - """Input for screenshot tool.""" - - url: str = Field(description='The URL to take a screenshot of') - - -@ai.tool(name='screenShot') -def take_screenshot(input_: ScreenshotInput) -> dict: - """Take a screenshot of a given URL.""" - # Implement your screenshot logic here - return {'url': input_.url, 'screenshot_path': '/tmp/screenshot.png'} # noqa: S108 - sample code - - -@ai.tool(name='getWeather') -def get_weather(input_: WeatherInput) -> dict: - """Used to get current weather for a location.""" - return { - 'location': input_.location, - 'temperature_celcius': 21.5, - 'conditions': 'cloudy', - } - - -@ai.tool(name='celsiusToFahrenheit') -def celsius_to_fahrenheit(celsius: float) -> float: - """Converts Celsius to Fahrenheit.""" - return (celsius * 9) / 5 + 32 - - @ai.flow() async def tool_calling(input: ToolCallingInput) -> str: """Tool calling with Gemini.""" @@ -775,7 +671,43 @@ async def tool_calling(input: ToolCallingInput) -> str: @ai.flow() -async def code_flow(input: CodeInput) -> str: +async def streaming_structured_output( + input: CharacterInput, + ctx: ActionRunContext | None = None, +) -> RpgCharacter: + """Demonstrate streaming with structured output schemas. + + Combines `generate_stream` with `Output(schema=...)` so the model + streams JSON tokens that are progressively parsed into the Pydantic + model. Each chunk exposes a partial `.output` you can forward to + clients for incremental rendering. + + See: https://genkit.dev/docs/models#streaming + + Args: + input: Input with character name. + ctx: Action context for streaming partial outputs. + + Returns: + The fully-parsed RPG character once streaming completes. + """ + stream, result = ai.generate_stream( + prompt=( + f'Generate an RPG character named {input.name}. ' + 'Include a creative backstory, 3-4 unique abilities, ' + 'and skill ratings for strength, charisma, and endurance (0-100 each).' + ), + output=Output(schema=RpgCharacter), + ) + async for chunk in stream: + if ctx is not None: + ctx.send_chunk(chunk.output) + + return (await result).output + + +@ai.flow() +async def generate_code(input: CodeInput) -> str: """Generate code using Gemini. Args: @@ -784,13 +716,47 @@ async def code_flow(input: CodeInput) -> str: Returns: Generated code. """ + return await generate_code_logic(ai, input.task) + + +@ai.flow() +async def context_demo(input: ContextDemoInput) -> str: + """Demonstrate passing context to tools. + + This flow shows how to propagate application context (like user ID or auth info) + from the flow input into the generation and tool execution. + + Args: + input: Input with user ID. + + Returns: + The model's response using the context-dependent tool output. + """ response = await ai.generate( - prompt=input.task, - system='You are an expert programmer. Provide clean, well-documented code with explanations.', + prompt='Tell me about the current user based on their ID.', + tools=['get_user_data'], + context={'user': {'id': input.user_id}}, ) return response.text +@ai.flow() +async def generate_streaming_with_tools( + input: StreamingToolInput, + ctx: ActionRunContext | None = None, +) -> str: + """Demonstrate streaming generation with tool calling. + + Args: + input: Input with location for weather lookup. + ctx: Action context for streaming chunks to the client. + + Returns: + The complete generated text. 
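+
+    Example (illustrative; assumes the shared get_weather tool is available
+    in this sample, and the model's wording varies per run):
+        >>> text = await generate_streaming_with_tools(
+        ...     StreamingToolInput(location='Paris')
+        ... )
+        >>> print(text)
+        Right now in Paris it is...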
+ """ + return await generate_streaming_with_tools_logic(ai, input.location, ctx) + + async def main() -> None: """Main function - keep alive for Dev UI.""" await logger.ainfo('Starting main execution loop') diff --git a/py/samples/google-genai-hello/src/main_vertexai.py b/py/samples/provider-google-genai-hello/src/main_vertexai.py similarity index 100% rename from py/samples/google-genai-hello/src/main_vertexai.py rename to py/samples/provider-google-genai-hello/src/main_vertexai.py diff --git a/py/samples/media-models-demo/LICENSE b/py/samples/provider-google-genai-media-models-demo/LICENSE similarity index 100% rename from py/samples/media-models-demo/LICENSE rename to py/samples/provider-google-genai-media-models-demo/LICENSE diff --git a/py/samples/media-models-demo/README.md b/py/samples/provider-google-genai-media-models-demo/README.md similarity index 100% rename from py/samples/media-models-demo/README.md rename to py/samples/provider-google-genai-media-models-demo/README.md diff --git a/py/samples/google-genai-image/image.jpg b/py/samples/provider-google-genai-media-models-demo/image.jpg similarity index 100% rename from py/samples/google-genai-image/image.jpg rename to py/samples/provider-google-genai-media-models-demo/image.jpg diff --git a/py/samples/google-genai-image/my_room.png b/py/samples/provider-google-genai-media-models-demo/my_room.png similarity index 100% rename from py/samples/google-genai-image/my_room.png rename to py/samples/provider-google-genai-media-models-demo/my_room.png diff --git a/py/samples/google-genai-image/palm_tree.png b/py/samples/provider-google-genai-media-models-demo/palm_tree.png similarity index 100% rename from py/samples/google-genai-image/palm_tree.png rename to py/samples/provider-google-genai-media-models-demo/palm_tree.png diff --git a/py/samples/google-genai-image/photo.jpg b/py/samples/provider-google-genai-media-models-demo/photo.jpg similarity index 100% rename from py/samples/google-genai-image/photo.jpg rename to py/samples/provider-google-genai-media-models-demo/photo.jpg diff --git a/py/samples/media-models-demo/pyproject.toml b/py/samples/provider-google-genai-media-models-demo/pyproject.toml similarity index 94% rename from py/samples/media-models-demo/pyproject.toml rename to py/samples/provider-google-genai-media-models-demo/pyproject.toml index 7a1904fb52..c76f01e8cd 100644 --- a/py/samples/media-models-demo/pyproject.toml +++ b/py/samples/provider-google-genai-media-models-demo/pyproject.toml @@ -17,7 +17,7 @@ [project] dependencies = ["genkit", "genkit-plugin-google-genai", "rich>=13.0.0"] description = "Demo of media generation models: Veo, TTS, Lyria, Gemini Image" -name = "media-models-demo" +name = "provider-google-genai-media-models-demo" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/media-models-demo/run.sh b/py/samples/provider-google-genai-media-models-demo/run.sh similarity index 100% rename from py/samples/media-models-demo/run.sh rename to py/samples/provider-google-genai-media-models-demo/run.sh diff --git a/py/samples/media-models-demo/src/__init__.py b/py/samples/provider-google-genai-media-models-demo/src/__init__.py similarity index 100% rename from py/samples/media-models-demo/src/__init__.py rename to py/samples/provider-google-genai-media-models-demo/src/__init__.py diff --git a/py/samples/media-models-demo/src/main.py b/py/samples/provider-google-genai-media-models-demo/src/main.py similarity index 80% rename from py/samples/media-models-demo/src/main.py 
rename to py/samples/provider-google-genai-media-models-demo/src/main.py index 69fcb3879a..62633d1d6d 100644 --- a/py/samples/media-models-demo/src/main.py +++ b/py/samples/provider-google-genai-media-models-demo/src/main.py @@ -84,20 +84,28 @@ - `gemini_image_generator` - Image generation with Gemini (native) - `lyria_audio_generator` - Music/audio generation (Vertex AI) - `veo_video_generator` - Video generation (background model) +- `describe_image_with_gemini` - Image-to-text description +- `generate_images` - Multimodal image generation with photos +- `multipart_tool_calling` - Tool calling with image input/output +- `gemini_image_editing` - Image editing (inpainting/outpainting) +- `nano_banana_pro` - 4K image config with aspect ratio +- `gemini_media_resolution` - Media resolution control +- `multimodal_input` - Multimodal prompting """ import asyncio import base64 import os +import pathlib import time import uuid from typing import Any from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback from genkit.ai import Genkit from genkit.blocks.background_model import lookup_background_action +from genkit.blocks.model import GenerateResponseWrapper from genkit.core.action import ActionRunContext from genkit.core.typing import ( Error, @@ -112,22 +120,26 @@ Supports, TextPart, ) +from genkit.types import ( + GenerationCommonConfig, + Media, + MediaPart, + Metadata, +) +from samples.shared.logging import setup_sample -# ============================================================================ -# Rich Traceback - Beautiful, Rust-like error messages -# ============================================================================ -install_rich_traceback(show_locals=True, width=120, extra_lines=3) - -# ============================================================================ -# Environment Detection -# ============================================================================ +setup_sample() HAS_GEMINI_API_KEY = bool(os.getenv('GEMINI_API_KEY')) HAS_GCP_PROJECT = bool(os.getenv('GOOGLE_CLOUD_PROJECT')) # Initialize Genkit if HAS_GEMINI_API_KEY: - from genkit.plugins.google_genai import GoogleAI + from genkit.plugins.google_genai import ( + GeminiConfigSchema, + GeminiImageConfigSchema, + GoogleAI, + ) ai = Genkit(plugins=[GoogleAI()]) else: @@ -206,9 +218,11 @@ class VideoInput(BaseModel): ) -# ============================================================================ -# Simulated Models (for demo without API keys) -# ============================================================================ +class GenerateImagesInput(BaseModel): + """Input for multimodal image generation flow.""" + + name: str = Field(default='a fluffy cat', description='Subject to generate images about') + _operations: dict[str, dict[str, Any]] = {} @@ -430,11 +444,6 @@ async def simulated_veo_check(operation: Operation) -> Operation: ) -# ============================================================================ -# Model Selection Helpers -# ============================================================================ - - def get_tts_model() -> str: """Get the TTS model name based on environment. 
@@ -490,11 +499,6 @@ def get_veo_model() -> str: return 'simulated-veo' -# ============================================================================ -# Demo Flows - TTS (Text-to-Speech) -# ============================================================================ - - @ai.flow(name='tts_speech_generator', description='Generate speech from text using TTS') async def tts_speech_generator_flow(input: TtsInput) -> dict[str, Any]: """Generate speech audio from text. @@ -584,11 +588,6 @@ async def tts_speech_generator_flow(input: TtsInput) -> dict[str, Any]: } -# ============================================================================ -# Demo Flows - Imagen Image Generation (predict API) -# ============================================================================ - - @ai.flow(name='imagen_image_generator', description='Generate images using Imagen (predict API)') async def imagen_image_generator_flow(input: ImagenInput) -> dict[str, Any]: """Generate images using Imagen. @@ -647,11 +646,6 @@ async def imagen_image_generator_flow(input: ImagenInput) -> dict[str, Any]: } -# ============================================================================ -# Demo Flows - Gemini Image Generation (generateContent API) -# ============================================================================ - - @ai.flow(name='gemini_image_generator', description='Generate images using Imagen') async def gemini_image_generator_flow(input: ImageInput) -> dict[str, Any]: """Generate images using Imagen image generation. @@ -715,11 +709,6 @@ async def gemini_image_generator_flow(input: ImageInput) -> dict[str, Any]: } -# ============================================================================ -# Demo Flows - Lyria Audio Generation -# ============================================================================ - - @ai.flow(name='lyria_audio_generator', description='Generate music/audio using Lyria') async def lyria_audio_generator_flow(input: AudioInput) -> dict[str, Any]: """Generate audio/music using Lyria. @@ -784,11 +773,6 @@ async def lyria_audio_generator_flow(input: AudioInput) -> dict[str, Any]: } -# ============================================================================ -# Demo Flows - Veo Video Generation (Background Model) -# ============================================================================ - - @ai.flow(name='veo_video_generator', description='Generate video using Veo (background model)') async def veo_video_generator_flow(input: VideoInput) -> dict[str, Any]: """Generate video using Veo. @@ -886,11 +870,6 @@ async def veo_video_generator_flow(input: VideoInput) -> dict[str, Any]: } -# ============================================================================ -# Demo Flow - All Models Overview -# ============================================================================ - - @ai.flow(name='media_models_overview', description='Overview of all available media models') async def media_models_overview_flow() -> dict[str, Any]: """Get an overview of all available media generation models. 
@@ -936,6 +915,220 @@ async def media_models_overview_flow() -> dict[str, Any]: } +@ai.tool(name='screenshot') +def screenshot() -> dict: + """Takes a screenshot of a room.""" + room_path = pathlib.Path(__file__).parent.parent / 'my_room.png' + with pathlib.Path(room_path).open('rb') as f: + room_b64 = base64.b64encode(f.read()).decode('utf-8') + + return { + 'output': 'success', + 'content': [{'media': {'url': f'data:image/png;base64,{room_b64}', 'contentType': 'image/png'}}], + } + + +@ai.flow() +async def describe_image_with_gemini(data: str = '') -> str: + """Describe an image using Gemini. + + Args: + data: The image data as a data URI (e.g., 'data:image/jpeg;base64,...'). + + Returns: + The description of the image. + """ + if not data: + try: + current_dir = pathlib.Path(pathlib.Path(__file__).resolve()).parent + image_path = os.path.join(current_dir, '..', 'image.jpg') + with pathlib.Path(image_path).open('rb') as image_file: + buffer = image_file.read() + img_base64 = base64.b64encode(buffer).decode('utf-8') + data = f'data:image/jpeg;base64,{img_base64}' + except FileNotFoundError as e: + raise ValueError("Default image 'image.jpg' not found. Please provide image data.") from e + + if not (data.startswith('data:') and ',' in data): + raise ValueError(f'Expected a data URI (e.g., "data:image/jpeg;base64,..."), but got: {data[:50]}...') + + result = await ai.generate( + messages=[ + Message( + role=Role.USER, + content=[ + Part(root=TextPart(text='What is shown in this image?')), + Part(root=MediaPart(media=Media(content_type='image/jpeg', url=data))), + ], + ), + ], + model='googleai/gemini-3-flash-preview', + ) + return result.text + + +@ai.flow() +async def generate_images( + input: GenerateImagesInput, + ctx: ActionRunContext | None = None, +) -> GenerateResponseWrapper: + """Generate images for the given subject using multimodal prompting. + + Args: + input: Input with subject to generate images about. + ctx: The action run context. + + Returns: + The generated response with text and images. + """ + return await ai.generate( + model='googleai/gemini-3-pro-image-preview', + prompt=f'tell me about {input.name} with photos', + config=GeminiConfigSchema.model_validate({ + 'response_modalities': ['text', 'image'], + 'api_version': 'v1alpha', + }).model_dump(exclude_none=True), + ) + + +@ai.flow() +async def multipart_tool_calling() -> str: + """Tool calling with image input and output. + + Demonstrates a tool that returns image content (screenshot) and + the model reasoning about the image. + + Returns: + The model's description of the screenshot. + """ + response = await ai.generate( + model='googleai/gemini-3-pro-preview', + tools=['screenshot'], + config=GenerationCommonConfig(temperature=1), + prompt="Tell me what I'm seeing on the screen.", + ) + return response.text + + +@ai.flow() +async def gemini_image_editing() -> Media | None: + """Image editing with Gemini (inpainting/outpainting). + + Combines two images (a plant and a room) and asks Gemini to + composite them together, demonstrating image editing capabilities. + + Returns: + The edited image media, or None if no image was generated. 
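+
+    Example (illustrative; assumes palm_tree.png and my_room.png exist in
+    the sample directory):
+        >>> media = await gemini_image_editing()
+        >>> print(type(media).__name__ if media else 'no image returned')
+        Media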
+ """ + plant_path = pathlib.Path(__file__).parent.parent / 'palm_tree.png' + room_path = pathlib.Path(__file__).parent.parent / 'my_room.png' + + with pathlib.Path(plant_path).open('rb') as f: + plant_b64 = base64.b64encode(f.read()).decode('utf-8') + with pathlib.Path(room_path).open('rb') as f: + room_b64 = base64.b64encode(f.read()).decode('utf-8') + + response = await ai.generate( + model='googleai/gemini-3-pro-image-preview', + prompt=[ + Part(root=TextPart(text='add the plant to my room')), + Part(root=MediaPart(media=Media(url=f'data:image/png;base64,{plant_b64}'))), + Part(root=MediaPart(media=Media(url=f'data:image/png;base64,{room_b64}'))), + ], + config=GeminiImageConfigSchema.model_validate({ + 'response_modalities': ['TEXT', 'IMAGE'], + 'image_config': {'aspect_ratio': '1:1'}, + 'api_version': 'v1alpha', + }).model_dump(exclude_none=True), + ) + for part in response.message.content if response.message else []: + if isinstance(part.root, MediaPart): + return part.root.media + + return None + + +@ai.flow() +async def nano_banana_pro() -> Media | None: + """Generate a 4K image with custom aspect ratio. + + Demonstrates advanced image configuration options including + aspect ratio and image size settings. + + Returns: + The generated image media, or None if no image was generated. + """ + response = await ai.generate( + model='googleai/gemini-3-pro-image-preview', + prompt='Generate a picture of a sunset in the mountains by a lake', + config={ + 'response_modalities': ['TEXT', 'IMAGE'], + 'image_config': { + 'aspect_ratio': '21:9', + 'image_size': '4K', + }, + 'api_version': 'v1alpha', + }, + ) + for part in response.message.content if response.message else []: + if isinstance(part.root, MediaPart): + return part.root.media + return None + + +@ai.flow() +async def gemini_media_resolution() -> str: + """Query an image with high media resolution. + + Demonstrates the mediaResolution metadata option for higher-fidelity + image analysis. + + Returns: + The model's description of the image. + """ + plant_path = pathlib.Path(__file__).parent.parent / 'palm_tree.png' + with pathlib.Path(plant_path).open('rb') as f: + plant_b64 = base64.b64encode(f.read()).decode('utf-8') + response = await ai.generate( + model='googleai/gemini-3-pro-image-preview', + prompt=[ + Part(root=TextPart(text='What is in this picture?')), + Part( + root=MediaPart( + media=Media(url=f'data:image/png;base64,{plant_b64}'), + metadata=Metadata({'mediaResolution': {'level': 'MEDIA_RESOLUTION_HIGH'}}), + ) + ), + ], + config={'api_version': 'v1alpha'}, + ) + return response.text + + +@ai.flow() +async def multimodal_input() -> str: + """Describe a photo using multimodal prompting. + + Demonstrates sending both text and image content to the model + in a single prompt. + + Returns: + The model's description of the photo. 
+ """ + photo_path = pathlib.Path(__file__).parent.parent / 'photo.jpg' + with pathlib.Path(photo_path).open('rb') as f: + photo_b64 = base64.b64encode(f.read()).decode('utf-8') + + response = await ai.generate( + model='googleai/gemini-3-pro-image-preview', + prompt=[ + Part(root=TextPart(text='describe this photo')), + Part(root=MediaPart(media=Media(url=f'data:image/jpeg;base64,{photo_b64}', content_type='image/jpeg'))), + ], + ) + return response.text + + async def main() -> None: """Keep the server alive for the Dev UI.""" await asyncio.Event().wait() diff --git a/py/samples/mistral-hello/LICENSE b/py/samples/provider-google-genai-vertexai-hello/LICENSE similarity index 100% rename from py/samples/mistral-hello/LICENSE rename to py/samples/provider-google-genai-vertexai-hello/LICENSE diff --git a/py/samples/google-genai-vertexai-hello/README.md b/py/samples/provider-google-genai-vertexai-hello/README.md similarity index 100% rename from py/samples/google-genai-vertexai-hello/README.md rename to py/samples/provider-google-genai-vertexai-hello/README.md diff --git a/py/samples/google-genai-vertexai-hello/pyproject.toml b/py/samples/provider-google-genai-vertexai-hello/pyproject.toml similarity index 97% rename from py/samples/google-genai-vertexai-hello/pyproject.toml rename to py/samples/provider-google-genai-vertexai-hello/pyproject.toml index 5da498b09b..df162bf9f3 100644 --- a/py/samples/google-genai-vertexai-hello/pyproject.toml +++ b/py/samples/provider-google-genai-vertexai-hello/pyproject.toml @@ -42,7 +42,7 @@ dependencies = [ ] description = "Hello world sample on VertexAI API on GenAI" license = "Apache-2.0" -name = "google-genai-vertexai-hello" +name = "provider-google-genai-vertexai-hello" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/google-genai-vertexai-hello/run.sh b/py/samples/provider-google-genai-vertexai-hello/run.sh similarity index 100% rename from py/samples/google-genai-vertexai-hello/run.sh rename to py/samples/provider-google-genai-vertexai-hello/run.sh diff --git a/py/samples/google-genai-vertexai-hello/src/main.py b/py/samples/provider-google-genai-vertexai-hello/src/main.py similarity index 64% rename from py/samples/google-genai-vertexai-hello/src/main.py rename to py/samples/provider-google-genai-vertexai-hello/src/main.py index 7511b7d653..7396adeba1 100755 --- a/py/samples/google-genai-vertexai-hello/src/main.py +++ b/py/samples/provider-google-genai-vertexai-hello/src/main.py @@ -49,19 +49,22 @@ | Defining Flows | `@ai.flow()` decorator (multiple uses) | | Defining Tools | `@ai.tool()` decorator (multiple uses) | | Pydantic for Tool Input Schema | `GablorkenInput` | -| Simple Generation (Prompt String) | `say_hi` | +| Simple Generation (Prompt String) | `generate_greeting` | +| System Prompt | `generate_with_system_prompt` | +| Multi-turn Conversation | `generate_multi_turn_chat` | | Generation with Messages (`Message`, `Role`, `TextPart`) | `simple_generate_with_tools_flow` | | Generation with Tools | `simple_generate_with_tools_flow` | | Tool Response Handling | `simple_generate_with_interrupts` | | Tool Interruption (`ctx.interrupt`) | `gablorken_tool2` | | Embedding (`ai.embed`, `Document`) | `embed_docs` | -| Generation Configuration (`temperature`, etc.) | `say_hi_with_configured_temperature` | -| Streaming Generation (`ai.generate_stream`) | `say_hi_stream` | -| Streaming Chunk Handling (`ctx.send_chunk`) | `say_hi_stream`, `generate_character` | +| Generation Configuration (`temperature`, etc.) 
| `generate_with_config` | +| Streaming Generation (`ai.generate_stream`) | `generate_streaming_story` | +| Streaming Chunk Handling (`ctx.send_chunk`) | `generate_streaming_story`, `generate_character` | | Structured Output (Schema) | `generate_character` | +| Streaming Structured Output | `streaming_structured_output` | | Pydantic for Structured Output Schema | `RpgCharacter` | -| Unconstrained Structured Output | `generate_character_unconstrained` | -| Code Generation | `code_flow` | +| Structured Output (Instruction-Based) | `generate_character_instructions` | +| Code Generation | `generate_code` | Testing This Demo ================= @@ -77,16 +80,16 @@ 2. **Run the demo**: ```bash - cd py/samples/google-genai-vertexai-hello + cd py/samples/provider-google-genai-vertexai-hello ./run.sh ``` 3. **Open DevUI** at http://localhost:4000 4. **Test basic flows**: - - [ ] `say_hi` - Simple generation - - [ ] `say_hi_stream` - Streaming response - - [ ] `say_hi_with_configured_temperature` - Custom config + - [ ] `generate_greeting` - Simple generation + - [ ] `generate_streaming_story` - Streaming response + - [ ] `generate_with_config` - Custom config 5. **Test tools**: - [ ] `simple_generate_with_tools_flow` - Tool calling @@ -94,7 +97,7 @@ 6. **Test structured output**: - [ ] `generate_character` - Constrained output - - [ ] `generate_character_unconstrained` - Unconstrained + - [ ] `generate_character_instructions` - Instruction-based 7. **Test embeddings**: - [ ] `embed_docs` - Document embedding @@ -105,26 +108,40 @@ import os from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback from genkit.ai import Genkit, Output, ToolRunContext, tool_response -from genkit.blocks.model import GenerateResponseWrapper from genkit.core.action import ActionRunContext from genkit.core.logging import get_logger from genkit.plugins.google_genai import ( EmbeddingTaskType, VertexAI, ) -from genkit.types import ( - Embedding, - GenerationCommonConfig, - Message, - Part, - Role, - TextPart, +from genkit.types import Embedding +from samples.shared import ( + CharacterInput, + CodeInput, + CurrencyExchangeInput, + GreetingInput, + MultiTurnInput, + RpgCharacter, + StreamingToolInput, + StreamInput, + SystemPromptInput, + convert_currency as _convert_currency_tool, + convert_currency_logic, + generate_character_logic, + generate_code_logic, + generate_greeting_logic, + generate_multi_turn_chat_logic, + generate_streaming_story_logic, + generate_streaming_with_tools_logic, + generate_with_config_logic, + generate_with_system_prompt_logic, + get_weather, + setup_sample, ) -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() logger = get_logger(__name__) @@ -143,20 +160,8 @@ ) -class CurrencyExchangeInput(BaseModel): - """Currency exchange flow input schema.""" - - amount: float = Field(description='Amount to convert', default=100) - from_curr: str = Field(description='Source currency code', default='USD') - to_curr: str = Field(description='Target currency code', default='EUR') - - -class CurrencyInput(BaseModel): - """Currency conversion input schema.""" - - amount: float = Field(description='Amount to convert', default=100) - from_currency: str = Field(description='Source currency code (e.g., USD)', default='USD') - to_currency: str = Field(description='Target currency code (e.g., EUR)', default='EUR') +ai.tool()(get_weather) +ai.tool()(_convert_currency_tool) class GablorkenInput(BaseModel): @@ -165,41 +170,6 @@ class 
GablorkenInput(BaseModel): value: int = Field(description='value to calculate gablorken for') -class Skills(BaseModel): - """Skills for an RPG character.""" - - strength: int = Field(description='strength (0-100)') - charisma: int = Field(description='charisma (0-100)') - endurance: int = Field(description='endurance (0-100)') - - -class RpgCharacter(BaseModel): - """An RPG character.""" - - name: str = Field(description='name of the character') - back_story: str = Field(description='back story', alias='backStory') - abilities: list[str] = Field(description='list of abilities (3-4)') - skills: Skills - - -class SayHiInput(BaseModel): - """Input for say_hi flow.""" - - name: str = Field(default='Mittens', description='Name to greet') - - -class StreamInput(BaseModel): - """Input for streaming flow.""" - - name: str = Field(default='Shadow', description='Name for streaming greeting') - - -class CharacterInput(BaseModel): - """Input for character generation.""" - - name: str = Field(default='Whiskers', description='Character name') - - class TemperatureInput(BaseModel): """Input for temperature config flow.""" @@ -212,42 +182,36 @@ class ToolsFlowInput(BaseModel): value: int = Field(default=42, description='Value for gablorken calculation') -class CodeInput(BaseModel): - """Input for code generation flow.""" +@ai.tool(name='gablorkenTool') +def gablorken_tool(input_: GablorkenInput) -> int: + """Calculate a gablorken. - task: str = Field( - default='Write a Python function to calculate fibonacci numbers', - description='Coding task description', - ) + Args: + input_: The input to calculate gablorken for. + + Returns: + The calculated gablorken. + """ + return input_.value * 3 - 5 -@ai.tool() -def convert_currency(input: CurrencyInput) -> str: - """Convert currency amount. +@ai.tool(name='gablorkenTool2') +def gablorken_tool2(input_: GablorkenInput, ctx: ToolRunContext) -> None: + """The user-defined tool function. Args: - input: Currency conversion parameters. + input_: the input to the tool + ctx: the tool run context Returns: - Converted amount. + The calculated gablorken. """ - # Mock conversion rates - rates = { - ('USD', 'EUR'): 0.85, - ('EUR', 'USD'): 1.18, - ('USD', 'GBP'): 0.73, - ('GBP', 'USD'): 1.37, - } - - rate = rates.get((input.from_currency, input.to_currency), 1.0) - converted = input.amount * rate - - return f'{input.amount} {input.from_currency} = {converted:.2f} {input.to_currency}' + ctx.interrupt() @ai.flow() -async def currency_exchange(input: CurrencyExchangeInput) -> str: - """Convert currency using tools. +async def convert_currency(input: CurrencyExchangeInput) -> str: + """Convert currency using tool calling. Args: input: Currency exchange parameters. @@ -255,11 +219,7 @@ async def currency_exchange(input: CurrencyExchangeInput) -> str: Returns: Conversion result. """ - response = await ai.generate( - prompt=f'Convert {input.amount} {input.from_curr} to {input.to_curr}', - tools=['convert_currency'], - ) - return response.text + return await convert_currency_logic(ai, input) @ai.flow() @@ -282,70 +242,36 @@ async def embed_docs(docs: list[str] | None = None) -> list[Embedding]: ) -@ai.tool(name='gablorkenTool') -def gablorken_tool(input_: GablorkenInput) -> int: - """Calculate a gablorken. - - Args: - input_: The input to calculate gablorken for. - - Returns: - The calculated gablorken. 
- """ - return input_.value * 3 - 5 - - -@ai.tool(name='gablorkenTool2') -def gablorken_tool2(input_: GablorkenInput, ctx: ToolRunContext) -> None: - """The user-defined tool function. - - Args: - input_: the input to the tool - ctx: the tool run context - - Returns: - The calculated gablorken. - """ - ctx.interrupt() - - @ai.flow() -async def generate_character( - input: CharacterInput, - ctx: ActionRunContext | None = None, -) -> RpgCharacter: - """Generate an RPG character. +async def generate_character(input: CharacterInput) -> RpgCharacter: + """Generate an RPG character with structured output. Args: input: Input with character name. - ctx: the context of the tool Returns: The generated RPG character. """ - if ctx is not None and ctx.is_streaming: - stream, result = ai.generate_stream( - prompt=f'generate an RPG character named {input.name}', - output=Output(schema=RpgCharacter), - ) - async for data in stream: - ctx.send_chunk(data.output) - - return (await result).output - else: - result = await ai.generate( - prompt=f'generate an RPG character named {input.name}', - output=Output(schema=RpgCharacter), - ) - return result.output + return await generate_character_logic(ai, input.name) @ai.flow() -async def generate_character_unconstrained( +async def generate_character_instructions( input: CharacterInput, ctx: ActionRunContext | None = None, ) -> RpgCharacter: - """Generate an unconstrained RPG character. + """Generate an RPG character using instruction-based structured output. + + Unlike ``generate_character`` which uses constrained decoding (the model + is forced to output valid JSON matching the schema), this flow uses + ``output_constrained=False`` to guide the model via prompt instructions + instead. This is useful when:: + + - The model doesn't support constrained decoding. + - You want the model to have more flexibility in its output. + - You're debugging schema adherence issues. + + See: https://genkit.dev/docs/models#structured-output Args: input: Input with character name. @@ -364,8 +290,8 @@ async def generate_character_unconstrained( @ai.flow() -async def say_hi(input: SayHiInput) -> str: - """Generate a greeting for the given name. +async def generate_greeting(input: GreetingInput) -> str: + """Generate a simple greeting. Args: input: Input with name to greet. @@ -373,18 +299,47 @@ async def say_hi(input: SayHiInput) -> str: Returns: The generated response with a function. """ - resp = await ai.generate( - prompt=f'hi {input.name}', - ) - return resp.text + return await generate_greeting_logic(ai, input.name) + + +@ai.flow() +async def generate_with_system_prompt(input: SystemPromptInput) -> str: + """Demonstrate system prompts to control model persona and behavior. + + Args: + input: Input with a question to ask. + + Returns: + The model's response in the persona defined by the system prompt. + """ + return await generate_with_system_prompt_logic(ai, input.question) @ai.flow() -async def say_hi_stream( +async def generate_multi_turn_chat(input: MultiTurnInput) -> str: + """Demonstrate multi-turn conversations using the messages parameter. + + The messages parameter allows you to pass a conversation history to + maintain context across multiple interactions with the model. Each + message has a role ('user' or 'model') and content. + + See: https://genkit.dev/docs/models#multi-turn-conversations-with-messages + + Args: + input: Input with a travel destination. + + Returns: + The model's final response, demonstrating context retention. 
+ """ + return await generate_multi_turn_chat_logic(ai, input.destination) + + +@ai.flow() +async def generate_streaming_story( input: StreamInput, ctx: ActionRunContext | None = None, ) -> str: - """Generate a greeting for the given name. + """Generate a streaming story response. Args: input: Input with name for streaming. @@ -393,19 +348,12 @@ async def say_hi_stream( Returns: The generated response with a function. """ - stream, _ = ai.generate_stream(prompt=f'hi {input.name}') - result: str = '' - async for data in stream: - if ctx is not None: - ctx.send_chunk(data.text) - result += data.text - - return result + return await generate_streaming_story_logic(ai, input.name, ctx) @ai.flow() -async def say_hi_with_configured_temperature(input: TemperatureInput) -> GenerateResponseWrapper: - """Generate a greeting for the given name. +async def generate_with_config(input: TemperatureInput) -> str: + """Generate a greeting with custom model configuration. Args: input: Input with name to greet. @@ -413,10 +361,7 @@ async def say_hi_with_configured_temperature(input: TemperatureInput) -> Generat Returns: The generated response with a function. """ - return await ai.generate( - messages=[Message(role=Role.USER, content=[Part(root=TextPart(text=f'hi {input.data}'))])], - config=GenerationCommonConfig(temperature=0.1), - ) + return await generate_with_config_logic(ai, input.data) @ai.flow() @@ -464,7 +409,43 @@ async def simple_generate_with_tools_flow(input: ToolsFlowInput) -> str: @ai.flow() -async def code_flow(input: CodeInput) -> str: +async def streaming_structured_output( + input: CharacterInput, + ctx: ActionRunContext | None = None, +) -> RpgCharacter: + """Demonstrate streaming with structured output schemas. + + Combines `generate_stream` with `Output(schema=...)` so the model + streams JSON tokens that are progressively parsed into the Pydantic + model. Each chunk exposes a partial `.output` you can forward to + clients for incremental rendering. + + See: https://genkit.dev/docs/models#streaming + + Args: + input: Input with character name. + ctx: Action context for streaming partial outputs. + + Returns: + The fully-parsed RPG character once streaming completes. + """ + stream, result = ai.generate_stream( + prompt=( + f'Generate an RPG character named {input.name}. ' + 'Include a creative backstory, 3-4 unique abilities, ' + 'and skill ratings for strength, charisma, and endurance (0-100 each).' + ), + output=Output(schema=RpgCharacter), + ) + async for chunk in stream: + if ctx is not None: + ctx.send_chunk(chunk.output) + + return (await result).output + + +@ai.flow() +async def generate_code(input: CodeInput) -> str: """Generate code using Vertex AI Gemini. Args: @@ -473,11 +454,27 @@ async def code_flow(input: CodeInput) -> str: Returns: Generated code. """ - response = await ai.generate( - prompt=input.task, - system='You are an expert programmer. Provide clean, well-documented code with explanations.', - ) - return response.text + return await generate_code_logic(ai, input.task) + + +@ai.flow() +async def generate_streaming_with_tools( + input: StreamingToolInput, + ctx: ActionRunContext | None = None, +) -> str: + """Demonstrate streaming generation with tool calling. + + The model streams its response while also calling tools mid-generation. + Tool calls are resolved automatically and the model continues generating. + + Args: + input: Input with location for weather lookup. + ctx: Action context for streaming chunks to the client. + + Returns: + The complete generated text. 
+ """ + return await generate_streaming_with_tools_logic(ai, input.location, ctx) async def main() -> None: diff --git a/py/samples/model-garden/LICENSE b/py/samples/provider-google-genai-vertexai-image/LICENSE similarity index 100% rename from py/samples/model-garden/LICENSE rename to py/samples/provider-google-genai-vertexai-image/LICENSE diff --git a/py/samples/google-genai-vertexai-image/README.md b/py/samples/provider-google-genai-vertexai-image/README.md similarity index 100% rename from py/samples/google-genai-vertexai-image/README.md rename to py/samples/provider-google-genai-vertexai-image/README.md diff --git a/py/samples/google-genai-vertexai-image/pyproject.toml b/py/samples/provider-google-genai-vertexai-image/pyproject.toml similarity index 97% rename from py/samples/google-genai-vertexai-image/pyproject.toml rename to py/samples/provider-google-genai-vertexai-image/pyproject.toml index 51f2922d83..471549f7bc 100644 --- a/py/samples/google-genai-vertexai-image/pyproject.toml +++ b/py/samples/provider-google-genai-vertexai-image/pyproject.toml @@ -42,7 +42,7 @@ dependencies = [ ] description = "Image Generation on VertexAI with GenAI library example" license = "Apache-2.0" -name = "google-genai-vertexai-image" +name = "provider-google-genai-vertexai-image" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/google-genai-vertexai-image/run.sh b/py/samples/provider-google-genai-vertexai-image/run.sh similarity index 100% rename from py/samples/google-genai-vertexai-image/run.sh rename to py/samples/provider-google-genai-vertexai-image/run.sh diff --git a/py/samples/google-genai-vertexai-image/src/main.py b/py/samples/provider-google-genai-vertexai-image/src/main.py similarity index 97% rename from py/samples/google-genai-vertexai-image/src/main.py rename to py/samples/provider-google-genai-vertexai-image/src/main.py index a6a6a9412d..c47fa6ff38 100755 --- a/py/samples/google-genai-vertexai-image/src/main.py +++ b/py/samples/provider-google-genai-vertexai-image/src/main.py @@ -53,13 +53,13 @@ from io import BytesIO from PIL import Image -from rich.traceback import install as install_rich_traceback from genkit.ai import Genkit from genkit.blocks.model import GenerateResponseWrapper from genkit.plugins.google_genai import VertexAI +from samples.shared.logging import setup_sample -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() # Check for GCLOUD_PROJECT or GOOGLE_CLOUD_PROJECT # If GOOGLE_CLOUD_PROJECT is set but GCLOUD_PROJECT isn't, use it diff --git a/py/samples/multi-server/LICENSE b/py/samples/provider-huggingface-hello/LICENSE similarity index 100% rename from py/samples/multi-server/LICENSE rename to py/samples/provider-huggingface-hello/LICENSE diff --git a/py/samples/huggingface-hello/README.md b/py/samples/provider-huggingface-hello/README.md similarity index 100% rename from py/samples/huggingface-hello/README.md rename to py/samples/provider-huggingface-hello/README.md diff --git a/py/samples/huggingface-hello/pyproject.toml b/py/samples/provider-huggingface-hello/pyproject.toml similarity index 96% rename from py/samples/huggingface-hello/pyproject.toml rename to py/samples/provider-huggingface-hello/pyproject.toml index 0843249128..bc6c3d557d 100644 --- a/py/samples/huggingface-hello/pyproject.toml +++ b/py/samples/provider-huggingface-hello/pyproject.toml @@ -25,7 +25,7 @@ dependencies = [ "uvloop>=0.21.0", ] description = "Hugging Face Hello Sample" -name = "huggingface-hello" +name = 
"provider-huggingface-hello" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/huggingface-hello/run.sh b/py/samples/provider-huggingface-hello/run.sh similarity index 100% rename from py/samples/huggingface-hello/run.sh rename to py/samples/provider-huggingface-hello/run.sh diff --git a/py/samples/huggingface-hello/src/main.py b/py/samples/provider-huggingface-hello/src/main.py similarity index 61% rename from py/samples/huggingface-hello/src/main.py rename to py/samples/provider-huggingface-hello/src/main.py index 6698e9ac79..8e5ad9d055 100644 --- a/py/samples/huggingface-hello/src/main.py +++ b/py/samples/provider-huggingface-hello/src/main.py @@ -56,30 +56,54 @@ | Default Model Configuration | `ai = Genkit(model=huggingface_name())`| | Defining Flows | `@ai.flow()` decorator | | Defining Tools | `@ai.tool()` decorator | -| Simple Generation (Prompt String) | `say_hi` | -| Streaming Response | `streaming_flow` | +| Simple Generation (Prompt String) | `generate_greeting` | +| System Prompt | `generate_with_system_prompt` | +| Multi-turn Conversation | `generate_multi_turn_chat` | +| Streaming Response | `generate_streaming_story` | | Different Models | `llama_flow`, `qwen_flow` | -| Generation with Config | `custom_config_flow` | -| Code Generation | `code_flow` | +| Generation with Config | `generate_with_config` | +| Code Generation | `generate_code` | | Multi-turn Chat | `chat_flow` | -| Tool Calling | `weather_flow` | +| Tool Calling | `generate_weather` | | Structured Output (JSON) | `generate_character` | +| Streaming Structured Output | `streaming_structured_output` | """ import asyncio import os -import random from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback from genkit.ai import Genkit, Output from genkit.core.action import ActionRunContext from genkit.core.logging import get_logger -from genkit.core.typing import Message, Part, Role, TextPart, ToolChoice +from genkit.core.typing import Message, Part, Role, TextPart from genkit.plugins.huggingface import HuggingFace, huggingface_name +from samples.shared import ( + CharacterInput, + CodeInput, + ConfigInput, + GreetingInput, + MultiTurnInput, + RpgCharacter, + StreamingToolInput, + StreamInput, + SystemPromptInput, + WeatherInput, + generate_character_logic, + generate_code_logic, + generate_greeting_logic, + generate_multi_turn_chat_logic, + generate_streaming_story_logic, + generate_streaming_with_tools_logic, + generate_weather_logic, + generate_with_config_logic, + generate_with_system_prompt_logic, + get_weather, + setup_sample, +) -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() if 'HF_TOKEN' not in os.environ: os.environ['HF_TOKEN'] = input('Please enter your HF_TOKEN: ') @@ -93,126 +117,62 @@ ) -class SayHiInput(BaseModel): - """Input for say_hi flow.""" - - name: str = Field(default='Hugging Face', description='Name to greet') - - -class StreamInput(BaseModel): - """Input for streaming flow.""" - - topic: str = Field(default='machine learning', description='Topic to generate about') - - class ModelInput(BaseModel): """Input for model-specific flows.""" prompt: str = Field(default='What is the meaning of life?', description='Prompt to send to the model') -class CustomConfigInput(BaseModel): - """Input for custom config flow.""" - - task: str = Field(default='creative', description='Task type: creative, precise, or detailed') - - -class WeatherInput(BaseModel): - """Input schema for the weather tool.""" - - location: 
str = Field(description='City or location name') - - -class WeatherFlowInput(BaseModel): - """Input for weather flow.""" - - location: str = Field(default='San Francisco', description='Location to get weather for') - - -class CharacterInput(BaseModel): - """Input for character generation.""" - - name: str = Field(default='Luna', description='Character name') - - -class CodeInput(BaseModel): - """Input for code generation flow.""" - - task: str = Field( - default='Write a Python function to calculate fibonacci numbers', - description='Coding task description', - ) - - -class Skills(BaseModel): - """A set of core character skills for an RPG character.""" - - strength: int = Field(description='strength (0-100)') - charisma: int = Field(description='charisma (0-100)') - endurance: int = Field(description='endurance (0-100)') - - -class RpgCharacter(BaseModel): - """An RPG character.""" - - name: str = Field(description='name of the character') - back_story: str = Field(description='back story', alias='backStory') - abilities: list[str] = Field(description='list of abilities (3-4)') - skills: Skills +ai.tool()(get_weather) -@ai.tool() -def get_weather(input: WeatherInput) -> str: - """Return a random realistic weather string for a location. +@ai.flow() +async def generate_greeting(input: GreetingInput) -> str: + """Generate a simple greeting. Args: - input: Weather input location. + input: Input with name to greet. Returns: - Weather information with temperature in degrees Celsius. + Greeting message. """ - weather_options = [ - '20° C sunny with light breeze', - '15° C foggy morning', - '22° C clear skies', - '18° C partly cloudy', - ] - return f'Weather in {input.location}: {random.choice(weather_options)}' + return await generate_greeting_logic(ai, input.name) @ai.flow() -async def say_hi(input: SayHiInput) -> str: - """Generate a simple greeting. +async def generate_with_system_prompt(input: SystemPromptInput) -> str: + """Demonstrate system prompts to control model persona and behavior. Args: - input: Input with name to greet. + input: Input with a question to ask. Returns: - Greeting message. + The model's response in the persona defined by the system prompt. """ - response = await ai.generate(prompt=f'Say hello to {input.name}!') - return response.text + return await generate_with_system_prompt_logic(ai, input.question) + + +@ai.flow() +async def generate_multi_turn_chat(input: MultiTurnInput) -> str: + """Demonstrate multi-turn conversations using the messages parameter.""" + return await generate_multi_turn_chat_logic(ai, input.destination) @ai.flow() -async def streaming_flow( +async def generate_streaming_story( input: StreamInput, ctx: ActionRunContext | None = None, ) -> str: - """Generate with streaming response. + """Generate a streaming story response. Args: - input: Input with topic to generate about. - ctx: Action run context for streaming chunks to client. + input: Input with name for streaming story. + ctx: Action run context for streaming. Returns: - Generated text. + Complete generated text. """ - response = await ai.generate( - prompt=f'Tell me an interesting fact about {input.topic}', - on_chunk=ctx.send_chunk if ctx else None, - ) - return response.text + return await generate_streaming_story_logic(ai, input.name, ctx) @ai.flow() @@ -267,52 +227,16 @@ async def gemma_flow(input: ModelInput) -> str: @ai.flow() -async def custom_config_flow(input: CustomConfigInput) -> str: - """Demonstrate custom model configurations for different tasks. 
- - Shows how different config parameters affect generation behavior: - - 'creative': High temperature for diverse, creative outputs - - 'precise': Low temperature for consistent, focused outputs - - 'detailed': Extended output for comprehensive explanations +async def generate_with_config(input: ConfigInput) -> str: + """Generate a greeting with custom model configuration. Args: - input: Input with task type. + input: Input with name to greet. Returns: - Generated response showing the effect of different configs. + Greeting message. """ - task = input.task - - prompts = { - 'creative': 'Write a creative story opener about a robot discovering art', - 'precise': 'List the exact steps to make a cup of tea', - 'detailed': 'Explain how photosynthesis works in detail', - } - - configs: dict[str, dict[str, object]] = { - 'creative': { - 'temperature': 0.9, - 'max_tokens': 200, - 'top_p': 0.95, - }, - 'precise': { - 'temperature': 0.1, - 'max_tokens': 150, - }, - 'detailed': { - 'temperature': 0.5, - 'max_tokens': 400, - }, - } - - prompt = prompts.get(task, prompts['creative']) - config: dict[str, object] = configs.get(task, configs['creative']) - - response = await ai.generate( - prompt=prompt, - config=config, - ) - return response.text + return await generate_with_config_logic(ai, input.name) @ai.flow() @@ -364,43 +288,21 @@ async def chat_flow() -> str: @ai.flow() -async def weather_flow(input: WeatherFlowInput) -> str: - """Get weather using Hugging Face tool calling. - - Demonstrates how to use tools with Hugging Face models. The model - will automatically call the get_weather tool when asked about weather. - - Note: Tool calling support depends on the specific model. Many popular - models like Llama 3 and Mistral support function calling. +async def generate_weather(input: WeatherInput) -> str: + """Get weather information using tool calling. Args: input: Input with location to get weather for. Returns: - Weather information for the location. + Weather information. """ - response = await ai.generate( - model=huggingface_name('mistralai/Mistral-7B-Instruct-v0.3'), - prompt=f'What is the weather in {input.location}?', - system=( - 'You have a tool called get_weather. ' - "It takes an object with a 'location' field. " - 'Always use this tool when asked about weather.' - ), - tools=['get_weather'], - tool_choice=ToolChoice.REQUIRED, - max_turns=2, - ) - return response.text + return await generate_weather_logic(ai, input) @ai.flow() async def generate_character(input: CharacterInput) -> RpgCharacter: - """Generate an RPG character using structured output. - - Demonstrates how to use JSON mode for structured output with - Hugging Face models. The model returns data that matches the - RpgCharacter schema. + """Generate an RPG character with structured output. Args: input: Input with character name. @@ -408,21 +310,48 @@ async def generate_character(input: CharacterInput) -> RpgCharacter: Returns: The generated RPG character. """ - prompt = ( - f'Generate an RPG character named {input.name}. ' - 'Include a creative backstory, 3-4 unique abilities, ' - 'and skill ratings for strength, charisma, and endurance (0-100 each).' - ) - result = await ai.generate( + return await generate_character_logic(ai, input.name) + + +@ai.flow() +async def streaming_structured_output( + input: CharacterInput, + ctx: ActionRunContext | None = None, +) -> RpgCharacter: + """Demonstrate streaming with structured output schemas. 
+ + Combines `generate_stream` with `Output(schema=...)` so the model + streams JSON tokens that are progressively parsed into the Pydantic + model. Each chunk exposes a partial `.output` you can forward to + clients for incremental rendering. + + See: https://genkit.dev/docs/models#streaming + + Args: + input: Input with character name. + ctx: Action context for streaming partial outputs. + + Returns: + The fully-parsed RPG character once streaming completes. + """ + stream, result = ai.generate_stream( model=huggingface_name('mistralai/Mistral-7B-Instruct-v0.3'), - prompt=prompt, + prompt=( + f'Generate an RPG character named {input.name}. ' + 'Include a creative backstory, 3-4 unique abilities, ' + 'and skill ratings for strength, charisma, and endurance (0-100 each).' + ), output=Output(schema=RpgCharacter), ) - return result.output + async for chunk in stream: + if ctx is not None: + ctx.send_chunk(chunk.output) + + return (await result).output @ai.flow() -async def code_flow(input: CodeInput) -> str: +async def generate_code(input: CodeInput) -> str: """Generate code using Hugging Face models. Args: @@ -431,11 +360,24 @@ async def code_flow(input: CodeInput) -> str: Returns: Generated code. """ - response = await ai.generate( - prompt=input.task, - system='You are an expert programmer. Provide clean, well-documented code with explanations.', - ) - return response.text + return await generate_code_logic(ai, input.task) + + +@ai.flow() +async def generate_streaming_with_tools( + input: StreamingToolInput, + ctx: ActionRunContext | None = None, +) -> str: + """Demonstrate streaming generation with tool calling. + + Args: + input: Input with location for weather lookup. + ctx: Action context for streaming chunks to the client. + + Returns: + The complete generated text. + """ + return await generate_streaming_with_tools_logic(ai, input.location, ctx) async def main() -> None: diff --git a/py/samples/observability-hello/LICENSE b/py/samples/provider-microsoft-foundry-hello/LICENSE similarity index 100% rename from py/samples/observability-hello/LICENSE rename to py/samples/provider-microsoft-foundry-hello/LICENSE diff --git a/py/samples/microsoft-foundry-hello/README.md b/py/samples/provider-microsoft-foundry-hello/README.md similarity index 97% rename from py/samples/microsoft-foundry-hello/README.md rename to py/samples/provider-microsoft-foundry-hello/README.md index 870218605d..f183cfb5b8 100644 --- a/py/samples/microsoft-foundry-hello/README.md +++ b/py/samples/provider-microsoft-foundry-hello/README.md @@ -118,6 +118,8 @@ ai = Genkit( | Tool Usage | `weather_flow` | Function calling with tools | | Configuration | `say_hi_with_config` | Custom temperature, max_tokens, etc. 
| | Multimodal | `describe_image` | Processing image inputs | +| Structured Output | `generate_character` | RPG character generation as JSON | +| Reasoning | `reasoning_flow` | Chain-of-thought reasoning with o4-mini | ## Supported Models diff --git a/py/samples/microsoft-foundry-hello/pyproject.toml b/py/samples/provider-microsoft-foundry-hello/pyproject.toml similarity index 96% rename from py/samples/microsoft-foundry-hello/pyproject.toml rename to py/samples/provider-microsoft-foundry-hello/pyproject.toml index abec62093b..b85a67846c 100644 --- a/py/samples/microsoft-foundry-hello/pyproject.toml +++ b/py/samples/provider-microsoft-foundry-hello/pyproject.toml @@ -24,7 +24,7 @@ dependencies = [ ] description = "Microsoft Foundry Hello Sample" license = "Apache-2.0" -name = "microsoft-foundry-hello" +name = "provider-microsoft-foundry-hello" readme = "README.md" requires-python = ">=3.10" version = "0.0.0" diff --git a/py/samples/microsoft-foundry-hello/run.sh b/py/samples/provider-microsoft-foundry-hello/run.sh similarity index 100% rename from py/samples/microsoft-foundry-hello/run.sh rename to py/samples/provider-microsoft-foundry-hello/run.sh diff --git a/py/samples/microsoft-foundry-hello/src/main.py b/py/samples/provider-microsoft-foundry-hello/src/main.py similarity index 59% rename from py/samples/microsoft-foundry-hello/src/main.py rename to py/samples/provider-microsoft-foundry-hello/src/main.py index cfa3e3e816..42a3fc6fca 100644 --- a/py/samples/microsoft-foundry-hello/src/main.py +++ b/py/samples/provider-microsoft-foundry-hello/src/main.py @@ -48,18 +48,23 @@ Key Features ============ -| Feature | Example | -|----------------------------------|----------------------------------------| -| Plugin Initialization | `MicrosoftFoundry(api_key=..., ...)` | -| Default Model Configuration | `ai = Genkit(model=gpt4o)` | -| Defining Flows | `@ai.flow()` decorator | -| Defining Tools | `@ai.tool()` decorator | -| Simple Generation | `say_hi` | -| Streaming Generation | `say_hi_stream` | -| Generation with Tools | `weather_flow` | -| Generation Configuration | `say_hi_with_config` | -| Code Generation | `code_flow` | -| Multimodal (Image Input) | `describe_image` | +| Feature | Example | +|----------------------------------|--------------------------------------------| +| Plugin Initialization | `MicrosoftFoundry(api_key=..., ...)` | +| Default Model Configuration | `ai = Genkit(model=gpt4o)` | +| Defining Flows | `@ai.flow()` decorator | +| Defining Tools | `@ai.tool()` decorator | +| Simple Generation | `generate_greeting` | +| Streaming Generation | `generate_streaming_story` | +| System Prompt | `generate_with_system_prompt` | +| Multi-turn Conversation | `generate_multi_turn_chat` | +| Generation with Tools | `generate_weather` | +| Structured Output | `generate_character` | +| Streaming Structured Output | `streaming_structured_output` | +| Multimodal (Image Input) | `describe_image` | +| Reasoning (Chain-of-Thought) | `solve_reasoning_problem` | +| Generation Configuration | `generate_with_config` | +| Code Generation | `generate_code` | Endpoint Types ============== @@ -138,18 +143,39 @@ import asyncio import os -import random -from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback - -from genkit.ai import Genkit +from genkit.ai import Genkit, Output from genkit.core.action import ActionRunContext from genkit.core.logging import get_logger -from genkit.plugins.microsoft_foundry import MicrosoftFoundry, gpt4o -from genkit.types import 
Media, MediaPart, Part, TextPart +from genkit.plugins.microsoft_foundry import MicrosoftFoundry, gpt4o, microsoft_foundry_model +from samples.shared import ( + CharacterInput, + CodeInput, + GreetingInput, + ImageDescribeInput, + MultiTurnInput, + ReasoningInput, + RpgCharacter, + StreamingToolInput, + StreamInput, + SystemPromptInput, + WeatherInput, + describe_image_logic, + generate_character_logic, + generate_code_logic, + generate_greeting_logic, + generate_multi_turn_chat_logic, + generate_streaming_story_logic, + generate_streaming_with_tools_logic, + generate_weather_logic, + generate_with_config_logic, + generate_with_system_prompt_logic, + get_weather, + setup_sample, + solve_reasoning_problem_logic, +) -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() # Configuration from environment variables # Find these values in Microsoft Foundry Portal: @@ -180,162 +206,194 @@ model=gpt4o, ) +ai.tool()(get_weather) -class SayHiInput(BaseModel): - """Input for say_hi flow.""" - - name: str = Field(default='World', description='Name to greet') +@ai.flow() +async def generate_greeting(input: GreetingInput) -> str: + """Generate a simple greeting. -class WeatherInput(BaseModel): - """Weather tool input schema.""" + Args: + input: Input with name to greet. - location: str = Field(description='Location to get weather for') + Returns: + Greeting message. + """ + return await generate_greeting_logic(ai, input.name) -class WeatherFlowInput(BaseModel): - """Input for weather flow.""" +@ai.flow() +async def generate_with_system_prompt(input: SystemPromptInput) -> str: + """Demonstrate system prompts to control model persona and behavior. - location: str = Field(default='San Francisco', description='Location to get weather for') + Args: + input: Input with a question to ask. + Returns: + The model's response in the persona defined by the system prompt. + """ + return await generate_with_system_prompt_logic(ai, input.question) -class StreamInput(BaseModel): - """Input for streaming flow.""" - topic: str = Field(default='cats', description='Topic to write about') +@ai.flow() +async def generate_multi_turn_chat(input: MultiTurnInput) -> str: + """Demonstrate multi-turn conversations using the messages parameter. + Args: + input: Input with a travel destination. -class ImageDescribeInput(BaseModel): - """Input for image description flow.""" + Returns: + The model's final response, demonstrating context retention. + """ + return await generate_multi_turn_chat_logic(ai, input.destination) - # Public domain cat image from Wikimedia Commons (no copyright, free for any use) - # Source: https://commons.wikimedia.org/wiki/File:Cute_kitten.jpg - image_url: str = Field( - default='https://upload.wikimedia.org/wikipedia/commons/1/13/Cute_kitten.jpg', - description='URL of the image to describe', - ) +@ai.flow() +async def generate_weather(input: WeatherInput) -> str: + """Get weather information using tool calling. -class CodeInput(BaseModel): - """Input for code generation flow.""" + Args: + input: Input with location to get weather for. - task: str = Field( - default='Write a Python function to calculate fibonacci numbers', - description='Coding task description', - ) + Returns: + Weather information. + """ + return await generate_weather_logic(ai, input) @ai.flow() -async def say_hi(input: SayHiInput) -> str: - """Generate a simple greeting. 
+async def generate_streaming_story( + input: StreamInput, + ctx: ActionRunContext = None, # type: ignore[assignment] +) -> str: + """Generate a streaming story response. + + Args: + input: Input with name for streaming story. + ctx: Action run context for streaming. - This demonstrates basic text generation with Microsoft Foundry. + Returns: + Complete generated text. """ - response = await ai.generate( - prompt=f'Say hello to {input.name} in a friendly way', - ) - return response.text + return await generate_streaming_story_logic(ai, input.name, ctx) -@ai.tool() -def get_weather(input: WeatherInput) -> str: - """Return weather information for a location. +@ai.flow() +async def describe_image(input: ImageDescribeInput) -> str: + """Describe an image using Microsoft Foundry. - This is a mock tool that demonstrates function calling with Microsoft Foundry. + Args: + input: Input with image URL to describe. + + Returns: + A textual description of the image. + """ + return await describe_image_logic(ai, input.image_url) + + +@ai.flow() +async def generate_with_config(input: GreetingInput) -> str: + """Generate a greeting with custom model configuration. Args: - input: Weather input with location. + input: Input with name to greet. Returns: - Weather information string. + Greeting message. """ - weather_options = [ - '32° C sunny', - '17° C cloudy', - '22° C partly cloudy', - '19° C humid', - ] - return f'{input.location}: {random.choice(weather_options)}' + return await generate_with_config_logic(ai, input.name) @ai.flow() -async def weather_flow(input: WeatherFlowInput) -> str: - """Get weather using function calling. +async def generate_code(input: CodeInput) -> str: + """Generate code using Microsoft Foundry models. - This demonstrates Microsoft Foundry's tool/function calling capability. + Args: + input: Input with coding task description. + + Returns: + Generated code. """ - response = await ai.generate( - prompt=f'What is the weather in {input.location}?', - tools=['get_weather'], - ) - return response.text + return await generate_code_logic(ai, input.task) @ai.flow() -async def say_hi_stream( - input: StreamInput, - ctx: ActionRunContext = None, # type: ignore[assignment] -) -> str: - """Generate streaming response. +async def generate_character(input: CharacterInput) -> RpgCharacter: + """Generate an RPG character with structured output. - This demonstrates streaming with Microsoft Foundry. + Args: + input: Input with character name. + + Returns: + The generated RPG character. """ - response = await ai.generate( - prompt=f'Write a short poem about {input.topic}', - on_chunk=ctx.send_chunk, - ) - return response.text + return await generate_character_logic(ai, input.name) @ai.flow() -async def describe_image(input: ImageDescribeInput) -> str: - """Describe an image using Microsoft Foundry. +async def streaming_structured_output( + input: CharacterInput, + ctx: ActionRunContext | None = None, +) -> RpgCharacter: + """Demonstrate streaming with structured output schemas. + + Combines `generate_stream` with `Output(schema=...)` so the model + streams JSON tokens that are progressively parsed into the Pydantic + model. Each chunk exposes a partial `.output` you can forward to + clients for incremental rendering. - This demonstrates multimodal capabilities with vision models. - Note: Requires a vision-capable model like gpt-4o. + See: https://genkit.dev/docs/models#streaming + + Args: + input: Input with character name. + ctx: Action context for streaming partial outputs. 
+ + Returns: + The fully-parsed RPG character once streaming completes. """ - response = await ai.generate( - prompt=[ - Part(root=TextPart(text='Describe this image in detail')), - Part(root=MediaPart(media=Media(url=input.image_url, content_type='image/jpeg'))), - ], - config={'visual_detail_level': 'auto'}, + stream, result = ai.generate_stream( + prompt=( + f'Generate an RPG character named {input.name}. ' + 'Include a creative backstory, 3-4 unique abilities, ' + 'and skill ratings for strength, charisma, and endurance (0-100 each).' + ), + output=Output(schema=RpgCharacter), ) - return response.text + async for chunk in stream: + if ctx is not None: + ctx.send_chunk(chunk.output) + + return (await result).output @ai.flow() -async def say_hi_with_config(input: SayHiInput) -> str: - """Generate greeting with custom configuration. +async def solve_reasoning_problem(input: ReasoningInput) -> str: + """Solve reasoning problems using a reasoning model. - This demonstrates using MicrosoftFoundryConfig for fine-tuned control. + Args: + input: Input with reasoning question to solve. + + Returns: + The reasoning and answer. """ - response = await ai.generate( - prompt=f'Say hello to {input.name}', - config={ - 'temperature': 0.9, - 'max_tokens': 50, - 'frequency_penalty': 0.5, - }, - ) - return response.text + return await solve_reasoning_problem_logic(ai, input.prompt, model=microsoft_foundry_model('o4-mini')) @ai.flow() -async def code_flow(input: CodeInput) -> str: - """Generate code using Microsoft Foundry models. +async def generate_streaming_with_tools( + input: StreamingToolInput, + ctx: ActionRunContext | None = None, +) -> str: + """Demonstrate streaming generation with tool calling. Args: - input: Input with coding task description. + input: Input with location for weather lookup. + ctx: Action context for streaming chunks to the client. Returns: - Generated code. + The complete generated text. """ - response = await ai.generate( - prompt=input.task, - system='You are an expert programmer. Provide clean, well-documented code with explanations.', - ) - return response.text + return await generate_streaming_with_tools_logic(ai, input.location, ctx) async def main() -> None: diff --git a/py/samples/ollama-hello/LICENSE b/py/samples/provider-mistral-hello/LICENSE similarity index 100% rename from py/samples/ollama-hello/LICENSE rename to py/samples/provider-mistral-hello/LICENSE diff --git a/py/samples/mistral-hello/README.md b/py/samples/provider-mistral-hello/README.md similarity index 97% rename from py/samples/mistral-hello/README.md rename to py/samples/provider-mistral-hello/README.md index 96f1d48e12..c09e138ca7 100644 --- a/py/samples/mistral-hello/README.md +++ b/py/samples/provider-mistral-hello/README.md @@ -56,7 +56,7 @@ genkit start -- uv run src/main.py - [ ] `custom_config_flow` - Custom temperature/config 3. **Test code generation**: - - [ ] `code_flow` - Code generation with Codestral + - [ ] `generate_code` - Code generation with Codestral 4. 
**Test large model**: - [ ] `large_model_flow` - Complex reasoning with mistral-large diff --git a/py/samples/mistral-hello/assets/genkit.wav b/py/samples/provider-mistral-hello/assets/genkit.wav similarity index 100% rename from py/samples/mistral-hello/assets/genkit.wav rename to py/samples/provider-mistral-hello/assets/genkit.wav diff --git a/py/samples/mistral-hello/pyproject.toml b/py/samples/provider-mistral-hello/pyproject.toml similarity index 97% rename from py/samples/mistral-hello/pyproject.toml rename to py/samples/provider-mistral-hello/pyproject.toml index d7afbd8101..ccba104072 100644 --- a/py/samples/mistral-hello/pyproject.toml +++ b/py/samples/provider-mistral-hello/pyproject.toml @@ -25,7 +25,7 @@ dependencies = [ "uvloop>=0.21.0", ] description = "Mistral AI Hello Sample" -name = "mistral-hello" +name = "provider-mistral-hello" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/mistral-hello/run.sh b/py/samples/provider-mistral-hello/run.sh similarity index 100% rename from py/samples/mistral-hello/run.sh rename to py/samples/provider-mistral-hello/run.sh diff --git a/py/samples/mistral-hello/src/main.py b/py/samples/provider-mistral-hello/src/main.py similarity index 58% rename from py/samples/mistral-hello/src/main.py rename to py/samples/provider-mistral-hello/src/main.py index 3231daa843..309a338f25 100644 --- a/py/samples/mistral-hello/src/main.py +++ b/py/samples/provider-mistral-hello/src/main.py @@ -56,15 +56,18 @@ | Default Model Configuration | `ai = Genkit(model=mistral_name(...))` | | Defining Flows | `@ai.flow()` decorator | | Defining Tools | `@ai.tool()` decorator | -| Simple Generation (Prompt String) | `say_hi` | -| Streaming Response | `streaming_flow` | -| Code Generation (Codestral) | `code_flow` | -| Generation with Config | `custom_config_flow` | -| Multi-turn Chat | `chat_flow` | -| Tool Calling | `weather_flow` | -| Structured Output (JSON) | `generate_character` | -| Multimodal (Image Input) | `describe_image` | -| Reasoning (Magistral) | `reasoning_flow` | +| Simple Generation (Prompt String) | `generate_greeting` | +| System Prompt | `generate_with_system_prompt` | +| Multi-turn Conversation | `generate_multi_turn_chat` | +| Streaming Response | `generate_streaming_story` | +| Code Generation (Codestral) | `generate_code` | +| Generation with Config | `generate_with_config` | +| Multi-turn Chat | `chat_flow` | +| Tool Calling | `generate_weather` | +| Structured Output (JSON) | `generate_character` | +| Streaming Structured Output | `streaming_structured_output` | +| Multimodal (Image Input) | `describe_image` | +| Reasoning (Magistral) | `solve_reasoning_problem` | | Embeddings (Text) | `embed_flow` | | Embeddings (Code) | `code_embed_flow` | | Audio Transcription (Voxtral) | `audio_flow` | @@ -73,20 +76,45 @@ import asyncio import base64 import os -import random from pathlib import Path from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback from genkit.ai import Genkit, Output from genkit.blocks.document import Document from genkit.core.action import ActionRunContext from genkit.core.logging import get_logger -from genkit.core.typing import Media, MediaPart, Message, Part, Role, TextPart, ToolChoice +from genkit.core.typing import Media, MediaPart, Message, Part, Role, TextPart from genkit.plugins.mistral import Mistral, mistral_name +from samples.shared import ( + CharacterInput, + CodeInput, + ConfigInput, + GreetingInput, + ImageDescribeInput, + MultiTurnInput, + 
ReasoningInput, + RpgCharacter, + StreamingToolInput, + StreamInput, + SystemPromptInput, + WeatherInput, + describe_image_logic, + generate_character_logic, + generate_code_logic, + generate_greeting_logic, + generate_multi_turn_chat_logic, + generate_streaming_story_logic, + generate_streaming_with_tools_logic, + generate_weather_logic, + generate_with_config_logic, + generate_with_system_prompt_logic, + get_weather, + setup_sample, + solve_reasoning_problem_logic, +) -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() if 'MISTRAL_API_KEY' not in os.environ: os.environ['MISTRAL_API_KEY'] = input('Please enter your MISTRAL_API_KEY: ') @@ -98,47 +126,7 @@ model=mistral_name('mistral-small-latest'), ) - -class SayHiInput(BaseModel): - """Input for say_hi flow.""" - - name: str = Field(default='Mistral', description='Name to greet') - - -class StreamInput(BaseModel): - """Input for streaming flow.""" - - topic: str = Field(default='artificial intelligence', description='Topic to generate about') - - -class CodeInput(BaseModel): - """Input for code generation flow.""" - - task: str = Field(default='Write a Python function to calculate fibonacci numbers', description='Coding task') - - -class CustomConfigInput(BaseModel): - """Input for custom config flow.""" - - task: str = Field(default='creative', description='Task type: creative, precise, or detailed') - - -class WeatherInput(BaseModel): - """Input schema for the weather tool.""" - - location: str = Field(description='City or location name') - - -class WeatherFlowInput(BaseModel): - """Input for weather flow.""" - - location: str = Field(default='Paris', description='Location to get weather for') - - -class CharacterInput(BaseModel): - """Input for character generation.""" - - name: str = Field(default='Whiskers', description='Character name') +ai.tool()(get_weather) class EmbedInput(BaseModel): @@ -156,24 +144,6 @@ class CodeEmbedInput(BaseModel): ) -class ImageInput(BaseModel): - """Input for image description flow.""" - - image_url: str = Field( - default='https://picsum.photos/id/237/400/300', - description='URL of the image to describe', - ) - - -class ReasoningInput(BaseModel): - """Input for reasoning flow.""" - - question: str = Field( - default='John is one of 4 children. His sister is 4 years old. How old is John?', - description='Reasoning question', - ) - - class AudioInput(BaseModel): """Input for audio transcription flow.""" @@ -183,79 +153,98 @@ class AudioInput(BaseModel): ) -class Skills(BaseModel): - """A set of core character skills for an RPG character.""" - - strength: int = Field(description='strength (0-100)') - charisma: int = Field(description='charisma (0-100)') - endurance: int = Field(description='endurance (0-100)') - +@ai.flow() +async def generate_greeting(input: GreetingInput) -> str: + """Generate a simple greeting. -class RpgCharacter(BaseModel): - """An RPG character.""" + Args: + input: Input with name to greet. - name: str = Field(description='name of the character') - back_story: str = Field(description='back story', alias='backStory') - abilities: list[str] = Field(description='list of abilities (3-4)') - skills: Skills + Returns: + Greeting message. + """ + return await generate_greeting_logic(ai, input.name) -@ai.tool() -def get_weather(input: WeatherInput) -> str: - """Return a random realistic weather string for a location. 
+@ai.flow() +async def generate_with_system_prompt(input: SystemPromptInput) -> str: + """Demonstrate system prompts to control model persona and behavior. Args: - input: Weather input location. + input: Input with a question to ask. Returns: - Weather information with temperature in degrees Celsius. + The model's response in the persona defined by the system prompt. """ - weather_options = [ - '18° C sunny with light clouds', - '22° C partly cloudy', - '15° C overcast with chance of rain', - '25° C clear and warm', - ] - return f'Weather in {input.location}: {random.choice(weather_options)}' + return await generate_with_system_prompt_logic(ai, input.question) @ai.flow() -async def say_hi(input: SayHiInput) -> str: - """Generate a simple greeting. +async def generate_multi_turn_chat(input: MultiTurnInput) -> str: + """Demonstrate multi-turn conversations using the messages parameter. Args: - input: Input with name to greet. + input: Input with a travel destination. Returns: - Greeting message. + The model's final response, demonstrating context retention. """ - response = await ai.generate(prompt=f'Say hello to {input.name}!') - return response.text + return await generate_multi_turn_chat_logic(ai, input.destination) @ai.flow() -async def streaming_flow( +async def generate_streaming_story( input: StreamInput, ctx: ActionRunContext | None = None, ) -> str: - """Generate with streaming response. + """Generate a streaming story response. Args: - input: Input with topic to generate about. - ctx: Action run context for streaming chunks to client. + input: Input with name for streaming story. + ctx: Action run context for streaming. Returns: - Generated text. + Complete generated text. """ - response = await ai.generate( - prompt=f'Tell me an interesting fact about {input.topic}', - on_chunk=ctx.send_chunk if ctx else None, + return await generate_streaming_story_logic(ai, input.name, ctx) + + +@ai.flow() +async def streaming_structured_output( + input: CharacterInput, + ctx: ActionRunContext | None = None, +) -> RpgCharacter: + """Streaming with structured output schema. + + Combines `generate_stream` with `Output(schema=...)` so the model + streams JSON tokens that are progressively parsed into the Pydantic + model. Each chunk exposes a partial `.output` you can forward to + clients for incremental rendering. + + Args: + input: Input with character name. + ctx: Action context for streaming partial outputs. + + Returns: + The fully-parsed RPG character once streaming completes. + """ + stream, result = ai.generate_stream( + prompt=( + f'Generate an RPG character named {input.name}. ' + 'Include a creative backstory, 3-4 unique abilities, ' + 'and skill ratings for strength, charisma, and endurance (0-100 each).' + ), + output=Output(schema=RpgCharacter), ) - return response.text + async for chunk in stream: + if ctx is not None: + ctx.send_chunk(chunk.output) + + return (await result).output @ai.flow() -async def code_flow(input: CodeInput) -> str: +async def generate_code(input: CodeInput) -> str: """Generate code using Codestral model. Args: @@ -264,63 +253,20 @@ async def code_flow(input: CodeInput) -> str: Returns: Generated code. """ - response = await ai.generate( - model=mistral_name('codestral-latest'), - prompt=input.task, - system='You are an expert programmer. 
Provide clean, well-documented code with explanations.', - ) - return response.text + return await generate_code_logic(ai, input.task) @ai.flow() -async def custom_config_flow(input: CustomConfigInput) -> str: - """Demonstrate custom model configurations for different tasks. - - Shows how different config parameters affect generation behavior: - - 'creative': High temperature for diverse, creative outputs - - 'precise': Low temperature for consistent, focused outputs - - 'detailed': Extended output for comprehensive explanations +async def generate_with_config(input: ConfigInput) -> str: + """Generate a greeting with custom model configuration. Args: - input: Input with task type. + input: Input with name to greet. Returns: - Generated response showing the effect of different configs. + Greeting message. """ - task = input.task - - prompts = { - 'creative': 'Write a creative story opener about a robot discovering art', - 'precise': 'List the exact steps to make a cup of tea', - 'detailed': 'Explain how photosynthesis works in detail', - } - - configs: dict[str, dict[str, object]] = { - 'creative': { - 'temperature': 0.9, - 'max_tokens': 200, - 'top_p': 0.95, - 'presence_penalty': 0.6, - 'frequency_penalty': 0.4, - }, - 'precise': { - 'temperature': 0.1, - 'max_tokens': 150, - }, - 'detailed': { - 'temperature': 0.5, - 'max_tokens': 400, - }, - } - - prompt = prompts.get(task, prompts['creative']) - config: dict[str, object] = configs.get(task, configs['creative']) - - response = await ai.generate( - prompt=prompt, - config=config, - ) - return response.text + return await generate_with_config_logic(ai, input.name) @ai.flow() @@ -392,39 +338,21 @@ async def large_model_flow() -> str: @ai.flow() -async def weather_flow(input: WeatherFlowInput) -> str: - """Get weather using Mistral tool calling. - - Demonstrates how to use tools with Mistral models. The model - will automatically call the get_weather tool when asked about weather. +async def generate_weather(input: WeatherInput) -> str: + """Get weather information using tool calling. Args: input: Input with location to get weather for. Returns: - Weather information for the location. + Weather information. """ - response = await ai.generate( - model=mistral_name('mistral-small-latest'), - prompt=f'What is the weather in {input.location}?', - system=( - 'You have a tool called get_weather. ' - "It takes an object with a 'location' field. " - 'Always use this tool when asked about weather.' - ), - tools=['get_weather'], - tool_choice=ToolChoice.REQUIRED, - max_turns=2, - ) - return response.text + return await generate_weather_logic(ai, input) @ai.flow() async def generate_character(input: CharacterInput) -> RpgCharacter: - """Generate an RPG character using structured output. - - Demonstrates how to use Mistral's JSON mode for structured output. - The model returns data that matches the RpgCharacter schema. + """Generate an RPG character with structured output. Args: input: Input with character name. @@ -432,28 +360,13 @@ async def generate_character(input: CharacterInput) -> RpgCharacter: Returns: The generated RPG character. """ - prompt = ( - f'Generate an RPG character named {input.name}. ' - 'Include a creative backstory, 3-4 unique abilities, ' - 'and skill ratings for strength, charisma, and endurance (0-100 each).' 
- ) - result = await ai.generate( - model=mistral_name('mistral-small-latest'), - prompt=prompt, - output=Output(schema=RpgCharacter), - ) - return result.output + return await generate_character_logic(ai, input.name) @ai.flow() async def embed_flow(input: EmbedInput) -> list[float]: """Generate embeddings for text using Mistral's mistral-embed model. - Embeddings are dense vector representations of text, useful for: - - Semantic search: find documents similar to a query - - Clustering: group similar documents together - - RAG: retrieve relevant context for generation - Args: input: Input with text to embed. @@ -472,11 +385,6 @@ async def embed_flow(input: EmbedInput) -> list[float]: async def code_embed_flow(input: CodeEmbedInput) -> list[float]: """Generate code embeddings using Mistral's codestral-embed model. - Codestral Embed produces semantic representations of code snippets, - useful for code search, clone detection, and similarity comparisons. - - See: https://docs.mistral.ai/models/codestral-embed-25-05 - Args: input: Input with code snippet to embed. @@ -492,69 +400,37 @@ async def code_embed_flow(input: CodeEmbedInput) -> list[float]: @ai.flow() -async def describe_image(input: ImageInput) -> str: +async def describe_image(input: ImageDescribeInput) -> str: """Describe an image using Mistral Large 3 (vision). - Mistral Large 3, Medium 3.1, Small 3.2, and Ministral 3 all support - image input alongside text. The model can analyze and describe the - contents of the image. - - See: https://docs.mistral.ai/capabilities/vision/ - Args: - input: Input with image URL. + input: Input with image URL to describe. Returns: - Description of the image. + A textual description of the image. """ - response = await ai.generate( - model=mistral_name('mistral-large-latest'), - messages=[ - Message( - role=Role.USER, - content=[ - Part(root=MediaPart(media=Media(url=input.image_url, content_type='image/png'))), - Part(root=TextPart(text='Describe this image in detail.')), - ], - ), - ], - ) - return response.text + return await describe_image_logic(ai, input.image_url, model=mistral_name('mistral-large-latest')) @ai.flow() -async def reasoning_flow(input: ReasoningInput) -> str: - """Use Magistral for step-by-step reasoning. - - Magistral models think through problems step by step before answering. - They are optimized for math, logic, and complex reasoning tasks. - - See: https://docs.mistral.ai/capabilities/reasoning +async def solve_reasoning_problem(input: ReasoningInput) -> str: + """Solve reasoning problems using Magistral. Args: - input: Input with reasoning question. + input: Input with reasoning question to solve. Returns: - The reasoned answer. + The reasoning and answer. """ - response = await ai.generate( - model=mistral_name('magistral-small-latest'), - prompt=input.question, - ) - return response.text + return await solve_reasoning_problem_logic(ai, input.prompt, model=mistral_name('magistral-small-latest')) @ai.flow() async def audio_flow(input: AudioInput) -> str: """Transcribe audio using Voxtral Mini. - Voxtral models accept audio input alongside text. The audio is - base64-encoded and sent as a MediaPart with audio/* content type. - Uses the bundled genkit.wav file by default. - See: https://docs.mistral.ai/capabilities/audio/ - Args: input: Input with optional path to an audio file. 
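The inline Mistral implementations deleted above now live in shared helpers
under `samples.shared`. A sketch of what `describe_image_logic` might look
like, reconstructed from the removed lines (the helper's exact signature and
defaults in `samples.shared` are assumptions):

```python
from genkit.ai import Genkit
from genkit.core.typing import Media, MediaPart, Message, Part, Role, TextPart


async def describe_image_logic(ai: Genkit, image_url: str, model: str | None = None) -> str:
    """Describe an image by pairing a media part with a text instruction (sketch)."""
    response = await ai.generate(
        model=model,  # assumed to fall back to the Genkit instance's default model when None
        messages=[
            Message(
                role=Role.USER,
                content=[
                    Part(root=MediaPart(media=Media(url=image_url, content_type='image/png'))),
                    Part(root=TextPart(text='Describe this image in detail.')),
                ],
            ),
        ],
    )
    return response.text
```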
@@ -581,6 +457,23 @@ async def audio_flow(input: AudioInput) -> str: return response.text +@ai.flow() +async def generate_streaming_with_tools( + input: StreamingToolInput, + ctx: ActionRunContext | None = None, +) -> str: + """Demonstrate streaming generation with tool calling. + + Args: + input: Input with location for weather lookup. + ctx: Action context for streaming chunks to the client. + + Returns: + The complete generated text. + """ + return await generate_streaming_with_tools_logic(ai, input.location, ctx) + + async def main() -> None: """Main entry point for the Mistral sample - keep alive for Dev UI.""" await logger.ainfo('Genkit server running. Press Ctrl+C to stop.') diff --git a/py/samples/ollama-simple-embed/LICENSE b/py/samples/provider-observability-hello/LICENSE similarity index 100% rename from py/samples/ollama-simple-embed/LICENSE rename to py/samples/provider-observability-hello/LICENSE diff --git a/py/samples/observability-hello/README.md b/py/samples/provider-observability-hello/README.md similarity index 100% rename from py/samples/observability-hello/README.md rename to py/samples/provider-observability-hello/README.md diff --git a/py/samples/observability-hello/pyproject.toml b/py/samples/provider-observability-hello/pyproject.toml similarity index 96% rename from py/samples/observability-hello/pyproject.toml rename to py/samples/provider-observability-hello/pyproject.toml index 644dd0b06f..f7d829c950 100644 --- a/py/samples/observability-hello/pyproject.toml +++ b/py/samples/provider-observability-hello/pyproject.toml @@ -24,7 +24,7 @@ dependencies = [ ] description = "Third-party observability sample for Genkit" license = "Apache-2.0" -name = "observability-hello" +name = "provider-observability-hello" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/observability-hello/run.sh b/py/samples/provider-observability-hello/run.sh similarity index 100% rename from py/samples/observability-hello/run.sh rename to py/samples/provider-observability-hello/run.sh diff --git a/py/samples/observability-hello/src/main.py b/py/samples/provider-observability-hello/src/main.py similarity index 98% rename from py/samples/observability-hello/src/main.py rename to py/samples/provider-observability-hello/src/main.py index 5dc788789c..ec2b01d6c0 100644 --- a/py/samples/observability-hello/src/main.py +++ b/py/samples/provider-observability-hello/src/main.py @@ -117,13 +117,13 @@ import asyncio from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback - -install_rich_traceback(show_locals=True, width=120, extra_lines=3) from genkit.ai import Genkit from genkit.plugins.google_genai import GoogleAI from genkit.plugins.observability import configure_telemetry +from samples.shared.logging import setup_sample + +setup_sample() # Configure observability telemetry FIRST (before creating Genkit instance) # Change backend to: "sentry", "datadog", "grafana", "axiom" as needed diff --git a/py/samples/prompt-demo/LICENSE b/py/samples/provider-ollama-hello/LICENSE similarity index 100% rename from py/samples/prompt-demo/LICENSE rename to py/samples/provider-ollama-hello/LICENSE diff --git a/py/samples/provider-ollama-hello/README.md b/py/samples/provider-ollama-hello/README.md new file mode 100644 index 0000000000..419b5e95dd --- /dev/null +++ b/py/samples/provider-ollama-hello/README.md @@ -0,0 +1,77 @@ +# Ollama Hello World + +Local LLM inference, tools, vision, and embeddings with Genkit — all running 
+privately on your machine via [Ollama](https://ollama.com/).
+
+## Prerequisites
+
+- **Ollama** installed and running locally.
+
+## Quick Start
+
+The `run.sh` script handles everything — it installs dependencies, pulls
+required models, and starts the Dev UI:
+
+```bash
+./run.sh
+```
+
+## Manual Setup
+
+1. **Install Ollama**: Download from [ollama.com](https://ollama.com/download).
+2. **Start the server**:
+   ```bash
+   ollama serve
+   ```
+3. **Pull models**:
+   ```bash
+   ollama pull gemma3:latest       # General generation
+   ollama pull mistral-nemo:latest # Tool calling
+   ollama pull llava:latest        # Vision / image description
+   ollama pull moondream:v2        # Object detection
+   ollama pull nomic-embed-text    # Embeddings for RAG
+   ```
+4. **Run the sample**:
+   ```bash
+   genkit start -- uv run src/main.py
+   ```
+
+## Testing This Demo
+
+1. **Open DevUI** at http://localhost:4000
+
+2. **Test basic flows** (uses gemma3):
+   - [ ] `generate_greeting` — Simple generation
+   - [ ] `generate_with_system_prompt` — System prompt persona
+   - [ ] `generate_multi_turn_chat` — Multi-turn conversation
+   - [ ] `generate_streaming_story` — Streaming response
+   - [ ] `structured_menu_suggestion` — Structured output (MenuSuggestion)
+   - [ ] `generate_character` — Structured output (RpgCharacter)
+   - [ ] `generate_code` — Code generation
+
+3. **Test tools** (uses mistral-nemo):
+   - [ ] `calculate_gablorken` — Tool calling demo
+   - [ ] `convert_currency` — Currency conversion tool
+   - [ ] `generate_weather` — Weather tool
+   - [ ] `generate_streaming_with_tools` — Streaming with tool calls
+
+4. **Test vision** (uses llava and moondream):
+   - [ ] `describe_image` — Image description from URL (llava)
+   - [ ] `detect_objects` — Object detection with bounding boxes (moondream:v2)
+
+5. **Test embeddings & RAG** (uses nomic-embed-text + gemma3):
+   - [ ] `Pokedex` — Ask questions about Pokemon using local RAG
+
+6. **Example queries for Pokedex**:
+   - "Tell me about fire-type Pokemon"
+   - "Which Pokemon can fly?"
+   - "What's the strongest water Pokemon?"
+
+7. **Notes**:
+   - gemma2:latest does NOT support tool calling; use mistral-nemo
+   - Vision requires `ollama pull llava` (and `ollama pull moondream:v2` for `detect_objects`)
+   - First run may be slow (model loading into memory)
+   - All processing happens locally — no API calls
+
+8. **Expected behavior**:
+   - Responses generated locally (no external requests)
+   - Streaming shows incremental output
+   - Tools work with compatible models only
+   - Vision describes images accurately
+   - Embeddings computed locally, similarity search finds relevant Pokemon
+   - RAG combines retrieval with generation
diff --git a/py/samples/ollama-hello/pyproject.toml b/py/samples/provider-ollama-hello/pyproject.toml
similarity index 98%
rename from py/samples/ollama-hello/pyproject.toml
rename to py/samples/provider-ollama-hello/pyproject.toml
index acb4052e2e..74e2205484 100644
--- a/py/samples/ollama-hello/pyproject.toml
+++ b/py/samples/provider-ollama-hello/pyproject.toml
@@ -42,7 +42,7 @@ dependencies = [
 ]
 description = "Ollama hello sample"
 license = "Apache-2.0"
-name = "ollama-hello"
+name = "provider-ollama-hello"
 readme = "README.md"
 requires-python = ">=3.10"
 version = "0.1.0"
diff --git a/py/samples/ollama-hello/run.sh b/py/samples/provider-ollama-hello/run.sh
similarity index 66%
rename from py/samples/ollama-hello/run.sh
rename to py/samples/provider-ollama-hello/run.sh
index b106ece055..2e74a1cb78 100755
--- a/py/samples/ollama-hello/run.sh
+++ b/py/samples/provider-ollama-hello/run.sh
@@ -5,7 +5,7 @@
 # Ollama Hello World Demo
 # =======================
 #
-# Demonstrates usage of Ollama local models with Genkit.
+# Demonstrates local LLM inference, tools, vision, and embeddings with Genkit.
# # Prerequisites: # - Ollama installed and running locally @@ -19,6 +19,14 @@ set -euo pipefail cd "$(dirname "$0")" source "../_common.sh" +# Models used by this sample +MODELS=( + "gemma3:latest" # General generation & structured output + "mistral-nemo:latest" # Tool calling (gablorken, currency, weather) + "llava:latest" # Vision / image description + "nomic-embed-text" # Embeddings for RAG +) + check_ollama() { if ! command -v ollama &> /dev/null; then echo -e "${RED}Error: Ollama not found${NC}" @@ -35,6 +43,16 @@ check_ollama() { fi } +pull_models() { + echo "" + echo "Pulling required models..." + for model in "${MODELS[@]}"; do + echo -e " Pulling ${CYAN}${model}${NC}..." + ollama pull "$model" 2>/dev/null || echo -e " ${YELLOW}⚠${NC} Could not pull ${model} (will retry on first use)" + done + echo "" +} + print_help() { print_banner "Ollama Hello World" "🦙" echo "Usage: ./run.sh [options]" @@ -45,7 +63,11 @@ print_help() { echo "Prerequisites:" echo " - Ollama installed: https://ollama.com/download" echo " - Ollama running: ollama serve" - echo " - Model pulled: ollama pull gemma3:4b" + echo "" + echo "Models (auto-pulled on first run):" + for model in "${MODELS[@]}"; do + echo " - $model" + done print_help_footer } @@ -59,6 +81,7 @@ esac print_banner "Ollama Hello World" "🦙" check_ollama || true +pull_models install_deps diff --git a/py/samples/provider-ollama-hello/src/main.py b/py/samples/provider-ollama-hello/src/main.py new file mode 100755 index 0000000000..f397ad8b77 --- /dev/null +++ b/py/samples/provider-ollama-hello/src/main.py @@ -0,0 +1,588 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +"""Ollama sample - Local LLM inference, tools, vision, and embeddings. + +This sample demonstrates how to use Ollama for local AI with Genkit, covering +generation, streaming, tool calling, multimodal vision, structured output, +and embedding-based RAG — all without external API dependencies. + +See README.md for setup and testing instructions. + +Key Concepts (ELI5):: + + ┌─────────────────────┬────────────────────────────────────────────────────┐ + │ Concept │ ELI5 Explanation │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ Ollama │ Software that runs AI on YOUR computer. No cloud │ + │ │ needed - your data stays private! │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ Local LLM │ An AI that runs offline on your machine. │ + │ │ Like having a mini ChatGPT at home. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ Gemma │ Google's open-source model. Free to run locally. │ + │ │ Good for general tasks and coding help. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ Mistral │ Another open-source model. Good at reasoning │ + │ │ and supports tool calling. 
│ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ LLaVA │ A vision model that understands images locally. │ + │ │ Describe photos without uploading them anywhere. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ Moondream │ A tiny but capable vision model for object │ + │ │ detection. Great for bounding box tasks. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ Embeddings │ Convert text to numbers so AI can compare them. │ + │ │ "Pikachu" → [0.2, -0.5, 0.8, ...] │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ RAG │ Retrieval Augmented Generation: find relevant │ + │ │ context, then ask the LLM to answer from it. │ + ├─────────────────────┼────────────────────────────────────────────────────┤ + │ Vector Similarity │ Find similar items by comparing number arrays. │ + │ │ "electric mouse" finds Pikachu! │ + └─────────────────────┴────────────────────────────────────────────────────┘ + +Key Features +============ +| Feature Description | Flow / Function | +|----------------------------------------------------------|------------------------------| +| Simple Generation (Prompt String) | ``generate_greeting`` | +| System Prompts | ``generate_with_system_prompt`` | +| Multi-turn Conversations (``messages``) | ``generate_multi_turn_chat`` | +| Streaming Generation | ``generate_streaming_story`` | +| Structured Output (Simple) | ``structured_menu_suggestion``| +| Structured Output (Complex / Nested) | ``generate_character`` | +| Tool Calling | ``calculate_gablorken`` | +| Tool Calling (Currency) | ``convert_currency`` | +| Tool Calling (Weather) | ``generate_weather`` | +| Multimodal Vision (Image Input) | ``describe_image`` | +| Object Detection (Bounding Boxes) | ``detect_objects`` | +| Code Generation | ``generate_code`` | +| Local Embeddings | ``embed_pokemons`` | +| Vector Similarity Search | ``pokedex`` | +""" + +from math import sqrt + +from pydantic import BaseModel, Field + +from genkit.ai import Genkit, Output +from genkit.blocks.model import GenerateResponseWrapper +from genkit.core.action import ActionRunContext +from genkit.core.logging import get_logger +from genkit.core.typing import Media, MediaPart, Part, TextPart +from genkit.plugins.ollama import Ollama, ollama_name +from genkit.plugins.ollama.embedders import EmbeddingDefinition +from genkit.plugins.ollama.models import ModelDefinition +from samples.shared import ( + CharacterInput, + CodeInput, + CurrencyExchangeInput, + GreetingInput, + ImageDescribeInput, + MultiTurnInput, + RpgCharacter, + StreamingToolInput, + StreamInput, + SystemPromptInput, + WeatherInput, + convert_currency as _convert_currency_tool, + convert_currency_logic, + describe_image_logic, + generate_character_logic, + generate_code_logic, + generate_greeting_logic, + generate_multi_turn_chat_logic, + generate_streaming_story_logic, + generate_streaming_with_tools_logic, + generate_weather_logic, + generate_with_system_prompt_logic, + get_weather, + setup_sample, +) + +setup_sample() + +logger = get_logger(__name__) + +# Pull models with: ollama pull +GEMMA_MODEL = 'gemma3:latest' + +# gemma2:latest does NOT support tool calling — use mistral-nemo instead. +MISTRAL_MODEL = 'mistral-nemo:latest' + +# Vision models: llava and moondream support image understanding locally. +LLAVA_MODEL = 'llava:latest' +MOONDREAM_MODEL = 'moondream:v2' + +# Embedding model for RAG. 
+EMBEDDER_MODEL = 'nomic-embed-text' + +ai = Genkit( + plugins=[ + Ollama( + models=[ + ModelDefinition(name=GEMMA_MODEL), + ModelDefinition(name=MISTRAL_MODEL), + ModelDefinition(name=LLAVA_MODEL), + ModelDefinition(name=MOONDREAM_MODEL), + ], + embedders=[ + EmbeddingDefinition(name=EMBEDDER_MODEL, dimensions=512), + ], + ) + ], + model=ollama_name(GEMMA_MODEL), +) + +ai.tool()(get_weather) +ai.tool()(_convert_currency_tool) + + +class GablorkenInput(BaseModel): + """Input model for the gablorken tool function. + + Attributes: + value: The value to calculate gablorken for. + """ + + value: int = Field(description='value to calculate gablorken for') + + +class GablorkenOutputSchema(BaseModel): + """Gablorken output schema. + + Args: + result: The result of the gablorken. + """ + + result: int + + +class MenuSuggestion(BaseModel): + """A suggested menu item from a themed restaurant. + + Demonstrates structured output with multiple field types: strings, + numbers, lists, and booleans — matching the Genkit documentation + example for structured output. + """ + + name: str = Field(description='The name of the menu item') + description: str = Field(description='A short, appetizing description') + price: float = Field(description='Estimated price in USD') + allergens: list[str] = Field(description='Known allergens (e.g., nuts, dairy, gluten)') + is_vegetarian: bool = Field(description='Whether the item is vegetarian') + + +class MenuSuggestionInput(BaseModel): + """Input for structured menu suggestion flow.""" + + theme: str = Field(default='pirate', description='Restaurant theme (e.g., pirate, space, medieval)') + + +class GablorkenFlowInput(BaseModel): + """Input for gablorken calculation flow.""" + + value: int = Field(default=33, description='Value to calculate gablorken for') + + +class PokemonInfo(BaseModel): + """Information about a Pokemon for the embedding demo.""" + + name: str + description: str + embedding: list[float] | None = None + + +class PokemonFlowInput(BaseModel): + """Input for Pokemon RAG flow.""" + + question: str = Field(default='Who is the best water pokemon?', description='Question about Pokemon') + + +@ai.tool() +def gablorken_tool(input: GablorkenInput) -> int: + """Calculate a gablorken.""" + return input.value * 3 - 5 + + +@ai.flow() +async def generate_greeting(input: GreetingInput) -> str: + """Generate a simple greeting. + + Args: + input: Input with name to greet. + + Returns: + The greeting message. + """ + return await generate_greeting_logic(ai, input.name) + + +@ai.flow() +async def generate_with_system_prompt(input: SystemPromptInput) -> str: + """Demonstrate system prompts to control model persona and behavior. + + System prompts give the model instructions about how to respond, such as + adopting a specific persona, tone, or response format. + + See: https://genkit.dev/docs/models#system-prompts + + Args: + input: Input with a question to ask. + + Returns: + The model's response in the persona defined by the system prompt. + """ + return await generate_with_system_prompt_logic(ai, input.question) + + +@ai.flow() +async def generate_multi_turn_chat(input: MultiTurnInput) -> str: + """Demonstrate multi-turn conversations using the messages parameter. + + The messages parameter allows you to pass a conversation history to + maintain context across multiple interactions with the model. Each + message has a role ('user' or 'model') and content. 
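+
+    A sketch of the pattern the shared helper presumably implements
+    (the inline version lives in the model-garden sample; ``Message``
+    and ``Role`` imports are assumed)::
+
+        first = Message(role=Role.USER, content=[Part(root=TextPart(text='I want to visit Japan.'))])
+        response1 = await ai.generate(messages=[first])
+        follow_up = Message(role=Role.USER, content=[Part(root=TextPart(text='What should I pack?'))])
+        response2 = await ai.generate(messages=[*response1.messages, follow_up])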
+ + See: https://genkit.dev/docs/models#multi-turn-conversations-with-messages + + Args: + input: Input with a travel destination. + + Returns: + The model's final response, demonstrating context retention. + """ + return await generate_multi_turn_chat_logic(ai, input.destination) + + +@ai.flow() +async def structured_menu_suggestion(input: MenuSuggestionInput) -> MenuSuggestion: + """Suggest a themed menu item using structured output. + + Demonstrates Genkit's structured output feature: the model returns + data conforming to a Pydantic schema with multiple field types + (str, float, list, bool) rather than free-form text. + + See: https://genkit.dev/docs/models#structured-output + + Args: + input: Input with restaurant theme. + + Returns: + A MenuSuggestion with name, description, price, allergens, etc. + """ + response = await ai.generate( + prompt=f'Suggest a menu item for a {input.theme}-themed restaurant.', + output=Output(schema=MenuSuggestion), + ) + return response.output + + +@ai.flow() +async def generate_streaming_story( + input: StreamInput, + ctx: ActionRunContext | None = None, +) -> str: + """Generate a streaming story response. + + Args: + input: Input with name for streaming. + ctx: the context of the tool + + Returns: + The complete response text. + """ + return await generate_streaming_story_logic(ai, input.name, ctx) + + +@ai.flow() +async def generate_character(input: CharacterInput) -> RpgCharacter: + """Generate an RPG character with structured output. + + Args: + input: Input with character name. + + Returns: + The generated RPG character. + """ + return await generate_character_logic(ai, input.name) + + +@ai.flow() +async def generate_code(input: CodeInput) -> str: + """Generate code using local Ollama models. + + Args: + input: Input with coding task description. + + Returns: + Generated code. + """ + return await generate_code_logic(ai, input.task) + + +@ai.flow() +async def calculate_gablorken(input: GablorkenFlowInput) -> str: + """Use the gablorken_tool to calculate a gablorken value. + + Args: + input: Input with value for gablorken calculation. + + Returns: + The gablorken result. + """ + response = await ai.generate( + prompt=f'Use the gablorken_tool to calculate the gablorken of {input.value}', + model=ollama_name(MISTRAL_MODEL), + tools=['gablorken_tool'], + ) + return response.text + + +@ai.flow() +async def convert_currency(input: CurrencyExchangeInput) -> str: + """Convert currency using tool calling. + + Args: + input: Currency exchange parameters. + + Returns: + Conversion result. + """ + return await convert_currency_logic(ai, input, model=ollama_name(MISTRAL_MODEL)) + + +@ai.flow() +async def generate_weather(input: WeatherInput) -> str: + """Get weather information using tool calling. + + Args: + input: Input with location for weather. + + Returns: + Weather information for the location. + """ + return await generate_weather_logic(ai, input, model=ollama_name(MISTRAL_MODEL)) + + +@ai.flow() +async def describe_image(input: ImageDescribeInput) -> str: + """Describe an image using a local vision model (llava). + + Uses the llava model for local, private image understanding. + Requires: ``ollama pull llava`` before running. + + The Ollama plugin handles MediaPart by downloading the image URL + client-side and converting it to an Ollama Image object. + + Args: + input: Input with image URL to describe. + + Returns: + A textual description of the image. 
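+
+    A sketch of the multimodal prompt the helper presumably builds (the
+    same ``Part``/``MediaPart`` pattern used by ``detect_objects`` below)::
+
+        response = await ai.generate(
+            model=ollama_name(LLAVA_MODEL),
+            prompt=[
+                Part(root=TextPart(text='Describe this image in detail')),
+                Part(root=MediaPart(media=Media(url=image_url, content_type='image/jpeg'))),
+            ],
+        )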
+ """ + return await describe_image_logic(ai, input.image_url, model=ollama_name(LLAVA_MODEL)) + + +class ObjectDetectionInput(BaseModel): + """Input for object detection with bounding boxes.""" + + image_url: str = Field( + default='https://upload.wikimedia.org/wikipedia/commons/thumb/4/47/PNG_transparency_demonstration_1.png/280px-PNG_transparency_demonstration_1.png', + description='URL of the image to detect objects in', + ) + prompt: str = Field( + default='Detect the objects in this image and return bounding boxes.', + description='Detection prompt', + ) + + +@ai.flow() +async def detect_objects(input: ObjectDetectionInput) -> str: + """Detect objects in an image using moondream:v2. + + Uses the moondream vision model for lightweight, local object detection. + Moondream is a tiny but capable vision model that excels at describing + image content and returning bounding box coordinates. + + Requires: ``ollama pull moondream:v2`` before running. + + Args: + input: Input with image URL and detection prompt. + + Returns: + Detection results with bounding box coordinates. + """ + response = await ai.generate( + model=ollama_name(MOONDREAM_MODEL), + prompt=[ + Part(root=TextPart(text=input.prompt)), + Part(root=MediaPart(media=Media(url=input.image_url, content_type='image/png'))), + ], + ) + return response.text + + +pokemon_list = [ + PokemonInfo( + name='Pikachu', + description='An Electric-type Pokemon known for its strong electric attacks.', + ), + PokemonInfo( + name='Charmander', + description='A Fire-type Pokemon that evolves into the powerful Charizard.', + ), + PokemonInfo( + name='Bulbasaur', + description='A Grass/Poison-type Pokemon that grows into a powerful Venusaur.', + ), + PokemonInfo( + name='Squirtle', + description='A Water-type Pokemon known for its water-based attacks and high defense.', + ), + PokemonInfo( + name='Jigglypuff', + description='A Normal/Fairy-type Pokemon with a hypnotic singing ability.', + ), +] + + +def cosine_distance(a: list[float], b: list[float]) -> float: + """Calculate the cosine distance between two vectors. + + Args: + a: The first vector. + b: The second vector. + + Returns: + The cosine distance (0 = identical, 2 = opposite). + """ + if len(a) != len(b): + raise ValueError('Input vectors must have the same length') + dot_product = sum(ai_val * bi_val for ai_val, bi_val in zip(a, b, strict=True)) + magnitude_a = sqrt(sum(ai_val * ai_val for ai_val in a)) + magnitude_b = sqrt(sum(bi_val * bi_val for bi_val in b)) + + if magnitude_a == 0 or magnitude_b == 0: + raise ValueError('Invalid input: zero vector') + + return 1 - (dot_product / (magnitude_a * magnitude_b)) + + +async def embed_pokemons() -> None: + """Embed all Pokemon descriptions using the local embedding model.""" + embeddings = await ai.embed_many( + embedder=ollama_name(EMBEDDER_MODEL), + content=[pokemon.description for pokemon in pokemon_list], + ) + for pokemon, embedding in zip(pokemon_list, embeddings, strict=True): + pokemon.embedding = embedding.embedding + + +def find_nearest_pokemons(input_embedding: list[float], top_n: int = 3) -> list[PokemonInfo]: + """Find the nearest Pokemon by cosine similarity. + + Args: + input_embedding: The query embedding. + top_n: Number of results to return. + + Returns: + The most similar Pokemon. 
+ """ + if any(pokemon.embedding is None for pokemon in pokemon_list): + raise AttributeError('Some Pokemon are not yet embedded') + + pokemon_distances = [] + for pokemon in pokemon_list: + if pokemon.embedding is not None: + distance = cosine_distance(input_embedding, pokemon.embedding) + pokemon_distances.append((distance, pokemon)) + + pokemon_distances.sort(key=lambda item: item[0]) + return [pokemon for _distance, pokemon in pokemon_distances[:top_n]] + + +async def generate_rag_response(question: str) -> GenerateResponseWrapper: + """Generate a RAG response: embed the question, find context, generate. + + Args: + question: The user's question. + + Returns: + The model's response with retrieved context. + """ + input_embedding = await ai.embed( + embedder=ollama_name(EMBEDDER_MODEL), + content=question, + ) + nearest_pokemon = find_nearest_pokemons(input_embedding[0].embedding) + pokemons_context = '\n'.join(f'{pokemon.name}: {pokemon.description}' for pokemon in nearest_pokemon) + + return await ai.generate( + model=ollama_name(GEMMA_MODEL), + prompt=f'Given the following context on Pokemon:\n{pokemons_context}\n\nQuestion: {question}\n\nAnswer:', + ) + + +@ai.flow(name='Pokedex') +async def pokedex(input: PokemonFlowInput) -> str: + """Answer Pokemon questions using local RAG (embed → retrieve → generate). + + Args: + input: A question about Pokemon. + + Returns: + The generated answer. + """ + await embed_pokemons() + response = await generate_rag_response(question=input.question) + if not response.message or not response.message.content: + raise ValueError('No message content returned from model') + text = response.message.content[0].root.text + return str(text) if text is not None else '' + + +@ai.flow() +async def generate_streaming_with_tools( + input: StreamingToolInput, + ctx: ActionRunContext | None = None, +) -> str: + """Demonstrate streaming generation with tool calling. + + The model streams its response while also calling tools mid-generation. + Tool calls are resolved automatically and the model continues generating. + + Args: + input: Input with location for weather lookup. + ctx: Action context for streaming chunks to the client. + + Returns: + The complete generated text. + """ + return await generate_streaming_with_tools_logic(ai, input.location, ctx, model=ollama_name(MISTRAL_MODEL)) + + +async def main() -> None: + """Main function.""" + await logger.ainfo(await generate_greeting(GreetingInput(name='John Doe'))) + await logger.ainfo(str(await structured_menu_suggestion(MenuSuggestionInput(theme='pirate')))) + await logger.ainfo(await calculate_gablorken(GablorkenFlowInput(value=33))) + await logger.ainfo(await generate_weather(WeatherInput(location='San Francisco'))) + + +if __name__ == '__main__': + ai.run_main(main()) diff --git a/py/samples/realtime-tracing-demo/LICENSE b/py/samples/provider-vertex-ai-model-garden/LICENSE similarity index 100% rename from py/samples/realtime-tracing-demo/LICENSE rename to py/samples/provider-vertex-ai-model-garden/LICENSE diff --git a/py/samples/model-garden/README.md b/py/samples/provider-vertex-ai-model-garden/README.md similarity index 97% rename from py/samples/model-garden/README.md rename to py/samples/provider-vertex-ai-model-garden/README.md index 54e1afaee1..8d5eedadd1 100644 --- a/py/samples/model-garden/README.md +++ b/py/samples/provider-vertex-ai-model-garden/README.md @@ -48,7 +48,7 @@ genkit start -- uv run src/main.py 3. 
**Run the demo**: ```bash - cd py/samples/model-garden + cd py/samples/provider-vertex-ai-model-garden ./run.sh ``` diff --git a/py/samples/model-garden/pyproject.toml b/py/samples/provider-vertex-ai-model-garden/pyproject.toml similarity index 97% rename from py/samples/model-garden/pyproject.toml rename to py/samples/provider-vertex-ai-model-garden/pyproject.toml index be60e30e12..cc252b6fb7 100644 --- a/py/samples/model-garden/pyproject.toml +++ b/py/samples/provider-vertex-ai-model-garden/pyproject.toml @@ -39,7 +39,7 @@ dependencies = [ ] description = "Model Garden sample" license = "Apache-2.0" -name = "model-garden" +name = "provider-vertex-ai-model-garden" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/model-garden/run.sh b/py/samples/provider-vertex-ai-model-garden/run.sh similarity index 100% rename from py/samples/model-garden/run.sh rename to py/samples/provider-vertex-ai-model-garden/run.sh diff --git a/py/samples/model-garden/src/main.py b/py/samples/provider-vertex-ai-model-garden/src/main.py similarity index 73% rename from py/samples/model-garden/src/main.py rename to py/samples/provider-vertex-ai-model-garden/src/main.py index 6f0aff24ec..d320e8228e 100644 --- a/py/samples/model-garden/src/main.py +++ b/py/samples/provider-vertex-ai-model-garden/src/main.py @@ -47,6 +47,13 @@ |-----------------------------------------|-------------------------------------| | Model Garden Plugin | `ModelGardenPlugin()` | | Specific Model Usage | `model_garden_name('anthropic/...')`| +| Simple Generation | `say_hi` | +| System Prompt | `system_prompt` | +| Multi-turn Conversation | `multi_turn_chat` | +| Streaming Generation | `say_hi_stream` | +| Tool Calling | `weather_flow` | +| Structured Output | `generate_character` | +| Streaming Structured Output | `streaming_structured_output` | | Generation Config | `max_output_tokens`, `temperature` | See README.md for testing instructions. @@ -56,15 +63,16 @@ import os from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback from genkit.ai import Genkit, Output from genkit.core.action import ActionRunContext from genkit.core.logging import get_logger from genkit.plugins.google_genai import VertexAI from genkit.plugins.vertex_ai.model_garden import ModelGardenPlugin, model_garden_name +from genkit.types import Message, Part, Role, TextPart +from samples.shared.logging import setup_sample -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() logger = get_logger(__name__) @@ -138,6 +146,18 @@ class SayHiInput(BaseModel): name: str = Field(default='Mittens', description='Name to greet') +class SystemPromptInput(BaseModel): + """Input for system_prompt flow.""" + + question: str = Field(default='What is your quest?', description='Question to ask') + + +class MultiTurnInput(BaseModel): + """Input for multi_turn_chat flow.""" + + destination: str = Field(default='Japan', description='Travel destination') + + class StreamInput(BaseModel): """Input for streaming flow.""" @@ -341,6 +361,112 @@ async def weather_flow(input: WeatherFlowInput) -> str: return response.text +@ai.flow() +async def system_prompt(input: SystemPromptInput) -> str: + """Demonstrate system prompts to control model persona and behavior. + + System prompts give the model instructions about how to respond, such as + adopting a specific persona, tone, or response format. 
+ + See: https://genkit.dev/docs/models#system-prompts + + Args: + input: Input with a question to ask. + + Returns: + The model's response in the persona defined by the system prompt. + """ + response = await ai.generate( + model=model_garden_name('anthropic/claude-3-5-sonnet-v2@20241022'), + prompt=input.question, + system='You are a pirate captain from the 18th century. Always respond in character, ' + 'using pirate slang and nautical terminology.', + ) + return response.text + + +@ai.flow() +async def multi_turn_chat(input: MultiTurnInput) -> str: + """Demonstrate multi-turn conversations using the messages parameter. + + The messages parameter allows you to pass a conversation history to + maintain context across multiple interactions with the model. Each + message has a role ('user' or 'model') and content. + + See: https://genkit.dev/docs/models#multi-turn-conversations-with-messages + + Args: + input: Input with a travel destination. + + Returns: + The model's final response, demonstrating context retention. + """ + model = model_garden_name('anthropic/claude-3-5-sonnet-v2@20241022') + + # Turn 1: Start the conversation + response1 = await ai.generate( + model=model, + system='You are a helpful travel assistant.', + messages=[ + Message( + role=Role.USER, + content=[Part(root=TextPart(text=f'I want to visit {input.destination} for two weeks in spring.'))], + ), + ], + ) + + # Turn 2: Follow-up question that requires context from turn 1 + response2 = await ai.generate( + model=model, + system='You are a helpful travel assistant.', + messages=[ + *response1.messages, + Message( + role=Role.USER, + content=[Part(root=TextPart(text='What should I pack for that trip?'))], + ), + ], + ) + return response2.text + + +@ai.flow() +async def streaming_structured_output( + input: CharacterInput, + ctx: ActionRunContext | None = None, +) -> RpgCharacter: + """Demonstrate streaming with structured output schemas. + + Combines `generate_stream` with `Output(schema=...)` so the model + streams JSON tokens that are progressively parsed into the Pydantic + model. Each chunk exposes a partial `.output` you can forward to + clients for incremental rendering. + + See: https://genkit.dev/docs/models#streaming + + Args: + input: Input with character name. + ctx: Action context for streaming partial outputs. + + Returns: + The fully-parsed RPG character once streaming completes. + """ + stream, result = ai.generate_stream( + model=model_garden_name('anthropic/claude-3-5-sonnet-v2@20241022'), + prompt=( + f'Generate an RPG character named {input.name}. ' + 'Include a creative backstory, 3-4 unique abilities, ' + 'and skill ratings for strength, charisma, and endurance (0-100 each).' + ), + output=Output(schema=RpgCharacter), + ) + async for chunk in stream: + if ctx is not None: + ctx.send_chunk(chunk.output) + + return (await result).output + + async def main() -> None: """Main entry point for the Model Garden sample - keep alive for Dev UI.""" await logger.ainfo('Genkit server running. 
Press Ctrl+C to stop.') diff --git a/py/samples/vertexai-rerank-eval/LICENSE b/py/samples/provider-vertex-ai-rerank-eval/LICENSE similarity index 100% rename from py/samples/vertexai-rerank-eval/LICENSE rename to py/samples/provider-vertex-ai-rerank-eval/LICENSE diff --git a/py/samples/vertexai-rerank-eval/README.md b/py/samples/provider-vertex-ai-rerank-eval/README.md similarity index 100% rename from py/samples/vertexai-rerank-eval/README.md rename to py/samples/provider-vertex-ai-rerank-eval/README.md diff --git a/py/samples/vertexai-rerank-eval/pyproject.toml b/py/samples/provider-vertex-ai-rerank-eval/pyproject.toml similarity index 97% rename from py/samples/vertexai-rerank-eval/pyproject.toml rename to py/samples/provider-vertex-ai-rerank-eval/pyproject.toml index 18c632c381..2e62b32818 100644 --- a/py/samples/vertexai-rerank-eval/pyproject.toml +++ b/py/samples/provider-vertex-ai-rerank-eval/pyproject.toml @@ -42,7 +42,7 @@ dependencies = [ ] description = "Vertex AI Rerankers and Evaluators Demo" license = "Apache-2.0" -name = "vertexai-rerank-eval" +name = "provider-vertex-ai-rerank-eval" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/vertexai-rerank-eval/run.sh b/py/samples/provider-vertex-ai-rerank-eval/run.sh similarity index 100% rename from py/samples/vertexai-rerank-eval/run.sh rename to py/samples/provider-vertex-ai-rerank-eval/run.sh diff --git a/py/samples/vertexai-rerank-eval/src/main.py b/py/samples/provider-vertex-ai-rerank-eval/src/main.py similarity index 99% rename from py/samples/vertexai-rerank-eval/src/main.py rename to py/samples/provider-vertex-ai-rerank-eval/src/main.py index ea6acb47de..7e70015402 100644 --- a/py/samples/vertexai-rerank-eval/src/main.py +++ b/py/samples/provider-vertex-ai-rerank-eval/src/main.py @@ -36,6 +36,9 @@ from genkit.blocks.document import Document from genkit.core.typing import BaseDataPoint, DocumentData, Score from genkit.plugins.google_genai import VertexAI +from samples.shared.logging import setup_sample + +setup_sample() logger = structlog.get_logger(__name__) diff --git a/py/samples/short-n-long/LICENSE b/py/samples/provider-vertex-ai-vector-search-bigquery/LICENSE similarity index 100% rename from py/samples/short-n-long/LICENSE rename to py/samples/provider-vertex-ai-vector-search-bigquery/LICENSE diff --git a/py/samples/vertex-ai-vector-search-bigquery/README.md b/py/samples/provider-vertex-ai-vector-search-bigquery/README.md similarity index 98% rename from py/samples/vertex-ai-vector-search-bigquery/README.md rename to py/samples/provider-vertex-ai-vector-search-bigquery/README.md index 746928d099..2b492ae48a 100644 --- a/py/samples/vertex-ai-vector-search-bigquery/README.md +++ b/py/samples/provider-vertex-ai-vector-search-bigquery/README.md @@ -83,7 +83,7 @@ This index must be created with update method set as `stream`. VertexAI Index is 3. 
**Run the demo**: ```bash - cd py/samples/vertex-ai-vector-search-bigquery + cd py/samples/provider-vertex-ai-vector-search-bigquery ./run.sh ``` diff --git a/py/samples/vertex-ai-vector-search-bigquery/pyproject.toml b/py/samples/provider-vertex-ai-vector-search-bigquery/pyproject.toml similarity index 97% rename from py/samples/vertex-ai-vector-search-bigquery/pyproject.toml rename to py/samples/provider-vertex-ai-vector-search-bigquery/pyproject.toml index 256f3496ed..df900ef8a5 100644 --- a/py/samples/vertex-ai-vector-search-bigquery/pyproject.toml +++ b/py/samples/provider-vertex-ai-vector-search-bigquery/pyproject.toml @@ -46,7 +46,7 @@ dependencies = [ ] description = "An example demonstrating the use Vector Search API with BigQuery retriever for Vertex AI" license = "Apache-2.0" -name = "vertex-ai-vector-search-bigquery" +name = "provider-vertex-ai-vector-search-bigquery" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/vertex-ai-vector-search-bigquery/run.sh b/py/samples/provider-vertex-ai-vector-search-bigquery/run.sh similarity index 100% rename from py/samples/vertex-ai-vector-search-bigquery/run.sh rename to py/samples/provider-vertex-ai-vector-search-bigquery/run.sh diff --git a/py/samples/vertex-ai-vector-search-bigquery/src/main.py b/py/samples/provider-vertex-ai-vector-search-bigquery/src/main.py similarity index 98% rename from py/samples/vertex-ai-vector-search-bigquery/src/main.py rename to py/samples/provider-vertex-ai-vector-search-bigquery/src/main.py index 7743541db6..9cc0bfe46b 100755 --- a/py/samples/vertex-ai-vector-search-bigquery/src/main.py +++ b/py/samples/provider-vertex-ai-vector-search-bigquery/src/main.py @@ -108,7 +108,7 @@ 3. **Run the demo**: ```bash - cd py/samples/vertex-ai-vector-search-bigquery + cd py/samples/provider-vertex-ai-vector-search-bigquery ./run.sh ``` @@ -131,15 +131,15 @@ from google.cloud import aiplatform, bigquery from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback from genkit.ai import Genkit from genkit.blocks.document import Document from genkit.core.logging import get_logger from genkit.plugins.google_genai import VertexAI from genkit.plugins.vertex_ai import define_vertex_vector_search_big_query +from samples.shared.logging import setup_sample -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() LOCATION = os.getenv('LOCATION') PROJECT_ID = os.getenv('PROJECT_ID') diff --git a/py/samples/vertex-ai-vector-search-bigquery/src/setup_env.py b/py/samples/provider-vertex-ai-vector-search-bigquery/src/setup_env.py similarity index 100% rename from py/samples/vertex-ai-vector-search-bigquery/src/setup_env.py rename to py/samples/provider-vertex-ai-vector-search-bigquery/src/setup_env.py diff --git a/py/samples/tool-interrupts/LICENSE b/py/samples/provider-vertex-ai-vector-search-firestore/LICENSE similarity index 100% rename from py/samples/tool-interrupts/LICENSE rename to py/samples/provider-vertex-ai-vector-search-firestore/LICENSE diff --git a/py/samples/vertex-ai-vector-search-firestore/README.md b/py/samples/provider-vertex-ai-vector-search-firestore/README.md similarity index 98% rename from py/samples/vertex-ai-vector-search-firestore/README.md rename to py/samples/provider-vertex-ai-vector-search-firestore/README.md index 7a2c27b525..43a3a80d8c 100644 --- a/py/samples/vertex-ai-vector-search-firestore/README.md +++ b/py/samples/provider-vertex-ai-vector-search-firestore/README.md @@ -76,7 +76,7 @@ genkit 
start -- uv run src/main.py 3. **Run the demo**: ```bash - cd py/samples/vertex-ai-vector-search-firestore + cd py/samples/provider-vertex-ai-vector-search-firestore ./run.sh ``` diff --git a/py/samples/vertex-ai-vector-search-firestore/pyproject.toml b/py/samples/provider-vertex-ai-vector-search-firestore/pyproject.toml similarity index 97% rename from py/samples/vertex-ai-vector-search-firestore/pyproject.toml rename to py/samples/provider-vertex-ai-vector-search-firestore/pyproject.toml index 1a928103ae..b6367e0f08 100644 --- a/py/samples/vertex-ai-vector-search-firestore/pyproject.toml +++ b/py/samples/provider-vertex-ai-vector-search-firestore/pyproject.toml @@ -46,7 +46,7 @@ dependencies = [ ] description = "An example demonstrating the use Vector Search API with Firestore retriever for Vertex AI" license = "Apache-2.0" -name = "vertex-ai-vector-search-firestore" +name = "provider-vertex-ai-vector-search-firestore" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/vertex-ai-vector-search-firestore/run.sh b/py/samples/provider-vertex-ai-vector-search-firestore/run.sh similarity index 100% rename from py/samples/vertex-ai-vector-search-firestore/run.sh rename to py/samples/provider-vertex-ai-vector-search-firestore/run.sh diff --git a/py/samples/vertex-ai-vector-search-firestore/src/main.py b/py/samples/provider-vertex-ai-vector-search-firestore/src/main.py similarity index 98% rename from py/samples/vertex-ai-vector-search-firestore/src/main.py rename to py/samples/provider-vertex-ai-vector-search-firestore/src/main.py index ad9c9a3716..46e75f5aa6 100755 --- a/py/samples/vertex-ai-vector-search-firestore/src/main.py +++ b/py/samples/provider-vertex-ai-vector-search-firestore/src/main.py @@ -107,7 +107,7 @@ 3. **Run the demo**: ```bash - cd py/samples/vertex-ai-vector-search-firestore + cd py/samples/provider-vertex-ai-vector-search-firestore ./run.sh ``` @@ -130,7 +130,6 @@ from google.cloud import aiplatform, firestore from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback from genkit.ai import Genkit from genkit.blocks.document import Document @@ -138,8 +137,9 @@ from genkit.core.typing import RetrieverResponse from genkit.plugins.google_genai import VertexAI from genkit.plugins.vertex_ai import define_vertex_vector_search_firestore +from samples.shared.logging import setup_sample -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() LOCATION = os.environ['LOCATION'] PROJECT_ID = os.environ['PROJECT_ID'] diff --git a/py/samples/vertex-ai-vector-search-bigquery/LICENSE b/py/samples/provider-xai-hello/LICENSE similarity index 100% rename from py/samples/vertex-ai-vector-search-bigquery/LICENSE rename to py/samples/provider-xai-hello/LICENSE diff --git a/py/samples/xai-hello/README.md b/py/samples/provider-xai-hello/README.md similarity index 84% rename from py/samples/xai-hello/README.md rename to py/samples/provider-xai-hello/README.md index b53743ef6d..49ba395829 100644 --- a/py/samples/xai-hello/README.md +++ b/py/samples/provider-xai-hello/README.md @@ -38,7 +38,7 @@ export XAI_API_KEY=your_api_key_here ## Run ```bash -cd py/samples/xai-hello +cd py/samples/provider-xai-hello uv run src/main.py ``` @@ -62,7 +62,15 @@ uv run src/main.py - [ ] `weather_flow` - Weather tool calling - [ ] `calculate` - Math calculation tool -4. **Expected behavior**: +4. **Test vision**: + - [ ] `describe_image` - Image description using Grok Vision + +5. 
**Test reasoning**: + - [ ] `solve_reasoning_problem` - Chain-of-thought reasoning with Grok 4 + +6. **Expected behavior**: - Grok responds with characteristic wit - Streaming shows incremental output - Tools are invoked correctly + - Vision describes the image accurately + - Reasoning shows chain-of-thought explanation diff --git a/py/samples/xai-hello/pyproject.toml b/py/samples/provider-xai-hello/pyproject.toml similarity index 97% rename from py/samples/xai-hello/pyproject.toml rename to py/samples/provider-xai-hello/pyproject.toml index 2917d39779..3b25de0dec 100644 --- a/py/samples/xai-hello/pyproject.toml +++ b/py/samples/provider-xai-hello/pyproject.toml @@ -26,7 +26,7 @@ dependencies = [ "uvloop>=0.21.0", ] description = "xAI Hello Sample" -name = "xai-hello" +name = "provider-xai-hello" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/xai-hello/run.sh b/py/samples/provider-xai-hello/run.sh similarity index 100% rename from py/samples/xai-hello/run.sh rename to py/samples/provider-xai-hello/run.sh diff --git a/py/samples/xai-hello/src/main.py b/py/samples/provider-xai-hello/src/main.py similarity index 54% rename from py/samples/xai-hello/src/main.py rename to py/samples/provider-xai-hello/src/main.py index 7111b3071c..b238631e43 100755 --- a/py/samples/xai-hello/src/main.py +++ b/py/samples/provider-xai-hello/src/main.py @@ -48,20 +48,21 @@ |-----------------------------------------|-------------------------------------| | Plugin Initialization | `ai = Genkit(plugins=[XAI()])` | | Model Configuration | `xai_name('grok-3')` | -| Simple Text Generation | `say_hi` | -| Streaming Generation | `say_hi_stream` | +| Simple Text Generation | `generate_greeting` | +| System Prompts | `generate_with_system_prompt` | +| Multi-turn Conversations (`messages`) | `generate_multi_turn_chat` | +| Streaming Generation | `generate_streaming_story` | | Tool Usage (Decorated) | `get_weather`, `calculate` | -| Generation Configuration | `say_hi_with_config` | -| Code Generation | `code_flow` | -| Tool Calling | `weather_flow` | +| Generation Configuration | `generate_with_config` | +| Multimodal (Image Input / Vision) | `describe_image` | +| Reasoning (Chain-of-Thought) | `solve_reasoning_problem` | +| Code Generation | `generate_code` | +| Tool Calling | `generate_weather` | """ import asyncio import os -from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback - from genkit.ai import Genkit from genkit.core.action import ActionRunContext from genkit.core.logging import get_logger @@ -69,22 +70,39 @@ from genkit.plugins.xai import XAI, xai_name from samples.shared import ( CalculatorInput, + CharacterInput, + CodeInput, + ConfigInput, CurrencyExchangeInput, + GreetingInput, + ImageDescribeInput, + MultiTurnInput, + ReasoningInput, RpgCharacter, + StreamingToolInput, + StreamInput, + SystemPromptInput, WeatherInput, calculate, calculation_logic, - convert_currency, - currency_exchange_logic, + convert_currency as _convert_currency_tool, + convert_currency_logic, + describe_image_logic, generate_character_logic, + generate_code_logic, + generate_greeting_logic, + generate_multi_turn_chat_logic, + generate_streaming_story_logic, + generate_streaming_with_tools_logic, + generate_weather_logic, + generate_with_config_logic, + generate_with_system_prompt_logic, get_weather, - say_hi_logic, - say_hi_stream_logic, - say_hi_with_config_logic, - weather_logic, + setup_sample, + solve_reasoning_problem_logic, ) -install_rich_traceback(show_locals=True,
width=120, extra_lines=3) +setup_sample() if 'XAI_API_KEY' not in os.environ: os.environ['XAI_API_KEY'] = input('Please enter your XAI_API_KEY: ') @@ -103,66 +121,41 @@ ) -class SayHiInput(BaseModel): - """Input for say_hi flow.""" - - name: str = Field(default='Mittens', description='Name to greet') - - -class StreamInput(BaseModel): - """Input for streaming flow.""" - - name: str = Field(default='Shadow', description='Name for streaming story') - - -class CharacterInput(BaseModel): - """Input for character generation.""" - - name: str = Field(default='Whiskers', description='Character name') - - -class ConfigInput(BaseModel): - """Input for config flow.""" - - name: str = Field(default='Ginger', description='User name for greeting') - - -class CodeInput(BaseModel): - """Input for code generation flow.""" - - task: str = Field( - default='Write a Python function to calculate fibonacci numbers', - description='Coding task description', - ) - - # Decorated tools ai.tool()(get_weather) -ai.tool()(convert_currency) +ai.tool()(_convert_currency_tool) ai.tool()(calculate) @ai.flow() -async def currency_exchange_flow(input_data: CurrencyExchangeInput) -> str: - """Genkit entry point for the currency exchange flow. +async def convert_currency(input_data: CurrencyExchangeInput) -> str: + """Convert currency using tool calling. + + Args: + input_data: Currency exchange parameters. - Exposes conversion logic as a traceable Genkit flow. + Returns: + Conversion result. """ - return await currency_exchange_logic(ai, input_data) + return await convert_currency_logic(ai, input_data) @ai.flow() -async def calculator_flow(input_data: CalculatorInput) -> str: - """Genkit entry point for the calculator flow. +async def calculate(input_data: CalculatorInput) -> str: + """Perform a calculation using tool calling. + + Args: + input_data: Calculator parameters. - Exposes calculation logic as a traceable Genkit flow. + Returns: + Calculation result. """ return await calculation_logic(ai, input_data) @ai.flow() async def generate_character(input: CharacterInput) -> RpgCharacter: - """Generate an RPG character. + """Generate an RPG character with structured output. Args: input: Input with character name. @@ -174,7 +167,7 @@ async def generate_character(input: CharacterInput) -> RpgCharacter: @ai.flow() -async def say_hi(input: SayHiInput) -> str: +async def generate_greeting(input: GreetingInput) -> str: """Generate a simple greeting. Args: @@ -182,59 +175,81 @@ async def say_hi(input: SayHiInput) -> str: Returns: Greeting message. + """ + return await generate_greeting_logic(ai, input.name) + + +@ai.flow() +async def generate_with_system_prompt(input: SystemPromptInput) -> str: + """Demonstrate system prompts to control model persona and behavior. + + Args: + input: Input with a question to ask. + + Returns: + The model's response in the persona defined by the system prompt. + """ + return await generate_with_system_prompt_logic(ai, input.question) + + +@ai.flow() +async def generate_multi_turn_chat(input: MultiTurnInput) -> str: + """Demonstrate multi-turn conversations using the messages parameter. + + Args: + input: Input with a travel destination. - Example: - >>> await say_hi(SayHiInput(name='Alice')) - "Hello Alice!" + Returns: + The model's final response, demonstrating context retention. 
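+
+    Usage (illustrative; output varies by model)::
+
+        reply = await generate_multi_turn_chat(MultiTurnInput(destination='Japan'))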
""" - return await say_hi_logic(ai, input.name) + return await generate_multi_turn_chat_logic(ai, input.destination) @ai.flow() -async def say_hi_stream( +async def generate_streaming_story( input: StreamInput, ctx: ActionRunContext | None = None, ) -> str: """Generate a streaming story response. Args: - input: Input with name for story. - ctx: Action context for streaming. + input: Input with name for streaming story. + ctx: Action run context for streaming. Returns: - Complete story text. - - Example: - >>> await say_hi_stream(StreamInput(name='Bob'), ctx) - "Once upon a time..." + Complete generated text. """ - return await say_hi_stream_logic(ai, input.name, ctx) + return await generate_streaming_story_logic(ai, input.name, ctx) @ai.flow() -async def say_hi_with_config(input: ConfigInput) -> str: +async def generate_with_config(input: ConfigInput) -> str: """Generate a greeting with custom model configuration. Args: - input: Input with user name. + input: Input with name to greet. Returns: Greeting message. """ - return await say_hi_with_config_logic(ai, input.name) + return await generate_with_config_logic(ai, input.name) @ai.flow() -async def weather_flow(input_data: WeatherInput) -> str: - """Genkit entry point for the weather information flow. +async def generate_weather(input_data: WeatherInput) -> str: + """Get weather information using tool calling. - Exposes weather logic as a traceable Genkit flow. + Args: + input_data: Input with location to get weather for. + + Returns: + Weather information. """ - return await weather_logic(ai, input_data) + return await generate_weather_logic(ai, input_data) @ai.flow() -async def code_flow(input: CodeInput) -> str: +async def generate_code(input: CodeInput) -> str: """Generate code using Grok. Args: @@ -243,11 +258,56 @@ async def code_flow(input: CodeInput) -> str: Returns: Generated code. """ - response = await ai.generate( - prompt=input.task, - system='You are an expert programmer. Provide clean, well-documented code with explanations.', - ) - return response.text + return await generate_code_logic(ai, input.task) + + +@ai.flow() +async def describe_image(input: ImageDescribeInput) -> str: + """Describe an image using Grok 2 Vision. + + Uses grok-2-vision-1212 which supports media=True for image understanding. + The xAI gRPC SDK handles image URLs in MediaPart messages. + + Args: + input: Input with image URL to describe. + + Returns: + A textual description of the image. + """ + return await describe_image_logic(ai, input.image_url, model=xai_name('grok-2-vision-1212')) + + +@ai.flow() +async def solve_reasoning_problem(input: ReasoningInput) -> str: + """Solve reasoning problems using Grok 4. + + Grok 4 is a reasoning model that provides chain-of-thought responses. + It is registered with REASONING_MODEL_SUPPORTS in the xAI plugin. + + Args: + input: Input with reasoning question to solve. + + Returns: + The reasoning and answer. + """ + return await solve_reasoning_problem_logic(ai, input.prompt, model=xai_name('grok-4')) + + +@ai.flow() +async def generate_streaming_with_tools( + input: StreamingToolInput, + ctx: ActionRunContext | None = None, +) -> str: + """Demonstrate streaming generation with tool calling. + + Args: + input: Input with location for weather lookup. + ctx: Action context for streaming chunks to the client. + + Returns: + The complete generated text. 
+ """ + return await generate_streaming_with_tools_logic(ai, input.location, ctx) async def main() -> None: diff --git a/py/samples/sample-test/README.md b/py/samples/sample-test/README.md index e558f5c0e4..5ea91edac4 100644 --- a/py/samples/sample-test/README.md +++ b/py/samples/sample-test/README.md @@ -12,10 +12,10 @@ Reviews and tests all flows in a sample's `main.py`. ```bash # Test all flows in a sample cd py -uv run samples/sample-test/review_sample_flows.py samples/google-genai-hello +uv run samples/sample-test/review_sample_flows.py samples/provider-google-genai-hello # Specify custom output file -uv run samples/sample-test/review_sample_flows.py samples/google-genai-hello --output results.txt +uv run samples/sample-test/review_sample_flows.py samples/provider-google-genai-hello --output results.txt ``` ### `run_single_flow.py` @@ -24,7 +24,7 @@ Runs a single flow from a sample. Used internally by `review_sample_flows.py`. ```bash cd py -uv run samples/sample-test/run_single_flow.py samples/google-genai-hello flow_name --input '{"key": "value"}' +uv run samples/sample-test/run_single_flow.py samples/provider-google-genai-hello flow_name --input '{"key": "value"}' ``` ## Output diff --git a/py/samples/sample-test/review_sample_flows.py b/py/samples/sample-test/review_sample_flows.py index 8238feb3dc..8e33b05f71 100644 --- a/py/samples/sample-test/review_sample_flows.py +++ b/py/samples/sample-test/review_sample_flows.py @@ -20,7 +20,7 @@ python review_sample_flows.py Example: - python review_sample_flows.py samples/google-genai-hello + python review_sample_flows.py samples/provider-google-genai-hello """ import argparse diff --git a/py/samples/shared/__init__.py b/py/samples/shared/__init__.py index 1a8fb32722..181a411d4b 100644 --- a/py/samples/shared/__init__.py +++ b/py/samples/shared/__init__.py @@ -13,17 +13,24 @@ # limitations under the License. 
# # SPDX-License-Identifier: Apache-2.0 -"""Shared utilities and types for samples.""" +"""Shared utilities, types, and flow logic for provider samples.""" from .flows import ( calculation_logic, - currency_exchange_logic, + convert_currency_logic, + describe_image_logic, generate_character_logic, - say_hi_logic, - say_hi_stream_logic, - say_hi_with_config_logic, - weather_logic, + generate_code_logic, + generate_greeting_logic, + generate_multi_turn_chat_logic, + generate_streaming_story_logic, + generate_streaming_with_tools_logic, + generate_weather_logic, + generate_with_config_logic, + generate_with_system_prompt_logic, + solve_reasoning_problem_logic, ) +from .logging import setup_sample from .tools import ( calculate, convert_currency, @@ -31,24 +38,57 @@ ) from .types import ( CalculatorInput, + CharacterInput, + CodeInput, + ConfigInput, CurrencyExchangeInput, + GreetingInput, + ImageDescribeInput, + MultiTurnInput, + ReasoningInput, RpgCharacter, + Skills, + StreamingToolInput, + StreamInput, + SystemPromptInput, WeatherInput, ) __all__ = [ - get_weather, - convert_currency, - calculate, - weather_logic, - currency_exchange_logic, - calculation_logic, - say_hi_logic, - say_hi_stream_logic, - say_hi_with_config_logic, - WeatherInput, - CurrencyExchangeInput, - CalculatorInput, - RpgCharacter, - generate_character_logic, + # Setup + 'setup_sample', + # Tools + 'calculate', + 'convert_currency', + 'get_weather', + # Flow logic + 'calculation_logic', + 'describe_image_logic', + 'convert_currency_logic', + 'generate_character_logic', + 'generate_code_logic', + 'generate_greeting_logic', + 'generate_multi_turn_chat_logic', + 'generate_streaming_story_logic', + 'generate_streaming_with_tools_logic', + 'generate_weather_logic', + 'generate_with_config_logic', + 'generate_with_system_prompt_logic', + 'solve_reasoning_problem_logic', + # Types + 'CalculatorInput', + 'CharacterInput', + 'CodeInput', + 'ConfigInput', + 'CurrencyExchangeInput', + 'GreetingInput', + 'ImageDescribeInput', + 'MultiTurnInput', + 'ReasoningInput', + 'RpgCharacter', + 'Skills', + 'StreamInput', + 'StreamingToolInput', + 'SystemPromptInput', + 'WeatherInput', ] diff --git a/py/samples/shared/flows.py b/py/samples/shared/flows.py index a7050d480b..cbb91c7eca 100644 --- a/py/samples/shared/flows.py +++ b/py/samples/shared/flows.py @@ -14,56 +14,92 @@ # # SPDX-License-Identifier: Apache-2.0 -"""Common flows for samples.""" +"""Common flow logic for provider samples. + +Each function takes a ``Genkit`` instance (and typed inputs) so that +provider samples can delegate to them from thin ``@ai.flow()`` wrappers. +Provider-specific flow logic stays in each sample's main.py. +""" from genkit.ai import Genkit, Output from genkit.core.action import ActionRunContext +from genkit.types import Media, MediaPart, Message, Part, Role, TextPart from .types import CalculatorInput, CurrencyExchangeInput, RpgCharacter, WeatherInput -async def calculation_logic(ai: Genkit, input: CalculatorInput) -> str: - """Business logic to perform currency conversion via an LLM tool call. +async def calculation_logic(ai: Genkit, input: CalculatorInput, model: str | None = None) -> str: + """Perform a calculation via an LLM tool call. Args: ai: The initialized Genkit instance. - input: Validated currency exchange parameters. + input: Calculator parameters. + model: Optional model override (needed for providers whose default + model doesn't support tool calling, e.g. Ollama/Gemma). Returns: - Conversion result. + Calculation result. 
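+
+    Usage (illustrative; assumes the ``calculate`` tool is registered on ``ai``)::
+
+        result = await calculation_logic(ai, CalculatorInput(operation='multiply', a=6, b=7))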
""" response = await ai.generate( + model=model, prompt=f'Calculate {input.a} {input.operation} {input.b}', tools=['calculate'], ) + return response.text + +async def describe_image_logic(ai: Genkit, image_url: str, model: str | None = None) -> str: + """Describe an image using multimodal generation. + + Args: + ai: The Genkit instance. + image_url: URL of the image to describe. + model: Optional model override for provider-specific vision models. + + Returns: + A textual description of the image. + """ + response = await ai.generate( + model=model, + messages=[ + Message( + role=Role.USER, + content=[ + Part(root=TextPart(text='Describe this image in detail')), + Part(root=MediaPart(media=Media(url=image_url, content_type='image/jpeg'))), + ], + ) + ], + ) return response.text -async def currency_exchange_logic(ai: Genkit, input: CurrencyExchangeInput) -> str: - """Business logic to perform currency conversion via an LLM tool call. +async def convert_currency_logic(ai: Genkit, input: CurrencyExchangeInput, model: str | None = None) -> str: + """Convert currency using tool calling. Args: ai: The initialized Genkit instance. input: Validated currency exchange parameters. + model: Optional model override (needed for providers whose default + model doesn't support tool calling, e.g. Ollama/Gemma). Returns: Conversion result. """ response = await ai.generate( + model=model, prompt=f'Convert {input.amount} {input.from_currency} to {input.to_currency}', tools=['convert_currency'], ) - return response.text async def generate_character_logic(ai: Genkit, name: str) -> RpgCharacter: - """Generate an RPG character. + """Generate an RPG character with structured output. Args: ai: The Genkit instance. - name: The name of the character + name: The name of the character. Returns: The generated RPG character. @@ -72,11 +108,28 @@ async def generate_character_logic(ai: Genkit, name: str) -> RpgCharacter: prompt=f'Generate a structured RPG character named {name}. Output ONLY the JSON object.', output=Output(schema=RpgCharacter), ) - return result.output # type: RpgCharacter from Output(schema=RpgCharacter) + return result.output + + +async def generate_code_logic(ai: Genkit, task: str) -> str: + """Generate code for a given task. + + Args: + ai: The Genkit instance. + task: Coding task description. + + Returns: + Generated code. + """ + response = await ai.generate( + prompt=task, + system='You are an expert programmer. Provide clean, well-documented code with explanations.', + ) + return response.text -async def say_hi_logic(ai: Genkit, name: str) -> str: - """Generates a simple greeting via the AI model. +async def generate_greeting_logic(ai: Genkit, name: str) -> str: + """Generate a simple greeting. Args: ai: The Genkit instance. @@ -89,13 +142,51 @@ async def say_hi_logic(ai: Genkit, name: str) -> str: return response.text -async def say_hi_stream_logic(ai: Genkit, name: str, ctx: ActionRunContext | None) -> str: - """Generates a streaming story. +async def generate_multi_turn_chat_logic(ai: Genkit, destination: str) -> str: + """Demonstrate multi-turn conversations using the messages parameter. + + Builds a 2-turn travel conversation where the second turn requires + context from the first. + + Args: + ai: The Genkit instance. + destination: Travel destination. + + Returns: + The model's final response, demonstrating context retention. 
+ """ + response1 = await ai.generate( + system='You are a helpful travel assistant.', + messages=[ + Message( + role=Role.USER, + content=[Part(root=TextPart(text=f'I want to visit {destination} for two weeks in spring.'))], + ), + ], + ) + response2 = await ai.generate( + system='You are a helpful travel assistant.', + messages=[ + *response1.messages, + Message( + role=Role.USER, + content=[Part(root=TextPart(text='What should I pack for that trip?'))], + ), + ], + ) + return response2.text + + +async def generate_streaming_story_logic(ai: Genkit, name: str, ctx: ActionRunContext | None) -> str: + """Generate a streaming story response. Args: ai: The Genkit instance. name: Name to greet. ctx: Action context for streaming. + + Returns: + Complete story text. """ response = await ai.generate( prompt=f'Tell me a short story about {name}', @@ -104,8 +195,59 @@ async def say_hi_stream_logic(ai: Genkit, name: str, ctx: ActionRunContext | Non return response.text -async def say_hi_with_config_logic(ai: Genkit, name: str) -> str: - """Generates a greeting with custom model configuration. +async def generate_streaming_with_tools_logic( + ai: Genkit, location: str, ctx: ActionRunContext | None, model: str | None = None +) -> str: + """Demonstrate streaming generation with tool calling. + + The model streams its response while also calling tools mid-generation. + + Args: + ai: The Genkit instance. + location: Location for weather lookup. + ctx: Action context for streaming chunks to the client. + model: Optional model override (needed for providers whose default + model doesn't support tool calling, e.g. Ollama/Gemma). + + Returns: + The complete generated text. + """ + stream, response = ai.generate_stream( + model=model, + prompt=f'What is the weather in {location}? Describe it poetically.', + tools=['get_weather'], + ) + full_text = '' + async for chunk in stream: + if chunk.text: + if ctx is not None: + ctx.send_chunk(chunk.text) + full_text += chunk.text + return (await response).text + + +async def generate_weather_logic(ai: Genkit, input: WeatherInput, model: str | None = None) -> str: + """Get weather information using tool calling. + + Args: + ai: The Genkit instance. + input: Weather input data. + model: Optional model override (needed for providers whose default + model doesn't support tool calling, e.g. Ollama/Gemma). + + Returns: + Formatted weather string. + """ + response = await ai.generate( + model=model, + prompt=f'What is the weather in {input.location}?', + tools=['get_weather'], + ) + return response.text + + +async def generate_with_config_logic(ai: Genkit, name: str) -> str: + """Generate a greeting with custom model configuration. Args: ai: The Genkit instance. @@ -121,22 +263,39 @@ async def say_hi_with_config_logic(ai: Genkit, name: str) -> str: return response.text -async def weather_logic(ai: Genkit, input: WeatherInput) -> str: - """Get weather info using the weather tool (via model tool calling). +async def generate_with_system_prompt_logic(ai: Genkit, question: str) -> str: + """Demonstrate system prompts to control model persona and behavior. + + Uses a pirate captain persona as a fun, recognizable example. Args: - ai: The AI model or client used to generate the weather response. - input: Weather input data. + ai: The Genkit instance. + question: Question to ask. Returns: - Formatted weather string. + The model's response in the persona defined by the system prompt. 
+ """ + response = await ai.generate( + prompt=question, + system='You are a pirate captain from the 18th century. Always respond in character, ' + 'using pirate slang and nautical terminology.', + ) + return response.text + + +async def solve_reasoning_problem_logic(ai: Genkit, prompt: str, model: str | None = None) -> str: + """Solve reasoning problems using a reasoning model. + + Args: + ai: The Genkit instance. + prompt: Reasoning question to solve. + model: Optional model override for provider-specific reasoning models. - Example: - >>> await weather_flow(WeatherInput(location='London')) - "Weather in London: 15°C, cloudy" + Returns: + The reasoning and answer. """ response = await ai.generate( - prompt=f'What is the weather in {input.location}?', - tools=['get_weather'], + model=model, + prompt=prompt, ) return response.text diff --git a/py/samples/shared/logging.py b/py/samples/shared/logging.py new file mode 100644 index 0000000000..0fb70829fd --- /dev/null +++ b/py/samples/shared/logging.py @@ -0,0 +1,121 @@ +# Copyright 2026 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 +"""Common developer-experience setup for Genkit samples. + +Provides a single ``setup_sample()`` call that configures: + +- **Rich tracebacks** -- pretty, colorful exception rendering with local + variables shown in context. +- **Colored structlog + stdlib logging** -- human-readable, color-coded + log messages for *both* structlog loggers (used by sample code) and + stdlib loggers (used by uvicorn, third-party libraries, etc.). + +The integration uses ``structlog.stdlib.ProcessorFormatter`` so that +**every** log line -- regardless of origin -- flows through the same +colored ``ConsoleRenderer``. + +Usage:: + + from samples.shared.logging import setup_sample + + setup_sample() # call once at the top of each sample main.py +""" + +import logging +import os +import sys + +import structlog +from rich.traceback import install as _install_rich_traceback + + +def _want_colors() -> bool: + """Decide whether to emit ANSI color codes. + + Color is enabled unless explicitly suppressed via ``NO_COLOR=1`` + (see https://no-color.org). We default to **True** rather than + checking ``isatty()`` because ``genkit start`` pipes + stdout/stderr through the dev-server, which makes ``isatty()`` + return ``False`` even though the output ultimately lands in a + color-capable terminal or the Dev UI. + """ + return os.environ.get('NO_COLOR', '') == '' + + +def setup_sample(log_level: int = logging.DEBUG) -> None: + """One-stop developer-experience setup for Genkit samples. + + Installs Rich tracebacks and configures *both* structlog and + Python's standard ``logging`` module for pretty, colored console + output. This ensures that log lines from uvicorn, third-party + libraries, and Genkit internals all render with the same style. + + Call this once at the top of each sample's ``main.py`` before + any logging calls. + + Args: + log_level: Minimum log level to display. 
Defaults to + ``logging.DEBUG``. + """ + # ── Rich tracebacks ───────────────────────────────────────── + _install_rich_traceback(show_locals=True, width=120, extra_lines=3) + + # ── Shared processor chain ────────────────────────────────── + # These processors run on EVERY log entry -- both structlog and + # stdlib ("foreign") entries. They must NOT include a final + # renderer; the renderer lives in the ProcessorFormatter below. + shared_processors: list[structlog.types.Processor] = [ + structlog.contextvars.merge_contextvars, + structlog.stdlib.add_log_level, + structlog.stdlib.add_logger_name, + structlog.processors.StackInfoRenderer(), + structlog.dev.set_exc_info, + structlog.processors.TimeStamper(fmt='iso'), + ] + + # ── structlog configuration ───────────────────────────────── + # Route structlog entries into stdlib logging so that a single + # ProcessorFormatter handles final rendering for everything. + structlog.configure( + processors=[ + *shared_processors, + structlog.stdlib.ProcessorFormatter.wrap_for_formatter, + ], + wrapper_class=structlog.stdlib.BoundLogger, + context_class=dict, + logger_factory=structlog.stdlib.LoggerFactory(), + cache_logger_on_first_use=True, + ) + + # ── stdlib logging configuration ──────────────────────────── + # A single handler on the root logger with ProcessorFormatter + # ensures uvicorn, third-party, and structlog entries all get + # the same colored ConsoleRenderer treatment. + formatter = structlog.stdlib.ProcessorFormatter( + foreign_pre_chain=shared_processors, + processors=[ + structlog.stdlib.ProcessorFormatter.remove_processors_meta, + structlog.dev.ConsoleRenderer(colors=_want_colors()), + ], + ) + + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter(formatter) + + root_logger = logging.getLogger() + root_logger.handlers.clear() + root_logger.addHandler(handler) + root_logger.setLevel(log_level) diff --git a/py/samples/shared/types.py b/py/samples/shared/types.py index b857faf118..0f6a2c4f48 100644 --- a/py/samples/shared/types.py +++ b/py/samples/shared/types.py @@ -14,11 +14,44 @@ # # SPDX-License-Identifier: Apache-2.0 -"""Common types for samples.""" +"""Common types for provider samples. + +Centralizes Pydantic models that are shared across multiple provider +hello samples. Provider-specific types stay in each sample's main.py. 
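+
+Usage (illustrative)::
+
+    from samples.shared import GreetingInput
+
+    flow_input = GreetingInput(name='Alice')  # validated input for a greeting flow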
+""" from pydantic import BaseModel, Field +class CalculatorInput(BaseModel): + """Input for the calculator tool.""" + + operation: str = Field(description='Math operation: add, subtract, multiply, divide', default='add') + a: float = Field(description='First number', default=123) + b: float = Field(description='Second number', default=321) + + +class CharacterInput(BaseModel): + """Input for character generation.""" + + name: str = Field(default='Whiskers', description='Character name') + + +class CodeInput(BaseModel): + """Input for code generation flow.""" + + task: str = Field( + default='Write a Python function to calculate fibonacci numbers', + description='Coding task description', + ) + + +class ConfigInput(BaseModel): + """Input for config flow.""" + + name: str = Field(default='Ginger', description='User name for greeting') + + class CurrencyExchangeInput(BaseModel): """Currency conversion input schema.""" @@ -27,12 +60,34 @@ class CurrencyExchangeInput(BaseModel): to_currency: str = Field(description='Target currency code (e.g., EUR)', default='EUR') -class CalculatorInput(BaseModel): - """Input for the calculator tool.""" +class ImageDescribeInput(BaseModel): + """Input for image description flow.""" - operation: str = Field(description='Math operation: add, subtract, multiply, divide', default='add') - a: float = Field(description='First number', default=123) - b: float = Field(description='Second number', default=321) + image_url: str = Field( + default='https://upload.wikimedia.org/wikipedia/commons/1/13/Cute_kitten.jpg', + description='URL of the image to describe', + ) + + +class MultiTurnInput(BaseModel): + """Input for multi_turn_chat flow.""" + + destination: str = Field(default='Japan', description='Travel destination') + + +class ReasoningInput(BaseModel): + """Input for reasoning flow.""" + + prompt: str = Field( + default='What is heavier, one kilo of steel or one kilo of feathers? 
Explain step by step.', + description='Reasoning question to solve', + ) + + +class GreetingInput(BaseModel): + """Input for generate_greeting flow.""" + + name: str = Field(default='Mittens', description='Name to greet') class Skills(BaseModel): @@ -52,6 +107,24 @@ class RpgCharacter(BaseModel): skills: Skills +class StreamInput(BaseModel): + """Input for streaming flow.""" + + name: str = Field(default='Shadow', description='Name for streaming story') + + +class StreamingToolInput(BaseModel): + """Input for streaming tool flow.""" + + location: str = Field(default='London', description='Location to get weather for') + + +class SystemPromptInput(BaseModel): + """Input for system_prompt flow.""" + + question: str = Field(default='What is your quest?', description='Question to ask') + + class WeatherInput(BaseModel): """Input for the weather tool.""" diff --git a/py/samples/vertex-ai-vector-search-firestore/LICENSE b/py/samples/web-flask-hello/LICENSE similarity index 100% rename from py/samples/vertex-ai-vector-search-firestore/LICENSE rename to py/samples/web-flask-hello/LICENSE diff --git a/py/samples/flask-hello/README.md b/py/samples/web-flask-hello/README.md similarity index 98% rename from py/samples/flask-hello/README.md rename to py/samples/web-flask-hello/README.md index cd81d3df4b..5e36373ca3 100644 --- a/py/samples/flask-hello/README.md +++ b/py/samples/web-flask-hello/README.md @@ -1,4 +1,4 @@ -# Flask hello example +# Web Flask hello example ## Setup environment diff --git a/py/samples/flask-hello/pyproject.toml b/py/samples/web-flask-hello/pyproject.toml similarity index 98% rename from py/samples/flask-hello/pyproject.toml rename to py/samples/web-flask-hello/pyproject.toml index 79dcb88e71..9c8c3d35c8 100644 --- a/py/samples/flask-hello/pyproject.toml +++ b/py/samples/web-flask-hello/pyproject.toml @@ -42,7 +42,7 @@ dependencies = [ ] description = "hello Genkit sample" license = "Apache-2.0" -name = "flask-hello" +name = "web-flask-hello" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/flask-hello/run.sh b/py/samples/web-flask-hello/run.sh similarity index 100% rename from py/samples/flask-hello/run.sh rename to py/samples/web-flask-hello/run.sh diff --git a/py/samples/flask-hello/src/main.py b/py/samples/web-flask-hello/src/main.py similarity index 97% rename from py/samples/flask-hello/src/main.py rename to py/samples/web-flask-hello/src/main.py index 9a8800cb00..7a0e26dc18 100755 --- a/py/samples/flask-hello/src/main.py +++ b/py/samples/web-flask-hello/src/main.py @@ -57,7 +57,6 @@ from flask import Flask from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback from genkit.ai import Genkit from genkit.blocks.model import GenerateResponseWrapper @@ -66,8 +65,9 @@ from genkit.plugins.flask import genkit_flask_handler from genkit.plugins.google_genai import GoogleAI from genkit.plugins.google_genai.models.gemini import GoogleAIGeminiVersion +from samples.shared.logging import setup_sample -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() if 'GEMINI_API_KEY' not in os.environ: os.environ['GEMINI_API_KEY'] = input('Please enter your GEMINI_API_KEY: ') diff --git a/py/samples/xai-hello/LICENSE b/py/samples/web-multi-server/LICENSE similarity index 100% rename from py/samples/xai-hello/LICENSE rename to py/samples/web-multi-server/LICENSE diff --git a/py/samples/multi-server/README.md b/py/samples/web-multi-server/README.md similarity index 99% rename from 
py/samples/multi-server/README.md rename to py/samples/web-multi-server/README.md index 3efad62850..e5dfd27d8a 100644 --- a/py/samples/multi-server/README.md +++ b/py/samples/web-multi-server/README.md @@ -56,7 +56,7 @@ kill -15 ${PROCESS_ID} 1. **Run the demo**: ```bash - cd py/samples/multi-server + cd py/samples/web-multi-server ./run.sh ``` diff --git a/py/samples/multi-server/architecture.svg b/py/samples/web-multi-server/architecture.svg similarity index 100% rename from py/samples/multi-server/architecture.svg rename to py/samples/web-multi-server/architecture.svg diff --git a/py/samples/multi-server/pyproject.toml b/py/samples/web-multi-server/pyproject.toml similarity index 98% rename from py/samples/multi-server/pyproject.toml rename to py/samples/web-multi-server/pyproject.toml index bf8252d9b7..0ded2f80ed 100644 --- a/py/samples/multi-server/pyproject.toml +++ b/py/samples/web-multi-server/pyproject.toml @@ -44,7 +44,7 @@ dependencies = [ ] description = "Sample implementation to exercise the Genkit multi server manager." license = "Apache-2.0" -name = "multi-server" +name = "web-multi-server" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/multi-server/run.sh b/py/samples/web-multi-server/run.sh similarity index 100% rename from py/samples/multi-server/run.sh rename to py/samples/web-multi-server/run.sh diff --git a/py/samples/multi-server/src/main.py b/py/samples/web-multi-server/src/main.py similarity index 99% rename from py/samples/multi-server/src/main.py rename to py/samples/web-multi-server/src/main.py index 89736eb032..bb0784c6c4 100755 --- a/py/samples/multi-server/src/main.py +++ b/py/samples/web-multi-server/src/main.py @@ -79,7 +79,6 @@ from litestar.middleware.base import AbstractMiddleware from litestar.plugins.structlog import StructlogPlugin from litestar.types import Message, Receive, Scope, Send -from rich.traceback import install as install_rich_traceback from starlette.applications import Starlette from genkit import Genkit @@ -101,8 +100,9 @@ ) from genkit.web.manager.signals import terminate_all_servers from genkit.web.typing import Application +from samples.shared.logging import setup_sample -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() # TODO(#4368): Logging middleware > log ALL access requests and fix dups # TODO(#4368): Logging middleware > access requests different color for each server. diff --git a/py/samples/web-short-n-long/LICENSE b/py/samples/web-short-n-long/LICENSE new file mode 100644 index 0000000000..2205396735 --- /dev/null +++ b/py/samples/web-short-n-long/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2025 Google LLC + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/py/samples/short-n-long/README.md b/py/samples/web-short-n-long/README.md similarity index 98% rename from py/samples/short-n-long/README.md rename to py/samples/web-short-n-long/README.md index 2f5b4a360c..240d92ddf0 100644 --- a/py/samples/short-n-long/README.md +++ b/py/samples/web-short-n-long/README.md @@ -77,7 +77,7 @@ genkit start -- uv run --python python3.10 src/main.py 2. **Run the server** (two modes): ```bash - cd py/samples/short-n-long + cd py/samples/web-short-n-long # Short mode (development with DevUI) ./run.sh diff --git a/py/samples/short-n-long/pyproject.toml b/py/samples/web-short-n-long/pyproject.toml similarity index 98% rename from py/samples/short-n-long/pyproject.toml rename to py/samples/web-short-n-long/pyproject.toml index a26e4ee52c..b5ee4eaffb 100644 --- a/py/samples/short-n-long/pyproject.toml +++ b/py/samples/web-short-n-long/pyproject.toml @@ -43,7 +43,7 @@ dependencies = [ ] description = "Short and long sample" license = "Apache-2.0" -name = "short-n-long" +name = "web-short-n-long" readme = "README.md" requires-python = ">=3.10" version = "0.1.0" diff --git a/py/samples/short-n-long/run.sh b/py/samples/web-short-n-long/run.sh similarity index 100% rename from py/samples/short-n-long/run.sh rename to py/samples/web-short-n-long/run.sh diff --git a/py/samples/short-n-long/src/main.py b/py/samples/web-short-n-long/src/main.py similarity index 78% rename from py/samples/short-n-long/src/main.py rename to py/samples/web-short-n-long/src/main.py index eb4e61ccd9..1eeb874804 100755 --- a/py/samples/short-n-long/src/main.py +++ b/py/samples/web-short-n-long/src/main.py @@ -53,6 +53,8 @@ | Defining Tools | `@ai.tool()` decorator (multiple uses) | | Tool Input Schema (Pydantic) | `GablorkenInput` | | Simple Generation (Prompt String) | `say_hi` | +| System Prompt | `system_prompt` | +| Multi-turn Conversation | `multi_turn_chat` | | Generation with Messages (`Message`, `Role`, `TextPart`) | `simple_generate_with_tools_flow` | | Generation with Tools | `simple_generate_with_tools_flow` | | Tool Response Handling | `simple_generate_with_interrupts` | @@ -62,8 +64,9 @@ | Streaming Generation (`ai.generate_stream`) | `say_hi_stream` | | Streaming Chunk Handling (`ctx.send_chunk`) | `say_hi_stream`, `generate_character` | | Structured Output (Schema) | `generate_character` | +| Streaming Structured Output | `streaming_structured_output` | | Pydantic for Structured Output Schema | `RpgCharacter` | -| Unconstrained Structured Output | `generate_character_unconstrained` | +| Structured Output (Instruction-Based) | `generate_character_instructions` | | Multi-modal Output Configuration | `generate_images` | See README.md for testing instructions. 
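For reference, once the sample server is running, each flow in the capability table above can be invoked by name from the Genkit CLI. A minimal sketch follows; the payload shapes are assumptions based on the Pydantic input models added in the hunks below.

```bash
cd py/samples/web-short-n-long

# Start the sample with the Dev UI attached (see the README commands above).
genkit start -- uv run src/main.py

# From a second terminal, exercise the newly added flows. The payloads
# mirror the assumed SystemPromptInput and MultiTurnInput field names.
genkit flow:run system_prompt '{"question": "What is your quest?"}'
genkit flow:run multi_turn_chat '{"destination": "Japan"}'
```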
@@ -75,7 +78,6 @@ import uvicorn from pydantic import BaseModel, Field -from rich.traceback import install as install_rich_traceback from genkit.ai import Genkit, Output, ToolRunContext, tool_response from genkit.blocks.model import GenerateResponseWrapper @@ -97,8 +99,9 @@ Role, TextPart, ) +from samples.shared.logging import setup_sample -install_rich_traceback(show_locals=True, width=120, extra_lines=3) +setup_sample() logger = get_logger(__name__) @@ -129,6 +132,18 @@ class SayHiInput(BaseModel): name: str = Field(default='Mittens', description='Name to greet') +class SystemPromptInput(BaseModel): + """Input for system_prompt flow.""" + + question: str = Field(default='What is your quest?', description='Question to ask') + + +class MultiTurnInput(BaseModel): + """Input for multi_turn_chat flow.""" + + destination: str = Field(default='Japan', description='Travel destination') + + class TemperatureInput(BaseModel): """Input for temperature config flow.""" @@ -259,6 +274,70 @@ async def say_hi(input: SayHiInput) -> str: return resp.text +@ai.flow() +async def system_prompt(input: SystemPromptInput) -> str: + """Demonstrate system prompts to control model persona and behavior. + + System prompts give the model instructions about how to respond, such as + adopting a specific persona, tone, or response format. + + See: https://genkit.dev/docs/models#system-prompts + + Args: + input: Input with a question to ask. + + Returns: + The model's response in the persona defined by the system prompt. + """ + response = await ai.generate( + prompt=input.question, + system='You are a pirate captain from the 18th century. Always respond in character, ' + 'using pirate slang and nautical terminology.', + ) + return response.text + + +@ai.flow() +async def multi_turn_chat(input: MultiTurnInput) -> str: + """Demonstrate multi-turn conversations using the messages parameter. + + The messages parameter allows you to pass a conversation history to + maintain context across multiple interactions with the model. Each + message has a role ('user' or 'model') and content. + + See: https://genkit.dev/docs/models#multi-turn-conversations-with-messages + + Args: + input: Input with a travel destination. + + Returns: + The model's final response, demonstrating context retention. + """ + # Turn 1: Start the conversation + response1 = await ai.generate( + system='You are a helpful travel assistant.', + messages=[ + Message( + role=Role.USER, + content=[Part(root=TextPart(text=f'I want to visit {input.destination} for two weeks in spring.'))], + ), + ], + ) + + # Turn 2: Follow-up question that requires context from turn 1 + response2 = await ai.generate( + system='You are a helpful travel assistant.', + messages=[ + *response1.messages, + Message( + role=Role.USER, + content=[Part(root=TextPart(text='What should I pack for that trip?'))], + ), + ], + ) + return response2.text + + @ai.flow() async def embed_docs(docs: list[str] | None = None) -> list[Embedding]: """Generate an embedding for the words in a list. @@ -395,11 +474,22 @@ async def generate_character( @ai.flow() -async def generate_character_unconstrained( +async def generate_character_instructions( input: CharacterInput, _ctx: ActionRunContext | None = None, ) -> RpgCharacter: - """Generate an unconstrained RPG character. + """Generate an RPG character using instruction-based structured output. 
+ + Unlike ``generate_character`` which uses constrained decoding (the model + is forced to output valid JSON matching the schema), this flow uses + ``output_constrained=False`` to guide the model via prompt instructions + instead. This is useful when: + + - The model doesn't support constrained decoding. + - You want the model to have more flexibility in its output. + - You're debugging schema adherence issues. + + See: https://genkit.dev/docs/models#structured-output Args: input: Input with character name. @@ -417,6 +507,42 @@ return result.output +@ai.flow() +async def streaming_structured_output( + input: CharacterInput, + ctx: ActionRunContext | None = None, +) -> RpgCharacter: + """Demonstrate streaming with structured output schemas. + + Combines ``generate_stream`` with ``Output(schema=...)`` so the model + streams JSON tokens that are progressively parsed into the Pydantic + model. Each chunk exposes a partial ``.output`` you can forward to + clients for incremental rendering. + + See: https://genkit.dev/docs/models#streaming + + Args: + input: Input with character name. + ctx: Action context for streaming partial outputs. + + Returns: + The fully parsed RPG character once streaming completes. + """ + stream, result = ai.generate_stream( + prompt=( + f'Generate an RPG character named {input.name}. ' + 'Include a creative backstory, 3-4 unique abilities, ' + 'and skill ratings for strength, charisma, and endurance (0-100 each).' + ), + output=Output(schema=RpgCharacter), + ) + async for chunk in stream: + if ctx is not None: + ctx.send_chunk(chunk.output) + + return (await result).output + + @ai.flow() async def generate_images( input: GenerateImagesInput, diff --git a/py/typos.toml b/py/typos.toml index f72be69e57..afc6b08ccf 100644 --- a/py/typos.toml +++ b/py/typos.toml @@ -31,9 +31,6 @@ extend-ignore-re = [ ot = "ot" # Partial strings in test data ba = "ba" -# Sample directory name (legacy, would require renaming to fix) -retreive = "retreive" -retreiver = "retreiver" # Weather example uses celsius misspelling intentionally in function name celcius = "celcius" diff --git a/py/uv.lock b/py/uv.lock index 0d84d2ae2b..51a0d2f07e 100644 --- a/py/uv.lock +++ b/py/uv.lock @@ -11,16 +11,16 @@ resolution-markers = [ ] [manifest] members = [ - "amazon-bedrock-hello", - "anthropic-hello", - "cloudflare-workers-ai-hello", - "compat-oai-hello", - "deepseek-hello", "dev-local-vectorstore-hello", - "evaluator-demo", - "firestore-retreiver", - "flask-hello", - "format-demo", + "framework-context-demo", + "framework-dynamic-tools-demo", + "framework-evaluator-demo", + "framework-format-demo", + "framework-middleware-demo", + "framework-prompt-demo", + "framework-realtime-tracing-demo", + "framework-restaurant-demo", + "framework-tool-interrupts", "genkit", "genkit-plugin-amazon-bedrock", "genkit-plugin-anthropic", @@ -42,30 +42,31 @@ members = [ "genkit-plugin-vertex-ai", "genkit-plugin-xai", "genkit-workspace", - "google-genai-code-execution", - "google-genai-context-caching", - "google-genai-hello", - "google-genai-image", - "google-genai-vertexai-hello", - "google-genai-vertexai-image", - "huggingface-hello", - "media-models-demo", - "menu", - "microsoft-foundry-hello", - "mistral-hello", - "model-garden", - "multi-server", - "observability-hello", - "ollama-hello", - "ollama-simple-embed", - "prompt-demo", - "realtime-tracing-demo", - "short-n-long", - "tool-interrupts", - "vertex-ai-vector-search-bigquery", - "vertex-ai-vector-search-firestore", -
"vertexai-rerank-eval", - "xai-hello", + "provider-amazon-bedrock-hello", + "provider-anthropic-hello", + "provider-cloudflare-workers-ai-hello", + "provider-compat-oai-hello", + "provider-deepseek-hello", + "provider-firestore-retriever", + "provider-google-genai-code-execution", + "provider-google-genai-context-caching", + "provider-google-genai-hello", + "provider-google-genai-media-models-demo", + "provider-google-genai-vertexai-hello", + "provider-google-genai-vertexai-image", + "provider-huggingface-hello", + "provider-microsoft-foundry-hello", + "provider-mistral-hello", + "provider-observability-hello", + "provider-ollama-hello", + "provider-vertex-ai-model-garden", + "provider-vertex-ai-rerank-eval", + "provider-vertex-ai-vector-search-bigquery", + "provider-vertex-ai-vector-search-firestore", + "provider-xai-hello", + "web-flask-hello", + "web-multi-server", + "web-short-n-long", ] [[package]] @@ -235,36 +236,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/db/33/ef2f2409450ef6daa61459d5de5c08128e7d3edb773fefd0a324d1310238/altair-6.0.0-py3-none-any.whl", hash = "sha256:09ae95b53d5fe5b16987dccc785a7af8588f2dca50de1e7a156efa8a461515f8", size = 795410, upload-time = "2025-11-12T08:59:09.804Z" }, ] -[[package]] -name = "amazon-bedrock-hello" -version = "0.1.0" -source = { editable = "samples/amazon-bedrock-hello" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-amazon-bedrock" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "structlog" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-amazon-bedrock", editable = "plugins/amazon-bedrock" }, - { name = "pydantic", specifier = ">=2.0.0" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "structlog", specifier = ">=24.0.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - [[package]] name = "annotated-types" version = "0.7.0" @@ -302,36 +273,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3b/03/2f50931a942e5e13f80e24d83406714672c57964be593fc046d81369335b/anthropic-0.78.0-py3-none-any.whl", hash = "sha256:2a9887d2e99d1b0f9fe08857a1e9fe5d2d4030455dbf9ac65aab052e2efaeac4", size = 405485, upload-time = "2026-02-05T17:52:03.674Z" }, ] -[[package]] -name = "anthropic-hello" -version = "0.1.0" -source = { editable = "samples/anthropic-hello" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-anthropic" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "structlog" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-anthropic", editable = "plugins/anthropic" }, - { name = "pydantic", specifier = ">=2.0.0" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "structlog", specifier = ">=24.0.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - [[package]] name = "anyio" version = "4.12.1" @@ -944,23 +885,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = 
"sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" }, ] -[[package]] -name = "cloudflare-workers-ai-hello" -version = "0.1.0" -source = { editable = "samples/cloudflare-workers-ai-hello" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-cloudflare-workers-ai" }, - { name = "rich" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-cloudflare-workers-ai", editable = "plugins/cloudflare-workers-ai" }, - { name = "rich", specifier = ">=13.0.0" }, -] - [[package]] name = "colorama" version = "0.4.6" @@ -991,38 +915,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/60/97/891a0971e1e4a8c5d2b20bbe0e524dc04548d2307fee33cdeba148fd4fc7/comm-0.2.3-py3-none-any.whl", hash = "sha256:c615d91d75f7f04f095b30d1c1711babd43bdc6419c1be9886a85f2f4e489417", size = 7294, upload-time = "2025-07-25T14:02:02.896Z" }, ] -[[package]] -name = "compat-oai-hello" -version = "0.1.0" -source = { editable = "samples/compat-oai-hello" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-compat-oai" }, - { name = "httpx" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "structlog" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-compat-oai", editable = "plugins/compat-oai" }, - { name = "httpx", specifier = ">=0.28.1" }, - { name = "pydantic", specifier = ">=2.10.5" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "structlog", specifier = ">=25.2.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - [[package]] name = "coverage" version = "7.13.3" @@ -1378,36 +1270,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" }, ] -[[package]] -name = "deepseek-hello" -version = "0.1.0" -source = { editable = "samples/deepseek-hello" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-deepseek" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "structlog" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-deepseek", editable = "plugins/deepseek" }, - { name = "pydantic", specifier = ">=2.0.0" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "structlog", specifier = ">=24.0.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - [[package]] name = "defusedxml" version = "0.7.1" @@ -1626,42 +1488,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cf/22/fdc2e30d43ff853720042fa15baa3e6122722be1a7950a98233ebb55cd71/eval_type_backport-0.3.1-py3-none-any.whl", hash = "sha256:279ab641905e9f11129f56a8a78f493518515b83402b860f6f06dd7c011fdfa8", size = 6063, upload-time = "2025-12-02T11:51:41.665Z" }, ] -[[package]] -name = "evaluator-demo" -version = "0.0.1" -source = { editable = 
"samples/evaluator-demo" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-dev-local-vectorstore" }, - { name = "genkit-plugin-evaluators" }, - { name = "genkit-plugin-google-genai" }, - { name = "pydantic" }, - { name = "pypdf" }, - { name = "rich" }, - { name = "structlog" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-dev-local-vectorstore", editable = "plugins/dev-local-vectorstore" }, - { name = "genkit-plugin-evaluators", editable = "plugins/evaluators" }, - { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, - { name = "pydantic", specifier = ">=2.0.0" }, - { name = "pypdf", specifier = ">=6.6.2" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "structlog", specifier = ">=24.0.0" }, - { name = "uvloop", specifier = ">=0.22.1" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - [[package]] name = "exceptiongroup" version = "1.3.1" @@ -1714,15 +1540,41 @@ wheels = [ ] [[package]] -name = "firestore-retreiver" +name = "flask" +version = "3.1.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "blinker" }, + { name = "click" }, + { name = "itsdangerous" }, + { name = "jinja2" }, + { name = "markupsafe" }, + { name = "werkzeug" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dc/6d/cfe3c0fcc5e477df242b98bfe186a4c34357b4847e87ecaef04507332dab/flask-3.1.2.tar.gz", hash = "sha256:bf656c15c80190ed628ad08cdfd3aaa35beb087855e2f494910aa3774cc4fd87", size = 720160, upload-time = "2025-08-19T21:03:21.205Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/f9/7f9263c5695f4bd0023734af91bedb2ff8209e8de6ead162f35d8dc762fd/flask-3.1.2-py3-none-any.whl", hash = "sha256:ca1d8112ec8a6158cc29ea4858963350011b5c846a414cdb7a954aa9e967d03c", size = 103308, upload-time = "2025-08-19T21:03:19.499Z" }, +] + +[[package]] +name = "fqdn" +version = "1.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/30/3e/a80a8c077fd798951169626cde3e239adeba7dab75deb3555716415bd9b0/fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f", size = 6015, upload-time = "2021-03-11T07:16:29.08Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/58/8acf1b3e91c58313ce5cb67df61001fc9dcd21be4fadb76c1a2d540e09ed/fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014", size = 9121, upload-time = "2021-03-11T07:16:28.351Z" }, +] + +[[package]] +name = "framework-context-demo" version = "0.1.0" -source = { editable = "samples/firestore-retreiver" } +source = { editable = "samples/framework-context-demo" } dependencies = [ { name = "genkit" }, - { name = "genkit-plugin-firebase" }, { name = "genkit-plugin-google-genai" }, - { name = "google-cloud-firestore" }, + { name = "pydantic" }, { name = "rich" }, + { name = "structlog" }, { name = "uvloop" }, ] @@ -1734,42 +1586,58 @@ dev = [ [package.metadata] requires-dist = [ { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-firebase", editable = "plugins/firebase" }, { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, - { name = "google-cloud-firestore" }, + { name = "pydantic", specifier = ">=2.0.0" }, { name = "rich", specifier 
= ">=13.0.0" }, + { name = "structlog", specifier = ">=24.0.0" }, { name = "uvloop", specifier = ">=0.21.0" }, { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, ] provides-extras = ["dev"] [[package]] -name = "flask" -version = "3.1.2" -source = { registry = "https://pypi.org/simple" } +name = "framework-dynamic-tools-demo" +version = "0.1.0" +source = { editable = "samples/framework-dynamic-tools-demo" } dependencies = [ - { name = "blinker" }, - { name = "click" }, - { name = "itsdangerous" }, - { name = "jinja2" }, - { name = "markupsafe" }, - { name = "werkzeug" }, + { name = "genkit" }, + { name = "genkit-plugin-google-genai" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "structlog" }, + { name = "uvloop" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/dc/6d/cfe3c0fcc5e477df242b98bfe186a4c34357b4847e87ecaef04507332dab/flask-3.1.2.tar.gz", hash = "sha256:bf656c15c80190ed628ad08cdfd3aaa35beb087855e2f494910aa3774cc4fd87", size = 720160, upload-time = "2025-08-19T21:03:21.205Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ec/f9/7f9263c5695f4bd0023734af91bedb2ff8209e8de6ead162f35d8dc762fd/flask-3.1.2-py3-none-any.whl", hash = "sha256:ca1d8112ec8a6158cc29ea4858963350011b5c846a414cdb7a954aa9e967d03c", size = 103308, upload-time = "2025-08-19T21:03:19.499Z" }, + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, + { name = "pydantic", specifier = ">=2.0.0" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "structlog", specifier = ">=24.0.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, ] +provides-extras = ["dev"] [[package]] -name = "flask-hello" -version = "0.1.0" -source = { editable = "samples/flask-hello" } +name = "framework-evaluator-demo" +version = "0.0.1" +source = { editable = "samples/framework-evaluator-demo" } dependencies = [ - { name = "flask" }, { name = "genkit" }, - { name = "genkit-plugin-flask" }, + { name = "genkit-plugin-dev-local-vectorstore" }, + { name = "genkit-plugin-evaluators" }, { name = "genkit-plugin-google-genai" }, + { name = "pydantic" }, + { name = "pypdf" }, { name = "rich" }, + { name = "structlog" }, { name = "uvloop" }, ] @@ -1780,20 +1648,23 @@ dev = [ [package.metadata] requires-dist = [ - { name = "flask" }, { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-flask", editable = "plugins/flask" }, + { name = "genkit-plugin-dev-local-vectorstore", editable = "plugins/dev-local-vectorstore" }, + { name = "genkit-plugin-evaluators", editable = "plugins/evaluators" }, { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, + { name = "pydantic", specifier = ">=2.0.0" }, + { name = "pypdf", specifier = ">=6.6.2" }, { name = "rich", specifier = ">=13.0.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, + { name = "structlog", specifier = ">=24.0.0" }, + { name = "uvloop", specifier = ">=0.22.1" }, { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, ] provides-extras = ["dev"] [[package]] -name = "format-demo" +name = "framework-format-demo" version = "0.1.0" -source = { editable = "samples/format-demo" } +source = { editable = "samples/framework-format-demo" } dependencies = [ { name = "genkit" }, { name = "genkit-plugin-google-genai" }, @@ 
-1821,13 +1692,160 @@ requires-dist = [ provides-extras = ["dev"] [[package]] -name = "fqdn" -version = "1.5.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/30/3e/a80a8c077fd798951169626cde3e239adeba7dab75deb3555716415bd9b0/fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f", size = 6015, upload-time = "2021-03-11T07:16:29.08Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/cf/58/8acf1b3e91c58313ce5cb67df61001fc9dcd21be4fadb76c1a2d540e09ed/fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014", size = 9121, upload-time = "2021-03-11T07:16:28.351Z" }, +name = "framework-middleware-demo" +version = "0.1.0" +source = { editable = "samples/framework-middleware-demo" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-google-genai" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "structlog" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, + { name = "pydantic", specifier = ">=2.0.0" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "structlog", specifier = ">=24.0.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "framework-prompt-demo" +version = "0.0.1" +source = { editable = "samples/framework-prompt-demo" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-google-genai" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "structlog" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, + { name = "pydantic", specifier = ">=2.10.5" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "structlog", specifier = ">=25.2.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "framework-realtime-tracing-demo" +version = "0.1.0" +source = { editable = "samples/framework-realtime-tracing-demo" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-google-genai" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "structlog" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, + { name = "pydantic", specifier = ">=2.10.5" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "structlog", specifier = ">=25.2.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "framework-restaurant-demo" +version = "0.1.0" +source = { editable = "samples/framework-restaurant-demo" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-dev-local-vectorstore" }, + { name = "genkit-plugin-firebase" }, 
+ { name = "genkit-plugin-google-cloud" }, + { name = "genkit-plugin-google-genai" }, + { name = "genkit-plugin-ollama" }, + { name = "genkit-plugin-vertex-ai" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-dev-local-vectorstore", editable = "plugins/dev-local-vectorstore" }, + { name = "genkit-plugin-firebase", editable = "plugins/firebase" }, + { name = "genkit-plugin-google-cloud", editable = "plugins/google-cloud" }, + { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, + { name = "genkit-plugin-ollama", editable = "plugins/ollama" }, + { name = "genkit-plugin-vertex-ai", editable = "plugins/vertex-ai" }, + { name = "pydantic", specifier = ">=2.10.5" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "framework-tool-interrupts" +version = "0.1.0" +source = { editable = "samples/framework-tool-interrupts" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-google-genai" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, + { name = "pydantic", specifier = ">=2.10.5" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, ] +provides-extras = ["dev"] [[package]] name = "frozenlist" @@ -2818,220 +2836,32 @@ wheels = [ ] [[package]] -name = "google-genai-code-execution" -version = "0.1.0" -source = { editable = "samples/google-genai-code-execution" } +name = "google-resumable-media" +version = "2.8.0" +source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-google-genai" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "structlog" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, + { name = "google-crc32c" }, ] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, - { name = "pydantic", specifier = ">=2.10.5" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "structlog", specifier = ">=25.2.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +sdist = { url = "https://files.pythonhosted.org/packages/64/d7/520b62a35b23038ff005e334dba3ffc75fcf583bee26723f1fd8fd4b6919/google_resumable_media-2.8.0.tar.gz", hash = "sha256:f1157ed8b46994d60a1bc432544db62352043113684d4e030ee02e77ebe9a1ae", size = 2163265, upload-time = "2025-11-17T15:38:06.659Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1f/0b/93afde9cfe012260e9fe1522f35c9b72d6ee222f316586b1f23ecf44d518/google_resumable_media-2.8.0-py3-none-any.whl", hash = "sha256:dd14a116af303845a8d932ddae161a26e86cc229645bc98b39f026f9b1717582", size = 81340, upload-time = "2025-11-17T15:38:05.594Z" }, ] -provides-extras = 
["dev"] [[package]] -name = "google-genai-context-caching" -version = "0.1.0" -source = { editable = "samples/google-genai-context-caching" } +name = "googleapis-common-protos" +version = "1.72.0" +source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-google-genai" }, - { name = "pydantic" }, - { name = "requests" }, - { name = "rich" }, - { name = "structlog" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, - { name = "pydantic", specifier = ">=2.10.5" }, - { name = "requests", specifier = ">=2.32.3" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "structlog", specifier = ">=25.2.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, + { name = "protobuf" }, ] -provides-extras = ["dev"] - -[[package]] -name = "google-genai-hello" -version = "0.1.0" -source = { editable = "samples/google-genai-hello" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-evaluators" }, - { name = "genkit-plugin-google-cloud" }, - { name = "genkit-plugin-google-genai" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "structlog" }, - { name = "uvloop" }, +sdist = { url = "https://files.pythonhosted.org/packages/e5/7b/adfd75544c415c487b33061fe7ae526165241c1ea133f9a9125a56b39fd8/googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5", size = 147433, upload-time = "2025-11-06T18:29:24.087Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" }, ] [package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-evaluators", editable = "plugins/evaluators" }, - { name = "genkit-plugin-google-cloud", editable = "plugins/google-cloud" }, - { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, - { name = "pydantic", specifier = ">=2.10.5" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "structlog", specifier = ">=25.2.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - -[[package]] -name = "google-genai-image" -version = "0.1.0" -source = { editable = "samples/google-genai-image" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-google-genai" }, - { name = "google-genai" }, - { name = "pillow" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, - { name = "google-genai" }, - { name = "pillow" }, - { name = "pydantic", specifier = ">=2.10.5" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", 
marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - -[[package]] -name = "google-genai-vertexai-hello" -version = "0.1.0" -source = { editable = "samples/google-genai-vertexai-hello" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-google-genai" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "structlog" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, - { name = "pydantic", specifier = ">=2.10.5" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "structlog", specifier = ">=25.2.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - -[[package]] -name = "google-genai-vertexai-image" -version = "0.1.0" -source = { editable = "samples/google-genai-vertexai-image" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-google-genai" }, - { name = "pillow" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, - { name = "pillow" }, - { name = "pydantic", specifier = ">=2.10.5" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - -[[package]] -name = "google-resumable-media" -version = "2.8.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "google-crc32c" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/64/d7/520b62a35b23038ff005e334dba3ffc75fcf583bee26723f1fd8fd4b6919/google_resumable_media-2.8.0.tar.gz", hash = "sha256:f1157ed8b46994d60a1bc432544db62352043113684d4e030ee02e77ebe9a1ae", size = 2163265, upload-time = "2025-11-17T15:38:06.659Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/1f/0b/93afde9cfe012260e9fe1522f35c9b72d6ee222f316586b1f23ecf44d518/google_resumable_media-2.8.0-py3-none-any.whl", hash = "sha256:dd14a116af303845a8d932ddae161a26e86cc229645bc98b39f026f9b1717582", size = 81340, upload-time = "2025-11-17T15:38:05.594Z" }, -] - -[[package]] -name = "googleapis-common-protos" -version = "1.72.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "protobuf" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/e5/7b/adfd75544c415c487b33061fe7ae526165241c1ea133f9a9125a56b39fd8/googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5", size = 147433, upload-time = "2025-11-06T18:29:24.087Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" }, -] - -[package.optional-dependencies] -grpc = [ - { name = "grpcio" }, +grpc = [ + { name = "grpcio" }, ] [[package]] @@ -3258,36 +3088,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/d2/fd/6668e5aec43ab844de6fc74927e155a3b37bf40d7c3790e49fc0406b6578/httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc", size = 8960, upload-time = "2025-10-10T21:48:21.158Z" }, ] -[[package]] -name = "huggingface-hello" -version = "0.1.0" -source = { editable = "samples/huggingface-hello" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-huggingface" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "structlog" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-huggingface", editable = "plugins/huggingface" }, - { name = "pydantic", specifier = ">=2.0.0" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "structlog", specifier = ">=24.0.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - [[package]] name = "huggingface-hub" version = "1.4.0" @@ -4285,117 +4085,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, ] -[[package]] -name = "media-models-demo" -version = "0.1.0" -source = { editable = "samples/media-models-demo" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-google-genai" }, - { name = "rich" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=4.0.0" }, -] -provides-extras = ["dev"] - -[[package]] -name = "menu" -version = "0.1.0" -source = { editable = "samples/menu" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-dev-local-vectorstore" }, - { name = "genkit-plugin-firebase" }, - { name = "genkit-plugin-google-cloud" }, - { name = "genkit-plugin-google-genai" }, - { name = "genkit-plugin-ollama" }, - { name = "genkit-plugin-vertex-ai" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-dev-local-vectorstore", editable = "plugins/dev-local-vectorstore" }, - { name = "genkit-plugin-firebase", editable = "plugins/firebase" }, - { name = "genkit-plugin-google-cloud", editable = "plugins/google-cloud" }, - { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, - { name = "genkit-plugin-ollama", editable = "plugins/ollama" }, - { name = "genkit-plugin-vertex-ai", editable = "plugins/vertex-ai" }, - { name = "pydantic", specifier = ">=2.10.5" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - -[[package]] -name = "microsoft-foundry-hello" -version = "0.0.0" -source = { editable = 
"samples/microsoft-foundry-hello" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-microsoft-foundry" }, - { name = "pydantic" }, - { name = "rich" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-microsoft-foundry", editable = "plugins/microsoft-foundry" }, - { name = "pydantic" }, - { name = "rich", specifier = ">=13.0.0" }, -] - -[[package]] -name = "mistral-hello" -version = "0.1.0" -source = { editable = "samples/mistral-hello" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-mistral" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "structlog" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-mistral", editable = "plugins/mistral" }, - { name = "pydantic", specifier = ">=2.0.0" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "structlog", specifier = ">=24.0.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - [[package]] name = "mistralai" version = "1.9.11" @@ -4426,34 +4115,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9b/f7/4a5e785ec9fbd65146a27b6b70b6cdc161a66f2024e4b04ac06a67f5578b/mistune-3.2.0-py3-none-any.whl", hash = "sha256:febdc629a3c78616b94393c6580551e0e34cc289987ec6c35ed3f4be42d0eee1", size = 53598, upload-time = "2025-12-23T11:36:33.211Z" }, ] -[[package]] -name = "model-garden" -version = "0.1.0" -source = { editable = "samples/model-garden" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-vertex-ai" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-vertex-ai", editable = "plugins/vertex-ai" }, - { name = "pydantic", specifier = ">=2.10.5" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - [[package]] name = "more-itertools" version = "10.8.0" @@ -4623,43 +4284,9 @@ wheels = [ ] [[package]] -name = "multi-server" -version = "0.1.0" -source = { editable = "samples/multi-server" } -dependencies = [ - { name = "asgiref" }, - { name = "genkit" }, - { name = "litestar" }, - { name = "rich" }, - { name = "starlette" }, - { name = "structlog" }, - { name = "uvicorn" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "asgiref", specifier = ">=3.8.1" }, - { name = "genkit", editable = "packages/genkit" }, - { name = "litestar", specifier = ">=2.15.1" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "starlette", specifier = ">=0.46.1" }, - { name = "structlog", specifier = ">=25.2.0" }, - { name = "uvicorn", specifier = ">=0.34.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - -[[package]] -name = "multidict" -version = "6.7.1" -source = { registry = "https://pypi.org/simple" } +name = "multidict" +version = "6.7.1" +source = { registry = 
"https://pypi.org/simple" } dependencies = [ { name = "typing-extensions", marker = "python_full_version < '3.11'" }, ] @@ -5191,25 +4818,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/be/9c/92789c596b8df838baa98fa71844d84283302f7604ed565dafe5a6b5041a/oauthlib-3.3.1-py3-none-any.whl", hash = "sha256:88119c938d2b8fb88561af5f6ee0eec8cc8d552b7bb1f712743136eb7523b7a1", size = 160065, upload-time = "2025-06-19T22:48:06.508Z" }, ] -[[package]] -name = "observability-hello" -version = "0.1.0" -source = { editable = "samples/observability-hello" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-google-genai" }, - { name = "genkit-plugin-observability" }, - { name = "rich" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, - { name = "genkit-plugin-observability", editable = "plugins/observability" }, - { name = "rich", specifier = ">=13.0.0" }, -] - [[package]] name = "ollama" version = "0.6.1" @@ -5223,66 +4831,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/47/4f/4a617ee93d8208d2bcf26b2d8b9402ceaed03e3853c754940e2290fed063/ollama-0.6.1-py3-none-any.whl", hash = "sha256:fc4c984b345735c5486faeee67d8a265214a31cbb828167782dc642ce0a2bf8c", size = 14354, upload-time = "2025-11-13T23:02:16.292Z" }, ] -[[package]] -name = "ollama-hello" -version = "0.1.0" -source = { editable = "samples/ollama-hello" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-ollama" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "structlog" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-ollama", editable = "plugins/ollama" }, - { name = "pydantic", specifier = ">=2.10.5" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "structlog", specifier = ">=25.2.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - -[[package]] -name = "ollama-simple-embed" -version = "0.1.0" -source = { editable = "samples/ollama-simple-embed" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-ollama" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "structlog" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-ollama", editable = "plugins/ollama" }, - { name = "pydantic", specifier = ">=2.10.5" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "structlog", specifier = ">=25.2.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - [[package]] name = "openai" version = "2.17.0" @@ -5834,36 +5382,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/74/c3/24a2f845e3917201628ecaba4f18bab4d18a337834c1df2a159ee9d22a42/prometheus_client-0.24.1-py3-none-any.whl", hash = "sha256:150db128af71a5c2482b36e588fc8a6b95e498750da4b17065947c16070f4055", size = 64057, upload-time = "2026-01-14T15:26:24.42Z" }, ] -[[package]] -name = "prompt-demo" -version = "0.0.1" -source = { editable = "samples/prompt-demo" } -dependencies = [ - { name = "genkit" }, - { name = 
"genkit-plugin-google-genai" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "structlog" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, - { name = "pydantic", specifier = ">=2.10.5" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "structlog", specifier = ">=25.2.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - [[package]] name = "prompt-toolkit" version = "3.0.52" @@ -6018,82 +5536,725 @@ wheels = [ ] [[package]] -name = "psutil" -version = "7.2.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/aa/c6/d1ddf4abb55e93cebc4f2ed8b5d6dbad109ecb8d63748dd2b20ab5e57ebe/psutil-7.2.2.tar.gz", hash = "sha256:0746f5f8d406af344fd547f1c8daa5f5c33dbc293bb8d6a16d80b4bb88f59372", size = 493740, upload-time = "2026-01-28T18:14:54.428Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/51/08/510cbdb69c25a96f4ae523f733cdc963ae654904e8db864c07585ef99875/psutil-7.2.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2edccc433cbfa046b980b0df0171cd25bcaeb3a68fe9022db0979e7aa74a826b", size = 130595, upload-time = "2026-01-28T18:14:57.293Z" }, - { url = "https://files.pythonhosted.org/packages/d6/f5/97baea3fe7a5a9af7436301f85490905379b1c6f2dd51fe3ecf24b4c5fbf/psutil-7.2.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e78c8603dcd9a04c7364f1a3e670cea95d51ee865e4efb3556a3a63adef958ea", size = 131082, upload-time = "2026-01-28T18:14:59.732Z" }, - { url = "https://files.pythonhosted.org/packages/37/d6/246513fbf9fa174af531f28412297dd05241d97a75911ac8febefa1a53c6/psutil-7.2.2-cp313-cp313t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1a571f2330c966c62aeda00dd24620425d4b0cc86881c89861fbc04549e5dc63", size = 181476, upload-time = "2026-01-28T18:15:01.884Z" }, - { url = "https://files.pythonhosted.org/packages/b8/b5/9182c9af3836cca61696dabe4fd1304e17bc56cb62f17439e1154f225dd3/psutil-7.2.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:917e891983ca3c1887b4ef36447b1e0873e70c933afc831c6b6da078ba474312", size = 184062, upload-time = "2026-01-28T18:15:04.436Z" }, - { url = "https://files.pythonhosted.org/packages/16/ba/0756dca669f5a9300d0cbcbfae9a4c30e446dfc7440ffe43ded5724bfd93/psutil-7.2.2-cp313-cp313t-win_amd64.whl", hash = "sha256:ab486563df44c17f5173621c7b198955bd6b613fb87c71c161f827d3fb149a9b", size = 139893, upload-time = "2026-01-28T18:15:06.378Z" }, - { url = "https://files.pythonhosted.org/packages/1c/61/8fa0e26f33623b49949346de05ec1ddaad02ed8ba64af45f40a147dbfa97/psutil-7.2.2-cp313-cp313t-win_arm64.whl", hash = "sha256:ae0aefdd8796a7737eccea863f80f81e468a1e4cf14d926bd9b6f5f2d5f90ca9", size = 135589, upload-time = "2026-01-28T18:15:08.03Z" }, - { url = "https://files.pythonhosted.org/packages/81/69/ef179ab5ca24f32acc1dac0c247fd6a13b501fd5534dbae0e05a1c48b66d/psutil-7.2.2-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:eed63d3b4d62449571547b60578c5b2c4bcccc5387148db46e0c2313dad0ee00", size = 130664, upload-time = "2026-01-28T18:15:09.469Z" }, - { url = 
"https://files.pythonhosted.org/packages/7b/64/665248b557a236d3fa9efc378d60d95ef56dd0a490c2cd37dafc7660d4a9/psutil-7.2.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7b6d09433a10592ce39b13d7be5a54fbac1d1228ed29abc880fb23df7cb694c9", size = 131087, upload-time = "2026-01-28T18:15:11.724Z" }, - { url = "https://files.pythonhosted.org/packages/d5/2e/e6782744700d6759ebce3043dcfa661fb61e2fb752b91cdeae9af12c2178/psutil-7.2.2-cp314-cp314t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fa4ecf83bcdf6e6c8f4449aff98eefb5d0604bf88cb883d7da3d8d2d909546a", size = 182383, upload-time = "2026-01-28T18:15:13.445Z" }, - { url = "https://files.pythonhosted.org/packages/57/49/0a41cefd10cb7505cdc04dab3eacf24c0c2cb158a998b8c7b1d27ee2c1f5/psutil-7.2.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e452c464a02e7dc7822a05d25db4cde564444a67e58539a00f929c51eddda0cf", size = 185210, upload-time = "2026-01-28T18:15:16.002Z" }, - { url = "https://files.pythonhosted.org/packages/dd/2c/ff9bfb544f283ba5f83ba725a3c5fec6d6b10b8f27ac1dc641c473dc390d/psutil-7.2.2-cp314-cp314t-win_amd64.whl", hash = "sha256:c7663d4e37f13e884d13994247449e9f8f574bc4655d509c3b95e9ec9e2b9dc1", size = 141228, upload-time = "2026-01-28T18:15:18.385Z" }, - { url = "https://files.pythonhosted.org/packages/f2/fc/f8d9c31db14fcec13748d373e668bc3bed94d9077dbc17fb0eebc073233c/psutil-7.2.2-cp314-cp314t-win_arm64.whl", hash = "sha256:11fe5a4f613759764e79c65cf11ebdf26e33d6dd34336f8a337aa2996d71c841", size = 136284, upload-time = "2026-01-28T18:15:19.912Z" }, - { url = "https://files.pythonhosted.org/packages/e7/36/5ee6e05c9bd427237b11b3937ad82bb8ad2752d72c6969314590dd0c2f6e/psutil-7.2.2-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ed0cace939114f62738d808fdcecd4c869222507e266e574799e9c0faa17d486", size = 129090, upload-time = "2026-01-28T18:15:22.168Z" }, - { url = "https://files.pythonhosted.org/packages/80/c4/f5af4c1ca8c1eeb2e92ccca14ce8effdeec651d5ab6053c589b074eda6e1/psutil-7.2.2-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:1a7b04c10f32cc88ab39cbf606e117fd74721c831c98a27dc04578deb0c16979", size = 129859, upload-time = "2026-01-28T18:15:23.795Z" }, - { url = "https://files.pythonhosted.org/packages/b5/70/5d8df3b09e25bce090399cf48e452d25c935ab72dad19406c77f4e828045/psutil-7.2.2-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:076a2d2f923fd4821644f5ba89f059523da90dc9014e85f8e45a5774ca5bc6f9", size = 155560, upload-time = "2026-01-28T18:15:25.976Z" }, - { url = "https://files.pythonhosted.org/packages/63/65/37648c0c158dc222aba51c089eb3bdfa238e621674dc42d48706e639204f/psutil-7.2.2-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b0726cecd84f9474419d67252add4ac0cd9811b04d61123054b9fb6f57df6e9e", size = 156997, upload-time = "2026-01-28T18:15:27.794Z" }, - { url = "https://files.pythonhosted.org/packages/8e/13/125093eadae863ce03c6ffdbae9929430d116a246ef69866dad94da3bfbc/psutil-7.2.2-cp36-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:fd04ef36b4a6d599bbdb225dd1d3f51e00105f6d48a28f006da7f9822f2606d8", size = 148972, upload-time = "2026-01-28T18:15:29.342Z" }, - { url = "https://files.pythonhosted.org/packages/04/78/0acd37ca84ce3ddffaa92ef0f571e073faa6d8ff1f0559ab1272188ea2be/psutil-7.2.2-cp36-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b58fabe35e80b264a4e3bb23e6b96f9e45a3df7fb7eed419ac0e5947c61e47cc", size = 148266, upload-time = "2026-01-28T18:15:31.597Z" 
}, - { url = "https://files.pythonhosted.org/packages/b4/90/e2159492b5426be0c1fef7acba807a03511f97c5f86b3caeda6ad92351a7/psutil-7.2.2-cp37-abi3-win_amd64.whl", hash = "sha256:eb7e81434c8d223ec4a219b5fc1c47d0417b12be7ea866e24fb5ad6e84b3d988", size = 137737, upload-time = "2026-01-28T18:15:33.849Z" }, - { url = "https://files.pythonhosted.org/packages/8c/c7/7bb2e321574b10df20cbde462a94e2b71d05f9bbda251ef27d104668306a/psutil-7.2.2-cp37-abi3-win_arm64.whl", hash = "sha256:8c233660f575a5a89e6d4cb65d9f938126312bca76d8fe087b947b3a1aaac9ee", size = 134617, upload-time = "2026-01-28T18:15:36.514Z" }, +name = "provider-amazon-bedrock-hello" +version = "0.1.0" +source = { editable = "samples/provider-amazon-bedrock-hello" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-amazon-bedrock" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "structlog" }, + { name = "uvloop" }, ] -[[package]] -name = "ptyprocess" -version = "0.7.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762, upload-time = "2020-12-28T15:15:30.155Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993, upload-time = "2020-12-28T15:15:28.35Z" }, +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-amazon-bedrock", editable = "plugins/amazon-bedrock" }, + { name = "pydantic", specifier = ">=2.0.0" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "structlog", specifier = ">=24.0.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, ] +provides-extras = ["dev"] [[package]] -name = "pure-eval" -version = "0.2.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752, upload-time = "2024-07-21T12:58:21.801Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842, upload-time = "2024-07-21T12:58:20.04Z" }, +name = "provider-anthropic-hello" +version = "0.1.0" +source = { editable = "samples/provider-anthropic-hello" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-anthropic" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "structlog" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-anthropic", editable = "plugins/anthropic" }, + { name = "pydantic", specifier = ">=2.0.0" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "structlog", specifier = ">=24.0.0" }, + { name = "uvloop", specifier = 
">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, ] +provides-extras = ["dev"] [[package]] -name = "py-serializable" -version = "2.1.0" -source = { registry = "https://pypi.org/simple" } +name = "provider-cloudflare-workers-ai-hello" +version = "0.1.0" +source = { editable = "samples/provider-cloudflare-workers-ai-hello" } dependencies = [ - { name = "defusedxml" }, + { name = "genkit" }, + { name = "genkit-plugin-cloudflare-workers-ai" }, + { name = "rich" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/73/21/d250cfca8ff30c2e5a7447bc13861541126ce9bd4426cd5d0c9f08b5547d/py_serializable-2.1.0.tar.gz", hash = "sha256:9d5db56154a867a9b897c0163b33a793c804c80cee984116d02d49e4578fc103", size = 52368, upload-time = "2025-07-21T09:56:48.07Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9b/bf/7595e817906a29453ba4d99394e781b6fabe55d21f3c15d240f85dd06bb1/py_serializable-2.1.0-py3-none-any.whl", hash = "sha256:b56d5d686b5a03ba4f4db5e769dc32336e142fc3bd4d68a8c25579ebb0a67304", size = 23045, upload-time = "2025-07-21T09:56:46.848Z" }, + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-cloudflare-workers-ai", editable = "plugins/cloudflare-workers-ai" }, + { name = "rich", specifier = ">=13.0.0" }, ] [[package]] -name = "pyarrow" -version = "23.0.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/01/33/ffd9c3eb087fa41dd79c3cf20c4c0ae3cdb877c4f8e1107a446006344924/pyarrow-23.0.0.tar.gz", hash = "sha256:180e3150e7edfcd182d3d9afba72f7cf19839a497cc76555a8dce998a8f67615", size = 1167185, upload-time = "2026-01-18T16:19:42.218Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ae/2f/23e042a5aa99bcb15e794e14030e8d065e00827e846e53a66faec73c7cd6/pyarrow-23.0.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:cbdc2bf5947aa4d462adcf8453cf04aee2f7932653cb67a27acd96e5e8528a67", size = 34281861, upload-time = "2026-01-18T16:13:34.332Z" }, - { url = "https://files.pythonhosted.org/packages/8b/65/1651933f504b335ec9cd8f99463718421eb08d883ed84f0abd2835a16cad/pyarrow-23.0.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:4d38c836930ce15cd31dce20114b21ba082da231c884bdc0a7b53e1477fe7f07", size = 35825067, upload-time = "2026-01-18T16:13:42.549Z" }, - { url = "https://files.pythonhosted.org/packages/84/ec/d6fceaec050c893f4e35c0556b77d4cc9973fcc24b0a358a5781b1234582/pyarrow-23.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:4222ff8f76919ecf6c716175a0e5fddb5599faeed4c56d9ea41a2c42be4998b2", size = 44458539, upload-time = "2026-01-18T16:13:52.975Z" }, - { url = "https://files.pythonhosted.org/packages/fd/d9/369f134d652b21db62fe3ec1c5c2357e695f79eb67394b8a93f3a2b2cffa/pyarrow-23.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:87f06159cbe38125852657716889296c83c37b4d09a5e58f3d10245fd1f69795", size = 47535889, upload-time = "2026-01-18T16:14:03.693Z" }, - { url = "https://files.pythonhosted.org/packages/a3/95/f37b6a252fdbf247a67a78fb3f61a529fe0600e304c4d07741763d3522b1/pyarrow-23.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1675c374570d8b91ea6d4edd4608fa55951acd44e0c31bd146e091b4005de24f", size = 48157777, upload-time = "2026-01-18T16:14:12.483Z" }, - { url = "https://files.pythonhosted.org/packages/ab/ab/fb94923108c9c6415dab677cf1f066d3307798eafc03f9a65ab4abc61056/pyarrow-23.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:247374428fde4f668f138b04031a7e7077ba5fa0b5b1722fdf89a017bf0b7ee0", size = 50580441, upload-time = "2026-01-18T16:14:20.187Z" }, - { url = "https://files.pythonhosted.org/packages/ae/78/897ba6337b517fc8e914891e1bd918da1c4eb8e936a553e95862e67b80f6/pyarrow-23.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:de53b1bd3b88a2ee93c9af412c903e57e738c083be4f6392288294513cd8b2c1", size = 27530028, upload-time = "2026-01-18T16:14:27.353Z" }, - { url = "https://files.pythonhosted.org/packages/aa/c0/57fe251102ca834fee0ef69a84ad33cc0ff9d5dfc50f50b466846356ecd7/pyarrow-23.0.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5574d541923efcbfdf1294a2746ae3b8c2498a2dc6cd477882f6f4e7b1ac08d3", size = 34276762, upload-time = "2026-01-18T16:14:34.128Z" }, - { url = "https://files.pythonhosted.org/packages/f8/4e/24130286548a5bc250cbed0b6bbf289a2775378a6e0e6f086ae8c68fc098/pyarrow-23.0.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:2ef0075c2488932e9d3c2eb3482f9459c4be629aa673b725d5e3cf18f777f8e4", size = 35821420, upload-time = "2026-01-18T16:14:40.699Z" }, - { url = "https://files.pythonhosted.org/packages/ee/55/a869e8529d487aa2e842d6c8865eb1e2c9ec33ce2786eb91104d2c3e3f10/pyarrow-23.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:65666fc269669af1ef1c14478c52222a2aa5c907f28b68fb50a203c777e4f60c", size = 44457412, upload-time = "2026-01-18T16:14:49.051Z" }, - { url = "https://files.pythonhosted.org/packages/36/81/1de4f0edfa9a483bbdf0082a05790bd6a20ed2169ea12a65039753be3a01/pyarrow-23.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:4d85cb6177198f3812db4788e394b757223f60d9a9f5ad6634b3e32be1525803", size = 47534285, upload-time = "2026-01-18T16:14:56.748Z" }, - { url = "https://files.pythonhosted.org/packages/f2/04/464a052d673b5ece074518f27377861662449f3c1fdb39ce740d646fd098/pyarrow-23.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1a9ff6fa4141c24a03a1a434c63c8fa97ce70f8f36bccabc18ebba905ddf0f17", size = 48157913, upload-time = "2026-01-18T16:15:05.114Z" }, - { url = "https://files.pythonhosted.org/packages/f4/1b/32a4de9856ee6688c670ca2def588382e573cce45241a965af04c2f61687/pyarrow-23.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:84839d060a54ae734eb60a756aeacb62885244aaa282f3c968f5972ecc7b1ecc", size = 50582529, upload-time = "2026-01-18T16:15:12.846Z" }, +name = "provider-compat-oai-hello" +version = "0.1.0" +source = { editable = "samples/provider-compat-oai-hello" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-compat-oai" }, + { name = "httpx" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "structlog" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-compat-oai", editable = "plugins/compat-oai" }, + { name = "httpx", specifier = ">=0.28.1" }, + { name = "pydantic", specifier = ">=2.10.5" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "structlog", specifier = ">=25.2.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "provider-deepseek-hello" +version = "0.1.0" +source = { editable = "samples/provider-deepseek-hello" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-deepseek" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "structlog" }, + { name = "uvloop" }, +] + 
+[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-deepseek", editable = "plugins/deepseek" }, + { name = "pydantic", specifier = ">=2.0.0" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "structlog", specifier = ">=24.0.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "provider-firestore-retriever" +version = "0.1.0" +source = { editable = "samples/provider-firestore-retriever" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-firebase" }, + { name = "genkit-plugin-google-genai" }, + { name = "google-cloud-firestore" }, + { name = "rich" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-firebase", editable = "plugins/firebase" }, + { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, + { name = "google-cloud-firestore" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "provider-google-genai-code-execution" +version = "0.1.0" +source = { editable = "samples/provider-google-genai-code-execution" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-google-genai" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "structlog" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, + { name = "pydantic", specifier = ">=2.10.5" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "structlog", specifier = ">=25.2.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "provider-google-genai-context-caching" +version = "0.1.0" +source = { editable = "samples/provider-google-genai-context-caching" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-google-genai" }, + { name = "pydantic" }, + { name = "requests" }, + { name = "rich" }, + { name = "structlog" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, + { name = "pydantic", specifier = ">=2.10.5" }, + { name = "requests", specifier = ">=2.32.3" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "structlog", specifier = ">=25.2.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "provider-google-genai-hello" +version = "0.1.0" +source = { editable = "samples/provider-google-genai-hello" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-evaluators" }, + { name = "genkit-plugin-google-cloud" }, + { name = "genkit-plugin-google-genai" }, 
+ { name = "pydantic" }, + { name = "rich" }, + { name = "structlog" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-evaluators", editable = "plugins/evaluators" }, + { name = "genkit-plugin-google-cloud", editable = "plugins/google-cloud" }, + { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, + { name = "pydantic", specifier = ">=2.10.5" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "structlog", specifier = ">=25.2.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "provider-google-genai-media-models-demo" +version = "0.1.0" +source = { editable = "samples/provider-google-genai-media-models-demo" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-google-genai" }, + { name = "rich" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=4.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "provider-google-genai-vertexai-hello" +version = "0.1.0" +source = { editable = "samples/provider-google-genai-vertexai-hello" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-google-genai" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "structlog" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, + { name = "pydantic", specifier = ">=2.10.5" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "structlog", specifier = ">=25.2.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "provider-google-genai-vertexai-image" +version = "0.1.0" +source = { editable = "samples/provider-google-genai-vertexai-image" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-google-genai" }, + { name = "pillow" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, + { name = "pillow" }, + { name = "pydantic", specifier = ">=2.10.5" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "provider-huggingface-hello" +version = "0.1.0" +source = { editable = "samples/provider-huggingface-hello" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-huggingface" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "structlog" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, 
+] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-huggingface", editable = "plugins/huggingface" }, + { name = "pydantic", specifier = ">=2.0.0" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "structlog", specifier = ">=24.0.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "provider-microsoft-foundry-hello" +version = "0.0.0" +source = { editable = "samples/provider-microsoft-foundry-hello" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-microsoft-foundry" }, + { name = "pydantic" }, + { name = "rich" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-microsoft-foundry", editable = "plugins/microsoft-foundry" }, + { name = "pydantic" }, + { name = "rich", specifier = ">=13.0.0" }, +] + +[[package]] +name = "provider-mistral-hello" +version = "0.1.0" +source = { editable = "samples/provider-mistral-hello" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-mistral" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "structlog" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-mistral", editable = "plugins/mistral" }, + { name = "pydantic", specifier = ">=2.0.0" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "structlog", specifier = ">=24.0.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "provider-observability-hello" +version = "0.1.0" +source = { editable = "samples/provider-observability-hello" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-google-genai" }, + { name = "genkit-plugin-observability" }, + { name = "rich" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, + { name = "genkit-plugin-observability", editable = "plugins/observability" }, + { name = "rich", specifier = ">=13.0.0" }, +] + +[[package]] +name = "provider-ollama-hello" +version = "0.1.0" +source = { editable = "samples/provider-ollama-hello" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-ollama" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "structlog" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-ollama", editable = "plugins/ollama" }, + { name = "pydantic", specifier = ">=2.10.5" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "structlog", specifier = ">=25.2.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "provider-vertex-ai-model-garden" +version = "0.1.0" +source = { editable = "samples/provider-vertex-ai-model-garden" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-vertex-ai" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "uvloop" }, +] + 
+[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-vertex-ai", editable = "plugins/vertex-ai" }, + { name = "pydantic", specifier = ">=2.10.5" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "provider-vertex-ai-rerank-eval" +version = "0.1.0" +source = { editable = "samples/provider-vertex-ai-rerank-eval" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-google-genai" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "structlog" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, + { name = "pydantic", specifier = ">=2.10.5" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "structlog", specifier = ">=25.2.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "provider-vertex-ai-vector-search-bigquery" +version = "0.1.0" +source = { editable = "samples/provider-vertex-ai-vector-search-bigquery" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-google-genai" }, + { name = "genkit-plugin-vertex-ai" }, + { name = "google-cloud-aiplatform" }, + { name = "google-cloud-bigquery" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "strenum", marker = "python_full_version < '3.11'" }, + { name = "structlog" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, + { name = "genkit-plugin-vertex-ai", editable = "plugins/vertex-ai" }, + { name = "google-cloud-aiplatform" }, + { name = "google-cloud-bigquery" }, + { name = "pydantic", specifier = ">=2.10.5" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "strenum", marker = "python_full_version < '3.11'", specifier = ">=0.4.15" }, + { name = "structlog", specifier = ">=25.2.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "provider-vertex-ai-vector-search-firestore" +version = "0.1.0" +source = { editable = "samples/provider-vertex-ai-vector-search-firestore" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-google-genai" }, + { name = "genkit-plugin-vertex-ai" }, + { name = "google-cloud-aiplatform" }, + { name = "google-cloud-firestore" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "strenum", marker = "python_full_version < '3.11'" }, + { name = "structlog" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, + { name = "genkit-plugin-vertex-ai", editable = "plugins/vertex-ai" }, + { name = "google-cloud-aiplatform" }, + { name = 
"google-cloud-firestore" }, + { name = "pydantic", specifier = ">=2.10.5" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "strenum", marker = "python_full_version < '3.11'", specifier = ">=0.4.15" }, + { name = "structlog", specifier = ">=25.2.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "provider-xai-hello" +version = "0.1.0" +source = { editable = "samples/provider-xai-hello" } +dependencies = [ + { name = "genkit" }, + { name = "genkit-plugin-google-cloud" }, + { name = "genkit-plugin-xai" }, + { name = "pydantic" }, + { name = "rich" }, + { name = "structlog" }, + { name = "uvloop" }, +] + +[package.optional-dependencies] +dev = [ + { name = "watchdog" }, +] + +[package.metadata] +requires-dist = [ + { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-google-cloud", editable = "plugins/google-cloud" }, + { name = "genkit-plugin-xai", editable = "plugins/xai" }, + { name = "pydantic", specifier = ">=2.0.0" }, + { name = "rich", specifier = ">=13.0.0" }, + { name = "structlog", specifier = ">=24.0.0" }, + { name = "uvloop", specifier = ">=0.21.0" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "psutil" +version = "7.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/aa/c6/d1ddf4abb55e93cebc4f2ed8b5d6dbad109ecb8d63748dd2b20ab5e57ebe/psutil-7.2.2.tar.gz", hash = "sha256:0746f5f8d406af344fd547f1c8daa5f5c33dbc293bb8d6a16d80b4bb88f59372", size = 493740, upload-time = "2026-01-28T18:14:54.428Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/08/510cbdb69c25a96f4ae523f733cdc963ae654904e8db864c07585ef99875/psutil-7.2.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2edccc433cbfa046b980b0df0171cd25bcaeb3a68fe9022db0979e7aa74a826b", size = 130595, upload-time = "2026-01-28T18:14:57.293Z" }, + { url = "https://files.pythonhosted.org/packages/d6/f5/97baea3fe7a5a9af7436301f85490905379b1c6f2dd51fe3ecf24b4c5fbf/psutil-7.2.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e78c8603dcd9a04c7364f1a3e670cea95d51ee865e4efb3556a3a63adef958ea", size = 131082, upload-time = "2026-01-28T18:14:59.732Z" }, + { url = "https://files.pythonhosted.org/packages/37/d6/246513fbf9fa174af531f28412297dd05241d97a75911ac8febefa1a53c6/psutil-7.2.2-cp313-cp313t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1a571f2330c966c62aeda00dd24620425d4b0cc86881c89861fbc04549e5dc63", size = 181476, upload-time = "2026-01-28T18:15:01.884Z" }, + { url = "https://files.pythonhosted.org/packages/b8/b5/9182c9af3836cca61696dabe4fd1304e17bc56cb62f17439e1154f225dd3/psutil-7.2.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:917e891983ca3c1887b4ef36447b1e0873e70c933afc831c6b6da078ba474312", size = 184062, upload-time = "2026-01-28T18:15:04.436Z" }, + { url = "https://files.pythonhosted.org/packages/16/ba/0756dca669f5a9300d0cbcbfae9a4c30e446dfc7440ffe43ded5724bfd93/psutil-7.2.2-cp313-cp313t-win_amd64.whl", hash = "sha256:ab486563df44c17f5173621c7b198955bd6b613fb87c71c161f827d3fb149a9b", size = 139893, upload-time = "2026-01-28T18:15:06.378Z" }, + { url = "https://files.pythonhosted.org/packages/1c/61/8fa0e26f33623b49949346de05ec1ddaad02ed8ba64af45f40a147dbfa97/psutil-7.2.2-cp313-cp313t-win_arm64.whl", hash = 
"sha256:ae0aefdd8796a7737eccea863f80f81e468a1e4cf14d926bd9b6f5f2d5f90ca9", size = 135589, upload-time = "2026-01-28T18:15:08.03Z" }, + { url = "https://files.pythonhosted.org/packages/81/69/ef179ab5ca24f32acc1dac0c247fd6a13b501fd5534dbae0e05a1c48b66d/psutil-7.2.2-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:eed63d3b4d62449571547b60578c5b2c4bcccc5387148db46e0c2313dad0ee00", size = 130664, upload-time = "2026-01-28T18:15:09.469Z" }, + { url = "https://files.pythonhosted.org/packages/7b/64/665248b557a236d3fa9efc378d60d95ef56dd0a490c2cd37dafc7660d4a9/psutil-7.2.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7b6d09433a10592ce39b13d7be5a54fbac1d1228ed29abc880fb23df7cb694c9", size = 131087, upload-time = "2026-01-28T18:15:11.724Z" }, + { url = "https://files.pythonhosted.org/packages/d5/2e/e6782744700d6759ebce3043dcfa661fb61e2fb752b91cdeae9af12c2178/psutil-7.2.2-cp314-cp314t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fa4ecf83bcdf6e6c8f4449aff98eefb5d0604bf88cb883d7da3d8d2d909546a", size = 182383, upload-time = "2026-01-28T18:15:13.445Z" }, + { url = "https://files.pythonhosted.org/packages/57/49/0a41cefd10cb7505cdc04dab3eacf24c0c2cb158a998b8c7b1d27ee2c1f5/psutil-7.2.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e452c464a02e7dc7822a05d25db4cde564444a67e58539a00f929c51eddda0cf", size = 185210, upload-time = "2026-01-28T18:15:16.002Z" }, + { url = "https://files.pythonhosted.org/packages/dd/2c/ff9bfb544f283ba5f83ba725a3c5fec6d6b10b8f27ac1dc641c473dc390d/psutil-7.2.2-cp314-cp314t-win_amd64.whl", hash = "sha256:c7663d4e37f13e884d13994247449e9f8f574bc4655d509c3b95e9ec9e2b9dc1", size = 141228, upload-time = "2026-01-28T18:15:18.385Z" }, + { url = "https://files.pythonhosted.org/packages/f2/fc/f8d9c31db14fcec13748d373e668bc3bed94d9077dbc17fb0eebc073233c/psutil-7.2.2-cp314-cp314t-win_arm64.whl", hash = "sha256:11fe5a4f613759764e79c65cf11ebdf26e33d6dd34336f8a337aa2996d71c841", size = 136284, upload-time = "2026-01-28T18:15:19.912Z" }, + { url = "https://files.pythonhosted.org/packages/e7/36/5ee6e05c9bd427237b11b3937ad82bb8ad2752d72c6969314590dd0c2f6e/psutil-7.2.2-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ed0cace939114f62738d808fdcecd4c869222507e266e574799e9c0faa17d486", size = 129090, upload-time = "2026-01-28T18:15:22.168Z" }, + { url = "https://files.pythonhosted.org/packages/80/c4/f5af4c1ca8c1eeb2e92ccca14ce8effdeec651d5ab6053c589b074eda6e1/psutil-7.2.2-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:1a7b04c10f32cc88ab39cbf606e117fd74721c831c98a27dc04578deb0c16979", size = 129859, upload-time = "2026-01-28T18:15:23.795Z" }, + { url = "https://files.pythonhosted.org/packages/b5/70/5d8df3b09e25bce090399cf48e452d25c935ab72dad19406c77f4e828045/psutil-7.2.2-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:076a2d2f923fd4821644f5ba89f059523da90dc9014e85f8e45a5774ca5bc6f9", size = 155560, upload-time = "2026-01-28T18:15:25.976Z" }, + { url = "https://files.pythonhosted.org/packages/63/65/37648c0c158dc222aba51c089eb3bdfa238e621674dc42d48706e639204f/psutil-7.2.2-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b0726cecd84f9474419d67252add4ac0cd9811b04d61123054b9fb6f57df6e9e", size = 156997, upload-time = "2026-01-28T18:15:27.794Z" }, + { url = 
"https://files.pythonhosted.org/packages/8e/13/125093eadae863ce03c6ffdbae9929430d116a246ef69866dad94da3bfbc/psutil-7.2.2-cp36-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:fd04ef36b4a6d599bbdb225dd1d3f51e00105f6d48a28f006da7f9822f2606d8", size = 148972, upload-time = "2026-01-28T18:15:29.342Z" }, + { url = "https://files.pythonhosted.org/packages/04/78/0acd37ca84ce3ddffaa92ef0f571e073faa6d8ff1f0559ab1272188ea2be/psutil-7.2.2-cp36-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b58fabe35e80b264a4e3bb23e6b96f9e45a3df7fb7eed419ac0e5947c61e47cc", size = 148266, upload-time = "2026-01-28T18:15:31.597Z" }, + { url = "https://files.pythonhosted.org/packages/b4/90/e2159492b5426be0c1fef7acba807a03511f97c5f86b3caeda6ad92351a7/psutil-7.2.2-cp37-abi3-win_amd64.whl", hash = "sha256:eb7e81434c8d223ec4a219b5fc1c47d0417b12be7ea866e24fb5ad6e84b3d988", size = 137737, upload-time = "2026-01-28T18:15:33.849Z" }, + { url = "https://files.pythonhosted.org/packages/8c/c7/7bb2e321574b10df20cbde462a94e2b71d05f9bbda251ef27d104668306a/psutil-7.2.2-cp37-abi3-win_arm64.whl", hash = "sha256:8c233660f575a5a89e6d4cb65d9f938126312bca76d8fe087b947b3a1aaac9ee", size = 134617, upload-time = "2026-01-28T18:15:36.514Z" }, +] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762, upload-time = "2020-12-28T15:15:30.155Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993, upload-time = "2020-12-28T15:15:28.35Z" }, +] + +[[package]] +name = "pure-eval" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752, upload-time = "2024-07-21T12:58:21.801Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842, upload-time = "2024-07-21T12:58:20.04Z" }, +] + +[[package]] +name = "py-serializable" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "defusedxml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/73/21/d250cfca8ff30c2e5a7447bc13861541126ce9bd4426cd5d0c9f08b5547d/py_serializable-2.1.0.tar.gz", hash = "sha256:9d5db56154a867a9b897c0163b33a793c804c80cee984116d02d49e4578fc103", size = 52368, upload-time = "2025-07-21T09:56:48.07Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9b/bf/7595e817906a29453ba4d99394e781b6fabe55d21f3c15d240f85dd06bb1/py_serializable-2.1.0-py3-none-any.whl", hash = "sha256:b56d5d686b5a03ba4f4db5e769dc32336e142fc3bd4d68a8c25579ebb0a67304", size = 23045, upload-time = "2025-07-21T09:56:46.848Z" }, +] + +[[package]] +name = "pyarrow" +version = "23.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/01/33/ffd9c3eb087fa41dd79c3cf20c4c0ae3cdb877c4f8e1107a446006344924/pyarrow-23.0.0.tar.gz", hash = "sha256:180e3150e7edfcd182d3d9afba72f7cf19839a497cc76555a8dce998a8f67615", size = 1167185, upload-time = "2026-01-18T16:19:42.218Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ae/2f/23e042a5aa99bcb15e794e14030e8d065e00827e846e53a66faec73c7cd6/pyarrow-23.0.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:cbdc2bf5947aa4d462adcf8453cf04aee2f7932653cb67a27acd96e5e8528a67", size = 34281861, upload-time = "2026-01-18T16:13:34.332Z" }, + { url = "https://files.pythonhosted.org/packages/8b/65/1651933f504b335ec9cd8f99463718421eb08d883ed84f0abd2835a16cad/pyarrow-23.0.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:4d38c836930ce15cd31dce20114b21ba082da231c884bdc0a7b53e1477fe7f07", size = 35825067, upload-time = "2026-01-18T16:13:42.549Z" }, + { url = "https://files.pythonhosted.org/packages/84/ec/d6fceaec050c893f4e35c0556b77d4cc9973fcc24b0a358a5781b1234582/pyarrow-23.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:4222ff8f76919ecf6c716175a0e5fddb5599faeed4c56d9ea41a2c42be4998b2", size = 44458539, upload-time = "2026-01-18T16:13:52.975Z" }, + { url = "https://files.pythonhosted.org/packages/fd/d9/369f134d652b21db62fe3ec1c5c2357e695f79eb67394b8a93f3a2b2cffa/pyarrow-23.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:87f06159cbe38125852657716889296c83c37b4d09a5e58f3d10245fd1f69795", size = 47535889, upload-time = "2026-01-18T16:14:03.693Z" }, + { url = "https://files.pythonhosted.org/packages/a3/95/f37b6a252fdbf247a67a78fb3f61a529fe0600e304c4d07741763d3522b1/pyarrow-23.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1675c374570d8b91ea6d4edd4608fa55951acd44e0c31bd146e091b4005de24f", size = 48157777, upload-time = "2026-01-18T16:14:12.483Z" }, + { url = "https://files.pythonhosted.org/packages/ab/ab/fb94923108c9c6415dab677cf1f066d3307798eafc03f9a65ab4abc61056/pyarrow-23.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:247374428fde4f668f138b04031a7e7077ba5fa0b5b1722fdf89a017bf0b7ee0", size = 50580441, upload-time = "2026-01-18T16:14:20.187Z" }, + { url = "https://files.pythonhosted.org/packages/ae/78/897ba6337b517fc8e914891e1bd918da1c4eb8e936a553e95862e67b80f6/pyarrow-23.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:de53b1bd3b88a2ee93c9af412c903e57e738c083be4f6392288294513cd8b2c1", size = 27530028, upload-time = "2026-01-18T16:14:27.353Z" }, + { url = "https://files.pythonhosted.org/packages/aa/c0/57fe251102ca834fee0ef69a84ad33cc0ff9d5dfc50f50b466846356ecd7/pyarrow-23.0.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5574d541923efcbfdf1294a2746ae3b8c2498a2dc6cd477882f6f4e7b1ac08d3", size = 34276762, upload-time = "2026-01-18T16:14:34.128Z" }, + { url = "https://files.pythonhosted.org/packages/f8/4e/24130286548a5bc250cbed0b6bbf289a2775378a6e0e6f086ae8c68fc098/pyarrow-23.0.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:2ef0075c2488932e9d3c2eb3482f9459c4be629aa673b725d5e3cf18f777f8e4", size = 35821420, upload-time = "2026-01-18T16:14:40.699Z" }, + { url = "https://files.pythonhosted.org/packages/ee/55/a869e8529d487aa2e842d6c8865eb1e2c9ec33ce2786eb91104d2c3e3f10/pyarrow-23.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:65666fc269669af1ef1c14478c52222a2aa5c907f28b68fb50a203c777e4f60c", size = 44457412, upload-time = "2026-01-18T16:14:49.051Z" }, + { url = 
"https://files.pythonhosted.org/packages/36/81/1de4f0edfa9a483bbdf0082a05790bd6a20ed2169ea12a65039753be3a01/pyarrow-23.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:4d85cb6177198f3812db4788e394b757223f60d9a9f5ad6634b3e32be1525803", size = 47534285, upload-time = "2026-01-18T16:14:56.748Z" }, + { url = "https://files.pythonhosted.org/packages/f2/04/464a052d673b5ece074518f27377861662449f3c1fdb39ce740d646fd098/pyarrow-23.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1a9ff6fa4141c24a03a1a434c63c8fa97ce70f8f36bccabc18ebba905ddf0f17", size = 48157913, upload-time = "2026-01-18T16:15:05.114Z" }, + { url = "https://files.pythonhosted.org/packages/f4/1b/32a4de9856ee6688c670ca2def588382e573cce45241a965af04c2f61687/pyarrow-23.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:84839d060a54ae734eb60a756aeacb62885244aaa282f3c968f5972ecc7b1ecc", size = 50582529, upload-time = "2026-01-18T16:15:12.846Z" }, { url = "https://files.pythonhosted.org/packages/db/c7/d6581f03e9b9e44ea60b52d1750ee1a7678c484c06f939f45365a45f7eef/pyarrow-23.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:a149a647dbfe928ce8830a713612aa0b16e22c64feac9d1761529778e4d4eaa5", size = 27542646, upload-time = "2026-01-18T16:15:18.89Z" }, { url = "https://files.pythonhosted.org/packages/3d/bd/c861d020831ee57609b73ea721a617985ece817684dc82415b0bc3e03ac3/pyarrow-23.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:5961a9f646c232697c24f54d3419e69b4261ba8a8b66b0ac54a1851faffcbab8", size = 34189116, upload-time = "2026-01-18T16:15:28.054Z" }, { url = "https://files.pythonhosted.org/packages/8c/23/7725ad6cdcbaf6346221391e7b3eecd113684c805b0a95f32014e6fa0736/pyarrow-23.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:632b3e7c3d232f41d64e1a4a043fb82d44f8a349f339a1188c6a0dd9d2d47d8a", size = 35803831, upload-time = "2026-01-18T16:15:33.798Z" }, @@ -6818,45 +6979,15 @@ wheels = [ name = "readme-renderer" version = "44.0" source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "docutils" }, - { name = "nh3" }, - { name = "pygments" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/5a/a9/104ec9234c8448c4379768221ea6df01260cd6c2ce13182d4eac531c8342/readme_renderer-44.0.tar.gz", hash = "sha256:8712034eabbfa6805cacf1402b4eeb2a73028f72d1166d6f5cb7f9c047c5d1e1", size = 32056, upload-time = "2024-07-08T15:00:57.805Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e1/67/921ec3024056483db83953ae8e48079ad62b92db7880013ca77632921dd0/readme_renderer-44.0-py3-none-any.whl", hash = "sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151", size = 13310, upload-time = "2024-07-08T15:00:56.577Z" }, -] - -[[package]] -name = "realtime-tracing-demo" -version = "0.1.0" -source = { editable = "samples/realtime-tracing-demo" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-google-genai" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "structlog" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, +dependencies = [ + { name = "docutils" }, + { name = "nh3" }, + { name = "pygments" }, ] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, - { name = "pydantic", specifier = ">=2.10.5" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "structlog", specifier = ">=25.2.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 
'dev'", specifier = ">=6.0.0" }, +sdist = { url = "https://files.pythonhosted.org/packages/5a/a9/104ec9234c8448c4379768221ea6df01260cd6c2ce13182d4eac531c8342/readme_renderer-44.0.tar.gz", hash = "sha256:8712034eabbfa6805cacf1402b4eeb2a73028f72d1166d6f5cb7f9c047c5d1e1", size = 32056, upload-time = "2024-07-08T15:00:57.805Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e1/67/921ec3024056483db83953ae8e48079ad62b92db7880013ca77632921dd0/readme_renderer-44.0-py3-none-any.whl", hash = "sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151", size = 13310, upload-time = "2024-07-08T15:00:56.577Z" }, ] -provides-extras = ["dev"] [[package]] name = "referencing" @@ -7227,38 +7358,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" }, ] -[[package]] -name = "short-n-long" -version = "0.1.0" -source = { editable = "samples/short-n-long" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-google-genai" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "strenum", marker = "python_full_version < '3.11'" }, - { name = "structlog" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, - { name = "pydantic", specifier = ">=2.10.5" }, - { name = "rich", specifier = ">=14.0.0" }, - { name = "strenum", marker = "python_full_version < '3.11'", specifier = ">=0.4.15" }, - { name = "structlog", specifier = ">=25.2.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - [[package]] name = "six" version = "1.17.0" @@ -7511,34 +7610,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c7/18/c86eb8e0202e32dd3df50d43d7ff9854f8e0603945ff398974c1d91ac1ef/tomli_w-1.2.0-py3-none-any.whl", hash = "sha256:188306098d013b691fcadc011abd66727d3c414c571bb01b1a174ba8c983cf90", size = 6675, upload-time = "2025-01-15T12:07:22.074Z" }, ] -[[package]] -name = "tool-interrupts" -version = "0.1.0" -source = { editable = "samples/tool-interrupts" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-google-genai" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, - { name = "pydantic", specifier = ">=2.10.5" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - [[package]] name = "tornado" version = "6.5.4" @@ -7853,19 +7924,71 @@ wheels = [ ] [[package]] -name = "vertex-ai-vector-search-bigquery" +name = "virtualenv" +version = "20.36.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "distlib" }, + { name = "filelock" }, + { name = "platformdirs" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { 
url = "https://files.pythonhosted.org/packages/aa/a3/4d310fa5f00863544e1d0f4de93bddec248499ccf97d4791bc3122c9d4f3/virtualenv-20.36.1.tar.gz", hash = "sha256:8befb5c81842c641f8ee658481e42641c68b5eab3521d8e092d18320902466ba", size = 6032239, upload-time = "2026-01-09T18:21:01.296Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/2a/dc2228b2888f51192c7dc766106cd475f1b768c10caaf9727659726f7391/virtualenv-20.36.1-py3-none-any.whl", hash = "sha256:575a8d6b124ef88f6f51d56d656132389f961062a9177016a50e4f507bbcc19f", size = 6008258, upload-time = "2026-01-09T18:20:59.425Z" }, +] + +[[package]] +name = "watchdog" +version = "6.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220, upload-time = "2024-11-01T14:07:13.037Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/56/90994d789c61df619bfc5ce2ecdabd5eeff564e1eb47512bd01b5e019569/watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26", size = 96390, upload-time = "2024-11-01T14:06:24.793Z" }, + { url = "https://files.pythonhosted.org/packages/55/46/9a67ee697342ddf3c6daa97e3a587a56d6c4052f881ed926a849fcf7371c/watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112", size = 88389, upload-time = "2024-11-01T14:06:27.112Z" }, + { url = "https://files.pythonhosted.org/packages/44/65/91b0985747c52064d8701e1075eb96f8c40a79df889e59a399453adfb882/watchdog-6.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c897ac1b55c5a1461e16dae288d22bb2e412ba9807df8397a635d88f671d36c3", size = 89020, upload-time = "2024-11-01T14:06:29.876Z" }, + { url = "https://files.pythonhosted.org/packages/e0/24/d9be5cd6642a6aa68352ded4b4b10fb0d7889cb7f45814fb92cecd35f101/watchdog-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c", size = 96393, upload-time = "2024-11-01T14:06:31.756Z" }, + { url = "https://files.pythonhosted.org/packages/63/7a/6013b0d8dbc56adca7fdd4f0beed381c59f6752341b12fa0886fa7afc78b/watchdog-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2", size = 88392, upload-time = "2024-11-01T14:06:32.99Z" }, + { url = "https://files.pythonhosted.org/packages/d1/40/b75381494851556de56281e053700e46bff5b37bf4c7267e858640af5a7f/watchdog-6.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c", size = 89019, upload-time = "2024-11-01T14:06:34.963Z" }, + { url = "https://files.pythonhosted.org/packages/39/ea/3930d07dafc9e286ed356a679aa02d777c06e9bfd1164fa7c19c288a5483/watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948", size = 96471, upload-time = "2024-11-01T14:06:37.745Z" }, + { url = "https://files.pythonhosted.org/packages/12/87/48361531f70b1f87928b045df868a9fd4e253d9ae087fa4cf3f7113be363/watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860", size = 88449, upload-time = "2024-11-01T14:06:39.748Z" }, + { url = 
"https://files.pythonhosted.org/packages/5b/7e/8f322f5e600812e6f9a31b75d242631068ca8f4ef0582dd3ae6e72daecc8/watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0", size = 89054, upload-time = "2024-11-01T14:06:41.009Z" }, + { url = "https://files.pythonhosted.org/packages/68/98/b0345cabdce2041a01293ba483333582891a3bd5769b08eceb0d406056ef/watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c", size = 96480, upload-time = "2024-11-01T14:06:42.952Z" }, + { url = "https://files.pythonhosted.org/packages/85/83/cdf13902c626b28eedef7ec4f10745c52aad8a8fe7eb04ed7b1f111ca20e/watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134", size = 88451, upload-time = "2024-11-01T14:06:45.084Z" }, + { url = "https://files.pythonhosted.org/packages/fe/c4/225c87bae08c8b9ec99030cd48ae9c4eca050a59bf5c2255853e18c87b50/watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b", size = 89057, upload-time = "2024-11-01T14:06:47.324Z" }, + { url = "https://files.pythonhosted.org/packages/30/ad/d17b5d42e28a8b91f8ed01cb949da092827afb9995d4559fd448d0472763/watchdog-6.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c7ac31a19f4545dd92fc25d200694098f42c9a8e391bc00bdd362c5736dbf881", size = 87902, upload-time = "2024-11-01T14:06:53.119Z" }, + { url = "https://files.pythonhosted.org/packages/5c/ca/c3649991d140ff6ab67bfc85ab42b165ead119c9e12211e08089d763ece5/watchdog-6.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9513f27a1a582d9808cf21a07dae516f0fab1cf2d7683a742c498b93eedabb11", size = 88380, upload-time = "2024-11-01T14:06:55.19Z" }, + { url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079, upload-time = "2024-11-01T14:06:59.472Z" }, + { url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078, upload-time = "2024-11-01T14:07:01.431Z" }, + { url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076, upload-time = "2024-11-01T14:07:02.568Z" }, + { url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077, upload-time = "2024-11-01T14:07:03.893Z" }, + { url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078, upload-time = "2024-11-01T14:07:05.189Z" }, + { url = 
"https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077, upload-time = "2024-11-01T14:07:06.376Z" }, + { url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078, upload-time = "2024-11-01T14:07:07.547Z" }, + { url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065, upload-time = "2024-11-01T14:07:09.525Z" }, + { url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070, upload-time = "2024-11-01T14:07:10.686Z" }, + { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" }, +] + +[[package]] +name = "wcwidth" +version = "0.5.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c2/62/a7c072fbfefb2980a00f99ca994279cb9ecf310cb2e6b2a4d2a28fe192b3/wcwidth-0.5.3.tar.gz", hash = "sha256:53123b7af053c74e9fe2e92ac810301f6139e64379031f7124574212fb3b4091", size = 157587, upload-time = "2026-01-31T03:52:10.92Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3c/c1/d73f12f8cdb1891334a2ccf7389eed244d3941e74d80dd220badb937f3fb/wcwidth-0.5.3-py3-none-any.whl", hash = "sha256:d584eff31cd4753e1e5ff6c12e1edfdb324c995713f75d26c29807bb84bf649e", size = 92981, upload-time = "2026-01-31T03:52:09.14Z" }, +] + +[[package]] +name = "web-flask-hello" version = "0.1.0" -source = { editable = "samples/vertex-ai-vector-search-bigquery" } +source = { editable = "samples/web-flask-hello" } dependencies = [ + { name = "flask" }, { name = "genkit" }, + { name = "genkit-plugin-flask" }, { name = "genkit-plugin-google-genai" }, - { name = "genkit-plugin-vertex-ai" }, - { name = "google-cloud-aiplatform" }, - { name = "google-cloud-bigquery" }, - { name = "pydantic" }, { name = "rich" }, - { name = "strenum", marker = "python_full_version < '3.11'" }, - { name = "structlog" }, { name = "uvloop" }, ] @@ -7876,34 +7999,28 @@ dev = [ [package.metadata] requires-dist = [ + { name = "flask" }, { name = "genkit", editable = "packages/genkit" }, + { name = "genkit-plugin-flask", editable = "plugins/flask" }, { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, - { name = "genkit-plugin-vertex-ai", editable = "plugins/vertex-ai" }, - { name = "google-cloud-aiplatform" }, - { name = "google-cloud-bigquery" }, - { name = "pydantic", specifier = ">=2.10.5" }, { name = "rich", specifier = ">=13.0.0" }, - { name = "strenum", marker = "python_full_version < '3.11'", specifier = ">=0.4.15" }, - { name = "structlog", specifier = ">=25.2.0" }, { name = "uvloop", specifier = ">=0.21.0" }, { name = "watchdog", marker = "extra == 'dev'", specifier = 
">=6.0.0" }, ] provides-extras = ["dev"] [[package]] -name = "vertex-ai-vector-search-firestore" +name = "web-multi-server" version = "0.1.0" -source = { editable = "samples/vertex-ai-vector-search-firestore" } +source = { editable = "samples/web-multi-server" } dependencies = [ + { name = "asgiref" }, { name = "genkit" }, - { name = "genkit-plugin-google-genai" }, - { name = "genkit-plugin-vertex-ai" }, - { name = "google-cloud-aiplatform" }, - { name = "google-cloud-firestore" }, - { name = "pydantic" }, + { name = "litestar" }, { name = "rich" }, - { name = "strenum", marker = "python_full_version < '3.11'" }, + { name = "starlette" }, { name = "structlog" }, + { name = "uvicorn" }, { name = "uvloop" }, ] @@ -7914,29 +8031,28 @@ dev = [ [package.metadata] requires-dist = [ + { name = "asgiref", specifier = ">=3.8.1" }, { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, - { name = "genkit-plugin-vertex-ai", editable = "plugins/vertex-ai" }, - { name = "google-cloud-aiplatform" }, - { name = "google-cloud-firestore" }, - { name = "pydantic", specifier = ">=2.10.5" }, + { name = "litestar", specifier = ">=2.15.1" }, { name = "rich", specifier = ">=13.0.0" }, - { name = "strenum", marker = "python_full_version < '3.11'", specifier = ">=0.4.15" }, + { name = "starlette", specifier = ">=0.46.1" }, { name = "structlog", specifier = ">=25.2.0" }, + { name = "uvicorn", specifier = ">=0.34.0" }, { name = "uvloop", specifier = ">=0.21.0" }, { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, ] provides-extras = ["dev"] [[package]] -name = "vertexai-rerank-eval" +name = "web-short-n-long" version = "0.1.0" -source = { editable = "samples/vertexai-rerank-eval" } +source = { editable = "samples/web-short-n-long" } dependencies = [ { name = "genkit" }, { name = "genkit-plugin-google-genai" }, { name = "pydantic" }, { name = "rich" }, + { name = "strenum", marker = "python_full_version < '3.11'" }, { name = "structlog" }, { name = "uvloop" }, ] @@ -7951,69 +8067,14 @@ requires-dist = [ { name = "genkit", editable = "packages/genkit" }, { name = "genkit-plugin-google-genai", editable = "plugins/google-genai" }, { name = "pydantic", specifier = ">=2.10.5" }, - { name = "rich", specifier = ">=13.0.0" }, + { name = "rich", specifier = ">=14.0.0" }, + { name = "strenum", marker = "python_full_version < '3.11'", specifier = ">=0.4.15" }, { name = "structlog", specifier = ">=25.2.0" }, { name = "uvloop", specifier = ">=0.21.0" }, { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, ] provides-extras = ["dev"] -[[package]] -name = "virtualenv" -version = "20.36.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "distlib" }, - { name = "filelock" }, - { name = "platformdirs" }, - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/aa/a3/4d310fa5f00863544e1d0f4de93bddec248499ccf97d4791bc3122c9d4f3/virtualenv-20.36.1.tar.gz", hash = "sha256:8befb5c81842c641f8ee658481e42641c68b5eab3521d8e092d18320902466ba", size = 6032239, upload-time = "2026-01-09T18:21:01.296Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/6a/2a/dc2228b2888f51192c7dc766106cd475f1b768c10caaf9727659726f7391/virtualenv-20.36.1-py3-none-any.whl", hash = "sha256:575a8d6b124ef88f6f51d56d656132389f961062a9177016a50e4f507bbcc19f", size = 6008258, upload-time = "2026-01-09T18:20:59.425Z" }, -] - 
-[[package]] -name = "watchdog" -version = "6.0.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220, upload-time = "2024-11-01T14:07:13.037Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/0c/56/90994d789c61df619bfc5ce2ecdabd5eeff564e1eb47512bd01b5e019569/watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26", size = 96390, upload-time = "2024-11-01T14:06:24.793Z" }, - { url = "https://files.pythonhosted.org/packages/55/46/9a67ee697342ddf3c6daa97e3a587a56d6c4052f881ed926a849fcf7371c/watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112", size = 88389, upload-time = "2024-11-01T14:06:27.112Z" }, - { url = "https://files.pythonhosted.org/packages/44/65/91b0985747c52064d8701e1075eb96f8c40a79df889e59a399453adfb882/watchdog-6.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c897ac1b55c5a1461e16dae288d22bb2e412ba9807df8397a635d88f671d36c3", size = 89020, upload-time = "2024-11-01T14:06:29.876Z" }, - { url = "https://files.pythonhosted.org/packages/e0/24/d9be5cd6642a6aa68352ded4b4b10fb0d7889cb7f45814fb92cecd35f101/watchdog-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c", size = 96393, upload-time = "2024-11-01T14:06:31.756Z" }, - { url = "https://files.pythonhosted.org/packages/63/7a/6013b0d8dbc56adca7fdd4f0beed381c59f6752341b12fa0886fa7afc78b/watchdog-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2", size = 88392, upload-time = "2024-11-01T14:06:32.99Z" }, - { url = "https://files.pythonhosted.org/packages/d1/40/b75381494851556de56281e053700e46bff5b37bf4c7267e858640af5a7f/watchdog-6.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c", size = 89019, upload-time = "2024-11-01T14:06:34.963Z" }, - { url = "https://files.pythonhosted.org/packages/39/ea/3930d07dafc9e286ed356a679aa02d777c06e9bfd1164fa7c19c288a5483/watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948", size = 96471, upload-time = "2024-11-01T14:06:37.745Z" }, - { url = "https://files.pythonhosted.org/packages/12/87/48361531f70b1f87928b045df868a9fd4e253d9ae087fa4cf3f7113be363/watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860", size = 88449, upload-time = "2024-11-01T14:06:39.748Z" }, - { url = "https://files.pythonhosted.org/packages/5b/7e/8f322f5e600812e6f9a31b75d242631068ca8f4ef0582dd3ae6e72daecc8/watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0", size = 89054, upload-time = "2024-11-01T14:06:41.009Z" }, - { url = "https://files.pythonhosted.org/packages/68/98/b0345cabdce2041a01293ba483333582891a3bd5769b08eceb0d406056ef/watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c", size = 96480, upload-time = "2024-11-01T14:06:42.952Z" }, - { url = 
"https://files.pythonhosted.org/packages/85/83/cdf13902c626b28eedef7ec4f10745c52aad8a8fe7eb04ed7b1f111ca20e/watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134", size = 88451, upload-time = "2024-11-01T14:06:45.084Z" }, - { url = "https://files.pythonhosted.org/packages/fe/c4/225c87bae08c8b9ec99030cd48ae9c4eca050a59bf5c2255853e18c87b50/watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b", size = 89057, upload-time = "2024-11-01T14:06:47.324Z" }, - { url = "https://files.pythonhosted.org/packages/30/ad/d17b5d42e28a8b91f8ed01cb949da092827afb9995d4559fd448d0472763/watchdog-6.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c7ac31a19f4545dd92fc25d200694098f42c9a8e391bc00bdd362c5736dbf881", size = 87902, upload-time = "2024-11-01T14:06:53.119Z" }, - { url = "https://files.pythonhosted.org/packages/5c/ca/c3649991d140ff6ab67bfc85ab42b165ead119c9e12211e08089d763ece5/watchdog-6.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9513f27a1a582d9808cf21a07dae516f0fab1cf2d7683a742c498b93eedabb11", size = 88380, upload-time = "2024-11-01T14:06:55.19Z" }, - { url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079, upload-time = "2024-11-01T14:06:59.472Z" }, - { url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078, upload-time = "2024-11-01T14:07:01.431Z" }, - { url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076, upload-time = "2024-11-01T14:07:02.568Z" }, - { url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077, upload-time = "2024-11-01T14:07:03.893Z" }, - { url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078, upload-time = "2024-11-01T14:07:05.189Z" }, - { url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077, upload-time = "2024-11-01T14:07:06.376Z" }, - { url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078, upload-time = "2024-11-01T14:07:07.547Z" }, - { url = 
"https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065, upload-time = "2024-11-01T14:07:09.525Z" }, - { url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070, upload-time = "2024-11-01T14:07:10.686Z" }, - { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" }, -] - -[[package]] -name = "wcwidth" -version = "0.5.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c2/62/a7c072fbfefb2980a00f99ca994279cb9ecf310cb2e6b2a4d2a28fe192b3/wcwidth-0.5.3.tar.gz", hash = "sha256:53123b7af053c74e9fe2e92ac810301f6139e64379031f7124574212fb3b4091", size = 157587, upload-time = "2026-01-31T03:52:10.92Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/3c/c1/d73f12f8cdb1891334a2ccf7389eed244d3941e74d80dd220badb937f3fb/wcwidth-0.5.3-py3-none-any.whl", hash = "sha256:d584eff31cd4753e1e5ff6c12e1edfdb324c995713f75d26c29807bb84bf649e", size = 92981, upload-time = "2026-01-31T03:52:09.14Z" }, -] - [[package]] name = "webcolors" version = "25.10.0" @@ -8190,38 +8251,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/1f/f6/a933bd70f98e9cf3e08167fc5cd7aaaca49147e48411c0bd5ae701bb2194/wrapt-1.17.3-py3-none-any.whl", hash = "sha256:7171ae35d2c33d326ac19dd8facb1e82e5fd04ef8c6c0e394d7af55a55051c22", size = 23591, upload-time = "2025-08-12T05:53:20.674Z" }, ] -[[package]] -name = "xai-hello" -version = "0.1.0" -source = { editable = "samples/xai-hello" } -dependencies = [ - { name = "genkit" }, - { name = "genkit-plugin-google-cloud" }, - { name = "genkit-plugin-xai" }, - { name = "pydantic" }, - { name = "rich" }, - { name = "structlog" }, - { name = "uvloop" }, -] - -[package.optional-dependencies] -dev = [ - { name = "watchdog" }, -] - -[package.metadata] -requires-dist = [ - { name = "genkit", editable = "packages/genkit" }, - { name = "genkit-plugin-google-cloud", editable = "plugins/google-cloud" }, - { name = "genkit-plugin-xai", editable = "plugins/xai" }, - { name = "pydantic", specifier = ">=2.0.0" }, - { name = "rich", specifier = ">=13.0.0" }, - { name = "structlog", specifier = ">=24.0.0" }, - { name = "uvloop", specifier = ">=0.21.0" }, - { name = "watchdog", marker = "extra == 'dev'", specifier = ">=6.0.0" }, -] -provides-extras = ["dev"] - [[package]] name = "xai-sdk" version = "1.6.1"