diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 2049280..068462c 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -7,12 +7,53 @@ on:
branches: [main]
jobs:
+ lint:
+ name: Lint
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+
+ - name: Install linting tools
+ run: |
+ python -m pip install --upgrade pip
+ pip install ruff mypy
+
+ - name: Install packages for type checking
+ run: |
+ pip install pydantic langchain-core
+ cd packages/promptpack && pip install -e .
+ cd ../promptpack-langchain && pip install -e .
+
+ - name: Run ruff linter on packages
+ run: ruff check packages/
+
+ - name: Run ruff linter on examples
+ run: ruff check examples/
+
+ - name: Run ruff formatter check on packages
+ run: ruff format --check packages/
+
+ - name: Run ruff formatter check on examples
+ run: ruff format --check examples/
+
+ - name: Run mypy on promptpack
+ run: mypy packages/promptpack/src/promptpack --ignore-missing-imports
+
+ - name: Run mypy on promptpack-langchain
+ run: mypy packages/promptpack-langchain/src/promptpack_langchain --ignore-missing-imports
+
test:
+ name: Test (Python ${{ matrix.python-version }})
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
- python-version: ["3.10", "3.11", "3.12"]
+ python-version: ["3.10", "3.11", "3.12", "3.13"]
steps:
- uses: actions/checkout@v4
@@ -25,7 +66,7 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip
- pip install hatch
+ pip install pytest pytest-asyncio pytest-cov
- name: Install promptpack package
run: |
@@ -40,14 +81,22 @@ jobs:
- name: Run promptpack tests
run: |
cd packages/promptpack
- pytest tests/ -v --tb=short
+ pytest tests/ -v --tb=short --cov=src/promptpack --cov-report=xml:coverage-promptpack.xml
- name: Run promptpack-langchain tests
run: |
cd packages/promptpack-langchain
- pytest tests/ -v --tb=short
+ pytest tests/ -v --tb=short --cov=src/promptpack_langchain --cov-report=xml:coverage-langchain.xml
- lint:
+ - name: Upload coverage reports
+ uses: codecov/codecov-action@v4
+ if: matrix.python-version == '3.12'
+ with:
+ files: packages/promptpack/coverage-promptpack.xml,packages/promptpack-langchain/coverage-langchain.xml
+ fail_ci_if_error: false
+
+ examples:
+ name: Verify Examples
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
@@ -57,25 +106,21 @@ jobs:
with:
python-version: "3.12"
- - name: Install linting tools
+ - name: Install dependencies
run: |
python -m pip install --upgrade pip
- pip install ruff mypy
+ pip install pytest
- - name: Install packages for type checking
+ - name: Install packages
run: |
- pip install pydantic langchain-core
cd packages/promptpack && pip install -e .
cd ../promptpack-langchain && pip install -e .
- - name: Run ruff linter
- run: ruff check packages/
-
- - name: Run ruff formatter check
- run: ruff format --check packages/
+ - name: Run basic_usage example
+ run: python examples/basic_usage.py
- - name: Run mypy on promptpack
- run: mypy packages/promptpack/src/promptpack --ignore-missing-imports
+ - name: Run tools_example
+ run: python examples/tools_example.py
- - name: Run mypy on promptpack-langchain
- run: mypy packages/promptpack-langchain/src/promptpack_langchain --ignore-missing-imports
+ - name: Run validation_example
+ run: python examples/validation_example.py
diff --git a/docs/astro.config.mjs b/docs/astro.config.mjs
index b3da189..448c11c 100644
--- a/docs/astro.config.mjs
+++ b/docs/astro.config.mjs
@@ -23,6 +23,7 @@ export default defineConfig({
sidebar: [
{ label: 'Getting Started', autogenerate: { directory: 'getting-started' } },
{ label: 'Packages', autogenerate: { directory: 'packages' } },
+ { label: 'Examples', autogenerate: { directory: 'examples' } },
{ label: 'API Reference', autogenerate: { directory: 'api' } },
{ label: 'Contributors', autogenerate: { directory: 'contributors' } },
],
diff --git a/docs/src/content/docs/examples/basic-usage.md b/docs/src/content/docs/examples/basic-usage.md
new file mode 100644
index 0000000..023c2ac
--- /dev/null
+++ b/docs/src/content/docs/examples/basic-usage.md
@@ -0,0 +1,172 @@
+---
+title: Basic Usage
+description: Getting started with PromptPack Python
+sidebar:
+ order: 2
+---
+
+This example demonstrates the core PromptPack workflow: loading packs, creating templates, and formatting prompts.
+
+## Loading a PromptPack
+
+```python
+from pathlib import Path
+from promptpack import parse_promptpack
+
+# Load from a file
+pack = parse_promptpack("path/to/pack.json")
+
+# Or parse from a string
+from promptpack import parse_promptpack_string
+
+pack_json = '{"id": "my-pack", ...}'
+pack = parse_promptpack_string(pack_json)
+```
+
+## Creating Templates
+
+Use `PromptPackTemplate` to create LangChain-compatible templates:
+
+```python
+from promptpack_langchain import PromptPackTemplate
+
+# Create a template from a specific prompt in the pack
+template = PromptPackTemplate.from_promptpack(pack, "support")
+
+# Check input variables
+print(template.input_variables)  # ['role', 'issue_type']
+
+# Get LLM parameters
+params = template.get_parameters()
+print(params) # {'temperature': 0.7, 'max_tokens': 1500}
+```
+
+## Formatting Prompts
+
+Format the template with your variables:
+
+```python
+# Format with variables
+formatted = template.format(
+ role="customer support agent",
+ issue_type="billing"
+)
+
+print(formatted)
+```
+
+Output:
+```
+You are a customer support agent assistant for TechCorp.
+
+# Company Context
+TechCorp provides cloud infrastructure, SaaS products, and enterprise solutions.
+
+# Your Role
+Handle billing customer inquiries effectively.
+
+# Guidelines
+Maintain a professional yet friendly tone. Be concise and solution-oriented.
+```
+
+## Using Fragments
+
+PromptPacks support reusable fragments that are automatically resolved:
+
+```json
+{
+ "fragments": {
+ "company_context": "TechCorp provides cloud infrastructure...",
+ "tone_guidelines": "Maintain a professional yet friendly tone..."
+ },
+ "prompts": {
+ "support": {
+ "system_template": "{{fragment:company_context}}\n\n{{fragment:tone_guidelines}}"
+ }
+ }
+}
+```
+
+Fragments are resolved automatically when you call `template.format()`.
+
+## Model Overrides
+
+Templates can have model-specific configurations:
+
+```python
+# Create template with model-specific overrides
+template = PromptPackTemplate.from_promptpack(
+ pack,
+ "support",
+ model_name="gpt-4"
+)
+
+# The template and parameters will use GPT-4 specific settings
+params = template.get_parameters()
+```
+
+## Using with LangChain
+
+Convert to a ChatPromptTemplate for use with LangChain:
+
+```python
+from langchain_openai import ChatOpenAI
+
+# Create chat template
+chat_template = template.to_chat_prompt_template(
+ role="support agent",
+ company="Acme Corp"
+)
+
+# Create chain
+model = ChatOpenAI(
+ model="gpt-4o-mini",
+ temperature=template.get_parameters().get("temperature", 0.7)
+)
+
+chain = chat_template | model
+
+# Invoke
+response = chain.invoke({
+ "messages": [("human", "I was charged twice for my subscription")]
+})
+
+print(response.content)
+```
+
+## Complete Example
+
+Here's a condensed, runnable version of the `basic_usage.py` example:
+
+```python
+#!/usr/bin/env python3
+from pathlib import Path
+
+from promptpack import parse_promptpack
+from promptpack_langchain import PromptPackTemplate
+
+
+def main():
+ # Load PromptPack
+ pack_path = Path(__file__).parent / "packs" / "customer-support.json"
+ pack = parse_promptpack(pack_path)
+
+ print(f"Loaded pack: {pack.name} (v{pack.version})")
+ print(f"Available prompts: {list(pack.prompts.keys())}")
+
+ # Create template
+ template = PromptPackTemplate.from_promptpack(pack, "support")
+ print(f"Input variables: {template.input_variables}")
+ print(f"Parameters: {template.get_parameters()}")
+
+ # Format
+ formatted = template.format(
+ role="customer support agent",
+ issue_type="billing"
+ )
+ print(formatted)
+
+
+if __name__ == "__main__":
+ main()
+```
diff --git a/docs/src/content/docs/examples/index.md b/docs/src/content/docs/examples/index.md
new file mode 100644
index 0000000..9276eab
--- /dev/null
+++ b/docs/src/content/docs/examples/index.md
@@ -0,0 +1,45 @@
+---
+title: Examples Overview
+description: Learn how to use PromptPack Python through practical examples
+sidebar:
+ order: 1
+---
+
+This section contains practical examples demonstrating how to use PromptPack Python in real-world scenarios.
+
+## Available Examples
+
+### [Basic Usage](/promptpack-python/examples/basic-usage/)
+Learn the fundamentals of loading PromptPacks, creating templates, and formatting prompts with variables and fragments.
+
+### [Tools Integration](/promptpack-python/examples/tools/)
+Discover how to convert PromptPack tools to LangChain format, bind custom handlers, and use tools with agents.
+
+### [Validation](/promptpack-python/examples/validation/)
+Explore the validation system including banned words, length limits, and regex pattern matching.
+
+## Running the Examples
+
+All examples are located in the `examples/` directory of the repository. To run them:
+
+```bash
+# Clone the repository
+git clone https://github.com/AltairaLabs/promptpack-python.git
+cd promptpack-python
+
+# Install dependencies
+pip install -e packages/promptpack
+pip install -e packages/promptpack-langchain
+
+# Run an example
+python examples/basic_usage.py
+python examples/tools_example.py
+python examples/validation_example.py
+```
+
+## Example Packs
+
+The examples use PromptPack JSON files located in `examples/packs/`:
+
+- **customer-support.json** - Customer support prompts with fragments, validators, and multiple prompt types
+- **sales-assistant.json** - Sales assistant with CRM tools (customer lookup, inventory, orders)
diff --git a/docs/src/content/docs/examples/tools.md b/docs/src/content/docs/examples/tools.md
new file mode 100644
index 0000000..5365283
--- /dev/null
+++ b/docs/src/content/docs/examples/tools.md
@@ -0,0 +1,204 @@
+---
+title: Tools Integration
+description: Using PromptPack tools with LangChain
+sidebar:
+ order: 3
+---
+
+This example shows how to convert PromptPack tools to LangChain format and use them with agents.
+
+## Defining Tools in PromptPack
+
+Tools are defined in your PromptPack JSON:
+
+```json
+{
+ "tools": {
+ "lookup_customer": {
+ "name": "lookup_customer",
+ "description": "Look up customer information by ID",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "customer_id": {
+ "type": "string",
+ "description": "The unique customer ID"
+ }
+ },
+ "required": ["customer_id"]
+ }
+ }
+ },
+ "prompts": {
+ "sales": {
+ "tools": ["lookup_customer", "check_inventory"],
+ "tool_policy": {
+ "tool_choice": "auto",
+ "max_rounds": 5,
+ "blocklist": ["dangerous_tool"]
+ }
+ }
+ }
+}
+```
+
+## Converting Tools
+
+Convert PromptPack tools to LangChain format:
+
+```python
+from promptpack import parse_promptpack
+from promptpack_langchain import convert_tools
+
+pack = parse_promptpack("sales-assistant.json")
+
+# Convert all tools
+all_tools = convert_tools(pack)
+
+# Convert tools for a specific prompt (respects blocklist)
+prompt_tools = convert_tools(pack, prompt_name="sales")
+
+print(f"Tools: {[t.name for t in prompt_tools]}")
+```
+
+## Adding Tool Handlers
+
+Tools need handlers to execute. Provide them when converting:
+
+```python
+def lookup_customer(customer_id: str) -> str:
+ """Look up customer by ID."""
+    # Your implementation here (requires: import json)
+ return json.dumps({"id": customer_id, "name": "Alice"})
+
+def check_inventory(product_id: str) -> str:
+ """Check inventory for a product."""
+ return json.dumps({"product_id": product_id, "in_stock": True})
+
+# Map tool names to handlers
+handlers = {
+ "lookup_customer": lookup_customer,
+ "check_inventory": check_inventory,
+}
+
+# Convert with handlers
+tools = convert_tools(pack, prompt_name="sales", handlers=handlers)
+
+# Now tools are executable
+result = tools[0].invoke({"customer_id": "CUST-001"})
+print(result) # {"id": "CUST-001", "name": "Alice"}
+```
+
+## Tool Filtering
+
+Prompts can specify which tools are available and blocklisted:
+
+```python
+# Sales prompt has all tools
+sales_tools = convert_tools(pack, prompt_name="sales", handlers=handlers)
+print([t.name for t in sales_tools])
+# ['lookup_customer', 'check_inventory', 'create_order', 'calculate_discount']
+
+# Inquiry prompt has limited tools (read-only, create_order blocklisted)
+inquiry_tools = convert_tools(pack, prompt_name="inquiry", handlers=handlers)
+print([t.name for t in inquiry_tools])
+# ['lookup_customer', 'check_inventory', 'check_order_status']
+```
+
+## Using with LangChain Agents
+
+Bind tools to an LLM and create an agent:
+
+```python
+from langchain_openai import ChatOpenAI
+from promptpack_langchain import PromptPackTemplate
+
+# Create template and tools
+template = PromptPackTemplate.from_promptpack(pack, "sales")
+tools = convert_tools(pack, prompt_name="sales", handlers=handlers)
+
+# Create model with tools
+model = ChatOpenAI(model="gpt-4o-mini").bind_tools(tools)
+
+# Create chat template
+chat_template = template.to_chat_prompt_template(company="Acme Corp")
+
+# Build chain
+chain = chat_template | model
+
+# Invoke
+response = chain.invoke({
+ "messages": [("human", "Can you look up customer CUST-001?")]
+})
+
+# Handle tool calls
+if response.tool_calls:
+ for tool_call in response.tool_calls:
+ tool = next(t for t in tools if t.name == tool_call['name'])
+ result = tool.invoke(tool_call['args'])
+ print(f"Tool: {tool_call['name']}, Result: {result}")
+```
+
+## Complete Example
+
+Here's a complete tools example with a mock database:
+
+```python
+#!/usr/bin/env python3
+import json
+from pathlib import Path
+from typing import Any
+
+from promptpack import parse_promptpack
+from promptpack_langchain import PromptPackTemplate, convert_tools
+
+# Mock database
+MOCK_DB = {
+ "customers": {
+ "CUST-001": {"id": "CUST-001", "name": "Alice", "tier": "premium"},
+ },
+ "products": [
+ {"id": "PROD-001", "name": "Laptop", "price": 1299},
+ ],
+}
+
+
+def lookup_customer(customer_id: str, email: str | None = None) -> str:
+ customer = MOCK_DB["customers"].get(customer_id)
+ if not customer:
+ return json.dumps({"error": "Customer not found"})
+ return json.dumps(customer)
+
+
+def check_inventory(product_id: str, warehouse: str | None = None) -> str:
+ for product in MOCK_DB["products"]:
+ if product["id"] == product_id:
+ return json.dumps({**product, "in_stock": True, "quantity": 50})
+ return json.dumps({"error": "Product not found"})
+
+
+HANDLERS = {
+ "lookup_customer": lookup_customer,
+ "check_inventory": check_inventory,
+}
+
+
+def main():
+ pack = parse_promptpack("examples/packs/sales-assistant.json")
+
+ # Create template
+ template = PromptPackTemplate.from_promptpack(pack, "sales")
+ print(template.format(company="Acme Corp"))
+
+ # Convert tools with handlers
+ tools = convert_tools(pack, prompt_name="sales", handlers=HANDLERS)
+
+ # Execute a tool
+ lookup_tool = next(t for t in tools if t.name == "lookup_customer")
+ result = lookup_tool.invoke({"customer_id": "CUST-001"})
+ print(f"Customer lookup: {result}")
+
+
+if __name__ == "__main__":
+ main()
+```
diff --git a/docs/src/content/docs/examples/validation.md b/docs/src/content/docs/examples/validation.md
new file mode 100644
index 0000000..f40a536
--- /dev/null
+++ b/docs/src/content/docs/examples/validation.md
@@ -0,0 +1,275 @@
+---
+title: Validation
+description: Validating LLM outputs with PromptPack validators
+sidebar:
+ order: 4
+---
+
+PromptPack includes a powerful validation system for checking LLM outputs against rules like banned words, length limits, and regex patterns.
+
+## Validator Types
+
+### Banned Words
+
+Check for prohibited words in content:
+
+```python
+from promptpack import Validator
+from promptpack_langchain import run_validators
+
+validators = [
+ Validator(
+ type="banned_words",
+ enabled=True,
+ fail_on_violation=True, # Blocking violation
+ params={"words": ["inappropriate", "offensive", "banned"]}
+ )
+]
+
+result = run_validators("This is appropriate content", validators)
+print(result.is_valid) # True
+
+result = run_validators("This is inappropriate", validators)
+print(result.is_valid) # False
+print(result.violations[0].message)
+# "Content contains banned words: ['inappropriate']"
+```
+
+### Length Validators
+
+Enforce minimum and maximum content length:
+
+```python
+validators = [
+ Validator(
+ type="max_length",
+ enabled=True,
+ fail_on_violation=False, # Non-blocking warning
+ params={"max_characters": 500}
+ ),
+ Validator(
+ type="min_length",
+ enabled=True,
+ fail_on_violation=False,
+ params={"min_characters": 10}
+ )
+]
+
+result = run_validators("Hi", validators)
+print(result.is_valid) # True (non-blocking)
+print(result.violations[0].message)
+# "Content below min length: 2 < 10"
+```
+
+### Regex Patterns
+
+Validate content against regex patterns:
+
+```python
+validators = [
+ # Content MUST match this pattern
+ Validator(
+ type="regex_match",
+ enabled=True,
+ fail_on_violation=True,
+ params={
+ "pattern": r"^[A-Z]", # Must start with capital
+ "must_match": True
+ }
+ ),
+ # Content must NOT match this pattern
+ Validator(
+ type="regex_match",
+ enabled=True,
+ fail_on_violation=False,
+ params={
+ "pattern": r"password|secret", # Forbidden words
+ "must_match": False
+ }
+ )
+]
+
+# Passes - starts with capital, no forbidden words
+result = run_validators("Hello, how can I help?", validators)
+print(result.is_valid) # True
+
+# Fails - doesn't start with capital
+result = run_validators("hello there", validators)
+print(result.is_valid) # False
+```
+
+## Blocking vs Non-Blocking Violations
+
+Validators can be configured as blocking or non-blocking:
+
+```python
+validators = [
+ Validator(
+ type="banned_words",
+ enabled=True,
+ fail_on_violation=True, # BLOCKING - is_valid will be False
+ params={"words": ["bad"]}
+ ),
+ Validator(
+ type="max_length",
+ enabled=True,
+ fail_on_violation=False, # NON-BLOCKING - is_valid stays True
+ params={"max_characters": 10}
+ )
+]
+
+result = run_validators("This is way too long", validators)
+
+# Non-blocking violation only
+print(result.is_valid) # True
+print(result.has_blocking_violations) # False
+print(len(result.violations)) # 1
+
+result = run_validators("This is bad", validators)
+
+# Blocking violation
+print(result.is_valid) # False
+print(result.has_blocking_violations) # True
+```
+
+## ValidationRunnable
+
+Use `ValidationRunnable` in LangChain chains:
+
+```python
+from promptpack_langchain import ValidationRunnable
+
+validators = [
+ Validator(
+ type="banned_words",
+ enabled=True,
+ fail_on_violation=True,
+ params={"words": ["inappropriate"]}
+ )
+]
+
+runnable = ValidationRunnable(validators)
+
+# Sync
+result = runnable.invoke("Check this content")
+
+# Async
+result = await runnable.ainvoke("Check this content")
+
+if not result.is_valid:
+ print("Validation failed!")
+ for v in result.violations:
+ print(f" - {v.validator_type}: {v.message}")
+```
+
+## Defining Validators in PromptPack
+
+Validators can be defined directly in your PromptPack JSON:
+
+```json
+{
+ "prompts": {
+ "support": {
+ "system_template": "You are a support agent...",
+ "validators": [
+ {
+ "type": "banned_words",
+ "enabled": true,
+ "fail_on_violation": true,
+ "params": {
+ "words": ["inappropriate", "offensive"]
+ }
+ },
+ {
+ "type": "max_length",
+ "enabled": true,
+ "fail_on_violation": false,
+ "params": {
+ "max_characters": 2000
+ }
+ }
+ ]
+ }
+ }
+}
+```
+
+Then access them from the prompt:
+
+```python
+from promptpack import parse_promptpack
+from promptpack_langchain import run_validators
+
+pack = parse_promptpack("pack.json")
+prompt = pack.get_prompt("support")
+
+# Get validators from the prompt
+validators = prompt.validators
+
+# Validate LLM output
+llm_response = "Here is the response..."
+result = run_validators(llm_response, validators)
+```
+
+## Complete Example
+
+```python
+#!/usr/bin/env python3
+from promptpack import Validator
+from promptpack_langchain import ValidationRunnable, run_validators
+
+
+def main():
+ # Create validators
+ validators = [
+ Validator(
+ type="banned_words",
+ enabled=True,
+ fail_on_violation=True,
+ params={"words": ["inappropriate", "offensive"]}
+ ),
+ Validator(
+ type="max_length",
+ enabled=True,
+ fail_on_violation=False,
+ params={"max_characters": 500}
+ ),
+ Validator(
+ type="min_length",
+ enabled=True,
+ fail_on_violation=False,
+ params={"min_characters": 10}
+ ),
+ ]
+
+ # Test cases
+ test_contents = [
+ "This is a helpful response.", # Valid
+ "This is inappropriate.", # Banned word
+ "Hi", # Too short
+ "x" * 600, # Too long
+ ]
+
+ for content in test_contents:
+ result = run_validators(content, validators)
+ status = "PASS" if result.is_valid else "FAIL"
+ print(f"[{status}] '{content[:30]}...'")
+
+ for v in result.violations:
+ print(f" - {v.validator_type}: {v.message}")
+
+
+if __name__ == "__main__":
+ main()
+```
+
+Output:
+```
+[PASS] 'This is a helpful response....'
+[FAIL] 'This is inappropriate....'
+ - banned_words: Content contains banned words: ['inappropriate']
+[PASS] 'Hi...'
+ - min_length: Content below min length: 2 < 10
+[PASS] 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx...'
+ - max_length: Content exceeds max length: 600 > 500
+```
diff --git a/docs/src/content/docs/getting-started/quickstart.md b/docs/src/content/docs/getting-started/quickstart.md
index 8803ba7..3efc46e 100644
--- a/docs/src/content/docs/getting-started/quickstart.md
+++ b/docs/src/content/docs/getting-started/quickstart.md
@@ -15,31 +15,16 @@ from promptpack import parse_promptpack
# Parse from file
pack = parse_promptpack("path/to/pack.json")
-# Access prompts
-prompt = pack.prompts["support"]
-print(prompt.system_template)
-```
+# Access pack metadata
+print(f"Pack: {pack.name} v{pack.version}")
+print(f"Prompts: {list(pack.prompts.keys())}")
-### Rendering Templates
-
-```python
-from promptpack import parse_promptpack
-
-pack = parse_promptpack("path/to/pack.json")
-prompt = pack.prompts["support"]
-
-# Render template with variables
-rendered = prompt.render({
- "role": "support agent",
- "company": "Acme Inc."
-})
-
-print(rendered)
+# Access a specific prompt
+prompt = pack.get_prompt("support")
+print(prompt.system_template)
```
-## LangChain Integration
-
-### Creating Prompt Templates
+### Using with LangChain
```python
from promptpack import parse_promptpack
@@ -50,14 +35,19 @@ pack = parse_promptpack("path/to/pack.json")
# Create LangChain prompt template
template = PromptPackTemplate.from_promptpack(pack, "support")
-# Use with LangChain
-messages = template.format_messages(
+# Check input variables and parameters
+print(template.input_variables) # ['role', 'company']
+print(template.get_parameters()) # {'temperature': 0.7, 'max_tokens': 1500}
+
+# Format the template
+formatted = template.format(
role="support agent",
company="Acme Inc."
)
+print(formatted)
```
-### Converting Tools
+## Converting Tools
```python
from promptpack import parse_promptpack
@@ -65,12 +55,49 @@ from promptpack_langchain import convert_tools
pack = parse_promptpack("path/to/pack.json")
-# Convert tools to LangChain format
-tools = convert_tools(pack)
+# Define tool handlers
+def lookup_customer(customer_id: str) -> str:
+ return f"Customer: {customer_id}"
+
+handlers = {"lookup_customer": lookup_customer}
-# Use with LangChain agent
-from langchain.agents import create_tool_calling_agent
-agent = create_tool_calling_agent(llm, tools, prompt)
+# Convert tools to LangChain format with handlers
+tools = convert_tools(pack, prompt_name="sales", handlers=handlers)
+
+# Execute a tool
+result = tools[0].invoke({"customer_id": "CUST-001"})
+print(result)
+```
+
+## Validating Output
+
+```python
+from promptpack import Validator
+from promptpack_langchain import run_validators
+
+validators = [
+ Validator(
+ type="banned_words",
+ enabled=True,
+ fail_on_violation=True,
+ params={"words": ["inappropriate", "offensive"]}
+ ),
+ Validator(
+ type="max_length",
+ enabled=True,
+ fail_on_violation=False,
+ params={"max_characters": 500}
+ )
+]
+
+# Validate LLM output
+result = run_validators("Here is my response...", validators)
+
+if result.is_valid:
+ print("Output is valid!")
+else:
+ for v in result.violations:
+ print(f"Violation: {v.message}")
```
## Example PromptPack
@@ -79,25 +106,53 @@ Here's an example PromptPack JSON file:
```json
{
- "version": "1.0",
- "name": "support-agent",
+ "$schema": "https://promptpack.org/schema/v1.0/promptpack.schema.json",
+ "id": "customer-support",
+ "name": "Customer Support Pack",
+ "version": "1.0.0",
+ "template_engine": {
+ "version": "v1",
+ "syntax": "{{variable}}"
+ },
+ "fragments": {
+ "guidelines": "Be helpful and professional. Always verify customer identity."
+ },
"prompts": {
"support": {
- "system": "You are a {{role}} at {{company}}. Help customers with their questions.",
- "variables": {
- "role": {
+ "id": "support",
+ "name": "Support Agent",
+ "version": "1.0.0",
+ "system_template": "You are a {{role}} at {{company}}.\n\n{{fragment:guidelines}}",
+ "variables": [
+ {
+ "name": "role",
"type": "string",
+ "required": true,
"description": "The role of the agent"
},
- "company": {
+ {
+ "name": "company",
"type": "string",
+ "required": true,
"description": "The company name"
}
- }
+ ],
+ "parameters": {
+ "temperature": 0.7,
+ "max_tokens": 1500
+ },
+ "validators": [
+ {
+ "type": "banned_words",
+ "enabled": true,
+ "fail_on_violation": true,
+ "params": {"words": ["inappropriate"]}
+ }
+ ]
}
},
- "tools": [
- {
+ "tools": {
+ "search_docs": {
"name": "search_docs",
"description": "Search the documentation",
"parameters": {
@@ -111,6 +166,12 @@ Here's an example PromptPack JSON file:
"required": ["query"]
}
}
- ]
+ }
}
```
+
+## Next Steps
+
+- Check out the [Examples](/promptpack-python/examples/) for more detailed usage patterns
+- Learn about [Tools Integration](/promptpack-python/examples/tools/) for agent workflows
+- Explore [Validation](/promptpack-python/examples/validation/) for output guardrails
diff --git a/docs/src/content/docs/index.mdx b/docs/src/content/docs/index.mdx
index c1f623d..c96389b 100644
--- a/docs/src/content/docs/index.mdx
+++ b/docs/src/content/docs/index.mdx
@@ -28,8 +28,8 @@ import { Card, CardGrid } from '@astrojs/starlight/components';
Convert PromptPack tools to LangChain-compatible tool definitions.
-
- Render templates with variables and support for complex types.
+
+ Validate LLM outputs with banned words, length limits, and regex patterns.
Use reusable prompt fragments across your templates.
diff --git a/examples/README.md b/examples/README.md
new file mode 100644
index 0000000..2c7ec48
--- /dev/null
+++ b/examples/README.md
@@ -0,0 +1,92 @@
+# PromptPack Python Examples
+
+This directory contains example scripts demonstrating how to use the PromptPack Python library with LangChain.
+
+## Prerequisites
+
+Install the packages:
+
+```bash
+pip install promptpack promptpack-langchain
+```
+
+For examples that use OpenAI:
+
+```bash
+pip install langchain-openai
+export OPENAI_API_KEY=your-key-here
+```
+
+## Examples
+
+### Basic Usage (`basic_usage.py`)
+
+Demonstrates the core PromptPack workflow:
+- Loading a pack from JSON
+- Creating templates
+- Formatting prompts with variables
+- Using fragments
+
+```bash
+python examples/basic_usage.py
+```
+
+### Tools Integration (`tools_example.py`)
+
+Shows how to use PromptPack tools with LangChain:
+- Converting PromptPack tools to LangChain format
+- Binding handlers to tools
+- Tool filtering by prompt
+- Executing tools
+
+```bash
+python examples/tools_example.py
+```
+
+### Validation (`validation_example.py`)
+
+Demonstrates the validation system:
+- Creating validators (banned words, length limits, regex)
+- Running validators on content
+- Using ValidationRunnable in chains
+
+```bash
+python examples/validation_example.py
+```
+
+## Example Packs
+
+The `packs/` directory contains example PromptPack JSON files:
+
+- **customer-support.json**: Customer support prompts with multiple roles, fragments, and validators
+- **sales-assistant.json**: Sales assistant with CRM tools (customer lookup, inventory, orders)
+
+## Using with LangChain
+
+Each example shows both standalone usage and integration with LangChain. To use with an actual LLM, uncomment the LangChain sections and set your API key.
+
+Example with OpenAI:
+
+```python
+from langchain_openai import ChatOpenAI
+from promptpack import parse_promptpack
+from promptpack_langchain import PromptPackTemplate
+
+# Load pack and create template
+pack = parse_promptpack("examples/packs/customer-support.json")
+template = PromptPackTemplate.from_promptpack(pack, "support")
+
+# Create chain
+model = ChatOpenAI(model="gpt-4o-mini")
+chat_template = template.to_chat_prompt_template(
+ role="customer support agent",
+ issue_type="billing",
+)
+chain = chat_template | model
+
+# Invoke
+response = chain.invoke({
+ "messages": [("human", "I was charged twice")]
+})
+print(response.content)
+```
diff --git a/examples/basic_usage.py b/examples/basic_usage.py
new file mode 100644
index 0000000..b72cf6c
--- /dev/null
+++ b/examples/basic_usage.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python3
+# Copyright 2025 Altaira Labs
+# SPDX-License-Identifier: Apache-2.0
+
+"""
+Basic usage example for promptpack-langchain.
+
+This example shows the core PromptPack workflow:
+1. Load a pack from JSON
+2. Create a template
+3. Use it in a LangChain chain
+
+To run this example:
+ export OPENAI_API_KEY=your-key-here
+ python examples/basic_usage.py
+"""
+
+from pathlib import Path
+
+from promptpack import parse_promptpack
+from promptpack_langchain import PromptPackTemplate
+
+
+def main() -> None:
+ """Run basic usage example."""
+ print("=== Basic PromptPack Usage ===\n")
+
+ # 1. Load PromptPack from JSON file
+ pack_path = Path(__file__).parent / "packs" / "customer-support.json"
+ pack = parse_promptpack(pack_path)
+
+ print(f"Loaded pack: {pack.name} (v{pack.version})")
+ print(f"Available prompts: {list(pack.prompts.keys())}")
+
+ # 2. Create a template from the pack
+ template = PromptPackTemplate.from_promptpack(pack, "support")
+
+ print("\n--- Template Metadata ---")
+ print(f"Input variables: {template.input_variables}")
+ print(f"Parameters: {template.get_parameters()}")
+
+ # 3. Format the template (without calling the LLM)
+ formatted = template.format(role="customer support agent", issue_type="billing")
+ print("\n--- Formatted System Prompt ---")
+ print(formatted)
+
+ # 4. (Optional) Use with LangChain to make actual API calls
+ # Uncomment the following to use with OpenAI:
+ #
+ # model = ChatOpenAI(
+ # model="gpt-4o-mini",
+ # temperature=template.get_parameters().get("temperature", 0.7),
+ # )
+ #
+ # # Create a chat template and invoke
+ # chat_template = template.to_chat_prompt_template(
+ # role="customer support agent",
+ # issue_type="billing",
+ # )
+ # chain = chat_template | model
+ #
+ # response = chain.invoke({
+ # "messages": [("human", "I was charged twice for my subscription")]
+ # })
+ # print("\n--- LLM Response ---")
+ # print(response.content)
+
+ # 5. Use the escalation template
+ print("\n\n--- Escalation Template ---")
+ escalation_template = PromptPackTemplate.from_promptpack(pack, "escalation")
+ formatted_escalation = escalation_template.format(
+ issue_type="billing",
+ customer_tier="enterprise",
+ )
+ print(formatted_escalation)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/packs/customer-support.json b/examples/packs/customer-support.json
new file mode 100644
index 0000000..17c2ba4
--- /dev/null
+++ b/examples/packs/customer-support.json
@@ -0,0 +1,102 @@
+{
+ "$schema": "https://promptpack.org/schema/v1.0/promptpack.schema.json",
+ "id": "customer-support",
+ "name": "Customer Support Pack",
+ "version": "1.0.0",
+ "description": "Complete customer support prompt pack with multiple task types",
+ "template_engine": {
+ "version": "v1",
+ "syntax": "{{variable}}",
+ "features": ["basic_substitution", "fragments"]
+ },
+ "fragments": {
+ "company_context": "TechCorp provides cloud infrastructure, SaaS products, and enterprise solutions. Key products include: CloudCore (hosting), DataFlow (analytics), and SecureVault (security).",
+ "tone_guidelines": "Maintain a professional yet friendly tone. Be concise and solution-oriented. Acknowledge customer frustration when appropriate.",
+ "response_format": "Structure responses with: 1) Acknowledge the issue, 2) Explain the solution, 3) Provide actionable next steps."
+ },
+ "prompts": {
+ "support": {
+ "id": "support",
+ "name": "Support Agent",
+ "description": "General customer support assistant for handling inquiries",
+ "version": "1.0.0",
+ "system_template": "You are a {{role}} assistant for TechCorp.\n\n# Company Context\n{{fragment:company_context}}\n\n# Your Role\nHandle {{issue_type}} customer inquiries effectively.\n\n# Guidelines\n{{fragment:tone_guidelines}}\n\n{{fragment:response_format}}",
+ "variables": [
+ {
+ "name": "role",
+ "type": "string",
+ "required": true,
+ "description": "The role of the assistant",
+ "example": "customer support"
+ },
+ {
+ "name": "issue_type",
+ "type": "string",
+ "required": false,
+ "default": "general",
+ "description": "Type of issue to handle",
+ "validation": {
+ "enum": ["billing", "technical", "general", "account"]
+ }
+ }
+ ],
+ "parameters": {
+ "temperature": 0.7,
+ "max_tokens": 1500
+ },
+ "validators": [
+ {
+ "type": "banned_words",
+ "enabled": true,
+ "fail_on_violation": true,
+ "params": {
+ "words": ["inappropriate", "offensive"]
+ }
+ },
+ {
+ "type": "max_length",
+ "enabled": true,
+ "fail_on_violation": false,
+ "params": {
+ "max_characters": 2000
+ }
+ }
+ ]
+ },
+ "escalation": {
+ "id": "escalation",
+ "name": "Escalation Handler",
+ "description": "Handles escalated customer issues",
+ "version": "1.0.0",
+ "system_template": "You are a senior support specialist handling escalated {{issue_type}} issues.\n\nCustomer tier: {{customer_tier}}\n\nBe empathetic and solution-focused. Prioritize customer satisfaction.",
+ "variables": [
+ {
+ "name": "issue_type",
+ "type": "string",
+ "required": true,
+ "validation": {
+ "enum": ["billing", "technical", "account"]
+ }
+ },
+ {
+ "name": "customer_tier",
+ "type": "string",
+ "required": false,
+ "default": "standard",
+ "validation": {
+ "enum": ["standard", "premium", "enterprise"]
+ }
+ }
+ ],
+ "parameters": {
+ "temperature": 0.6,
+ "max_tokens": 2000
+ }
+ }
+ },
+ "metadata": {
+ "domain": "customer-service",
+ "language": "en",
+ "tags": ["support", "customer-service", "helpdesk"]
+ }
+}
diff --git a/examples/packs/sales-assistant.json b/examples/packs/sales-assistant.json
new file mode 100644
index 0000000..8d949df
--- /dev/null
+++ b/examples/packs/sales-assistant.json
@@ -0,0 +1,212 @@
+{
+ "$schema": "https://promptpack.org/schema/v1.0/promptpack.schema.json",
+ "id": "sales-assistant",
+ "name": "Sales Assistant Pack",
+ "version": "1.0.0",
+ "description": "Complete sales assistant with CRM integration tools",
+ "template_engine": {
+ "version": "v1",
+ "syntax": "{{variable}}",
+ "features": ["basic_substitution"]
+ },
+ "tools": {
+ "lookup_customer": {
+ "name": "lookup_customer",
+ "description": "Look up customer information by customer ID or email address. Returns customer details including name, email, account tier, and order history.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "customer_id": {
+ "type": "string",
+ "description": "The unique customer ID (e.g., CUST-12345)"
+ },
+ "email": {
+ "type": "string",
+          "description": "Customer email address used to cross-check the lookup (customer_id is still required)"
+ }
+ },
+ "required": ["customer_id"]
+ }
+ },
+ "check_inventory": {
+ "name": "check_inventory",
+ "description": "Check current inventory levels for a product. Returns available quantity, warehouse location, and estimated restock date if out of stock.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "product_id": {
+ "type": "string",
+ "description": "The product SKU or ID (e.g., PRD-ABC-123)"
+ },
+ "warehouse": {
+ "type": "string",
+ "description": "Specific warehouse code to check (optional, defaults to all warehouses)"
+ }
+ },
+ "required": ["product_id"]
+ }
+ },
+ "create_order": {
+ "name": "create_order",
+ "description": "Create a new sales order for a customer. Returns order confirmation with order ID and estimated delivery date.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "customer_id": {
+ "type": "string",
+ "description": "Customer ID for the order"
+ },
+ "items": {
+ "type": "array",
+ "description": "List of items to include in the order",
+ "items": {
+ "type": "object",
+ "properties": {
+ "product_id": {
+ "type": "string",
+ "description": "Product SKU"
+ },
+ "quantity": {
+ "type": "number",
+ "description": "Quantity to order"
+ }
+ },
+ "required": ["product_id", "quantity"]
+ }
+ },
+ "shipping_address": {
+ "type": "string",
+ "description": "Shipping address (optional if using customer default)"
+ },
+ "priority": {
+ "type": "string",
+ "enum": ["standard", "express", "overnight"],
+ "description": "Shipping priority level"
+ }
+ },
+ "required": ["customer_id", "items"]
+ }
+ },
+ "calculate_discount": {
+ "name": "calculate_discount",
+ "description": "Calculate available discounts for a customer and order. Returns discount percentage and any applicable promo codes.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "customer_id": {
+ "type": "string",
+ "description": "Customer ID to check for loyalty discounts"
+ },
+ "order_total": {
+ "type": "number",
+ "description": "Total order amount before discounts"
+ },
+ "promo_code": {
+ "type": "string",
+ "description": "Optional promo code to validate"
+ }
+ },
+ "required": ["customer_id", "order_total"]
+ }
+ },
+ "check_order_status": {
+ "name": "check_order_status",
+ "description": "Check the current status of an existing order. Returns order status, tracking number, and estimated delivery date.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "order_id": {
+ "type": "string",
+ "description": "The order ID to check (e.g., ORD-2025-12345)"
+ }
+ },
+ "required": ["order_id"]
+ }
+ }
+ },
+ "prompts": {
+ "sales": {
+ "id": "sales",
+ "name": "Sales Representative",
+ "description": "Full-featured sales agent with access to all CRM tools",
+ "version": "1.0.0",
+ "system_template": "You are a {{role}} for {{company}}. You help customers with orders, product inquiries, and account management.\n\nYou have access to the following capabilities:\n- Look up customer information\n- Check product inventory\n- Create new orders\n- Calculate discounts\n- Check order status\n\nAlways be professional, friendly, and proactive in helping customers.",
+ "variables": [
+ {
+ "name": "role",
+ "type": "string",
+ "required": false,
+ "default": "sales representative",
+ "description": "The role title"
+ },
+ {
+ "name": "company",
+ "type": "string",
+ "required": true,
+ "description": "Company name"
+ }
+ ],
+ "tools": [
+ "lookup_customer",
+ "check_inventory",
+ "create_order",
+ "calculate_discount",
+ "check_order_status"
+ ],
+ "tool_policy": {
+ "tool_choice": "auto",
+ "max_rounds": 5,
+ "max_tool_calls_per_turn": 10,
+ "blocklist": []
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "max_tokens": 1500
+ },
+ "validators": [
+ {
+ "type": "banned_words",
+ "enabled": true,
+ "fail_on_violation": false,
+ "params": {
+ "words": ["inappropriate", "unprofessional"]
+ }
+ }
+ ]
+ },
+ "inquiry": {
+ "id": "inquiry",
+ "name": "Customer Inquiry Handler",
+ "description": "Read-only customer service agent for answering questions",
+ "version": "1.0.0",
+ "system_template": "You are a customer service agent for {{company}}. Help customers by answering questions about products, inventory, and existing orders.\n\nYou can look up information but cannot create or modify orders. For order changes, direct customers to speak with a sales representative.",
+ "variables": [
+ {
+ "name": "company",
+ "type": "string",
+ "required": true
+ }
+ ],
+ "tools": [
+ "lookup_customer",
+ "check_inventory",
+ "check_order_status"
+ ],
+ "tool_policy": {
+ "tool_choice": "auto",
+ "max_rounds": 3,
+ "max_tool_calls_per_turn": 5,
+ "blocklist": ["create_order"]
+ },
+ "parameters": {
+ "temperature": 0.6,
+ "max_tokens": 1000
+ }
+ }
+ },
+ "metadata": {
+ "domain": "sales",
+ "language": "en",
+ "tags": ["sales", "crm", "orders", "customer-service"]
+ }
+}
diff --git a/examples/tools_example.py b/examples/tools_example.py
new file mode 100644
index 0000000..4a60d7f
--- /dev/null
+++ b/examples/tools_example.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python3
+# Copyright 2025 Altaira Labs
+# SPDX-License-Identifier: Apache-2.0
+
+"""
+Tools integration example for promptpack-langchain.
+
+This example shows how to use PromptPack tools with LangChain:
+1. Load a pack with tool definitions
+2. Convert tools to LangChain format
+3. Use tools with an LLM
+
+To run this example:
+ export OPENAI_API_KEY=your-key-here
+ python examples/tools_example.py
+"""
+
+import json
+from pathlib import Path
+from typing import Any
+
+from promptpack import parse_promptpack
+from promptpack_langchain import PromptPackTemplate, convert_tools
+
+# Mock database for the example
+MOCK_DB = {
+ "customers": {
+ "CUST-001": {
+ "id": "CUST-001",
+ "name": "Alice Johnson",
+ "email": "alice@example.com",
+ "tier": "premium",
+ },
+ "CUST-002": {
+ "id": "CUST-002",
+ "name": "Bob Smith",
+ "email": "bob@example.com",
+ "tier": "standard",
+ },
+ },
+ "products": [
+ {"id": "PROD-001", "name": "Laptop Pro", "category": "computers", "price": 1299},
+ {"id": "PROD-002", "name": "Wireless Mouse", "category": "accessories", "price": 29},
+ {"id": "PROD-003", "name": "USB-C Cable", "category": "accessories", "price": 15},
+ ],
+ "orders": [],
+}
+
+
+def lookup_customer(customer_id: str, email: str | None = None) -> str:
+    """Look up customer by ID (the email argument is accepted but unused by this mock)."""
+ customer = MOCK_DB["customers"].get(customer_id)
+ if not customer:
+ return json.dumps({"error": "Customer not found"})
+ return json.dumps(customer)
+
+
+def check_inventory(product_id: str, warehouse: str | None = None) -> str:
+ """Check inventory for a product."""
+ for product in MOCK_DB["products"]:
+ if product["id"] == product_id:
+ return json.dumps({**product, "in_stock": True, "quantity": 50})
+ return json.dumps({"error": "Product not found"})
+
+
+def create_order(customer_id: str, items: list[dict[str, Any]], **kwargs: Any) -> str:
+ """Create a new order."""
+ import time
+
+ order = {
+ "order_id": f"ORD-{int(time.time())}",
+ "customer_id": customer_id,
+ "items": items,
+ "status": "created",
+ "created_at": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
+ }
+ MOCK_DB["orders"].append(order)
+ return json.dumps(order)
+
+
+def calculate_discount(customer_id: str, order_total: float, promo_code: str | None = None) -> str:
+ """Calculate discount for a customer."""
+ customer = MOCK_DB["customers"].get(customer_id)
+ discount = 0.15 if customer and customer.get("tier") == "premium" else 0.05
+ return json.dumps(
+ {
+ "discount_percentage": discount * 100,
+ "discount_amount": order_total * discount,
+ }
+ )
+
+
+def check_order_status(order_id: str) -> str:
+ """Check status of an order."""
+ for order in MOCK_DB["orders"]:
+ if order["order_id"] == order_id:
+ return json.dumps({**order, "tracking": "TRK-123456", "status": "shipped"})
+ return json.dumps({"error": "Order not found"})
+
+
+# Map tool names to handler functions
+TOOL_HANDLERS = {
+ "lookup_customer": lookup_customer,
+ "check_inventory": check_inventory,
+ "create_order": create_order,
+ "calculate_discount": calculate_discount,
+ "check_order_status": check_order_status,
+}
+
+
+def main() -> None:
+ """Run tools integration example."""
+ print("=== PromptPack Tools Integration ===\n")
+
+ # 1. Load the sales assistant pack
+ pack_path = Path(__file__).parent / "packs" / "sales-assistant.json"
+ pack = parse_promptpack(pack_path)
+
+ print(f"Loaded pack: {pack.name}")
+ print(f"Available tools: {list(pack.tools.keys()) if pack.tools else []}")
+
+ # 2. Create a template
+ template = PromptPackTemplate.from_promptpack(pack, "sales")
+
+ print("\n--- System Prompt ---")
+ formatted = template.format(company="Acme Corp")
+ print(formatted)
+
+ # 3. Convert tools to LangChain format (with handlers)
+ tools = convert_tools(pack, prompt_name="sales", handlers=TOOL_HANDLERS)
+
+ print(f"\n--- Converted Tools ({len(tools)}) ---")
+ for tool in tools:
+ print(f" - {tool.name}: {tool.description[:50]}...")
+
+ # 4. Demonstrate tool execution
+ print("\n--- Tool Execution Demo ---")
+
+ # Look up a customer
+ lookup_tool = next(t for t in tools if t.name == "lookup_customer")
+ result = lookup_tool.invoke({"customer_id": "CUST-001"})
+ print(f"\nLookup customer CUST-001: {result}")
+
+ # Check inventory
+ inventory_tool = next(t for t in tools if t.name == "check_inventory")
+ result = inventory_tool.invoke({"product_id": "PROD-001"})
+ print(f"\nCheck inventory PROD-001: {result}")
+
+ # Create an order
+ order_tool = next(t for t in tools if t.name == "create_order")
+ result = order_tool.invoke(
+ {
+ "customer_id": "CUST-001",
+ "items": [{"product_id": "PROD-001", "quantity": 1}],
+ }
+ )
+ print(f"\nCreate order: {result}")
+
+ # 5. Show tool filtering for different prompts
+ print("\n--- Tool Filtering by Prompt ---")
+
+ # Sales prompt has all tools
+ sales_tools = convert_tools(pack, prompt_name="sales", handlers=TOOL_HANDLERS)
+ print(f"Sales prompt tools: {[t.name for t in sales_tools]}")
+
+ # Inquiry prompt has limited tools (read-only)
+ inquiry_tools = convert_tools(pack, prompt_name="inquiry", handlers=TOOL_HANDLERS)
+ print(f"Inquiry prompt tools: {[t.name for t in inquiry_tools]}")
+
+ # 6. (Optional) Use with LangChain to make actual API calls
+ # Uncomment to use with OpenAI:
+ #
+ # from langchain_openai import ChatOpenAI
+ #
+ # model = ChatOpenAI(model="gpt-4o-mini").bind_tools(tools)
+ # chat_template = template.to_chat_prompt_template(company="Acme Corp")
+ # chain = chat_template | model
+ #
+ # response = chain.invoke({
+ # "messages": [("human", "Can you look up customer CUST-001?")]
+ # })
+ #
+ # if response.tool_calls:
+ # for tool_call in response.tool_calls:
+ # print(f"Tool call: {tool_call['name']}({tool_call['args']})")
+ # # Execute the tool
+ # tool = next(t for t in tools if t.name == tool_call['name'])
+ # result = tool.invoke(tool_call['args'])
+ # print(f"Result: {result}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/validation_example.py b/examples/validation_example.py
new file mode 100644
index 0000000..e3f184d
--- /dev/null
+++ b/examples/validation_example.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+# Copyright 2025 Altaira Labs
+# SPDX-License-Identifier: Apache-2.0
+
+"""
+Validation example for promptpack-langchain.
+
+This example shows how to use PromptPack validators:
+1. Run validators on content
+2. Handle validation results
+3. Use ValidationRunnable in a chain
+
+To run this example:
+ python examples/validation_example.py
+"""
+
+from promptpack import Validator
+from promptpack_langchain import ValidationRunnable, run_validators
+
+
+def main() -> None:
+ """Run validation example."""
+ print("=== PromptPack Validation Example ===\n")
+
+ # 1. Create validators
+ validators = [
+ Validator(
+ type="banned_words",
+ enabled=True,
+ fail_on_violation=True,
+ params={"words": ["inappropriate", "offensive", "banned"]},
+ ),
+ Validator(
+ type="max_length",
+ enabled=True,
+ fail_on_violation=False,
+ params={"max_characters": 500},
+ ),
+ Validator(
+ type="min_length",
+ enabled=True,
+ fail_on_violation=False,
+ params={"min_characters": 10},
+ ),
+ ]
+
+ print("Validators configured:")
+ for v in validators:
+ print(f" - {v.type} (fail_on_violation={v.fail_on_violation})")
+
+ # 2. Test with valid content
+ print("\n--- Test 1: Valid Content ---")
+ valid_content = "This is a helpful and professional response to the customer's inquiry."
+ result = run_validators(valid_content, validators)
+ print(f"Content: '{valid_content[:50]}...'")
+ print(f"Is valid: {result.is_valid}")
+ print(f"Violations: {len(result.violations)}")
+
+ # 3. Test with banned word
+ print("\n--- Test 2: Content with Banned Word ---")
+ bad_content = "This response contains inappropriate content."
+ result = run_validators(bad_content, validators)
+ print(f"Content: '{bad_content}'")
+ print(f"Is valid: {result.is_valid}")
+ print(f"Has blocking violations: {result.has_blocking_violations}")
+ for violation in result.violations:
+ print(f" - {violation.validator_type}: {violation.message}")
+
+ # 4. Test with length violation
+ print("\n--- Test 3: Content Too Short ---")
+ short_content = "Hi"
+ result = run_validators(short_content, validators)
+ print(f"Content: '{short_content}'")
+ print(f"Is valid: {result.is_valid}")
+ for violation in result.violations:
+ print(f" - {violation.validator_type}: {violation.message}")
+
+ # 5. Test with content too long
+ print("\n--- Test 4: Content Too Long ---")
+ long_content = "This is a very long response. " * 50
+ result = run_validators(long_content, validators)
+ print(f"Content: '{long_content[:50]}...' ({len(long_content)} chars)")
+ print(f"Is valid: {result.is_valid}")
+ for violation in result.violations:
+ print(f" - {violation.validator_type}: {violation.message}")
+
+ # 6. Using ValidationRunnable
+ print("\n--- Using ValidationRunnable ---")
+ runnable = ValidationRunnable(validators)
+
+ contents = [
+ "This is a good response.",
+ "This is inappropriate.",
+ "OK", # Too short
+ ]
+
+ for content in contents:
+ result = runnable.invoke(content)
+ status = "PASS" if result.is_valid else "FAIL"
+ print(f" [{status}] '{content[:30]}...' - {len(result.violations)} violations")
+
+ # 7. Regex validator example
+ print("\n--- Regex Validator Example ---")
+ regex_validators = [
+ Validator(
+ type="regex_match",
+ enabled=True,
+ fail_on_violation=True,
+ params={
+ "pattern": r"^[A-Z]", # Must start with capital letter
+ "must_match": True,
+ },
+ ),
+ Validator(
+ type="regex_match",
+ enabled=True,
+ fail_on_violation=False,
+ params={
+ "pattern": r"password|secret|key", # Forbidden patterns
+ "must_match": False,
+ },
+ ),
+ ]
+
+ test_cases = [
+ "Hello, how can I help you today?",
+ "hello, how can I help?", # Doesn't start with capital
+ "Here is your password: 12345", # Contains forbidden word
+ ]
+
+ for content in test_cases:
+ result = run_validators(content, regex_validators)
+ status = "PASS" if result.is_valid else "FAIL"
+ violations = ", ".join(v.validator_type for v in result.violations) or "none"
+ print(f" [{status}] '{content[:40]}...' - violations: {violations}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/packages/promptpack-langchain/tests/test_multimodal.py b/packages/promptpack-langchain/tests/test_multimodal.py
new file mode 100644
index 0000000..07d2d6c
--- /dev/null
+++ b/packages/promptpack-langchain/tests/test_multimodal.py
@@ -0,0 +1,248 @@
+# Copyright 2025 Altaira Labs
+# SPDX-License-Identifier: Apache-2.0
+
+"""Tests for multimodal content conversion."""
+
+from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
+from promptpack import ContentPart, MediaReference
+from promptpack_langchain import convert_content_parts, create_multimodal_message
+
+
+class TestConvertContentParts:
+ """Tests for convert_content_parts function."""
+
+ def test_text_content(self) -> None:
+ """Test converting text content."""
+ parts = [ContentPart(type="text", text="Hello world")]
+ result = convert_content_parts(parts)
+
+ assert len(result) == 1
+ assert result[0]["type"] == "text"
+ assert result[0]["text"] == "Hello world"
+
+ def test_empty_text_content(self) -> None:
+ """Test converting text content with None text."""
+ parts = [ContentPart(type="text")]
+ result = convert_content_parts(parts)
+
+ assert len(result) == 1
+ assert result[0]["type"] == "text"
+ assert result[0]["text"] == ""
+
+ def test_image_with_url(self) -> None:
+ """Test converting image with URL."""
+ parts = [
+ ContentPart(
+ type="image",
+ media=MediaReference(
+ url="https://example.com/image.jpg",
+ mime_type="image/jpeg",
+ ),
+ )
+ ]
+ result = convert_content_parts(parts)
+
+ assert len(result) == 1
+ assert result[0]["type"] == "image_url"
+ assert result[0]["image_url"]["url"] == "https://example.com/image.jpg"
+
+ def test_image_with_base64(self) -> None:
+ """Test converting image with base64 data."""
+ parts = [
+ ContentPart(
+ type="image",
+ media=MediaReference(
+ base64="abc123",
+ mime_type="image/png",
+ ),
+ )
+ ]
+ result = convert_content_parts(parts)
+
+ assert len(result) == 1
+ assert result[0]["type"] == "image_url"
+ assert result[0]["image_url"]["url"] == "data:image/png;base64,abc123"
+
+ def test_image_with_detail(self) -> None:
+ """Test converting image with detail level."""
+ parts = [
+ ContentPart(
+ type="image",
+ media=MediaReference(
+ url="https://example.com/image.jpg",
+ mime_type="image/jpeg",
+ detail="high",
+ ),
+ )
+ ]
+ result = convert_content_parts(parts)
+
+ assert result[0]["image_url"]["detail"] == "high"
+
+ def test_image_with_file_path(self) -> None:
+ """Test converting image with file path."""
+ parts = [
+ ContentPart(
+ type="image",
+ media=MediaReference(
+ file_path="/path/to/image.jpg",
+ mime_type="image/jpeg",
+ ),
+ )
+ ]
+ result = convert_content_parts(parts)
+
+ assert result[0]["image_url"]["url"] == "file:///path/to/image.jpg"
+
+ def test_image_missing_media(self) -> None:
+ """Test converting image without media reference is skipped."""
+ parts = [ContentPart(type="image")]
+ result = convert_content_parts(parts)
+        # Image parts with neither media nor text produce no output parts at all
+ assert len(result) == 0
+
+ def test_image_missing_media_with_text(self) -> None:
+ """Test converting image without media but with text fallback."""
+ parts = [ContentPart(type="image", text="[Placeholder image]")]
+ result = convert_content_parts(parts)
+ # Falls through to the else case which adds text if available
+ assert len(result) == 1
+ assert result[0]["type"] == "text"
+ assert result[0]["text"] == "[Placeholder image]"
+
+ def test_audio_with_url(self) -> None:
+ """Test converting audio with URL."""
+ parts = [
+ ContentPart(
+ type="audio",
+ media=MediaReference(
+ url="https://example.com/audio.mp3",
+ mime_type="audio/mpeg",
+ ),
+ )
+ ]
+ result = convert_content_parts(parts)
+
+ assert len(result) == 1
+ assert result[0]["type"] == "audio"
+ assert result[0]["audio_url"]["url"] == "https://example.com/audio.mp3"
+
+ def test_audio_with_base64(self) -> None:
+ """Test converting audio with base64 data."""
+ parts = [
+ ContentPart(
+ type="audio",
+ media=MediaReference(
+ base64="audio_data",
+ mime_type="audio/wav",
+ ),
+ )
+ ]
+ result = convert_content_parts(parts)
+
+ assert result[0]["audio_data"]["data"] == "audio_data"
+ assert result[0]["audio_data"]["mime_type"] == "audio/wav"
+
+ def test_video_with_url(self) -> None:
+ """Test converting video with URL."""
+ parts = [
+ ContentPart(
+ type="video",
+ media=MediaReference(
+ url="https://example.com/video.mp4",
+ mime_type="video/mp4",
+ ),
+ )
+ ]
+ result = convert_content_parts(parts)
+
+ assert len(result) == 1
+ assert result[0]["type"] == "video"
+ assert result[0]["video_url"]["url"] == "https://example.com/video.mp4"
+
+ def test_multiple_parts(self) -> None:
+ """Test converting multiple content parts."""
+ parts = [
+ ContentPart(type="text", text="Check this image:"),
+ ContentPart(
+ type="image",
+ media=MediaReference(
+ url="https://example.com/image.jpg",
+ mime_type="image/jpeg",
+ ),
+ ),
+ ContentPart(type="text", text="What do you see?"),
+ ]
+ result = convert_content_parts(parts)
+
+ assert len(result) == 3
+ assert result[0]["type"] == "text"
+ assert result[1]["type"] == "image_url"
+ assert result[2]["type"] == "text"
+
+ def test_unknown_type_with_text(self) -> None:
+ """Test unknown content type with text fallback."""
+ parts = [ContentPart(type="unknown", text="Fallback text")] # type: ignore[arg-type]
+ result = convert_content_parts(parts)
+
+ assert len(result) == 1
+ assert result[0]["type"] == "text"
+ assert result[0]["text"] == "Fallback text"
+
+
+class TestCreateMultimodalMessage:
+ """Tests for create_multimodal_message function."""
+
+ def test_single_text_returns_string(self) -> None:
+ """Test single text part returns simple string content."""
+ parts = [ContentPart(type="text", text="Hello")]
+ message = create_multimodal_message("user", parts)
+
+ assert isinstance(message, HumanMessage)
+ assert message.content == "Hello"
+
+ def test_human_message(self) -> None:
+ """Test creating human message."""
+ parts = [ContentPart(type="text", text="Question")]
+ message = create_multimodal_message("user", parts)
+
+ assert isinstance(message, HumanMessage)
+
+ def test_ai_message(self) -> None:
+ """Test creating AI message."""
+ parts = [ContentPart(type="text", text="Answer")]
+ message = create_multimodal_message("assistant", parts)
+
+ assert isinstance(message, AIMessage)
+
+ def test_system_message(self) -> None:
+ """Test creating system message."""
+ parts = [ContentPart(type="text", text="Instructions")]
+ message = create_multimodal_message("system", parts)
+
+ assert isinstance(message, SystemMessage)
+
+ def test_multimodal_content_returns_list(self) -> None:
+ """Test multimodal content returns list of content parts."""
+ parts = [
+ ContentPart(type="text", text="Check this:"),
+ ContentPart(
+ type="image",
+ media=MediaReference(
+ url="https://example.com/image.jpg",
+ mime_type="image/jpeg",
+ ),
+ ),
+ ]
+ message = create_multimodal_message("user", parts)
+
+ assert isinstance(message, HumanMessage)
+ assert isinstance(message.content, list)
+ assert len(message.content) == 2
+
+ def test_default_to_human_message(self) -> None:
+ """Test unknown role defaults to human message."""
+ parts = [ContentPart(type="text", text="Text")]
+ message = create_multimodal_message("unknown", parts)
+
+ assert isinstance(message, HumanMessage)
diff --git a/packages/promptpack-langchain/tests/test_template.py b/packages/promptpack-langchain/tests/test_template.py
index 766596d..e039129 100644
--- a/packages/promptpack-langchain/tests/test_template.py
+++ b/packages/promptpack-langchain/tests/test_template.py
@@ -30,10 +30,25 @@ def sample_pack_json() -> str:
"temperature": 0.7,
"max_tokens": 1500
},
+ "validators": [
+ {
+ "type": "banned_words",
+ "enabled": true,
+ "fail_on_violation": false,
+ "params": {"words": ["bad", "evil"]}
+ }
+ ],
"model_overrides": {
"gpt-4": {
"system_template_prefix": "[GPT-4 Mode] ",
"parameters": {"temperature": 0.5}
+ },
+ "claude-3": {
+ "system_template_suffix": " Be concise.",
+ "parameters": {"temperature": 0.8}
+ },
+ "gpt-4-turbo": {
+ "system_template": "You are a GPT-4 Turbo assistant."
}
}
},
@@ -42,6 +57,15 @@ def sample_pack_json() -> str:
"name": "Simple Bot",
"version": "1.0.0",
"system_template": "You are a simple assistant."
+ },
+ "with_defaults": {
+ "id": "with_defaults",
+ "name": "Bot with Defaults",
+ "version": "1.0.0",
+ "system_template": "You are a {{role}} assistant.",
+ "variables": [
+ {"name": "role", "type": "string", "required": false, "default": "helpful"}
+ ]
}
},
"fragments": {
@@ -119,3 +143,114 @@ def test_to_chat_prompt_template(self, pack) -> None:
template = PromptPackTemplate.from_promptpack(pack, "support")
chat_template = template.to_chat_prompt_template(role="agent", company="TestCo")
assert chat_template is not None
+
+ def test_model_override_suffix(self, pack) -> None:
+ """Test model-specific template with suffix."""
+ template = PromptPackTemplate.from_promptpack(pack, "support", model_name="claude-3")
+ result = template.format(role="agent", company="TestCo")
+ assert "Be concise." in result
+ assert "Be helpful and professional" in result
+
+ def test_model_override_full_template(self, pack) -> None:
+ """Test model-specific full template replacement."""
+ template = PromptPackTemplate.from_promptpack(pack, "support", model_name="gpt-4-turbo")
+ result = template.format(role="agent", company="TestCo")
+ assert result == "You are a GPT-4 Turbo assistant."
+
+ def test_no_model_override(self, pack) -> None:
+ """Test using base template when model has no override."""
+ template = PromptPackTemplate.from_promptpack(pack, "support", model_name="unknown-model")
+ result = template.format(role="agent", company="TestCo")
+ assert "[GPT-4 Mode]" not in result
+ assert "agent" in result
+
+ def test_prompt_type(self, pack) -> None:
+ """Test prompt type property."""
+ template = PromptPackTemplate.from_promptpack(pack, "support")
+ assert template._prompt_type == "promptpack"
+
+ def test_default_variables(self, pack) -> None:
+ """Test variables with default values."""
+ template = PromptPackTemplate.from_promptpack(pack, "with_defaults")
+ # Default values should not be in input_variables
+ assert "role" not in template.input_variables
+ result = template.format()
+ assert "helpful" in result
+
+ def test_override_default_value(self, pack) -> None:
+ """Test overriding default value."""
+ template = PromptPackTemplate.from_promptpack(pack, "with_defaults")
+ result = template.format(role="friendly")
+ assert "friendly" in result
+
+ def test_get_parameters_empty(self, pack) -> None:
+ """Test getting parameters from prompt without any."""
+ template = PromptPackTemplate.from_promptpack(pack, "simple")
+ params = template.get_parameters()
+ assert params == {}
+
+ def test_available_prompts_in_error(self, pack) -> None:
+ """Test error message shows available prompts."""
+ with pytest.raises(ValueError) as exc_info:
+ PromptPackTemplate.from_promptpack(pack, "nonexistent")
+ error_msg = str(exc_info.value)
+ assert "support" in error_msg
+ assert "simple" in error_msg
+
+ @pytest.mark.asyncio
+ async def test_aformat_prompt(self, pack) -> None:
+ """Test async format_prompt."""
+ template = PromptPackTemplate.from_promptpack(pack, "support")
+ result = await template.aformat_prompt(role="agent", company="TestCo")
+ assert "agent" in result.text
+ assert "TestCo" in result.text
+
+ def test_to_chat_prompt_template_without_vars(self, pack) -> None:
+ """Test conversion to ChatPromptTemplate without variables."""
+ template = PromptPackTemplate.from_promptpack(pack, "support")
+ chat_template = template.to_chat_prompt_template()
+ # Should include the template with placeholders
+ assert chat_template is not None
+
+
+class TestPromptPackTemplateIntegration:
+ """Integration tests for PromptPackTemplate."""
+
+ def test_full_workflow(self) -> None:
+ """Test complete workflow from JSON to formatted output."""
+ pack_json = """{
+ "id": "workflow-test",
+ "name": "Workflow Test",
+ "version": "1.0.0",
+ "template_engine": {"version": "v1", "syntax": "{{variable}}"},
+ "prompts": {
+ "main": {
+ "id": "main",
+ "name": "Main Prompt",
+ "version": "1.0.0",
+ "system_template": "Role: {{role}}. Task: {{task}}. Guidelines: {{fragment:rules}}",
+ "variables": [
+ {"name": "role", "type": "string", "required": true},
+ {"name": "task", "type": "string", "required": true}
+ ],
+ "parameters": {"temperature": 0.5, "max_tokens": 1000}
+ }
+ },
+ "fragments": {
+ "rules": "Follow best practices."
+ }
+ }"""
+
+ pack = parse_promptpack_string(pack_json)
+ template = PromptPackTemplate.from_promptpack(pack, "main")
+
+ # Verify metadata
+ assert template.input_variables == ["role", "task"]
+ params = template.get_parameters()
+ assert params["temperature"] == 0.5
+
+ # Verify formatting
+ result = template.format(role="assistant", task="help users")
+ assert "Role: assistant" in result
+ assert "Task: help users" in result
+ assert "Follow best practices" in result
diff --git a/packages/promptpack-langchain/tests/test_validators.py b/packages/promptpack-langchain/tests/test_validators.py
index 388ef5f..9a8bede 100644
--- a/packages/promptpack-langchain/tests/test_validators.py
+++ b/packages/promptpack-langchain/tests/test_validators.py
@@ -194,3 +194,120 @@ async def test_ainvoke(self) -> None:
result = await runnable.ainvoke("This is good")
assert result.is_valid
+
+
+class TestMultipleValidators:
+ """Tests for running multiple validators together."""
+
+ def test_all_pass(self) -> None:
+ """Test all validators pass."""
+ validators = [
+ make_validator("banned_words", params={"words": ["bad"]}),
+ make_validator("max_length", params={"max_characters": 100}),
+ make_validator("min_length", params={"min_characters": 5}),
+ ]
+ result = run_validators("This is valid content", validators)
+ assert result.is_valid
+ assert len(result.violations) == 0
+
+ def test_multiple_failures(self) -> None:
+ """Test multiple validators fail."""
+ validators = [
+ make_validator("banned_words", params={"words": ["bad"]}),
+ make_validator("max_length", params={"max_characters": 10}),
+ ]
+ result = run_validators("This is bad and way too long", validators)
+ assert len(result.violations) == 2
+
+ def test_mixed_fail_on_violation(self) -> None:
+ """Test mixed fail_on_violation settings."""
+ validators = [
+ make_validator(
+ "banned_words",
+ params={"words": ["bad"]},
+ fail_on_violation=True, # Blocking
+ ),
+ make_validator(
+ "max_length",
+ params={"max_characters": 10},
+ fail_on_violation=False, # Non-blocking
+ ),
+ ]
+ result = run_validators("This is bad content", validators)
+ assert not result.is_valid
+ assert result.has_blocking_violations
+
+ def test_only_non_blocking_violations(self) -> None:
+ """Test violations that are all non-blocking."""
+ validators = [
+ make_validator(
+ "max_length",
+ params={"max_characters": 5},
+ fail_on_violation=False,
+ ),
+ ]
+ result = run_validators("This is too long", validators)
+ assert len(result.violations) == 1
+ assert not result.has_blocking_violations
+
+
+class TestEdgeCases:
+ """Edge case tests for validators."""
+
+ def test_empty_content(self) -> None:
+ """Test validation with empty content."""
+ validators = [
+ make_validator("min_length", params={"min_characters": 10}),
+ ]
+ result = run_validators("", validators)
+ assert len(result.violations) == 1
+
+ def test_whitespace_content(self) -> None:
+ """Test validation with whitespace content."""
+ validators = [
+ make_validator("min_length", params={"min_characters": 5}),
+ ]
+ result = run_validators(" ", validators)
+ assert len(result.violations) == 1
+
+ def test_missing_params(self) -> None:
+ """Test validator with missing params uses defaults."""
+ validators = [
+ make_validator("banned_words", params=None),
+ ]
+ # Should not raise, just return valid (no words to check)
+ result = run_validators("Any content", validators)
+ assert result.is_valid
+
+ def test_unicode_content(self) -> None:
+ """Test validation with unicode content."""
+ validators = [
+ make_validator("banned_words", params={"words": ["prohibited"]}),
+ make_validator("min_length", params={"min_characters": 5}),
+ ]
+ result = run_validators("Héllo wörld! 🌍", validators)
+ assert result.is_valid
+
+ def test_regex_special_chars(self) -> None:
+ """Test regex validation with special characters."""
+ validators = [
+ make_validator(
+ "regex_match",
+ params={
+ "pattern": r"\d{3}-\d{3}-\d{4}", # Phone number
+ "must_match": True,
+ },
+ ),
+ ]
+ result = run_validators("Call me at 123-456-7890", validators)
+ assert result.is_valid
+
+ def test_multiple_banned_words_found(self) -> None:
+ """Test content with multiple banned words."""
+ validators = [
+ make_validator("banned_words", params={"words": ["bad", "evil", "wrong"]}),
+ ]
+ result = run_validators("This is bad and evil content", validators)
+ assert len(result.violations) == 1
+ # Should report the found words
+ assert "bad" in result.violations[0].message or "evil" in result.violations[0].message
diff --git a/pyproject.toml b/pyproject.toml
index a12113e..45d2305 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -48,6 +48,9 @@ line-length = 100
select = ["E", "F", "W", "I", "N", "UP", "B", "A", "C4", "T20"]
ignore = ["E501"]
+[tool.ruff.lint.per-file-ignores]
+"examples/*.py" = ["T201"] # Allow print statements in examples
+
[tool.mypy]
python_version = "3.10"
warn_return_any = false