Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
@@ -1,4 +1,19 @@
POLYGON_WALLET_PRIVATE_KEY=""

# === LLM Provider Options ===
# Option 1: OpenAI (requires API key)
OPENAI_API_KEY=""

# Option 2: BlockRun (x402 micropayments, no API key needed)
# Set BLOCKRUN_ENABLED=true to use BlockRun instead of OpenAI
# Payment: USDC on Base network ONLY (chain ID 8453)
# Your agent pays for LLM calls directly - ensure wallet has USDC on Base
# Private key is used locally for signing only - never transmitted
# Learn more: https://blockrun.ai
BLOCKRUN_ENABLED=false
BLOCKRUN_WALLET_KEY="" # Wallet private key for signing x402 payments (or uses POLYGON_WALLET_PRIVATE_KEY)
BLOCKRUN_API_URL="https://blockrun.ai/api"

# === Other API Keys ===
TAVILY_API_KEY=""
NEWSAPI_API_KEY=""
51 changes: 51 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,57 @@ This repo is intended for use with Python 3.9
./scripts/bash/run-docker-dev.sh
```

## Using BlockRun (x402 Micropayments)

BlockRun enables your Polymarket agent to pay for LLM calls with USDC micropayments instead of managing OpenAI API keys. Your agent pays directly with its wallet via the x402 protocol on Base.

### Benefits

- **No API key management** - Your agent's wallet handles payment
- **31+ AI models** - Access GPT-4, Claude, Gemini, and more
- **Pay-per-use** - Only pay for what you use
- **0% markup** - Same pricing as official APIs during beta

### Setup

1. Set `BLOCKRUN_ENABLED=true` in your `.env` file:

```
BLOCKRUN_ENABLED=true
BLOCKRUN_API_URL="https://api.blockrun.ai/v1"
```

> NOTE(review): this URL differs from the `.env.example` default (`https://blockrun.ai/api`) — confirm which endpoint is correct and make the two files agree.

2. Ensure your wallet has USDC on Base network

3. That's it! Your agent will now use BlockRun for LLM calls

### Using Different Models

With BlockRun, you can use models from multiple providers:

```python
from agents.application.executor import Executor

# Use GPT-4 (default behavior, but via BlockRun)
executor = Executor(default_model='gpt-4o', use_blockrun=True)

# Use Claude
executor = Executor(default_model='claude-3-5-sonnet', use_blockrun=True)

# Use Gemini
executor = Executor(default_model='gemini-2.0-flash', use_blockrun=True)
```

### Available Models

| Provider | Models |
|----------|--------|
| OpenAI | gpt-5, gpt-4o, gpt-4o-mini, gpt-4-turbo, gpt-3.5-turbo |
| Anthropic | claude-3-5-sonnet, claude-3-5-haiku, claude-3-opus |
| Google | gemini-2.0-flash, gemini-1.5-pro, gemini-1.5-flash |

Learn more at [blockrun.ai](https://blockrun.ai)

## Architecture

The Polymarket Agents architecture features modular components that can be maintained and extended by individual community members.
Expand Down
63 changes: 54 additions & 9 deletions agents/application/executor.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import json
import ast
import re
from typing import List, Dict, Any
from typing import List, Dict, Any, Optional

import math

Expand All @@ -12,6 +12,11 @@

from agents.polymarket.gamma import GammaMarketClient as Gamma
from agents.connectors.chroma import PolymarketRAG as Chroma
from agents.connectors.blockrun import (
create_blockrun_llm,
get_blockrun_model_name,
get_blockrun_token_limit,
)
Comment thread
cursor[bot] marked this conversation as resolved.
from agents.utils.objects import SimpleEvent, SimpleMarket
from agents.application.prompts import Prompter
from agents.polymarket.polymarket import Polymarket
Expand All @@ -29,16 +34,56 @@ def retain_keys(data, keys_to_retain):
return data

class Executor:
def __init__(self, default_model='gpt-3.5-turbo-16k') -> None:
def __init__(
self,
default_model: str = 'gpt-3.5-turbo-16k',
use_blockrun: Optional[bool] = None,
) -> None:
"""
Initialize the Executor with LLM configuration.

Args:
default_model: Model to use for LLM calls
use_blockrun: If True, use BlockRun for LLM access via x402 USDC payments.
If None, auto-detect based on BLOCKRUN_ENABLED env var.

BlockRun Mode:
When use_blockrun=True, agents pay for LLM calls with USDC micropayments
on Base via the x402 protocol. No OpenAI API key required.
Set BLOCKRUN_ENABLED=true in .env to enable by default.

Benefits:
- No API key management
- Access to 31+ models (GPT-4, Claude, Gemini, etc.)
- Pay-per-use with agent's own wallet
- 0% markup during beta

Learn more: https://blockrun.ai
"""
load_dotenv()
max_token_model = {'gpt-3.5-turbo-16k':15000, 'gpt-4-1106-preview':95000}
self.token_limit = max_token_model.get(default_model)

# Determine if BlockRun should be used
if use_blockrun is None:
use_blockrun = os.getenv("BLOCKRUN_ENABLED", "false").lower() == "true"

self.use_blockrun = use_blockrun
self.prompter = Prompter()
self.openai_api_key = os.getenv("OPENAI_API_KEY")
self.llm = ChatOpenAI(
model=default_model, #gpt-3.5-turbo"
temperature=0,
)

if use_blockrun:
# Use BlockRun for LLM access via x402 micropayments
self.llm = create_blockrun_llm(model=default_model, temperature=0)
self.token_limit = get_blockrun_token_limit(default_model)
print(f"[BlockRun] Using {get_blockrun_model_name(default_model)} via x402")
else:
# Use standard OpenAI
max_token_model = {'gpt-3.5-turbo-16k': 15000, 'gpt-4-1106-preview': 95000}
self.token_limit = max_token_model.get(default_model, 15000)
self.openai_api_key = os.getenv("OPENAI_API_KEY")
self.llm = ChatOpenAI(
model=default_model,
temperature=0,
)

self.gamma = Gamma()
self.chroma = Chroma()
self.polymarket = Polymarket()
Expand Down
Loading