-
-
Notifications
You must be signed in to change notification settings - Fork 781
Expand file tree
/
Copy pathperformance_example.py
More file actions
59 lines (49 loc) · 1.71 KB
/
performance_example.py
File metadata and controls
59 lines (49 loc) · 1.71 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
"""
Performance Evaluation Example
This example demonstrates how to benchmark agent performance
by measuring runtime and memory usage across multiple iterations.
"""
import os
from praisonaiagents import Agent
from praisonaiagents.eval import PerformanceEvaluator
# --- Agent benchmark -------------------------------------------------------
# Only meaningful when an OpenAI API key is present, since the agent has to
# make real model calls; otherwise we skip straight to the warning.
has_api_key = os.getenv("OPENAI_API_KEY") is not None

if not has_api_key:
    print("⚠️ No OPENAI_API_KEY found. Skipping agent performance test...")
else:
    print("--- Testing Agent Performance ---")

    # Agent under test: terse instructions keep each response short and cheap.
    agent = Agent(
        instructions="You are a helpful assistant. Answer questions briefly."
    )

    # Benchmark configuration: two warmup calls to amortize cold-start cost,
    # then ten timed iterations with memory tracking and verbose progress.
    evaluator = PerformanceEvaluator(
        agent=agent,
        input_text="What is the capital of France?",
        num_iterations=10,
        warmup_runs=2,
        track_memory=True,
        output="verbose",
    )

    # Execute the benchmark; print_summary=True emits the built-in report.
    result = evaluator.run(print_summary=True)

    # Pull the individual aggregate statistics off the result object.
    print("\nAgent Performance Results:")
    print(f" Average Time: {result.avg_run_time:.4f}s")
    print(f" Min Time: {result.min_run_time:.4f}s")
    print(f" Max Time: {result.max_run_time:.4f}s")
    print(f" Median Time: {result.median_run_time:.4f}s")
    print(f" P95 Time: {result.p95_run_time:.4f}s")
    print(f" Avg Memory: {result.avg_memory:.2f} MB")
# You can also benchmark any function
def my_function():
    """Toy workload for the function benchmark: pause 0.1s, return "done".

    Exists purely to demonstrate that PerformanceEvaluator can time any
    plain callable, not only agents.
    """
    from time import sleep  # local import keeps the example self-contained

    sleep(0.1)
    return "done"
# --- Function benchmark ----------------------------------------------------
# The same evaluator API accepts a bare callable via `func=`; gather the
# settings in one place, then run a quick 5-iteration timing pass
# (one warmup, no memory tracking).
bench_options = dict(
    func=my_function,
    num_iterations=5,
    warmup_runs=1,
    track_memory=False,
)
func_evaluator = PerformanceEvaluator(**bench_options)

func_result = func_evaluator.run(print_summary=True)