Skip to content

Commit a95187a

Browse files
Евгений БлиновЕвгений Блинов
authored and committed
Add tests for CLI help output, empty/invalid durations, and
ScenarioGroup radd
1 parent 6f265c1 commit a95187a

4 files changed

Lines changed: 57 additions & 6 deletions

File tree

tests/cli/test_scenario_cli.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -120,7 +120,7 @@ def test_help_mentions_number(self) -> None:
120120
def test_help_mentions_max_mean(self) -> None:
121121
proc = run_script(scenario_script(), '--help')
122122
combined = proc.stdout + proc.stderr
123-
assert 'max-mean' in combined.lower() or 'max_mean' in combined.lower()
123+
assert '--max-mean' in combined
124124

125125
def test_help_does_not_run_benchmark(self) -> None:
126126
proc = run_script(scenario_script(), '--help')

tests/units/test_benchmark_result.py

Lines changed: 27 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -34,14 +34,33 @@ def test_durations_is_tuple(self) -> None:
3434
result = make_result((0.1, 0.2, 0.3))
3535
assert isinstance(result.durations, tuple)
3636

37+
def test_empty_durations_raises(self) -> None:
38+
# BenchmarkResult does not validate durations length;
39+
# creating with empty tuple causes ZeroDivisionError in __post_init__
40+
s = Scenario(lambda: None, name='s', number=1)
41+
with pytest.raises((ZeroDivisionError, ValueError)):
42+
BenchmarkResult(scenario=s, durations=(), is_primary=True)
43+
44+
def test_inf_durations_fields(self) -> None:
45+
result = make_result((float('inf'), 1.0, 2.0))
46+
assert math.isinf(result.worst)
47+
assert math.isinf(result.mean)
48+
assert result.best == 1.0
49+
50+
def test_nan_durations_fields(self) -> None:
51+
result = make_result((float('nan'),))
52+
assert math.isnan(result.mean)
53+
assert math.isnan(result.best)
54+
assert math.isnan(result.worst)
55+
3756
def test_mean_computed_correctly(self) -> None:
3857
result = make_result((1.0, 2.0, 3.0))
3958
expected = math.fsum([1.0, 2.0, 3.0]) / 3
4059
assert result.mean == expected
4160

4261
def test_mean_uses_fsum_precision(self) -> None:
43-
# fsum handles cancellation correctly; plain sum loses precision:
44-
# sum([1e20, 1.0, -1e20]) == 0.0, fsum == 1.0
62+
# fsum handles cancellation correctly; plain sum loses precision
63+
# for (1e20, 1.0, -1e20): fsum=1.0, sum=0.0 due to IEEE 754
4564
durations = (1e20, 1.0, -1e20)
4665
result = make_result(durations)
4766
assert result.mean == pytest.approx(1.0 / 3)
@@ -337,6 +356,12 @@ def test_percentile_single_element(self) -> None:
337356
assert trimmed.durations == (5.0,)
338357
assert trimmed.is_primary is False
339358

359+
def test_percentile_100_single_element(self) -> None:
360+
result = make_result((5.0,))
361+
trimmed = result.percentile(100)
362+
assert trimmed.durations == (5.0,)
363+
assert trimmed.is_primary is False
364+
340365
def test_from_json_missing_is_primary_raises(self) -> None:
341366
payload = json.dumps({'durations': [0.1, 0.2]})
342367
with pytest.raises(ValueError, match='required fields'):

tests/units/test_scenario.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -167,12 +167,13 @@ def fn() -> None:
167167
assert counter[0] == 5
168168

169169
def test_run_uses_custom_timer(self) -> None:
170-
# timer produces: 0.000, 0.001, 0.002, 0.003, 0.004, 0.005, ...
170+
# timer produces: 0.000, 0.001, 0.002, 0.003, ... (infinite)
171171
# each measured interval: end - start = 0.001
172-
values = iter(t * 0.001 for t in range(200))
172+
import itertools # noqa: PLC0415
173+
counter = itertools.count(0)
173174

174175
def fake_timer() -> float:
175-
return next(values)
176+
return next(counter) * 0.001
176177

177178
s = Scenario(lambda: None, name='s', number=3, timer=fake_timer)
178179
result = s.run()

tests/units/test_scenario_group.py

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,7 @@
11
from __future__ import annotations
22

3+
import pytest
4+
35
from microbenchmark import BenchmarkResult, Scenario, ScenarioGroup
46

57

@@ -80,6 +82,19 @@ def test_radd_scenario_to_group(self) -> None:
8082
assert isinstance(group, ScenarioGroup)
8183
assert len(group.run()) == 2
8284

85+
def test_radd_group_to_group(self) -> None:
86+
s1, s2, s3 = make_scenario('s1'), make_scenario('s2'), make_scenario('s3')
87+
g1 = ScenarioGroup(s1, s2)
88+
g2 = ScenarioGroup(s3)
89+
# g2.__radd__(g1) = ScenarioGroup(*g1._scenarios, *g2._scenarios) = [s1, s2, s3]
90+
group = g2.__radd__(g1)
91+
assert isinstance(group, ScenarioGroup)
92+
results = group.run()
93+
assert len(results) == 3
94+
assert results[0].scenario is s1
95+
assert results[1].scenario is s2
96+
assert results[2].scenario is s3
97+
8398
def test_duplicate_scenarios(self) -> None:
8499
s = make_scenario('s')
85100
group = s + s
@@ -156,3 +171,13 @@ def test_run_warmup_different_numbers(self) -> None:
156171
results = g.run(warmup=2)
157172
assert len(results[0].durations) == 3
158173
assert len(results[1].durations) == 7
174+
175+
def test_run_propagates_exception_from_scenario(self) -> None:
176+
def bad() -> None:
177+
raise RuntimeError('scenario failed')
178+
179+
s1 = make_scenario('s1')
180+
s2 = Scenario(bad, name='s2', number=1)
181+
g = ScenarioGroup(s1, s2)
182+
with pytest.raises(RuntimeError, match='scenario failed'):
183+
g.run()

0 commit comments

Comments
 (0)