Skip to content

Commit 044f863

Browse files
committed
group comparison page by source, update fixtures and tests
Replace the benchmark_type='C' filter with grouping by Benchmark.S_TYPES. The sidebar now shows source labels ('Legacy', 'PyPerformance') as group headers. Default checked benchmarks no longer require source='legacy'. Also adds a `suite_version` CharField to Result. Update fixtures and tests to use the renamed field. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
1 parent 3a4d60f commit 044f863

7 files changed

Lines changed: 27 additions & 36 deletions

File tree

codespeed/admin.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ class ExecutableAdmin(admin.ModelAdmin):
5858

5959
@admin.register(Benchmark)
6060
class BenchmarkAdmin(admin.ModelAdmin):
61-
list_display = ('name', 'benchmark_type', 'data_type', 'description',
61+
list_display = ('name', 'source', 'data_type', 'description',
6262
'units_title', 'units', 'lessisbetter',
6363
'default_on_comparison')
6464
list_filter = ('data_type', 'lessisbetter')

codespeed/fixtures/jruby.json

Lines changed: 1 addition & 1 deletion
Large diffs are not rendered by default.

codespeed/fixtures/testdata.json

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -219,7 +219,7 @@
219219
"fields": {
220220
"name": "float",
221221
"parent": null,
222-
"benchmark_type": "C",
222+
"source": "legacy",
223223
"data_type": "U",
224224
"description": "",
225225
"units_title": "Time",
@@ -234,7 +234,7 @@
234234
"fields": {
235235
"name": "int",
236236
"parent": null,
237-
"benchmark_type": "C",
237+
"source": "legacy",
238238
"data_type": "U",
239239
"description": "",
240240
"units_title": "Time",

codespeed/fixtures/timeline_tests.json

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -210,7 +210,7 @@
210210
"fields": {
211211
"parent": null,
212212
"name": "float",
213-
"benchmark_type": "C",
213+
"source": "legacy",
214214
"default_on_comparison": true,
215215
"units_title": "Time",
216216
"units": "seconds",
@@ -224,7 +224,7 @@
224224
"fields": {
225225
"parent": null,
226226
"name": "int",
227-
"benchmark_type": "C",
227+
"source": "legacy",
228228
"default_on_comparison": true,
229229
"units_title": "Time",
230230
"units": "seconds",

codespeed/models.py

Lines changed: 5 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -172,9 +172,9 @@ def __str__(self):
172172

173173

174174
class Benchmark(models.Model):
175-
B_TYPES = (
176-
('C', 'Cross-project'),
177-
('O', 'Own-project'),
175+
S_TYPES = (
176+
('legacy', 'Legacy'),
177+
('pyperformance', 'PyPerformance'),
178178
)
179179
D_TYPES = (
180180
('U', 'Mean'),
@@ -186,7 +186,7 @@ class Benchmark(models.Model):
186186
'self', on_delete=models.CASCADE, verbose_name="parent",
187187
help_text="allows to group benchmarks in hierarchies",
188188
null=True, blank=True, default=None)
189-
benchmark_type = models.CharField(max_length=1, choices=B_TYPES, default='C')
189+
source = models.CharField(max_length=14, choices=S_TYPES, default='legacy')
190190
data_type = models.CharField(max_length=1, choices=D_TYPES, default='U')
191191
description = models.CharField(max_length=300, blank=True)
192192
units_title = models.CharField(max_length=30, default='Time')
@@ -198,12 +198,6 @@ class Benchmark(models.Model):
198198
def __str__(self):
199199
return self.name
200200

201-
def clean(self):
202-
if self.default_on_comparison and self.benchmark_type != 'C':
203-
raise ValidationError("Only cross-project benchmarks are shown "
204-
"on the comparison page. Deactivate "
205-
"'default_on_comparison' first.")
206-
207201

208202
class Environment(models.Model):
209203
name = models.CharField(unique=True, max_length=100)
@@ -223,6 +217,7 @@ class Result(models.Model):
223217
val_max = models.FloatField(blank=True, null=True)
224218
q1 = models.FloatField(blank=True, null=True)
225219
q3 = models.FloatField(blank=True, null=True)
220+
suite_version = models.CharField(max_length=50, blank=True, default='')
226221
date = models.DateTimeField(blank=True, null=True)
227222
revision = models.ForeignKey(
228223
Revision, on_delete=models.CASCADE, related_name="results")

codespeed/tests/test_views.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ def test_add_correct_result(self):
4242
# Check that the data was correctly saved
4343
e = Environment.objects.get(name='Dual Core')
4444
b = Benchmark.objects.get(name='float')
45-
self.assertEqual(b.benchmark_type, "C")
45+
self.assertEqual(b.source, "legacy")
4646
self.assertEqual(b.units, "seconds")
4747
self.assertEqual(b.lessisbetter, True)
4848
p = Project.objects.get(name='MyProject')
@@ -224,7 +224,7 @@ def test_add_correct_results(self):
224224
# Check that the data was correctly saved
225225
e = Environment.objects.get(name='bigdog')
226226
b = Benchmark.objects.get(name='Richards')
227-
self.assertEqual(b.benchmark_type, "C")
227+
self.assertEqual(b.source, "legacy")
228228
self.assertEqual(b.units, "seconds")
229229
self.assertEqual(b.lessisbetter, True)
230230
p = Project.objects.get(name='pypy')

codespeed/views.py

Lines changed: 14 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -317,23 +317,21 @@ def comparison(request):
317317
if not checkedexecutables:
318318
checkedexecutables = exekeys
319319

320-
units_titles = Benchmark.objects.filter(
321-
benchmark_type="C"
322-
).values('units_title').distinct()
323-
units_titles = [unit['units_title'] for unit in units_titles]
324320
benchmarks = {}
325321
bench_units = {}
326-
for unit in units_titles:
327-
# Only include benchmarks marked as cross-project
328-
benchmarks[unit] = Benchmark.objects.filter(
329-
benchmark_type="C"
330-
).filter(units_title=unit)
331-
units = benchmarks[unit][0].units
332-
lessisbetter = (benchmarks[unit][0].lessisbetter and
333-
' (less is better)' or ' (more is better)')
334-
bench_units[unit] = [
335-
[b.id for b in benchmarks[unit]], lessisbetter, units
336-
]
322+
for source_val, source_label in Benchmark.S_TYPES:
323+
qs = Benchmark.objects.filter(source=source_val)
324+
if not qs.exists():
325+
continue
326+
benchmarks[source_label] = qs
327+
for unit in qs.values_list('units_title', flat=True).distinct():
328+
unit_qs = qs.filter(units_title=unit)
329+
units = unit_qs[0].units
330+
lessisbetter = (unit_qs[0].lessisbetter and
331+
' (less is better)' or ' (more is better)')
332+
bench_units[unit] = [
333+
[b.id for b in unit_qs], lessisbetter, units
334+
]
337335
checkedbenchmarks = []
338336
if 'ben' in data:
339337
checkedbenchmarks = []
@@ -345,9 +343,7 @@ def comparison(request):
345343
except Benchmark.DoesNotExist:
346344
pass
347345
if not checkedbenchmarks:
348-
# Only include benchmarks marked as cross-project
349-
checkedbenchmarks = Benchmark.objects.filter(
350-
benchmark_type="C", default_on_comparison=True)
346+
checkedbenchmarks = Benchmark.objects.filter(default_on_comparison=True)
351347

352348
charts = ['normal bars', 'stacked bars', 'relative bars']
353349
# Don't show relative charts as an option if there is only one executable

0 commit comments

Comments (0)