Skip to content

Commit 686b468

Browse files
mattip and claude committed
Warn on suite_version mismatch in comparison view
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
1 parent af72a5f commit 686b468

2 files changed

Lines changed: 29 additions & 2 deletions

File tree

codespeed/static/js/comparison.js

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,22 @@ function refreshContent() {
5252
msg = '<p class="warning">Normalized stacked bars actually represent the weighted arithmetic sum, useful to spot which individual benchmarks take up the most time. Choosing different weightings from the "Normalization" menu will change the totals relative to one another. For the correct way to calculate total bars, the geometric mean must be used (see <a href="http://portal.acm.org/citation.cfm?id=5666.5673 " title="How not to lie with statistics: the correct way to summarize benchmark results">paper</a>)</p>';
5353
}
5454

55+
if (compdata && compdata.suite_versions) {
56+
var mismatchedEnvs = enviros.filter(function(envId) {
57+
var versions = new Set();
58+
exes.forEach(function(exeKey) {
59+
var sv = compdata.suite_versions[exeKey];
60+
if (sv && sv[envId]) {
61+
sv[envId].forEach(function(v) { versions.add(v); });
62+
}
63+
});
64+
return versions.size > 1;
65+
});
66+
if (mismatchedEnvs.length > 0) {
67+
msg += '<p class="warning">The executables being compared used different benchmark suite versions. Results may not be directly comparable.</p>';
68+
}
69+
}
70+
5571
chartInstances.forEach(function(c) { c.destroy(); });
5672
chartInstances = [];
5773
$("#plotwrapper").fadeOut("fast", function() {

codespeed/views.py

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -222,26 +222,37 @@ def getcomparisondata(request):
222222

223223
compdata = {}
224224
compdata['error'] = "Unknown error"
225+
suite_versions = {} # exe_key -> env_id -> sorted list of unique non-empty versions
225226
for proj in executables:
226227
for exe in executables[proj]:
227228
if requested_exes is not None and exe['key'] not in requested_exes:
228229
continue
229230
compdata[exe['key']] = {}
231+
suite_versions[exe['key']] = {}
230232
for env in environments:
231233
compdata[exe['key']][env.id] = {}
232234

233235
# Load all results for this env/executable/revision in a
234236
# dict for fast lookup
235-
results = dict(Result.objects.filter(
237+
rows = Result.objects.filter(
236238
environment=env,
237239
executable=exe['executable'],
238240
revision=exe['revision'],
239-
).values_list('benchmark', 'value'))
241+
).values_list('benchmark', 'value', 'suite_version')
242+
243+
results = {}
244+
env_versions = set()
245+
for bench_id, value, sv in rows:
246+
results[bench_id] = value
247+
if sv:
248+
env_versions.add(sv)
240249

241250
for bench in benchmarks:
242251
compdata[exe['key']][env.id][bench.id] = results.get(
243252
bench.id, None)
253+
suite_versions[exe['key']][env.id] = sorted(env_versions)
244254

255+
compdata['suite_versions'] = suite_versions
245256
compdata['error'] = "None"
246257

247258
return HttpResponse(json.dumps(compdata))

0 commit comments

Comments
 (0)