Skip to content

Commit 802b1ee

Browse files
authored
Merge pull request #61 from mattip/pyperformance
Pyperformance
2 parents ec49688 + 068e3c8 commit 802b1ee

14 files changed

Lines changed: 335 additions & 61 deletions

File tree

codespeed/admin.py

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,15 @@ class ProjectAdmin(admin.ModelAdmin):
3939
class BranchAdmin(admin.ModelAdmin):
4040
list_display = ('name', 'project', 'display_on_comparison_page')
4141
list_filter = ('project',)
42+
actions = ['enable_comparison_page', 'disable_comparison_page']
43+
44+
@admin.action(description='Display selected branches on comparison page')
45+
def enable_comparison_page(self, request, queryset):
46+
queryset.update(display_on_comparison_page=True)
47+
48+
@admin.action(description='Hide selected branches from comparison page')
49+
def disable_comparison_page(self, request, queryset):
50+
queryset.update(display_on_comparison_page=False)
4251

4352

4453
@admin.register(Revision)
@@ -58,7 +67,7 @@ class ExecutableAdmin(admin.ModelAdmin):
5867

5968
@admin.register(Benchmark)
6069
class BenchmarkAdmin(admin.ModelAdmin):
61-
list_display = ('name', 'benchmark_type', 'data_type', 'description',
70+
list_display = ('name', 'source', 'data_type', 'description',
6271
'units_title', 'units', 'lessisbetter',
6372
'default_on_comparison')
6473
list_filter = ('data_type', 'lessisbetter')

codespeed/commits/git.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -52,9 +52,12 @@ def getlogs(endrev, startrev):
5252
else:
5353
logfmt = '--format=format:%h%x00%H%x00%at%x00%an%x00%ae%x00%s%x00%b%x1e'
5454

55+
max_log_entries = getattr(settings, 'GIT_MAX_LOG_ENTRIES', 30)
56+
5557
cmd = ["git", "log", logfmt]
5658

5759
if endrev.commitid != startrev.commitid:
60+
cmd.append("-n%d" % max_log_entries)
5861
cmd.append("%s...%s" % (startrev.commitid, endrev.commitid))
5962
else:
6063
cmd.append("-1") # Only return one commit

codespeed/fixtures/jruby.json

Lines changed: 1 addition & 1 deletion
Large diffs are not rendered by default.

codespeed/fixtures/testdata.json

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -219,7 +219,7 @@
219219
"fields": {
220220
"name": "float",
221221
"parent": null,
222-
"benchmark_type": "C",
222+
"source": "legacy",
223223
"data_type": "U",
224224
"description": "",
225225
"units_title": "Time",
@@ -234,7 +234,7 @@
234234
"fields": {
235235
"name": "int",
236236
"parent": null,
237-
"benchmark_type": "C",
237+
"source": "legacy",
238238
"data_type": "U",
239239
"description": "",
240240
"units_title": "Time",

codespeed/fixtures/timeline_tests.json

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -210,7 +210,7 @@
210210
"fields": {
211211
"parent": null,
212212
"name": "float",
213-
"benchmark_type": "C",
213+
"source": "legacy",
214214
"default_on_comparison": true,
215215
"units_title": "Time",
216216
"units": "seconds",
@@ -224,7 +224,7 @@
224224
"fields": {
225225
"parent": null,
226226
"name": "int",
227-
"benchmark_type": "C",
227+
"source": "legacy",
228228
"default_on_comparison": true,
229229
"units_title": "Time",
230230
"units": "seconds",
codespeed/migrations/0005_... (filename missing from page capture — presumably the new migration adding Benchmark.source and Result.suite_version; it declares a dependency on '0004_branch_display_on_comparison_page')

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
from django.db import migrations, models
2+
3+
4+
def remap_benchmark_type(apps, schema_editor):
5+
Benchmark = apps.get_model('codespeed', 'Benchmark')
6+
Benchmark.objects.all().update(source='legacy')
7+
8+
9+
class Migration(migrations.Migration):
10+
11+
dependencies = [
12+
('codespeed', '0004_branch_display_on_comparison_page'),
13+
]
14+
15+
operations = [
16+
migrations.RenameField(
17+
model_name='benchmark',
18+
old_name='benchmark_type',
19+
new_name='source',
20+
),
21+
migrations.AlterField(
22+
model_name='benchmark',
23+
name='source',
24+
field=models.CharField(
25+
choices=[('legacy', 'Legacy'), ('pyperformance', 'PyPerformance')],
26+
default='legacy',
27+
max_length=14,
28+
),
29+
),
30+
migrations.RunPython(remap_benchmark_type, migrations.RunPython.noop),
31+
migrations.AddField(
32+
model_name='result',
33+
name='suite_version',
34+
field=models.CharField(blank=True, default='', max_length=50),
35+
),
36+
]

codespeed/models.py

Lines changed: 5 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -172,9 +172,9 @@ def __str__(self):
172172

173173

174174
class Benchmark(models.Model):
175-
B_TYPES = (
176-
('C', 'Cross-project'),
177-
('O', 'Own-project'),
175+
S_TYPES = (
176+
('legacy', 'Legacy'),
177+
('pyperformance', 'PyPerformance'),
178178
)
179179
D_TYPES = (
180180
('U', 'Mean'),
@@ -186,7 +186,7 @@ class Benchmark(models.Model):
186186
'self', on_delete=models.CASCADE, verbose_name="parent",
187187
help_text="allows to group benchmarks in hierarchies",
188188
null=True, blank=True, default=None)
189-
benchmark_type = models.CharField(max_length=1, choices=B_TYPES, default='C')
189+
source = models.CharField(max_length=14, choices=S_TYPES, default='legacy')
190190
data_type = models.CharField(max_length=1, choices=D_TYPES, default='U')
191191
description = models.CharField(max_length=300, blank=True)
192192
units_title = models.CharField(max_length=30, default='Time')
@@ -198,12 +198,6 @@ class Benchmark(models.Model):
198198
def __str__(self):
199199
return self.name
200200

201-
def clean(self):
202-
if self.default_on_comparison and self.benchmark_type != 'C':
203-
raise ValidationError("Only cross-project benchmarks are shown "
204-
"on the comparison page. Deactivate "
205-
"'default_on_comparison' first.")
206-
207201

208202
class Environment(models.Model):
209203
name = models.CharField(unique=True, max_length=100)
@@ -223,6 +217,7 @@ class Result(models.Model):
223217
val_max = models.FloatField(blank=True, null=True)
224218
q1 = models.FloatField(blank=True, null=True)
225219
q3 = models.FloatField(blank=True, null=True)
220+
suite_version = models.CharField(max_length=50, blank=True, default='')
226221
date = models.DateTimeField(blank=True, null=True)
227222
revision = models.ForeignKey(
228223
Revision, on_delete=models.CASCADE, related_name="results")

codespeed/results.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -69,6 +69,8 @@ def save_result(data, update_repo=True):
6969
b.units_title = data["units_title"]
7070
if "lessisbetter" in data:
7171
b.lessisbetter = data["lessisbetter"]
72+
if "source" in data:
73+
b.source = data["source"]
7274
b.full_clean()
7375
b.save()
7476

@@ -127,6 +129,7 @@ def save_result(data, update_repo=True):
127129
r.val_max = data.get('max')
128130
r.q1 = data.get('q1')
129131
r.q3 = data.get('q3')
132+
r.suite_version = data.get('suite_version', '')
130133

131134
r.full_clean()
132135
r.save()

codespeed/static/css/main.css

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -457,6 +457,8 @@ tbody.commits tr td.date {
457457

458458
a#permalink { float: right; font-size: small; }
459459
a#permalink:hover { text-decoration: underline; }
460+
a#exportcsv { float: right; font-size: small; margin-right: 1em; }
461+
a#exportcsv:hover { text-decoration: underline; }
460462

461463
/* Plot styles */
462464
div#plot { text-align: left; height: 500px; width: 100%; }

codespeed/static/js/comparison.js

Lines changed: 118 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ function getConfiguration() {
2222
ben: readCheckbox("input[name='benchmarks']:checked"),
2323
env: readCheckbox("input[name='environments']:checked"),
2424
hor: $("input[name='direction']").is(':checked'),
25-
bas: $("#baseline option:selected").val(),
25+
bas: $("#baseline option:selected").val() || "none",
2626
chart: $("#chart_type option:selected").val()
2727
};
2828
}
@@ -52,6 +52,22 @@ function refreshContent() {
5252
msg = '<p class="warning">Normalized stacked bars actually represent the weighted arithmetic sum, useful to spot which individual benchmarks take up the most time. Choosing different weightings from the "Normalization" menu will change the totals relative to one another. For the correct way to calculate total bars, the geometric mean must be used (see <a href="http://portal.acm.org/citation.cfm?id=5666.5673 " title="How not to lie with statistics: the correct way to summarize benchmark results">paper</a>)</p>';
5353
}
5454

55+
if (compdata && compdata.suite_versions) {
56+
var mismatchedEnvs = enviros.filter(function(envId) {
57+
var versions = new Set();
58+
exes.forEach(function(exeKey) {
59+
var sv = compdata.suite_versions[exeKey];
60+
if (sv && sv[envId]) {
61+
sv[envId].forEach(function(v) { versions.add(v); });
62+
}
63+
});
64+
return versions.size > 1;
65+
});
66+
if (mismatchedEnvs.length > 0) {
67+
msg += '<p class="warning">The executables being compared used different benchmark suite versions. Results may not be directly comparable.</p>';
68+
}
69+
}
70+
5571
chartInstances.forEach(function(c) { c.destroy(); });
5672
chartInstances = [];
5773
$("#plotwrapper").fadeOut("fast", function() {
@@ -90,10 +106,19 @@ function updateBaselineDropdown() {
90106
var $baseline = $("#baseline");
91107
var current = $baseline.val();
92108
$baseline.find("option:not([value='none'])").remove();
109+
var enviros = readCheckbox("input[name='environments']:checked").split(",").filter(Boolean);
110+
var multiEnv = enviros.length > 1;
93111
$("input[name='executables']:checked").each(function() {
94112
var key = $(this).val();
95113
var name = $(this).next('label').text().trim();
96-
$baseline.append($('<option>').val(key).text(name));
114+
if (multiEnv) {
115+
enviros.forEach(function(envId) {
116+
var envName = $("label[for='env_" + envId + "']").text().trim();
117+
$baseline.append($('<option>').val(key + ':' + envId).text(name + ' @ ' + envName));
118+
});
119+
} else {
120+
$baseline.append($('<option>').val(key).text(name));
121+
}
97122
});
98123
if ($baseline.find("option[value='" + current + "']").length) {
99124
$baseline.val(current);
@@ -104,7 +129,7 @@ function updateBaselineDropdown() {
104129

105130
function loadData() {
106131
var conf = getConfiguration();
107-
if (!conf.exe || !conf.ben) { return; }
132+
if (!conf.exe || !conf.ben) { refreshContent(); return; }
108133
var cacheKey = conf.exe + "|" + conf.ben;
109134
if (dataCache[cacheKey]) {
110135
compdata = dataCache[cacheKey];
@@ -119,7 +144,22 @@ function loadData() {
119144
}
120145

121146
function renderComparisonPlot(plotid, unit, benchmarks, exes, enviros, baseline, chart, horizontal) {
122-
var baselineLabel = baseline !== "none" ? $("label[for='exe_" + baseline + "']").text().trim() : "";
147+
// baseline may be "exe_key" or "exe_key:env_id" (for cross-env normalization)
148+
if (!baseline) { baseline = "none"; }
149+
var baselineExe = baseline, baselineEnv = null;
150+
if (baseline !== "none" && baseline.indexOf(':') !== -1) {
151+
var bparts = baseline.split(':');
152+
baselineExe = bparts[0];
153+
baselineEnv = bparts[1];
154+
}
155+
156+
var baselineLabel = "";
157+
if (baseline !== "none") {
158+
baselineLabel = $("label[for='exe_" + baselineExe + "']").text().trim();
159+
if (baselineEnv !== null) {
160+
baselineLabel += ' @ ' + $("label[for='env_" + baselineEnv + "']").text().trim();
161+
}
162+
}
123163

124164
var title;
125165
if (baseline === "none") {
@@ -148,15 +188,17 @@ function renderComparisonPlot(plotid, unit, benchmarks, exes, enviros, baseline,
148188
for (var i = 0; i < exes.length; i++) {
149189
for (var j = 0; j < enviros.length; j++) {
150190
var exeLabel = $("label[for='exe_" + exes[i] + "']").text().trim();
151-
if (chart === "relative bars" && exes[i] === baseline) { continue; }
191+
if (chart === "relative bars" && exes[i] === baselineExe &&
192+
(baselineEnv === null || baselineEnv === enviros[j])) { continue; }
152193
var data = [];
153194
for (var b = 0; b < benchmarks.length; b++) {
154195
var val = compdata[exes[i]] && compdata[exes[i]][enviros[j]]
155196
? compdata[exes[i]][enviros[j]][benchmarks[b]]
156197
: null;
157198
if (val !== null && baseline !== "none") {
158-
var baseval = compdata[baseline] && compdata[baseline][enviros[j]]
159-
? compdata[baseline][enviros[j]][benchmarks[b]]
199+
var envForBase = baselineEnv !== null ? baselineEnv : enviros[j];
200+
var baseval = compdata[baselineExe] && compdata[baselineExe][envForBase]
201+
? compdata[baselineExe][envForBase][benchmarks[b]]
160202
: null;
161203
val = (baseval === null || baseval === 0) ? null : val / baseval;
162204
}
@@ -189,8 +231,9 @@ function renderComparisonPlot(plotid, unit, benchmarks, exes, enviros, baseline,
189231
? compdata[exes[i]][enviros[j]][benchmarks[b]]
190232
: null;
191233
if (val !== null && baseline !== "none") {
192-
var baseval = compdata[baseline] && compdata[baseline][enviros[j]]
193-
? compdata[baseline][enviros[j]][benchmarks[b]]
234+
var envForBase = baselineEnv !== null ? baselineEnv : enviros[j];
235+
var baseval = compdata[baselineExe] && compdata[baselineExe][envForBase]
236+
? compdata[baselineExe][envForBase][benchmarks[b]]
194237
: null;
195238
val = (baseval === null || baseval === 0) ? null : val / baseval;
196239
}
@@ -204,6 +247,11 @@ function renderComparisonPlot(plotid, unit, benchmarks, exes, enviros, baseline,
204247

205248
if (datasets.length === 0) { return; }
206249

250+
// Mark datasets where all values are null (baseline has no data for that env)
251+
datasets.forEach(function(ds) {
252+
ds.allNull = ds.data.every(function(v) { return v === null; });
253+
});
254+
207255
// Size the container
208256
var wrapWidth = $("#plotwrapper").width();
209257
var h = horizontal
@@ -247,7 +295,19 @@ function renderComparisonPlot(plotid, unit, benchmarks, exes, enviros, baseline,
247295
font: {size: FONT_SIZE},
248296
boxWidth: 20,
249297
boxHeight: FONT_SIZE,
250-
padding: 8
298+
padding: 8,
299+
generateLabels: function(chart) {
300+
var items = Chart.defaults.plugins.legend.labels.generateLabels(chart);
301+
items.forEach(function(item) {
302+
var ds = chart.data.datasets[item.datasetIndex];
303+
if (ds && ds.allNull) {
304+
// Apply unicode combining strikethrough to each character
305+
item.text = item.text.split('').join('\u0336') + '\u0336';
306+
item.fontColor = '#aaa';
307+
}
308+
});
309+
return items;
310+
}
251311
}
252312
}
253313
},
@@ -298,7 +358,11 @@ function init(defaults) {
298358
$("#benchmark .checkall, #benchmark .uncheckall").click(loadData);
299359

300360
// Re-render without re-fetching for other controls
301-
$("#chart_type, #baseline, #direction, input[name='environments']").change(refreshContent);
361+
$("#chart_type, #baseline, #direction").change(refreshContent);
362+
$("input[name='environments']").change(function() {
363+
updateBaselineDropdown();
364+
refreshContent();
365+
});
302366

303367
$.ajaxSetup ({
304368
cache: false
@@ -309,6 +373,49 @@ function init(defaults) {
309373
$("#permalink").click(function() {
310374
window.location = "?" + $.param(getConfiguration());
311375
});
376+
377+
$("#exportcsv").click(function(e) {
378+
e.preventDefault();
379+
if (!compdata) { return; }
380+
var conf = getConfiguration();
381+
var exes = conf.exe ? conf.exe.split(",").filter(Boolean) : [];
382+
var enviros = readCheckbox("input[name='environments']:checked").split(",").filter(Boolean);
383+
var benchmarks = conf.ben ? conf.ben.split(",").filter(Boolean) : [];
384+
385+
// Header row: benchmark, then one column per exe@env
386+
var header = ["benchmark"];
387+
for (var i = 0; i < exes.length; i++) {
388+
for (var j = 0; j < enviros.length; j++) {
389+
var exeLabel = $("label[for='exe_" + exes[i] + "']").text().trim();
390+
var envLabel = $("label[for='env_" + enviros[j] + "']").text().trim();
391+
header.push(enviros.length > 1 ? exeLabel + "@" + envLabel : exeLabel);
392+
}
393+
}
394+
395+
var rows = [header];
396+
for (var b = 0; b < benchmarks.length; b++) {
397+
var benchLabel = $("label[for='benchmark_" + benchmarks[b] + "']").text().trim();
398+
var row = [benchLabel];
399+
for (var i = 0; i < exes.length; i++) {
400+
for (var j = 0; j < enviros.length; j++) {
401+
var val = compdata[exes[i]] && compdata[exes[i]][enviros[j]]
402+
? compdata[exes[i]][enviros[j]][benchmarks[b]]
403+
: "";
404+
row.push(val === null || val === undefined ? "" : val);
405+
}
406+
}
407+
rows.push(row);
408+
}
409+
410+
var csv = rows.map(function(r) { return r.join(","); }).join("\n");
411+
var blob = new Blob([csv], {type: "text/csv"});
412+
var url = URL.createObjectURL(blob);
413+
var a = document.createElement("a");
414+
a.href = url;
415+
a.download = "comparison.csv";
416+
a.click();
417+
URL.revokeObjectURL(url);
418+
});
312419
}
313420

314421
return {

0 commit comments

Comments (0)