Skip to content

Commit b36c9f8

Browse files
committed
Add gcovr UI customization templates and build scripts
- Add HTML templates for customized gcovr coverage reports with sidebar navigation
- Add build_tree.py script to generate tree.json for directory navigation
- Add gcovr_wrapper.py for local coverage processing
- Update build.sh to handle both local/macOS and CI/Linux environments
- Add macOS workaround: auto-detect paths from .info files, since gcovr cannot read .gcda files directly on macOS
- Add .gitignore for generated coverage reports and temp files
1 parent 832bc10 commit b36c9f8

19 files changed

Lines changed: 2880 additions & 7 deletions

.gitignore

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
# Generated coverage reports
2+
json/gcovr/
3+
json/gcovr-lcov/
4+
json/gcovr-output/
5+
6+
# Claude Code
7+
.claude/
8+
9+
# Temp files
10+
*.bak

build.sh

Lines changed: 41 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -8,15 +8,50 @@ export REPONAME="json"
88
export ORGANIZATION="boostorg"
GCOVRFILTER=".*/$REPONAME/.*"

# Resolve this script's own directory so it works regardless of the caller's
# current working directory.
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
cd "$SCRIPT_DIR/$REPONAME"
BOOST_CI_SRC_FOLDER=$(pwd)

outputlocation="$BOOST_CI_SRC_FOLDER/gcovr"
# Quote the path so cleanup/creation stays safe if it ever contains spaces.
rm -rf "$outputlocation" || true
mkdir -p "$outputlocation"

if [[ -f "$BOOST_CI_SRC_FOLDER/coverage_filtered.info" ]]; then
    # Local/macOS workaround: gcovr cannot read .gcda coverage files directly
    # on macOS, so we convert the .info file (from lcov) to Cobertura XML
    # format instead. The .info file contains absolute paths from the original
    # build environment, which we auto-detect and rewrite to match the local
    # machine's paths.
    ORIGINAL_PATH=$(grep -m1 "^SF:" "$BOOST_CI_SRC_FOLDER/coverage_filtered.info" | sed 's|^SF:||' | sed "s|/$REPONAME/.*||")
    TEMP_COVERAGE="/tmp/coverage_local.info"
    TEMP_XML="/tmp/coverage.xml"

    sed "s|$ORIGINAL_PATH|$SCRIPT_DIR|g" "$BOOST_CI_SRC_FOLDER/coverage_filtered.info" > "$TEMP_COVERAGE"
    lcov_cobertura "$TEMP_COVERAGE" -o "$TEMP_XML"
    sed -i.bak "s|filename=\"\.\./boost-root/|filename=\"$SCRIPT_DIR/boost-root/|g" "$TEMP_XML"

    "$SCRIPT_DIR/scripts/gcovr_wrapper.py" \
        --cobertura-add-tracefile "$TEMP_XML" \
        --root "$SCRIPT_DIR" \
        --html-nested \
        --html-template-dir "$SCRIPT_DIR/templates/html" \
        --output "$outputlocation/index.html"

    # Generate tree.json for sidebar navigation
    python3 "$SCRIPT_DIR/scripts/build_tree.py" "$outputlocation"
else
    # CI/Linux: gcovr reads coverage data directly
    cd ../boost-root
    gcovr --merge-mode-functions separate -p \
        --html-nested \
        --html-template-dir=../templates/html \
        --exclude-unreachable-branches \
        --exclude-throw-branches \
        --exclude '.*/test/.*' \
        --exclude '.*/extra/.*' \
        --filter "$GCOVRFILTER" \
        --html \
        --output "$outputlocation/index.html"

    # Generate tree.json for sidebar navigation
    python3 "../scripts/build_tree.py" "$outputlocation"
fi

json/gcovr/keep.txt

Lines changed: 0 additions & 1 deletion
This file was deleted.

scripts/build_tree.py

Lines changed: 208 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,208 @@
1+
#!/usr/bin/env python3
2+
"""
3+
Build a JSON tree structure from gcovr HTML output.
4+
This enables true inline expand/collapse in the sidebar.
5+
"""
6+
7+
import json
8+
import os
9+
import re
10+
import sys
11+
from html.parser import HTMLParser
12+
from pathlib import Path
13+
14+
15+
class FileListParser(HTMLParser):
    """Parse gcovr HTML to extract file list entries and current path."""

    def __init__(self):
        super().__init__()
        # One dict per completed file row: name / coverage / is_dir / link.
        self.entries = []
        self.current_path = ''       # reserved; never populated by this parser
        self.in_file_row = False     # inside a <div class="file-row ...">
        self.current_entry = {}      # the row currently being assembled
        self.capture_text = None     # 'coverage' while awaiting a percent text node
        self.in_breadcrumb = False   # inside <div class="breadcrumb">

    def handle_starttag(self, tag, attrs):
        attr_map = dict(attrs)

        # Track entry into the breadcrumb div.
        if tag == 'div' and attr_map.get('class') == 'breadcrumb':
            self.in_breadcrumb = True

        # A <div> whose class list contains "file-row" starts a new entry;
        # its data-* attributes seed the name and coverage values.
        if tag == 'div' and 'class' in attr_map:
            class_list = attr_map['class'].split()
            if 'file-row' in class_list:
                self.in_file_row = True
                self.current_entry = {
                    'name': attr_map.get('data-filename', ''),
                    'coverage': attr_map.get('data-coverage', '0'),
                    'is_dir': 'directory' in class_list,
                    'link': None
                }

        # The first non-empty href inside the row becomes the entry's link.
        if self.in_file_row and tag == 'a':
            href = attr_map.get('href', '')
            if href and not self.current_entry.get('link'):
                self.current_entry['link'] = href

        # A coverage-percent span means the next text node carries the percent.
        if self.in_file_row and tag == 'span' and 'class' in attr_map:
            if 'coverage-percent' in attr_map['class']:
                self.capture_text = 'coverage'

    def handle_data(self, data):
        # Only consume text while a coverage-percent span is pending.
        if self.capture_text == 'coverage' and self.in_file_row:
            found = re.search(r'([\d.]+)%?', data.strip())
            if found:
                self.current_entry['coverage'] = found.group(1)
            self.capture_text = None

    def handle_endtag(self, tag):
        # NOTE(review): any closing </div> while inside a row (not just the
        # row's own) finalizes the entry — assumes gcovr's file rows contain
        # no nested divs; confirm against the template markup.
        if tag == 'div' and self.in_file_row and self.current_entry.get('name'):
            self.entries.append(self.current_entry)
            self.current_entry = {}
            self.in_file_row = False
        if tag == 'div' and self.in_breadcrumb:
            self.in_breadcrumb = False
71+
72+
73+
def parse_html_file(filepath):
    """Parse a single HTML file and extract file-row entries.

    Returns the list of entry dicts produced by FileListParser, or an
    empty list when the file cannot be read or parsed (best-effort).
    """
    try:
        with open(filepath, 'r', encoding='utf-8') as handle:
            markup = handle.read()
        extractor = FileListParser()
        extractor.feed(markup)
        return extractor.entries
    except Exception as e:
        # A single bad page should not abort the whole tree build.
        print(f"Error parsing {filepath}: {e}", file=sys.stderr)
        return []
85+
86+
87+
def get_coverage_class(coverage):
    """Map a coverage percentage to a CSS class name.

    Args:
        coverage: Percentage as a number or numeric string (e.g. "85.5").

    Returns:
        'coverage-high' (>= 90), 'coverage-medium' (>= 75),
        'coverage-low' (< 75), or 'coverage-unknown' when the value
        cannot be interpreted as a number.
    """
    try:
        pct = float(coverage)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; float() only raises these two here.
        return 'coverage-unknown'
    if pct >= 90:
        return 'coverage-high'
    if pct >= 75:
        return 'coverage-medium'
    return 'coverage-low'
99+
100+
101+
def build_tree(output_dir):
    """Build complete tree structure by following links recursively."""
    root = Path(output_dir)

    # Pre-parse every page once: HTML filename -> list of row entries.
    pages = {page.name: parse_html_file(page) for page in root.glob('index*.html')}

    def expand(filename, seen=None):
        """Turn one page's entries into tree nodes, recursing into links."""
        seen = set() if seen is None else seen
        if filename in seen:
            # Cycle guard: never re-enter a page already on this path.
            return []
        seen.add(filename)

        result = []
        for row in pages.get(filename, []):
            label = row['name']
            # Heuristic: extension-less names are treated as directories too.
            directory = row['is_dir'] or '.' not in label
            target = row['link']
            grade = row['coverage']
            node = {
                'name': label,
                'coverage': grade,
                'coverageClass': get_coverage_class(grade),
                'isDirectory': directory,
                'link': target,
                'children': []
            }
            # A directory that links to another parsed page contributes its
            # children; each branch gets its own copy of the visited set.
            if directory and target and target in pages:
                node['children'] = expand(target, seen.copy())
            result.append(node)

        # Directories first, then files, each alphabetically (case-insensitive).
        result.sort(key=lambda n: (not n['isDirectory'], n['name'].lower()))
        return result

    # The report's root page is always index.html.
    return expand('index.html')
153+
154+
155+
def inject_tree_data(output_dir, tree):
    """Inject tree data as a JavaScript variable into all HTML files.

    Embedding the JSON inline lets pages read window.GCOVR_TREE_DATA even
    when opened via file:// (where fetching tree.json would fail).
    Returns the number of files modified.
    """
    snippet = f'<script>window.GCOVR_TREE_DATA={json.dumps(tree)};</script>'

    modified = 0
    for page in Path(output_dir).glob('*.html'):
        try:
            text = page.read_text(encoding='utf-8')

            # Skip pages that already carry the data (match the payload
            # opening bracket, not just the variable name).
            if 'window.GCOVR_TREE_DATA=[' in text or 'window.GCOVR_TREE_DATA={' in text:
                continue

            # Insert just before </body>; pages without one are left untouched.
            if '</body>' in text:
                page.write_text(text.replace('</body>', f'{snippet}\n</body>'),
                                encoding='utf-8')
                modified += 1
        except Exception as e:
            print(f"Warning: Could not inject into {page}: {e}", file=sys.stderr)

    return modified
180+
181+
182+
def main():
    """CLI entry point: build tree.json for a gcovr output directory."""
    # Require exactly one positional argument: the report directory.
    if len(sys.argv) < 2:
        print("Usage: build_tree.py <gcovr_output_dir>", file=sys.stderr)
        sys.exit(1)

    output_dir = sys.argv[1]
    if not os.path.isdir(output_dir):
        print(f"Error: {output_dir} is not a directory", file=sys.stderr)
        sys.exit(1)

    tree = build_tree(output_dir)

    # Persist the tree for pages served over HTTP that can fetch tree.json.
    tree_file = os.path.join(output_dir, 'tree.json')
    with open(tree_file, 'w', encoding='utf-8') as sink:
        json.dump(tree, sink, indent=2)
    print(f"Generated {tree_file} with {len(tree)} root entries")

    # Also embed the data directly into each page so file:// access works.
    injected = inject_tree_data(output_dir, tree)
    print(f"Injected tree data into {injected} HTML files")


if __name__ == '__main__':
    main()

scripts/gcovr_wrapper.py

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
#!/opt/homebrew/Cellar/gcovr/8.6/libexec/bin/python
# NOTE(review): shebang is pinned to one specific Homebrew gcovr install —
# confirm this path per machine, or invoke the script via that interpreter.
"""
Wrapper for gcovr that registers .ipp files as C++ for syntax highlighting.
"""

import sys

# Register the .ipp extension with Pygments before importing gcovr, so its
# HTML writer picks the C++ lexer for those files.
# NOTE(review): pygments.lexers._mapping is a private module; this patch may
# break on a Pygments upgrade — verify after bumping the dependency.
# (Removed unused `get_lexer_by_name` import.)
from pygments.lexers import _mapping

# Add *.ipp to the C++ lexer's filename patterns.
cpp_lexer_info = _mapping.LEXERS.get('CppLexer')
if cpp_lexer_info:
    # Entry format: (module, classname, names, filenames, mimetypes)
    module, classname, names, filenames, mimetypes = cpp_lexer_info
    if '*.ipp' not in filenames:
        filenames = filenames + ('*.ipp',)
        _mapping.LEXERS['CppLexer'] = (module, classname, names, filenames, mimetypes)

# Delegate to the real gcovr CLI with the patched lexer table in place.
from gcovr.__main__ import main
sys.exit(main())

0 commit comments

Comments
 (0)