Skip to content

Commit 8a82e7d

Browse files
Merge branch ngt-ci-pipeline into template
2 parents 9574083 + 2457847 commit 8a82e7d

File tree

286 files changed

+16221
-2540
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

286 files changed

+16221
-2540
lines changed

.github/scripts/csv_to_md.py

Lines changed: 20 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,10 @@
11
import argparse
22
import csv
3+
import math
34
import tabulate as tab
45

56
parser = argparse.ArgumentParser()
7+
parser.add_argument('-r', '--runs', type=int, required=True, help='Number of runs')
68
parser.add_argument('-b', '--baseline', required=True, help='Baseline CSV file')
79
parser.add_argument('-c', '--current', required=True, help='Current CSV file')
810
args = parser.parse_args()
@@ -11,29 +13,35 @@ def get_2d_list(csv_filename):
1113
with open(csv_filename) as csv_file:
1214
csv_reader = csv.reader(csv_file)
1315
next(csv_reader)
14-
return [[str(name), float(mean), float(stdev)] for name, mean, stdev in csv_reader]
16+
return [[str(name), float(mean), float(stdev), int(count)] for name, mean, stdev, count in csv_reader]
1517

1618
table_baseline = get_2d_list(args.baseline)
1719
table_current = get_2d_list(args.current)
1820

19-
def get_emoji(d, stdev):
20-
z = 1.96 # 95% confidence interval
21-
if d < -z * stdev:
21+
def student(x, sx, m, y, sy, n):
22+
s = 0.0 if m < 2 or n < 2 else math.sqrt(((m - 1) * sx**2 + (n - 1) * sy**2) / (m + n - 2))
23+
d = x - y
24+
t = 0.0 if s == 0.0 else math.sqrt((n * m) / (n + m)) * d / s
25+
return d, s, t
26+
27+
def get_emoji(t):
28+
quantile = 2.0 # 95% confidence interval
29+
if t < -quantile:
2230
return ':green_circle:'
23-
elif d > z * stdev:
31+
elif t > quantile:
2432
return ':red_circle:'
2533
else:
2634
return ':white_circle:'
2735

2836
table = []
2937
for baseline, current in zip(table_baseline, table_current):
30-
baseline_name, baseline_mean, _ = baseline
31-
name, mean, stdev = current
38+
baseline_name, baseline_mean, baseline_stdev, count_baseline = baseline
39+
name, mean, stdev, count = current
3240
assert(baseline_name == name)
33-
diff = baseline_mean - mean
34-
impact = 0.0 if stdev == 0.0 else diff / stdev
35-
emoji = get_emoji(diff, stdev)
36-
table.append([name, int(mean), f'{stdev:.2f}', int(diff), f'{impact:.2f}', emoji])
41+
total_time = mean * (count // args.runs)
42+
d, s, t = student(baseline_mean, baseline_stdev, args.runs, mean, stdev, args.runs)
43+
emoji = get_emoji(t)
44+
table.append([name, int(total_time), int(mean), f'{stdev:.2f}', f'{d:.2f}', f'{t:.2f}', emoji])
3745

38-
header = ['name', 'mean (ms)', 'stdev \u03C3', 'diff \u0394', '\u0394 / \u03C3', '']
46+
header = ['name', 'total time (\u03BCs)', 'mean (\u03BCs)', 'stdev \u03C3', 'diff \u0394', 't', '']
3947
print(tab.tabulate(table, header, tablefmt="github"))

.github/scripts/profiler_ncu.py

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
import argparse
2+
import csv
3+
import statistics
4+
5+
parser = argparse.ArgumentParser()
6+
parser.add_argument('-i', '--input', required=True, help='Input CSV file')
7+
parser.add_argument('-o', '--output', required=True, help='Output CSV file')
8+
args = parser.parse_args()
9+
10+
kernel_dict = {}
11+
with open(args.input) as csv_file:
12+
csv_reader = csv.reader(csv_file)
13+
next(csv_reader)
14+
for row in csv_reader:
15+
full_name = row[4]
16+
time = int(row[14]) / 1000.0
17+
if len(full_name) > 5 and full_name[:5] == "krnl_":
18+
name = full_name[5:]
19+
if name in kernel_dict.keys():
20+
kernel_dict[name].append(time)
21+
else:
22+
kernel_dict[name] = [time]
23+
24+
data = [["name", "time", "stdev", "count"]]
25+
for name, time_list in kernel_dict.items():
26+
count = len(time_list)
27+
mean = statistics.mean(time_list)
28+
stdev = 0 if count == 1 else statistics.stdev(time_list)
29+
data.append([name, mean, stdev, count])
30+
31+
with open(args.output, 'w') as csv_file:
32+
csv_writer = csv.writer(csv_file)
33+
csv_writer.writerows(data)

.github/scripts/profiler_nsys.py

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
import argparse
2+
import csv
3+
import statistics
4+
5+
parser = argparse.ArgumentParser()
6+
parser.add_argument('-i', '--input', required=True, help='Input CSV file')
7+
parser.add_argument('-o', '--output', required=True, help='Output CSV file')
8+
args = parser.parse_args()
9+
10+
kernel_list = []
11+
with open(args.input) as csv_file:
12+
csv_reader = csv.reader(csv_file)
13+
next(csv_reader)
14+
next(csv_reader)
15+
next(csv_reader)
16+
for row in csv_reader:
17+
if row:
18+
full_name = row[8]
19+
count = int(row[2])
20+
mean = float(row[3])
21+
stdev = float(row[7])
22+
if len(full_name) > 5 and full_name[:5] == "krnl_":
23+
name = full_name[5:]
24+
kernel_list.append([name, mean, stdev, count])
25+
26+
kernel_list.sort(key = lambda row: row[0])
27+
28+
data = [["name", "mean", "stdev", "count"]]
29+
data += kernel_list
30+
31+
with open(args.output, 'w') as csv_file:
32+
csv_writer = csv.writer(csv_file)
33+
csv_writer.writerows(data)
Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
import argparse
2+
import csv
3+
import statistics
4+
5+
parser = argparse.ArgumentParser()
6+
parser.add_argument('-i', '--input', required=True, help='Input CSV file')
7+
parser.add_argument('-o', '--output', required=True, help='Output CSV file')
8+
args = parser.parse_args()
9+
10+
kernel_dict = dict({})
11+
with open(args.input) as csv_file:
12+
csv_reader = csv.reader(csv_file)
13+
next(csv_reader)
14+
for row in csv_reader:
15+
full_name = row[13]
16+
time = (int(row[15]) - int(row[14])) / 1000.0
17+
if len(full_name) > 5 and full_name[:5] == "krnl_":
18+
name = full_name[5:-3]
19+
if name in kernel_dict.keys():
20+
kernel_dict[name].append(time)
21+
else:
22+
kernel_dict[name] = [time]
23+
24+
data = [["name", "mean", "stdev", "count"]]
25+
for name, time_list in kernel_dict.items():
26+
count = len(time_list)
27+
mean = statistics.mean(time_list)
28+
stdev = 0 if count == 1 else statistics.stdev(time_list)
29+
data.append([name, mean, stdev, count])
30+
31+
with open(args.output, 'w') as csv_file:
32+
csv_writer = csv.writer(csv_file)
33+
csv_writer.writerows(data)
Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
1+
import argparse
2+
import csv
3+
import math
4+
import statistics
5+
6+
parser = argparse.ArgumentParser()
7+
parser.add_argument('-d', '--discard', type=int, default=0, help='Number of initial measurements to discard')
8+
parser.add_argument('-i', '--input', required=True, help='Input CSV file')
9+
parser.add_argument('-o', '--output', required=True, help='Output CSV file')
10+
args = parser.parse_args()
11+
12+
time_dict = dict({})
13+
count_dict = dict({})
14+
with open(args.input) as csv_file:
15+
csv_reader = csv.reader(csv_file)
16+
next(csv_reader)
17+
for row in csv_reader:
18+
name = row[2]
19+
time = float(row[3])
20+
count = row[1]
21+
if name in time_dict.keys():
22+
time_dict[name].append(time)
23+
else:
24+
time_dict[name] = [time]
25+
count_dict[name] = 1 if count == '' else int(count)
26+
27+
data = [["name", "mean", "stdev", "count"]]
28+
for name, time_list in time_dict.items():
29+
count = count_dict[name]
30+
mean = statistics.mean(time_list[args.discard:]) / count
31+
runs = len(time_list[args.discard:])
32+
stdev = 0.0 if runs == 1 else statistics.stdev(time_list[args.discard:]) / math.sqrt(count)
33+
data.append([name, mean, stdev, runs * count])
34+
35+
with open(args.output, 'w') as csv_file:
36+
csv_writer = csv.writer(csv_file)
37+
csv_writer.writerows(data)

.github/workflows/clean-test.yml

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -19,10 +19,6 @@ name: Clean PR checks
1919
# Warning: the check_* keys are magic and must consist of the string
2020
# "check_" followed by the applicable check name exactly. The
2121
# "description" field is only the human-readable label for the input.
22-
'check_build/AliceO2/O2/o2/macOS':
23-
description: build/AliceO2/O2/o2/macOS
24-
type: boolean
25-
default: true
2622
'check_build/AliceO2/O2/o2/macOS-arm':
2723
description: build/AliceO2/O2/o2/macOS-arm
2824
type: boolean
@@ -31,8 +27,8 @@ name: Clean PR checks
3127
description: build/O2/fullCI
3228
type: boolean
3329
default: true
34-
'check_build/O2/o2-dataflow-cs8':
35-
description: build/O2/o2-dataflow-cs8
30+
'check_build/O2/o2-dataflow-slc9':
31+
description: build/O2/o2-dataflow-slc9
3632
type: boolean
3733
default: true
3834
'check_build/O2/o2/aarch64':

.github/workflows/standalone-benchmark.yml

Lines changed: 74 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -14,26 +14,46 @@ jobs:
1414
strategy:
1515
fail-fast: false
1616
matrix:
17-
name: [nvidia-h100, nvidia-l40s, amd-mi300x, amd-w7900]
17+
name: [cpu, nvidia-h100, nvidia-l40s, amd-mi300x, amd-w7900]
1818
include:
19+
- name: cpu
20+
runner: cern-nextgen-h100
21+
cmake_args: -DENABLE_CUDA=0 -DENABLE_HIP=0
22+
profiler_runs: 42
23+
standalone_runs: 42
24+
cpu_gpu: "-c"
1925
- name: nvidia-h100
2026
runner: cern-nextgen-h100
2127
cmake_args: -DENABLE_CUDA=1 -DENABLE_HIP=0 -DCUDA_COMPUTETARGET=90
28+
profiler_runs: 21
29+
standalone_runs: 42
30+
cpu_gpu: "-g --memSize 20000000000"
2231
- name: nvidia-l40s
2332
runner: cern-nextgen-l40s
2433
cmake_args: -DENABLE_CUDA=1 -DENABLE_HIP=0 -DCUDA_COMPUTETARGET=89
34+
profiler_runs: 42
35+
standalone_runs: 42
36+
cpu_gpu: "-g --memSize 20000000000"
2537
- name: amd-mi300x
2638
runner: cern-nextgen-mi300x
2739
cmake_args: -DENABLE_CUDA=0 -DENABLE_HIP=1 -DHIP_AMDGPUTARGET=gfx942
40+
profiler_runs: 42
41+
standalone_runs: 42
42+
cpu_gpu: "-g --memSize 20000000000"
2843
- name: amd-w7900
2944
runner: cern-nextgen-w7900
3045
cmake_args: -DENABLE_CUDA=0 -DENABLE_HIP=1 -DHIP_AMDGPUTARGET=gfx1100
46+
profiler_runs: 42
47+
standalone_runs: 42
48+
cpu_gpu: "-g --memSize 20000000000"
3149
env:
3250
WORK_DIR: /cvmfs/alice.cern.ch
3351
ALIBUILD_ARCH_PREFIX: el9-x86_64/Packages
3452
MODULEPATH: /cvmfs/alice.cern.ch/etc/toolchain/modulefiles/el9-x86_64:/cvmfs/alice.cern.ch/el9-x86_64/Modules/modulefiles
3553
STANDALONE_DIR: /root/standalone
36-
BENCHMARK_CSV: /root/${{ matrix.name }}.csv
54+
BENCHMARK_CSV: standalone_${{ matrix.name }}.csv
55+
PROFILER_CSV: profiler_${{ matrix.name }}.csv
56+
TIMING_CA: ./ca -e 50kHz ${{ matrix.cpu_gpu }} --seed 0 --sync --debug 1 # Add --PROCdebugMarkdown 1 --runs 42 --runsInit 2 --PROCresetTimers 1 for benchmark runs
3757
LD_LIBRARY_PATH: /usr/local/cuda-13.0/compat
3858

3959
name: ${{ matrix.name }}
@@ -47,9 +67,6 @@ jobs:
4767
4868
curl -fL --retry 3 -o ${STANDALONE_DIR}/o2-simple-GPU.out https://cernbox.cern.ch/remote.php/dav/public-files/SfYXgQOHFga2w75/o2-simple-GPU.out
4969
50-
mkdir -p ${STANDALONE_DIR}/baseline
51-
curl -fL --retry 3 -o ${STANDALONE_DIR}/baseline/${{ matrix.name }}.csv https://cernbox.cern.ch/remote.php/dav/public-files/SfYXgQOHFga2w75/baseline/${{ matrix.name }}.csv
52-
5370
mkdir -p ${STANDALONE_DIR}/events
5471
curl -fL --retry 3 -o ${STANDALONE_DIR}/events/o2-simple.tar.xz https://cernbox.cern.ch/remote.php/dav/public-files/SfYXgQOHFga2w75/events/o2-simple.tar.xz
5572
tar -xf ${STANDALONE_DIR}/events/o2-simple.tar.xz -C ${STANDALONE_DIR}/events
@@ -68,38 +85,79 @@ jobs:
6885
env:
6986
DETERMINISTIC_MODE: GPU
7087

71-
- name: Test GPU Track Reconstruction
88+
- name: Test Track Reconstruction
7289
run: |
7390
source /etc/profile.d/modules.sh
7491
module load ninja/fortran-v1.11.1.g9-15 Vc/1.4.5-10 boost/v1.83.0-alice2-57 fmt/11.1.2-14 CMake/v3.31.6-10 ms_gsl/4.2.1-3 Clang/v20.1.7-9 TBB/v2022.3.0-3 ROOT/v6-36-04-alice9-15 ONNXRuntime/v1.22.0-71 GLFW/3.3.2-25
7592
cd ${STANDALONE_DIR}
76-
${STANDALONE_DIR}/ca -e o2-simple -g --seed 0 --memSize 20000000000 --sync --runs 1 --RTCenable --PROCdeterministicGPUReconstruction 1 --RTCoptConstexpr 1 --RTCoptSpecialCode 1 --debug 6
77-
cmp ${STANDALONE_DIR}/GPU.out ${STANDALONE_DIR}/o2-simple-GPU.out
78-
rm -rf ${STANDALONE_DIR}/GPU.out ${STANDALONE_DIR}/o2-simple-GPU.out ${STANDALONE_DIR}/events/o2-simple ${STANDALONE_DIR}/build
93+
${STANDALONE_DIR}/ca -e o2-simple ${{ matrix.cpu_gpu }} --seed 0 --sync --runs 1 --RTCenable --PROCdeterministicGPUReconstruction 1 --RTCoptConstexpr 1 --RTCoptSpecialCode 1 --debug 6
94+
cmp ${STANDALONE_DIR}/*.out
95+
rm -rf ${STANDALONE_DIR}/*.out ${STANDALONE_DIR}/events/o2-simple ${STANDALONE_DIR}/build
7996
8097
- name: Build Non-Deterministic
8198
run: *build
8299
env:
83100
DETERMINISTIC_MODE: OFF
84101

85-
- name: Benchmark GPU Track Reconstruction
102+
- name: Benchmark Track Reconstruction
103+
run: |
104+
source /etc/profile.d/modules.sh
105+
module load ninja/fortran-v1.11.1.g9-15 Vc/1.4.5-10 boost/v1.83.0-alice2-57 fmt/11.1.2-14 CMake/v3.31.6-10 ms_gsl/4.2.1-3 Clang/v20.1.7-9 TBB/v2022.3.0-3 ROOT/v6-36-04-alice9-15 ONNXRuntime/v1.22.0-71 GLFW/3.3.2-25
106+
cd ${STANDALONE_DIR}
107+
${TIMING_CA} --debug 1 --runs ${{ matrix.standalone_runs }} --runsInit 0 --PROCdebugMarkdown 1 --PROCresetTimers 1 --PROCdebugCSV /root/${BENCHMARK_CSV}
108+
python3 ${GITHUB_WORKSPACE}/.github/scripts/profiler_standalone.py --discard 0 --input /root/${BENCHMARK_CSV} --output /root/summary_${BENCHMARK_CSV}
109+
110+
- name: Profiler - Nsight Compute
111+
if: ${{ matrix.name == 'nvidia-h100' }}
86112
run: |
113+
dnf install -y cuda-nsight-compute-13-1
87114
source /etc/profile.d/modules.sh
88115
module load ninja/fortran-v1.11.1.g9-15 Vc/1.4.5-10 boost/v1.83.0-alice2-57 fmt/11.1.2-14 CMake/v3.31.6-10 ms_gsl/4.2.1-3 Clang/v20.1.7-9 TBB/v2022.3.0-3 ROOT/v6-36-04-alice9-15 ONNXRuntime/v1.22.0-71 GLFW/3.3.2-25
89116
cd ${STANDALONE_DIR}
90-
${STANDALONE_DIR}/ca -e 50kHz -g --memSize 15000000000 --sync --runs 12 --debug 1 --PROCtimingCSV ${BENCHMARK_CSV}
117+
ncu --set none --metrics gpu__time_duration.avg --export ${{ matrix.name }} --clock-control none --force-overwrite ${TIMING_CA} --runs ${{ matrix.profiler_runs }} --debug 1 --PROCdebugMarkdown 1 # Generates ${{ matrix.name }}.ncu-rep
118+
ncu --import ${STANDALONE_DIR}/${{ matrix.name }}.ncu-rep --print-units base --csv > /root/${PROFILER_CSV}
91119
rm -rf ${STANDALONE_DIR}/events/50kHz ${STANDALONE_DIR}/build
120+
python3 ${GITHUB_WORKSPACE}/.github/scripts/profiler_ncu.py --input /root/${PROFILER_CSV} --output /root/summary_${PROFILER_CSV}
92121
93-
- name: Display table on GitHub web
122+
- name: Profiler - Nsight Systems
123+
if: ${{ matrix.name == 'nvidia-l40s' }}
94124
run: |
125+
dnf config-manager --add-repo "https://developer.download.nvidia.com/devtools/repos/rhel$(source /etc/os-release; echo ${VERSION_ID%%.*})/$(rpm --eval '%{_arch}' | sed s/aarch/arm/)/"
126+
dnf install --nogpgcheck -y nsight-systems-cli-2026.2.1
95127
source /etc/profile.d/modules.sh
96128
module load ninja/fortran-v1.11.1.g9-15 Vc/1.4.5-10 boost/v1.83.0-alice2-57 fmt/11.1.2-14 CMake/v3.31.6-10 ms_gsl/4.2.1-3 Clang/v20.1.7-9 TBB/v2022.3.0-3 ROOT/v6-36-04-alice9-15 ONNXRuntime/v1.22.0-71 GLFW/3.3.2-25
97-
python3 ${GITHUB_WORKSPACE}/.github/scripts/merge_runs.py --discard 2 --input ${BENCHMARK_CSV} --output ${BENCHMARK_CSV}
98-
python3 ${GITHUB_WORKSPACE}/.github/scripts/csv_to_md.py --baseline ${STANDALONE_DIR}/baseline/${{ matrix.name }}.csv --current ${BENCHMARK_CSV} >> ${GITHUB_STEP_SUMMARY}
99-
rm -rf ${STANDALONE_DIR}/baseline
129+
cd ${STANDALONE_DIR}
130+
nsys profile -o ${{ matrix.name }} ${TIMING_CA} --runs ${{ matrix.profiler_runs }} --debug 1 --PROCdebugMarkdown 1 # Generates ${{ matrix.name }}.nsys-rep
131+
nsys stats --report cuda_gpu_kern_sum --timeunit usec --force-export=true --format csv ${{ matrix.name }}.nsys-rep > /root/${PROFILER_CSV}
132+
rm -rf ${STANDALONE_DIR}/events/50kHz ${STANDALONE_DIR}/build
133+
python3 ${GITHUB_WORKSPACE}/.github/scripts/profiler_nsys.py --input /root/${PROFILER_CSV} --output /root/summary_${PROFILER_CSV}
134+
135+
- name: Profiler - rocprofv2
136+
if: ${{ matrix.name == 'amd-mi300x' || matrix.name == 'amd-w7900' }}
137+
run: |
138+
source /etc/profile.d/modules.sh
139+
module load ninja/fortran-v1.11.1.g9-15 Vc/1.4.5-10 boost/v1.83.0-alice2-57 fmt/11.1.2-14 CMake/v3.31.6-10 ms_gsl/4.2.1-3 Clang/v20.1.7-9 TBB/v2022.3.0-3 ROOT/v6-36-04-alice9-15 ONNXRuntime/v1.22.0-71 GLFW/3.3.2-25
140+
cd ${STANDALONE_DIR}
141+
rocprofv2 --output-directory /root --output-file-name ${{ matrix.name }} ${TIMING_CA} --runs ${{ matrix.standalone_runs }} --debug 1 --PROCdebugMarkdown 1 # Generates results_${{ matrix.name }}.csv
142+
rm -rf ${STANDALONE_DIR}/events/50kHz ${STANDALONE_DIR}/build
143+
mv /root/results_${{ matrix.name }}.csv /root/${PROFILER_CSV}
144+
python3 ${GITHUB_WORKSPACE}/.github/scripts/profiler_rocprofv2.py --input /root/${PROFILER_CSV} --output /root/summary_${PROFILER_CSV}
100145
101146
- name: Upload Artifact
102147
uses: actions/upload-artifact@v6
103148
with:
104149
name: ${{ matrix.name }}-artifact
105-
path: /root/${{ matrix.name }}.csv
150+
path: "/root/*.csv"
151+
152+
- name: Display table on GitHub web
153+
run: |
154+
source /etc/profile.d/modules.sh
155+
module load ninja/fortran-v1.11.1.g9-15 Vc/1.4.5-10 boost/v1.83.0-alice2-57 fmt/11.1.2-14 CMake/v3.31.6-10 ms_gsl/4.2.1-3 Clang/v20.1.7-9 TBB/v2022.3.0-3 ROOT/v6-36-04-alice9-15 ONNXRuntime/v1.22.0-71 GLFW/3.3.2-25
156+
mkdir -p ${STANDALONE_DIR}/baseline
157+
curl -fL --retry 3 -o ${STANDALONE_DIR}/baseline/summary_${PROFILER_CSV} https://cernbox.cern.ch/remote.php/dav/public-files/SfYXgQOHFga2w75/baseline/summary_${PROFILER_CSV}
158+
curl -fL --retry 3 -o ${STANDALONE_DIR}/baseline/summary_${BENCHMARK_CSV} https://cernbox.cern.ch/remote.php/dav/public-files/SfYXgQOHFga2w75/baseline/summary_${BENCHMARK_CSV}
159+
python3 ${GITHUB_WORKSPACE}/.github/scripts/csv_to_md.py --runs ${{ matrix.profiler_runs }} --baseline ${STANDALONE_DIR}/baseline/summary_${PROFILER_CSV} --current /root/summary_${PROFILER_CSV} >> ${GITHUB_STEP_SUMMARY}
160+
echo -e "\n\n" >> ${GITHUB_STEP_SUMMARY}
161+
python3 ${GITHUB_WORKSPACE}/.github/scripts/csv_to_md.py --runs ${{ matrix.standalone_runs }} --baseline ${STANDALONE_DIR}/baseline/summary_${BENCHMARK_CSV} --current /root/summary_${BENCHMARK_CSV} >> ${GITHUB_STEP_SUMMARY}
162+
rm -rf ${STANDALONE_DIR}/baseline
163+
if: ${{ matrix.name != 'cpu' }}
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+

.skills/create-a-new-file/SKILL.md

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
---
2+
name: create-a-new-file
3+
description: describes how to create a new file
4+
---
5+
6+
## Copyright statements
7+
8+
The copyright statement for ALICE / O2 is found in ./o2-copyright-statement.md. It should be at the beginning of
9+
the new file using the proper commenting syntax for the given programming language. For example in C++ it should be commented via
10+
multiline comments:
11+
12+
``` C++
13+
// Copyright 2019-<current-year> CERN and copyright holders of ALICE O2.
14+
// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
15+
// ...
16+
```
17+
18+
The only part which needs to be adapted by you is the `<current-year>` which you need to replace with the actual current year.
19+
20+

0 commit comments

Comments
 (0)