From e37b9b7dcff4ff3d2f72824140217045ea376cc3 Mon Sep 17 00:00:00 2001 From: maurycy <5383+maurycy@users.noreply.github.com> Date: Fri, 2 Jan 2026 20:40:54 +0100 Subject: [PATCH 1/7] actions/checkout@v6 --- .github/workflows/build.yml | 4 ++-- .github/workflows/publish.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7bbeeee1..e7169d52 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,7 +12,7 @@ jobs: tox-job: [doc, pep8] runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - uses: actions/setup-python@v5 with: python-version: 3.x @@ -44,7 +44,7 @@ jobs: build: 'free-threading' steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - name: Setup Python uses: actions/setup-python@v5 if: ${{ matrix.build != 'free-threading' }} diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 4da8f261..7062f450 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -13,7 +13,7 @@ jobs: id-token: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - name: Set up Python uses: actions/setup-python@v5 with: From 918cfbf3e5132723503d00b9a333db525428e2aa Mon Sep 17 00:00:00 2001 From: maurycy <5383+maurycy@users.noreply.github.com> Date: Fri, 2 Jan 2026 21:04:05 +0100 Subject: [PATCH 2/7] just pre-commit-config --- .pre-commit-config.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..9101dbdd --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,10 @@ +repos: + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.14.10 + hooks: + - id: ruff-check + name: Run Ruff (lint) + args: [--exit-non-zero-on-fix] + - id: ruff-format + name: Run Ruff (format) + args: [--exit-non-zero-on-fix] From 6ed7815fa62055539fd29eb620fbc55987261b39 Mon Sep 17 00:00:00 2001 From: maurycy <5383+maurycy@users.noreply.github.com> Date: Fri, 2 Jan 2026 21:04:24 +0100 Subject: [PATCH 3/7] basic gh workflow --- .github/workflows/lint.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 .github/workflows/lint.yml diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 00000000..5f908a40 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,12 @@ +name: Lint + +on: [push, pull_request, workflow_dispatch] + +permissions: {} + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - uses: tox-dev/action-pre-commit-uv@v1 From 5d17240b0edc9911be57469f641413450e9c61df Mon Sep 17 00:00:00 2001 From: maurycy <5383+maurycy@users.noreply.github.com> Date: Fri, 2 Jan 2026 21:21:57 +0100 Subject: [PATCH 4/7] exclude doc --- pyproject.toml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 001791be..be9768be 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -66,6 +66,11 @@ packages = ["pyperf", "pyperf.tests"] [tool.setuptools.dynamic] version = {attr = "pyperf.__version__"} +[tool.ruff] +exclude = [ + "doc/", +] + [tool.ruff.lint] extend-select = ["C90", "UP"] extend-ignore = ["UP015", "UP031"] From 1d21622f748ffe6b9fb7df75f653d2e97f5567b4 Mon Sep 17 00:00:00 2001 From: maurycy <5383+maurycy@users.noreply.github.com> Date: Fri, 2 Jan 2026 21:32:43 +0100 Subject: [PATCH 5/7] pyproject: target-version --- pyproject.toml | 1 + 1 
file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index be9768be..44998cb3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -67,6 +67,7 @@ packages = ["pyperf", "pyperf.tests"] version = {attr = "pyperf.__version__"} [tool.ruff] +target-version = "py39" exclude = [ "doc/", ] From 586d6e07943bbd59cb45afd08a2410d35ef91357 Mon Sep 17 00:00:00 2001 From: maurycy <5383+maurycy@users.noreply.github.com> Date: Fri, 2 Jan 2026 21:34:58 +0100 Subject: [PATCH 6/7] dependabot for gh actions --- .github/dependabot.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..5c563144 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,10 @@ +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: monthly + groups: + actions: + patterns: + - "*" From 9628e26e4780fc8091d2a1566c5ba80c76a9c3ff Mon Sep 17 00:00:00 2001 From: maurycy <5383+maurycy@users.noreply.github.com> Date: Fri, 2 Jan 2026 21:36:57 +0100 Subject: [PATCH 7/7] uvx pre-commit run --all-files --- pyperf/__init__.py | 18 +- pyperf/__main__.py | 518 ++++++++++++++++++-------------- pyperf/_bench.py | 252 ++++++++-------- pyperf/_cli.py | 300 ++++++++++-------- pyperf/_collect_metadata.py | 200 ++++++------ pyperf/_command.py | 22 +- pyperf/_compare.py | 95 +++--- pyperf/_cpu_utils.py | 40 +-- pyperf/_formatter.py | 40 +-- pyperf/_hooks.py | 17 +- pyperf/_linux_memory.py | 4 +- pyperf/_manager.py | 117 ++++---- pyperf/_metadata.py | 94 +++--- pyperf/_process_time.py | 39 +-- pyperf/_psutil_memory.py | 6 +- pyperf/_runner.py | 506 +++++++++++++++++++------------ pyperf/_system.py | 379 ++++++++++++----------- pyperf/_timeit.py | 78 ++--- pyperf/_timeit_cli.py | 72 +++-- pyperf/_utils.py | 123 +++++--- pyperf/_win_memory.py | 29 +- pyperf/_worker.py | 175 ++++++----- pyperf/tests/__init__.py | 20 +- pyperf/tests/mult_list_bench.py | 12 +- pyperf/tests/replay.py | 13 +- pyperf/tests/test_bench.py | 287 ++++++++++-------- pyperf/tests/test_cli.py | 33 +- pyperf/tests/test_examples.py | 58 ++-- pyperf/tests/test_metadata.py | 101 +++---- pyperf/tests/test_misc.py | 43 ++- pyperf/tests/test_perf_cli.py | 392 ++++++++++++------------ pyperf/tests/test_runner.py | 340 +++++++++++---------- pyperf/tests/test_system.py | 12 +- pyperf/tests/test_timeit.py | 295 ++++++++++-------- pyperf/tests/test_utils.py | 140 ++++----- 35 files changed, 2693 insertions(+), 2177 deletions(-) diff --git a/pyperf/__init__.py b/pyperf/__init__.py index fbc05a01..43b66711 100644 --- a/pyperf/__init__.py +++ b/pyperf/__init__.py @@ -1,20 +1,24 @@ from time import perf_counter VERSION = (2, 9, 0) -__version__ = '.'.join(map(str, VERSION)) +__version__ = ".".join(map(str, VERSION)) # Export pyperf.perf_counter for backward compatibility with pyperf 1.7 # which supports Python 2 and Python 3 -__all__ = ['perf_counter'] +__all__ = ["perf_counter"] from pyperf._utils import python_implementation, python_has_jit # noqa -__all__.extend(('python_implementation', 'python_has_jit')) + +__all__.extend(("python_implementation", "python_has_jit")) from pyperf._metadata import format_metadata # noqa -__all__.append('format_metadata') + +__all__.append("format_metadata") from pyperf._bench import Run, Benchmark, BenchmarkSuite, add_runs # noqa -__all__.extend(('Run', 'Benchmark', 'BenchmarkSuite', 'add_runs')) -from pyperf._runner import Runner # noqa -__all__.append('Runner') 
+__all__.extend(("Run", "Benchmark", "BenchmarkSuite", "add_runs")) + +from pyperf._runner import Runner # noqa + +__all__.append("Runner") diff --git a/pyperf/__main__.py b/pyperf/__main__.py index 572253ae..da7efb01 100644 --- a/pyperf/__main__.py +++ b/pyperf/__main__.py @@ -6,10 +6,17 @@ import pyperf from pyperf._metadata import _common_metadata -from pyperf._cli import (format_metadata, empty_line, - format_checks, format_histogram, format_title, - format_benchmark, display_title, format_result, - catch_broken_pipe_error) +from pyperf._cli import ( + format_metadata, + empty_line, + format_checks, + format_histogram, + format_title, + format_benchmark, + display_title, + format_result, + catch_broken_pipe_error, +) from pyperf._formatter import format_timedelta, format_seconds, format_datetime from pyperf._cpu_utils import parse_cpu_list from pyperf._timeit_cli import TimeitRunner @@ -17,7 +24,7 @@ def add_cmdline_args(cmd, args): - cmd.extend(('--name', args.name)) + cmd.extend(("--name", args.name)) cmd.append(args.program) if args.program_args: cmd.extend(args.program_args) @@ -28,36 +35,43 @@ def __init__(self, cmd): def parse_name(name): return name.strip() - pyperf.Runner.__init__(self, - _argparser=cmd, - add_cmdline_args=add_cmdline_args) - self._program_args = ('-m', 'pyperf', 'command') + pyperf.Runner.__init__(self, _argparser=cmd, add_cmdline_args=add_cmdline_args) + self._program_args = ("-m", "pyperf", "command") - cmd.add_argument('--name', type=parse_name, default='command', - help='Benchmark name (default: command)') - cmd.add_argument('program', - help='Program path') - cmd.add_argument('program_args', nargs=argparse.REMAINDER, - help='Program arguments') + cmd.add_argument( + "--name", + type=parse_name, + default="command", + help="Benchmark name (default: command)", + ) + cmd.add_argument("program", help="Program path") + cmd.add_argument( + "program_args", nargs=argparse.REMAINDER, help="Program arguments" + ) def create_parser(): - parser = argparse.ArgumentParser(description='Display benchmark results.', - prog='-m pyperf') - subparsers = parser.add_subparsers(dest='action') + parser = argparse.ArgumentParser( + description="Display benchmark results.", prog="-m pyperf" + ) + subparsers = parser.add_subparsers(dest="action") def input_filenames(cmd, name=True): if name: - cmd.add_argument('-b', '--benchmark', metavar='NAME', - dest='benchmarks', action='append', - help='only display the benchmark called NAME') - cmd.add_argument('filenames', metavar='file.json', - type=str, nargs='+', - help='Benchmark file') + cmd.add_argument( + "-b", + "--benchmark", + metavar="NAME", + dest="benchmarks", + action="append", + help="only display the benchmark called NAME", + ) + cmd.add_argument( + "filenames", metavar="file.json", type=str, nargs="+", help="Benchmark file" + ) def display_options(cmd): - cmd.add_argument('-q', '--quiet', - action="store_true", help='enable quiet mode') + cmd.add_argument("-q", "--quiet", action="store_true", help="enable quiet mode") input_filenames(cmd) def parse_affinity(value): @@ -66,149 +80,194 @@ def parse_affinity(value): except ValueError: cpus = None if not cpus: - raise argparse.ArgumentTypeError('invalid CPU list: %r' % value) + raise argparse.ArgumentTypeError("invalid CPU list: %r" % value) return cpus def cpu_affinity(cmd): - cmd.add_argument("--affinity", metavar="CPU_LIST", default=None, - type=parse_affinity, - help='Specify CPU affinity. 
' - 'By default, use isolated CPUs.') + cmd.add_argument( + "--affinity", + metavar="CPU_LIST", + default=None, + type=parse_affinity, + help="Specify CPU affinity. By default, use isolated CPUs.", + ) # show - cmd = subparsers.add_parser('show', help='Display a benchmark') - cmd.add_argument('-m', '--metadata', dest='metadata', action="store_true", - help="Show metadata.") - cmd.add_argument('-g', '--hist', action="store_true", - help='display an histogram of values') - cmd.add_argument('-t', '--stats', action="store_true", - help='display statistics (min, max, ...)') - cmd.add_argument('-d', '--dump', action="store_true", - help='display benchmark run results') + cmd = subparsers.add_parser("show", help="Display a benchmark") + cmd.add_argument( + "-m", "--metadata", dest="metadata", action="store_true", help="Show metadata." + ) + cmd.add_argument( + "-g", "--hist", action="store_true", help="display an histogram of values" + ) + cmd.add_argument( + "-t", "--stats", action="store_true", help="display statistics (min, max, ...)" + ) + cmd.add_argument( + "-d", "--dump", action="store_true", help="display benchmark run results" + ) display_options(cmd) # hist - cmd = subparsers.add_parser('hist', help='Render an histogram') - cmd.add_argument('--extend', action="store_true", - help="Extend the histogram to fit the terminal") - cmd.add_argument('-n', '--bins', type=int, default=None, - help='Number of histogram bars (default: 25, or less ' - 'depeding on the terminal size)') + cmd = subparsers.add_parser("hist", help="Render an histogram") + cmd.add_argument( + "--extend", action="store_true", help="Extend the histogram to fit the terminal" + ) + cmd.add_argument( + "-n", + "--bins", + type=int, + default=None, + help="Number of histogram bars (default: 25, or less " + "depeding on the terminal size)", + ) display_options(cmd) # compare_to - cmd = subparsers.add_parser('compare_to', help='Compare benchmarks') - cmd.add_argument('-q', '--quiet', action="store_true", - help='enable quiet mode') - cmd.add_argument('-v', '--verbose', action="store_true", - help='enable verbose mode') - cmd.add_argument('-G', '--group-by-speed', action="store_true", - help='group slower/faster/same speed') - cmd.add_argument('--min-speed', type=float, - help='Absolute minimum of speed in percent to ' - 'consider that a benchmark is significant ' - '(default: 0%%)') - cmd.add_argument('--table', action="store_true", - help='Render a table') - cmd.add_argument("--table-format", type=str, default="rest", - choices=["rest", "md"], - help="Format of table rendering") + cmd = subparsers.add_parser("compare_to", help="Compare benchmarks") + cmd.add_argument("-q", "--quiet", action="store_true", help="enable quiet mode") + cmd.add_argument("-v", "--verbose", action="store_true", help="enable verbose mode") + cmd.add_argument( + "-G", + "--group-by-speed", + action="store_true", + help="group slower/faster/same speed", + ) + cmd.add_argument( + "--min-speed", + type=float, + help="Absolute minimum of speed in percent to " + "consider that a benchmark is significant " + "(default: 0%%)", + ) + cmd.add_argument("--table", action="store_true", help="Render a table") + cmd.add_argument( + "--table-format", + type=str, + default="rest", + choices=["rest", "md"], + help="Format of table rendering", + ) input_filenames(cmd) # stats - cmd = subparsers.add_parser('stats', help='Compute statistics') + cmd = subparsers.add_parser("stats", help="Compute statistics") display_options(cmd) # metadata - cmd = 
subparsers.add_parser('metadata', help='Display metadata') + cmd = subparsers.add_parser("metadata", help="Display metadata") display_options(cmd) # check - cmd = subparsers.add_parser('check', - help='Check if a benchmark seems stable') + cmd = subparsers.add_parser("check", help="Check if a benchmark seems stable") display_options(cmd) # collect_metadata - cmd = subparsers.add_parser('collect_metadata') + cmd = subparsers.add_parser("collect_metadata") cpu_affinity(cmd) - cmd.add_argument('-o', '--output', metavar='FILENAME', - help='Save metadata as JSON into FILENAME') + cmd.add_argument( + "-o", "--output", metavar="FILENAME", help="Save metadata as JSON into FILENAME" + ) # timeit - cmd = subparsers.add_parser('timeit', help='Quick Python microbenchmark') + cmd = subparsers.add_parser("timeit", help="Quick Python microbenchmark") timeit_runner = TimeitRunner(_argparser=cmd) # system - cmd = subparsers.add_parser('system', help='System setup for benchmarks') + cmd = subparsers.add_parser("system", help="System setup for benchmarks") cpu_affinity(cmd) - cmd.add_argument("system_action", nargs="?", - choices=('show', 'tune', 'reset'), - default='show') + cmd.add_argument( + "system_action", nargs="?", choices=("show", "tune", "reset"), default="show" + ) # convert - cmd = subparsers.add_parser('convert', help='Modify benchmarks') - cmd.add_argument( - 'input_filename', help='Filename of the input benchmark suite') + cmd = subparsers.add_parser("convert", help="Modify benchmarks") + cmd.add_argument("input_filename", help="Filename of the input benchmark suite") output = cmd.add_mutually_exclusive_group(required=True) - output.add_argument('-o', '--output', metavar='OUTPUT_FILENAME', - dest='output_filename', - help='Filename where the output benchmark suite ' - 'is written') - output.add_argument('--stdout', action='store_true', - help='Write benchmark encoded to JSON into stdout') - cmd.add_argument('--include-benchmark', metavar='NAME', - dest='include_benchmarks', action='append', - help='Only keep benchmark called NAME') - cmd.add_argument('--exclude-benchmark', metavar='NAME', - dest='exclude_benchmarks', action='append', - help='Remove the benchmark called NAMED') - cmd.add_argument('--include-runs', help='Only keep benchmark runs RUNS') - cmd.add_argument('--exclude-runs', help='Remove specified benchmark runs') - cmd.add_argument('--indent', action='store_true', - help='Indent JSON (rather using compact JSON)') - cmd.add_argument('--remove-warmups', action='store_true', - help='Remove warmup values') - cmd.add_argument('--add', metavar='FILE', - help='Add benchmark runs of benchmark FILE') - cmd.add_argument('--extract-metadata', metavar='NAME', - help='Use metadata NAME as the new run values') - cmd.add_argument('--remove-all-metadata', action="store_true", - help='Remove all benchmarks metadata, but keep ' - 'the benchmarks name') - cmd.add_argument('--update-metadata', metavar='METADATA', - help='Update metadata: METADATA is a comma-separated ' - 'list of KEY=VALUE') + output.add_argument( + "-o", + "--output", + metavar="OUTPUT_FILENAME", + dest="output_filename", + help="Filename where the output benchmark suite is written", + ) + output.add_argument( + "--stdout", + action="store_true", + help="Write benchmark encoded to JSON into stdout", + ) + cmd.add_argument( + "--include-benchmark", + metavar="NAME", + dest="include_benchmarks", + action="append", + help="Only keep benchmark called NAME", + ) + cmd.add_argument( + "--exclude-benchmark", + metavar="NAME", + 
dest="exclude_benchmarks", + action="append", + help="Remove the benchmark called NAMED", + ) + cmd.add_argument("--include-runs", help="Only keep benchmark runs RUNS") + cmd.add_argument("--exclude-runs", help="Remove specified benchmark runs") + cmd.add_argument( + "--indent", action="store_true", help="Indent JSON (rather using compact JSON)" + ) + cmd.add_argument( + "--remove-warmups", action="store_true", help="Remove warmup values" + ) + cmd.add_argument( + "--add", metavar="FILE", help="Add benchmark runs of benchmark FILE" + ) + cmd.add_argument( + "--extract-metadata", + metavar="NAME", + help="Use metadata NAME as the new run values", + ) + cmd.add_argument( + "--remove-all-metadata", + action="store_true", + help="Remove all benchmarks metadata, but keep the benchmarks name", + ) + cmd.add_argument( + "--update-metadata", + metavar="METADATA", + help="Update metadata: METADATA is a comma-separated list of KEY=VALUE", + ) # dump - cmd = subparsers.add_parser('dump', help='Dump the runs') - cmd.add_argument('-v', '--verbose', action='store_true', - help='enable verbose mode') - cmd.add_argument('--raw', action='store_true', - help='display raw values') + cmd = subparsers.add_parser("dump", help="Dump the runs") + cmd.add_argument("-v", "--verbose", action="store_true", help="enable verbose mode") + cmd.add_argument("--raw", action="store_true", help="display raw values") display_options(cmd) # slowest - cmd = subparsers.add_parser('slowest', - help='List benchmarks which took most ' - 'of the time') - cmd.add_argument('-n', type=int, default=5, - help='Number of slow benchmarks to display (default: 5)') + cmd = subparsers.add_parser( + "slowest", help="List benchmarks which took most of the time" + ) + cmd.add_argument( + "-n", + type=int, + default=5, + help="Number of slow benchmarks to display (default: 5)", + ) input_filenames(cmd, name=False) # command - cmd = subparsers.add_parser('command', - help='Benchmark a command') + cmd = subparsers.add_parser("command", help="Benchmark a command") command_runner = CommandRunner(cmd) return parser, timeit_runner, command_runner -DataItem = collections.namedtuple('DataItem', - 'suite filename benchmark ' - 'name title is_last') -GroupItem = collections.namedtuple('GroupItem', 'benchmark title filename') -GroupItem2 = collections.namedtuple('GroupItem2', 'name benchmarks is_last') -IterSuite = collections.namedtuple('IterSuite', 'filename suite') +DataItem = collections.namedtuple( + "DataItem", "suite filename benchmark name title is_last" +) +GroupItem = collections.namedtuple("GroupItem", "benchmark title filename") +GroupItem2 = collections.namedtuple("GroupItem2", "name benchmarks is_last") +IterSuite = collections.namedtuple("IterSuite", "filename suite") def format_filename_noop(filename): @@ -227,7 +286,7 @@ def format_filename_func(suites): def strip_extension(filename): name = os.path.splitext(filename)[0] - if name.endswith('.json'): + if name.endswith(".json"): # replace "bench.json.gz" with "bench" name = name[:-5] return name @@ -261,8 +320,7 @@ def has_same_unique_benchmark(self): if any(len(suite) > 1 for suite in self.suites): return False names = self.suites[0].get_benchmark_names() - return all(suite.get_benchmark_names() == names - for suite in self.suites[1:]) + return all(suite.get_benchmark_names() == names for suite in self.suites[1:]) def include_benchmarks(self, names): for suite in self.suites: @@ -286,12 +344,12 @@ def iter_suites(self): def __iter__(self): format_filename = format_filename_func(self.suites) 
- show_name = (len(self) > 1) - show_filename = (self.get_nsuite() > 1) + show_name = len(self) > 1 + show_filename = self.get_nsuite() > 1 for suite_index, suite in enumerate(self.suites): filename = format_filename(suite.filename) - last_suite = (suite_index == (len(self.suites) - 1)) + last_suite = suite_index == (len(self.suites) - 1) benchmarks = suite.get_benchmarks() for bench_index, benchmark in enumerate(benchmarks): @@ -303,8 +361,8 @@ def __iter__(self): title = "%s:%s" % (filename, title) else: title = None - last_benchmark = (bench_index == (len(benchmarks) - 1)) - is_last = (last_suite and last_benchmark) + last_benchmark = bench_index == (len(benchmarks) - 1) + is_last = last_suite and last_benchmark yield DataItem(suite, filename, benchmark, name, title, is_last) @@ -313,16 +371,15 @@ def _group_by_name_names(self): for suite in self.suites[1:]: names &= set(suite.get_benchmark_names()) # Keep original name order - return [name for name in self.suites[0].get_benchmark_names() - if name in names] + return [name for name in self.suites[0].get_benchmark_names() if name in names] def group_by_name(self): format_filename = format_filename_func(self.suites) - show_filename = (self.get_nsuite() > 1) + show_filename = self.get_nsuite() > 1 names = self._group_by_name_names() - show_name = (len(names) > 1) + show_name = len(names) > 1 groups = [] for index, name in enumerate(names): @@ -340,7 +397,7 @@ def group_by_name(self): title = None benchmarks.append(GroupItem(benchmark, title, filename)) - is_last = (index == (len(names) - 1)) + is_last = index == (len(names) - 1) group = GroupItem2(name, benchmarks, is_last) groups.append(group) @@ -360,7 +417,7 @@ def group_by_name_ignored(self): def load_benchmarks(args): data = Benchmarks() data.load_benchmark_suites(args.filenames) - if getattr(args, 'benchmarks', None): + if getattr(args, "benchmarks", None): data.include_benchmarks(args.benchmarks) return data @@ -371,11 +428,11 @@ def _display_common_metadata(metadatas, lines): for metadata in metadatas: # don't display name as metadata, it's already displayed - metadata.pop('name', None) + metadata.pop("name", None) common_metadata = _common_metadata(metadatas) if common_metadata: - format_title('Common metadata', lines=lines) + format_title("Common metadata", lines=lines) empty_line(lines) format_metadata(common_metadata, lines=lines) @@ -394,8 +451,7 @@ def cmd_compare_to(args): sys.exit(1) if args.group_by_speed and data.get_nsuite() != 2: - print("ERROR: --by-speed only works on two benchmark files", - file=sys.stderr) + print("ERROR: --by-speed only works on two benchmark files", file=sys.stderr) sys.exit(1) try: @@ -407,12 +463,21 @@ def cmd_compare_to(args): def cmd_collect_metadata(args): from pyperf._collect_metadata import cmd_collect_metadata as func + func(args) -def display_benchmarks(args, show_metadata=False, hist=False, stats=False, - dump=False, result=False, checks=False, - display_runs_args=None, only_checks=False): +def display_benchmarks( + args, + show_metadata=False, + hist=False, + stats=False, + dump=False, + result=False, + checks=False, + display_runs_args=None, + only_checks=False, +): data = load_benchmarks(args) output = [] @@ -433,10 +498,10 @@ def display_benchmarks(args, show_metadata=False, hist=False, stats=False, break if use_title: - show_filename = (data.get_nsuite() > 1) + show_filename = data.get_nsuite() > 1 show_name = not data.has_same_unique_benchmark() if not show_filename and stats: - show_filename = (len(data) > 1) + show_filename = 
len(data) > 1 suite = None for index, item in enumerate(data): @@ -449,14 +514,16 @@ def display_benchmarks(args, show_metadata=False, hist=False, stats=False, lines.append("Metadata:") format_metadata(metadata, lines=lines) - bench_lines = format_benchmark(item.benchmark, - hist=hist, - stats=stats, - dump=dump, - checks=checks, - result=result, - display_runs_args=display_runs_args, - only_checks=only_checks) + bench_lines = format_benchmark( + item.benchmark, + hist=hist, + stats=stats, + dump=dump, + checks=checks, + result=result, + display_runs_args=display_runs_args, + only_checks=only_checks, + ) if bench_lines: empty_line(lines) @@ -479,8 +546,13 @@ def display_benchmarks(args, show_metadata=False, hist=False, stats=False, dates = suite.get_dates() if dates: start, end = dates - lines.append("Start date: %s" % format_datetime(start, microsecond=False)) - lines.append("End date: %s" % format_datetime(end, microsecond=False)) + lines.append( + "Start date: %s" + % format_datetime(start, microsecond=False) + ) + lines.append( + "End date: %s" % format_datetime(end, microsecond=False) + ) if show_name: format_title(item.name, 2, lines=lines) @@ -507,7 +579,7 @@ def display_benchmarks(args, show_metadata=False, hist=False, stats=False, for line in output: print(line) - show_filename = (data.get_nsuite() > 1) + show_filename = data.get_nsuite() > 1 suite = None for item in data: @@ -520,18 +592,20 @@ def display_benchmarks(args, show_metadata=False, hist=False, stats=False, line = format_result(item.benchmark) if item.title: - line = '%s: %s' % (item.name, line) + line = "%s: %s" % (item.name, line) print(line) def cmd_show(args): - display_benchmarks(args, - show_metadata=args.metadata, - hist=args.hist, - stats=args.stats, - dump=args.dump, - checks=not args.quiet, - result=True) + display_benchmarks( + args, + show_metadata=args.metadata, + hist=args.hist, + stats=args.stats, + dump=args.dump, + checks=not args.quiet, + result=True, + ) def cmd_metadata(args): @@ -543,17 +617,15 @@ def cmd_check(args): def cmd_dump(args): - display_runs_args = {'quiet': args.quiet, - 'verbose': args.verbose, - 'raw': args.raw} - display_benchmarks(args, - dump=True, - display_runs_args=display_runs_args, - checks=not args.quiet) + display_runs_args = {"quiet": args.quiet, "verbose": args.verbose, "raw": args.raw} + display_benchmarks( + args, dump=True, display_runs_args=display_runs_args, checks=not args.quiet + ) def cmd_timeit(args, timeit_runner): import pyperf._timeit_cli as timeit_cli + timeit_runner._set_args(args) timeit_cli.main(timeit_runner) @@ -569,19 +641,21 @@ def cmd_hist(args): ignored = list(data.group_by_name_ignored()) groups = data.group_by_name() - show_filename = (data.get_nsuite() > 1) - show_group_name = (len(groups) > 1) + show_filename = data.get_nsuite() > 1 + show_group_name = len(groups) > 1 for name, benchmarks, is_last in groups: if show_group_name: display_title(name) - benchmarks = [(benchmark, filename if show_filename else None) - for benchmark, title, filename in benchmarks] + benchmarks = [ + (benchmark, filename if show_filename else None) + for benchmark, title, filename in benchmarks + ] - for line in format_histogram(benchmarks, bins=args.bins, - extend=args.extend, - checks=checks): + for line in format_histogram( + benchmarks, bins=args.bins, extend=args.extend, checks=checks + ): print(line) if not (is_last or ignored): @@ -591,25 +665,27 @@ def cmd_hist(args): for bench in ignored: name = bench.get_name() print("[ %s ]" % name) - for line in 
format_histogram([name], bins=args.bins, - extend=args.extend, - checks=checks): + for line in format_histogram( + [name], bins=args.bins, extend=args.extend, checks=checks + ): print(line) def fatal_missing_benchmarks(suite, names): - print("ERROR: The benchmark suite %s doesn't contain " - "with benchmark name in %r" - % (suite.filename, names), - file=sys.stderr) + print( + "ERROR: The benchmark suite %s doesn't contain " + "with benchmark name in %r" % (suite.filename, names), + file=sys.stderr, + ) sys.exit(1) def fatal_no_more_benchmark(suite): - print("ERROR: After modification, the benchmark suite %s has no " - "more benchmark!" - % suite.filename, - file=sys.stderr) + print( + "ERROR: After modification, the benchmark suite %s has no " + "more benchmark!" % suite.filename, + file=sys.stderr, + ) sys.exit(1) @@ -651,9 +727,10 @@ def cmd_convert(args): try: benchmark._filter_runs(include, only_runs) except ValueError: - print("ERROR: Benchmark %r has no more run" - % benchmark.get_name(), - file=sys.stderr) + print( + "ERROR: Benchmark %r has no more run" % benchmark.get_name(), + file=sys.stderr, + ) sys.exit(1) if args.remove_warmups: @@ -661,14 +738,13 @@ def cmd_convert(args): benchmark._remove_warmups() if args.update_metadata: - items = [item.strip() - for item in args.update_metadata.split(',')] + items = [item.strip() for item in args.update_metadata.split(",")] metadata = {} for item in items: if not item: continue - key, _, value = item.partition('=') + key, _, value = item.partition("=") metadata[key] = value for benchmark in suite: @@ -680,14 +756,18 @@ def cmd_convert(args): try: benchmark._extract_metadata(name) except KeyError: - print("ERROR: Benchmark %r has no metadata %r" - % (benchmark.get_name(), name), - file=sys.stderr) + print( + "ERROR: Benchmark %r has no metadata %r" + % (benchmark.get_name(), name), + file=sys.stderr, + ) sys.exit(1) except TypeError: - print("ERROR: Metadata %r of benchmark %r is not an integer" - % (name, benchmark.get_name()), - file=sys.stderr) + print( + "ERROR: Metadata %r of benchmark %r is not an integer" + % (name, benchmark.get_name()), + file=sys.stderr, + ) sys.exit(1) if args.remove_all_metadata: @@ -705,7 +785,7 @@ def cmd_slowest(args): data = load_benchmarks(args) nslowest = args.n - use_title = (data.get_nsuite() > 1) + use_title = data.get_nsuite() > 1 for item in data.iter_suites(): if use_title: display_title(item.filename, 1) @@ -718,12 +798,14 @@ def cmd_slowest(args): for index, item in enumerate(benchs[:nslowest], 1): duration, bench = item - print("#%s: %s (%s)" - % (index, bench.get_name(), format_timedelta(duration))) + print( + "#%s: %s (%s)" % (index, bench.get_name(), format_timedelta(duration)) + ) def cmd_system(args): from pyperf._system import System + System().main(args.system_action, args) @@ -740,19 +822,19 @@ def main(): action = args.action dispatch = { - 'show': functools.partial(cmd_show, args), - 'compare_to': functools.partial(cmd_compare_to, args), - 'hist': functools.partial(cmd_hist, args), - 'stats': functools.partial(cmd_stats, args), - 'metadata': functools.partial(cmd_metadata, args), - 'check': functools.partial(cmd_check, args), - 'collect_metadata': functools.partial(cmd_collect_metadata, args), - 'timeit': functools.partial(cmd_timeit, args, timeit_runner), - 'convert': functools.partial(cmd_convert, args), - 'dump': functools.partial(cmd_dump, args), - 'slowest': functools.partial(cmd_slowest, args), - 'system': functools.partial(cmd_system, args), - 'command': 
functools.partial(cmd_bench_command, command_runner, args), + "show": functools.partial(cmd_show, args), + "compare_to": functools.partial(cmd_compare_to, args), + "hist": functools.partial(cmd_hist, args), + "stats": functools.partial(cmd_stats, args), + "metadata": functools.partial(cmd_metadata, args), + "check": functools.partial(cmd_check, args), + "collect_metadata": functools.partial(cmd_collect_metadata, args), + "timeit": functools.partial(cmd_timeit, args, timeit_runner), + "convert": functools.partial(cmd_convert, args), + "dump": functools.partial(cmd_dump, args), + "slowest": functools.partial(cmd_slowest, args), + "system": functools.partial(cmd_system, args), + "command": functools.partial(cmd_bench_command, command_runner, args), } with catch_broken_pipe_error(): diff --git a/pyperf/_bench.py b/pyperf/_bench.py index 20e90349..2f4f170c 100644 --- a/pyperf/_bench.py +++ b/pyperf/_bench.py @@ -6,9 +6,13 @@ import statistics -from pyperf._metadata import (NUMBER_TYPES, parse_metadata, - _common_metadata, get_metadata_info, - _exclude_common_metadata) +from pyperf._metadata import ( + NUMBER_TYPES, + parse_metadata, + _common_metadata, + get_metadata_info, + _exclude_common_metadata, +) from pyperf._formatter import DEFAULT_UNIT, format_values from pyperf._utils import median_abs_dev, percentile @@ -27,24 +31,25 @@ # 3 - (pyperf 0.7) add Run class # 2 - (pyperf 0.6) support multiple benchmarks per file # 1 - first version -_JSON_VERSION = '1.0' -_JSON_MAP_VERSION = {5: (0, 8, 3), 6: (0, 9, 6), '1.0': (1, 0)} +_JSON_VERSION = "1.0" +_JSON_MAP_VERSION = {5: (0, 8, 3), 6: (0, 9, 6), "1.0": (1, 0)} # Metadata checked by add_run(): all runs have must have the same # value for these metadata (or no run must have this metadata) _CHECKED_METADATA = ( - 'aslr', - 'cpu_count', - 'cpu_model_name', - 'hostname', - 'inner_loops', - 'name', - 'platform', - 'python_executable', - 'python_implementation', - 'python_unicode', - 'python_version', - 'unit') + "aslr", + "cpu_count", + "cpu_model_name", + "hostname", + "inner_loops", + "name", + "platform", + "python_executable", + "python_implementation", + "python_unicode", + "python_version", + "unit", +) _UNSET = object() @@ -72,7 +77,7 @@ def _check_warmups(warmups): def _cached_attr(func): - attr = '_' + func.__name__ + attr = "_" + func.__name__ def method(self): value = getattr(self, attr) @@ -89,18 +94,18 @@ def method(self): class Run: # Run is immutable, so it can be shared/exchanged between two benchmarks - __slots__ = ('_warmups', '_values', '_metadata') + __slots__ = ("_warmups", "_values", "_metadata") - def __init__(self, values, warmups=None, - metadata=None, collect_metadata=True): - if any(not (isinstance(value, NUMBER_TYPES) and value > 0) - for value in values): + def __init__(self, values, warmups=None, metadata=None, collect_metadata=True): + if any(not (isinstance(value, NUMBER_TYPES) and value > 0) for value in values): raise ValueError("values must be a sequence of number > 0.0") if warmups is not None and not _check_warmups(warmups): - raise ValueError("warmups must be a sequence of (loops, value) " - "where loops is a int >= 1 and value " - "is a float >= 0.0") + raise ValueError( + "warmups must be a sequence of (loops, value) " + "where loops is a int >= 1 and value " + "is a float >= 0.0" + ) # tuple of (loops: int, value) items if warmups: @@ -145,36 +150,41 @@ def _replace(self, values=None, warmups=True, metadata=None): def _is_calibration(self): # Run used to calibrate or recalibration the number of loops, # or to 
calibrate the number of warmups - return (not self._values) + return not self._values def _is_calibration_loops(self): if not self._is_calibration(): return False - if self._has_metadata('calibrate_loops'): + if self._has_metadata("calibrate_loops"): return True # backward compatibility with pyperf 1.1 and older - return not any(self._has_metadata(name) - for name in ('recalibrate_loops', 'calibrate_warmups', - 'recalibrate_warmups')) + return not any( + self._has_metadata(name) + for name in ( + "recalibrate_loops", + "calibrate_warmups", + "recalibrate_warmups", + ) + ) def _is_recalibration_loops(self): - return self._is_calibration() and self._has_metadata('recalibrate_loops') + return self._is_calibration() and self._has_metadata("recalibrate_loops") def _is_calibration_warmups(self): - return self._is_calibration() and self._has_metadata('calibrate_warmups') + return self._is_calibration() and self._has_metadata("calibrate_warmups") def _is_recalibration_warmups(self): - return self._is_calibration() and self._has_metadata('recalibrate_warmups') + return self._is_calibration() and self._has_metadata("recalibrate_warmups") def _has_metadata(self, name): - return (name in self._metadata) + return name in self._metadata def _get_calibration_loops(self): metadata = self._metadata - if 'calibrate_loops' in metadata: - return metadata['calibrate_loops'] - if 'recalibrate_loops' in metadata: - return metadata['recalibrate_loops'] + if "calibrate_loops" in metadata: + return metadata["calibrate_loops"] + if "recalibrate_loops" in metadata: + return metadata["recalibrate_loops"] if self._is_calibration_loops(): # backward compatibility with pyperf 1.1 and older @@ -184,14 +194,14 @@ def _get_calibration_loops(self): def _get_calibration_warmups(self): metadata = self._metadata - if 'calibrate_warmups' in metadata: - return metadata['calibrate_warmups'] - if 'recalibrate_warmups' in metadata: - return metadata['recalibrate_warmups'] + if "calibrate_warmups" in metadata: + return metadata["calibrate_warmups"] + if "recalibrate_warmups" in metadata: + return metadata["recalibrate_warmups"] raise ValueError("run is not a warmup calibration") def _get_name(self): - return self._metadata.get('name', None) + return self._metadata.get("name", None) def get_metadata(self): return dict(self._metadata) @@ -208,10 +218,10 @@ def values(self): return self._values def get_loops(self): - return self._metadata.get('loops', 1) + return self._metadata.get("loops", 1) def get_inner_loops(self): - return self._metadata.get('inner_loops', 1) + return self._metadata.get("inner_loops", 1) def get_total_loops(self): return self.get_loops() * self.get_inner_loops() @@ -221,8 +231,9 @@ def _get_raw_values(self, warmups=False): if warmups and self._warmups: inner_loops = self.get_inner_loops() - raw_values.extend(value * (loops * inner_loops) - for loops, value in self._warmups) + raw_values.extend( + value * (loops * inner_loops) for loops, value in self._warmups + ) total_loops = self.get_total_loops() raw_values.extend(value * total_loops for value in self._values) @@ -235,50 +246,49 @@ def _remove_warmups(self): return self._replace(warmups=False) def _get_duration(self): - duration = self._metadata.get('duration', None) + duration = self._metadata.get("duration", None) if duration is not None: return duration raw_values = self._get_raw_values(warmups=True) return math.fsum(raw_values) def _get_date(self): - return self._metadata.get('date', None) + return self._metadata.get("date", None) def _as_json(self, 
common_metadata): data = {} if self._warmups: - data['warmups'] = self._warmups + data["warmups"] = self._warmups if self._values: - data['values'] = self._values + data["values"] = self._values metadata = _exclude_common_metadata(self._metadata, common_metadata) if metadata: - data['metadata'] = metadata + data["metadata"] = metadata return data @classmethod def _json_load(cls, version, run_data, common_metadata): - metadata = run_data.get('metadata', {}) + metadata = run_data.get("metadata", {}) if common_metadata: metadata = dict(common_metadata, **metadata) - warmups = run_data.get('warmups', None) + warmups = run_data.get("warmups", None) if warmups: if version >= (1, 0): warmups = [tuple(item) for item in warmups] else: - inner_loops = metadata.get('inner_loops', 1) - warmups = [(loops, raw_value / (loops * inner_loops)) - for loops, raw_value in warmups] + inner_loops = metadata.get("inner_loops", 1) + warmups = [ + (loops, raw_value / (loops * inner_loops)) + for loops, raw_value in warmups + ] if version >= (0, 9, 6): - values = run_data.get('values', ()) + values = run_data.get("values", ()) else: - values = run_data['samples'] + values = run_data["samples"] - return cls(values, - warmups=warmups, - metadata=metadata, - collect_metadata=False) + return cls(values, warmups=warmups, metadata=metadata, collect_metadata=False) def _extract_metadata(self, name): value = self._metadata.get(name, None) @@ -292,26 +302,27 @@ def _extract_metadata(self, name): metadata = None if not isinstance(value, NUMBER_TYPES): - raise TypeError("metadata %r value is not an integer: got %s" - % (name, type(value).__name__)) + raise TypeError( + "metadata %r value is not an integer: got %s" + % (name, type(value).__name__) + ) return self._replace(values=(value,), warmups=False, metadata=metadata) def _remove_all_metadata(self): - name = self._metadata.get('name', None) - unit = self._metadata.get('unit', None) + name = self._metadata.get("name", None) + unit = self._metadata.get("unit", None) metadata = {} if name: - metadata['name'] = name + metadata["name"] = name if unit: - metadata['unit'] = unit + metadata["unit"] = unit return self._replace(metadata=metadata) def _update_metadata(self, metadata): - if 'inner_loops' in metadata: - inner_loops = self._metadata.get('inner_loops', None) - if (inner_loops is not None - and metadata['inner_loops'] != inner_loops): + if "inner_loops" in metadata: + inner_loops = self._metadata.get("inner_loops", None) + if inner_loops is not None and metadata["inner_loops"] != inner_loops: raise ValueError("inner_loops metadata cannot be modified") metadata2 = dict(self._metadata) @@ -321,23 +332,23 @@ def _update_metadata(self, metadata): class Benchmark: def __init__(self, runs): - self._runs = [] # list of Run objects + self._runs = [] # list of Run objects self._clear_runs_cache() if not runs: raise ValueError("runs must be a non-empty sequence of Run objects") # A benchmark must have a name - if not runs[0]._has_metadata('name'): - raise ValueError("A benchmark must have a name: " - "the first run has no name metadata") + if not runs[0]._has_metadata("name"): + raise ValueError( + "A benchmark must have a name: the first run has no name metadata" + ) for run in runs: self.add_run(run) def __repr__(self): - return ('' - % (self.get_name(), len(self._runs))) + return "" % (self.get_name(), len(self._runs)) def get_name(self): run = self._runs[0] @@ -358,8 +369,7 @@ def get_total_duration(self): def _get_run_property(self, get_property): # ignore calibration runs - 
values = [get_property(run) for run in self._runs - if not run._is_calibration()] + values = [get_property(run) for run in self._runs if not run._is_calibration()] if len(set(values)) == 1: return values[0] @@ -463,7 +473,7 @@ def required_nprocesses(self): W = 0.01 # (4Z²σ²)/(W²) - return math.ceil((4 * Z ** 2 * sigma ** 2) / (W ** 2)) + return math.ceil((4 * Z**2 * sigma**2) / (W**2)) def percentile(self, p): if not (0 <= p <= 100): @@ -482,9 +492,10 @@ def add_run(self, run): value = metadata.get(key, None) run_value = run_metata.get(key, None) if run_value != value: - raise ValueError("incompatible benchmark, metadata %s is " - "different: current=%s, run=%s" - % (key, value, run_value)) + raise ValueError( + "incompatible benchmark, metadata %s is " + "different: current=%s, run=%s" % (key, value, run_value) + ) if self._common_metadata is not None: # Update common metadata @@ -497,7 +508,7 @@ def add_run(self, run): def get_unit(self): run = self._runs[0] - return run._metadata.get('unit', DEFAULT_UNIT) + return run._metadata.get("unit", DEFAULT_UNIT) def format_values(self, values): unit = self.get_unit() @@ -541,15 +552,15 @@ def _only_calibration(self): @classmethod def _json_load(cls, version, data, suite_metadata): if version >= (0, 9, 6): - metadata = data.get('metadata', {}) + metadata = data.get("metadata", {}) else: - metadata = data.get('common_metadata', {}) + metadata = data.get("common_metadata", {}) metadata = parse_metadata(metadata) if suite_metadata: metadata = dict(suite_metadata, **metadata) runs = [] - for run_data in data['runs']: + for run_data in data["runs"]: run = Run._json_load(version, run_data, metadata) # Don't call add_run() to avoid O(n) complexity: # expect that runs were already validated before being written @@ -562,10 +573,10 @@ def _as_json(self, suite_metadata): metadata = self._get_common_metadata() common_metadata = dict(metadata, **suite_metadata) - data = {'runs': [run._as_json(common_metadata) for run in self._runs]} + data = {"runs": [run._as_json(common_metadata) for run in self._runs]} metadata = _exclude_common_metadata(metadata, suite_metadata) if metadata: - data['metadata'] = metadata + data["metadata"] = metadata return data @staticmethod @@ -616,8 +627,7 @@ def _remove_warmups(self): def add_runs(self, benchmark): if not isinstance(benchmark, Benchmark): - raise TypeError("expected Benchmark, got %s" - % type(benchmark).__name__) + raise TypeError("expected Benchmark, got %s" % type(benchmark).__name__) if benchmark is self: raise ValueError("cannot add a benchmark to itself") @@ -671,8 +681,9 @@ def update_metadata(self, metadata): class BenchmarkSuite: def __init__(self, benchmarks, filename=None): if not benchmarks: - raise ValueError("benchmarks must be a non-empty " - "sequence of Benchmark objects") + raise ValueError( + "benchmarks must be a non-empty sequence of Benchmark objects" + ) self.filename = filename self._benchmarks = [] @@ -683,8 +694,7 @@ def get_benchmark_names(self): return [bench.get_name() for bench in self] def get_metadata(self): - benchs_metadata = [bench._get_common_metadata() - for bench in self._benchmarks] + benchs_metadata = [bench._get_common_metadata() for bench in self._benchmarks] return _common_metadata(benchs_metadata) def __len__(self): @@ -709,8 +719,9 @@ def add_runs(self, result): for benchmark in result: self._add_benchmark_runs(benchmark) else: - raise TypeError("expect Benchmark or BenchmarkSuite, got %s" - % type(result).__name__) + raise TypeError( + "expect Benchmark or BenchmarkSuite, 
got %s" % type(result).__name__ + ) def get_benchmark(self, name): for bench in self._benchmarks: @@ -732,21 +743,20 @@ def add_benchmark(self, benchmark): except KeyError: pass else: - raise ValueError("the suite has already a benchmark called %r" - % name) + raise ValueError("the suite has already a benchmark called %r" % name) self._benchmarks.append(benchmark) @classmethod def _json_load(cls, filename, data): - version = data.get('version') + version = data.get("version") version_info = _JSON_MAP_VERSION.get(version) if not version_info: raise ValueError("file format version %r not supported" % version) - benchmarks_json = data['benchmarks'] + benchmarks_json = data["benchmarks"] if version_info >= (0, 9, 6): - metadata = data.get('metadata', {}) + metadata = data.get("metadata", {}) if metadata is not None: metadata = parse_metadata(metadata) else: @@ -766,13 +776,14 @@ def _json_load(cls, filename, data): @staticmethod def _load_open(filename): if isinstance(filename, bytes): - suffix = b'.gz' + suffix = b".gz" else: - suffix = '.gz' + suffix = ".gz" if filename.endswith(suffix): # Use lazy import to limit imports on 'import pyperf' import gzip + return gzip.open(filename, "rt", encoding="utf-8") else: return open(filename, "r", encoding="utf-8") @@ -783,17 +794,17 @@ def load(cls, file): import json if isinstance(file, (bytes, str)): - if file != '-': + if file != "-": filename = file fp = cls._load_open(filename) with fp: data = json.load(fp) else: - filename = '' + filename = "" data = json.load(sys.stdin) else: # file is a file object - filename = getattr(file, 'name', None) + filename = getattr(file, "name", None) data = json.load(file) return cls._json_load(filename, data) @@ -809,9 +820,9 @@ def loads(cls, string): @staticmethod def _dump_open(filename, replace): if isinstance(filename, bytes): - suffix = b'.gz' + suffix = b".gz" else: - suffix = '.gz' + suffix = ".gz" if not replace and os.path.exists(filename): raise OSError(errno.EEXIST, "File already exists") @@ -826,11 +837,10 @@ def _dump_open(filename, replace): def _as_json(self): metadata = self.get_metadata() - benchmarks = [benchmark._as_json(metadata) - for benchmark in self._benchmarks] - data = {'version': _JSON_VERSION, 'benchmarks': benchmarks} + benchmarks = [benchmark._as_json(metadata) for benchmark in self._benchmarks] + data = {"version": _JSON_VERSION, "benchmarks": benchmarks} if metadata: - data['metadata'] = metadata + data["metadata"] = metadata return data def dump(self, file, compact=True, replace=False): @@ -842,9 +852,9 @@ def dump(self, file, compact=True, replace=False): def dump(data, fp, compact): kw = {} if compact: - kw['separators'] = (',', ':') + kw["separators"] = (",", ":") else: - kw['indent'] = 4 + kw["indent"] = 4 json.dump(data, fp, sort_keys=True, **kw) fp.write("\n") fp.flush() @@ -865,16 +875,14 @@ def _replace_benchmarks(self, benchmarks): def _convert_include_benchmark(self, names): name_set = set(names) - benchmarks = [bench for bench in self - if bench.get_name() in name_set] + benchmarks = [bench for bench in self if bench.get_name() in name_set] if not benchmarks: raise KeyError("no benchmark found with name in %r" % names) self._replace_benchmarks(benchmarks) def _convert_exclude_benchmark(self, names): name_set = set(names) - benchmarks = [bench for bench in self - if bench.get_name() not in name_set] + benchmarks = [bench for bench in self if bench.get_name() not in name_set] self._replace_benchmarks(benchmarks) def get_total_duration(self): diff --git a/pyperf/_cli.py 
b/pyperf/_cli.py index 10686261..33280fd3 100644 --- a/pyperf/_cli.py +++ b/pyperf/_cli.py @@ -3,14 +3,13 @@ import os.path import sys -from pyperf._formatter import (format_seconds, format_number, - format_datetime) +from pyperf._formatter import format_seconds, format_number, format_datetime from pyperf._metadata import format_metadata as _format_metadata def empty_line(lines): if lines: - lines.append('') + lines.append("") def format_title(title, level=1, lines=None): @@ -21,9 +20,9 @@ def format_title(title, level=1, lines=None): lines.append(title) if level == 1: - char = '=' + char = "=" else: - char = '-' + char = "-" lines.append(char * len(title)) return lines @@ -34,7 +33,7 @@ def display_title(title, level=1): print() -def format_metadata(metadata, prefix='- ', lines=None): +def format_metadata(metadata, prefix="- ", lines=None): if lines is None: lines = [] for name, value in sorted(metadata.items()): @@ -52,12 +51,13 @@ def _format_values_diff(bench, values, raw, total_loops): value = float(value) / total_loops delta = float(value) - mean if abs(delta) > max_delta: - values_str[index] += ' (%+.0f%%)' % (delta * 100 / mean) + values_str[index] += " (%+.0f%%)" % (delta * 100 / mean) return values_str -def format_run(bench, run_index, run, common_metadata=None, raw=False, - verbose=0, lines=None): +def format_run( + bench, run_index, run, common_metadata=None, raw=False, verbose=0, lines=None +): if lines is None: lines = [] @@ -66,52 +66,53 @@ def format_run(bench, run_index, run, common_metadata=None, raw=False, if run._is_calibration(): if run._is_calibration_warmups(): warmups = run._get_calibration_warmups() - action = 'calibrate the number of warmups: %s' % format_number(warmups) + action = "calibrate the number of warmups: %s" % format_number(warmups) elif run._is_recalibration_warmups(): warmups = run._get_calibration_warmups() - action = 'recalibrate the number of warmups: %s' % format_number(warmups) + action = "recalibrate the number of warmups: %s" % format_number(warmups) elif run._is_recalibration_loops(): loops = run._get_calibration_loops() - action = 'recalibrate the number of loops: %s' % format_number(loops) + action = "recalibrate the number of loops: %s" % format_number(loops) else: loops = run._get_calibration_loops() - action = 'calibrate the number of loops: %s' % format_number(loops) + action = "calibrate the number of loops: %s" % format_number(loops) lines.append("Run %s: %s" % (run_index, action)) if raw: - name = 'raw calibrate' + name = "raw calibrate" else: - name = 'calibrate' + name = "calibrate" unit = bench.get_unit() format_value = bench.format_value for index, warmup in enumerate(run.warmups, 1): loops, value = warmup raw_value = value * (loops * inner_loops) if raw: - text = ("%s (loops: %s)" - % (format_value(raw_value), - format_number(loops))) + text = "%s (loops: %s)" % ( + format_value(raw_value), + format_number(loops), + ) # when using --track-memory, displaying value * loops doesn't make # sense, so only display raw value if the unit is seconds - elif unit == 'second': - text = ("%s (loops: %s, raw: %s)" - % (format_value(value), - format_number(loops), - format_value(raw_value))) + elif unit == "second": + text = "%s (loops: %s, raw: %s)" % ( + format_value(value), + format_number(loops), + format_value(raw_value), + ) else: - text = ("%s (loops: %s)" - % (format_value(value), - format_number(loops))) - lines.append("- %s %s: %s" - % (name, index, text)) + text = "%s (loops: %s)" % (format_value(value), format_number(loops)) + 
lines.append("- %s %s: %s" % (name, index, text)) else: - show_warmup = (verbose >= 0) + show_warmup = verbose >= 0 total_loops = run.get_total_loops() values = run.values if raw: - warmups = [bench.format_value(value * (loops * inner_loops)) - for loops, value in run.warmups] + warmups = [ + bench.format_value(value * (loops * inner_loops)) + for loops, value in run.warmups + ] values = [value * total_loops for value in values] else: warmups = run.warmups @@ -122,38 +123,42 @@ def format_run(bench, run_index, run, common_metadata=None, raw=False, if verbose >= 0: loops = run.get_loops() - lines.append("Run %s: %s, %s, %s" - % (run_index, - format_number(len(warmups), 'warmup'), - format_number(len(values), 'value'), - format_number(loops, 'loop'))) + lines.append( + "Run %s: %s, %s, %s" + % ( + run_index, + format_number(len(warmups), "warmup"), + format_number(len(values), "value"), + format_number(loops, "loop"), + ) + ) else: lines.append("Run %s:" % run_index) if warmups and show_warmup: if raw: - name = 'raw warmup' + name = "raw warmup" else: - name = 'warmup' + name = "warmup" for index, warmup in enumerate(warmups, 1): - lines.append('- %s %s: %s' % (name, index, warmup)) + lines.append("- %s %s: %s" % (name, index, warmup)) if raw: - name = 'raw value' + name = "raw value" else: - name = 'value' + name = "value" for index, value in enumerate(values, 1): - lines.append('- %s %s: %s' % (name, index, value)) + lines.append("- %s %s: %s" % (name, index, value)) if verbose > 0: metadata = run.get_metadata() if metadata: - lines.append('- Metadata:') + lines.append("- Metadata:") for name, value in sorted(metadata.items()): if common_metadata and name in common_metadata: continue value = _format_metadata(name, value) - lines.append(' %s: %s' % (name, value)) + lines.append(" %s: %s" % (name, value)) return lines @@ -175,7 +180,7 @@ def _format_runs(bench, quiet=False, verbose=False, raw=False, lines=None): # FIXME: display metadata in format_benchmark() common_metadata = bench.get_metadata() lines.append("Metadata:") - format_metadata(common_metadata, prefix=' ', lines=lines) + format_metadata(common_metadata, prefix=" ", lines=lines) else: common_metadata = None @@ -186,14 +191,20 @@ def _format_runs(bench, quiet=False, verbose=False, raw=False, lines=None): if not empty_line_written: empty_line_written = True empty_line(lines) - format_run(bench, run_index, run, - common_metadata=common_metadata, - verbose=verbose, raw=raw, lines=lines) + format_run( + bench, + run_index, + run, + common_metadata=common_metadata, + verbose=verbose, + raw=raw, + lines=lines, + ) return lines -PERCENTILE_NAMES = {0: 'minimum', 25: 'Q1', 50: 'median', 75: 'Q3', 100: 'maximum'} +PERCENTILE_NAMES = {0: "minimum", 25: "Q1", 50: "median", 75: "Q3", 100: "maximum"} def format_stats(bench, lines): @@ -221,29 +232,29 @@ def format_stats(bench, lines): raw_values = bench._get_raw_values() lines.append("Raw value minimum: %s" % bench.format_value(min(raw_values))) lines.append("Raw value maximum: %s" % bench.format_value(max(raw_values))) - lines.append('') + lines.append("") # Number of values ncalibration_runs = sum(run._is_calibration() for run in bench._runs) - lines.append("Number of calibration run: %s" - % format_number(ncalibration_runs)) - lines.append("Number of run with values: %s" - % (format_number(nrun - ncalibration_runs))) + lines.append("Number of calibration run: %s" % format_number(ncalibration_runs)) + lines.append( + "Number of run with values: %s" % (format_number(nrun - ncalibration_runs)) 
+ ) lines.append("Total number of run: %s" % format_number(nrun)) - lines.append('') + lines.append("") # Number of values nwarmup = bench._get_nwarmup() text = format_number(nwarmup) if isinstance(nwarmup, float): - text += ' (average)' - lines.append('Number of warmup per run: %s' % text) + text += " (average)" + lines.append("Number of warmup per run: %s" % text) nvalue_per_run = bench._get_nvalue_per_run() text = format_number(nvalue_per_run) if isinstance(nvalue_per_run, float): - text += ' (average)' - lines.append('Number of value per run: %s' % text) + text += " (average)" + lines.append("Number of value per run: %s" % text) # Loop iterations per value loops = bench.get_loops() @@ -256,20 +267,20 @@ def format_stats(bench, lines): if not (isinstance(inner_loops, int) and inner_loops == 1): if isinstance(loops, int): - loops = format_number(loops, 'outer-loop') + loops = format_number(loops, "outer-loop") else: - loops = '%.1f outer-loops (average)' + loops = "%.1f outer-loops (average)" if isinstance(inner_loops, int): - inner_loops = format_number(inner_loops, 'inner-loop') + inner_loops = format_number(inner_loops, "inner-loop") else: inner_loops = "%.1f inner-loops (average)" % inner_loops - text = '%s (%s x %s)' % (text, loops, inner_loops) + text = "%s (%s x %s)" % (text, loops, inner_loops) lines.append("Loop iterations per value: %s" % text) lines.append("Total number of values: %s" % format_number(nvalue)) - lines.append('') + lines.append("") # Minimum table = [("Minimum", bench.format_value(min(values)))] @@ -278,9 +289,12 @@ def format_stats(bench, lines): median = bench.median() if len(values) > 2: median_abs_dev = bench.median_abs_dev() - table.append(("Median +- MAD", - "%s +- %s" - % bench.format_values((median, median_abs_dev)))) + table.append( + ( + "Median +- MAD", + "%s +- %s" % bench.format_values((median, median_abs_dev)), + ) + ) else: table.append(("Mean", bench.format_value(median))) @@ -288,8 +302,9 @@ def format_stats(bench, lines): mean = bench.mean() if len(values) > 2: stdev = bench.stdev() - table.append(("Mean +- std dev", - "%s +- %s" % bench.format_values((mean, stdev)))) + table.append( + ("Mean +- std dev", "%s +- %s" % bench.format_values((mean, stdev))) + ) else: table.append(("Mean", bench.format_value(mean))) @@ -298,13 +313,12 @@ def format_stats(bench, lines): # Render table width = max(len(row[0]) + 1 for row in table) for key, value in table: - key = (key + ':').ljust(width) + key = (key + ":").ljust(width) lines.append("%s %s" % (key, value)) - lines.append('') + lines.append("") def format_limit(mean, value): - return ("%s (%+.0f%% of the mean)" - % (fmt(value), (value - mean) * 100.0 / mean)) + return "%s (%+.0f%% of the mean)" % (fmt(value), (value - mean) * 100.0 / mean) # Percentiles for p in (0, 5, 25, 50, 75, 95, 100): @@ -312,31 +326,31 @@ def format_limit(mean, value): text = "%3sth percentile: %s" % (p, text) name = PERCENTILE_NAMES.get(p) if name: - text = '%s -- %s' % (text, name) + text = "%s -- %s" % (text, name) lines.append(text) - lines.append('') + lines.append("") # Outliers q1 = bench.percentile(25) q3 = bench.percentile(75) iqr = q3 - q1 - outlier_min = (q1 - 1.5 * iqr) - outlier_max = (q3 + 1.5 * iqr) - noutlier = sum(not (outlier_min <= value <= outlier_max) - for value in values) + outlier_min = q1 - 1.5 * iqr + outlier_max = q3 + 1.5 * iqr + noutlier = sum(not (outlier_min <= value <= outlier_max) for value in values) bounds = bench.format_values((outlier_min, outlier_max)) - lines.append('Number of outlier (out 
of %s..%s): %s' - % (bounds[0], bounds[1], format_number(noutlier))) + lines.append( + "Number of outlier (out of %s..%s): %s" + % (bounds[0], bounds[1], format_number(noutlier)) + ) return lines -def format_histogram(benchmarks, bins=20, extend=False, lines=None, - checks=False): +def format_histogram(benchmarks, bins=20, extend=False, lines=None, checks=False): import collections import shutil - if hasattr(shutil, 'get_terminal_size'): + if hasattr(shutil, "get_terminal_size"): columns, nline = shutil.get_terminal_size() else: columns, nline = (80, 25) @@ -378,9 +392,13 @@ def value_bucket(value): count_max = max(counter.values()) count_width = len(str(count_max)) - value_width = max([len(bench.format_value(bucket * value_k)) - for bucket in range(bucket_min, bucket_max + 1)]) - line = ': %s #' % count_max + value_width = max( + [ + len(bench.format_value(bucket * value_k)) + for bucket in range(bucket_min, bucket_max + 1) + ] + ) + line = ": %s #" % count_max width = columns - (value_width + len(line)) if not extend: width = min(width, 79) @@ -390,9 +408,10 @@ def value_bucket(value): count = counter.get(bucket, 0) linelen = int(round(count * line_k)) text = bench.format_value(bucket * value_k) - line = ('#' * linelen) or '|' - lines.append("{:>{}}: {:>{}} {}".format(text, value_width, - count, count_width, line)) + line = ("#" * linelen) or "|" + lines.append( + "{:>{}}: {:>{}} {}".format(text, value_width, count, count_width, line) + ) if checks: format_checks(bench, lines=lines) @@ -420,21 +439,24 @@ def format_checks(bench, lines=None, check_too_many_processes=False): stdev = bench.stdev() percent = stdev * 100.0 / mean if percent >= 10.0: - warn("the standard deviation (%s) is %.0f%% of the mean (%s)" - % (bench.format_value(stdev), percent, bench.format_value(mean))) + warn( + "the standard deviation (%s) is %.0f%% of the mean (%s)" + % (bench.format_value(stdev), percent, bench.format_value(mean)) + ) else: # display a warning if the number of samples isn't enough to get a stable result required_nprocesses = bench.required_nprocesses() - if ( - required_nprocesses is not None and - required_nprocesses > len(bench._runs) + if required_nprocesses is not None and required_nprocesses > len( + bench._runs ): - warn("Not enough samples to get a stable result (95% certainly of less than 1% variation)") + warn( + "Not enough samples to get a stable result (95% certainly of less than 1% variation)" + ) # Minimum and maximum, detect obvious outliers for minimum, value in ( - ('minimum', min(values)), - ('maximum', max(values)), + ("minimum", min(values)), + ("maximum", max(values)), ): percent = (value - mean) * 100.0 / mean if abs(percent) >= 50: @@ -442,15 +464,16 @@ def format_checks(bench, lines=None, check_too_many_processes=False): text = "%.0f%% greater" % (percent) else: text = "%.0f%% smaller" % (-percent) - warn("the %s (%s) is %s than the mean (%s)" - % (minimum, bench.format_value(value), text, bench.format_value(mean))) + warn( + "the %s (%s) is %s than the mean (%s)" + % (minimum, bench.format_value(value), text, bench.format_value(mean)) + ) # Check that the shortest raw value took at least 1 ms - if bench.get_unit() == 'second': + if bench.get_unit() == "second": shortest = min(bench._get_raw_values()) if shortest < 1e-3: - warn("the shortest raw value is only %s" - % bench.format_value(shortest)) + warn("the shortest raw value is only %s" % bench.format_value(shortest)) if warnings: empty_line(lines) @@ -458,37 +481,43 @@ def format_checks(bench, lines=None, 
check_too_many_processes=False): for msg in warnings: lines.append("* %s" % msg) empty_line(lines) - lines.append("Try to rerun the benchmark with more runs, values " - "and/or loops.") - lines.append("Run '%s -m pyperf system tune' command to reduce " - "the system jitter." - % os.path.basename(sys.executable)) - lines.append("Use pyperf stats, pyperf dump and pyperf hist to analyze results.") + lines.append("Try to rerun the benchmark with more runs, values and/or loops.") + lines.append( + "Run '%s -m pyperf system tune' command to reduce " + "the system jitter." % os.path.basename(sys.executable) + ) + lines.append( + "Use pyperf stats, pyperf dump and pyperf hist to analyze results." + ) lines.append("Use --quiet option to hide these warnings.") if check_too_many_processes: if required_nprocesses is None: required_nprocesses = bench.required_nprocesses() if ( - required_nprocesses is not None and - required_nprocesses < len(bench._runs) * 0.75 + required_nprocesses is not None + and required_nprocesses < len(bench._runs) * 0.75 ): - lines.append("Benchmark was run more times than necessary to get a stable result.") lines.append( - "Consider passing processes=%d to the Runner constructor to save time." % - required_nprocesses + "Benchmark was run more times than necessary to get a stable result." + ) + lines.append( + "Consider passing processes=%d to the Runner constructor to save time." + % required_nprocesses ) # Warn if nohz_full+intel_pstate combo if found in cpu_config metadata for run in bench._runs: - cpu_config = run._metadata.get('cpu_config') + cpu_config = run._metadata.get("cpu_config") if not cpu_config: continue - if 'nohz_full' in cpu_config and 'intel_pstate' in cpu_config: + if "nohz_full" in cpu_config and "intel_pstate" in cpu_config: empty_line(lines) - warn("WARNING: nohz_full is enabled on CPUs which use the " - "intel_pstate driver, whereas intel_pstate is incompatible " - "with nohz_full") + warn( + "WARNING: nohz_full is enabled on CPUs which use the " + "intel_pstate driver, whereas intel_pstate is incompatible " + "with nohz_full" + ) warn("CPU config: %s" % cpu_config) warn("See https://bugzilla.redhat.com/show_bug.cgi?id=1378529") break @@ -500,7 +529,7 @@ def _format_result_value(bench): mean = bench.mean() if bench.get_nvalue() >= 2: args = bench.format_values((mean, bench.stdev())) - return '%s +- %s' % args + return "%s +- %s" % args else: return bench.format_value(mean) @@ -526,11 +555,11 @@ def format_result_value(bench): info = [] if loops is not None: - info.append(format_number(loops, 'loop')) + info.append(format_number(loops, "loop")) if warmups is not None: - info.append(format_number(warmups, 'warmup')) + info.append(format_number(warmups, "warmup")) if info: - return '' % ', '.join(info) + return "" % ", ".join(info) return _format_result_value(bench) @@ -556,22 +585,31 @@ def format_result(bench): info = [] if loops is not None: - info.append(format_number(loops, 'loop')) + info.append(format_number(loops, "loop")) if warmups is not None: - info.append(format_number(warmups, 'warmup')) + info.append(format_number(warmups, "warmup")) if info: - return 'Calibration: %s' % ', '.join(info) + return "Calibration: %s" % ", ".join(info) text = _format_result_value(bench) if bench.get_nvalue() >= 2: - return 'Mean +- std dev: %s' % text + return "Mean +- std dev: %s" % text else: return text -def format_benchmark(bench, checks=True, metadata=False, - dump=False, stats=False, hist=False, show_name=False, - result=True, display_runs_args=None, 
only_checks=False): +def format_benchmark( + bench, + checks=True, + metadata=False, + dump=False, + stats=False, + hist=False, + show_name=False, + result=True, + display_runs_args=None, + only_checks=False, +): lines = [] if metadata: @@ -606,7 +644,7 @@ def format_benchmark(bench, checks=True, metadata=False, # FIXME: remove this function? def multiline_output(args): - return (args.hist or args.stats or args.dump or args.metadata) + return args.hist or args.stats or args.dump or args.metadata @contextlib.contextmanager diff --git a/pyperf/_collect_metadata.py b/pyperf/_collect_metadata.py index 37cc4290..6affa4a4 100644 --- a/pyperf/_collect_metadata.py +++ b/pyperf/_collect_metadata.py @@ -5,6 +5,7 @@ import socket import sys import time + try: import resource except ImportError: @@ -12,6 +13,7 @@ try: from pyperf._utils import USE_PSUTIL + if not USE_PSUTIL: psutil = None else: @@ -21,84 +23,89 @@ import pyperf from pyperf._cli import format_metadata -from pyperf._cpu_utils import (format_cpu_list, - parse_cpu_list, get_isolated_cpus, - get_logical_cpu_count, format_cpu_infos, - set_cpu_affinity) +from pyperf._cpu_utils import ( + format_cpu_list, + parse_cpu_list, + get_isolated_cpus, + get_logical_cpu_count, + format_cpu_infos, + set_cpu_affinity, +) from pyperf._formatter import format_timedelta, format_datetime from pyperf._process_time import get_max_rss -from pyperf._utils import (MS_WINDOWS, - open_text, read_first_line, sysfs_path, proc_path) +from pyperf._utils import MS_WINDOWS, open_text, read_first_line, sysfs_path, proc_path + if MS_WINDOWS: from pyperf._win_memory import check_tracking_memory, get_peak_pagefile_usage def normalize_text(text): text = str(text) - text = re.sub(r'\s+', ' ', text) + text = re.sub(r"\s+", " ", text) return text.strip() def collect_python_metadata(metadata): # Implementation impl = pyperf.python_implementation() - metadata['python_implementation'] = impl + metadata["python_implementation"] = impl # Version version = platform.python_version() - match = re.search(r'\[(PyPy [^ ]+)', sys.version) + match = re.search(r"\[(PyPy [^ ]+)", sys.version) if match: - version = '%s (Python %s)' % (match.group(1), version) + version = "%s (Python %s)" % (match.group(1), version) bits = platform.architecture()[0] if bits: - if bits == '64bit': - bits = '64-bit' - elif bits == '32bit': - bits = '32-bit' - version = '%s (%s)' % (version, bits) + if bits == "64bit": + bits = "64-bit" + elif bits == "32bit": + bits = "32-bit" + version = "%s (%s)" % (version, bits) # '74667320778e' in 'Python 2.7.12+ (2.7:74667320778e,' - match = re.search(r'^[^(]+\([^:]+:([a-f0-9]{6,}\+?),', sys.version) + match = re.search(r"^[^(]+\([^:]+:([a-f0-9]{6,}\+?),", sys.version) if match: revision = match.group(1) else: # 'bbd45126bc691f669c4ebdfbd74456cd274c6b92' # in 'Python 2.7.10 (bbd45126bc691f669c4ebdfbd74456cd274c6b92,' - match = re.search(r'^[^(]+\(([a-f0-9]{6,}\+?),', sys.version) + match = re.search(r"^[^(]+\(([a-f0-9]{6,}\+?),", sys.version) if match: revision = match.group(1) else: revision = None if revision: - version = '%s revision %s' % (version, revision) - metadata['python_version'] = version + version = "%s revision %s" % (version, revision) + metadata["python_version"] = version if sys.executable: - metadata['python_executable'] = sys.executable + metadata["python_executable"] = sys.executable # timer - info = time.get_clock_info('perf_counter') - metadata['timer'] = ('%s, resolution: %s' - % (info.implementation, - format_timedelta(info.resolution))) + info = 
time.get_clock_info("perf_counter") + metadata["timer"] = "%s, resolution: %s" % ( + info.implementation, + format_timedelta(info.resolution), + ) # PYTHONHASHSEED - if os.environ.get('PYTHONHASHSEED'): - hash_seed = os.environ['PYTHONHASHSEED'] + if os.environ.get("PYTHONHASHSEED"): + hash_seed = os.environ["PYTHONHASHSEED"] try: if hash_seed != "random": hash_seed = int(hash_seed) except ValueError: pass else: - metadata['python_hash_seed'] = hash_seed + metadata["python_hash_seed"] = hash_seed # compiler python_compiler = normalize_text(platform.python_compiler()) if python_compiler: - metadata['python_compiler'] = python_compiler + metadata["python_compiler"] = python_compiler # CFLAGS try: @@ -106,15 +113,15 @@ def collect_python_metadata(metadata): except ImportError: pass else: - cflags = sysconfig.get_config_var('CFLAGS') + cflags = sysconfig.get_config_var("CFLAGS") if cflags: cflags = normalize_text(cflags) - metadata['python_cflags'] = cflags + metadata["python_cflags"] = cflags - config_args = sysconfig.get_config_var('CONFIG_ARGS') + config_args = sysconfig.get_config_var("CONFIG_ARGS") if config_args: config_args = normalize_text(config_args) - metadata['python_config_args'] = config_args + metadata["python_config_args"] = config_args # GC disabled? try: @@ -123,7 +130,7 @@ def collect_python_metadata(metadata): pass else: if not gc.isenabled(): - metadata['python_gc'] = 'disabled' + metadata["python_gc"] = "disabled" def read_proc(path): @@ -138,52 +145,52 @@ def read_proc(path): def collect_linux_metadata(metadata): # ASLR - for line in read_proc('sys/kernel/randomize_va_space'): - if line == '0': - metadata['aslr'] = 'No randomization' - elif line == '1': - metadata['aslr'] = 'Conservative randomization' - elif line == '2': - metadata['aslr'] = 'Full randomization' + for line in read_proc("sys/kernel/randomize_va_space"): + if line == "0": + metadata["aslr"] = "No randomization" + elif line == "1": + metadata["aslr"] = "Conservative randomization" + elif line == "2": + metadata["aslr"] = "Full randomization" break def get_cpu_affinity(): - if hasattr(os, 'sched_getaffinity'): + if hasattr(os, "sched_getaffinity"): return os.sched_getaffinity(0) if psutil is not None: proc = psutil.Process() # cpu_affinity() is only available on Linux, Windows and FreeBSD - if hasattr(proc, 'cpu_affinity'): + if hasattr(proc, "cpu_affinity"): return proc.cpu_affinity() return None def collect_system_metadata(metadata): - metadata['platform'] = platform.platform(True, False) - if sys.platform.startswith('linux'): + metadata["platform"] = platform.platform(True, False) + if sys.platform.startswith("linux"): collect_linux_metadata(metadata) # on linux, load average over 1 minute for line in read_proc("loadavg"): fields = line.split() loadavg = fields[0] - metadata['load_avg_1min'] = float(loadavg) + metadata["load_avg_1min"] = float(loadavg) - if len(fields) >= 4 and '/' in fields[3]: - runnable_threads = fields[3].split('/', 1)[0] + if len(fields) >= 4 and "/" in fields[3]: + runnable_threads = fields[3].split("/", 1)[0] runnable_threads = int(runnable_threads) - metadata['runnable_threads'] = runnable_threads + metadata["runnable_threads"] = runnable_threads - if 'load_avg_1min' not in metadata and hasattr(os, 'getloadavg'): - metadata['load_avg_1min'] = os.getloadavg()[0] + if "load_avg_1min" not in metadata and hasattr(os, "getloadavg"): + metadata["load_avg_1min"] = os.getloadavg()[0] # Hostname hostname = socket.gethostname() if hostname: - metadata['hostname'] = hostname + 
metadata["hostname"] = hostname # Boot time boot_time = None @@ -198,8 +205,8 @@ def collect_system_metadata(metadata): if boot_time is not None: btime = datetime.datetime.fromtimestamp(boot_time) - metadata['boot_time'] = format_datetime(btime) - metadata['uptime'] = time.time() - boot_time + metadata["boot_time"] = format_datetime(btime) + metadata["uptime"] = time.time() - boot_time def collect_memory_metadata(metadata): @@ -213,7 +220,7 @@ def collect_memory_metadata(metadata): if MS_WINDOWS and not check_tracking_memory(): usage = get_peak_pagefile_usage() if usage: - metadata['mem_peak_pagefile_usage'] = usage + metadata["mem_peak_pagefile_usage"] = usage def collect_cpu_freq(metadata, cpus): @@ -221,19 +228,19 @@ def collect_cpu_freq(metadata, cpus): cpu_set = set(cpus) cpu_freq = {} cpu = None - for line in read_proc('cpuinfo'): + for line in read_proc("cpuinfo"): line = line.rstrip() - if line.startswith('processor'): + if line.startswith("processor"): # Intel format, example where \t is a tab (U+0009 character): # processor\t: 7 # model name\t: Intel(R) Core(TM) i7-6820HQ CPU @ 2.70GHz # cpu MHz\t\t: 800.009 - match = re.match(r'^processor\s*: ([0-9]+)', line) + match = re.match(r"^processor\s*: ([0-9]+)", line) if match is None: # IBM Z # Example: "processor 0: version = 00, identification = [...]" - match = re.match(r'^processor ([0-9]+): ', line) + match = re.match(r"^processor ([0-9]+): ", line) if match is None: # unknown /proc/cpuinfo format: silently ignore and exit return @@ -243,24 +250,24 @@ def collect_cpu_freq(metadata, cpus): # skip this CPU cpu = None - elif line.startswith('cpu MHz') and cpu is not None: + elif line.startswith("cpu MHz") and cpu is not None: # Intel: 'cpu MHz : 1261.613' - mhz = line.split(':', 1)[-1].strip() + mhz = line.split(":", 1)[-1].strip() mhz = float(mhz) mhz = int(round(mhz)) - cpu_freq[cpu] = '%s MHz' % mhz + cpu_freq[cpu] = "%s MHz" % mhz - elif line.startswith('clock') and line.endswith('MHz') and cpu is not None: + elif line.startswith("clock") and line.endswith("MHz") and cpu is not None: # Power8: 'clock : 3425.000000MHz' - mhz = line[:-3].split(':', 1)[-1].strip() + mhz = line[:-3].split(":", 1)[-1].strip() mhz = float(mhz) mhz = int(round(mhz)) - cpu_freq[cpu] = '%s MHz' % mhz + cpu_freq[cpu] = "%s MHz" % mhz if not cpu_freq: return - metadata['cpu_freq'] = '; '.join(format_cpu_infos(cpu_freq)) + metadata["cpu_freq"] = "; ".join(format_cpu_infos(cpu_freq)) def get_cpu_config(cpu): @@ -270,26 +277,26 @@ def get_cpu_config(cpu): path = os.path.join(sys_cpu_path, "cpu%s/cpufreq/scaling_driver" % cpu) scaling_driver = read_first_line(path) if scaling_driver: - info.append('driver:%s' % scaling_driver) + info.append("driver:%s" % scaling_driver) - if scaling_driver == 'intel_pstate': + if scaling_driver == "intel_pstate": path = os.path.join(sys_cpu_path, "intel_pstate/no_turbo") no_turbo = read_first_line(path) - if no_turbo == '1': - info.append('intel_pstate:no turbo') - elif no_turbo == '0': - info.append('intel_pstate:turbo') + if no_turbo == "1": + info.append("intel_pstate:no turbo") + elif no_turbo == "0": + info.append("intel_pstate:turbo") path = os.path.join(sys_cpu_path, "cpu%s/cpufreq/scaling_governor" % cpu) scaling_governor = read_first_line(path) if scaling_governor: - info.append('governor:%s' % scaling_governor) + info.append("governor:%s" % scaling_governor) return info def collect_cpu_config(metadata, cpus): - nohz_full = read_first_line(sysfs_path('devices/system/cpu/nohz_full')) + nohz_full = 
read_first_line(sysfs_path("devices/system/cpu/nohz_full")) if nohz_full: nohz_full = parse_cpu_list(nohz_full) @@ -301,25 +308,25 @@ def collect_cpu_config(metadata, cpus): for cpu in cpus: config = get_cpu_config(cpu) if nohz_full and cpu in nohz_full: - config.append('nohz_full') + config.append("nohz_full") if isolated and cpu in isolated: - config.append('isolated') + config.append("isolated") if config: - configs[cpu] = ', '.join(config) + configs[cpu] = ", ".join(config) config = format_cpu_infos(configs) - cpuidle = read_first_line('/sys/devices/system/cpu/cpuidle/current_driver') + cpuidle = read_first_line("/sys/devices/system/cpu/cpuidle/current_driver") if cpuidle: - config.append('idle:%s' % cpuidle) + config.append("idle:%s" % cpuidle) if not config: return - metadata['cpu_config'] = '; '.join(config) + metadata["cpu_config"] = "; ".join(config) def get_cpu_temperature(path, cpu_temp): - hwmon_name = read_first_line(os.path.join(path, 'name')) - if not hwmon_name.startswith('coretemp'): + hwmon_name = read_first_line(os.path.join(path, "name")) + if not hwmon_name.startswith("coretemp"): return index = 1 @@ -327,17 +334,17 @@ def get_cpu_temperature(path, cpu_temp): template = os.path.join(path, "temp%s_%%s" % index) try: - temp_label = read_first_line(template % 'label', error=True) + temp_label = read_first_line(template % "label", error=True) except OSError: break - temp_input = read_first_line(template % 'input', error=True) + temp_input = read_first_line(template % "input", error=True) temp_input = float(temp_input) / 1000 # On Python 2, u"%.0f\xb0C" introduces unicode errors if the # locale encoding is ASCII, so use a space. temp_input = "%.0f C" % temp_input - item = '%s:%s=%s' % (hwmon_name, temp_label, temp_input) + item = "%s:%s=%s" % (hwmon_name, temp_label, temp_input) cpu_temp.append(item) index += 1 @@ -357,7 +364,7 @@ def collect_cpu_temperatures(metadata): if not cpu_temp: return None - metadata['cpu_temp'] = ', '.join(cpu_temp) + metadata["cpu_temp"] = ", ".join(cpu_temp) def collect_cpu_affinity(metadata, cpu_affinity, cpu_count): @@ -370,21 +377,21 @@ def collect_cpu_affinity(metadata, cpu_affinity, cpu_count): if set(cpu_affinity) == set(range(cpu_count)): return - metadata['cpu_affinity'] = format_cpu_list(cpu_affinity) + metadata["cpu_affinity"] = format_cpu_list(cpu_affinity) def collect_cpu_model(metadata): for line in read_proc("cpuinfo"): - if line.startswith('model name'): - model_name = line.split(':', 1)[1].strip() + if line.startswith("model name"): + model_name = line.split(":", 1)[1].strip() if model_name: - metadata['cpu_model_name'] = model_name + metadata["cpu_model_name"] = model_name break - if line.startswith('machine'): - machine = line.split(':', 1)[1].strip() + if line.startswith("machine"): + machine = line.split(":", 1)[1].strip() if machine: - metadata['cpu_machine'] = machine + metadata["cpu_machine"] = machine break @@ -394,7 +401,7 @@ def collect_cpu_metadata(metadata): # CPU count cpu_count = get_logical_cpu_count() if cpu_count: - metadata['cpu_count'] = cpu_count + metadata["cpu_count"] = cpu_count cpu_affinity = get_cpu_affinity() collect_cpu_affinity(metadata, cpu_affinity, cpu_count) @@ -411,7 +418,10 @@ def collect_cpu_metadata(metadata): def collect_metadata(process=True): - metadata = {'perf_version': pyperf.__version__, 'date': format_datetime(datetime.datetime.now())} + metadata = { + "perf_version": pyperf.__version__, + "date": format_datetime(datetime.datetime.now()), + } collect_system_metadata(metadata) 
collect_cpu_metadata(metadata) @@ -447,6 +457,6 @@ def cmd_collect_metadata(args): print(line) if filename: - run = run._update_metadata({'name': 'metadata'}) + run = run._update_metadata({"name": "metadata"}) bench = pyperf.Benchmark([run]) bench.dump(filename) diff --git a/pyperf/_command.py b/pyperf/_command.py index cd09f3fa..9b3da0b0 100644 --- a/pyperf/_command.py +++ b/pyperf/_command.py @@ -30,25 +30,23 @@ def parse_subprocess_data(output): def bench_command(command, task, loops): path = os.path.dirname(__file__) - script = os.path.join(path, '_process_time.py') + script = os.path.join(path, "_process_time.py") run_script = [sys.executable, script] args = run_script + [str(loops)] + command - proc = subprocess.Popen(args, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) + proc = subprocess.Popen( + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True + ) output = popen_communicate(proc)[0] if proc.returncode: - raise Exception("Command failed with exit code %s" - % proc.returncode) + raise Exception("Command failed with exit code %s" % proc.returncode) timing, rss, metadata = parse_subprocess_data(output) if rss and rss > 0: # store the maximum - max_rss = task.metadata.get('command_max_rss', 0) - task.metadata['command_max_rss'] = max(max_rss, rss) + max_rss = task.metadata.get("command_max_rss", 0) + task.metadata["command_max_rss"] = max(max_rss, rss) task.metadata.update(metadata) @@ -57,15 +55,15 @@ def bench_command(command, task, loops): class BenchCommandTask(WorkerTask): def __init__(self, runner, name, command): - command_str = ' '.join(map(shell_quote, command)) - metadata = {'command': command_str} + command_str = " ".join(map(shell_quote, command)) + metadata = {"command": command_str} task_func = functools.partial(bench_command, command) WorkerTask.__init__(self, runner, name, task_func, metadata) def compute(self): WorkerTask.compute(self) if self.args.track_memory: - value = self.metadata.pop('command_max_rss', None) + value = self.metadata.pop("command_max_rss", None) if not value: raise RuntimeError("failed to get the process RSS") diff --git a/pyperf/_compare.py b/pyperf/_compare.py index 9b5669aa..7fc4adc0 100644 --- a/pyperf/_compare.py +++ b/pyperf/_compare.py @@ -25,7 +25,10 @@ def __init__(self, name, benchmark): self.benchmark = benchmark def __repr__(self): - return '' % (self.name, self.benchmark.get_nvalue()) + return "" % ( + self.name, + self.benchmark.get_nvalue(), + ) def compute_normalized_mean(bench, ref): @@ -65,7 +68,7 @@ def __init__(self, ref, changed, min_speed=None): self._norm_mean = None def __repr__(self): - return '' % (self.ref, self.changed) + return "" % (self.ref, self.changed) def _set_significant(self): bench1 = self.ref.benchmark @@ -114,8 +117,10 @@ def oneliner(self, verbose=True, show_name=True, check_significant=True): if show_name: ref_text = "[%s] %s" % (self.ref.name, ref_text) chg_text = "[%s] %s" % (self.changed.name, chg_text) - if (self.ref.benchmark.get_nvalue() > 1 - or self.changed.benchmark.get_nvalue() > 1): + if ( + self.ref.benchmark.get_nvalue() > 1 + or self.changed.benchmark.get_nvalue() > 1 + ): text = "Mean +- std dev: %s -> %s" % (ref_text, chg_text) else: text = "%s -> %s" % (ref_text, chg_text) @@ -150,7 +155,7 @@ def __init__(self, name): self.name = name def __repr__(self): - return '' % (list(self),) + return "" % (list(self),) class ReSTTable: @@ -162,27 +167,27 @@ def __init__(self, headers, rows): for column, cell in enumerate(row): 
self.widths[column] = max(self.widths[column], len(cell)) - def _render_line(self, char='-'): - parts = [''] + def _render_line(self, char="-"): + parts = [""] for width in self.widths: parts.append(char * (width + 2)) - parts.append('') - return '+'.join(parts) + parts.append("") + return "+".join(parts) def _render_row(self, row): - parts = [''] + parts = [""] for width, cell in zip(self.widths, row): - parts.append(' %s ' % cell.ljust(width)) - parts.append('') - return '|'.join(parts) + parts.append(" %s " % cell.ljust(width)) + parts.append("") + return "|".join(parts) def render(self, write_line): - write_line(self._render_line('-')) + write_line(self._render_line("-")) write_line(self._render_row(self.headers)) - write_line(self._render_line('=')) + write_line(self._render_line("=")) for row in self.rows: write_line(self._render_row(row)) - write_line(self._render_line('-')) + write_line(self._render_line("-")) class MarkDownTable: @@ -194,26 +199,26 @@ def __init__(self, headers, rows): for column, cell in enumerate(row): self.widths[column] = max(self.widths[column], len(cell)) - def _render_line(self, char='-'): - parts = [''] + def _render_line(self, char="-"): + parts = [""] for idx, width in enumerate(self.widths): if idx == 0: parts.append(char * (width + 2)) else: - parts.append(f':{char * width}:') - parts.append('') - return '|'.join(parts) + parts.append(f":{char * width}:") + parts.append("") + return "|".join(parts) def _render_row(self, row): - parts = [''] + parts = [""] for width, cell in zip(self.widths, row): parts.append(" %s " % cell.ljust(width)) - parts.append('') - return '|'.join(parts) + parts.append("") + return "|".join(parts) def render(self, write_line): write_line(self._render_row(self.headers)) - write_line(self._render_line('-')) + write_line(self._render_line("-")) for row in self.rows: write_line(self._render_row(row)) @@ -244,7 +249,7 @@ def __init__(self, benchmarks, args): results = self.compare_benchmarks(item.name, cmp_benchmarks) self.all_results.append(results) - self.show_name = (len(grouped_by_name) > 1) + self.show_name = len(grouped_by_name) > 1 self.tags = set() for results in self.all_results: @@ -269,18 +274,21 @@ def compare_benchmarks(self, name, benchmarks): @staticmethod def display_not_significant(not_significant): - print("Benchmark hidden because not significant (%s): %s" - % (len(not_significant), ', '.join(not_significant))) + print( + "Benchmark hidden because not significant (%s): %s" + % (len(not_significant), ", ".join(not_significant)) + ) def compare_suites_table(self, all_results): if self.group_by_speed: + def sort_key(results): result = results[0] return result.norm_mean self.all_results.sort(key=sort_key) - headers = ['Benchmark', self.all_results[0][0].ref.name] + headers = ["Benchmark", self.all_results[0][0].ref.name] for item in self.all_results[0]: headers.append(item.changed.name) @@ -317,13 +325,13 @@ def sort_key(results): # only compute the geometric mean if there is at least two benchmarks # and if at least one is signicant. 
if len(all_norm_means[0]) > 1 and rows: - row = ['Geometric mean', '(ref)'] + row = ["Geometric mean", "(ref)"] for norm_means in all_norm_means: row.append(format_geometric_mean(norm_means)) rows.append(row) if rows: - if self.table_format == 'rest': + if self.table_format == "rest": table = ReSTTable(headers, rows) else: table = MarkDownTable(headers, rows) @@ -362,9 +370,9 @@ def sort_key(item): empty_line = False for title, results, sort_reverse in ( - ('Slower', slower, True), - ('Faster', faster, False), - ('Same speed', same, False), + ("Slower", slower, True), + ("Faster", faster, False), + ("Same speed", same, False), ): if not results: continue @@ -385,7 +393,7 @@ def sort_key(item): def compare_suites_list(self, all_results): not_significant = [] empty_line = False - last_index = (len(self.all_results) - 1) + last_index = len(self.all_results) - 1 for index, results in enumerate(all_results): significant = any(result.significant for result in results) @@ -407,7 +415,7 @@ def compare_suites_list(self, all_results): else: text = lines[0] if self.show_name: - text = '%s: %s' % (results.name, text) + text = "%s: %s" % (results.name, text) print(text) empty_line = True @@ -421,8 +429,10 @@ def list_ignored(self): if not hidden: continue hidden_names = [bench.get_name() for bench in hidden] - print("Ignored benchmarks (%s) of %s: %s" - % (len(hidden), suite.filename, ', '.join(sorted(hidden_names)))) + print( + "Ignored benchmarks (%s) of %s: %s" + % (len(hidden), suite.filename, ", ".join(sorted(hidden_names))) + ) def compare_geometric_mean(self, all_results): # use a list since two filenames can be identical, @@ -441,13 +451,13 @@ def compare_geometric_mean(self, all_results): print() if len(all_norm_means) > 1: - display_title('Geometric mean') + display_title("Geometric mean") for name, norm_means in all_norm_means: geo_mean = format_geometric_mean(norm_means) - print(f'{name}: {geo_mean}') + print(f"{name}: {geo_mean}") else: geo_mean = format_geometric_mean(all_norm_means[0][1]) - print(f'Geometric mean: {geo_mean}') + print(f"Geometric mean: {geo_mean}") def compare_suites(self, results): if self.table: @@ -465,7 +475,8 @@ def compare(self): for tag in self.tags: display_title(f"Benchmarks with tag '{tag}':") all_results = [ - results for results in self.all_results + results + for results in self.all_results if tag is None or tag in get_tags_for_result(results[0]) ] self.compare_suites(all_results) diff --git a/pyperf/_cpu_utils.py b/pyperf/_cpu_utils.py index f810df25..e4a8f26c 100644 --- a/pyperf/_cpu_utils.py +++ b/pyperf/_cpu_utils.py @@ -17,7 +17,7 @@ def get_logical_cpu_count(): if psutil is not None: # Number of logical CPUs cpu_count = psutil.cpu_count() - elif hasattr(os, 'cpu_count'): + elif hasattr(os, "cpu_count"): # Python 3.4 cpu_count = os.cpu_count() else: @@ -48,16 +48,16 @@ def format_cpu_list(cpus): first = cpu elif cpu != last + 1: if first != last: - parts.append('%s-%s' % (first, last)) + parts.append("%s-%s" % (first, last)) else: parts.append(str(last)) first = cpu last = cpu if first != last: - parts.append('%s-%s' % (first, last)) + parts.append("%s-%s" % (first, last)) else: parts.append(str(last)) - return ','.join(parts) + return ",".join(parts) def format_cpu_infos(infos): @@ -70,24 +70,24 @@ def format_cpu_infos(infos): text = [] for cpus, info in items: cpus = format_cpu_list(cpus) - text.append('%s=%s' % (cpus, info)) + text.append("%s=%s" % (cpus, info)) return text def parse_cpu_list(cpu_list): - cpu_list = cpu_list.strip(' \x00') + cpu_list 
= cpu_list.strip(" \x00") # /sys/devices/system/cpu/nohz_full returns ' (null)\n' when NOHZ full # is not used - if cpu_list == '(null)': + if cpu_list == "(null)": return if not cpu_list: return cpus = [] - for part in cpu_list.split(','): + for part in cpu_list.split(","): part = part.strip() - if '-' in part: - parts = part.split('-', 1) + if "-" in part: + parts = part.split("-", 1) first = int(parts[0]) last = int(parts[1]) for cpu in range(first, last + 1): @@ -100,7 +100,7 @@ def parse_cpu_list(cpu_list): def parse_cpu_mask(line): mask = 0 - for part in line.split(','): + for part in line.split(","): mask <<= 32 mask |= int(part, 16) return mask @@ -109,18 +109,18 @@ def parse_cpu_mask(line): def format_cpu_mask(mask): parts = [] while 1: - part = "%08x" % (mask & 0xffffffff) + part = "%08x" % (mask & 0xFFFFFFFF) parts.append(part) mask >>= 32 if not mask: break - return ','.join(reversed(parts)) + return ",".join(reversed(parts)) def format_cpus_as_mask(cpus): mask = 0 for cpu in cpus: - mask |= (1 << cpu) + mask |= 1 << cpu return format_cpu_mask(mask) @@ -132,14 +132,14 @@ def get_isolated_cpus(): """ # The cpu/isolated sysfs was added in Linux 4.2 # (commit 59f30abe94bff50636c8cad45207a01fdcb2ee49) - path = sysfs_path('devices/system/cpu/isolated') + path = sysfs_path("devices/system/cpu/isolated") isolated = read_first_line(path) if isolated: return parse_cpu_list(isolated) - cmdline = read_first_line(proc_path('cmdline')) + cmdline = read_first_line(proc_path("cmdline")) if cmdline: - match = re.search(r'\bisolcpus=([^ ]+)', cmdline) + match = re.search(r"\bisolcpus=([^ ]+)", cmdline) if match: isolated = match.group(1) return parse_cpu_list(isolated) @@ -149,7 +149,7 @@ def get_isolated_cpus(): def set_cpu_affinity(cpus): # Python 3.3 or newer? - if hasattr(os, 'sched_setaffinity'): + if hasattr(os, "sched_setaffinity"): os.sched_setaffinity(0, cpus) return True @@ -162,7 +162,7 @@ def set_cpu_affinity(cpus): return proc = psutil.Process() - if not hasattr(proc, 'cpu_affinity'): + if not hasattr(proc, "cpu_affinity"): return proc.cpu_affinity(cpus) @@ -179,7 +179,7 @@ def set_highest_priority(): return proc = psutil.Process() - if not hasattr(proc, 'nice'): + if not hasattr(proc, "nice"): return # Want to set realtime on Windows. 
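A quick way to sanity-check the _cpu_utils.py helpers touched in the hunks above (purely illustrative and not part of the diff; it assumes pyperf is importable from the reformatted working tree and that these private helpers keep the signatures shown above):

    # Round-trip a CPU list through the helpers from pyperf/_cpu_utils.py.
    from pyperf._cpu_utils import format_cpu_list, format_cpus_as_mask, parse_cpu_list

    cpus = parse_cpu_list("0-3,6")      # -> [0, 1, 2, 3, 6]
    print(format_cpu_list(cpus))        # prints "0-3,6": consecutive CPUs are collapsed back into ranges
    print(format_cpus_as_mask(cpus))    # prints "0000004f": hex mask in comma-separated 32-bit groups

Running the snippet before and after the reformat should print the same output, since the hunks above only change quoting and layout, not behavior.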
diff --git a/pyperf/_formatter.py b/pyperf/_formatter.py index 6c3a1eea..bcdf1e1d 100644 --- a/pyperf/_formatter.py +++ b/pyperf/_formatter.py @@ -1,10 +1,10 @@ -_TIMEDELTA_UNITS = ('sec', 'ms', 'us', 'ns') +_TIMEDELTA_UNITS = ("sec", "ms", "us", "ns") def format_timedeltas(values): ref_value = abs(values[0]) for i in range(2, -9, -1): - if ref_value >= 10.0 ** i: + if ref_value >= 10.0**i: break else: i = -9 @@ -25,14 +25,14 @@ def format_timedelta(value): def format_filesize(size): if size < 10 * 1024: if size != 1: - return '%.0f bytes' % size + return "%.0f bytes" % size else: - return '%.0f byte' % size + return "%.0f byte" % size if size > 10 * 1024 * 1024: - return '%.1f MiB' % (size / (1024.0 * 1024.0)) + return "%.1f MiB" % (size / (1024.0 * 1024.0)) - return '%.1f KiB' % (size / 1024.0) + return "%.1f KiB" % (size / 1024.0) def format_filesizes(sizes): @@ -42,7 +42,7 @@ def format_filesizes(sizes): def format_seconds(seconds): # Coarse but human readable duration if not seconds: - return '0 sec' + return "0 sec" if seconds < 1.0: return format_timedelta(seconds) @@ -60,12 +60,12 @@ def format_seconds(seconds): if mins: parts.append("%.0f min" % mins) if secs and len(parts) <= 2: - parts.append('%.1f sec' % secs) - return ' '.join(parts) + parts.append("%.1f sec" % secs) + return " ".join(parts) def format_number(number, unit=None, units=None): - plural = (not number or abs(number) > 1) + plural = not number or abs(number) > 1 if number >= 10000: pow10 = 0 x = number @@ -75,7 +75,7 @@ def format_number(number, unit=None, units=None): if r: break if not r: - number = '10^%s' % pow10 + number = "10^%s" % pow10 if isinstance(number, int) and number > 8192: pow2 = 0 @@ -86,28 +86,28 @@ def format_number(number, unit=None, units=None): if r: break if not r: - number = '2^%s' % pow2 + number = "2^%s" % pow2 if not unit: return str(number) if plural: if not units: - units = unit + 's' - return '%s %s' % (number, units) + units = unit + "s" + return "%s %s" % (number, units) else: - return '%s %s' % (number, unit) + return "%s %s" % (number, unit) def format_integers(numbers): return tuple(format_number(number) for number in numbers) -DEFAULT_UNIT = 'second' +DEFAULT_UNIT = "second" UNIT_FORMATTERS = { - 'second': format_timedeltas, - 'byte': format_filesizes, - 'integer': format_integers, + "second": format_timedeltas, + "byte": format_filesizes, + "integer": format_integers, } @@ -125,4 +125,4 @@ def format_value(unit, value): def format_datetime(dt, microsecond=True): if not microsecond: dt = dt.replace(microsecond=0) - return dt.isoformat(' ') + return dt.isoformat(" ") diff --git a/pyperf/_hooks.py b/pyperf/_hooks.py index 8c9e7cdf..7db78934 100644 --- a/pyperf/_hooks.py +++ b/pyperf/_hooks.py @@ -134,14 +134,21 @@ def __init__(self): self.ack_fifo = self.mkfifo(self.tempdir.name, "ack_fifo") perf_data_dir = os.environ.get("PYPERF_PERF_RECORD_DATA_DIR", "") perf_data_basename = f"perf.data.{uuid.uuid4()}" - cmd = ["perf", "record", - "--pid", str(os.getpid()), - "--output", os.path.join(perf_data_dir, perf_data_basename), - "--control", f"fifo:{self.ctl_fifo},{self.ack_fifo}"] + cmd = [ + "perf", + "record", + "--pid", + str(os.getpid()), + "--output", + os.path.join(perf_data_dir, perf_data_basename), + "--control", + f"fifo:{self.ctl_fifo},{self.ack_fifo}", + ] extra_opts = os.environ.get("PYPERF_PERF_RECORD_EXTRA_OPTS", "") cmd += shlex.split(extra_opts) self.perf = subprocess.Popen( - cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + cmd, stdout=subprocess.DEVNULL, 
stderr=subprocess.DEVNULL + ) self.ctl_fd = open(self.ctl_fifo, "w") self.ack_fd = open(self.ack_fifo, "r") diff --git a/pyperf/_linux_memory.py b/pyperf/_linux_memory.py index 7eca372d..2353fe6c 100644 --- a/pyperf/_linux_memory.py +++ b/pyperf/_linux_memory.py @@ -18,7 +18,7 @@ def read_smap_file(): for line in fp: # Include both Private_Clean and Private_Dirty sections. line = line.rstrip() - if line.startswith(b"Private_") and line.endswith(b'kB'): + if line.startswith(b"Private_") and line.endswith(b"kB"): parts = line.split() total += int(parts[1]) * 1024 return total @@ -29,7 +29,7 @@ def __init__(self): threading.Thread.__init__(self) self.peak_usage = 0 self._done = threading.Event() - self.sleep = 0.010 # 10 ms + self.sleep = 0.010 # 10 ms self._quit = False def get(self): diff --git a/pyperf/_manager.py b/pyperf/_manager.py index a55f7a5a..c4626176 100644 --- a/pyperf/_manager.py +++ b/pyperf/_manager.py @@ -24,6 +24,7 @@ class Manager: It uses a state machine with next_run attribute and the choose_next_run() method. """ + def __init__(self, runner, python=None): self.runner = runner self.args = runner.args @@ -34,7 +35,7 @@ def __init__(self, runner, python=None): self.bench = None self.need_nprocess = self.args.processes self.nprocess = 0 - self.next_run = 'loops' + self.next_run = "loops" self.calibrate_loops = int(not self.args.loops) self.calibrate_warmups = int(self.args.warmups is None) @@ -43,40 +44,48 @@ def worker_cmd(self, calibrate_loops, calibrate_warmups, wpipe): cmd = [self.python] cmd.extend(self.runner._program_args) - cmd.extend(('--worker', '--pipe', str(wpipe), - '--worker-task=%s' % self.runner._worker_task, - '--values', str(args.values), - '--min-time', str(args.min_time))) + cmd.extend( + ( + "--worker", + "--pipe", + str(wpipe), + "--worker-task=%s" % self.runner._worker_task, + "--values", + str(args.values), + "--min-time", + str(args.min_time), + ) + ) if calibrate_loops == 1: - cmd.append('--calibrate-loops') + cmd.append("--calibrate-loops") else: - cmd.extend(('--loops', str(args.loops))) + cmd.extend(("--loops", str(args.loops))) if calibrate_loops > 1: - cmd.append('--recalibrate-loops') + cmd.append("--recalibrate-loops") if calibrate_warmups == 1: - cmd.append('--calibrate-warmups') + cmd.append("--calibrate-warmups") else: - cmd.extend(('--warmups', str(args.warmups))) + cmd.extend(("--warmups", str(args.warmups))) if calibrate_warmups > 1: - cmd.append('--recalibrate-warmups') + cmd.append("--recalibrate-warmups") if args.verbose: - cmd.append('-' + 'v' * args.verbose) + cmd.append("-" + "v" * args.verbose) if args.affinity: - cmd.append('--affinity=%s' % args.affinity) + cmd.append("--affinity=%s" % args.affinity) if args.tracemalloc: - cmd.append('--tracemalloc') + cmd.append("--tracemalloc") if args.track_memory: - cmd.append('--track-memory') + cmd.append("--track-memory") if args.profile: - cmd.extend(['--profile', args.profile]) + cmd.extend(["--profile", args.profile]) if args.timeout: - cmd.extend(['--timeout', str(args.timeout)]) + cmd.extend(["--timeout", str(args.timeout)]) if args.hook: for hook in args.hook: - cmd.extend(['--hook', hook]) + cmd.extend(["--hook", hook]) if self.runner._add_cmdline_args: self.runner._add_cmdline_args(cmd, args) @@ -84,25 +93,24 @@ def worker_cmd(self, calibrate_loops, calibrate_warmups, wpipe): return cmd def spawn_worker(self, calibrate_loops, calibrate_warmups): - env = create_environ(self.args.inherit_environ, - self.args.locale, - self.args.copy_env) + env = create_environ( + 
self.args.inherit_environ, self.args.locale, self.args.copy_env + ) rpipe, wpipe = create_pipe() with rpipe: with wpipe: warg = wpipe.to_subprocess() - cmd = self.worker_cmd(calibrate_loops, - calibrate_warmups, warg) + cmd = self.worker_cmd(calibrate_loops, calibrate_warmups, warg) kw = {} if MS_WINDOWS: # Set close_fds to False to call CreateProcess() with # bInheritHandles=True. For pass_handles, see # http://bugs.python.org/issue19764 - kw['close_fds'] = False + kw["close_fds"] = False else: - kw['pass_fds'] = [wpipe.fd] + kw["pass_fds"] = [wpipe.fd] proc = subprocess.Popen(cmd, env=env, **kw) @@ -115,22 +123,21 @@ def spawn_worker(self, calibrate_loops, calibrate_warmups): sys.exit(124) if exitcode: - raise RuntimeError("%s failed with exit code %s" - % (cmd[0], exitcode)) + raise RuntimeError("%s failed with exit code %s" % (cmd[0], exitcode)) return _load_suite_from_pipe(bench_json) def create_suite(self): # decide which kind of run must be computed - if self.next_run == 'loops' and not self.calibrate_loops: - self.next_run = 'warmups' - if self.next_run == 'warmups' and not self.calibrate_warmups: - self.next_run = 'values' + if self.next_run == "loops" and not self.calibrate_loops: + self.next_run = "warmups" + if self.next_run == "warmups" and not self.calibrate_warmups: + self.next_run = "values" # compute the run - if self.next_run == 'loops': + if self.next_run == "loops": suite = self.spawn_worker(self.calibrate_loops, 0) - elif self.next_run == 'warmups': + elif self.next_run == "warmups": suite = self.spawn_worker(0, self.calibrate_warmups) else: suite = self.spawn_worker(0, 0) @@ -144,12 +151,14 @@ def create_worker_bench(self): # get the run benchmarks = suite._benchmarks if len(benchmarks) != 1: - raise ValueError("worker produced %s benchmarks instead of 1" - % len(benchmarks)) + raise ValueError( + "worker produced %s benchmarks instead of 1" % len(benchmarks) + ) worker_bench = benchmarks[0] if len(worker_bench._runs) != 1: - raise ValueError("worker produced %s runs, only 1 run expected" - % len(worker_bench._runs)) + raise ValueError( + "worker produced %s runs, only 1 run expected" % len(worker_bench._runs) + ) run = worker_bench._runs[0] # save the run into bench @@ -168,14 +177,18 @@ def display_run(self, bench, run): print(line) sys.stdout.flush() elif not self.args.quiet: - print(".", end='') + print(".", end="") sys.stdout.flush() def calibration_done(self): if self.args.verbose: - print("Calibration: %s, %s" - % (format_number(self.args.warmups, 'warmup'), - format_number(self.args.loops, 'loop'))) + print( + "Calibration: %s, %s" + % ( + format_number(self.args.warmups, "warmup"), + format_number(self.args.loops, "loop"), + ) + ) self.calibrate_loops = 0 self.calibrate_warmups = 0 @@ -199,9 +212,10 @@ def handle_calibration(self, run): self.calibration_done() if self.calibrate_loops > MAX_CALIBRATION: - print("ERROR: calibration failed, the number of loops " - "is not stable after %s calibrations" - % (self.calibrate_loops - 1)) + print( + "ERROR: calibration failed, the number of loops " + "is not stable after %s calibrations" % (self.calibrate_loops - 1) + ) sys.exit(1) elif run._is_calibration_warmups() or run._is_recalibration_warmups(): @@ -221,16 +235,17 @@ def handle_calibration(self, run): self.calibration_done() if self.calibrate_warmups > MAX_CALIBRATION: - print("ERROR: calibration failed, the number of warmups " - "is not stable after %s calibrations" - % (self.calibrate_warmups - 1)) + print( + "ERROR: calibration failed, the number of warmups " 
+ "is not stable after %s calibrations" % (self.calibrate_warmups - 1) + ) sys.exit(1) def choose_next_run(self): - if self.next_run == 'loops': - self.next_run = 'warmups' - elif self.next_run == 'warmups': - self.next_run = 'loops' + if self.next_run == "loops": + self.next_run = "warmups" + elif self.next_run == "warmups": + self.next_run = "loops" # else: keep action 'values' def create_bench(self): diff --git a/pyperf/_metadata.py b/pyperf/_metadata.py index e2ff96c5..fc5cae5a 100644 --- a/pyperf/_metadata.py +++ b/pyperf/_metadata.py @@ -1,7 +1,11 @@ import collections -from pyperf._formatter import (format_number, format_seconds, format_filesize, - UNIT_FORMATTERS) +from pyperf._formatter import ( + format_number, + format_seconds, + format_filesize, + UNIT_FORMATTERS, +) METADATA_VALUE_TYPES = (int, str, float) @@ -31,21 +35,21 @@ def format_generic(value): def format_system_load(load): # Format system load read from /proc/loadavg on Linux (ex: 0.12) - return '%.2f' % load + return "%.2f" % load def is_strictly_positive(value): - return (value >= 1) + return value >= 1 def is_positive(value): - return (value >= 0) + return value >= 0 def is_tags(value): if not isinstance(value, list): return False - return all(isinstance(x, str) and x not in ('all', '') for x in value) + return all(isinstance(x, str) and x not in ("all", "") for x in value) def parse_load_avg(value): @@ -61,37 +65,35 @@ def format_noop(value): # types: accepted types -_MetadataInfo = collections.namedtuple('_MetadataInfo', 'formatter types check_value unit') +_MetadataInfo = collections.namedtuple( + "_MetadataInfo", "formatter types check_value unit" +) -BYTES = _MetadataInfo(format_filesize, (int,), is_strictly_positive, 'byte') +BYTES = _MetadataInfo(format_filesize, (int,), is_strictly_positive, "byte") DATETIME = _MetadataInfo(format_noop, (str,), None, None) -LOOPS = _MetadataInfo(format_number, (int,), is_strictly_positive, 'integer') -WARMUPS = _MetadataInfo(format_number, (int,), is_positive, 'integer') -SECONDS = _MetadataInfo(format_seconds, NUMBER_TYPES, is_positive, 'second') -TAGS = _MetadataInfo(format_generic, (list,), is_tags, 'tag') +LOOPS = _MetadataInfo(format_number, (int,), is_strictly_positive, "integer") +WARMUPS = _MetadataInfo(format_number, (int,), is_positive, "integer") +SECONDS = _MetadataInfo(format_seconds, NUMBER_TYPES, is_positive, "second") +TAGS = _MetadataInfo(format_generic, (list,), is_tags, "tag") # Registry of metadata keys METADATA = { - 'loops': LOOPS, - 'inner_loops': LOOPS, - - 'duration': SECONDS, - 'uptime': SECONDS, - 'load_avg_1min': _MetadataInfo(format_system_load, NUMBER_TYPES, is_positive, None), - - 'mem_max_rss': BYTES, - 'mem_peak_pagefile_usage': BYTES, - 'command_max_rss': BYTES, - - 'unit': _MetadataInfo(format_noop, (str,), UNIT_FORMATTERS.__contains__, None), - 'date': DATETIME, - 'boot_time': DATETIME, - - 'calibrate_loops': LOOPS, - 'recalibrate_loops': LOOPS, - 'calibrate_warmups': WARMUPS, - 'recalibrate_warmups': WARMUPS, - 'tags': TAGS, + "loops": LOOPS, + "inner_loops": LOOPS, + "duration": SECONDS, + "uptime": SECONDS, + "load_avg_1min": _MetadataInfo(format_system_load, NUMBER_TYPES, is_positive, None), + "mem_max_rss": BYTES, + "mem_peak_pagefile_usage": BYTES, + "command_max_rss": BYTES, + "unit": _MetadataInfo(format_noop, (str,), UNIT_FORMATTERS.__contains__, None), + "date": DATETIME, + "boot_time": DATETIME, + "calibrate_loops": LOOPS, + "recalibrate_loops": LOOPS, + "calibrate_warmups": WARMUPS, + "recalibrate_warmups": WARMUPS, + "tags": 
TAGS, } DEFAULT_METADATA_INFO = _MetadataInfo(format_generic, METADATA_VALUE_TYPES, None, None) @@ -105,16 +107,15 @@ def check_metadata(name, value): info = get_metadata_info(name) if not isinstance(name, str): - raise TypeError("metadata name must be a string, got %s" - % type(name).__name__) + raise TypeError("metadata name must be a string, got %s" % type(name).__name__) if not isinstance(value, info.types): - raise ValueError("invalid metadata %r value type: got %r" - % (name, type(value).__name__)) + raise ValueError( + "invalid metadata %r value type: got %r" % (name, type(value).__name__) + ) if info.check_value is not None and not info.check_value(value): - raise ValueError("invalid metadata %r value: %r" - % (name, value)) + raise ValueError("invalid metadata %r value: %r" % (name, value)) def parse_metadata(metadata): @@ -122,9 +123,10 @@ def parse_metadata(metadata): for name, value in metadata.items(): if isinstance(value, str): value = value.strip() - if '\n' in value or '\r' in value: - raise ValueError("newline characters are not allowed " - "in metadata values: %r" % value) + if "\n" in value or "\r" in value: + raise ValueError( + "newline characters are not allowed in metadata values: %r" % value + ) if not value: raise ValueError("metadata %r value is empty" % name) check_metadata(name, value) @@ -157,15 +159,15 @@ def __str__(self): def __eq__(self, other): if not isinstance(other, Metadata): return False - return (self._name == other._name and self._value == other._value) + return self._name == other._name and self._value == other._value def __repr__(self): - return ('' - % (self._name, self._value)) + return "" % (self._name, self._value) def _exclude_common_metadata(metadata, common_metadata): if common_metadata: - metadata = {key: value for key, value in metadata.items() - if key not in common_metadata} + metadata = { + key: value for key, value in metadata.items() if key not in common_metadata + } return metadata diff --git a/pyperf/_process_time.py b/pyperf/_process_time.py index f18dee20..c210a9f0 100644 --- a/pyperf/_process_time.py +++ b/pyperf/_process_time.py @@ -14,6 +14,7 @@ If resource.getrusage() is available: compute the maximum RSS memory in bytes per process and writes it into stdout as a second line. """ + import contextlib import json import os @@ -35,7 +36,7 @@ def get_max_rss(*, children): else: resource_type = resource.RUSAGE_SELF usage = resource.getrusage(resource_type) - if sys.platform == 'darwin': + if sys.platform == "darwin": return usage.ru_maxrss return usage.ru_maxrss * 1024 else: @@ -47,6 +48,7 @@ def merge_profile_stats_files(src, dst): Merging one existing pstats file into another. 
""" import pstats + if os.path.isfile(dst): src_stats = pstats.Stats(src) dst_stats = pstats.Stats(dst) @@ -75,8 +77,7 @@ def bench_process(loops, args, kw, profile_filename=None): exitcode = proc.returncode if exitcode != 0: - print("Command failed with exit code %s" % exitcode, - file=sys.stderr) + print("Command failed with exit code %s" % exitcode, file=sys.stderr) if profile_filename: os.unlink(temp_profile_filename) sys.exit(exitcode) @@ -85,9 +86,7 @@ def bench_process(loops, args, kw, profile_filename=None): max_rss = max(max_rss, rss) if profile_filename: - merge_profile_stats_files( - temp_profile_filename, profile_filename - ) + merge_profile_stats_files(temp_profile_filename, profile_filename) dt = time.perf_counter() - start_time return (dt, max_rss) @@ -128,14 +127,18 @@ def write_data(dt, max_rss, metadata, out=sys.stdout): def main(): # Make sure that the pyperf module wasn't imported - if 'pyperf' in sys.modules: - print("ERROR: don't run %s -m pyperf._process, run the .py script" - % os.path.basename(sys.executable)) + if "pyperf" in sys.modules: + print( + "ERROR: don't run %s -m pyperf._process, run the .py script" + % os.path.basename(sys.executable) + ) sys.exit(1) if len(sys.argv) < 3: - print("Usage: %s %s loops program [arg1 arg2 ...] [--profile profile]" - % (os.path.basename(sys.executable), __file__)) + print( + "Usage: %s %s loops program [arg1 arg2 ...] [--profile profile]" + % (os.path.basename(sys.executable), __file__) + ) sys.exit(1) if "--profile" in sys.argv: @@ -153,15 +156,15 @@ def main(): args = sys.argv[2:] kw = {} - if hasattr(subprocess, 'DEVNULL'): + if hasattr(subprocess, "DEVNULL"): devnull = None - kw['stdin'] = subprocess.DEVNULL - kw['stdout'] = subprocess.DEVNULL + kw["stdin"] = subprocess.DEVNULL + kw["stdout"] = subprocess.DEVNULL else: - devnull = open(os.devnull, 'w+', 0) - kw['stdin'] = devnull - kw['stdout'] = devnull - kw['stderr'] = subprocess.STDOUT + devnull = open(os.devnull, "w+", 0) + kw["stdin"] = devnull + kw["stdout"] = devnull + kw["stderr"] = subprocess.STDOUT with contextlib.ExitStack() as stack: for hook in hook_managers.values(): diff --git a/pyperf/_psutil_memory.py b/pyperf/_psutil_memory.py index 38298953..c2b2858a 100644 --- a/pyperf/_psutil_memory.py +++ b/pyperf/_psutil_memory.py @@ -1,12 +1,14 @@ import os + try: from pyperf._utils import USE_PSUTIL, BSD + if not USE_PSUTIL: raise ImportError else: import psutil except ImportError: - raise ImportError('psutil is not installed') + raise ImportError("psutil is not installed") import threading import time @@ -17,7 +19,7 @@ def __init__(self): self.process = psutil.Process(os.getpid()) self.peak_usage = 0 self._done = threading.Event() - self.sleep = 0.010 # 10 ms + self.sleep = 0.010 # 10 ms self._quit = False def get(self): diff --git a/pyperf/_runner.py b/pyperf/_runner.py index 8000bc60..de90c7e4 100644 --- a/pyperf/_runner.py +++ b/pyperf/_runner.py @@ -4,17 +4,30 @@ import time import pyperf -from pyperf._cli import (format_benchmark, format_checks, - multiline_output, display_title, format_result_value, - catch_broken_pipe_error) -from pyperf._cpu_utils import (format_cpu_list, parse_cpu_list, - get_isolated_cpus, set_cpu_affinity, - set_highest_priority) +from pyperf._cli import ( + format_benchmark, + format_checks, + multiline_output, + display_title, + format_result_value, + catch_broken_pipe_error, +) +from pyperf._cpu_utils import ( + format_cpu_list, + parse_cpu_list, + get_isolated_cpus, + set_cpu_affinity, + set_highest_priority, +) from 
pyperf._formatter import format_timedelta from pyperf._hooks import get_hook_names -from pyperf._utils import (MS_WINDOWS, abs_executable, - WritePipe, get_python_names, - merge_profile_stats) +from pyperf._utils import ( + MS_WINDOWS, + abs_executable, + WritePipe, + get_python_names, + merge_profile_stats, +) from pyperf._system import OS_LINUX from pyperf._worker import WorkerProcessTask @@ -27,11 +40,11 @@ def strictly_positive(value): def positive_or_nul(value): - if '^' in value: - x, _, y = value.partition('^') + if "^" in value: + x, _, y = value.partition("^") x = int(x) y = int(y) - value = x ** y + value = x**y else: value = int(value) if value < 0: @@ -40,12 +53,12 @@ def positive_or_nul(value): def comma_separated(values): - values = [value.strip() for value in values.split(',')] + values = [value.strip() for value in values.split(",")] return list(filter(None, values)) def parse_python_names(names): - parts = names.split(':') + parts = names.split(":") if len(parts) != 2: raise ValueError("syntax is REF_NAME:CHANGED_NAME") return parts @@ -56,6 +69,7 @@ def profiling_wrapper(func): Wrap a function to collect profiling. """ import cProfile + profiler = cProfile.Profile() def profiling_func(*args): @@ -73,20 +87,29 @@ class Runner: # Default parameters are chosen to have approximately a run of 0.5 second # and so a total duration of 5 seconds by default - def __init__(self, values=None, processes=None, - loops=0, min_time=0.1, metadata=None, - show_name=True, - program_args=None, add_cmdline_args=None, - _argparser=None, warmups=1): - + def __init__( + self, + values=None, + processes=None, + loops=0, + min_time=0.1, + metadata=None, + show_name=True, + program_args=None, + add_cmdline_args=None, + _argparser=None, + warmups=1, + ): # Watchdog: ensure that only once instance of Runner (or a Runner # subclass) is created per process to prevent bad surprises cls = self.__class__ key = id(cls) if key in cls._created: - raise RuntimeError("only one %s instance must be created " - "per process: use the same instance to run " - "all benchmarks" % cls.__name__) + raise RuntimeError( + "only one %s instance must be created " + "per process: use the same instance to run " + "all benchmarks" % cls.__name__ + ) cls._created.add(key) # Use lazy import to limit imports on 'import pyperf' @@ -143,122 +166,212 @@ def __init__(self, values=None, processes=None, parser = _argparser else: parser = argparse.ArgumentParser() - parser.description = 'Benchmark' - parser.add_argument('--rigorous', action="store_true", - help='Spend longer running tests ' - 'to get more accurate results') - parser.add_argument('--fast', action="store_true", - help='Get rough answers quickly') - parser.add_argument("--debug-single-value", action="store_true", - help="Debug mode, only compute a single value") - parser.add_argument('-p', '--processes', - type=strictly_positive, default=processes, - help='number of processes used to run benchmarks ' - '(default: %s)' % processes) - parser.add_argument('-n', '--values', dest="values", - type=strictly_positive, default=values, - help='number of values per process (default: %s)' - % values) - parser.add_argument('-w', '--warmups', - type=positive_or_nul, default=warmups, - help='number of skipped values per run used ' - 'to warmup the benchmark') - parser.add_argument('-l', '--loops', - type=positive_or_nul, default=loops, - help='number of loops per value, 0 means ' - 'automatic calibration (default: %s)' - % loops) - parser.add_argument('-v', '--verbose', 
action="store_true", - help='enable verbose mode') - parser.add_argument('-q', '--quiet', action="store_true", - help='enable quiet mode') - parser.add_argument('--pipe', type=int, metavar="FD", - help='Write benchmarks encoded as JSON ' - 'into the pipe FD') - parser.add_argument('-o', '--output', metavar='FILENAME', - help='write results encoded to JSON into FILENAME') - parser.add_argument('--append', metavar='FILENAME', - help='append results encoded to JSON into FILENAME') - parser.add_argument('--min-time', type=float, default=min_time, - help='Minimum duration in seconds of a single ' - 'value, used to calibrate the number of ' - 'loops (default: %s)' - % format_timedelta(min_time)) - parser.add_argument('--timeout', - help='Specify a timeout in seconds for a single ' - 'benchmark execution (default: disabled)', - type=strictly_positive) - parser.add_argument('--worker', action='store_true', - help='Worker process, run the benchmark.') - parser.add_argument('--worker-task', type=positive_or_nul, metavar='TASK_ID', - help='Identifier of the worker task: ' - 'only execute the benchmark function TASK_ID') - parser.add_argument('--calibrate-loops', action="store_true", - help="calibrate the number of loops") - parser.add_argument('--recalibrate-loops', action="store_true", - help="recalibrate the the number of loops") - parser.add_argument('--calibrate-warmups', action="store_true", - help="calibrate the number of warmups") - parser.add_argument('--recalibrate-warmups', action="store_true", - help="recalibrate the number of warmups") - parser.add_argument('-d', '--dump', action="store_true", - help='display benchmark run results') - parser.add_argument('--metadata', '-m', action="store_true", - help='show metadata') - parser.add_argument('--hist', '-g', action="store_true", - help='display an histogram of values') - parser.add_argument('--stats', '-t', action="store_true", - help='display statistics (min, max, ...)') - parser.add_argument("--affinity", metavar="CPU_LIST", default=None, - help='Specify CPU affinity for worker processes. ' - 'This way, benchmarks can be forced to run ' - 'on a given set of CPUs to minimize run to ' - 'run variation. 
By default, worker processes ' - 'are pinned to isolate CPUs if isolated CPUs ' - 'are found.') - parser.add_argument("--inherit-environ", metavar='VARS', - type=comma_separated, - help='Comma-separated list of environment ' - 'variables inherited by worker child ' - 'processes.') - parser.add_argument("--copy-env", - dest="copy_env", action="store_true", default=False, - help="Copy all environment variables") - parser.add_argument("--no-locale", - dest="locale", action="store_false", default=True, - help="Don't copy locale environment variables " - "like LANG or LC_CTYPE.") - parser.add_argument("--python", default=sys.executable, - help='Python executable ' - '(default: use running Python, ' - 'sys.executable)') - parser.add_argument("--compare-to", metavar="REF_PYTHON", - help='Run benchmark on the Python executable REF_PYTHON, ' - 'run benchmark on Python executable PYTHON, ' - 'and then compare REF_PYTHON result to PYTHON result') - parser.add_argument("--python-names", metavar="REF_NAME:CHANGED_NAMED", - type=parse_python_names, - help='option used with --compare-to to name ' - 'PYTHON as CHANGED_NAME ' - 'and REF_PYTHON as REF_NAME in results') - - parser.add_argument('--profile', - type=str, - help='Collect profile data using cProfile ' - 'and output to the given file.') + parser.description = "Benchmark" + parser.add_argument( + "--rigorous", + action="store_true", + help="Spend longer running tests to get more accurate results", + ) + parser.add_argument( + "--fast", action="store_true", help="Get rough answers quickly" + ) + parser.add_argument( + "--debug-single-value", + action="store_true", + help="Debug mode, only compute a single value", + ) + parser.add_argument( + "-p", + "--processes", + type=strictly_positive, + default=processes, + help="number of processes used to run benchmarks (default: %s)" % processes, + ) + parser.add_argument( + "-n", + "--values", + dest="values", + type=strictly_positive, + default=values, + help="number of values per process (default: %s)" % values, + ) + parser.add_argument( + "-w", + "--warmups", + type=positive_or_nul, + default=warmups, + help="number of skipped values per run used to warmup the benchmark", + ) + parser.add_argument( + "-l", + "--loops", + type=positive_or_nul, + default=loops, + help="number of loops per value, 0 means " + "automatic calibration (default: %s)" % loops, + ) + parser.add_argument( + "-v", "--verbose", action="store_true", help="enable verbose mode" + ) + parser.add_argument( + "-q", "--quiet", action="store_true", help="enable quiet mode" + ) + parser.add_argument( + "--pipe", + type=int, + metavar="FD", + help="Write benchmarks encoded as JSON into the pipe FD", + ) + parser.add_argument( + "-o", + "--output", + metavar="FILENAME", + help="write results encoded to JSON into FILENAME", + ) + parser.add_argument( + "--append", + metavar="FILENAME", + help="append results encoded to JSON into FILENAME", + ) + parser.add_argument( + "--min-time", + type=float, + default=min_time, + help="Minimum duration in seconds of a single " + "value, used to calibrate the number of " + "loops (default: %s)" % format_timedelta(min_time), + ) + parser.add_argument( + "--timeout", + help="Specify a timeout in seconds for a single " + "benchmark execution (default: disabled)", + type=strictly_positive, + ) + parser.add_argument( + "--worker", action="store_true", help="Worker process, run the benchmark." 
+ ) + parser.add_argument( + "--worker-task", + type=positive_or_nul, + metavar="TASK_ID", + help="Identifier of the worker task: " + "only execute the benchmark function TASK_ID", + ) + parser.add_argument( + "--calibrate-loops", + action="store_true", + help="calibrate the number of loops", + ) + parser.add_argument( + "--recalibrate-loops", + action="store_true", + help="recalibrate the the number of loops", + ) + parser.add_argument( + "--calibrate-warmups", + action="store_true", + help="calibrate the number of warmups", + ) + parser.add_argument( + "--recalibrate-warmups", + action="store_true", + help="recalibrate the number of warmups", + ) + parser.add_argument( + "-d", "--dump", action="store_true", help="display benchmark run results" + ) + parser.add_argument( + "--metadata", "-m", action="store_true", help="show metadata" + ) + parser.add_argument( + "--hist", "-g", action="store_true", help="display an histogram of values" + ) + parser.add_argument( + "--stats", + "-t", + action="store_true", + help="display statistics (min, max, ...)", + ) + parser.add_argument( + "--affinity", + metavar="CPU_LIST", + default=None, + help="Specify CPU affinity for worker processes. " + "This way, benchmarks can be forced to run " + "on a given set of CPUs to minimize run to " + "run variation. By default, worker processes " + "are pinned to isolate CPUs if isolated CPUs " + "are found.", + ) + parser.add_argument( + "--inherit-environ", + metavar="VARS", + type=comma_separated, + help="Comma-separated list of environment " + "variables inherited by worker child " + "processes.", + ) + parser.add_argument( + "--copy-env", + dest="copy_env", + action="store_true", + default=False, + help="Copy all environment variables", + ) + parser.add_argument( + "--no-locale", + dest="locale", + action="store_false", + default=True, + help="Don't copy locale environment variables like LANG or LC_CTYPE.", + ) + parser.add_argument( + "--python", + default=sys.executable, + help="Python executable (default: use running Python, sys.executable)", + ) + parser.add_argument( + "--compare-to", + metavar="REF_PYTHON", + help="Run benchmark on the Python executable REF_PYTHON, " + "run benchmark on Python executable PYTHON, " + "and then compare REF_PYTHON result to PYTHON result", + ) + parser.add_argument( + "--python-names", + metavar="REF_NAME:CHANGED_NAMED", + type=parse_python_names, + help="option used with --compare-to to name " + "PYTHON as CHANGED_NAME " + "and REF_PYTHON as REF_NAME in results", + ) + + parser.add_argument( + "--profile", + type=str, + help="Collect profile data using cProfile and output to the given file.", + ) hook_names = list(get_hook_names()) parser.add_argument( - '--hook', action="append", choices=hook_names, + "--hook", + action="append", + choices=hook_names, metavar=f"{', '.join(x for x in hook_names if not x.startswith('_'))}", - help='Use the given pyperf hooks' + help="Use the given pyperf hooks", ) memory = parser.add_mutually_exclusive_group() - memory.add_argument('--tracemalloc', action="store_true", - help='Trace memory allocations using tracemalloc') - memory.add_argument('--track-memory', action="store_true", - help='Track memory usage using a thread') + memory.add_argument( + "--tracemalloc", + action="store_true", + help="Trace memory allocations using tracemalloc", + ) + memory.add_argument( + "--track-memory", + action="store_true", + help="Track memory usage using a thread", + ) self.argparser = parser @@ -282,8 +395,8 @@ def _process_args_impl(self): if 
args.warmups is None and not args.worker and not has_jit: args.warmups = 1 - nprocess = self.argparser.get_default('processes') - nvalues = self.argparser.get_default('values') + nprocess = self.argparser.get_default("processes") + nvalues = self.argparser.get_default("values") if args.rigorous: args.processes = nprocess * 2 # args.values = nvalues * 5 // 3 @@ -303,8 +416,7 @@ def _process_args_impl(self): if args.calibrate_loops: self._only_in_worker("--calibrate-loops") if args.loops: - raise CLIError("--loops=N is incompatible with " - "--calibrate-loops") + raise CLIError("--loops=N is incompatible with --calibrate-loops") elif args.recalibrate_loops: self._only_in_worker("--recalibrate-loops") if args.loops < 1: @@ -316,15 +428,14 @@ def _process_args_impl(self): elif args.recalibrate_warmups: self._only_in_worker("--recalibrate-warmups") if args.loops < 1 or args.warmups is None: - raise CLIError("--recalibrate-warmups requires " - "--loops=N and --warmups=N") + raise CLIError( + "--recalibrate-warmups requires --loops=N and --warmups=N" + ) else: if args.worker and args.loops < 1: - raise CLIError("--worker requires --loops=N " - "or --calibrate-loops") + raise CLIError("--worker requires --loops=N or --calibrate-loops") if args.worker and args.warmups is None: - raise CLIError("--worker requires --warmups=N " - "or --calibrate-warmups") + raise CLIError("--worker requires --warmups=N or --calibrate-warmups") if args.values < 1: raise CLIError("--values must be >= 1") @@ -337,10 +448,10 @@ def _process_args_impl(self): self._only_in_worker("--worker-task") if args.tracemalloc: - if getattr(args, 'action', None) == 'command': - raise CLIError('--tracemalloc cannot be used with pyperf command') + if getattr(args, "action", None) == "command": + raise CLIError("--tracemalloc cannot be used with pyperf command") try: - import tracemalloc # noqa + import tracemalloc # noqa except ImportError as exc: raise CLIError("fail to import tracemalloc: %s" % exc) @@ -353,18 +464,20 @@ def _process_args_impl(self): from pyperf._psutil_memory import check_tracking_memory err_msg = check_tracking_memory() if err_msg: - raise CLIError("unable to track the memory usage " - "(--track-memory): %s" % err_msg) + raise CLIError( + "unable to track the memory usage (--track-memory): %s" % err_msg + ) args.python = abs_executable(args.python) if args.compare_to: args.compare_to = abs_executable(args.compare_to) if args.compare_to: - for option in ('output', 'append'): + for option in ("output", "append"): if getattr(args, option): - raise CLIError("--%s option is incompatible " - "with --compare-to option" % option) + raise CLIError( + "--%s option is incompatible with --compare-to option" % option + ) def _process_args(self): try: @@ -404,11 +517,9 @@ def _cpu_affinity(self): if set_cpu_affinity(cpus): if self.args.verbose: if isolated: - text = ("Pin process to isolated CPUs: %s" - % format_cpu_list(cpus)) + text = "Pin process to isolated CPUs: %s" % format_cpu_list(cpus) else: - text = ("Pin process to CPUs: %s" - % format_cpu_list(cpus)) + text = "Pin process to CPUs: %s" % format_cpu_list(cpus) print(text) if isolated: @@ -419,8 +530,10 @@ def _cpu_affinity(self): print("Use Python 3.3 or newer, or install psutil dependency") sys.exit(1) elif not self.args.quiet: - print("WARNING: unable to pin worker processes to " - "isolated CPUs, CPU affinity not available") + print( + "WARNING: unable to pin worker processes to " + "isolated CPUs, CPU affinity not available" + ) print("Use Python 3.3 or newer, or 
install psutil dependency") def _process_priority(self): @@ -477,12 +590,12 @@ def _no_keyword_argument(kwargs): if not kwargs: return - args = ', '.join(map(repr, sorted(kwargs))) - raise TypeError('unexpected keyword argument %s' % args) + args = ", ".join(map(repr, sorted(kwargs))) + raise TypeError("unexpected keyword argument %s" % args) def bench_time_func(self, name, time_func, *args, **kwargs): - inner_loops = kwargs.pop('inner_loops', None) - metadata = kwargs.pop('metadata', None) + inner_loops = kwargs.pop("inner_loops", None) + metadata = kwargs.pop("metadata", None) self._no_keyword_argument(kwargs) if not self._check_worker_task(): @@ -505,10 +618,10 @@ def task_func(_, loops): return result def bench_func(self, name, func, *args, **kwargs): - """"Benchmark func(*args).""" + """ "Benchmark func(*args).""" - inner_loops = kwargs.pop('inner_loops', None) - metadata = kwargs.pop('metadata', None) + inner_loops = kwargs.pop("inner_loops", None) + metadata = kwargs.pop("metadata", None) self._no_keyword_argument(kwargs) if not self._check_worker_task(): @@ -550,9 +663,9 @@ def task_func(_, loops): def bench_async_func(self, name, func, *args, **kwargs): """Benchmark await func(*args)""" - inner_loops = kwargs.pop('inner_loops', None) - metadata = kwargs.pop('metadata', None) - loop_factory = kwargs.pop('loop_factory', None) + inner_loops = kwargs.pop("inner_loops", None) + metadata = kwargs.pop("metadata", None) + loop_factory = kwargs.pop("loop_factory", None) self._no_keyword_argument(kwargs) if not self._check_worker_task(): @@ -566,6 +679,7 @@ def bench_async_func(self, name, func, *args, **kwargs): def task_func(_, loops): if loops != 1: + async def main(): # use fast local variables local_timer = time.perf_counter @@ -578,6 +692,7 @@ async def main(): dt = local_timer() - t0 return dt else: + async def main(): # use fast local variables local_timer = time.perf_counter @@ -589,6 +704,7 @@ async def main(): return dt import asyncio + # using the lower level loop API instead of asyncio.run because # asyncio.run gained the `loop_factory` arg only in Python 3.12. 
# we can go back to asyncio.run when Python 3.12 is the oldest @@ -615,9 +731,17 @@ async def main(): return result - def timeit(self, name, stmt=None, setup="pass", teardown="pass", - inner_loops=None, duplicate=None, metadata=None, globals=None): - + def timeit( + self, + name, + stmt=None, + setup="pass", + teardown="pass", + inner_loops=None, + duplicate=None, + metadata=None, + globals=None, + ): if not self._check_worker_task(): return None @@ -627,13 +751,18 @@ def timeit(self, name, stmt=None, setup="pass", teardown="pass", # Use lazy import to limit imports on 'import pyperf' from pyperf._timeit import bench_timeit - return bench_timeit(self, name, stmt, - setup=setup, - teardown=teardown, - inner_loops=inner_loops, - duplicate=duplicate, - func_metadata=metadata, - globals=globals) + + return bench_timeit( + self, + name, + stmt, + setup=setup, + teardown=teardown, + inner_loops=inner_loops, + duplicate=duplicate, + func_metadata=metadata, + globals=globals, + ) def _display_result(self, bench, checks=True): args = self.args @@ -649,13 +778,15 @@ def _display_result(self, bench, checks=True): with catch_broken_pipe_error(wfile): bench.dump(wfile) else: - lines = format_benchmark(bench, - checks=checks, - metadata=args.metadata, - dump=args.dump, - stats=args.stats, - hist=args.hist, - show_name=self._show_name) + lines = format_benchmark( + bench, + checks=checks, + metadata=args.metadata, + dump=args.dump, + stats=args.stats, + hist=args.hist, + show_name=self._show_name, + ) for line in lines: print(line) @@ -703,9 +834,9 @@ def _compare_to(self): print() if multiline: - display_title('Benchmark %s' % name) + display_title("Benchmark %s" % name) elif not args.quiet: - print(name, end=': ') + print(name, end=": ") bench = Manager(self, python=python).create_bench() benchs.append(bench) @@ -713,7 +844,7 @@ def _compare_to(self): if multiline: self._display_result(bench) elif not args.quiet: - print(' ' + format_result_value(bench)) + print(" " + format_result_value(bench)) if multiline: print() @@ -726,7 +857,7 @@ def _compare_to(self): print() if multiline: - display_title('Compare') + display_title("Compare") elif not args.quiet: print() timeit_compare_benchs(name_ref, benchs[0], name_changed, benchs[1], args) @@ -744,5 +875,6 @@ def bench_command(self, name, command): # Use lazy import to limit imports on 'import pyperf' from pyperf._command import BenchCommandTask + task = BenchCommandTask(self, name, command) return self._main(task) diff --git a/pyperf/_system.py b/pyperf/_system.py index d0f8e526..0f6789d4 100644 --- a/pyperf/_system.py +++ b/pyperf/_system.py @@ -7,23 +7,33 @@ import sys from pyperf._cli import display_title -from pyperf._cpu_utils import (parse_cpu_list, - get_logical_cpu_count, get_isolated_cpus, - format_cpu_list, format_cpu_infos, - parse_cpu_mask, format_cpus_as_mask) -from pyperf._utils import (read_first_line, sysfs_path, proc_path, open_text, - popen_communicate) - - -MSR_IA32_MISC_ENABLE = 0x1a0 +from pyperf._cpu_utils import ( + parse_cpu_list, + get_logical_cpu_count, + get_isolated_cpus, + format_cpu_list, + format_cpu_infos, + parse_cpu_mask, + format_cpus_as_mask, +) +from pyperf._utils import ( + read_first_line, + sysfs_path, + proc_path, + open_text, + popen_communicate, +) + + +MSR_IA32_MISC_ENABLE = 0x1A0 MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT = 38 -OS_LINUX = sys.platform.startswith('linux') -PLATFORM_X86 = platform.machine() in ('x86', 'x86_64', 'amd64') +OS_LINUX = sys.platform.startswith("linux") +PLATFORM_X86 = platform.machine() in 
("x86", "x86_64", "amd64") def is_root(): - return (os.getuid() == 0) + return os.getuid() == 0 def is_permission_error(exc): @@ -39,9 +49,9 @@ def write_text(filename, content): def run_cmd(cmd): try: # ignore stdout and stderr - proc = subprocess.Popen(cmd, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL) + proc = subprocess.Popen( + cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL + ) except OSError as exc: if exc.errno == errno.ENOENT: return 127 @@ -55,12 +65,10 @@ def run_cmd(cmd): def get_output(cmd): try: - proc = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - universal_newlines=True) + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True) except OSError as exc: if exc.errno == errno.ENOENT: - return (127, '') + return (127, "") else: raise @@ -73,7 +81,7 @@ def use_intel_pstate(): cpu = 0 path = sysfs_path("devices/system/cpu/cpu%s/cpufreq/scaling_driver" % cpu) scaling_driver = read_first_line(path) - return (scaling_driver == 'intel_pstate') + return scaling_driver == "intel_pstate" class Operation: @@ -88,19 +96,19 @@ def __init__(self, name, system): self.tuned_for_benchmarks = None def advice(self, msg): - self.system.advice('%s: %s' % (self.name, msg)) + self.system.advice("%s: %s" % (self.name, msg)) def log_state(self, msg): - self.system.log_state('%s: %s' % (self.name, msg)) + self.system.log_state("%s: %s" % (self.name, msg)) def log_action(self, msg): - self.system.log_action('%s: %s' % (self.name, msg)) + self.system.log_action("%s: %s" % (self.name, msg)) def warning(self, msg): - self.system.warning('%s: %s' % (self.name, msg)) + self.system.warning("%s: %s" % (self.name, msg)) def error(self, msg): - self.system.error('%s: %s' % (self.name, msg)) + self.system.error("%s: %s" % (self.name, msg)) def check_permission_error(self, exc): if is_permission_error(exc): @@ -111,7 +119,7 @@ def read_first_line(self, path): return read_first_line(path, error=True) except OSError as exc: self.check_permission_error(exc) - return '' + return "" def show(self): pass @@ -136,19 +144,19 @@ def available(): return OS_LINUX and PLATFORM_X86 and not use_intel_pstate() def __init__(self, system): - super().__init__('Turbo Boost (MSR)', system) + super().__init__("Turbo Boost (MSR)", system) self.cpu_states = {} self.have_device = True def read_msr(self, cpu, reg_num, use_warnings=False): - path = '/dev/cpu/%s/msr' % cpu - size = struct.calcsize('Q') + path = "/dev/cpu/%s/msr" % cpu + size = struct.calcsize("Q") if size != 8: raise ValueError("need a 64-bit unsigned integer type") try: fd = os.open(path, os.O_RDONLY) try: - if hasattr(os, 'pread'): + if hasattr(os, "pread"): data = os.pread(fd, size, reg_num) else: os.lseek(fd, reg_num, os.SEEK_SET) @@ -171,7 +179,7 @@ def read_msr(self, cpu, reg_num, use_warnings=False): self.error(msg) return None - return struct.unpack('Q', data)[0] + return struct.unpack("Q", data)[0] def read_cpu(self, cpu): reg = self.read_msr(cpu, MSR_IA32_MISC_ENABLE, use_warnings=True) @@ -179,7 +187,7 @@ def read_cpu(self, cpu): return False msr = bool(reg & (1 << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT)) - self.cpu_states[cpu] = (not msr) + self.cpu_states[cpu] = not msr return True def show(self): @@ -200,27 +208,29 @@ def show(self): text = [] if enabled: - text.append('CPU %s: enabled' % format_cpu_list(enabled)) + text.append("CPU %s: enabled" % format_cpu_list(enabled)) if disabled: - text.append('CPU %s: disabled' % format_cpu_list(disabled)) + text.append("CPU %s: disabled" % format_cpu_list(disabled)) if 
text: - self.log_state(', '.join(text)) + self.log_state(", ".join(text)) - self.tuned_for_benchmarks = (not enabled) + self.tuned_for_benchmarks = not enabled if enabled: - self.advice('Disable Turbo Boost on CPU %s to get more reliable ' - 'CPU frequency' % format_cpu_list(enabled)) + self.advice( + "Disable Turbo Boost on CPU %s to get more reliable " + "CPU frequency" % format_cpu_list(enabled) + ) def write_msr(self, cpu, reg_num, value): - path = '/dev/cpu/%s/msr' % cpu - size = struct.calcsize('Q') + path = "/dev/cpu/%s/msr" % cpu + size = struct.calcsize("Q") if size != 8: raise ValueError("need a 64-bit unsigned integer type") - data = struct.pack('Q', value) + data = struct.pack("Q", value) try: fd = os.open(path, os.O_WRONLY) try: - if hasattr(os, 'pwrite'): + if hasattr(os, "pwrite"): os.pwrite(fd, data, reg_num) else: os.lseek(fd, reg_num, os.SEEK_SET) @@ -229,8 +239,10 @@ def write_msr(self, cpu, reg_num, value): os.close(fd) except OSError as exc: self.check_permission_error(exc) - self.error("Failed to write %#x into MSR %#x using %s: %s" - % (value, reg_num, path, exc)) + self.error( + "Failed to write %#x into MSR %#x using %s: %s" + % (value, reg_num, path, exc) + ) return False return True @@ -240,7 +252,7 @@ def write_cpu(self, cpu, enabled): if value is None: return False - mask = (1 << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT) + mask = 1 << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT if not enabled: new_value = value | mask else: @@ -253,12 +265,14 @@ def write_cpu(self, cpu, enabled): return False state = "enabled" if enabled else "disabled" - self.log_action("Turbo Boost %s on CPU %s: MSR %#x set to %#x" - % (state, cpu, MSR_IA32_MISC_ENABLE, new_value)) + self.log_action( + "Turbo Boost %s on CPU %s: MSR %#x set to %#x" + % (state, cpu, MSR_IA32_MISC_ENABLE, new_value) + ) return True def write(self, tune): - enabled = (not tune) + enabled = not tune if tune: cpus = self.system.cpus else: @@ -276,15 +290,15 @@ class TurboBoostIntelPstate(IntelPstateOperation): """ def __init__(self, system): - super().__init__('Turbo Boost (intel_pstate)', system) + super().__init__("Turbo Boost (intel_pstate)", system) self.path = sysfs_path("devices/system/cpu/intel_pstate/no_turbo") self.enabled = None def read_turbo_boost(self): no_turbo = self.read_first_line(self.path) - if no_turbo == '1': + if no_turbo == "1": self.enabled = False - elif no_turbo == '0': + elif no_turbo == "0": self.enabled = True else: self.error("Invalid no_turbo value: %r" % no_turbo) @@ -293,23 +307,22 @@ def read_turbo_boost(self): def show(self): self.read_turbo_boost() if self.enabled is not None: - state = 'enabled' if self.enabled else 'disabled' + state = "enabled" if self.enabled else "disabled" self.log_state("Turbo Boost %s" % state) - self.tuned_for_benchmarks = (not self.enabled) + self.tuned_for_benchmarks = not self.enabled if self.enabled: - self.advice('Disable Turbo Boost to get more reliable ' - 'CPU frequency') + self.advice("Disable Turbo Boost to get more reliable CPU frequency") def write(self, tune): - enable = (not tune) + enable = not tune self.read_turbo_boost() if self.enabled == enable: # no_turbo already set to the expected value return - content = '0' if enable else '1' + content = "0" if enable else "1" try: write_text(self.path, content) except OSError as exc: @@ -318,7 +331,7 @@ def write(self, tune): if not is_root(): self.check_permission_error(exc) - action = 'enable' if enable else 'disable' + action = "enable" if enable else "disable" msg = "Failed to %s Turbo Boost" % 
action disabled_in_bios = is_permission_error(exc) and is_root() if disabled_in_bios: @@ -332,7 +345,7 @@ def write(self, tune): return msg = "%r written into %s" % (content, self.path) - action = 'enabled' if enable else 'disabled' + action = "enabled" if enable else "disabled" self.log_action("Turbo Boost %s: %s" % (action, msg)) @@ -340,7 +353,8 @@ class CPUGovernor(Operation): """ Get/Set CPU scaling governor """ - BENCHMARK_GOVERNOR = 'performance' + + BENCHMARK_GOVERNOR = "performance" @staticmethod def available(): @@ -349,11 +363,13 @@ def available(): ) def __init__(self, system): - super().__init__('CPU scaling governor', system) + super().__init__("CPU scaling governor", system) self.device_syspath = sysfs_path("devices/system/cpu") def read_governor(self, cpu): - filename = os.path.join(self.device_syspath, 'cpu%s/cpufreq/scaling_governor' % cpu) + filename = os.path.join( + self.device_syspath, "cpu%s/cpufreq/scaling_governor" % cpu + ) try: with open(filename, "r") as fp: return fp.readline().rstrip() @@ -372,14 +388,13 @@ def show(self): infos = format_cpu_infos(cpus) if not infos: return - self.log_state('; '.join(infos)) + self.log_state("; ".join(infos)) self.tuned_for_benchmarks = all( governor == self.BENCHMARK_GOVERNOR for governor in cpus.values() ) if not self.tuned_for_benchmarks: - self.advice('Use CPU scaling governor %r' - % self.BENCHMARK_GOVERNOR) + self.advice("Use CPU scaling governor %r" % self.BENCHMARK_GOVERNOR) def write_governor(self, filename, new_governor): with open(filename, "r") as fp: @@ -399,13 +414,14 @@ def write_cpu(self, cpu, tune): return False new_governor = self.BENCHMARK_GOVERNOR if tune else "powersave" - filename = os.path.join(self.device_syspath, 'cpu%s/cpufreq/scaling_governor' % cpu) + filename = os.path.join( + self.device_syspath, "cpu%s/cpufreq/scaling_governor" % cpu + ) try: return self.write_governor(filename, new_governor) except OSError as exc: self.check_permission_error(exc) - self.error("Unable to write governor of CPU %s: %s" - % (cpu, exc)) + self.error("Unable to write governor of CPU %s: %s" % (cpu, exc)) def write(self, tune): modified = [] @@ -435,7 +451,7 @@ def available(): return OS_LINUX def __init__(self, system): - super().__init__('Linux scheduler', system) + super().__init__("Linux scheduler", system) self.ncpu = None self.linux_version = None @@ -447,8 +463,8 @@ def show(self): release = os.uname()[2] try: - version_txt = release.split('-', 1)[0] - self.linux_version = tuple(map(int, version_txt.split('.'))) + version_txt = release.split("-", 1)[0] + self.linux_version = tuple(map(int, version_txt.split("."))) except ValueError: self.error("Failed to get the Linux version: release=%r" % release) return @@ -464,20 +480,20 @@ def show(self): def check_isolcpus(self): isolated = get_isolated_cpus() if isolated: - self.log_state('Isolated CPUs (%s/%s): %s' - % (len(isolated), self.ncpu, - format_cpu_list(isolated))) + self.log_state( + "Isolated CPUs (%s/%s): %s" + % (len(isolated), self.ncpu, format_cpu_list(isolated)) + ) elif self.ncpu > 1: - self.log_state('No CPU is isolated') - self.advice('Use isolcpus= kernel parameter ' - 'to isolate CPUs') + self.log_state("No CPU is isolated") + self.advice("Use isolcpus= kernel parameter to isolate CPUs") def read_rcu_nocbs(self): - cmdline = self.read_first_line(proc_path('cmdline')) + cmdline = self.read_first_line(proc_path("cmdline")) if not cmdline: return - match = re.search(r'\brcu_nocbs=([^ ]+)', cmdline) + match = re.search(r"\brcu_nocbs=([^ ]+)", cmdline) 
if not match: return @@ -487,22 +503,27 @@ def read_rcu_nocbs(self): def check_rcu_nocbs(self): rcu_nocbs = self.read_rcu_nocbs() if rcu_nocbs: - self.log_state('RCU disabled on CPUs (%s/%s): %s' - % (len(rcu_nocbs), self.ncpu, - format_cpu_list(rcu_nocbs))) + self.log_state( + "RCU disabled on CPUs (%s/%s): %s" + % (len(rcu_nocbs), self.ncpu, format_cpu_list(rcu_nocbs)) + ) elif self.ncpu > 1: - self.advice('Use rcu_nocbs= kernel parameter ' - '(with isolcpus) to not schedule RCU ' - 'on isolated CPUs') + self.advice( + "Use rcu_nocbs= kernel parameter " + "(with isolcpus) to not schedule RCU " + "on isolated CPUs" + ) class ASLR(Operation): # randomize_va_space procfs existed prior to 2.6.12-rc2 (2005) # which is first commit of the Linux git repository - STATE = {'0': 'No randomization', - '1': 'Conservative randomization', - '2': 'Full randomization'} + STATE = { + "0": "No randomization", + "1": "Conservative randomization", + "2": "Full randomization", + } path = proc_path("sys/kernel/randomize_va_space") @classmethod @@ -510,7 +531,7 @@ def available(cls): return os.path.exists(cls.path) def __init__(self, system): - super().__init__('ASLR', system) + super().__init__("ASLR", system) def show(self): line = self.read_first_line(self.path) @@ -521,17 +542,16 @@ def show(self): return self.log_state(state) - self.tuned_for_benchmarks = (line == '2') + self.tuned_for_benchmarks = line == "2" if not self.tuned_for_benchmarks: - self.advice("Enable full randomization: write 2 into %s" - % self.path) + self.advice("Enable full randomization: write 2 into %s" % self.path) def write(self, tune): value = self.read_first_line(self.path) if not value: return - new_value = '2' + new_value = "2" if new_value == value: return @@ -541,8 +561,10 @@ def write(self, tune): self.check_permission_error(exc) self.error("Failed to write into %s: %s" % (self.path, exc)) else: - self.log_action("Full randomization enabled: %r written into %s" - % (new_value, self.path)) + self.log_action( + "Full randomization enabled: %r written into %s" + % (new_value, self.path) + ) class CPUFrequency(Operation): @@ -556,26 +578,26 @@ def available(): return os.path.exists(sysfs_path("devices/system/cpu/cpu0/cpufreq")) def __init__(self, system): - super().__init__('CPU Frequency', system) + super().__init__("CPU Frequency", system) self.device_syspath = sysfs_path("devices/system/cpu") def read_cpu(self, cpu): - path = os.path.join(self.device_syspath, 'cpu%s/cpufreq' % cpu) + path = os.path.join(self.device_syspath, "cpu%s/cpufreq" % cpu) scaling_min_freq = self.read_first_line(os.path.join(path, "scaling_min_freq")) scaling_max_freq = self.read_first_line(os.path.join(path, "scaling_max_freq")) if not scaling_min_freq or not scaling_max_freq: - self.warning("Unable to read scaling_min_freq " - "or scaling_max_freq of CPU %s" % cpu) + self.warning( + "Unable to read scaling_min_freq or scaling_max_freq of CPU %s" % cpu + ) return min_mhz = int(scaling_min_freq) // 1000 max_mhz = int(scaling_max_freq) // 1000 if min_mhz != max_mhz: - freq = ('min=%s MHz, max=%s MHz' - % (min_mhz, max_mhz)) + freq = "min=%s MHz, max=%s MHz" % (min_mhz, max_mhz) else: - freq = 'min=max=%s MHz' % max_mhz + freq = "min=max=%s MHz" % max_mhz return freq def show(self): @@ -588,7 +610,7 @@ def show(self): infos = format_cpu_infos(cpus) if not infos: return - self.log_state('; '.join(infos)) + self.log_state("; ".join(infos)) def read_freq(self, filename): try: @@ -610,7 +632,7 @@ def write_freq(self, filename, new_freq): return True def 
write_cpu(self, cpu, tune): - cpu_path = os.path.join(self.device_syspath, 'cpu%s/cpufreq' % cpu) + cpu_path = os.path.join(self.device_syspath, "cpu%s/cpufreq" % cpu) name = "cpuinfo_max_freq" if tune else "cpuinfo_min_freq" freq = self.read_freq(os.path.join(cpu_path, name)) @@ -623,8 +645,7 @@ def write_cpu(self, cpu, tune): return self.write_freq(filename, freq) except OSError as exc: self.check_permission_error(exc) - self.error("Unable to write scaling_max_freq of CPU %s: %s" - % (cpu, exc)) + self.error("Unable to write scaling_max_freq of CPU %s: %s" % (cpu, exc)) def write(self, tune): modified = [] @@ -647,22 +668,22 @@ class IRQAffinity(Operation): # /proc/irq/N/smp_affinity existed prior to 2.6.12-rc2 (2005) # which is first commit of the Linux git repository - irq_path = proc_path('irq') + irq_path = proc_path("irq") @classmethod def available(cls): return os.path.exists(cls.irq_path) def __init__(self, system): - super().__init__('IRQ affinity', system) + super().__init__("IRQ affinity", system) self.irq_affinity_path = os.path.join(self.irq_path, "%s/smp_affinity") - self.default_affinity_path = os.path.join(self.irq_path, 'default_smp_affinity') + self.default_affinity_path = os.path.join(self.irq_path, "default_smp_affinity") self.systemctl = True self.irqs = None def read_irqbalance_systemctl(self): - cmd = ('systemctl', 'status', 'irqbalance') + cmd = ("systemctl", "status", "irqbalance") exitcode, stdout = get_output(cmd) if not stdout: # systemctl is not installed? ignore errors @@ -676,7 +697,7 @@ def read_irqbalance_systemctl(self): self.systemctl = True loaded = match.group(1) - if loaded.startswith('not-found'): + if loaded.startswith("not-found"): # irqbalance service is not installed: do nothing return @@ -686,25 +707,25 @@ def read_irqbalance_systemctl(self): return active = match.group(1) - if active in ('active', 'activating'): + if active in ("active", "activating"): return True - elif active in ('inactive', 'deactivating', 'dead'): + elif active in ("inactive", "deactivating", "dead"): return False else: self.error("Unknown service state: %r" % active) def read_irqbalance_service(self): - cmd = ('service', 'irqbalance', 'status') + cmd = ("service", "irqbalance", "status") exitcode, stdout = get_output(cmd) if not stdout: # failed to the the status: ignore return stdout = stdout.rstrip() - state = stdout.split(' ', 1)[-1] - if state.startswith('stop'): + state = stdout.split(" ", 1)[-1] + if state.startswith("stop"): return False - elif state.startswith('start'): + elif state.startswith("start"): return True else: self.error("Unknown service state: %r" % stdout) @@ -760,21 +781,24 @@ def read_irqs_affinity(self): def show(self): irqbalance_active = self.read_irqbalance_state() if irqbalance_active is not None: - state = 'active' if irqbalance_active else 'inactive' + state = "active" if irqbalance_active else "inactive" self.log_state("irqbalance service: %s" % state) default_smp_affinity = self.read_default_affinity() if default_smp_affinity: - self.log_state("Default IRQ affinity: CPU %s" - % format_cpu_list(default_smp_affinity)) + self.log_state( + "Default IRQ affinity: CPU %s" % format_cpu_list(default_smp_affinity) + ) irq_affinity = self.read_irqs_affinity() if irq_affinity: - infos = {irq: 'CPU %s' % format_cpu_list(cpus) - for irq, cpus in irq_affinity.items()} + infos = { + irq: "CPU %s" % format_cpu_list(cpus) + for irq, cpus in irq_affinity.items() + } infos = format_cpu_infos(infos) - infos = ['IRQ %s' % info for info in infos] - 
self.log_state('IRQ affinity: %s' % '; '.join(infos)) + infos = ["IRQ %s" % info for info in infos] + self.log_state("IRQ affinity: %s" % "; ".join(infos)) def write_irqbalance_service(self, enable): irqbalance_active = self.read_irqbalance_state() @@ -787,19 +811,20 @@ def write_irqbalance_service(self, enable): # service is already in the expected state: nothing to do return - action = 'start' if enable else 'stop' + action = "start" if enable else "stop" if self.systemctl is False: - cmd = ('service', 'irqbalance', action) + cmd = ("service", "irqbalance", action) else: - cmd = ('systemctl', action, 'irqbalance') + cmd = ("systemctl", action, "irqbalance") exitcode = run_cmd(cmd) if exitcode: - self.error('Failed to %s irqbalance service: ' - '%s failed with exit code %s' - % (action, ' '.join(cmd), exitcode)) + self.error( + "Failed to %s irqbalance service: " + "%s failed with exit code %s" % (action, " ".join(cmd), exitcode) + ) return - action = 'Start' if enable else 'Stop' + action = "Start" if enable else "Stop" self.log_action("%s irqbalance service" % action) def write_default(self, new_affinity): @@ -812,11 +837,14 @@ def write_default(self, new_affinity): write_text(self.default_affinity_path, mask) except OSError as exc: self.check_permission_error(exc) - self.error("Failed to write %r into %s: %s" - % (mask, self.default_affinity_path, exc)) + self.error( + "Failed to write %r into %s: %s" + % (mask, self.default_affinity_path, exc) + ) else: - self.log_action("Set default affinity to CPU %s" - % format_cpu_list(new_affinity)) + self.log_action( + "Set default affinity to CPU %s" % format_cpu_list(new_affinity) + ) def write_irq(self, irq, cpus): path = self.irq_affinity_path % irq @@ -829,8 +857,7 @@ def write_irq(self, irq, cpus): # EIO means that the IRQ doesn't support SMP affinity: # ignore the error if exc.errno != errno.EIO: - self.error("Failed to write %r into %s: %s" - % (mask, path, exc)) + self.error("Failed to write %r into %s: %s" % (mask, path, exc)) return False def write_irqs(self, new_cpus): @@ -848,8 +875,10 @@ def write_irqs(self, new_cpus): modified.append(irq) if modified: - self.log_action("Set affinity of IRQ %s to CPU %s" - % (format_cpu_list(modified), format_cpu_list(new_cpus))) + self.log_action( + "Set affinity of IRQ %s to CPU %s" + % (format_cpu_list(modified), format_cpu_list(new_cpus)) + ) def write(self, tune): cpus = range(self.system.logical_cpu_count) @@ -866,12 +895,11 @@ def write(self, tune): class CheckNOHZFullIntelPstate(IntelPstateOperation): - def __init__(self, system): - super().__init__('Check nohz_full', system) + super().__init__("Check nohz_full", system) def show(self): - nohz_full = self.read_first_line(sysfs_path('devices/system/cpu/nohz_full')) + nohz_full = self.read_first_line(sysfs_path("devices/system/cpu/nohz_full")) if not nohz_full: return @@ -883,41 +911,42 @@ def show(self): if not used: return - self.advice("WARNING: nohz_full is enabled on CPUs %s which use the " - "intel_pstate driver, whereas intel_pstate is incompatible " - "with nohz_full" - % format_cpu_list(used)) + self.advice( + "WARNING: nohz_full is enabled on CPUs %s which use the " + "intel_pstate driver, whereas intel_pstate is incompatible " + "with nohz_full" % format_cpu_list(used) + ) self.advice("See https://bugzilla.redhat.com/show_bug.cgi?id=1378529") self.tuned_for_benchmarks = False class PowerSupply(Operation): - path = sysfs_path('class/power_supply') + path = sysfs_path("class/power_supply") @classmethod def available(cls): return 
os.path.exists(cls.path) def __init__(self, system): - super().__init__('Power supply', system) + super().__init__("Power supply", system) def read_power_supply(self): # Python implementation of the on_ac_power shell script for name in os.listdir(self.path): # Ignore "USB" and "Battery" types - filename = os.path.join(self.path, name, 'type') + filename = os.path.join(self.path, name, "type") sys_type = self.read_first_line(filename) if sys_type.strip() != "Mains": continue - filename = os.path.join(self.path, name, 'online') + filename = os.path.join(self.path, name, "online") if not os.path.exists(filename): continue line = self.read_first_line(filename) - if line == '1': + if line == "1": return True - if line == '0': + if line == "0": return False self.error("Failed to parse %s: %r" % (filename, line)) break @@ -929,10 +958,10 @@ def show(self): if plugged is None: return - state = 'plugged' if plugged else 'unplugged' - self.log_state('the power cable is %s' % state) + state = "plugged" if plugged else "unplugged" + self.log_state("the power cable is %s" % state) if not plugged: - self.advice('The power cable must be plugged') + self.advice("The power cable must be plugged") class PerfEvent(Operation): @@ -946,7 +975,7 @@ def available(cls): return os.path.exists(cls.path) def __init__(self, system): - super().__init__('Perf event', system) + super().__init__("Perf event", system) def read_max_sample_rate(self): line = self.read_first_line(self.path) @@ -960,7 +989,7 @@ def show(self): return self.log_state("Maximum sample rate: %s per second" % max_sample_rate) - self.tuned_for_benchmarks = (max_sample_rate == self.BENCHMARK_RATE) + self.tuned_for_benchmarks = max_sample_rate == self.BENCHMARK_RATE if not self.tuned_for_benchmarks: self.advice("Set max sample rate to %s" % self.BENCHMARK_RATE) @@ -991,15 +1020,12 @@ def write(self, tune): CPUFrequency, IRQAffinity, PowerSupply, - # Setting the CPU scaling governor resets no_turbo # and so must be set before Turbo Boost CPUGovernor, - # Intel Pstate Operations TurboBoostIntelPstate, CheckNOHZFullIntelPstate, - # X86 Operations TurboBoostMSR, ] @@ -1052,15 +1078,15 @@ def write_messages(self, title, messages): print(msg) def run_operations(self, action): - if action == 'tune': + if action == "tune": print("Tune the system configuration to run benchmarks") - elif action == 'reset': + elif action == "reset": print("Reset system configuration") else: print("Show the system configuration") - if action in ('tune', 'reset'): - tune = (action == 'tune') + if action in ("tune", "reset"): + tune = action == "tune" for operation in self.operations: operation.write(tune) @@ -1072,7 +1098,7 @@ def run_operations(self, action): msg = "ERROR: At least one operation failed with permission error" if not is_root(): msg += ", retry as root" - if action == 'show': + if action == "show": self.warning(msg) else: self.error(msg) @@ -1097,28 +1123,33 @@ def init(self, args): # The list of cpus must be sorted to avoid useless write in operations assert sorted(self.cpus) == list(self.cpus) - self.log_state("CPU: use %s logical CPUs: %s" - % (len(self.cpus), format_cpu_list(self.cpus))) + self.log_state( + "CPU: use %s logical CPUs: %s" + % (len(self.cpus), format_cpu_list(self.cpus)) + ) def render_messages(self, action): self.write_messages("Actions", self.actions) self.write_messages("System state", self.states) # Advices are for tuning: hide them for reset - if action != 'reset': + if action != "reset": self.write_messages("Advices", self.advices) 
self.write_messages("Warnings", self.warnings) self.write_messages("Errors", self.errors) - if action == 'show': - self.tuned = all(operation.tuned_for_benchmarks in (True, None) - for operation in self.operations) + if action == "show": + self.tuned = all( + operation.tuned_for_benchmarks in (True, None) + for operation in self.operations + ) print() if self.tuned and not self.errors: print("OK! System ready for benchmarking") else: - print('Run "%s -m pyperf system tune" to tune the system ' - 'configuration to run benchmarks' - % os.path.basename(sys.executable)) + print( + 'Run "%s -m pyperf system tune" to tune the system ' + "configuration to run benchmarks" % os.path.basename(sys.executable) + ) def main(self, action, args): self.init(args) diff --git a/pyperf/_timeit.py b/pyperf/_timeit.py index 64da6a51..33a036ad 100644 --- a/pyperf/_timeit.py +++ b/pyperf/_timeit.py @@ -6,7 +6,7 @@ import pyperf -PYPY = (pyperf.python_implementation() == 'pypy') +PYPY = pyperf.python_implementation() == "pypy" DUMMY_SRC_NAME = "" # Don't change the indentation of the template; the reindent() calls @@ -41,36 +41,35 @@ def reindent(src, indent): class Timer: - def __init__(self, stmt="pass", setup="pass", teardown="pass", - globals=None): + def __init__(self, stmt="pass", setup="pass", teardown="pass", globals=None): self.local_ns = {} self.global_ns = {} if globals is None else globals self.filename = DUMMY_SRC_NAME - init = '' + init = "" if isinstance(setup, str): # Check that the code can be compiled outside a function compile(setup, self.filename, "exec") - full = setup + '\n' + full = setup + "\n" setup = reindent(setup, 4) elif callable(setup): - self.local_ns['_setup'] = setup - init += ', _setup=_setup' - full = '' - setup = '_setup()' + self.local_ns["_setup"] = setup + init += ", _setup=_setup" + full = "" + setup = "_setup()" else: raise ValueError("setup is neither a string nor callable") if isinstance(stmt, str): # Check that the code can be compiled outside a function compile(full + stmt, self.filename, "exec") - full = full + stmt + '\n' + full = full + stmt + "\n" stmt = reindent(stmt, 8) elif callable(stmt): - self.local_ns['_stmt'] = stmt - init += ', _stmt=_stmt' - full = '' - stmt = '_stmt()' + self.local_ns["_stmt"] = stmt + init += ", _stmt=_stmt" + full = "" + stmt = "_stmt()" else: raise ValueError("stmt is neither a string nor callable") @@ -79,9 +78,9 @@ def __init__(self, stmt="pass", setup="pass", teardown="pass", compile(full + teardown, self.filename, "exec") teardown = reindent(teardown, 4) elif callable(teardown): - self.local_ns['_teardown'] = teardown - init += ', _teardown=_teardown' - teardown = '_teardown()' + self.local_ns["_teardown"] = teardown + init += ", _teardown=_teardown" + teardown = "_teardown()" else: raise ValueError("teardown is neither a string nor callable") @@ -89,8 +88,7 @@ def __init__(self, stmt="pass", setup="pass", teardown="pass", template = PYPY_TEMPLATE else: template = TEMPLATE - src = template.format(stmt=stmt, setup=setup, init=init, - teardown=teardown) + src = template.format(stmt=stmt, setup=setup, init=init, teardown=teardown) self.src = src # Save for traceback display def make_inner(self): @@ -111,10 +109,12 @@ def make_inner(self): def update_linecache(self): import linecache - linecache.cache[self.filename] = (len(self.src), - None, - self.src.split("\n"), - self.filename) + linecache.cache[self.filename] = ( + len(self.src), + None, + self.src.split("\n"), + self.filename, + ) def time_func(self, loops): inner = 
self.make_inner() @@ -137,7 +137,7 @@ def strip_statements(statements): def format_statements(statements): - return ' '.join(repr(stmt) for stmt in statements) + return " ".join(repr(stmt) for stmt in statements) def create_timer(stmt, setup, teardown, globals): @@ -145,6 +145,7 @@ def create_timer(stmt, setup, teardown, globals): # contains the directory of this script, rather than the current # directory) import os + sys.path.insert(0, os.curdir) stmt = "\n".join(stmt) @@ -181,10 +182,17 @@ def display_error(timer, stmt, setup, teardown): traceback.print_exc() -def bench_timeit(runner, name, stmt, setup, teardown, - inner_loops=None, duplicate=None, - func_metadata=None, globals=None): - +def bench_timeit( + runner, + name, + stmt, + setup, + teardown, + inner_loops=None, + duplicate=None, + func_metadata=None, + globals=None, +): if isinstance(stmt, str): stmt = (stmt,) if isinstance(setup, str): @@ -203,10 +211,10 @@ def bench_timeit(runner, name, stmt, setup, teardown, if func_metadata: metadata.update(func_metadata) if setup: - metadata['timeit_setup'] = format_statements(setup) + metadata["timeit_setup"] = format_statements(setup) if teardown: - metadata['timeit_teardown'] = format_statements(teardown) - metadata['timeit_stmt'] = format_statements(stmt) + metadata["timeit_teardown"] = format_statements(teardown) + metadata["timeit_stmt"] = format_statements(stmt) orig_stmt = stmt @@ -218,11 +226,11 @@ def bench_timeit(runner, name, stmt, setup, teardown, inner_loops *= duplicate else: inner_loops = duplicate - metadata['timeit_duplicate'] = duplicate + metadata["timeit_duplicate"] = duplicate - kwargs = {'metadata': metadata} + kwargs = {"metadata": metadata} if inner_loops: - kwargs['inner_loops'] = inner_loops + kwargs["inner_loops"] = inner_loops timer = None try: @@ -230,6 +238,6 @@ def bench_timeit(runner, name, stmt, setup, teardown, runner.bench_time_func(name, timer.time_func, **kwargs) except SystemExit: raise - except: # noqa: E722 + except: # noqa: E722 display_error(timer, orig_stmt, setup, teardown) sys.exit(1) diff --git a/pyperf/_timeit_cli.py b/pyperf/_timeit_cli.py index 98b86b0a..ee954308 100644 --- a/pyperf/_timeit_cli.py +++ b/pyperf/_timeit_cli.py @@ -1,54 +1,65 @@ """ "pyperf timeit" microbenchmark command based on the Python timeit module. 
""" + from pyperf._runner import Runner from pyperf._timeit import bench_timeit -DEFAULT_NAME = 'timeit' +DEFAULT_NAME = "timeit" def add_cmdline_args(cmd, args): - cmd.extend(('--name', args.name)) + cmd.extend(("--name", args.name)) if args.inner_loops: - cmd.extend(('--inner-loops', str(args.inner_loops))) + cmd.extend(("--inner-loops", str(args.inner_loops))) for setup in args.setup: cmd.extend(("--setup", setup)) for teardown in args.teardown: - cmd.extend(('--teardown', teardown)) + cmd.extend(("--teardown", teardown)) if args.duplicate: - cmd.extend(('--duplicate', str(args.duplicate))) + cmd.extend(("--duplicate", str(args.duplicate))) cmd.extend(args.stmt) class TimeitRunner(Runner): def __init__(self, *args, **kw): - if 'program_args' not in kw: - kw['program_args'] = ('-m', 'pyperf', 'timeit') - kw['add_cmdline_args'] = add_cmdline_args + if "program_args" not in kw: + kw["program_args"] = ("-m", "pyperf", "timeit") + kw["add_cmdline_args"] = add_cmdline_args Runner.__init__(self, *args, **kw) def parse_name(name): return name.strip() cmd = self.argparser - cmd.add_argument('--name', type=parse_name, - help='Benchmark name (default: %r)' % DEFAULT_NAME) - cmd.add_argument('-s', '--setup', action='append', default=[], - help='setup statements') - cmd.add_argument('--teardown', action='append', default=[], - help='teardown statements') - cmd.add_argument('--inner-loops', - type=int, - help='Number of inner loops per value. For example, ' - 'the number of times that the code is copied ' - 'manually multiple times to reduce the overhead ' - 'of the outer loop.') - cmd.add_argument('--duplicate', type=int, - help='duplicate statements to reduce the overhead of ' - 'the outer loop and multiply inner_loops ' - 'by DUPLICATE') - cmd.add_argument('stmt', nargs='+', help='executed statements') + cmd.add_argument( + "--name", + type=parse_name, + help="Benchmark name (default: %r)" % DEFAULT_NAME, + ) + cmd.add_argument( + "-s", "--setup", action="append", default=[], help="setup statements" + ) + cmd.add_argument( + "--teardown", action="append", default=[], help="teardown statements" + ) + cmd.add_argument( + "--inner-loops", + type=int, + help="Number of inner loops per value. For example, " + "the number of times that the code is copied " + "manually multiple times to reduce the overhead " + "of the outer loop.", + ) + cmd.add_argument( + "--duplicate", + type=int, + help="duplicate statements to reduce the overhead of " + "the outer loop and multiply inner_loops " + "by DUPLICATE", + ) + cmd.add_argument("stmt", nargs="+", help="executed statements") def _process_args(self): Runner._process_args(self) @@ -61,5 +72,12 @@ def _process_args(self): def main(runner): args = runner.args - bench_timeit(runner, args.name, args.stmt, args.setup, args.teardown, - args.inner_loops, args.duplicate) + bench_timeit( + runner, + args.name, + args.stmt, + args.setup, + args.teardown, + args.inner_loops, + args.duplicate, + ) diff --git a/pyperf/_utils.py b/pyperf/_utils.py index ef542d9d..cd3e6ffe 100644 --- a/pyperf/_utils.py +++ b/pyperf/_utils.py @@ -6,16 +6,16 @@ import sys import sysconfig import time -from shlex import quote as shell_quote # noqa +from shlex import quote as shell_quote # noqa from shutil import which # Currently there is a packaging issue for PEP-703, # Until then psutil is disabled as a workaround. 
# See: https://github.com/python/cpython/issues/116024 -USE_PSUTIL = not bool(sysconfig.get_config_var('Py_GIL_DISABLED')) -MS_WINDOWS = (sys.platform == 'win32') -MAC_OS = (sys.platform == 'darwin') -BSD = ('bsd' in sys.platform) +USE_PSUTIL = not bool(sysconfig.get_config_var("Py_GIL_DISABLED")) +MS_WINDOWS = sys.platform == "win32" +MAC_OS = sys.platform == "darwin" +BSD = "bsd" in sys.platform if MS_WINDOWS: import msvcrt @@ -25,13 +25,39 @@ # approximate. While this may look less elegant than simply calculating the # critical value, those calculations suck. Look at # http://www.math.unb.ca/~knight/utility/t-table.htm if you need more values. -_T_DIST_95_CONF_LEVELS = [0, 12.706, 4.303, 3.182, 2.776, - 2.571, 2.447, 2.365, 2.306, 2.262, - 2.228, 2.201, 2.179, 2.160, 2.145, - 2.131, 2.120, 2.110, 2.101, 2.093, - 2.086, 2.080, 2.074, 2.069, 2.064, - 2.060, 2.056, 2.052, 2.048, 2.045, - 2.042] +_T_DIST_95_CONF_LEVELS = [ + 0, + 12.706, + 4.303, + 3.182, + 2.776, + 2.571, + 2.447, + 2.365, + 2.306, + 2.262, + 2.228, + 2.201, + 2.179, + 2.160, + 2.145, + 2.131, + 2.120, + 2.110, + 2.101, + 2.093, + 2.086, + 2.080, + 2.074, + 2.069, + 2.064, + 2.060, + 2.056, + 2.052, + 2.048, + 2.045, + 2.042, +] def tdist95conf_level(df): @@ -125,11 +151,11 @@ def parse_run_list(run_list): run_list = run_list.strip() runs = [] - for part in run_list.split(','): + for part in run_list.split(","): part = part.strip() try: - if '-' in part: - parts = part.split('-', 1) + if "-" in part: + parts = part.split("-", 1) first = int(parts[0]) last = int(parts[1]) for run in range(first, last + 1): @@ -162,7 +188,7 @@ def read_first_line(path, error=False): if error: raise else: - return '' + return "" def proc_path(path): @@ -174,29 +200,29 @@ def sysfs_path(path): def python_implementation(): - if hasattr(sys, 'implementation'): + if hasattr(sys, "implementation"): # PEP 421, Python 3.3 name = sys.implementation.name else: # Code extracted from platform.python_implementation(). # Don't import platform to avoid the subprocess import. 
sys_version = sys.version - if 'IronPython' in sys_version: - name = 'IronPython' - elif sys.platform.startswith('java'): - name = 'Jython' + if "IronPython" in sys_version: + name = "IronPython" + elif sys.platform.startswith("java"): + name = "Jython" elif "PyPy" in sys_version: name = "PyPy" else: - name = 'CPython' + name = "CPython" return name.lower() def python_has_jit(): implementation_name = python_implementation() - if implementation_name == 'pypy': + if implementation_name == "pypy": return sys.pypy_translation_info["translation.jit"] - elif implementation_name in ['graalpython', 'graalpy']: + elif implementation_name in ["graalpython", "graalpy"]: return True elif hasattr(sys, "pyston_version_info") or "pyston_lite" in sys.modules: return True @@ -207,7 +233,7 @@ def python_has_jit(): def popen_killer(proc): try: yield - except: # noqa: E722 + except: # noqa: E722 # Close pipes if proc.stdin: proc.stdin.close() @@ -269,17 +295,40 @@ def create_environ(inherit_environ, locale, copy_all): if copy_all: return os.environ env = {} - copy_env = ["PATH", "HOME", "TEMP", "COMSPEC", "SystemRoot", "SystemDrive", - # Python specific variables - "PYTHONPATH", "PYTHON_CPU_COUNT", "PYTHON_GIL", - # Pyperf specific variables - "PYPERF_PERF_RECORD_DATA_DIR", "PYPERF_PERF_RECORD_EXTRA_OPTS", - ] + copy_env = [ + "PATH", + "HOME", + "TEMP", + "COMSPEC", + "SystemRoot", + "SystemDrive", + # Python specific variables + "PYTHONPATH", + "PYTHON_CPU_COUNT", + "PYTHON_GIL", + # Pyperf specific variables + "PYPERF_PERF_RECORD_DATA_DIR", + "PYPERF_PERF_RECORD_EXTRA_OPTS", + ] if locale: - copy_env.extend(('LANG', 'LC_ADDRESS', 'LC_ALL', 'LC_COLLATE', - 'LC_CTYPE', 'LC_IDENTIFICATION', 'LC_MEASUREMENT', - 'LC_MESSAGES', 'LC_MONETARY', 'LC_NAME', 'LC_NUMERIC', - 'LC_PAPER', 'LC_TELEPHONE', 'LC_TIME')) + copy_env.extend( + ( + "LANG", + "LC_ADDRESS", + "LC_ALL", + "LC_COLLATE", + "LC_CTYPE", + "LC_IDENTIFICATION", + "LC_MEASUREMENT", + "LC_MESSAGES", + "LC_MONETARY", + "LC_NAME", + "LC_NUMERIC", + "LC_PAPER", + "LC_TELEPHONE", + "LC_TIME", + ) + ) if inherit_environ: copy_env.extend(inherit_environ) @@ -414,9 +463,10 @@ def percentile(values, p): return values[int(k)] -if hasattr(statistics, 'geometric_mean'): +if hasattr(statistics, "geometric_mean"): _geometric_mean = statistics.geometric_mean else: + def _geometric_mean(data): # Compute exp(fmean(map(log, data))) using floats data = list(map(math.log, data)) @@ -439,6 +489,7 @@ def merge_profile_stats(profiler, dst): Save pstats by merging into an existing file. 
""" import pstats + if os.path.isfile(dst): try: src_stats = pstats.Stats(profiler) diff --git a/pyperf/_win_memory.py b/pyperf/_win_memory.py index 7bb5c280..a57b8607 100644 --- a/pyperf/_win_memory.py +++ b/pyperf/_win_memory.py @@ -10,17 +10,17 @@ class PROCESS_MEMORY_COUNTERS_EX(ctypes.Structure): _fields_ = [ - ('cb', wintypes.DWORD), - ('PageFaultCount', wintypes.DWORD), - ('PeakWorkingSetSize', SIZE_T), - ('WorkingSetSize', SIZE_T), - ('QuotaPeakPagedPoolUsage', SIZE_T), - ('QuotaPagedPoolUsage', SIZE_T), - ('QuotaPeakNonPagedPoolUsage', SIZE_T), - ('QuotaNonPagedPoolUsage', SIZE_T), - ('PagefileUsage', SIZE_T), - ('PeakPagefileUsage', SIZE_T), - ('PrivateUsage', SIZE_T), + ("cb", wintypes.DWORD), + ("PageFaultCount", wintypes.DWORD), + ("PeakWorkingSetSize", SIZE_T), + ("WorkingSetSize", SIZE_T), + ("QuotaPeakPagedPoolUsage", SIZE_T), + ("QuotaPagedPoolUsage", SIZE_T), + ("QuotaPeakNonPagedPoolUsage", SIZE_T), + ("QuotaNonPagedPoolUsage", SIZE_T), + ("PagefileUsage", SIZE_T), + ("PeakPagefileUsage", SIZE_T), + ("PrivateUsage", SIZE_T), ] GetProcessMemoryInfo = ctypes.windll.psapi.GetProcessMemoryInfo @@ -35,9 +35,7 @@ class PROCESS_MEMORY_COUNTERS_EX(ctypes.Structure): def get_peak_pagefile_usage(): process = GetCurrentProcess() counters = PROCESS_MEMORY_COUNTERS_EX() - ret = GetProcessMemoryInfo(process, - ctypes.byref(counters), - ctypes.sizeof(counters)) + ret = GetProcessMemoryInfo(process, ctypes.byref(counters), ctypes.sizeof(counters)) if not ret: raise ctypes.WinError() @@ -46,8 +44,7 @@ def get_peak_pagefile_usage(): def check_tracking_memory(): if GetProcessMemoryInfo is None: - return ("missing ctypes module, " - "unable to get GetProcessMemoryInfo()") + return "missing ctypes module, unable to get GetProcessMemoryInfo()" usage = get_peak_pagefile_usage() if not usage: diff --git a/pyperf/_worker.py b/pyperf/_worker.py index 67a7452f..51d4e9a2 100644 --- a/pyperf/_worker.py +++ b/pyperf/_worker.py @@ -4,14 +4,18 @@ import time import pyperf -from pyperf._formatter import (format_number, format_value, format_values, - format_timedelta) +from pyperf._formatter import ( + format_number, + format_value, + format_values, + format_timedelta, +) from pyperf._hooks import instantiate_selected_hooks from pyperf._utils import MS_WINDOWS, percentile, median_abs_dev from pyperf._system import OS_LINUX -MAX_LOOPS = 2 ** 32 +MAX_LOOPS = 2**32 # Parameters to calibrate and recalibrate warmups @@ -35,19 +39,18 @@ def __init__(self, runner, name, task_func, func_metadata): self.metadata = dict(runner.metadata) if func_metadata: self.metadata.update(func_metadata) - if 'unit' not in self.metadata: + if "unit" not in self.metadata: # Set default unit to seconds - self.metadata['unit'] = 'second' + self.metadata["unit"] = "second" self.inner_loops = None self.warmups = None self.values = () - def _compute_values(self, values, nvalue, - is_warmup=False, - calibrate_loops=False, - start=0): - unit = self.metadata.get('unit') + def _compute_values( + self, values, nvalue, is_warmup=False, calibrate_loops=False, start=0 + ): + unit = self.metadata.get("unit") args = self.args if nvalue < 1: raise ValueError("nvalue must be >= 1") @@ -55,9 +58,9 @@ def _compute_values(self, values, nvalue, raise ValueError("loops must be >= 1") if is_warmup: - value_name = 'Warmup' + value_name = "Warmup" else: - value_name = 'Value' + value_name = "Value" task_func = self.task_func @@ -92,20 +95,25 @@ def _compute_values(self, values, nvalue, if args.verbose: text = format_value(unit, value) if is_warmup: - 
text = ('%s (loops: %s, raw: %s)' - % (text, - format_number(self.loops), - format_value(unit, raw_value))) + text = "%s (loops: %s, raw: %s)" % ( + text, + format_number(self.loops), + format_value(unit, raw_value), + ) print("%s %s: %s" % (value_name, start + index, text)) if calibrate_loops and raw_value < args.min_time: if self.loops * 2 > MAX_LOOPS: print("ERROR: failed to calibrate the number of loops") - print("Raw timing %s with %s is still smaller than " - "the minimum time of %s" - % (format_value(unit, raw_value), - format_number(self.loops, 'loop'), - format_timedelta(args.min_time))) + print( + "Raw timing %s with %s is still smaller than " + "the minimum time of %s" + % ( + format_value(unit, raw_value), + format_number(self.loops, "loop"), + format_timedelta(args.min_time), + ) + ) sys.exit(1) self.loops *= 2 # need more values for the calibration @@ -118,6 +126,7 @@ def _compute_values(self, values, nvalue, def collect_metadata(self): from pyperf._collect_metadata import collect_metadata + return collect_metadata(process=False) def test_calibrate_warmups(self, nwarmup, unit): @@ -131,7 +140,7 @@ def test_calibrate_warmups(self, nwarmup, unit): q1 = percentile(values, 0.25) q3 = percentile(values, 0.75) iqr = q3 - q1 - outlier_max = (q3 + 1.5 * iqr) + outlier_max = q3 + 1.5 * iqr # only check maximum, not minimum outlier = not (first_value <= outlier_max) @@ -159,29 +168,46 @@ def test_calibrate_warmups(self, nwarmup, unit): sample1_str = format_values(unit, (s1_q1, mean1, s1_q3, stdev1, mad1)) sample2_str = format_values(unit, (s2_q1, mean2, s2_q3, stdev2, mad2)) print("Calibration: warmups=%s" % format_number(nwarmup)) - print(" first value: %s, outlier? %s (max: %s)" - % (format_value(unit, first_value), outlier, - format_value(unit, outlier_max))) - print(" sample1(%s): Q1=%s mean=%s Q3=%s stdev=%s MAD=%s" - % (len(sample1), - sample1_str[0], - sample1_str[1], - sample1_str[2], - sample1_str[3], - sample1_str[4])) - print(" sample2(%s): Q1=%s mean=%s Q3=%s stdev=%s MAD=%s" - % (len(sample2), - sample2_str[0], - sample2_str[1], - sample2_str[2], - sample2_str[3], - sample2_str[4])) - print(" diff: Q1=%+.0f%% mean=%+.0f%% Q3=%+.0f%% stdev=%+.0f%% MAD=%+.0f%%" - % (q1_diff * 100, - mean_diff * 100, - q3_diff * 100, - stdev_diff * 100, - mad_diff * 100)) + print( + " first value: %s, outlier? 
%s (max: %s)" + % ( + format_value(unit, first_value), + outlier, + format_value(unit, outlier_max), + ) + ) + print( + " sample1(%s): Q1=%s mean=%s Q3=%s stdev=%s MAD=%s" + % ( + len(sample1), + sample1_str[0], + sample1_str[1], + sample1_str[2], + sample1_str[3], + sample1_str[4], + ) + ) + print( + " sample2(%s): Q1=%s mean=%s Q3=%s stdev=%s MAD=%s" + % ( + len(sample2), + sample2_str[0], + sample2_str[1], + sample2_str[2], + sample2_str[3], + sample2_str[4], + ) + ) + print( + " diff: Q1=%+.0f%% mean=%+.0f%% Q3=%+.0f%% stdev=%+.0f%% MAD=%+.0f%%" + % ( + q1_diff * 100, + mean_diff * 100, + q3_diff * 100, + stdev_diff * 100, + mad_diff * 100, + ) + ) if outlier: return False @@ -205,16 +231,14 @@ def calibrate_warmups(self): else: nwarmup = 1 - unit = self.metadata.get('unit') + unit = self.metadata.get("unit") start = 0 # test_calibrate_warmups() requires at least 2 values per sample while True: total = nwarmup + WARMUP_SAMPLE_SIZE * 2 nvalue = total - len(self.warmups) if nvalue: - self._compute_values(self.warmups, nvalue, - is_warmup=True, - start=start) + self._compute_values(self.warmups, nvalue, is_warmup=True, start=start) start += nvalue if self.test_calibrate_warmups(nwarmup, unit): @@ -222,9 +246,8 @@ def calibrate_warmups(self): if len(self.warmups) >= MAX_WARMUP_VALUES: print("ERROR: failed to calibrate the number of warmups") - values = [format_value(unit, value) - for loops, value in self.warmups] - print("Values (%s): %s" % (len(values), ', '.join(values))) + values = [format_value(unit, value) for loops, value in self.warmups] + print("Values (%s): %s" % (len(values), ", ".join(values))) sys.exit(1) nwarmup += 1 @@ -233,9 +256,9 @@ def calibrate_warmups(self): print() if self.args.recalibrate_warmups: - self.metadata['recalibrate_warmups'] = nwarmup + self.metadata["recalibrate_warmups"] = nwarmup else: - self.metadata['calibrate_warmups'] = nwarmup + self.metadata["calibrate_warmups"] = nwarmup def calibrate_loops(self): args = self.args @@ -247,9 +270,7 @@ def calibrate_loops(self): else: nvalue = 1 nvalue += args.values - self._compute_values(self.warmups, nvalue, - is_warmup=True, - calibrate_loops=True) + self._compute_values(self.warmups, nvalue, is_warmup=True, calibrate_loops=True) if args.verbose: print() @@ -257,9 +278,9 @@ def calibrate_loops(self): print() if args.recalibrate_loops: - self.metadata['recalibrate_loops'] = self.loops + self.metadata["recalibrate_loops"] = self.loops else: - self.metadata['calibrate_loops'] = self.loops + self.metadata["calibrate_loops"] = self.loops def compute_warmups_values(self): args = self.args @@ -275,9 +296,9 @@ def compute_warmups_values(self): def compute(self): args = self.args - self.metadata['name'] = self.name + self.metadata["name"] = self.name if self.inner_loops is not None: - self.metadata['inner_loops'] = self.inner_loops + self.metadata["inner_loops"] = self.inner_loops self.warmups = [] self.values = [] @@ -293,23 +314,25 @@ def compute(self): metadata2.update(self.metadata) self.metadata = metadata2 - self.metadata['loops'] = self.loops + self.metadata["loops"] = self.loops def create_run(self): start_time = time.monotonic() self.compute() - self.metadata['duration'] = time.monotonic() - start_time + self.metadata["duration"] = time.monotonic() - start_time - return pyperf.Run(self.values, - warmups=self.warmups, - metadata=self.metadata, - collect_metadata=False) + return pyperf.Run( + self.values, + warmups=self.warmups, + metadata=self.metadata, + collect_metadata=False, + ) def _set_memory_value(self, 
value): - is_calibration = (not self.values) - self.metadata['unit'] = 'byte' - self.metadata['warmups'] = len(self.warmups) - self.metadata['values'] = len(self.values) + is_calibration = not self.values + self.metadata["unit"] = "byte" + self.metadata["warmups"] = len(self.warmups) + self.metadata["values"] = len(self.values) if is_calibration: values = ((self.loops, value),) self.warmups = values @@ -327,13 +350,16 @@ def __init__(self): def start(self): if MS_WINDOWS: from pyperf._win_memory import get_peak_pagefile_usage + self.get_peak_profile_usage = get_peak_pagefile_usage elif OS_LINUX: from pyperf._linux_memory import PeakMemoryUsageThread + self.mem_thread = PeakMemoryUsageThread() self.mem_thread.start() else: from pyperf._psutil_memory import PeakMemoryUsageThread + self.mem_thread = PeakMemoryUsageThread() self.mem_thread.start() @@ -359,6 +385,7 @@ def compute(self): if args.tracemalloc: import tracemalloc + tracemalloc.start() WorkerTask.compute(self) @@ -368,8 +395,9 @@ def compute(self): tracemalloc.stop() if not traced_peak: - raise RuntimeError("tracemalloc didn't trace any Python " - "memory allocation") + raise RuntimeError( + "tracemalloc didn't trace any Python memory allocation" + ) # drop timings, replace them with the memory peak self._set_memory_value(traced_peak) @@ -381,4 +409,5 @@ def compute(self): def collect_metadata(self): from pyperf._collect_metadata import collect_metadata + return collect_metadata() diff --git a/pyperf/tests/__init__.py b/pyperf/tests/__init__.py index 8ce1bdbe..307a913e 100644 --- a/pyperf/tests/__init__.py +++ b/pyperf/tests/__init__.py @@ -23,11 +23,11 @@ def _capture_stream(name): def capture_stdout(): - return _capture_stream('stdout') + return _capture_stream("stdout") def capture_stderr(): - return _capture_stream('stderr') + return _capture_stream("stderr") @contextlib.contextmanager @@ -55,7 +55,7 @@ def temporary_directory(): def benchmark_as_json(benchmark, compact=True): with temporary_file() as tmp_name: benchmark.dump(tmp_name, compact=compact) - with open(tmp_name, 'r', encoding='utf-8') as tmp: + with open(tmp_name, "r", encoding="utf-8") as tmp: return tmp.read() @@ -65,15 +65,17 @@ def compare_benchmarks(testcase, bench1, bench2): testcase.assertEqual(json1, json2) -ProcResult = collections.namedtuple('ProcResult', 'returncode stdout stderr') +ProcResult = collections.namedtuple("ProcResult", "returncode stdout stderr") def get_output(cmd, **kw): - proc = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - **kw) + proc = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + **kw, + ) stdout, stderr = popen_communicate(proc) return ProcResult(proc.returncode, stdout, stderr) diff --git a/pyperf/tests/mult_list_bench.py b/pyperf/tests/mult_list_bench.py index fd58a175..16545cad 100644 --- a/pyperf/tests/mult_list_bench.py +++ b/pyperf/tests/mult_list_bench.py @@ -2,12 +2,6 @@ import pyperf runner = pyperf.Runner() -runner.timeit("[1]*1000", - stmt="[1]*1000", - duplicate=1024) -runner.timeit("[1,2]*1000", - stmt="[1,2]*1000", - duplicate=1024) -runner.timeit("[1,2,3]*1000", - stmt="[1,2,3]*1000", - duplicate=1024) +runner.timeit("[1]*1000", stmt="[1]*1000", duplicate=1024) +runner.timeit("[1,2]*1000", stmt="[1,2]*1000", duplicate=1024) +runner.timeit("[1,2,3]*1000", stmt="[1,2,3]*1000", duplicate=1024) diff --git a/pyperf/tests/replay.py b/pyperf/tests/replay.py index fb9aea8f..b4154e6b 100644 --- 
a/pyperf/tests/replay.py +++ b/pyperf/tests/replay.py @@ -7,8 +7,7 @@ def get_raw_values(filename, run_id): bench = pyperf.Benchmark.load(filename) run = bench.get_runs()[run_id] inner_loops = run.get_inner_loops() - raw_values = [value * (loops * inner_loops) - for loops, value in run.warmups] + raw_values = [value * (loops * inner_loops) for loops, value in run.warmups] total_loops = run.get_total_loops() raw_values.extend(value * total_loops for value in run.values) return (run, raw_values) @@ -65,16 +64,16 @@ def time_func(self, loops): def add_cmdline_args(cmd, args): cmd.append(args.filename) if args.session_filename: - cmd.extend(('--session-filename', args.session_filename)) + cmd.extend(("--session-filename", args.session_filename)) runner = pyperf.Runner(add_cmdline_args=add_cmdline_args) -runner.argparser.add_argument('filename') -runner.argparser.add_argument('--session-filename', default=None) -runner.argparser.add_argument('--first-run', type=int, default=1) +runner.argparser.add_argument("filename") +runner.argparser.add_argument("--session-filename", default=None) +runner.argparser.add_argument("--first-run", type=int, default=1) args = runner.parse_args() replay = Replay(runner, args.filename) -runner.bench_time_func('bench', replay.time_func) +runner.bench_time_func("bench", replay.time_func) if not args.worker: os.unlink(args.session_filename) diff --git a/pyperf/tests/test_bench.py b/pyperf/tests/test_bench.py index 8fd4e2a1..96a94ee1 100644 --- a/pyperf/tests/test_bench.py +++ b/pyperf/tests/test_bench.py @@ -14,29 +14,26 @@ def create_run(values=None, warmups=None, metadata=None): if values is None: values = (1.0,) if metadata is None: - metadata = {'name': 'bench'} - elif 'name' not in metadata: - metadata['name'] = 'bench' - return pyperf.Run(values, warmups, - metadata=metadata, - collect_metadata=False) + metadata = {"name": "bench"} + elif "name" not in metadata: + metadata["name"] = "bench" + return pyperf.Run(values, warmups, metadata=metadata, collect_metadata=False) class RunTests(unittest.TestCase): def test_attr(self): - run = pyperf.Run((2.0, 3.0), - warmups=((4, 0.5),), - metadata={'loops': 2, 'inner_loops': 5}, - collect_metadata=False) + run = pyperf.Run( + (2.0, 3.0), + warmups=((4, 0.5),), + metadata={"loops": 2, "inner_loops": 5}, + collect_metadata=False, + ) self.assertEqual(run.get_loops(), 2) self.assertEqual(run.get_inner_loops(), 5) self.assertEqual(run.get_total_loops(), 2 * 5) - self.assertEqual(run.values, - (2.0, 3.0)) - self.assertEqual(run._get_raw_values(), - [20.0, 30.0]) - self.assertEqual(run._get_raw_values(warmups=True), - [10.0, 20.0, 30.0]) + self.assertEqual(run.values, (2.0, 3.0)) + self.assertEqual(run._get_raw_values(), [20.0, 30.0]) + self.assertEqual(run._get_raw_values(warmups=True), [10.0, 20.0, 30.0]) run = pyperf.Run((2.0, 3.0), warmups=((1, 1.0),)) self.assertEqual(run.get_loops(), 1) @@ -52,27 +49,26 @@ def test_constructor(self): # number of loops with self.assertRaises(ValueError): - pyperf.Run([1.0], metadata={'loops': -1}, collect_metadata=False) + pyperf.Run([1.0], metadata={"loops": -1}, collect_metadata=False) with self.assertRaises(ValueError): - pyperf.Run([1.0], metadata={'inner_loops': 0}, collect_metadata=False) + pyperf.Run([1.0], metadata={"inner_loops": 0}, collect_metadata=False) # loops type error with self.assertRaises(ValueError): - pyperf.Run([1.0], metadata={'loops': 1.0}, collect_metadata=False) + pyperf.Run([1.0], metadata={"loops": 1.0}, collect_metadata=False) with 
self.assertRaises(ValueError): - pyperf.Run([1.0], metadata={'inner_loops': 1.0}, collect_metadata=False) + pyperf.Run([1.0], metadata={"inner_loops": 1.0}, collect_metadata=False) # metadata value must not be an empty string with self.assertRaises(ValueError): - pyperf.Run([1.0], metadata={'name': ''}, collect_metadata=False) - run = pyperf.Run([1.0], metadata={'load_avg_1min': 0.0}, - collect_metadata=False) - self.assertEqual(run.get_metadata()['load_avg_1min'], 0.0) + pyperf.Run([1.0], metadata={"name": ""}, collect_metadata=False) + run = pyperf.Run([1.0], metadata={"load_avg_1min": 0.0}, collect_metadata=False) + self.assertEqual(run.get_metadata()["load_avg_1min"], 0.0) def test_name(self): # name must be non-empty with self.assertRaises(ValueError): - pyperf.Run([1.0], metadata={'name': ' '}) + pyperf.Run([1.0], metadata={"name": " "}) def test_number_types(self): # ensure that all types of numbers are accepted @@ -80,15 +76,13 @@ def test_number_types(self): run = pyperf.Run([number_type(1)], collect_metadata=False) self.assertIsInstance(run.values[0], number_type) - run = pyperf.Run([5], warmups=[(4, number_type(3))], - collect_metadata=False) + run = pyperf.Run([5], warmups=[(4, number_type(3))], collect_metadata=False) self.assertEqual(run.warmups, ((4, 3),)) self.assertIsInstance(run.warmups[0][1], number_type) def test_get_date(self): - date = datetime.datetime.now().isoformat(' ') - run = pyperf.Run([1.0], metadata={'date': date}, - collect_metadata=False) + date = datetime.datetime.now().isoformat(" ") + run = pyperf.Run([1.0], metadata={"date": date}, collect_metadata=False) self.assertEqual(run._get_date(), date) run = pyperf.Run([1.0], collect_metadata=False) @@ -110,7 +104,7 @@ def test_name(self): pyperf.Benchmark([run]) def test_add_run(self): - metadata = {'name': 'bench', 'hostname': 'toto'} + metadata = {"name": "bench", "hostname": "toto"} runs = [create_run(metadata=metadata)] bench = pyperf.Benchmark(runs) @@ -120,17 +114,17 @@ def test_add_run(self): bench.add_run(create_run(metadata=metadata)) # incompatible: name is different - metadata = {'name': 'bench2', 'hostname': 'toto'} + metadata = {"name": "bench2", "hostname": "toto"} with self.assertRaises(ValueError): bench.add_run(create_run(metadata=metadata)) # incompatible: hostname is different - metadata = {'name': 'bench', 'hostname': 'homer'} + metadata = {"name": "bench", "hostname": "homer"} with self.assertRaises(ValueError): bench.add_run(create_run(metadata=metadata)) # compatible (same metadata) - metadata = {'name': 'bench', 'hostname': 'toto'} + metadata = {"name": "bench", "hostname": "toto"} bench.add_run(create_run(metadata=metadata)) def test_benchmark(self): @@ -138,18 +132,22 @@ def test_benchmark(self): raw_values = tuple(value * 3 * 20 for value in values) runs = [] for value in values: - run = pyperf.Run([value], - warmups=[(1, 3.0)], - metadata={'key': 'value', - 'loops': 20, - 'inner_loops': 3, - 'name': 'mybench'}, - collect_metadata=False) + run = pyperf.Run( + [value], + warmups=[(1, 3.0)], + metadata={ + "key": "value", + "loops": 20, + "inner_loops": 3, + "name": "mybench", + }, + collect_metadata=False, + ) runs.append(run) bench = pyperf.Benchmark(runs) self.assertEqual(bench.get_values(), values) - self.assertEqual(bench.get_unit(), 'second') + self.assertEqual(bench.get_unit(), "second") self.assertEqual(bench._get_raw_values(), list(raw_values)) self.assertEqual(bench.get_nrun(), 3) @@ -165,20 +163,18 @@ def test_benchmark(self): self.check_runs(bench, [(1, 3.0)], values) 
        self.assertEqual(bench.get_name(), "mybench")
-        self.assertEqual(bench.get_metadata(),
-                         {'key': 'value',
-                          'name': 'mybench',
-                          'loops': 20,
-                          'inner_loops': 3})
-        self.assertEqual(repr(bench),
-                         "<Benchmark 'mybench' with 3 runs>")
+        self.assertEqual(
+            bench.get_metadata(),
+            {"key": "value", "name": "mybench", "loops": 20, "inner_loops": 3},
+        )
+        self.assertEqual(repr(bench), "<Benchmark 'mybench' with 3 runs>")

     def test_get_unit(self):
-        run = pyperf.Run((1.0,),
-                         metadata={'name': 'bench', 'unit': 'byte'},
-                         collect_metadata=False)
+        run = pyperf.Run(
+            (1.0,), metadata={"name": "bench", "unit": "byte"}, collect_metadata=False
+        )
         bench = pyperf.Benchmark([run])
-        self.assertEqual(bench.get_unit(), 'byte')
+        self.assertEqual(bench.get_unit(), "byte")

     def create_dummy_benchmark(self):
         runs = [create_run()]
@@ -215,10 +211,10 @@ def test_dump_replace(self):

     def test_dump_gzip(self):
         bench = self.create_dummy_benchmark()

-        with tests.temporary_file(suffix='.gz') as tmp_name:
+        with tests.temporary_file(suffix=".gz") as tmp_name:
             bench.dump(tmp_name)

-            with gzip.open(tmp_name, 'rt', encoding='utf-8') as fp:
+            with gzip.open(tmp_name, "rt", encoding="utf-8") as fp:
                 json = fp.read()

             expected = tests.benchmark_as_json(bench)
@@ -227,7 +223,7 @@ def test_dump_gzip(self):
     def test_load_gzip(self):
         bench = self.create_dummy_benchmark()

-        with tests.temporary_file(suffix='.gz') as tmp_name:
+        with tests.temporary_file(suffix=".gz") as tmp_name:
             bench.dump(tmp_name)

             bench2 = pyperf.Benchmark.load(tmp_name)
@@ -245,16 +241,14 @@ def test_add_runs(self):

     def test__get_nvalue_per_run(self):
         # exact
-        runs = [create_run([1.0, 2.0, 3.0]),
-                create_run([4.0, 5.0, 6.0])]
+        runs = [create_run([1.0, 2.0, 3.0]), create_run([4.0, 5.0, 6.0])]
         bench = pyperf.Benchmark(runs)
         nvalue = bench._get_nvalue_per_run()
         self.assertEqual(nvalue, 3)
         self.assertIsInstance(nvalue, int)

         # average
-        runs = [create_run([1.0, 2.0, 3.0, 4.0]),
-                create_run([5.0, 6.0])]
+        runs = [create_run([1.0, 2.0, 3.0, 4.0]), create_run([5.0, 6.0])]
         bench = pyperf.Benchmark(runs)
         nvalue = bench._get_nvalue_per_run()
         self.assertEqual(nvalue, 3.0)
@@ -262,16 +256,20 @@ def test_get_warmups(self):
         # exact
-        runs = [create_run((1.0, 2.0, 3.0), warmups=[(1, 1.0)]),
-                create_run((5.0, 6.0), warmups=[(1, 4.0)])]
+        runs = [
+            create_run((1.0, 2.0, 3.0), warmups=[(1, 1.0)]),
+            create_run((5.0, 6.0), warmups=[(1, 4.0)]),
+        ]
         bench = pyperf.Benchmark(runs)
         nwarmup = bench._get_nwarmup()
         self.assertEqual(nwarmup, 1)
         self.assertIsInstance(nwarmup, int)

         # average
-        runs = [create_run([3.0], warmups=[(1, 1.0), (1, 2.0)]),
-                create_run([4.0, 5.0, 6.0])]
+        runs = [
+            create_run([3.0], warmups=[(1, 1.0), (1, 2.0)]),
+            create_run([4.0, 5.0, 6.0]),
+        ]
         bench = pyperf.Benchmark(runs)
         nwarmup = bench._get_nwarmup()
         self.assertEqual(nwarmup, 1)
@@ -293,8 +291,10 @@ def test_get_runs(self):

     def test_get_total_duration(self):
         # use duration metadata
-        runs = [create_run([0.1], metadata={'duration': 1.0}),
-                create_run([0.1], metadata={'duration': 2.0})]
+        runs = [
+            create_run([0.1], metadata={"duration": 1.0}),
+            create_run([0.1], metadata={"duration": 2.0}),
+        ]
         bench = pyperf.Benchmark(runs)
         self.assertEqual(bench.get_total_duration(), 3.0)

@@ -306,64 +306,80 @@ def test_get_dates(self):
         bench = pyperf.Benchmark([create_run()])
         self.assertIsNone(bench.get_dates())

-        metadata = {'date': '2016-07-20T14:06:00', 'duration': 60.0}
+        metadata = {"date": "2016-07-20T14:06:00", "duration": 60.0}
         bench = pyperf.Benchmark([create_run(metadata=metadata)])
-        self.assertEqual(bench.get_dates(),
-
(datetime.datetime(2016, 7, 20, 14, 6, 0), - datetime.datetime(2016, 7, 20, 14, 7, 0))) - - metadata = {'date': '2016-07-20T14:10:00', 'duration': 60.0} + self.assertEqual( + bench.get_dates(), + ( + datetime.datetime(2016, 7, 20, 14, 6, 0), + datetime.datetime(2016, 7, 20, 14, 7, 0), + ), + ) + + metadata = {"date": "2016-07-20T14:10:00", "duration": 60.0} bench.add_run(create_run(metadata=metadata)) - self.assertEqual(bench.get_dates(), - (datetime.datetime(2016, 7, 20, 14, 6, 0), - datetime.datetime(2016, 7, 20, 14, 11, 0))) + self.assertEqual( + bench.get_dates(), + ( + datetime.datetime(2016, 7, 20, 14, 6, 0), + datetime.datetime(2016, 7, 20, 14, 11, 0), + ), + ) def test_extract_metadata(self): warmups = ((1, 5.0),) - runs = [pyperf.Run((1.0,), warmups=warmups, - metadata={'name': 'bench', 'mem_usage': 5}, - collect_metadata=False), - pyperf.Run((2.0,), warmups=warmups, - metadata={'name': 'bench', 'mem_usage': 13}, - collect_metadata=False)] + runs = [ + pyperf.Run( + (1.0,), + warmups=warmups, + metadata={"name": "bench", "mem_usage": 5}, + collect_metadata=False, + ), + pyperf.Run( + (2.0,), + warmups=warmups, + metadata={"name": "bench", "mem_usage": 13}, + collect_metadata=False, + ), + ] bench = pyperf.Benchmark(runs) - bench._extract_metadata('mem_usage') + bench._extract_metadata("mem_usage") self.assertEqual(bench.get_values(), (5, 13)) for run in bench.get_runs(): self.assertEqual(run.warmups, ()) def test_remove_all_metadata(self): - run = pyperf.Run((1.0,), - metadata={'name': 'bench', 'os': 'win', 'unit': 'byte'}, - collect_metadata=False) + run = pyperf.Run( + (1.0,), + metadata={"name": "bench", "os": "win", "unit": "byte"}, + collect_metadata=False, + ) bench = pyperf.Benchmark([run]) - self.assertEqual(bench.get_metadata(), - {'name': 'bench', 'os': 'win', 'unit': 'byte'}) + self.assertEqual( + bench.get_metadata(), {"name": "bench", "os": "win", "unit": "byte"} + ) bench._remove_all_metadata() - self.assertEqual(bench.get_metadata(), - {'name': 'bench', 'unit': 'byte'}) + self.assertEqual(bench.get_metadata(), {"name": "bench", "unit": "byte"}) def test_update_metadata(self): runs = [] for value in (1.0, 2.0, 3.0): - runs.append(pyperf.Run((value,), - metadata={'name': 'bench'}, - collect_metadata=False)) + runs.append( + pyperf.Run((value,), metadata={"name": "bench"}, collect_metadata=False) + ) bench = pyperf.Benchmark(runs) - self.assertEqual(bench.get_metadata(), - {'name': 'bench'}) + self.assertEqual(bench.get_metadata(), {"name": "bench"}) - bench.update_metadata({'os': 'linux'}) - self.assertEqual(bench.get_metadata(), - {'os': 'linux', 'name': 'bench'}) + bench.update_metadata({"os": "linux"}) + self.assertEqual(bench.get_metadata(), {"os": "linux", "name": "bench"}) def test_update_metadata_inner_loops(self): - run = create_run(metadata={'inner_loops': 5}) + run = create_run(metadata={"inner_loops": 5}) bench = pyperf.Benchmark([run]) with self.assertRaises(ValueError): - bench.update_metadata({'inner_loops': 8}) + bench.update_metadata({"inner_loops": 8}) def test_stats(self): values = [float(value) for value in range(1, 96)] @@ -403,33 +419,33 @@ def test_stats_single(self): class TestBenchmarkSuite(unittest.TestCase): def benchmark(self, name): - run = pyperf.Run([1.0, 1.5, 2.0], - metadata={'name': name}, - collect_metadata=False) + run = pyperf.Run( + [1.0, 1.5, 2.0], metadata={"name": name}, collect_metadata=False + ) return pyperf.Benchmark([run]) def test_suite(self): - telco = self.benchmark('telco') - go = self.benchmark('go') + telco = 
self.benchmark("telco") + go = self.benchmark("go") suite = pyperf.BenchmarkSuite([telco, go]) self.assertIsNone(suite.filename) self.assertEqual(len(suite), 2) self.assertEqual(suite.get_benchmarks(), [telco, go]) - self.assertEqual(suite.get_benchmark('go'), go) + self.assertEqual(suite.get_benchmark("go"), go) with self.assertRaises(KeyError): - suite.get_benchmark('non_existent') + suite.get_benchmark("non_existent") def create_dummy_suite(self): - telco = self.benchmark('telco') - go = self.benchmark('go') + telco = self.benchmark("telco") + go = self.benchmark("go") return pyperf.BenchmarkSuite([telco, go]) def check_dummy_suite(self, suite): benchmarks = suite.get_benchmarks() self.assertEqual(len(benchmarks), 2) - self.assertEqual(benchmarks[0].get_name(), 'telco') - self.assertEqual(benchmarks[1].get_name(), 'go') + self.assertEqual(benchmarks[0].get_name(), "telco") + self.assertEqual(benchmarks[1].get_name(), "go") def test_json(self): suite = self.create_dummy_suite() @@ -459,17 +475,17 @@ def test_dump_replace(self): def test_add_runs(self): # bench 1 values = (1.0, 2.0, 3.0) - run = pyperf.Run(values, metadata={'name': "bench"}) + run = pyperf.Run(values, metadata={"name": "bench"}) bench = pyperf.Benchmark([run]) suite = pyperf.BenchmarkSuite([bench]) # bench 2 values2 = (4.0, 5.0, 6.0) - run = pyperf.Run(values2, metadata={'name': "bench"}) + run = pyperf.Run(values2, metadata={"name": "bench"}) bench2 = pyperf.Benchmark([run]) suite.add_runs(bench2) - bench = suite.get_benchmark('bench') + bench = suite.get_benchmark("bench") self.assertEqual(bench.get_values(), values + values2) def test_get_total_duration(self): @@ -484,36 +500,43 @@ def test_get_total_duration(self): self.assertEqual(suite.get_total_duration(), 3.0) def test_get_dates(self): - run = create_run(metadata={'date': '2016-07-20T14:06:00', - 'duration': 60.0, - 'name': 'bench1'}) + run = create_run( + metadata={"date": "2016-07-20T14:06:00", "duration": 60.0, "name": "bench1"} + ) bench = pyperf.Benchmark([run]) suite = pyperf.BenchmarkSuite([bench]) - self.assertEqual(suite.get_dates(), - (datetime.datetime(2016, 7, 20, 14, 6, 0), - datetime.datetime(2016, 7, 20, 14, 7, 0))) - - run = create_run(metadata={'date': '2016-07-20T14:10:00', - 'duration': 60.0, - 'name': 'bench2'}) + self.assertEqual( + suite.get_dates(), + ( + datetime.datetime(2016, 7, 20, 14, 6, 0), + datetime.datetime(2016, 7, 20, 14, 7, 0), + ), + ) + + run = create_run( + metadata={"date": "2016-07-20T14:10:00", "duration": 60.0, "name": "bench2"} + ) bench = pyperf.Benchmark([run]) suite.add_benchmark(bench) - self.assertEqual(suite.get_dates(), - (datetime.datetime(2016, 7, 20, 14, 6, 0), - datetime.datetime(2016, 7, 20, 14, 11, 0))) + self.assertEqual( + suite.get_dates(), + ( + datetime.datetime(2016, 7, 20, 14, 6, 0), + datetime.datetime(2016, 7, 20, 14, 11, 0), + ), + ) def test_get_metadata(self): benchmarks = [] - for name in ('a', 'b'): - run = pyperf.Run([1.0], - metadata={'name': name, 'os': 'linux'}, - collect_metadata=False) + for name in ("a", "b"): + run = pyperf.Run( + [1.0], metadata={"name": name, "os": "linux"}, collect_metadata=False + ) bench = pyperf.Benchmark([run]) benchmarks.append(bench) suite = pyperf.BenchmarkSuite(benchmarks) - self.assertEqual(suite.get_metadata(), - {'os': 'linux'}) + self.assertEqual(suite.get_metadata(), {"os": "linux"}) if __name__ == "__main__": diff --git a/pyperf/tests/test_cli.py b/pyperf/tests/test_cli.py index 4452bca1..cb31ff31 100644 --- a/pyperf/tests/test_cli.py +++ 
b/pyperf/tests/test_cli.py
@@ -6,25 +6,28 @@ class CLITests(unittest.TestCase):
     def test_format_result(self):
-        run = pyperf.Run([1.0, 1.5, 2.0],
-                         warmups=[(1, 3.0)],
-                         metadata={'name': 'mybench'},
-                         collect_metadata=False)
+        run = pyperf.Run(
+            [1.0, 1.5, 2.0],
+            warmups=[(1, 3.0)],
+            metadata={"name": "mybench"},
+            collect_metadata=False,
+        )
         bench = pyperf.Benchmark([run])
-        self.assertEqual(cli.format_result_value(bench),
-                         '1.50 sec +- 0.50 sec')
-        self.assertEqual(cli.format_result(bench),
-                         'Mean +- std dev: 1.50 sec +- 0.50 sec')
+        self.assertEqual(cli.format_result_value(bench), "1.50 sec +- 0.50 sec")
+        self.assertEqual(
+            cli.format_result(bench), "Mean +- std dev: 1.50 sec +- 0.50 sec"
+        )

     def test_format_result_calibration(self):
-        run = pyperf.Run([], warmups=[(100, 1.0)],
-                         metadata={'name': 'bench', 'loops': 100},
-                         collect_metadata=False)
+        run = pyperf.Run(
+            [],
+            warmups=[(100, 1.0)],
+            metadata={"name": "bench", "loops": 100},
+            collect_metadata=False,
+        )
         bench = pyperf.Benchmark([run])
-        self.assertEqual(cli.format_result_value(bench),
-                         '<calibration: 100 loops>')
-        self.assertEqual(cli.format_result(bench),
-                         'Calibration: 100 loops')
+        self.assertEqual(cli.format_result_value(bench), "<calibration: 100 loops>")
+        self.assertEqual(cli.format_result(bench), "Calibration: 100 loops")

         self.assertRaises(ValueError, bench.median)
diff --git a/pyperf/tests/test_examples.py
index 6a9cccbb..1376edec 100644
--- a/pyperf/tests/test_examples.py
+++ b/pyperf/tests/test_examples.py
@@ -6,18 +6,18 @@
 from pyperf import tests

-ROOT_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))
-EXAMPLES_DIR = os.path.join(ROOT_DIR, 'doc', 'examples')
+ROOT_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), "..", ".."))
+EXAMPLES_DIR = os.path.join(ROOT_DIR, "doc", "examples")


 class ExampleTests(unittest.TestCase):
     TESTED = set()

     # It's not easy to test a GUI
-    IGNORED = {'hist_scipy.py', 'plot.py'}
+    IGNORED = {"hist_scipy.py", "plot.py"}

     @classmethod
     def tearDownClass(cls):
-        scripts = glob.glob(os.path.join(EXAMPLES_DIR, '*.py'))
+        scripts = glob.glob(os.path.join(EXAMPLES_DIR, "*.py"))
         scripts = list(map(os.path.basename, scripts))
         not_tested = set(scripts) - cls.IGNORED - cls.TESTED
         if not_tested:
@@ -30,67 +30,71 @@ def check_command(self, script, args):
         cmd = [sys.executable] + [script] + args

         proc = tests.get_output(cmd)
-        self.assertRegex(proc.stdout,
-                         r'Mean \+- std dev: [0-9.]+ [mun]s '
-                         r'\+- [0-9.]+ [mun]s\n')
+        self.assertRegex(
+            proc.stdout,
+            r"Mean \+- std dev: [0-9.]+ [mun]s " + r"\+- [0-9.]+ [mun]s\n",
+        )
         self.assertEqual(proc.returncode, 0)

     def test_bench_func(self):
-        script = 'bench_func.py'
+        script = "bench_func.py"
         # Use -w1 --min-time=0.001 to reduce the duration of the test
-        args = ['-p2', '-w1', '--min-time=0.001']
+        args = ["-p2", "-w1", "--min-time=0.001"]
         self.check_command(script, args)

     def test_bench_func_no_warmup(self):
-        script = 'bench_func.py'
-        args = ['-p2', '-w0', '--min-time=0.001']
+        script = "bench_func.py"
+        args = ["-p2", "-w0", "--min-time=0.001"]
         self.check_command(script, args)

     def test_bench_async_func(self):
-        script = 'bench_async_func.py'
+        script = "bench_async_func.py"
         # Use -w1 --min-time=0.001 to reduce the duration of the test
-        args = ['-p2', '-w1', '--min-time=0.001']
+        args = ["-p2", "-w1", "--min-time=0.001"]
         self.check_command(script, args)

     def test_bench_async_func_with_loop_factory(self):
-        script = 'bench_async_func_with_loop_factory.py'
+        script = "bench_async_func_with_loop_factory.py"
         # Use -w1
--min-time=0.001 to reduce the duration of the test - args = ['-p2', '-w1', '--min-time=0.001'] + args = ["-p2", "-w1", "--min-time=0.001"] self.check_command(script, args) def test_bench_time_func(self): - script = 'bench_time_func.py' - args = ['-p2', '-w1', '--min-time=0.001'] + script = "bench_time_func.py" + args = ["-p2", "-w1", "--min-time=0.001"] self.check_command(script, args) def test_bench_command(self): - script = 'bench_command.py' - args = ['-p2', '-w1', '--min-time=0.001'] + script = "bench_command.py" + args = ["-p2", "-w1", "--min-time=0.001"] self.check_command(script, args) def test_bench_timeit(self): - script = 'bench_timeit.py' - args = ['-p2', '-w1', '--min-time=0.001'] + script = "bench_timeit.py" + args = ["-p2", "-w1", "--min-time=0.001"] self.check_command(script, args) def test_export_csv(self): - script = 'export_csv.py' + script = "export_csv.py" self.TESTED.add(script) script = os.path.join(EXAMPLES_DIR, script) - json = os.path.join(os.path.dirname(__file__), 'telco.json') + json = os.path.join(os.path.dirname(__file__), "telco.json") with tests.temporary_file() as tmpname: cmd = [sys.executable, script, json, tmpname] exitcode = tests.run_command(cmd) self.assertEqual(exitcode, 0) - with open(tmpname, 'r') as fp: + with open(tmpname, "r") as fp: lines = fp.readlines() lines = [line.rstrip() for line in lines] - expected = ['0.02263077381239782', - '0.022488519346734393', - '0.02247294420317303'] + expected = [ + "0.02263077381239782", + "0.022488519346734393", + "0.02247294420317303", + ] self.assertEqual(lines, expected) diff --git a/pyperf/tests/test_metadata.py b/pyperf/tests/test_metadata.py index 0e08304a..a6df9545 100644 --- a/pyperf/tests/test_metadata.py +++ b/pyperf/tests/test_metadata.py @@ -8,12 +8,9 @@ from pyperf._metadata import METADATA_VALUE_TYPES -MANDATORY_METADATA = [ - 'date', - 'python_implementation', 'python_version', - 'platform'] -if sys.platform.startswith('linux'): - MANDATORY_METADATA.append('aslr') +MANDATORY_METADATA = ["date", "python_implementation", "python_version", "platform"] +if sys.platform.startswith("linux"): + MANDATORY_METADATA.append("aslr") class TestMetadata(unittest.TestCase): @@ -26,23 +23,23 @@ def test_collect_metadata(self): for key, value in metadata.items(): # test key self.assertIsInstance(key, str) - self.assertRegex(key, '^[a-z][a-z0-9_]+$') + self.assertRegex(key, "^[a-z][a-z0-9_]+$") # test value self.assertIsInstance(value, METADATA_VALUE_TYPES) - self.assertNotEqual(value, '') + self.assertNotEqual(value, "") if isinstance(value, str): self.assertEqual(value.strip(), value) - self.assertNotIn('\n', value) + self.assertNotIn("\n", value) def test_collect_cpu_affinity(self): metadata = {} perf_metadata.collect_cpu_affinity(metadata, {2, 3}, 4) - self.assertEqual(metadata['cpu_affinity'], '2-3') + self.assertEqual(metadata["cpu_affinity"], "2-3") metadata = {} perf_metadata.collect_cpu_affinity(metadata, {0, 1, 2, 3}, 4) - self.assertNotIn('cpu_affinity', metadata) + self.assertNotIn("cpu_affinity", metadata) class CpuFunctionsTests(unittest.TestCase): @@ -148,83 +145,81 @@ class CpuFunctionsTests(unittest.TestCase): """) def test_cpu_config(self): - nohz_full = '2-3\n' + nohz_full = "2-3\n" def mock_open(filename, *args, **kw): - filename = filename.replace('\\', '/') - if filename == '/sys/devices/system/cpu/cpu0/cpufreq/scaling_driver': - data = 'DRIVER\n' - elif filename == '/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor': - data = 'GOVERNOR\n' - elif 
filename.startswith('/sys/devices/system/cpu/nohz_full'): + filename = filename.replace("\\", "/") + if filename == "/sys/devices/system/cpu/cpu0/cpufreq/scaling_driver": + data = "DRIVER\n" + elif filename == "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor": + data = "GOVERNOR\n" + elif filename.startswith("/sys/devices/system/cpu/nohz_full"): data = nohz_full - elif filename.startswith('/sys/devices/system/cpu/cpu2'): + elif filename.startswith("/sys/devices/system/cpu/cpu2"): raise OSError - elif filename == '/sys/devices/system/cpu/cpuidle/current_driver': - data = 'IDLE_DRV\n' + elif filename == "/sys/devices/system/cpu/cpuidle/current_driver": + data = "IDLE_DRV\n" else: raise ValueError("unexpect open: %r" % filename) return io.StringIO(data) - with mock.patch('pyperf._collect_metadata.get_isolated_cpus', return_value=[2]): - with mock.patch('pyperf._utils.open', create=True, side_effect=mock_open): + with mock.patch("pyperf._collect_metadata.get_isolated_cpus", return_value=[2]): + with mock.patch("pyperf._utils.open", create=True, side_effect=mock_open): metadata = {} perf_metadata.collect_cpu_config(metadata, [0, 2]) - self.assertEqual(metadata['cpu_config'], - '0=driver:DRIVER, governor:GOVERNOR; ' - '2=nohz_full, isolated; ' - 'idle:IDLE_DRV') - - nohz_full = ' (null)\n' - with mock.patch('pyperf._collect_metadata.get_isolated_cpus'): - with mock.patch('pyperf._utils.open', create=True, side_effect=mock_open): + self.assertEqual( + metadata["cpu_config"], + "0=driver:DRIVER, governor:GOVERNOR; 2=nohz_full, isolated; idle:IDLE_DRV", + ) + + nohz_full = " (null)\n" + with mock.patch("pyperf._collect_metadata.get_isolated_cpus"): + with mock.patch("pyperf._utils.open", create=True, side_effect=mock_open): metadata = {} perf_metadata.collect_cpu_config(metadata, [0, 2]) - self.assertEqual(metadata['cpu_config'], - '0=driver:DRIVER, governor:GOVERNOR; ' - 'idle:IDLE_DRV') + self.assertEqual( + metadata["cpu_config"], "0=driver:DRIVER, governor:GOVERNOR; idle:IDLE_DRV" + ) def test_intel_cpu_frequencies(self): def mock_open(filename, *args, **kw): - filename = filename.replace('\\', '/') - if filename == '/proc/cpuinfo': + filename = filename.replace("\\", "/") + if filename == "/proc/cpuinfo": data = self.INTEL_CPU_INFO - elif filename == '/sys/devices/system/cpu/cpu0/cpufreq/scaling_driver': - data = 'DRIVER\n' - elif filename == '/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor': - data = 'GOVERNOR\n' - elif filename.startswith('/sys/devices/system/cpu/cpu2'): + elif filename == "/sys/devices/system/cpu/cpu0/cpufreq/scaling_driver": + data = "DRIVER\n" + elif filename == "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor": + data = "GOVERNOR\n" + elif filename.startswith("/sys/devices/system/cpu/cpu2"): raise OSError else: raise ValueError("unexpect open: %r" % filename) return io.StringIO(data) - with mock.patch('pyperf._utils.open', create=True, side_effect=mock_open): + with mock.patch("pyperf._utils.open", create=True, side_effect=mock_open): metadata = {} perf_metadata.collect_cpu_freq(metadata, [0, 2]) perf_metadata.collect_cpu_model(metadata) - self.assertEqual(metadata['cpu_freq'], - '0=1288 MHz; 2=1200 MHz') - self.assertEqual(metadata['cpu_model_name'], - 'Intel(R) Core(TM) i7-3520M CPU @ 2.90GHz') + self.assertEqual(metadata["cpu_freq"], "0=1288 MHz; 2=1200 MHz") + self.assertEqual( + metadata["cpu_model_name"], "Intel(R) Core(TM) i7-3520M CPU @ 2.90GHz" + ) def test_power8_cpu_frequencies(self): def mock_open(filename, *args, **kw): - filename = 
filename.replace('\\', '/') - if filename == '/proc/cpuinfo': + filename = filename.replace("\\", "/") + if filename == "/proc/cpuinfo": data = self.POWER8_CPUINFO else: raise ValueError("unexpect open: %r" % filename) return io.StringIO(data) - with mock.patch('pyperf._utils.open', create=True, side_effect=mock_open): + with mock.patch("pyperf._utils.open", create=True, side_effect=mock_open): metadata = {} perf_metadata.collect_cpu_freq(metadata, [0, 159]) perf_metadata.collect_cpu_model(metadata) - self.assertEqual(metadata['cpu_freq'], - '0,159=3425 MHz') - self.assertEqual(metadata['cpu_machine'], - 'PowerNV 8247-22L') + self.assertEqual(metadata["cpu_freq"], "0,159=3425 MHz") + self.assertEqual(metadata["cpu_machine"], "PowerNV 8247-22L") if __name__ == "__main__": diff --git a/pyperf/tests/test_misc.py b/pyperf/tests/test_misc.py index 94981b38..092160e7 100644 --- a/pyperf/tests/test_misc.py +++ b/pyperf/tests/test_misc.py @@ -10,28 +10,29 @@ class MiscTests(unittest.TestCase): def test_version_tuple(self): self.assertIsInstance(pyperf.VERSION, tuple) - self.assertTrue(all(isinstance(part, int) for part in pyperf.VERSION), - pyperf.VERSION) + self.assertTrue( + all(isinstance(part, int) for part in pyperf.VERSION), pyperf.VERSION + ) def test_version_str(self): self.assertIsInstance(pyperf.__version__, str) - self.assertEqual(pyperf.__version__, - '.'.join(str(part) for part in pyperf.VERSION)) + self.assertEqual( + pyperf.__version__, ".".join(str(part) for part in pyperf.VERSION) + ) def test_format_metadata(self): - self.assertEqual(pyperf.format_metadata('loops', 2 ** 24), - '2^24') + self.assertEqual(pyperf.format_metadata("loops", 2**24), "2^24") def test_python_implementation(self): name = pyperf.python_implementation() self.assertIsInstance(name, str) - self.assertRegex(name, '^[a-z]+$') + self.assertRegex(name, "^[a-z]+$") def test_python_has_jit(self): jit = pyperf.python_has_jit() self.assertIsInstance(jit, bool) - @unittest.skipUnless(hasattr(os, 'symlink'), 'need os.symlink') + @unittest.skipUnless(hasattr(os, "symlink"), "need os.symlink") def test_abs_executable(self): with tests.temporary_file() as tmpname: tmpname = os.path.realpath(tmpname) @@ -41,30 +42,25 @@ def test_abs_executable(self): except (OSError, NotImplementedError): self.skipTest("os.symlink() failed") - self.assertEqual(utils.abs_executable(tmpname), - tmpname) + self.assertEqual(utils.abs_executable(tmpname), tmpname) def test_parse_run_list(self): parse_run_list = utils.parse_run_list with self.assertRaises(ValueError): - parse_run_list('') + parse_run_list("") with self.assertRaises(ValueError): - parse_run_list('0') - self.assertEqual(parse_run_list('1'), - [0]) - self.assertEqual(parse_run_list('1-2,5-6'), - [0, 1, 4, 5]) - self.assertEqual(parse_run_list('1,3,7'), - [0, 2, 6]) + parse_run_list("0") + self.assertEqual(parse_run_list("1"), [0]) + self.assertEqual(parse_run_list("1-2,5-6"), [0, 1, 4, 5]) + self.assertEqual(parse_run_list("1,3,7"), [0, 2, 6]) # tolerate spaces - self.assertEqual(parse_run_list(' 1 , 2 '), - [0, 1]) + self.assertEqual(parse_run_list(" 1 , 2 "), [0, 1]) # errors - self.assertRaises(ValueError, parse_run_list, 'x') - self.assertRaises(ValueError, parse_run_list, '1,') + self.assertRaises(ValueError, parse_run_list, "x") + self.assertRaises(ValueError, parse_run_list, "1,") def test_setup_version(self): from importlib.metadata import version @@ -72,13 +68,14 @@ def test_setup_version(self): self.assertEqual(pyperf.__version__, version("pyperf")) def 
test_doc_version(self): - doc_path = os.path.join(os.path.dirname(__file__), '..', '..', 'doc') + doc_path = os.path.join(os.path.dirname(__file__), "..", "..", "doc") doc_path = os.path.realpath(doc_path) old_path = sys.path[:] try: sys.path.insert(0, doc_path) import conf + self.assertEqual(pyperf.__version__, conf.version) self.assertEqual(pyperf.__version__, conf.release) finally: diff --git a/pyperf/tests/test_perf_cli.py b/pyperf/tests/test_perf_cli.py index fbb94037..ab3fcd67 100644 --- a/pyperf/tests/test_perf_cli.py +++ b/pyperf/tests/test_perf_cli.py @@ -8,7 +8,7 @@ TESTDIR = os.path.dirname(__file__) -TELCO = os.path.join(TESTDIR, 'telco.json') +TELCO = os.path.join(TESTDIR, "telco.json") class BaseTestCase: @@ -16,37 +16,35 @@ class BaseTestCase: def create_bench(self, values, metadata=None): if metadata is None: - metadata = {'name': 'bench'} - elif 'name' not in metadata: - metadata['name'] = 'bench' + metadata = {"name": "bench"} + elif "name" not in metadata: + metadata["name"] = "bench" runs = [] for value in values: - run = pyperf.Run([value], - metadata=metadata, - collect_metadata=False) + run = pyperf.Run([value], metadata=metadata, collect_metadata=False) runs.append(run) return pyperf.Benchmark(runs) def run_command(self, *args, **kwargs): - cmd = [sys.executable, '-m', 'pyperf'] + cmd = [sys.executable, "-m", "pyperf"] cmd.extend(args) proc = tests.get_output(cmd, **kwargs) - self.assertEqual(proc.stderr, '') + self.assertEqual(proc.stderr, "") self.assertEqual(proc.returncode, 0) return proc.stdout class TestPerfCLI(BaseTestCase, unittest.TestCase): def create_suite(self): - bench1 = self.create_bench((1.0, 1.5, 2.0), - metadata={'hostname': 'toto', - 'python_version': '2.7', - 'name': 'py36'}) - bench2 = self.create_bench((1.5, 2.0, 2.5), - metadata={'hostname': 'toto', - 'python_version': '3.4', - 'name': 'py38'}) + bench1 = self.create_bench( + (1.0, 1.5, 2.0), + metadata={"hostname": "toto", "python_version": "2.7", "name": "py36"}, + ) + bench2 = self.create_bench( + (1.5, 2.0, 2.5), + metadata={"hostname": "toto", "python_version": "3.4", "name": "py38"}, + ) return pyperf.BenchmarkSuite([bench1, bench2]) def test_show_common_metadata(self): @@ -54,7 +52,7 @@ def test_show_common_metadata(self): with tests.temporary_file() as tmp_name: suite.dump(tmp_name) - stdout = self.run_command('show', '-q', '--metadata', tmp_name) + stdout = self.run_command("show", "-q", "--metadata", tmp_name) expected = textwrap.dedent(""" Common metadata @@ -85,7 +83,7 @@ def test_metadata(self): with tests.temporary_file() as tmp_name: suite.dump(tmp_name) - stdout = self.run_command('metadata', '-q', tmp_name) + stdout = self.run_command("metadata", "-q", tmp_name) expected = textwrap.dedent(""" Common metadata @@ -109,8 +107,8 @@ def test_metadata(self): def compare(self, action, ref_result, changed_result, *args): with tests.temporary_directory() as tmpdir: - ref_name = os.path.join(tmpdir, 'ref.json') - changed_name = os.path.join(tmpdir, 'changed.json') + ref_name = os.path.join(tmpdir, "ref.json") + changed_name = os.path.join(tmpdir, "changed.json") ref_result.dump(ref_name) changed_result.dump(changed_name) @@ -120,118 +118,106 @@ def compare(self, action, ref_result, changed_result, *args): return stdout def test_compare_to(self): - ref_result = self.create_bench((1.0, 1.5, 2.0), - metadata={'name': 'telco'}) + ref_result = self.create_bench((1.0, 1.5, 2.0), metadata={"name": "telco"}) - changed_result = self.create_bench((1.5, 2.0, 2.5), - metadata={'name': 'telco'}) + 
changed_result = self.create_bench((1.5, 2.0, 2.5), metadata={"name": "telco"}) - stdout = self.compare('compare_to', ref_result, changed_result, '-v') + stdout = self.compare("compare_to", ref_result, changed_result, "-v") - expected = ('Mean +- std dev: [ref] 1.50 sec +- 0.50 sec ' - '-> [changed] 2.00 sec +- 0.50 sec: 1.33x slower\n' - 'Not significant!') - self.assertEqual(stdout.rstrip(), - expected) + expected = ( + "Mean +- std dev: [ref] 1.50 sec +- 0.50 sec " + "-> [changed] 2.00 sec +- 0.50 sec: 1.33x slower\n" + "Not significant!" + ) + self.assertEqual(stdout.rstrip(), expected) def test_compare_to_rest_table(self): - ref_result = self.create_bench((1.0,), - metadata={'name': 'telco'}) + ref_result = self.create_bench((1.0,), metadata={"name": "telco"}) - changed_result = self.create_bench((2.0,), - metadata={'name': 'telco'}) + changed_result = self.create_bench((2.0,), metadata={"name": "telco"}) - stdout = self.compare('compare_to', ref_result, changed_result, '--table') + stdout = self.compare("compare_to", ref_result, changed_result, "--table") - expected = textwrap.dedent(''' + expected = textwrap.dedent(""" +-----------+----------+------------------------+ | Benchmark | ref | changed | +===========+==========+========================+ | telco | 1.00 sec | 2.00 sec: 2.00x slower | +-----------+----------+------------------------+ - ''').strip() + """).strip() - self.assertEqual(stdout.rstrip(), - expected) + self.assertEqual(stdout.rstrip(), expected) def test_compare_to_md_table(self): - ref_result = self.create_bench((1.0,), - metadata={'name': 'telco'}) + ref_result = self.create_bench((1.0,), metadata={"name": "telco"}) - changed_result = self.create_bench((2.0,), - metadata={'name': 'telco'}) + changed_result = self.create_bench((2.0,), metadata={"name": "telco"}) - stdout = self.compare('compare_to', ref_result, changed_result, '--table', - '--table-format', 'md') + stdout = self.compare( + "compare_to", ref_result, changed_result, "--table", "--table-format", "md" + ) - expected = textwrap.dedent(''' + expected = textwrap.dedent(""" | Benchmark | ref | changed | |-----------|:--------:|:----------------------:| | telco | 1.00 sec | 2.00 sec: 2.00x slower | - ''').strip() + """).strip() - self.assertEqual(stdout.rstrip(), - expected) + self.assertEqual(stdout.rstrip(), expected) def test_compare_to_table_not_significant(self): - ref_result = self.create_bench((1.0, 1.5, 2.0), - metadata={'name': 'telco'}) + ref_result = self.create_bench((1.0, 1.5, 2.0), metadata={"name": "telco"}) - changed_result = self.create_bench((1.5, 2.0, 2.5), - metadata={'name': 'telco'}) + changed_result = self.create_bench((1.5, 2.0, 2.5), metadata={"name": "telco"}) - stdout = self.compare('compare_to', ref_result, changed_result, '--table') + stdout = self.compare("compare_to", ref_result, changed_result, "--table") expected = "Benchmark hidden because not significant (1): telco" - self.assertEqual(stdout.rstrip(), - expected) + self.assertEqual(stdout.rstrip(), expected) def test_compare_to_not_significant(self): - ref_result = self.create_bench((1.0, 1.5, 2.0), - metadata={'name': 'name'}) - changed_result = self.create_bench((1.5, 2.0, 2.5), - metadata={'name': 'name'}) + ref_result = self.create_bench((1.0, 1.5, 2.0), metadata={"name": "name"}) + changed_result = self.create_bench((1.5, 2.0, 2.5), metadata={"name": "name"}) - stdout = self.compare('compare_to', ref_result, changed_result) + stdout = self.compare("compare_to", ref_result, changed_result) - expected = 'Benchmark 
hidden because not significant (1): name' - self.assertEqual(stdout.rstrip(), - expected) + expected = "Benchmark hidden because not significant (1): name" + self.assertEqual(stdout.rstrip(), expected) def test_compare_to_not_significant_verbose(self): - ref_result = self.create_bench((1.0, 1.5, 2.0), - metadata={'name': 'name'}) - changed_result = self.create_bench((1.5, 2.0, 2.5), - metadata={'name': 'name'}) + ref_result = self.create_bench((1.0, 1.5, 2.0), metadata={"name": "name"}) + changed_result = self.create_bench((1.5, 2.0, 2.5), metadata={"name": "name"}) - stdout = self.compare('compare_to', ref_result, changed_result, '-v') + stdout = self.compare("compare_to", ref_result, changed_result, "-v") - expected = ('Mean +- std dev: [ref] 1.50 sec +- 0.50 sec ' - '-> [changed] 2.00 sec +- 0.50 sec: 1.33x slower\n' - 'Not significant!') - self.assertEqual(stdout.rstrip(), - expected) + expected = ( + "Mean +- std dev: [ref] 1.50 sec +- 0.50 sec " + "-> [changed] 2.00 sec +- 0.50 sec: 1.33x slower\n" + "Not significant!" + ) + self.assertEqual(stdout.rstrip(), expected) def test_compare_to_same(self): values = (1.0, 1.5, 2.0) - ref_result = self.create_bench(values, metadata={'name': 'name'}) - changed_result = self.create_bench(values, metadata={'name': 'name'}) + ref_result = self.create_bench(values, metadata={"name": "name"}) + changed_result = self.create_bench(values, metadata={"name": "name"}) - stdout = self.compare('compare_to', ref_result, changed_result, '-v') + stdout = self.compare("compare_to", ref_result, changed_result, "-v") - expected = ('Mean +- std dev: [ref] 1.50 sec +- 0.50 sec ' - '-> [changed] 1.50 sec +- 0.50 sec: no change\n' - 'Not significant!') - self.assertEqual(stdout.rstrip(), - expected) + expected = ( + "Mean +- std dev: [ref] 1.50 sec +- 0.50 sec " + "-> [changed] 1.50 sec +- 0.50 sec: no change\n" + "Not significant!" 
+ ) + self.assertEqual(stdout.rstrip(), expected) def check_command(self, expected, *args, **kwargs): stdout = self.run_command(*args, **kwargs) self.assertEqual(stdout, textwrap.dedent(expected).lstrip()) def test_compare_to_cli(self): - py36 = os.path.join(TESTDIR, 'mult_list_py36.json') - py37 = os.path.join(TESTDIR, 'mult_list_py37.json') - py38 = os.path.join(TESTDIR, 'mult_list_py38.json') + py36 = os.path.join(TESTDIR, "mult_list_py36.json") + py37 = os.path.join(TESTDIR, "mult_list_py37.json") + py38 = os.path.join(TESTDIR, "mult_list_py38.json") # 2 files expected = """ @@ -241,7 +227,7 @@ def test_compare_to_cli(self): Geometric mean: 1.22x slower """ - self.check_command(expected, 'compare_to', py36, py37) + self.check_command(expected, "compare_to", py36, py37) # 2 files grouped by speed expected = """ @@ -254,7 +240,7 @@ def test_compare_to_cli(self): Geometric mean: 1.22x slower """ - self.check_command(expected, 'compare_to', "--group-by-speed", py36, py37) + self.check_command(expected, "compare_to", "--group-by-speed", py36, py37) # 2 files grouped by speed (with not significant) expected = """ @@ -266,7 +252,7 @@ def test_compare_to_cli(self): Geometric mean: 1.09x faster """ - self.check_command(expected, 'compare_to', "--group-by-speed", py36, py38) + self.check_command(expected, "compare_to", "--group-by-speed", py36, py38) # 3 files expected = """ @@ -295,7 +281,7 @@ def test_compare_to_cli(self): mult_list_py37: 1.22x slower mult_list_py38: 1.09x faster """ - self.check_command(expected, 'compare_to', py36, py37, py38) + self.check_command(expected, "compare_to", py36, py37, py38) # 3 files as table expected = """ @@ -311,7 +297,7 @@ def test_compare_to_cli(self): | Geometric mean | (ref) | 1.22x slower | 1.09x faster | +----------------+----------------+-----------------------+-----------------------+ """ - self.check_command(expected, 'compare_to', '--table', py36, py37, py38) + self.check_command(expected, "compare_to", "--table", py36, py37, py38) # 3 files as table grouped by speed expected = """ @@ -327,11 +313,13 @@ def test_compare_to_cli(self): | Geometric mean | (ref) | 1.22x slower | +----------------+----------------+-----------------------+ """ - self.check_command(expected, 'compare_to', '--table', "--group-by-speed", py36, py37) + self.check_command( + expected, "compare_to", "--table", "--group-by-speed", py36, py37 + ) def test_compare_to_cli_tags(self): - py36 = os.path.join(TESTDIR, 'mult_list_py36_tags.json') - py37 = os.path.join(TESTDIR, 'mult_list_py37_tags.json') + py36 = os.path.join(TESTDIR, "mult_list_py36_tags.json") + py37 = os.path.join(TESTDIR, "mult_list_py37_tags.json") # 2 files expected = """ @@ -360,7 +348,7 @@ def test_compare_to_cli_tags(self): Geometric mean: 1.22x slower """ - self.check_command(expected, 'compare_to', py36, py37) + self.check_command(expected, "compare_to", py36, py37) expected = """ Benchmarks with tag 'bar': @@ -404,12 +392,12 @@ def test_compare_to_cli_tags(self): | Geometric mean | (ref) | 1.22x slower | +----------------+---------------------+-----------------------+ """ - self.check_command(expected, 'compare_to', '--table', py36, py37) + self.check_command(expected, "compare_to", "--table", py36, py37) def test_compare_to_cli_min_speed(self): - py36 = os.path.join(TESTDIR, 'mult_list_py36.json') - py37 = os.path.join(TESTDIR, 'mult_list_py37.json') - py38 = os.path.join(TESTDIR, 'mult_list_py38.json') + py36 = os.path.join(TESTDIR, "mult_list_py36.json") + py37 = os.path.join(TESTDIR, 
"mult_list_py37.json") + py38 = os.path.join(TESTDIR, "mult_list_py38.json") # 2 files, min-speed=10 expected = """ @@ -420,7 +408,7 @@ def test_compare_to_cli_min_speed(self): Geometric mean: 1.22x slower """ - self.check_command(expected, 'compare_to', "--min-speed=10", py36, py37) + self.check_command(expected, "compare_to", "--min-speed=10", py36, py37) # 2 files, min-speed=40 expected = """ @@ -430,7 +418,7 @@ def test_compare_to_cli_min_speed(self): Geometric mean: 1.22x slower """ - self.check_command(expected, 'compare_to', "--min-speed=40", py36, py37) + self.check_command(expected, "compare_to", "--min-speed=40", py36, py37) # 3 files as table, min-speed=10 expected = """ @@ -446,15 +434,17 @@ def test_compare_to_cli_min_speed(self): Benchmark hidden because not significant (1): [1]*1000 """ - self.check_command(expected, 'compare_to', '--table', "--min-speed=10", py36, py37, py38) + self.check_command( + expected, "compare_to", "--table", "--min-speed=10", py36, py37, py38 + ) def test_hist(self): # Force terminal size on Python 3 for shutil.get_terminal_size() env = dict(os.environ) - env['COLUMNS'] = '80' - env['LINES'] = '25' + env["COLUMNS"] = "80" + env["LINES"] = "25" - expected = (""" + expected = """ 22.1 ms: 1 ##### 22.1 ms: 0 | 22.2 ms: 1 ##### @@ -478,17 +468,17 @@ def test_hist(self): 22.8 ms: 3 ############## 22.9 ms: 4 ################### 22.9 ms: 4 ################### - """) - self.check_command(expected, 'hist', TELCO, env=env) + """ + self.check_command(expected, "hist", TELCO, env=env) def test_show(self): - expected = (""" + expected = """ Mean +- std dev: 22.5 ms +- 0.2 ms - """) - self.check_command(expected, 'show', TELCO) + """ + self.check_command(expected, "show", TELCO) def test_stats(self): - expected = (""" + expected = """ Total duration: 29.2 sec Start date: 2016-10-21 03:14:19 End date: 2016-10-21 03:14:53 @@ -518,8 +508,8 @@ def test_stats(self): 100th percentile: 22.9 ms (+2% of the mean) -- maximum Number of outlier (out of 22.0 ms..23.0 ms): 0 - """) - self.check_command(expected, 'stats', TELCO) + """ + self.check_command(expected, "stats", TELCO) def test_dump_raw(self): expected = """ @@ -534,7 +524,7 @@ def test_dump_raw(self): - raw value 2: 180 ms - raw value 3: 181 ms """ - stdout = self.run_command('dump', '--raw', TELCO) + stdout = self.run_command("dump", "--raw", TELCO) self.assertIn(textwrap.dedent(expected).strip(), stdout) def test_dump(self): @@ -550,7 +540,7 @@ def test_dump(self): - value 2: 22.5 ms - value 3: 22.6 ms """ - stdout = self.run_command('dump', TELCO) + stdout = self.run_command("dump", TELCO) self.assertIn(textwrap.dedent(expected).strip(), stdout) def test_dump_track_memory(self): @@ -564,8 +554,8 @@ def test_dump_track_memory(self): Run 4: 0 warmups, 1 value, 2^15 loops - value 1: 7208.0 KiB """ - filename = os.path.join(TESTDIR, 'track_memory.json') - stdout = self.run_command('dump', filename) + filename = os.path.join(TESTDIR, "track_memory.json") + stdout = self.run_command("dump", filename) self.assertIn(textwrap.dedent(expected).strip(), stdout) def test_dump_quiet(self): @@ -579,7 +569,7 @@ def test_dump_quiet(self): - value 2: 22.4 ms - value 3: 22.3 ms """ - stdout = self.run_command('dump', '--quiet', TELCO) + stdout = self.run_command("dump", "--quiet", TELCO) self.assertIn(textwrap.dedent(expected).strip(), stdout) def test_dump_verbose(self): @@ -613,46 +603,41 @@ def test_dump_verbose(self): runnable_threads: 1 uptime: 2 day 2 hour 4 min """ - stdout = self.run_command('dump', '--verbose', TELCO) + 
stdout = self.run_command("dump", "--verbose", TELCO) self.assertIn(textwrap.dedent(expected).strip(), stdout) def test_collect_metadata(self): - stdout = self.run_command('collect_metadata') - self.assertRegex(stdout, - r'^Metadata:\n(- [^:]+: .*\n)+$') + stdout = self.run_command("collect_metadata") + self.assertRegex(stdout, r"^Metadata:\n(- [^:]+: .*\n)+$") def test_slowest(self): - stdout = self.run_command('slowest', TELCO) - self.assertEqual(stdout.rstrip(), - '#1: telco (29.2 sec)') + stdout = self.run_command("slowest", TELCO) + self.assertEqual(stdout.rstrip(), "#1: telco (29.2 sec)") def test_check_stable(self): - stdout = self.run_command('check', TELCO) + stdout = self.run_command("check", TELCO) self.assertIn( textwrap.dedent( """ Benchmark was run more times than necessary to get a stable result. Consider passing processes=7 to the Runner constructor to save time. """ - ).strip(), stdout.rstrip() - ) - self.assertIn( - 'The benchmark seems to be stable', - stdout.rstrip() + ).strip(), + stdout.rstrip(), ) + self.assertIn("The benchmark seems to be stable", stdout.rstrip()) def test_command(self): - command = [sys.executable, '-c', 'pass'] - stdout = self.run_command('command', '--debug-single-value', *command) - self.assertRegex(stdout, - r'^\.\ncommand: [0-9.]+ (?:ms|sec)$') + command = [sys.executable, "-c", "pass"] + stdout = self.run_command("command", "--debug-single-value", *command) + self.assertRegex(stdout, r"^\.\ncommand: [0-9.]+ (?:ms|sec)$") def test_check_unstable(self): suite = self.create_suite() with tests.temporary_file() as tmp_name: suite.dump(tmp_name) - stdout = self.run_command('check', tmp_name) + stdout = self.run_command("check", tmp_name) expected = textwrap.dedent(""" py36 @@ -688,41 +673,52 @@ def _check_track_memory_bench(self, bench, loops): self.assertIsInstance(run.values[0], int) self.assertEqual(run.get_loops(), loops) metadata = run.get_metadata() - self.assertEqual(metadata['warmups'], 1) - self.assertEqual(metadata['values'], 3) + self.assertEqual(metadata["warmups"], 1) + self.assertEqual(metadata["values"], 3) def _check_track_memory(self, track_option): with tests.temporary_file() as tmp_name: - self.run_command('timeit', - track_option, - '-p2', '-w1', '-l5', '-n3', - '[1,2]*1000', - '-o', tmp_name) + self.run_command( + "timeit", + track_option, + "-p2", + "-w1", + "-l5", + "-n3", + "[1,2]*1000", + "-o", + tmp_name, + ) bench = pyperf.Benchmark.load(tmp_name) self._check_track_memory_bench(bench, loops=5) def test_track_memory(self): - self._check_track_memory('--track-memory') + self._check_track_memory("--track-memory") def test_tracemalloc(self): try: - import tracemalloc # noqa + import tracemalloc # noqa except ImportError: - self.skipTest('tracemalloc module not available') + self.skipTest("tracemalloc module not available") - self._check_track_memory('--tracemalloc') + self._check_track_memory("--tracemalloc") - @unittest.skipIf(sys.platform == 'win32', - 'https://github.com/psf/pyperf/issues/97') + @unittest.skipIf(sys.platform == "win32", "https://github.com/psf/pyperf/issues/97") def test_command_track_memory(self): - cmd = (sys.executable, '-c', 'pass') + cmd = (sys.executable, "-c", "pass") with tests.temporary_file() as tmp_name: - args = ('command', - '--track-memory', - '-p2', '-w1', '-l2', '-n3', - '-o', tmp_name, - '--') + args = ( + "command", + "--track-memory", + "-p2", + "-w1", + "-l2", + "-n3", + "-o", + tmp_name, + "--", + ) args += cmd self.run_command(*args) bench = pyperf.Benchmark.load(tmp_name) @@ -731,12 
+727,18 @@ def test_command_track_memory(self): def test_hook(self): with tests.temporary_file() as tmp_name: - self.run_command('timeit', - '--hook', - '_test_hook', - '-p2', '-w1', '-l5', '-n3', - '[1,2]*1000', - '-o', tmp_name) + self.run_command( + "timeit", + "--hook", + "_test_hook", + "-p2", + "-w1", + "-l5", + "-n3", + "[1,2]*1000", + "-o", + tmp_name, + ) bench = pyperf.Benchmark.load(tmp_name) metadata = bench.get_metadata() assert metadata.get("_test_hook", 0) > 0 @@ -749,28 +751,25 @@ def test_stdout(self): with tests.temporary_file() as tmp_name: bench.dump(tmp_name) - stdout = self.run_command('convert', tmp_name, '--stdout') + stdout = self.run_command("convert", tmp_name, "--stdout") - self.assertEqual(stdout, - tests.benchmark_as_json(bench)) + self.assertEqual(stdout, tests.benchmark_as_json(bench)) def test_indent(self): bench = self.create_bench((1.0, 1.5, 2.0)) with tests.temporary_file() as tmp_name: bench.dump(tmp_name) - stdout = self.run_command('convert', tmp_name, - '--indent', '--stdout') + stdout = self.run_command("convert", tmp_name, "--indent", "--stdout") - self.assertEqual(stdout, - tests.benchmark_as_json(bench, compact=False)) + self.assertEqual(stdout, tests.benchmark_as_json(bench, compact=False)) def test_convert(self): bench = pyperf.Benchmark.load(TELCO) with tests.temporary_directory() as tmpdir: - filename = os.path.join(tmpdir, 'test.json') - self.run_command('convert', TELCO, '-o', filename) + filename = os.path.join(tmpdir, "test.json") + self.run_command("convert", TELCO, "-o", filename) bench2 = pyperf.Benchmark.load(filename) @@ -780,50 +779,48 @@ def test_filter_benchmarks(self): values = (1.0, 1.5, 2.0) benchmarks = [] for name in ("call_simple", "go", "telco"): - bench = self.create_bench(values, metadata={'name': name}) + bench = self.create_bench(values, metadata={"name": name}) benchmarks.append(bench) suite = pyperf.BenchmarkSuite(benchmarks) with tests.temporary_directory() as tmpdir: - filename = os.path.join(tmpdir, 'test.json') + filename = os.path.join(tmpdir, "test.json") suite.dump(filename) - stdout = self.run_command('convert', filename, - '--include-benchmark', 'go', '--stdout') + stdout = self.run_command( + "convert", filename, "--include-benchmark", "go", "--stdout" + ) suite2 = pyperf.BenchmarkSuite.loads(stdout) - stdout = self.run_command('convert', filename, - '--exclude-benchmark', 'go', '--stdout') + stdout = self.run_command( + "convert", filename, "--exclude-benchmark", "go", "--stdout" + ) suite3 = pyperf.BenchmarkSuite.loads(stdout) - self.assertEqual(suite2.get_benchmark_names(), - ['go']) + self.assertEqual(suite2.get_benchmark_names(), ["go"]) - self.assertEqual(suite3.get_benchmark_names(), - ['call_simple', 'telco']) + self.assertEqual(suite3.get_benchmark_names(), ["call_simple", "telco"]) def test_remove_warmups(self): values = [1.0, 2.0, 3.0] raw_values = [5.0] + values - run = pyperf.Run(values, warmups=[(1, 5.0)], - metadata={'name': 'bench'}) + run = pyperf.Run(values, warmups=[(1, 5.0)], metadata={"name": "bench"}) bench = pyperf.Benchmark([run]) self.assertEqual(bench._get_nwarmup(), 1) - self.assertEqual(bench._get_raw_values(warmups=True), - raw_values) + self.assertEqual(bench._get_raw_values(warmups=True), raw_values) with tests.temporary_directory() as tmpdir: - filename = os.path.join(tmpdir, 'test.json') + filename = os.path.join(tmpdir, "test.json") bench.dump(filename) - stdout = self.run_command('convert', filename, - '--remove-warmups', '--stdout') + stdout = self.run_command( + 
"convert", filename, "--remove-warmups", "--stdout" + ) bench2 = pyperf.Benchmark.loads(stdout) self.assertEqual(bench2._get_nwarmup(), 0) - self.assertEqual(bench2._get_raw_values(warmups=True), - raw_values[1:]) + self.assertEqual(bench2._get_raw_values(warmups=True), raw_values[1:]) def test_filter_runs(self): runs = (1.0, 2.0, 3.0, 4.0, 5.0) @@ -832,19 +829,22 @@ def test_filter_runs(self): self.assertEqual(bench.get_values(), runs) with tests.temporary_directory() as tmpdir: - filename = os.path.join(tmpdir, 'test.json') + filename = os.path.join(tmpdir, "test.json") bench.dump(filename) - stdout = self.run_command('convert', filename, - '--include-runs', '4', '--stdout') + stdout = self.run_command( + "convert", filename, "--include-runs", "4", "--stdout" + ) bench2 = pyperf.Benchmark.loads(stdout) - stdout = self.run_command('convert', filename, - '--include-runs', '1-3,5', '--stdout') + stdout = self.run_command( + "convert", filename, "--include-runs", "1-3,5", "--stdout" + ) bench3 = pyperf.Benchmark.loads(stdout) - stdout = self.run_command('convert', filename, - '--exclude-runs', '2,4', '--stdout') + stdout = self.run_command( + "convert", filename, "--exclude-runs", "2,4", "--stdout" + ) bench4 = pyperf.Benchmark.loads(stdout) self.assertEqual(bench2.get_values(), (4.0,)) diff --git a/pyperf/tests/test_runner.py b/pyperf/tests/test_runner.py index fc2fb58a..2909ca81 100644 --- a/pyperf/tests/test_runner.py +++ b/pyperf/tests/test_runner.py @@ -22,7 +22,7 @@ def check_args(loops, a, b): return loops -Result = collections.namedtuple('Result', 'runner bench stdout') +Result = collections.namedtuple("Result", "runner bench stdout") class TestRunner(unittest.TestCase): @@ -41,21 +41,23 @@ def fake_timer(): t = fake_timer.value fake_timer.value += 1.0 return t + fake_timer.value = 0.0 def fake_get_clock_info(clock): class ClockInfo: - implementation = 'fake_clock' + implementation = "fake_clock" resolution = 1.0 + return ClockInfo() - name = kwargs.pop('name', 'bench') - time_func = kwargs.pop('time_func', None) + name = kwargs.pop("name", "bench") + time_func = kwargs.pop("time_func", None) runner = self.create_runner(args, **kwargs) - with mock.patch('time.perf_counter', fake_timer): - with mock.patch('time.get_clock_info', fake_get_clock_info): + with mock.patch("time.perf_counter", fake_timer): + with mock.patch("time.get_clock_info", fake_get_clock_info): with tests.capture_stdout() as stdout: with tests.capture_stderr() as stderr: if time_func: @@ -65,8 +67,8 @@ class ClockInfo: stdout = stdout.getvalue() stderr = stderr.getvalue() - if '--stdout' not in args: - self.assertEqual(stderr, '') + if "--stdout" not in args: + self.assertEqual(stderr, "") # check bench_time_func() bench self.assertIsInstance(bench, pyperf.Benchmark) @@ -76,24 +78,25 @@ class ClockInfo: return Result(runner, bench, stdout) def test_worker(self): - result = self.exec_runner('--worker', '-l1', '-w1') - self.assertRegex(result.stdout, - r'^bench: Mean \+- std dev: 1\.00 sec \+- 0\.00 sec\n$') + result = self.exec_runner("--worker", "-l1", "-w1") + self.assertRegex( + result.stdout, r"^bench: Mean \+- std dev: 1\.00 sec \+- 0\.00 sec\n$" + ) def test_debug_single_value(self): - result = self.exec_runner('--debug-single-value', '--worker') + result = self.exec_runner("--debug-single-value", "--worker") self.assertEqual(result.bench.get_nvalue(), 1) def test_profile_time_func(self): - with tempfile.NamedTemporaryFile('wb+') as tmp: + with tempfile.NamedTemporaryFile("wb+") as tmp: name = tmp.name - args = 
['--worker', '-l1', '-w1', '--profile', name] + args = ["--worker", "-l1", "-w1", "--profile", name] runner = self.create_runner(args) def time_func(loops): return 1.0 - runner.bench_time_func('bench1', time_func) + runner.bench_time_func("bench1", time_func) try: s = pstats.Stats(name) @@ -103,9 +106,9 @@ def time_func(loops): os.unlink(name) def test_profile_func(self): - with tempfile.NamedTemporaryFile('wb+') as tmp: + with tempfile.NamedTemporaryFile("wb+") as tmp: name = tmp.name - args = ['--worker', '-l1', '-w1', '--profile', name] + args = ["--worker", "-l1", "-w1", "--profile", name] runner = self.create_runner(args) def external(): @@ -115,10 +118,11 @@ def func(): external() return 1.0 - runner.bench_func('bench1', func) + runner.bench_func("bench1", func) try: import pstats + s = pstats.Stats(name) assert len(s.get_stats_profile().func_profiles) finally: @@ -134,14 +138,12 @@ def test_pipe(self): # the Runner class wpipe._fd = None - result = self.exec_runner('--pipe', str(arg), - '--worker', '-l1', '-w1') + result = self.exec_runner("--pipe", str(arg), "--worker", "-l1", "-w1") with rpipe.open_text() as rfile: bench_json = rfile.read() - self.assertEqual(bench_json, - tests.benchmark_as_json(result.bench)) + self.assertEqual(bench_json, tests.benchmark_as_json(result.bench)) def test_pipe_with_timeout(self): rpipe, wpipe = create_pipe() @@ -152,64 +154,66 @@ def test_pipe_with_timeout(self): # the Runner class wpipe._fd = None - result = self.exec_runner('--pipe', str(arg), - '--worker', '-l1', '-w1') + result = self.exec_runner("--pipe", str(arg), "--worker", "-l1", "-w1") # Mock the select to make the read pipeline not ready - with mock.patch('pyperf._utils.select.select', - return_value=(False, False, False)): + with mock.patch( + "pyperf._utils.select.select", return_value=(False, False, False) + ): with self.assertRaises(TimeoutError) as cm: rpipe.read_text(timeout=0.1) - self.assertEqual(str(cm.exception), - 'Timed out after 0.1 seconds') + self.assertEqual(str(cm.exception), "Timed out after 0.1 seconds") # Mock the select to make the read pipeline ready - with mock.patch('pyperf._utils.select.select', - return_value=(True, False, False)): + with mock.patch( + "pyperf._utils.select.select", return_value=(True, False, False) + ): bench_json = rpipe.read_text(timeout=0.1) - self.assertEqual(bench_json.rstrip(), - tests.benchmark_as_json(result.bench).rstrip()) + self.assertEqual( + bench_json.rstrip(), tests.benchmark_as_json(result.bench).rstrip() + ) def test_json_exists(self): - with tempfile.NamedTemporaryFile('wb+') as tmp: - + with tempfile.NamedTemporaryFile("wb+") as tmp: with tests.capture_stdout() as stdout: try: - self.create_runner(['--worker', '-l1', '-w1', - '--output', tmp.name]) + self.create_runner(["--worker", "-l1", "-w1", "--output", tmp.name]) except SystemExit as exc: self.assertEqual(exc.code, 1) - self.assertEqual('ERROR: The JSON file %r already exists' - % tmp.name, - stdout.getvalue().rstrip()) + self.assertEqual( + "ERROR: The JSON file %r already exists" % tmp.name, + stdout.getvalue().rstrip(), + ) def test_verbose_metadata(self): - result = self.exec_runner('--worker', '-l1', '-w1', - '--verbose', '--metadata') - self.assertRegex(result.stdout, - r'^' - r'(?:Warmup [0-9]+: 1\.00 sec \(loops: [0-9]+, raw: 1\.00 sec\)\n)+' - r'\n' - r'(?:Value [0-9]+: 1\.00 sec\n)+' - r'\n' - r'Metadata:\n' - r'(?:- .*\n)+' - r'\n' - r'bench: Mean \+- std dev: 1\.00 sec \+- 0\.00 sec\n$') + result = self.exec_runner("--worker", "-l1", "-w1", "--verbose", 
"--metadata") + self.assertRegex( + result.stdout, + r"^" + r"(?:Warmup [0-9]+: 1\.00 sec \(loops: [0-9]+, raw: 1\.00 sec\)\n)+" + r"\n" + r"(?:Value [0-9]+: 1\.00 sec\n)+" + r"\n" + r"Metadata:\n" + r"(?:- .*\n)+" + r"\n" + r"bench: Mean \+- std dev: 1\.00 sec \+- 0\.00 sec\n$", + ) def test_loops_calibration(self): def time_func(loops): # number of iterations => number of microseconds return loops * 1e-6 - result = self.exec_runner('--worker', '--calibrate-loops', - '-v', time_func=time_func) + result = self.exec_runner( + "--worker", "--calibrate-loops", "-v", time_func=time_func + ) for run in result.bench.get_runs(): - self.assertEqual(run.get_total_loops(), 2 ** 17) + self.assertEqual(run.get_total_loops(), 2**17) - expected = textwrap.dedent(''' + expected = textwrap.dedent(""" Warmup 1: 1.00 us (loops: 1, raw: 1.00 us) Warmup 2: 1.00 us (loops: 2, raw: 2.00 us) Warmup 3: 1.00 us (loops: 4, raw: 4.00 us) @@ -229,7 +233,7 @@ def time_func(loops): Warmup 17: 1.00 us (loops: 2^16, raw: 65.5 ms) Warmup 18: 1.00 us (loops: 2^17, raw: 131 ms) - ''').strip() + """).strip() self.assertIn(expected, result.stdout) def test_loops_calibration_min_time(self): @@ -237,48 +241,47 @@ def time_func(loops): # number of iterations => number of microseconds return loops * 1e-6 - result = self.exec_runner('--worker', '--calibrate-loops', - '--min-time', '0.001', - time_func=time_func) + result = self.exec_runner( + "--worker", "--calibrate-loops", "--min-time", "0.001", time_func=time_func + ) for run in result.bench.get_runs(): - self.assertEqual(run.get_total_loops(), 2 ** 10) + self.assertEqual(run.get_total_loops(), 2**10) def test_json_file(self): with tests.temporary_directory() as tmpdir: - filename = os.path.join(tmpdir, 'test.json') + filename = os.path.join(tmpdir, "test.json") - result = self.exec_runner('--worker', '-l1', '-w1', - '--output', filename) + result = self.exec_runner("--worker", "-l1", "-w1", "--output", filename) loaded = pyperf.Benchmark.load(filename) tests.compare_benchmarks(self, loaded, result.bench) def test_time_func_zero(self): - runner = self.create_runner(['--worker', '-l1', '-w1']) + runner = self.create_runner(["--worker", "-l1", "-w1"]) def time_func(loops): return 0 with self.assertRaises(ValueError) as cm: - runner.bench_time_func('bench', time_func) - self.assertEqual(str(cm.exception), - 'benchmark function returned zero') + runner.bench_time_func("bench", time_func) + self.assertEqual(str(cm.exception), "benchmark function returned zero") def test_calibration_zero(self): - runner = self.create_runner(['--worker', '--calibrate-loops']) + runner = self.create_runner(["--worker", "--calibrate-loops"]) def time_func(loops): return 0 with self.assertRaises(SystemExit): with tests.capture_stdout() as stdout: - runner.bench_time_func('bench', time_func) - self.assertIn('ERROR: failed to calibrate the number of loops', - stdout.getvalue()) + runner.bench_time_func("bench", time_func) + self.assertIn( + "ERROR: failed to calibrate the number of loops", stdout.getvalue() + ) def check_calibrate_loops(self, runner, time_func, warmups): with tests.capture_stdout(): - bench = runner.bench_time_func('bench', time_func) + bench = runner.bench_time_func("bench", time_func) runs = bench.get_runs() self.assertEqual(len(runs), 1) @@ -287,8 +290,7 @@ def check_calibrate_loops(self, runner, time_func, warmups): self.assertEqual(run.warmups, warmups) def test_calibrate_loops(self): - args = ['--worker', '-w0', '-n2', '--min-time=1.0', - '--calibrate-loops'] + args = ["--worker", 
"-w0", "-n2", "--min-time=1.0", "--calibrate-loops"] runner = self.create_runner(args) def time_func(loops): @@ -296,22 +298,22 @@ def time_func(loops): return 0.5 else: return 1.0 + time_func.step = 0 warmups = ( (1, 0.5), (2, 0.5 / 2), (4, 0.5 / 4), - # warmup 1: dt >= min_time (8, 1.0 / 8), # warmup 2 - (8, 1.0 / 8)) + (8, 1.0 / 8), + ) self.check_calibrate_loops(runner, time_func, warmups) def test_calibrate_loops_jit(self): - args = ['--worker', '-w0', '-n2', '--min-time=1.0', - '--calibrate-loops'] + args = ["--worker", "-w0", "-n2", "--min-time=1.0", "--calibrate-loops"] runner = self.create_runner(args) # Simulate PyPy JIT: running the same function becomes faster @@ -327,6 +329,7 @@ def time_func(loops): return 0.5 else: return 1.0 + time_func.step = 0 warmups = ( @@ -335,20 +338,25 @@ def time_func(loops): (2, 0.0), (4, 0.0), (8, 0.0), - # warmup 1: first non-zero calibration value (16, 3.0 / 16), - # warmup 2: JIT triggered, dt < min_time, # double number of loops (16, 0.5 / 16), # warmup 3 - (32, 1.0 / 32)) + (32, 1.0 / 32), + ) self.check_calibrate_loops(runner, time_func, warmups) def test_recalibrate_loops_jit(self): - args = ['--worker', '-w0', '-n2', '--min-time=1.0', - '--recalibrate-loops', '--loops=16'] + args = [ + "--worker", + "-w0", + "-n2", + "--min-time=1.0", + "--recalibrate-loops", + "--loops=16", + ] runner = self.create_runner(args) # Simulate PyPy JIT: running the same function becomes faster @@ -361,6 +369,7 @@ def time_func(loops): return 0.5 else: return 1.0 + time_func.step = 0 warmups = ( @@ -370,18 +379,19 @@ def time_func(loops): # double the number of loops (16, 0.5 / 16), # warmup 3, new try with loops x 2 - (32, 1.0 / 32)) + (32, 1.0 / 32), + ) self.check_calibrate_loops(runner, time_func, warmups) def test_loops_power(self): # test 'x^y' syntax for loops - runner = self.create_runner(['--loops', '2^8']) + runner = self.create_runner(["--loops", "2^8"]) self.assertEqual(runner.args.loops, 256) def check_two_benchmarks(self, task=None): - args = ['--worker', '--loops=1', '-w0', '-n3'] + args = ["--worker", "--loops=1", "-w0", "-n3"] if task is not None: - args.append('--worker-task=%s' % task) + args.append("--worker-task=%s" % task) runner = self.create_runner(args) def time_func(loops): @@ -391,28 +401,28 @@ def time_func2(loops): return 2.0 with tests.capture_stdout(): - bench1 = runner.bench_time_func('bench1', time_func) - bench2 = runner.bench_time_func('bench2', time_func2) + bench1 = runner.bench_time_func("bench1", time_func) + bench2 = runner.bench_time_func("bench2", time_func2) return (bench1, bench2) def test_two_benchmarks(self): bench1, bench2 = self.check_two_benchmarks() - self.assertEqual(bench1.get_name(), 'bench1') + self.assertEqual(bench1.get_name(), "bench1") self.assertEqual(bench1.get_values(), (1.0, 1.0, 1.0)) - self.assertEqual(bench2.get_name(), 'bench2') + self.assertEqual(bench2.get_name(), "bench2") self.assertEqual(bench2.get_values(), (2.0, 2.0, 2.0)) def test_worker_task(self): bench1, bench2 = self.check_two_benchmarks(task=0) - self.assertEqual(bench1.get_name(), 'bench1') + self.assertEqual(bench1.get_name(), "bench1") self.assertEqual(bench1.get_values(), (1.0, 1.0, 1.0)) self.assertIs(bench2, None) bench1, bench2 = self.check_two_benchmarks(task=1) self.assertIs(bench1, None) - self.assertEqual(bench2.get_name(), 'bench2') + self.assertEqual(bench2.get_name(), "bench2") self.assertEqual(bench2.get_values(), (2.0, 2.0, 2.0)) bench1, bench2 = self.check_two_benchmarks(task=2) @@ -420,13 +430,17 @@ def 
test_worker_task(self): self.assertIs(bench2, None) def test_show_name(self): - result = self.exec_runner('--worker', '-l1', '-w1', name='NAME') - self.assertRegex(result.stdout, - r'^NAME: Mean \+- std dev: 1\.00 sec \+- 0\.00 sec\n$') - - result = self.exec_runner('--worker', '-l1', '-w1', name='NAME', show_name=False) - self.assertRegex(result.stdout, - r'^Mean \+- std dev: 1\.00 sec \+- 0\.00 sec\n$') + result = self.exec_runner("--worker", "-l1", "-w1", name="NAME") + self.assertRegex( + result.stdout, r"^NAME: Mean \+- std dev: 1\.00 sec \+- 0\.00 sec\n$" + ) + + result = self.exec_runner( + "--worker", "-l1", "-w1", name="NAME", show_name=False + ) + self.assertRegex( + result.stdout, r"^Mean \+- std dev: 1\.00 sec \+- 0\.00 sec\n$" + ) def test_compare_to(self): def time_func(loops): @@ -435,50 +449,70 @@ def time_func(loops): def abs_executable(python): return python - run = pyperf.Run([1.5], - metadata={'name': 'name'}, - collect_metadata=False) + run = pyperf.Run([1.5], metadata={"name": "name"}, collect_metadata=False) bench = pyperf.Benchmark([run]) suite = pyperf.BenchmarkSuite([bench]) with ExitStack() as cm: + def popen(*args, **kw): mock_popen = mock.Mock() mock_popen.wait.return_value = 0 return mock_popen - mock_subprocess = cm.enter_context(mock.patch('pyperf._manager.subprocess')) + mock_subprocess = cm.enter_context(mock.patch("pyperf._manager.subprocess")) mock_subprocess.Popen.side_effect = popen - cm.enter_context(mock.patch('pyperf._runner.abs_executable', - side_effect=abs_executable)) - cm.enter_context(mock.patch('pyperf._manager._load_suite_from_pipe', - return_value=suite)) - - args = ["--python=python3.8", "--compare-to=python3.6", "--min-time=5", - "-p1", "-w3", "-n7", "-l11"] + cm.enter_context( + mock.patch("pyperf._runner.abs_executable", side_effect=abs_executable) + ) + cm.enter_context( + mock.patch("pyperf._manager._load_suite_from_pipe", return_value=suite) + ) + + args = [ + "--python=python3.8", + "--compare-to=python3.6", + "--min-time=5", + "-p1", + "-w3", + "-n7", + "-l11", + ] runner = self.create_runner(args) with tests.capture_stdout(): - runner.bench_time_func('name', time_func) + runner.bench_time_func("name", time_func) def popen_call(python): - args = [python, mock.ANY, '--worker', - '--pipe', mock.ANY, '--worker-task=0', - '--values', '7', '--min-time', '5.0', - '--loops', '11', '--warmups', '3'] + args = [ + python, + mock.ANY, + "--worker", + "--pipe", + mock.ANY, + "--worker-task=0", + "--values", + "7", + "--min-time", + "5.0", + "--loops", + "11", + "--warmups", + "3", + ] kw = {} if MS_WINDOWS: - kw['close_fds'] = False + kw["close_fds"] = False else: - kw['pass_fds'] = mock.ANY + kw["pass_fds"] = mock.ANY return mock.call(args, env=mock.ANY, **kw) - call1 = popen_call('python3.6') - call2 = popen_call('python3.8') + call1 = popen_call("python3.6") + call2 = popen_call("python3.8") mock_subprocess.Popen.assert_has_calls([call1, call2]) def test_parse_args_twice_error(self): - args = ["--worker", '-l1', '-w1'] + args = ["--worker", "-l1", "-w1"] runner = self.create_runner(args) with self.assertRaises(RuntimeError): runner.parse_args(args) @@ -487,41 +521,41 @@ def test_duplicated_named(self): def time_func(loops): return 1.0 - runner = self.create_runner('-l1 -w0 -n1 --worker'.split()) + runner = self.create_runner("-l1 -w0 -n1 --worker".split()) with tests.capture_stdout(): - runner.bench_time_func('optim', time_func) + runner.bench_time_func("optim", time_func) with self.assertRaises(ValueError) as cm: - 
runner.bench_time_func('optim', time_func) + runner.bench_time_func("optim", time_func) - self.assertEqual(str(cm.exception), - "duplicated benchmark name: 'optim'") + self.assertEqual(str(cm.exception), "duplicated benchmark name: 'optim'") def test_bench_command(self): - args = [sys.executable, '-c', 'pass'] + args = [sys.executable, "-c", "pass"] - runner = self.create_runner('-l1 -w0 -n1 --worker'.split()) + runner = self.create_runner("-l1 -w0 -n1 --worker".split()) with tests.capture_stdout(): - bench = runner.bench_command('bench', args) + bench = runner.bench_command("bench", args) - self.assertEqual(bench.get_metadata()['command'], - ' '.join(map(shell_quote, args))) + self.assertEqual( + bench.get_metadata()["command"], " ".join(map(shell_quote, args)) + ) def test_hook_command(self): - args = [sys.executable, '-c', 'pass'] + args = [sys.executable, "-c", "pass"] - runner = self.create_runner('-l1 -w0 -n1 --worker --hook _test_hook'.split()) + runner = self.create_runner("-l1 -w0 -n1 --worker --hook _test_hook".split()) with tests.capture_stdout(): - bench = runner.bench_command('bench', args) + bench = runner.bench_command("bench", args) - self.assertEqual(bench.get_metadata()['command'], - ' '.join(map(shell_quote, args))) - self.assertEqual(bench.get_metadata()["hooks"], - "_test_hook") + self.assertEqual( + bench.get_metadata()["command"], " ".join(map(shell_quote, args)) + ) + self.assertEqual(bench.get_metadata()["hooks"], "_test_hook") def test_single_instance(self): - runner1 = self.create_runner([]) # noqa + runner1 = self.create_runner([]) # noqa with self.assertRaises(RuntimeError): - runner2 = pyperf.Runner() # noqa + runner2 = pyperf.Runner() # noqa class TestRunnerCPUAffinity(unittest.TestCase): @@ -533,35 +567,33 @@ def create_runner(self, args, **kwargs): return runner def test_cpu_affinity_args(self): - runner = self.create_runner(['-v', '--affinity=3,7']) + runner = self.create_runner(["-v", "--affinity=3,7"]) - with mock.patch('pyperf._runner.set_cpu_affinity') as mock_setaffinity: + with mock.patch("pyperf._runner.set_cpu_affinity") as mock_setaffinity: with tests.capture_stdout() as stdout: runner._cpu_affinity() - self.assertEqual(runner.args.affinity, '3,7') - self.assertEqual(stdout.getvalue(), - 'Pin process to CPUs: 3,7\n') + self.assertEqual(runner.args.affinity, "3,7") + self.assertEqual(stdout.getvalue(), "Pin process to CPUs: 3,7\n") mock_setaffinity.assert_called_once_with([3, 7]) def test_cpu_affinity_isolcpus(self): - runner = self.create_runner(['-v']) + runner = self.create_runner(["-v"]) - with mock.patch('pyperf._runner.set_cpu_affinity') as mock_setaffinity: - with mock.patch('pyperf._runner.get_isolated_cpus', return_value=[1, 2]): + with mock.patch("pyperf._runner.set_cpu_affinity") as mock_setaffinity: + with mock.patch("pyperf._runner.get_isolated_cpus", return_value=[1, 2]): with tests.capture_stdout() as stdout: runner._cpu_affinity() - self.assertEqual(runner.args.affinity, '1-2') - self.assertEqual(stdout.getvalue(), - 'Pin process to isolated CPUs: 1-2\n') + self.assertEqual(runner.args.affinity, "1-2") + self.assertEqual(stdout.getvalue(), "Pin process to isolated CPUs: 1-2\n") mock_setaffinity.assert_called_once_with([1, 2]) def test_cpu_affinity_no_isolcpus(self): - runner = self.create_runner(['-v']) + runner = self.create_runner(["-v"]) - with mock.patch('pyperf._runner.set_cpu_affinity') as mock_setaffinity: - with mock.patch('pyperf._runner.get_isolated_cpus', return_value=None): + with 
mock.patch("pyperf._runner.set_cpu_affinity") as mock_setaffinity: + with mock.patch("pyperf._runner.get_isolated_cpus", return_value=None): runner._cpu_affinity() self.assertFalse(runner.args.affinity) diff --git a/pyperf/tests/test_system.py b/pyperf/tests/test_system.py index e8d519a3..0fb8cdb7 100644 --- a/pyperf/tests/test_system.py +++ b/pyperf/tests/test_system.py @@ -7,13 +7,15 @@ class SystemTests(unittest.TestCase): def test_show(self): - args = [sys.executable, '-m', 'pyperf', 'system', 'show'] + args = [sys.executable, "-m", "pyperf", "system", "show"] proc = get_output(args) - regex = ('(Run "%s -m pyperf system tune" to tune the system configuration to run benchmarks' - '|OK! System ready for benchmarking' - '|WARNING: no operation available for your platform)' - % os.path.basename(sys.executable)) + regex = ( + '(Run "%s -m pyperf system tune" to tune the system configuration to run benchmarks' + "|OK! System ready for benchmarking" + "|WARNING: no operation available for your platform)" + % os.path.basename(sys.executable) + ) self.assertRegex(proc.stdout, regex, msg=proc) # The return code is either 0 if the system is tuned or 2 if the diff --git a/pyperf/tests/test_timeit.py b/pyperf/tests/test_timeit.py index 4b2b9b17..24b6b241 100644 --- a/pyperf/tests/test_timeit.py +++ b/pyperf/tests/test_timeit.py @@ -14,18 +14,14 @@ from pyperf._timeit import Timer -PERF_TIMEIT = (sys.executable, '-m', 'pyperf', 'timeit') +PERF_TIMEIT = (sys.executable, "-m", "pyperf", "timeit") # We only need a statement taking longer than 0 nanosecond -FAST_BENCH_ARGS = ('--debug-single-value', - '-s', 'import time', - 'time.sleep(1e-6)') +FAST_BENCH_ARGS = ("--debug-single-value", "-s", "import time", "time.sleep(1e-6)") FAST_MIN_TIME = 1e-6 # test with a least with two values -COMPARE_BENCH = ('-l1', '-p1', '-w0', '-n3', - '-s', 'import time', - 'time.sleep(1e-6)') +COMPARE_BENCH = ("-l1", "-p1", "-w0", "-n3", "-s", "import time", "time.sleep(1e-6)") -SLEEP = 'time.sleep(1e-3)' +SLEEP = "time.sleep(1e-3)" # The perfect timing is 1 ms +- 0 ms, but tolerate large differences on busy # systems. The unit test doesn't test the system but more the output format. 
MIN_VALUE = 0.9 # ms @@ -34,7 +30,7 @@ MAX_MEAN = MAX_VALUE / 2 MAX_STD_DEV = 10.0 # ms -PYPY = pyperf.python_implementation() == 'pypy' +PYPY = pyperf.python_implementation() == "pypy" def identity(x): @@ -45,7 +41,7 @@ def reindent(src, indent): return src.replace("\n", "\n" + " " * indent) -def template_output(stmt='pass', setup='pass', teardown='pass', init=''): +def template_output(stmt="pass", setup="pass", teardown="pass", init=""): if PYPY: template = textwrap.dedent(""" def inner(_it, _timer{init}): @@ -70,80 +66,99 @@ def inner(_it, _timer{init}): return _t1 - _t0 """) - return template.format(init=init, - stmt=reindent(stmt, 8), - setup=reindent(setup, 4), - teardown=reindent(teardown, 4)) + return template.format( + init=init, + stmt=reindent(stmt, 8), + setup=reindent(setup, 4), + teardown=reindent(teardown, 4), + ) class TestTimeit(unittest.TestCase): - @unittest.skipIf(sys.platform == 'win32', - 'https://github.com/psf/pyperf/issues/97') + @unittest.skipIf(sys.platform == "win32", "https://github.com/psf/pyperf/issues/97") def test_worker_verbose(self): - args = ('--worker', - '-w', '1', - '-n', '2', - '-l', '1', - '--min-time', '0.001', - '--metadata', - '-v', - '-s', 'import time', - SLEEP) + args = ( + "--worker", + "-w", + "1", + "-n", + "2", + "-l", + "1", + "--min-time", + "0.001", + "--metadata", + "-v", + "-s", + "import time", + SLEEP, + ) args = PERF_TIMEIT + args cmd = tests.get_output(args) self.assertEqual(cmd.returncode, 0) - self.assertEqual(cmd.stderr, '') - - match = re.search(r'Warmup 1: ([0-9.]+) ms \(loops: 1, raw: [0-9.]+ ms\)\n' - r'\n' - r'Value 1: ([0-9.]+) ms\n' - r'Value 2: ([0-9.]+) ms\n' - r'\n' - r'Metadata:\n' - r'(- .*\n)+' - r'\n' - r'Mean \+- std dev: (?P<mean>[0-9.]+) ms \+-' - ' (?P<mad>[0-9.]+) ms\n' - r'$', - cmd.stdout) + self.assertEqual(cmd.stderr, "") + + match = re.search( + r"Warmup 1: ([0-9.]+) ms \(loops: 1, raw: [0-9.]+ ms\)\n" + r"\n" + r"Value 1: ([0-9.]+) ms\n" + r"Value 2: ([0-9.]+) ms\n" + r"\n" + r"Metadata:\n" + r"(- .*\n)+" + r"\n" + r"Mean \+- std dev: (?P<mean>[0-9.]+) ms \+-" + " (?P<mad>[0-9.]+) ms\n" + r"$", + cmd.stdout, + ) self.assertIsNotNone(match, repr(cmd.stdout)) values = [float(match.group(i)) for i in range(1, 4)] for value in values: - self.assertTrue(MIN_VALUE <= value <= MAX_VALUE, - repr(value)) + self.assertTrue(MIN_VALUE <= value <= MAX_VALUE, repr(value)) - mean = float(match.group('mean')) + mean = float(match.group("mean")) self.assertTrue(MIN_MEAN <= mean <= MAX_MEAN, mean) - mad = float(match.group('mad')) + mad = float(match.group("mad")) self.assertLessEqual(mad, MAX_STD_DEV) def test_cli(self): - args = ('-p', '2', - '-w', '1', - '-n', '3', - '-l', '4', - '--min-time', '0.001', - '-s', 'import time', - SLEEP) + args = ( + "-p", + "2", + "-w", + "1", + "-n", + "3", + "-l", + "4", + "--min-time", + "0.001", + "-s", + "import time", + SLEEP, + ) args = PERF_TIMEIT + args cmd = tests.get_output(args) self.assertEqual(cmd.returncode, 0) - self.assertEqual(cmd.stderr, '') + self.assertEqual(cmd.stderr, "") # ignore lines before to ignore random warnings like # "ERROR: the benchmark is very unstable" - match = re.search(r'Mean \+- std dev: (?P<mean>[0-9.]+) ms' - r' \+- (?P<mad>[0-9.]+) ms' - r'$', - cmd.stdout.rstrip()) + match = re.search( + r"Mean \+- std dev: (?P<mean>[0-9.]+) ms" + r" \+- (?P<mad>[0-9.]+) ms" + r"$", + cmd.stdout.rstrip(), + ) self.assertIsNotNone(match, repr(cmd.stdout)) # Tolerate large differences on busy systems - mean = float(match.group('mean')) + mean = float(match.group("mean")) self.assertTrue(MIN_MEAN <= mean <= 
MAX_MEAN, mean) - mad = float(match.group('mad')) + mad = float(match.group("mad")) self.assertLessEqual(mad, MAX_STD_DEV) def run_timeit(self, args): @@ -153,21 +168,28 @@ def run_timeit(self, args): def run_timeit_bench(self, args): with tests.temporary_directory() as tmpdir: - filename = os.path.join(tmpdir, 'test.json') - args += ('--output', filename) + filename = os.path.join(tmpdir, "test.json") + args += ("--output", filename) stdout = self.run_timeit(args) bench = pyperf.Benchmark.load(filename) return (bench, stdout) def test_verbose_output(self): - args = ('-p', '2', - '-w', '1', - '-n', '3', - # don't pass --loops to test calibration - '--min-time', '0.001', - '-s', 'import time', - '--verbose', - SLEEP) + args = ( + "-p", + "2", + "-w", + "1", + "-n", + "3", + # don't pass --loops to test calibration + "--min-time", + "0.001", + "-s", + "import time", + "--verbose", + SLEEP, + ) args = PERF_TIMEIT + args # Don't check the exact output, only check that the verbose # mode doesn't fail with an error (non-zero exist code) @@ -175,13 +197,21 @@ def test_verbose_output(self): def test_bench(self): loops = 4 - args = ('-p', '2', - '-w', '1', - '-n', '3', - '-l', str(loops), - '--min-time', '0.001', - '-s', 'import time', - SLEEP) + args = ( + "-p", + "2", + "-w", + "1", + "-n", + "3", + "-l", + str(loops), + "--min-time", + "0.001", + "-s", + "import time", + SLEEP, + ) args = PERF_TIMEIT + args bench, stdout = self.run_timeit_bench(args) @@ -202,8 +232,8 @@ def test_bench(self): def test_append(self): with tests.temporary_directory() as tmpdir: - filename = os.path.join(tmpdir, 'test.json') - args = PERF_TIMEIT + ('--append', filename) + FAST_BENCH_ARGS + filename = os.path.join(tmpdir, "test.json") + args = PERF_TIMEIT + ("--append", filename) + FAST_BENCH_ARGS self.run_timeit(args) bench = pyperf.Benchmark.load(filename) @@ -214,18 +244,18 @@ def test_append(self): self.assertEqual(bench.get_nvalue(), 2) def test_cli_snippet_error(self): - args = PERF_TIMEIT + ('x+1',) + args = PERF_TIMEIT + ("x+1",) cmd = tests.get_output(args) self.assertEqual(cmd.returncode, 1) - self.assertIn('Traceback (most recent call last):', cmd.stderr) + self.assertIn("Traceback (most recent call last):", cmd.stderr) self.assertIn("NameError", cmd.stderr) # When the PyPy program is copied, it fails with "Library path not found" - @unittest.skipIf(pyperf.python_implementation() == 'pypy', - 'pypy program cannot be copied') - @unittest.skipIf(sys.platform == 'win32', - 'https://github.com/psf/pyperf/issues/97') + @unittest.skipIf( + pyperf.python_implementation() == "pypy", "pypy program cannot be copied" + ) + @unittest.skipIf(sys.platform == "win32", "https://github.com/psf/pyperf/issues/97") def test_python_option(self): # Ensure that paths are absolute paths = [os.path.realpath(path) for path in sys.path] @@ -236,8 +266,7 @@ def test_python_option(self): shutil.copy2(sys.executable, tmp_exe) # Run benchmark to check if --python works - args = ('--metadata', - '--python', tmp_exe) + args = ("--metadata", "--python", tmp_exe) args = PERF_TIMEIT + args + FAST_BENCH_ARGS cmd = tests.get_output(args, env=env) finally: @@ -252,42 +281,41 @@ def test_python_option(self): self.assertIn("python_executable: %s" % tmp_exe, cmd.stdout) def test_name(self): - name = 'myname' - args = PERF_TIMEIT + ('--name', name) + FAST_BENCH_ARGS + name = "myname" + args = PERF_TIMEIT + ("--name", name) + FAST_BENCH_ARGS bench, stdout = self.run_timeit_bench(args) self.assertEqual(bench.get_name(), name) - 
self.assertRegex(stdout, re.compile('^%s' % name, flags=re.MULTILINE)) + self.assertRegex(stdout, re.compile("^%s" % name, flags=re.MULTILINE)) def test_inner_loops(self): inner_loops = 17 - args = PERF_TIMEIT + ('--inner-loops', str(inner_loops)) + FAST_BENCH_ARGS + args = PERF_TIMEIT + ("--inner-loops", str(inner_loops)) + FAST_BENCH_ARGS bench, stdout = self.run_timeit_bench(args) metadata = bench.get_metadata() - self.assertEqual(metadata['inner_loops'], inner_loops) + self.assertEqual(metadata["inner_loops"], inner_loops) def test_compare_to(self): - args = ('--compare-to', sys.executable, - '--python-names=ref:changed') + args = ("--compare-to", sys.executable, "--python-names=ref:changed") args = PERF_TIMEIT + args + COMPARE_BENCH cmd = tests.get_output(args) # ".*" and DOTALL ignore stability warnings - expected = textwrap.dedent(r''' + expected = textwrap.dedent(r""" ref: \. [0-9.]+ (?:ms|us) \+- [0-9.]+ (?:ms|us).* changed: \. [0-9.]+ (?:ms|us) \+- [0-9.]+ (?:ms|us).* Mean \+- std dev: \[ref\] .* -> \[changed\] .*: (?:[0-9]+\.[0-9][0-9]x (?:faster|slower)|no change) - ''').strip() + """).strip() expected = re.compile(expected, flags=re.DOTALL) self.assertRegex(cmd.stdout, expected) def test_compare_to_verbose(self): - args = PERF_TIMEIT + ('--compare-to', sys.executable, '--verbose') + args = PERF_TIMEIT + ("--compare-to", sys.executable, "--verbose") args += COMPARE_BENCH cmd = tests.get_output(args) - expected = textwrap.dedent(r''' + expected = textwrap.dedent(r""" Benchmark .* ==========+ @@ -304,41 +332,52 @@ def test_compare_to_verbose(self): ======= Mean \+- std dev: .* -> .*: (?:[0-9]+\.[0-9][0-9]x (?:faster|slower)|no change) - ''').strip() + """).strip() expected = re.compile(expected, flags=re.DOTALL) self.assertRegex(cmd.stdout, expected) def test_compare_to_quiet(self): - args = PERF_TIMEIT + ('--compare-to', sys.executable, '--quiet') + args = PERF_TIMEIT + ("--compare-to", sys.executable, "--quiet") args += COMPARE_BENCH cmd = tests.get_output(args) - expected = r'(?:Mean \+- std dev: .* -> .*: (?:[0-9]+\.[0-9][0-9]x (?:faster|slower)|no change)|Not significant!)' + expected = r"(?:Mean \+- std dev: .* -> .*: (?:[0-9]+\.[0-9][0-9]x (?:faster|slower)|no change)|Not significant!)" self.assertRegex(cmd.stdout, expected) def test_duplicate(self): sleep = 1e-3 duplicate = 10 args = PERF_TIMEIT - args += ('-n3', '-p1', - '--duplicate', str(duplicate), '--loops', '1', - '-s', 'import time', 'time.sleep(%s)' % sleep) + args += ( + "-n3", + "-p1", + "--duplicate", + str(duplicate), + "--loops", + "1", + "-s", + "import time", + "time.sleep(%s)" % sleep, + ) bench, stdout = self.run_timeit_bench(args) metadata = bench.get_metadata() - self.assertEqual(metadata['timeit_duplicate'], duplicate) + self.assertEqual(metadata["timeit_duplicate"], duplicate) for raw_value in bench._get_raw_values(): self.assertGreaterEqual(raw_value, sleep * duplicate) def test_teardown_single_line(self): - args = PERF_TIMEIT + ('--teardown', 'assert 2 == 2') + FAST_BENCH_ARGS + args = PERF_TIMEIT + ("--teardown", "assert 2 == 2") + FAST_BENCH_ARGS cmd = tests.get_output(args) self.assertEqual(cmd.returncode, 0, cmd.stdout + cmd.stderr) def test_teardown_multi_line(self): - args = PERF_TIMEIT + ('--teardown', 'assert 2 == 2', - '--teardown', 'assert 2 == 2') + FAST_BENCH_ARGS + args = ( + PERF_TIMEIT + + ("--teardown", "assert 2 == 2", "--teardown", "assert 2 == 2") + + FAST_BENCH_ARGS + ) cmd = tests.get_output(args) self.assertEqual(cmd.returncode, 0, cmd.stdout + cmd.stderr) @@ -350,42 +389,42 @@ 
def test_raises_if_setup_is_missing(self): Timer(setup=None) err = cm.exception - self.assertEqual(str(err), 'setup is neither a string nor callable') + self.assertEqual(str(err), "setup is neither a string nor callable") def test_raises_if_stmt_is_missing(self): with self.assertRaises(ValueError) as cm: Timer(stmt=None) err = cm.exception - self.assertEqual(str(err), 'stmt is neither a string nor callable') + self.assertEqual(str(err), "stmt is neither a string nor callable") def test_raises_if_teardown_is_missing(self): with self.assertRaises(ValueError) as cm: Timer(teardown=None) err = cm.exception - self.assertEqual(str(err), 'teardown is neither a string nor callable') + self.assertEqual(str(err), "teardown is neither a string nor callable") def test_raises_if_setup_contains_invalid_syntax(self): with self.assertRaises(SyntaxError) as cm: - Timer(setup='foo = 1, 2, *') + Timer(setup="foo = 1, 2, *") err = cm.exception - self.assertTrue('invalid syntax' in str(err)) + self.assertTrue("invalid syntax" in str(err)) def test_raises_if_stmt_contains_invalid_syntax(self): with self.assertRaises(SyntaxError) as cm: - Timer(stmt='foo = 1, 2, *') + Timer(stmt="foo = 1, 2, *") err = cm.exception - self.assertTrue('invalid syntax' in str(err)) + self.assertTrue("invalid syntax" in str(err)) def test_raises_if_teardown_contains_invalid_syntax(self): with self.assertRaises(SyntaxError) as cm: - Timer(teardown='foo = 1, 2, *') + Timer(teardown="foo = 1, 2, *") err = cm.exception - self.assertTrue('invalid syntax' in str(err)) + self.assertTrue("invalid syntax" in str(err)) def test_raises_if_setup_and_stmt_contain_invalid_syntax(self): with self.assertRaises(SyntaxError) as cm: @@ -434,35 +473,37 @@ def test_returns_valid_template_with_all_str_params(self): def test_returns_valid_template_if_setup_is_code(self): setup = identity timer = Timer(setup=setup) - output = template_output(setup='_setup()', init=', _setup=_setup') + output = template_output(setup="_setup()", init=", _setup=_setup") self.assertEqual(timer.src, output) - self.assertDictEqual({'_setup': setup}, timer.local_ns) + self.assertDictEqual({"_setup": setup}, timer.local_ns) def test_returns_valid_template_if_stmt_is_code(self): stmt = identity timer = Timer(stmt=stmt) - output = template_output(stmt='_stmt()', init=', _stmt=_stmt') + output = template_output(stmt="_stmt()", init=", _stmt=_stmt") self.assertEqual(timer.src, output) - self.assertDictEqual({'_stmt': stmt}, timer.local_ns) + self.assertDictEqual({"_stmt": stmt}, timer.local_ns) def test_returns_valid_template_if_teardown_is_code(self): teardown = identity timer = Timer(teardown=teardown) - output = template_output(teardown='_teardown()', - init=', _teardown=_teardown') + output = template_output(teardown="_teardown()", init=", _teardown=_teardown") self.assertEqual(timer.src, output) - self.assertDictEqual({'_teardown': teardown}, timer.local_ns) + self.assertDictEqual({"_teardown": teardown}, timer.local_ns) def test_returns_valid_template_with_all_callable_params(self): setup, stmt, teardown = identity, identity, identity timer = Timer(setup=setup, stmt=stmt, teardown=teardown) - output = template_output(setup='_setup()', stmt='_stmt()', - teardown='_teardown()', - init=', _setup=_setup, _stmt=_stmt, ' - '_teardown=_teardown') + output = template_output( + setup="_setup()", + stmt="_stmt()", + teardown="_teardown()", + init=", _setup=_setup, _stmt=_stmt, _teardown=_teardown", + ) self.assertEqual(timer.src, output) - self.assertDictEqual({'_setup': setup, '_stmt': 
stmt, - '_teardown': teardown}, timer.local_ns) + self.assertDictEqual( + {"_setup": setup, "_stmt": stmt, "_teardown": teardown}, timer.local_ns + ) if __name__ == "__main__": diff --git a/pyperf/tests/test_utils.py b/pyperf/tests/test_utils.py index edf8549c..200e2200 100644 --- a/pyperf/tests/test_utils.py +++ b/pyperf/tests/test_utils.py @@ -4,8 +4,13 @@ from unittest import mock import pyperf -from pyperf._formatter import (format_filesize, format_seconds, format_timedelta, - format_timedeltas, format_number) +from pyperf._formatter import ( + format_filesize, + format_seconds, + format_timedelta, + format_timedeltas, + format_number, +) from pyperf import _cpu_utils as cpu_utils from pyperf import _utils as utils @@ -74,18 +79,12 @@ def test_geometric_mean(self): class TestUtils(unittest.TestCase): - def test_format_seconds(self): - self.assertEqual(format_seconds(0), - "0 sec") - self.assertEqual(format_seconds(316e-4), - "31.6 ms") - self.assertEqual(format_seconds(15.9), - "15.9 sec") - self.assertEqual(format_seconds(3 * 60 + 15.9), - "3 min 15.9 sec") - self.assertEqual(format_seconds(404683.5876653), - "4 day 16 hour 24 min") + self.assertEqual(format_seconds(0), "0 sec") + self.assertEqual(format_seconds(316e-4), "31.6 ms") + self.assertEqual(format_seconds(15.9), "15.9 sec") + self.assertEqual(format_seconds(3 * 60 + 15.9), "3 min 15.9 sec") + self.assertEqual(format_seconds(404683.5876653), "4 day 16 hour 24 min") def test_format_timedelta(self): fmt_delta = format_timedelta @@ -115,118 +114,97 @@ def fmt_stdev(seconds, stdev): def test_format_number(self): # plural - self.assertEqual(format_number(0, 'unit'), '0 units') - self.assertEqual(format_number(1, 'unit'), '1 unit') - self.assertEqual(format_number(2, 'unit'), '2 units') - self.assertEqual(format_number(123, 'unit'), '123 units') + self.assertEqual(format_number(0, "unit"), "0 units") + self.assertEqual(format_number(1, "unit"), "1 unit") + self.assertEqual(format_number(2, "unit"), "2 units") + self.assertEqual(format_number(123, "unit"), "123 units") # powers of 10 - self.assertEqual(format_number(10 ** 3, 'unit'), - '1000 units') - self.assertEqual(format_number(10 ** 4, 'unit'), - '10^4 units') - self.assertEqual(format_number(10 ** 4 + 1, 'unit'), - '10001 units') - self.assertEqual(format_number(33 * 10 ** 4, 'unit'), - '330000 units') + self.assertEqual(format_number(10**3, "unit"), "1000 units") + self.assertEqual(format_number(10**4, "unit"), "10^4 units") + self.assertEqual(format_number(10**4 + 1, "unit"), "10001 units") + self.assertEqual(format_number(33 * 10**4, "unit"), "330000 units") # powers of 10 - self.assertEqual(format_number(2 ** 10, 'unit'), - '1024 units') - self.assertEqual(format_number(2 ** 15, 'unit'), - '2^15 units') - self.assertEqual(format_number(2 ** 15), - '2^15') - self.assertEqual(format_number(2 ** 10 + 1, 'unit'), - '1025 units') + self.assertEqual(format_number(2**10, "unit"), "1024 units") + self.assertEqual(format_number(2**15, "unit"), "2^15 units") + self.assertEqual(format_number(2**15), "2^15") + self.assertEqual(format_number(2**10 + 1, "unit"), "1025 units") def test_format_filesize(self): - self.assertEqual(format_filesize(0), - '0 bytes') - self.assertEqual(format_filesize(1), - '1 byte') - self.assertEqual(format_filesize(10 * 1024), - '10.0 KiB') - self.assertEqual(format_filesize(12.4 * 1024 * 1024), - '12.4 MiB') + self.assertEqual(format_filesize(0), "0 bytes") + self.assertEqual(format_filesize(1), "1 byte") + self.assertEqual(format_filesize(10 * 1024), "10.0 
KiB") + self.assertEqual(format_filesize(12.4 * 1024 * 1024), "12.4 MiB") def test_get_python_names(self): - self.assertEqual(utils.get_python_names('/usr/bin/python3.6', - '/usr/bin/python3.8'), - ('python3.6', 'python3.8')) + self.assertEqual( + utils.get_python_names("/usr/bin/python3.6", "/usr/bin/python3.8"), + ("python3.6", "python3.8"), + ) - self.assertEqual(utils.get_python_names('/bin/python3.6', - '/usr/bin/python3.6'), - ('/bin/python3.6', '/usr/bin/python3.6')) + self.assertEqual( + utils.get_python_names("/bin/python3.6", "/usr/bin/python3.6"), + ("/bin/python3.6", "/usr/bin/python3.6"), + ) class CPUToolsTests(unittest.TestCase): def test_parse_cpu_list(self): parse_cpu_list = cpu_utils.parse_cpu_list - self.assertIsNone(parse_cpu_list('')) - self.assertIsNone(parse_cpu_list('\x00')) - self.assertEqual(parse_cpu_list('0'), - [0]) - self.assertEqual(parse_cpu_list('0-1,5-6'), - [0, 1, 5, 6]) - self.assertEqual(parse_cpu_list('1,3,7'), - [1, 3, 7]) + self.assertIsNone(parse_cpu_list("")) + self.assertIsNone(parse_cpu_list("\x00")) + self.assertEqual(parse_cpu_list("0"), [0]) + self.assertEqual(parse_cpu_list("0-1,5-6"), [0, 1, 5, 6]) + self.assertEqual(parse_cpu_list("1,3,7"), [1, 3, 7]) # tolerate spaces - self.assertEqual(parse_cpu_list(' 1 , 2 '), - [1, 2]) + self.assertEqual(parse_cpu_list(" 1 , 2 "), [1, 2]) # errors - self.assertRaises(ValueError, parse_cpu_list, 'x') - self.assertRaises(ValueError, parse_cpu_list, '1,') + self.assertRaises(ValueError, parse_cpu_list, "x") + self.assertRaises(ValueError, parse_cpu_list, "1,") def test_format_cpu_list(self): - self.assertEqual(cpu_utils.format_cpu_list([0]), - '0') - self.assertEqual(cpu_utils.format_cpu_list([0, 1, 5, 6]), - '0-1,5-6') - self.assertEqual(cpu_utils.format_cpu_list([1, 3, 7]), - '1,3,7') + self.assertEqual(cpu_utils.format_cpu_list([0]), "0") + self.assertEqual(cpu_utils.format_cpu_list([0, 1, 5, 6]), "0-1,5-6") + self.assertEqual(cpu_utils.format_cpu_list([1, 3, 7]), "1,3,7") def test_get_isolated_cpus(self): def check_get(line): def mock_open(*args, **kw): return io.StringIO(line) - with mock.patch('pyperf._utils.open', create=True, side_effect=mock_open): + with mock.patch("pyperf._utils.open", create=True, side_effect=mock_open): return cpu_utils.get_isolated_cpus() # no isolated CPU - self.assertIsNone(check_get('')) + self.assertIsNone(check_get("")) # isolated CPUs - self.assertEqual(check_get('1-2'), [1, 2]) + self.assertEqual(check_get("1-2"), [1, 2]) # /sys/devices/system/cpu/isolated doesn't exist (ex: Windows) - with mock.patch('builtins.open', side_effect=IOError): + with mock.patch("builtins.open", side_effect=IOError): self.assertIsNone(cpu_utils.get_isolated_cpus()) def test_parse_cpu_mask(self): parse_cpu_mask = cpu_utils.parse_cpu_mask - self.assertEqual(parse_cpu_mask('f0'), - 0xf0) - self.assertEqual(parse_cpu_mask('fedcba00,12345678'), - 0xfedcba0012345678) - self.assertEqual(parse_cpu_mask('ffffffff,ffffffff,ffffffff,ffffffff'), - 2**128 - 1) + self.assertEqual(parse_cpu_mask("f0"), 0xF0) + self.assertEqual(parse_cpu_mask("fedcba00,12345678"), 0xFEDCBA0012345678) + self.assertEqual( + parse_cpu_mask("ffffffff,ffffffff,ffffffff,ffffffff"), 2**128 - 1 + ) def test_format_cpu_mask(self): format_cpu_mask = cpu_utils.format_cpu_mask - self.assertEqual(format_cpu_mask(0xf0), - '000000f0') - self.assertEqual(format_cpu_mask(0xfedcba0012345678), - 'fedcba00,12345678') + self.assertEqual(format_cpu_mask(0xF0), "000000f0") + self.assertEqual(format_cpu_mask(0xFEDCBA0012345678), 
"fedcba00,12345678") def test_format_cpus_as_mask(self): format_cpus_as_mask = cpu_utils.format_cpus_as_mask - self.assertEqual(format_cpus_as_mask({4, 5, 6, 7}), - '000000f0') + self.assertEqual(format_cpus_as_mask({4, 5, 6, 7}), "000000f0") if __name__ == "__main__":