psf / pyperf

Toolkit to run Python benchmarks
http://pyperf.readthedocs.io/
MIT License

bench_func returning None for all but the first benchmark #166

Open axman6 opened 1 year ago

axman6 commented 1 year ago

I'm trying to write a benchmark suite for several functions, using a BenchmarkSuite to write the results to a JSON file. I have the following code (irrelevant parts removed):

from typing import List

import pyperf
import decimation_accel as dec
import decimation_accel_opt as opt

results_file = 'decimation.json'

runner = pyperf.Runner()

def main() -> None:

    # alloc_args is defined elsewhere (removed above as irrelevant)
    small = alloc_args(1024)
    small10 = alloc_args(1024, rate=10)
    small100 = alloc_args(1024, rate=100)
    medium = alloc_args(1024 * 10)
    benchmarks: List[pyperf.Benchmark] = []

    benchmarks.append(runner.bench_func('dec.decimate_minmax(small) all', dec.minmax, *small))
    benchmarks.append(runner.bench_func('opt.decimate_minmax(small) all', opt.minmax, *small))
    benchmarks.append(runner.bench_func('dec.decimate_minmax(small10) all', dec.minmax, *small10))
    benchmarks.append(runner.bench_func('opt.decimate_minmax(small10) all', opt.minmax, *small10))
    benchmarks.append(runner.bench_func('dec.decimate_minmax(small100) all', dec.minmax, *small100))
    benchmarks.append(runner.bench_func('opt.decimate_minmax(small100) all', opt.minmax, *small100))
    benchmarks.append(runner.bench_func('dec.decimate_minmax(medium) all', dec.minmax, *medium))
    benchmarks.append(runner.bench_func('opt.decimate_minmax(medium) all', opt.minmax, *medium))

    print(benchmarks)

    suite = pyperf.BenchmarkSuite(benchmarks)  # filename='decimation.json')

    suite.dump(results_file)

if __name__ == "__main__":
    main()

However, if I run this file, I get the following output:

[<Benchmark 'dec.decimate_minmax(small) all' with 1 runs>, None, None, None, None, None, None, None]
Traceback (most recent call last):
  File "benchmarks.py", line 60, in <module>
    main()
  File "benchmarks.py", line 54, in main
    suite = pyperf.BenchmarkSuite(benchmarks)  # filename='decimation.json')
  File "<REMOVED>/venv-local/lib/python3.8/site-packages/pyperf/_bench.py", line 639, in __init__
    self.add_benchmark(benchmark)
  File "<REMOVED>/venv-local/lib/python3.8/site-packages/pyperf/_bench.py", line 687, in add_benchmark
    name = benchmark.get_name()
AttributeError: 'NoneType' object has no attribute 'get_name'
Traceback (most recent call last):
  File "benchmarks.py", line 60, in <module>
    main()
  File "benchmarks.py", line 39, in main
    benchmarks.append(runner.bench_func('dec.decimate_minmax(small) all', dec.minmax, *small))
  File "<REMOVED>/venv-local/lib/python3.8/site-packages/pyperf/_runner.py", line 537, in bench_func
    result = self._main(task)
  File "<REMOVED>/venv-local/lib/python3.8/site-packages/pyperf/_runner.py", line 460, in _main
    bench = self._manager()
  File "<REMOVED>/venv-local/lib/python3.8/site-packages/pyperf/_runner.py", line 673, in _manager
    bench = Manager(self).create_bench()
  File "<REMOVED>/venv-local/lib/python3.8/site-packages/pyperf/_manager.py", line 232, in create_bench
    worker_bench, run = self.create_worker_bench()
  File "<REMOVED>/venv-local/lib/python3.8/site-packages/pyperf/_manager.py", line 131, in create_worker_bench
    suite = self.create_suite()
  File "<REMOVED>/venv-local/lib/python3.8/site-packages/pyperf/_manager.py", line 121, in create_suite
    suite = self.spawn_worker(self.calibrate_loops, 0)
  File "<REMOVED>/venv-local/lib/python3.8/site-packages/pyperf/_manager.py", line 107, in spawn_worker
    raise RuntimeError("%s failed with exit code %s"
RuntimeError: <REMOVED>/venv-local/bin/python failed with exit code 1

If I don't collect the results in the list, all the benchmarks appear to run fine, but then I have no way to gather the results into a suite.
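
As a possible workaround, the sketch below (untested, and based on my assumption that the None values show up because pyperf's worker subprocesses re-execute this script) filters out the None entries before building the suite, which at least avoids the AttributeError:

# Sketch of a workaround, assuming the None entries come from pyperf
# worker subprocesses re-executing this script: keep only the
# Benchmark objects that actually came back.
real_benchmarks = [b for b in benchmarks if b is not None]
if real_benchmarks:
    suite = pyperf.BenchmarkSuite(real_benchmarks)
    # replace=True so a second dump doesn't fail on an existing file
    suite.dump(results_file, replace=True)

This only papers over the crash, though; it doesn't explain why only the first bench_func call returns a Benchmark.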