-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbenchmark.py
More file actions
63 lines (48 loc) · 2.08 KB
/
benchmark.py
File metadata and controls
63 lines (48 loc) · 2.08 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
import subprocess, tempfile, re
from typing import Tuple, Any, Optional
# Total number of benchmark runs to execute.
BENCHMARK_SIZE: int = 1
# Number of benchmark subprocesses launched concurrently per batch.
BENCHMARK_BATCH_SIZE: int = 1
# Banner printed once at the start of a benchmarking session.
BENCHMARK_MSG = f"""
======================================================
ProbFX RoadMarkings Benchmarking Suite.
------------------------------------------------------
This scripts takes random parameters for the target
road, synthetically generates the road and gets the
accuracy.
{BENCHMARK_SIZE} benchmarks are run in batches of {BENCHMARK_BATCH_SIZE}.
"""
def start_benchmark(iteration: int) -> Tuple[int, Any, Any]:
    """Launch a single benchmark run asynchronously.

    Spawns the cabal benchmark executable through a shell pipeline
    (the grep filter requires ``shell=True``), capturing stdout and
    stderr into an anonymous temporary file.

    Args:
        iteration: index of this benchmark run, passed to the executable.

    Returns:
        A triple of (iteration, capture file, Popen handle) to be
        passed on to ``end_benchmark``.
    """
    print(f"Starting Benchmark: {iteration:3}")
    capture = tempfile.TemporaryFile()
    command = f"cabal run ProbFX-RoadMarkings -- {iteration} | grep \"FINAL ACCURACY: \""
    proc = subprocess.Popen([command], stdout=capture, stderr=capture, shell=True)
    return (iteration, capture, proc)
def extract_benchmark(output: bytes) -> Optional[float]:
    """Parse the final accuracy from one line of benchmark output.

    Args:
        output: raw bytes of a line expected to start with
            ``FINAL ACCURACY: `` followed by a decimal number.

    Returns:
        The parsed accuracy as a float, or None (after logging a
        message) when the line does not match.
    """
    # Raw string for the regex: "\d" in a plain literal is a
    # SyntaxWarning on Python >= 3.12.
    if (res := re.match(r"FINAL ACCURACY: *(\d.*)[\n ]*", output.decode("utf-8"))) is not None:
        (acc,) = res.groups()
        return float(acc)
    print(f"Failed to extract accuracy from {output}")
    # Explicit None on the failure path (previously implicit).
    return None
def end_benchmark(iteration: int, output: Any, process: Any) -> Optional[float]:
    """Wait for one benchmark subprocess and collect its accuracy.

    Args:
        iteration: index of the benchmark run (for logging only).
        output: the capture file returned by ``start_benchmark``.
        process: the Popen handle returned by ``start_benchmark``.

    Returns:
        The parsed accuracy, or None when the run produced no
        parsable output.
    """
    print(f"Waiting on benchmark:{iteration:3}")
    process.wait()
    output.seek(0)
    contents = output.readlines()
    output.close()
    print(f"Completed Benchmark: {iteration:3}")
    # grep may have matched nothing, leaving the capture file empty;
    # previously contents[-1] raised IndexError in that case.
    if not contents:
        print(f"No output captured for benchmark {iteration}")
        return None
    return extract_benchmark(contents[-1])
def run_benchmarks(iterations: int) -> float:
    """Run ``iterations`` benchmarks in batches and report the mean accuracy.

    Benchmarks are launched ``BENCHMARK_BATCH_SIZE`` at a time; runs
    whose output cannot be parsed are dropped from the statistics.

    Args:
        iterations: total number of benchmark runs to execute.

    Returns:
        The mean accuracy as a percentage, or 0.0 when no run
        produced a parsable accuracy.
    """
    print(BENCHMARK_MSG)
    results: list[float] = []
    for batch in range(0, iterations, BENCHMARK_BATCH_SIZE):
        benchmarks = [start_benchmark(i) for i in range(
            batch, min(batch + BENCHMARK_BATCH_SIZE, iterations))]
        results.extend([res for bench in benchmarks if (res := end_benchmark(*bench)) is not None])
    print("\nList of accuracies:")
    print(' '.join([f"{round(f * 100, 2)}%" for f in results]))
    # Previously this divided by len(results) unconditionally and
    # crashed with ZeroDivisionError when every run failed to parse.
    if not results:
        print("Average accuracy: n/a (no successful benchmarks)")
        return 0.0
    mean: float = 100 * sum(results) / len(results)
    print(f"Average accuracy: {round(mean,2):3}")
    return mean
if __name__ == "__main__":
    # Script entry point: run the full configured benchmark suite.
    run_benchmarks(BENCHMARK_SIZE)