#!/usr/local/bin/recon
require 'strict'

local benchrun = require 'benchrun'
local perfdata = require 'perfdata'
local csv = require 'csv'
local sysctl = require 'sysctl'
local os = require 'os'

local kDefaultDuration = 30

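-- Set up the benchmark harness and register the command-line options this
-- script accepts: worker counts, the path to the fault throughput binary,
-- the per-iteration duration, and the benchmark variant.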
local benchmark = benchrun.new {
    name = 'xnu.zero_fill_fault_throughput',
    version = 1,
    arg = arg,
    modify_argparser = function(parser)
        parser:option{
            name = '--cpu-workers',
            description = 'Number of cpu workers'
        }
        parser:flag{
            name = '--through-max-workers',
            description = 'Run benchmark for [1..n] cpu workers'
        }
        parser:flag{
            name = '--through-max-workers-fast',
            description = 'Run benchmark for [1..2] and each power of four value in [4..n] cpu workers'
        }
        parser:option{
            name = '--path',
            description = 'Path to fault throughput binary'
        }
        parser:option{
            name = '--duration',
            description = 'How long, in seconds, to run each iteration',
            default = kDefaultDuration
        }
        parser:option{
            name = '--variant',
            description = 'Which benchmark variant to run (separate-objects or share-objects)',
            default = 'separate-objects'
        }
    end
}

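-- Validate the required options before queueing any work.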
assert(benchmark.opt.path, "No path supplied for fault throughput binary")
assert(benchmark.opt.variant == "separate-objects" or
    benchmark.opt.variant == "share-objects", "Unsupported benchmark variant")

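-- Default to one worker per logical CPU unless --cpu-workers overrides it.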
local ncpus, err = sysctl('hw.logicalcpu_max')
assert(ncpus > 0, 'invalid number of logical cpus')
local cpu_workers = tonumber(benchmark.opt.cpu_workers) or ncpus

local unit = perfdata.unit.custom('pages/sec')
local tests = {}

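-- Queue a single run of the fault throughput binary on the given number of cores.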
function QueueTest(num_cores)
    table.insert(tests, {
        path = benchmark.opt.path,
        num_cores = num_cores,
    })
end

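-- Build the run list:
--   --through-max-workers sweeps every worker count from 1 to cpu_workers,
--   --through-max-workers-fast samples 1, 2, and each power of four up to cpu_workers,
--   otherwise a single run is queued at cpu_workers.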
if benchmark.opt.through_max_workers then
    for i = 1, cpu_workers do
        QueueTest(i)
    end
elseif benchmark.opt.through_max_workers_fast then
    local i = 1
    while i <= cpu_workers do
        QueueTest(i)
        -- Always do a run with two threads to see what the first part of
        -- the scaling curve looks like
        -- (and to measure perf on dual core systems).
        if i == 1 and cpu_workers >= 2 then
            QueueTest(i + 1)
        end
        i = i * 4
    end
else
    QueueTest(cpu_workers)
end

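-- Run each queued test, parse the CSV that follows the "-----Results-----"
-- marker in the binary's output, and record every column as a pages/sec value
-- tagged with the thread count and variant.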
for _, test in ipairs(tests) do
    local args = {test.path, "-v", benchmark.opt.variant, benchmark.opt.duration, test.num_cores,
        echo = true}
    for out in benchmark:run(args) do
        local result = out:match("-----Results-----\n(.*)")
        benchmark:assert(result, "Unable to find result data in output")
        local data = csv.openstring(result, {header = true})
        for field in data:lines() do
            for k, v in pairs(field) do
                benchmark.writer:add_value(k, unit, tonumber(v), {
                    [perfdata.larger_better] = true,
                    threads = test.num_cores,
                    variant = benchmark.opt.variant
                })
            end
        end
    end
end

benchmark:finish()
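-- Example invocation (script name and binary path are hypothetical):
--   recon zero_fill_fault_throughput.lua --path /path/to/fault_throughput \
--       --variant separate-objects --through-max-workers-fast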