// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "benchmark_runner.h"

#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "internal_macros.h"

#ifndef BENCHMARK_OS_WINDOWS
#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT)
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#endif

#include <algorithm>
#include <atomic>
#include <climits>
#include <cmath>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <limits>
#include <memory>
#include <string>
#include <thread>
#include <utility>

#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "internal_macros.h"
#include "log.h"
#include "mutex.h"
#include "perf_counters.h"
#include "re.h"
#include "statistics.h"
#include "string_util.h"
#include "thread_manager.h"
#include "thread_timer.h"

namespace benchmark {

namespace internal {

MemoryManager* memory_manager = nullptr;

ProfilerManager* profiler_manager = nullptr;

namespace {

static constexpr IterationCount kMaxIterations = 1000000000000;
const double kDefaultMinTime =
    std::strtod(::benchmark::kDefaultMinTimeStr, /*p_end*/ nullptr);
BenchmarkReporter::Run CreateRunReport(
    const benchmark::internal::BenchmarkInstance& b,
    const internal::ThreadManager::Result& results,
    IterationCount memory_iterations,
    const MemoryManager::Result* memory_result, double seconds,
    int64_t repetition_index, int64_t repeats) {
  // Create report about this benchmark run.
  BenchmarkReporter::Run report;

  report.run_name = b.name();
  report.family_index = b.family_index();
  report.per_family_instance_index = b.per_family_instance_index();
  report.skipped = results.skipped_;
  report.skip_message = results.skip_message_;
  report.report_label = results.report_label_;
  // This is the total iterations across all threads.
  report.iterations = results.iterations;
  report.time_unit = b.time_unit();
  report.threads = b.threads();
  report.repetition_index = repetition_index;
  report.repetitions = repeats;

  if (!report.skipped) {
    if (b.use_manual_time()) {
      report.real_accumulated_time = results.manual_time_used;
    } else {
      report.real_accumulated_time = results.real_time_used;
    }
    report.use_real_time_for_initial_big_o = b.use_manual_time();
    report.cpu_accumulated_time = results.cpu_time_used;
    report.complexity_n = results.complexity_n;
    report.complexity = b.complexity();
    report.complexity_lambda = b.complexity_lambda();
    report.statistics = &b.statistics();
    report.counters = results.counters;

    if (memory_iterations > 0) {
      assert(memory_result != nullptr);
      report.memory_result = memory_result;
      report.allocs_per_iter =
          memory_iterations ? static_cast<double>(memory_result->num_allocs) /
                                  static_cast<double>(memory_iterations)
                            : 0;
    }

    internal::Finish(&report.counters, results.iterations, seconds,
                     b.threads());
  }
  return report;
}
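
// Worked example (illustrative numbers, not taken from this file): if the
// memory phase ran memory_iterations == 16 and the MemoryManager counted
// memory_result->num_allocs == 64 over those iterations, the report above
// gets report.allocs_per_iter == 64.0 / 16.0 == 4.0.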

// Execute one thread of benchmark b for the specified number of iterations.
// Adds the stats collected for the thread into manager->results.
void RunInThread(const BenchmarkInstance* b, IterationCount iters,
                 int thread_id, ThreadManager* manager,
                 PerfCountersMeasurement* perf_counters_measurement,
                 ProfilerManager* profiler_manager) {
  internal::ThreadTimer timer(
      b->measure_process_cpu_time()
          ? internal::ThreadTimer::CreateProcessCpuTime()
          : internal::ThreadTimer::Create());

  State st = b->Run(iters, thread_id, &timer, manager,
                    perf_counters_measurement, profiler_manager);
  BM_CHECK(st.skipped() || st.iterations() >= st.max_iterations)
      << "Benchmark returned before State::KeepRunning() returned false!";
  {
    MutexLock l(manager->GetBenchmarkMutex());
    internal::ThreadManager::Result& results = manager->results;
    results.iterations += st.iterations();
    results.cpu_time_used += timer.cpu_time_used();
    results.real_time_used += timer.real_time_used();
    results.manual_time_used += timer.manual_time_used();
    results.complexity_n += st.complexity_length_n();
    internal::Increment(&results.counters, st.counters);
  }
  manager->NotifyThreadComplete();
}

double ComputeMinTime(const benchmark::internal::BenchmarkInstance& b,
                      const BenchTimeType& iters_or_time) {
  if (!IsZero(b.min_time())) return b.min_time();
  // If the flag was used to specify the number of iters, then return the
  // default min_time.
  if (iters_or_time.tag == BenchTimeType::ITERS) return kDefaultMinTime;

  return iters_or_time.time;
}

IterationCount ComputeIters(const benchmark::internal::BenchmarkInstance& b,
                            const BenchTimeType& iters_or_time) {
  if (b.iterations() != 0) return b.iterations();

  // We've already concluded that this flag is currently used to pass an
  // iteration count, but check here again anyway.
  BM_CHECK(iters_or_time.tag == BenchTimeType::ITERS);
  return iters_or_time.iters;
}
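
// Resolution order, as a sketch (hypothetical values): a per-benchmark
// setting always wins over the --benchmark_min_time flag.
//   b.min_time() == 2.0,   flag == "1s"   -> ComputeMinTime returns 2.0
//   b.min_time() == 0,     flag == "100x" -> ComputeMinTime returns
//                                            kDefaultMinTime
//   b.iterations() == 500, flag == "100x" -> ComputeIters returns 500
//   b.iterations() == 0,   flag == "100x" -> ComputeIters returns 100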

}  // end namespace

BenchTimeType ParseBenchMinTime(const std::string& value) {
  BenchTimeType ret;

  if (value.empty()) {
    ret.tag = BenchTimeType::TIME;
    ret.time = 0.0;
    return ret;
  }

  if (value.back() == 'x') {
    char* p_end;
    // Reset errno before it's changed by strtol.
    errno = 0;
    IterationCount num_iters = std::strtol(value.c_str(), &p_end, 10);

    // After a valid parse, p_end should have been set to
    // point to the 'x' suffix.
    BM_CHECK(errno == 0 && p_end != nullptr && *p_end == 'x')
        << "Malformed iters value passed to --benchmark_min_time: `" << value
        << "`. Expected --benchmark_min_time=<integer>x.";

    ret.tag = BenchTimeType::ITERS;
    ret.iters = num_iters;
    return ret;
  }

  bool has_suffix = value.back() == 's';
  if (!has_suffix) {
    BM_VLOG(0) << "Value passed to --benchmark_min_time should have a suffix. "
                  "E.g., `30s` for 30 seconds.";
  }

  char* p_end;
  // Reset errno before it's changed by strtod.
  errno = 0;
  double min_time = std::strtod(value.c_str(), &p_end);

  // After a successful parse, p_end should point to the suffix 's',
  // or the end of the string if the suffix was omitted.
  BM_CHECK(errno == 0 && p_end != nullptr &&
           ((has_suffix && *p_end == 's') || *p_end == '\0'))
      << "Malformed seconds value passed to --benchmark_min_time: `" << value
      << "`. Expected --benchmark_min_time=<float>s.";

  ret.tag = BenchTimeType::TIME;
  ret.time = min_time;

  return ret;
}
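
// Example inputs and the BenchTimeType they produce (illustrative only):
//   ParseBenchMinTime("100x") -> {tag: ITERS, iters: 100}
//   ParseBenchMinTime("2.5s") -> {tag: TIME,  time: 2.5}
//   ParseBenchMinTime("2.5")  -> {tag: TIME,  time: 2.5}  (logs a warning
//                                about the missing suffix)
//   ParseBenchMinTime("")     -> {tag: TIME,  time: 0.0}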

BenchmarkRunner::BenchmarkRunner(
    const benchmark::internal::BenchmarkInstance& b_,
    PerfCountersMeasurement* pcm_,
    BenchmarkReporter::PerFamilyRunReports* reports_for_family_)
    : b(b_),
      reports_for_family(reports_for_family_),
      parsed_benchtime_flag(ParseBenchMinTime(FLAGS_benchmark_min_time)),
      min_time(ComputeMinTime(b_, parsed_benchtime_flag)),
      min_warmup_time((!IsZero(b.min_time()) && b.min_warmup_time() > 0.0)
                          ? b.min_warmup_time()
                          : FLAGS_benchmark_min_warmup_time),
      warmup_done(!(min_warmup_time > 0.0)),
      repeats(b.repetitions() != 0 ? b.repetitions()
                                   : FLAGS_benchmark_repetitions),
      has_explicit_iteration_count(b.iterations() != 0 ||
                                   parsed_benchtime_flag.tag ==
                                       BenchTimeType::ITERS),
      pool(static_cast<size_t>(b.threads() - 1)),
      iters(has_explicit_iteration_count
                ? ComputeIters(b_, parsed_benchtime_flag)
                : 1),
      perf_counters_measurement_ptr(pcm_) {
  run_results.display_report_aggregates_only =
      (FLAGS_benchmark_report_aggregates_only ||
       FLAGS_benchmark_display_aggregates_only);
  run_results.file_report_aggregates_only =
      FLAGS_benchmark_report_aggregates_only;
  if (b.aggregation_report_mode() != internal::ARM_Unspecified) {
    run_results.display_report_aggregates_only =
        (b.aggregation_report_mode() &
         internal::ARM_DisplayReportAggregatesOnly);
    run_results.file_report_aggregates_only =
        (b.aggregation_report_mode() & internal::ARM_FileReportAggregatesOnly);
    BM_CHECK(FLAGS_benchmark_perf_counters.empty() ||
             (perf_counters_measurement_ptr->num_counters() == 0))
        << "Perf counters were requested but could not be set up.";
  }
}
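
// Flag interplay, as a sketch (hypothetical invocation): running with
// --benchmark_min_time=100x makes parsed_benchtime_flag an ITERS value, so
// has_explicit_iteration_count is true, iters starts at 100, and the dynamic
// iteration-count search in DoOneRepetition() is skipped. A benchmark-level
// b.repetitions() or b.iterations() always wins over the corresponding flag.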

BenchmarkRunner::IterationResults BenchmarkRunner::DoNIterations() {
  BM_VLOG(2) << "Running " << b.name().str() << " for " << iters << "\n";

  std::unique_ptr<internal::ThreadManager> manager;
  manager.reset(new internal::ThreadManager(b.threads()));

  // Run all but one thread in separate threads
  for (std::size_t ti = 0; ti < pool.size(); ++ti) {
    pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1),
                           manager.get(), perf_counters_measurement_ptr,
                           /*profiler_manager=*/nullptr);
  }
  // And run one thread here directly.
  // (If we were asked to run just one thread, we don't create new threads.)
  // Yes, we need to do this here *after* we start the separate threads.
  RunInThread(&b, iters, 0, manager.get(), perf_counters_measurement_ptr,
              /*profiler_manager=*/nullptr);

  // The main thread has finished. Now let's wait for the other threads.
  manager->WaitForAllThreads();
  for (std::thread& thread : pool) thread.join();

  IterationResults i;
  // Acquire the measurements/counters from the manager, UNDER THE LOCK!
  {
    MutexLock l(manager->GetBenchmarkMutex());
    i.results = manager->results;
  }

  // And get rid of the manager.
  manager.reset();

  BM_VLOG(2) << "Ran in " << i.results.cpu_time_used << "/"
             << i.results.real_time_used << "\n";

  // By using KeepRunningBatch a benchmark can iterate more times than
  // requested, so take the iteration count from i.results.
  i.iters = i.results.iterations / b.threads();

  // Base decisions off of real time if requested by this benchmark.
  i.seconds = i.results.cpu_time_used;
  if (b.use_manual_time()) {
    i.seconds = i.results.manual_time_used;
  } else if (b.use_real_time()) {
    i.seconds = i.results.real_time_used;
  }

  return i;
}
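
// Illustrative example: with b.threads() == 4 and each thread running 1000
// iterations, results.iterations == 4000, so i.iters == 1000. The measurement
// driving the stopping decision, i.seconds, comes from manual_time_used for
// manual-time benchmarks (State::SetIterationTime()), real_time_used when
// UseRealTime() was requested, and cpu_time_used otherwise.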

IterationCount BenchmarkRunner::PredictNumItersNeeded(
    const IterationResults& i) const {
  // See by how much the iteration count should be increased.
  // Note: Avoid division by zero with max(seconds, 1ns).
  double multiplier = GetMinTimeToApply() * 1.4 / std::max(i.seconds, 1e-9);
  // If our last run was at least 10% of FLAGS_benchmark_min_time then we
  // use the multiplier directly.
  // Otherwise we use at most 10 times expansion.
  // NOTE: When the last run was at least 10% of the min time the max
  // expansion should be 14x.
  const bool is_significant = (i.seconds / GetMinTimeToApply()) > 0.1;
  multiplier = is_significant ? multiplier : 10.0;

  // So what seems to be the sufficiently-large iteration count? Round up.
  const IterationCount max_next_iters = static_cast<IterationCount>(
      std::llround(std::max(multiplier * static_cast<double>(i.iters),
                            static_cast<double>(i.iters) + 1.0)));
  // But we do have *some* limits, though.
  const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);

  BM_VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
  return next_iters;  // already rounded up before the conversion to integer.
}
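
// Worked example (illustrative numbers): with GetMinTimeToApply() == 1.0s and
// a last run of i.iters == 1000 taking i.seconds == 0.2s, the run is
// "significant" (0.2 / 1.0 > 0.1), so
//   multiplier = 1.0 * 1.4 / 0.2 = 7.0  ->  next_iters = 7000.
// Had the run taken only 0.05s, the computed multiplier (28x) would be
// replaced by the 10x cap and next_iters would be 10000.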

bool BenchmarkRunner::ShouldReportIterationResults(
    const IterationResults& i) const {
  // Determine if this run should be reported:
  // either it has run for a sufficient amount of time,
  // or an error was reported.
  return i.results.skipped_ ||
         i.iters >= kMaxIterations ||  // Too many iterations already.
         i.seconds >=
             GetMinTimeToApply() ||  // The elapsed time is large enough.
         // CPU time is specified but the elapsed real time greatly exceeds
         // the minimum time.
         // Note that user-provided timers are exempt from this test.
         ((i.results.real_time_used >= 5 * GetMinTimeToApply()) &&
          !b.use_manual_time());
}
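
// Illustrative example: with GetMinTimeToApply() == 1.0s, a CPU-time-based
// benchmark that consumed only 0.3s of CPU time is still reported once it has
// used >= 5.0s of real time, so a mostly-sleeping benchmark cannot grow its
// iteration count forever. Manual-time benchmarks skip that real-time escape
// hatch.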

double BenchmarkRunner::GetMinTimeToApply() const {
  // To reuse the same run-and-measure machinery for the warmup phase of a
  // benchmark, we need a way of telling whether to apply min_time or
  // min_warmup_time. This function figures out whether we are still in the
  // warmup phase (and therefore need to apply min_warmup_time) or whether we
  // are already in the benchmarking phase (and min_time needs to be applied).
  return warmup_done ? min_time : min_warmup_time;
}

void BenchmarkRunner::FinishWarmUp(const IterationCount& i) {
  warmup_done = true;
  iters = i;
}

void BenchmarkRunner::RunWarmUp() {
  // Use the same mechanisms for warming up the benchmark as used for actually
  // running and measuring the benchmark.
  IterationResults i_warmup;
  // Don't use the iterations determined in the warmup phase for the actual
  // measured benchmark phase. While this may be a good starting point for the
  // benchmark (and it would remove the need to figure out how many iterations
  // are needed if min_time is set), it may also be a completely wrong guess,
  // since the warmup loops might be considerably slower (e.g. because of
  // caching effects).
  const IterationCount i_backup = iters;

  for (;;) {
    b.Setup();
    i_warmup = DoNIterations();
    b.Teardown();

    const bool finish = ShouldReportIterationResults(i_warmup);

    if (finish) {
      FinishWarmUp(i_backup);
      break;
    }

    // Although this is "only" a warmup phase, where running enough iterations
    // at once without measuring time isn't as important as it is for the
    // benchmarking phase, we still grow the iteration count the same way;
    // otherwise it would be very confusing for the user to choose a proper
    // value for min_warmup_time.
    iters = PredictNumItersNeeded(i_warmup);
    assert(iters > i_warmup.iters &&
           "if we did more iterations than we want to do the next time, "
           "then we should have accepted the current iteration run.");
  }
}

MemoryManager::Result* BenchmarkRunner::RunMemoryManager(
    IterationCount memory_iterations) {
  // TODO(vyng): Consider making BenchmarkReporter::Run::memory_result an
  // optional so we don't have to own the Result here.
  // Can't do it now due to cxx03.
  memory_results.push_back(MemoryManager::Result());
  MemoryManager::Result* memory_result = &memory_results.back();
  memory_manager->Start();
  std::unique_ptr<internal::ThreadManager> manager;
  manager.reset(new internal::ThreadManager(1));
  b.Setup();
  RunInThread(&b, memory_iterations, 0, manager.get(),
              perf_counters_measurement_ptr,
              /*profiler_manager=*/nullptr);
  manager->WaitForAllThreads();
  manager.reset();
  b.Teardown();
  memory_manager->Stop(*memory_result);
  return memory_result;
}

void BenchmarkRunner::RunProfilerManager() {
  // TODO: Provide a way to specify the number of iterations.
  IterationCount profile_iterations = 1;
  std::unique_ptr<internal::ThreadManager> manager;
  manager.reset(new internal::ThreadManager(1));
  b.Setup();
  RunInThread(&b, profile_iterations, 0, manager.get(),
              /*perf_counters_measurement_ptr=*/nullptr,
              /*profiler_manager=*/profiler_manager);
  manager->WaitForAllThreads();
  manager.reset();
  b.Teardown();
}

void BenchmarkRunner::DoOneRepetition() {
  assert(HasRepeatsRemaining() && "Already done all repetitions?");

  const bool is_the_first_repetition = num_repetitions_done == 0;

  // In case a warmup phase is requested by the benchmark, run it now.
  // After running the warmup phase the BenchmarkRunner should be in a state
  // as if this warmup never happened, except that warmup_done is set. Every
  // other manipulation of the BenchmarkRunner instance would be a bug! Please
  // fix it.
  if (!warmup_done) RunWarmUp();

  IterationResults i;
  // We *may* be gradually increasing the length (iteration count)
  // of the benchmark until we decide the results are significant.
  // And once we do, we report those last results and exit.
  // Please do note that if there are repetitions, the iteration count
  // is *only* calculated for the *first* repetition, and other repetitions
  // simply use that precomputed iteration count.
  for (;;) {
    b.Setup();
    i = DoNIterations();
    b.Teardown();

    // Do we consider the results to be significant?
    // If we are doing repetitions, and the first repetition was already done,
    // it has calculated the correct iteration count, so we have run that very
    // iteration count just now. No need to calculate anything. Just report.
    // Else, the normal rules apply.
    const bool results_are_significant = !is_the_first_repetition ||
                                         has_explicit_iteration_count ||
                                         ShouldReportIterationResults(i);

    if (results_are_significant) break;  // Good, let's report them!

    // Nope, bad iteration. Let's re-estimate the hopefully-sufficient
    // iteration count, and run the benchmark again...

    iters = PredictNumItersNeeded(i);
    assert(iters > i.iters &&
           "if we did more iterations than we want to do the next time, "
           "then we should have accepted the current iteration run.");
  }

  // Produce memory measurements if requested.
  MemoryManager::Result* memory_result = nullptr;
  IterationCount memory_iterations = 0;
  if (memory_manager != nullptr) {
    // Only run a few iterations to reduce the impact of one-time
    // allocations in benchmarks that are not properly managed.
    memory_iterations = std::min<IterationCount>(16, iters);
    memory_result = RunMemoryManager(memory_iterations);
  }

  if (profiler_manager != nullptr) {
    RunProfilerManager();
  }

  // Ok, now actually report.
  BenchmarkReporter::Run report =
      CreateRunReport(b, i.results, memory_iterations, memory_result, i.seconds,
                      num_repetitions_done, repeats);

  if (reports_for_family) {
    ++reports_for_family->num_runs_done;
    if (!report.skipped) reports_for_family->Runs.push_back(report);
  }

  run_results.non_aggregates.push_back(report);

  ++num_repetitions_done;
}

RunResults&& BenchmarkRunner::GetResults() {
  assert(!HasRepeatsRemaining() && "Did not run all repetitions yet?");

  // Calculate additional statistics over the repetitions of this instance.
  run_results.aggregates_only = ComputeStats(run_results.non_aggregates);

  return std::move(run_results);
}
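
// A minimal sketch of how a caller is expected to drive this class (see
// RunBenchmark() in benchmark.cc; paraphrased, not copied):
//   BenchmarkRunner runner(instance, pcm, reports_for_family);
//   while (runner.HasRepeatsRemaining()) runner.DoOneRepetition();
//   RunResults results = runner.GetResults();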

}  // end namespace internal

}  // end namespace benchmark