// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "internal_macros.h"

#ifndef BENCHMARK_OS_WINDOWS
#include <sys/resource.h>
#include <sys/time.h>
#include <unistd.h>
#endif

#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <memory>
#include <thread>

#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "log.h"
#include "mutex.h"
#include "re.h"
#include "stat.h"
#include "string_util.h"
#include "sysinfo.h"
#include "timers.h"
DEFINE_bool(benchmark_list_tests, false,
            "Print a list of benchmarks. This option overrides all other "
            "options.");

DEFINE_string(benchmark_filter, ".",
              "A regular expression that specifies the set of benchmarks "
              "to execute.  If this flag is empty, no benchmarks are run.  "
              "If this flag is the string \"all\", all benchmarks linked "
              "into the process are run.");

DEFINE_double(benchmark_min_time, 0.5,
              "Minimum number of seconds we should run a benchmark before "
              "results are considered significant.  For cpu-time based "
              "tests, this is the lower bound on the total cpu time "
              "used by all threads that make up the test.  For real-time "
              "based tests, this is the lower bound on the elapsed time "
              "of the benchmark execution, regardless of number of "
              "threads.");

DEFINE_int32(benchmark_repetitions, 1,
             "The number of runs of each benchmark. If greater than 1, the "
             "mean and standard deviation of the runs will be reported.");

DEFINE_bool(benchmark_report_aggregates_only, false,
            "Whether to report only aggregate results for repeated "
            "benchmarks. When 'true', only the mean, standard deviation, "
            "and other aggregate statistics are reported, not the "
            "individual repetitions.");

DEFINE_string(benchmark_format, "console",
              "The format to use for console output. Valid values are "
              "'console', 'json', or 'csv'.");

DEFINE_string(benchmark_out_format, "json",
              "The format to use for file output. Valid values are "
              "'console', 'json', or 'csv'.");

DEFINE_string(benchmark_out, "", "The file to write additional output to");

DEFINE_string(benchmark_color, "auto",
              "Whether to use colors in the output.  Valid values: "
              "'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use "
              "colors if the output is being sent to a terminal and the TERM "
              "environment variable is set to a terminal type that supports "
              "colors.");

DEFINE_int32(v, 0, "The level of verbose logging to output");
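
// Illustrative only: a sketch of how these flags are typically passed on the
// command line (the binary name "mybench" is hypothetical):
//
//   ./mybench --benchmark_filter=BM_memcpy.* \
//             --benchmark_repetitions=5 \
//             --benchmark_out=results.json --benchmark_out_format=json
//
// ParseCommandLineFlags() below consumes recognized flags and leaves any
// unrecognized arguments in argv for the caller.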

namespace benchmark {
namespace internal {

void UseCharPointer(char const volatile*) {}

}  // end namespace internal

namespace {

static const size_t kMaxIterations = 1000000000;

}  // end namespace

namespace internal {

class ThreadManager {
 public:
  explicit ThreadManager(int num_threads)
      : alive_threads_(num_threads), start_stop_barrier_(num_threads) {}

  Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
    return benchmark_mutex_;
  }

  bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
    return start_stop_barrier_.wait();
  }

  void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
    start_stop_barrier_.removeThread();
    if (--alive_threads_ == 0) {
      MutexLock lock(end_cond_mutex_);
      end_condition_.notify_all();
    }
  }

  void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
    MutexLock lock(end_cond_mutex_);
    end_condition_.wait(lock.native_handle(),
                        [this]() { return alive_threads_ == 0; });
  }

 public:
  struct Result {
    double real_time_used = 0;
    double cpu_time_used = 0;
    double manual_time_used = 0;
    int64_t bytes_processed = 0;
    int64_t items_processed = 0;
    int complexity_n = 0;
    std::string report_label_;
    std::string error_message_;
    bool has_error_ = false;
  };
  GUARDED_BY(GetBenchmarkMutex()) Result results;

 private:
  mutable Mutex benchmark_mutex_;
  std::atomic<int> alive_threads_;
  Barrier start_stop_barrier_;
  Mutex end_cond_mutex_;
  Condition end_condition_;
};
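
// Illustrative only: the intended ThreadManager lifecycle, sketched for a
// hypothetical two-thread run. Each worker synchronizes on StartStopBarrier()
// before and after its timed region (via State::StartKeepRunning() and
// State::FinishKeepRunning()), then calls NotifyThreadComplete() when done;
// the coordinating thread calls WaitForAllThreads() before reading `results`
// under GetBenchmarkMutex().
//
//   internal::ThreadManager mgr(/*num_threads=*/2);
//   // ... spawn workers that each run RunInThread(&b, iters, ti, &mgr) ...
//   mgr.WaitForAllThreads();
//   // safe to copy mgr.results now (still taking the mutex, as done below)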

// Timer management class
class ThreadTimer {
 public:
  ThreadTimer() = default;

  // Called by each thread
  void StartTimer() {
    running_ = true;
    start_real_time_ = ChronoClockNow();
    start_cpu_time_ = ThreadCPUUsage();
  }

  // Called by each thread
  void StopTimer() {
    CHECK(running_);
    running_ = false;
    real_time_used_ += ChronoClockNow() - start_real_time_;
    cpu_time_used_ += ThreadCPUUsage() - start_cpu_time_;
  }

  // Called by each thread
  void SetIterationTime(double seconds) { manual_time_used_ += seconds; }

  bool running() const { return running_; }

  // REQUIRES: timer is not running
  double real_time_used() {
    CHECK(!running_);
    return real_time_used_;
  }

  // REQUIRES: timer is not running
  double cpu_time_used() {
    CHECK(!running_);
    return cpu_time_used_;
  }

  // REQUIRES: timer is not running
  double manual_time_used() {
    CHECK(!running_);
    return manual_time_used_;
  }

 private:
  bool running_ = false;        // Is the timer running?
  double start_real_time_ = 0;  // Valid only while running_
  double start_cpu_time_ = 0;   // Valid only while running_

  // Accumulated time so far (does not contain current slice if running_)
  double real_time_used_ = 0;
  double cpu_time_used_ = 0;
  // Manually set iteration time. User sets this with SetIterationTime(seconds).
  double manual_time_used_ = 0;
};
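
// Illustrative only: how State drives a ThreadTimer (mirrors the
// Pause/Resume logic further down in this file):
//
//   internal::ThreadTimer t;
//   t.StartTimer();
//   // ... timed work ...
//   t.StopTimer();
//   double cpu = t.cpu_time_used();  // only valid once the timer is stopped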

namespace {

BenchmarkReporter::Run CreateRunReport(
    const benchmark::internal::Benchmark::Instance& b,
    const internal::ThreadManager::Result& results, size_t iters,
    double seconds) {
  // Create report about this benchmark run.
  BenchmarkReporter::Run report;

  report.benchmark_name = b.name;
  report.error_occurred = results.has_error_;
  report.error_message = results.error_message_;
  report.report_label = results.report_label_;
  // Report the total iterations across all threads.
  report.iterations = static_cast<int64_t>(iters) * b.threads;
  report.time_unit = b.time_unit;

  if (!report.error_occurred) {
    double bytes_per_second = 0;
    if (results.bytes_processed > 0 && seconds > 0.0) {
      bytes_per_second = (results.bytes_processed / seconds);
    }
    double items_per_second = 0;
    if (results.items_processed > 0 && seconds > 0.0) {
      items_per_second = (results.items_processed / seconds);
    }

    if (b.use_manual_time) {
      report.real_accumulated_time = results.manual_time_used;
    } else {
      report.real_accumulated_time = results.real_time_used;
    }
    report.cpu_accumulated_time = results.cpu_time_used;
    report.bytes_per_second = bytes_per_second;
    report.items_per_second = items_per_second;
    report.complexity_n = results.complexity_n;
    report.complexity = b.complexity;
    report.complexity_lambda = b.complexity_lambda;
  }
  return report;
}
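
// Worked example with illustrative numbers: a run where the threads
// accumulated bytes_processed = 1e9 over seconds = 0.5 yields
// bytes_per_second = 1e9 / 0.5 = 2e9; with items_processed = 4e6 the same
// run yields items_per_second = 4e6 / 0.5 = 8e6.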

// Execute one thread of benchmark b for the specified number of iterations.
// Adds the stats collected for the thread into manager->results.
void RunInThread(const benchmark::internal::Benchmark::Instance* b,
                 size_t iters, int thread_id,
                 internal::ThreadManager* manager) {
  internal::ThreadTimer timer;
  State st(iters, b->arg, thread_id, b->threads, &timer, manager);
  b->benchmark->Run(st);
  CHECK(st.iterations() == st.max_iterations)
      << "Benchmark returned before State::KeepRunning() returned false!";
  {
    MutexLock l(manager->GetBenchmarkMutex());
    internal::ThreadManager::Result& results = manager->results;
    results.cpu_time_used += timer.cpu_time_used();
    results.real_time_used += timer.real_time_used();
    results.manual_time_used += timer.manual_time_used();
    results.bytes_processed += st.bytes_processed();
    results.items_processed += st.items_processed();
    results.complexity_n += st.complexity_length_n();
  }
  manager->NotifyThreadComplete();
}

std::vector<BenchmarkReporter::Run> RunBenchmark(
    const benchmark::internal::Benchmark::Instance& b,
    std::vector<BenchmarkReporter::Run>* complexity_reports) {
  std::vector<BenchmarkReporter::Run> reports;  // return value

  size_t iters = 1;
  std::unique_ptr<internal::ThreadManager> manager;
  std::vector<std::thread> pool(b.threads - 1);
  const int repeats =
      b.repetitions != 0 ? b.repetitions : FLAGS_benchmark_repetitions;
  const bool report_aggregates_only =
      repeats != 1 &&
      (b.report_mode == internal::RM_Unspecified
           ? FLAGS_benchmark_report_aggregates_only
           : b.report_mode == internal::RM_ReportAggregatesOnly);
  for (int i = 0; i < repeats; i++) {
    for (;;) {
      // Try benchmark
      VLOG(2) << "Running " << b.name << " for " << iters << "\n";

      manager.reset(new internal::ThreadManager(b.threads));
      for (std::size_t ti = 0; ti < pool.size(); ++ti) {
        pool[ti] = std::thread(&RunInThread, &b, iters,
                               static_cast<int>(ti + 1), manager.get());
      }
      RunInThread(&b, iters, 0, manager.get());
      manager->WaitForAllThreads();
      for (std::thread& thread : pool) thread.join();
      internal::ThreadManager::Result results;
      {
        MutexLock l(manager->GetBenchmarkMutex());
        results = manager->results;
      }
      manager.reset();
      // Adjust real/manual time stats since they were reported per thread.
      results.real_time_used /= b.threads;
      results.manual_time_used /= b.threads;

      VLOG(2) << "Ran in " << results.cpu_time_used << "/"
              << results.real_time_used << "\n";

      // Base decisions off of real time if requested by this benchmark.
      double seconds = results.cpu_time_used;
      if (b.use_manual_time) {
        seconds = results.manual_time_used;
      } else if (b.use_real_time) {
        seconds = results.real_time_used;
      }

      const double min_time =
          !IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time;
      // If this was the first run, check whether the elapsed (or cpu) time
      // was large enough. If this is not the first run, go with the current
      // value of iters.
      if ((i > 0) || results.has_error_ || (iters >= kMaxIterations) ||
          (seconds >= min_time) || (results.real_time_used >= 5 * min_time)) {
        BenchmarkReporter::Run report =
            CreateRunReport(b, results, iters, seconds);
        if (!report.error_occurred && b.complexity != oNone)
          complexity_reports->push_back(report);
        reports.push_back(report);
        break;
      }

      // See by how much the iteration count should be increased.
      // Note: Avoid division by zero with max(seconds, 1ns).
      double multiplier = min_time * 1.4 / std::max(seconds, 1e-9);
      // If our last run was at least 10% of FLAGS_benchmark_min_time then we
      // use the multiplier directly. Otherwise we use at most 10 times
      // expansion.
      // NOTE: When the last run was at least 10% of the min time the max
      // expansion should be 14x.
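      // Worked example with illustrative numbers: for min_time = 0.5s and a
      // last run of seconds = 0.25s, multiplier = 0.5 * 1.4 / 0.25 = 2.8 and
      // the run was significant (0.25 / 0.5 = 0.5 > 0.1), so the multiplier
      // is used directly. Had the run taken only 0.01s, the raw multiplier
      // of 70 would be clamped to 10 because 0.01 / 0.5 = 0.02 <= 0.1.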
      bool is_significant = (seconds / min_time) > 0.1;
      multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
      if (multiplier <= 1.0) multiplier = 2.0;
      double next_iters = std::max(multiplier * iters, iters + 1.0);
      if (next_iters > kMaxIterations) {
        next_iters = kMaxIterations;
      }
      VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
      iters = static_cast<size_t>(next_iters + 0.5);
    }
  }
  // Calculate additional statistics
  auto stat_reports = ComputeStats(reports);
  if ((b.complexity != oNone) && b.last_benchmark_instance) {
    auto additional_run_stats = ComputeBigO(*complexity_reports);
    stat_reports.insert(stat_reports.end(), additional_run_stats.begin(),
                        additional_run_stats.end());
    complexity_reports->clear();
  }

  if (report_aggregates_only) reports.clear();
  reports.insert(reports.end(), stat_reports.begin(), stat_reports.end());
  return reports;
}

}  // end namespace
}  // end namespace internal

State::State(size_t max_iters, const std::vector<int>& ranges, int thread_i,
             int n_threads, internal::ThreadTimer* timer,
             internal::ThreadManager* manager)
    : started_(false),
      finished_(false),
      total_iterations_(0),
      range_(ranges),
      bytes_processed_(0),
      items_processed_(0),
      complexity_n_(0),
      error_occurred_(false),
      thread_index(thread_i),
      threads(n_threads),
      max_iterations(max_iters),
      timer_(timer),
      manager_(manager) {
  CHECK(max_iterations != 0) << "At least one iteration must be run";
  CHECK_LT(thread_index, threads) << "thread_index must be less than threads";
}

void State::PauseTiming() {
  // Add in time accumulated so far
  CHECK(started_ && !finished_ && !error_occurred_);
  timer_->StopTimer();
}

void State::ResumeTiming() {
  CHECK(started_ && !finished_ && !error_occurred_);
  timer_->StartTimer();
}

void State::SkipWithError(const char* msg) {
  CHECK(msg);
  error_occurred_ = true;
  {
    MutexLock l(manager_->GetBenchmarkMutex());
    if (!manager_->results.has_error_) {
      manager_->results.error_message_ = msg;
      manager_->results.has_error_ = true;
    }
  }
  total_iterations_ = max_iterations;
  if (timer_->running()) timer_->StopTimer();
}
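
// Illustrative only: typical use of SkipWithError from inside a benchmark
// (BM_Example and ResourceAvailable() are hypothetical):
//
//   static void BM_Example(benchmark::State& state) {
//     if (!ResourceAvailable()) {
//       state.SkipWithError("resource unavailable");
//     }
//     while (state.KeepRunning()) {
//       // ... not reached after a skip: KeepRunning() returns false ...
//     }
//   }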

void State::SetIterationTime(double seconds) {
  timer_->SetIterationTime(seconds);
}
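
// Illustrative only: SetIterationTime() pairs with Benchmark::UseManualTime();
// a sketch of manual timing (BM_ManualTiming is hypothetical):
//
//   static void BM_ManualTiming(benchmark::State& state) {
//     while (state.KeepRunning()) {
//       auto start = std::chrono::high_resolution_clock::now();
//       // ... work timed by the user rather than by ThreadTimer ...
//       auto end = std::chrono::high_resolution_clock::now();
//       state.SetIterationTime(
//           std::chrono::duration<double>(end - start).count());
//     }
//   }
//   BENCHMARK(BM_ManualTiming)->UseManualTime();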

void State::SetLabel(const char* label) {
  MutexLock l(manager_->GetBenchmarkMutex());
  manager_->results.report_label_ = label;
}

void State::StartKeepRunning() {
  CHECK(!started_ && !finished_);
  started_ = true;
  manager_->StartStopBarrier();
  if (!error_occurred_) ResumeTiming();
}

void State::FinishKeepRunning() {
  CHECK(started_ && (!finished_ || error_occurred_));
  if (!error_occurred_) {
    PauseTiming();
  }
  // The KeepRunning() loop increments total_iterations_ once past
  // max_iterations on exit; reset it to the true count.
  total_iterations_ = max_iterations;
  finished_ = true;
  manager_->StartStopBarrier();
}
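
// Illustrative only: StartKeepRunning() and FinishKeepRunning() are invoked
// implicitly by the first and the final State::KeepRunning() call, so a
// user-written loop such as
//
//   while (state.KeepRunning()) {
//     benchmark::DoNotOptimize(Work());  // Work() is hypothetical
//   }
//
// starts the timer on entry and, on exit, stops it and fixes up the
// iteration count.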

namespace internal {
namespace {

void RunBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
                   BenchmarkReporter* console_reporter,
                   BenchmarkReporter* file_reporter) {
  // Note the file_reporter can be null.
  CHECK(console_reporter != nullptr);

  // Determine the width of the name field using a minimum width of 10.
  bool has_repetitions = FLAGS_benchmark_repetitions > 1;
  size_t name_field_width = 10;
  for (const Benchmark::Instance& benchmark : benchmarks) {
    name_field_width =
        std::max<size_t>(name_field_width, benchmark.name.size());
    has_repetitions |= benchmark.repetitions > 1;
  }
  if (has_repetitions) name_field_width += std::strlen("_stddev");

  // Print header here
  BenchmarkReporter::Context context;
  context.num_cpus = NumCPUs();
  context.mhz_per_cpu = CyclesPerSecond() / 1000000.0f;

  context.cpu_scaling_enabled = CpuScalingEnabled();
  context.name_field_width = name_field_width;

  // Keep track of running times of all instances of the current benchmark
  std::vector<BenchmarkReporter::Run> complexity_reports;

  // We flush streams after invoking reporter methods that write to them. This
  // ensures users get timely updates even when streams are not line-buffered.
  auto flushStreams = [](BenchmarkReporter* reporter) {
    if (!reporter) return;
    std::flush(reporter->GetOutputStream());
    std::flush(reporter->GetErrorStream());
  };

  if (console_reporter->ReportContext(context) &&
      (!file_reporter || file_reporter->ReportContext(context))) {
    flushStreams(console_reporter);
    flushStreams(file_reporter);
    for (const auto& benchmark : benchmarks) {
      std::vector<BenchmarkReporter::Run> reports =
          RunBenchmark(benchmark, &complexity_reports);
      console_reporter->ReportRuns(reports);
      if (file_reporter) file_reporter->ReportRuns(reports);
      flushStreams(console_reporter);
      flushStreams(file_reporter);
    }
  }
  console_reporter->Finalize();
  if (file_reporter) file_reporter->Finalize();
  flushStreams(console_reporter);
  flushStreams(file_reporter);
}

std::unique_ptr<BenchmarkReporter> CreateReporter(
    std::string const& name, ConsoleReporter::OutputOptions allow_color) {
  typedef std::unique_ptr<BenchmarkReporter> PtrType;
  if (name == "console") {
    return PtrType(new ConsoleReporter(allow_color));
  } else if (name == "json") {
    return PtrType(new JSONReporter);
  } else if (name == "csv") {
    return PtrType(new CSVReporter);
  } else {
    std::cerr << "Unexpected format: '" << name << "'\n";
    std::exit(1);
  }
}

}  // end namespace
}  // end namespace internal

size_t RunSpecifiedBenchmarks() {
  return RunSpecifiedBenchmarks(nullptr, nullptr);
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter) {
  return RunSpecifiedBenchmarks(console_reporter, nullptr);
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
                              BenchmarkReporter* file_reporter) {
  std::string spec = FLAGS_benchmark_filter;
  if (spec.empty() || spec == "all")
    spec = ".";  // Regexp that matches all benchmarks

  // Setup the reporters
  std::ofstream output_file;
  std::unique_ptr<BenchmarkReporter> default_console_reporter;
  std::unique_ptr<BenchmarkReporter> default_file_reporter;
  if (!console_reporter) {
    auto output_opts = ConsoleReporter::OO_None;
    if (FLAGS_benchmark_color == "auto")
      output_opts = IsColorTerminal() ? ConsoleReporter::OO_Color
                                      : ConsoleReporter::OO_None;
    else
      output_opts = IsTruthyFlagValue(FLAGS_benchmark_color)
                        ? ConsoleReporter::OO_Color
                        : ConsoleReporter::OO_None;
    default_console_reporter =
        internal::CreateReporter(FLAGS_benchmark_format, output_opts);
    console_reporter = default_console_reporter.get();
  }
  auto& Out = console_reporter->GetOutputStream();
  auto& Err = console_reporter->GetErrorStream();

  std::string const& fname = FLAGS_benchmark_out;
  if (fname == "" && file_reporter) {
    Err << "A custom file reporter was provided but "
           "--benchmark_out=<file> was not specified."
        << std::endl;
    std::exit(1);
  }
  if (fname != "") {
    output_file.open(fname);
    if (!output_file.is_open()) {
      Err << "invalid file name: '" << fname << "'" << std::endl;
      std::exit(1);
    }
    if (!file_reporter) {
      default_file_reporter = internal::CreateReporter(
          FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
      file_reporter = default_file_reporter.get();
    }
    file_reporter->SetOutputStream(&output_file);
    file_reporter->SetErrorStream(&output_file);
  }

  std::vector<internal::Benchmark::Instance> benchmarks;
  if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;

  if (benchmarks.empty()) {
    Err << "Failed to match any benchmarks against regex: " << spec << "\n";
    return 0;
  }

  if (FLAGS_benchmark_list_tests) {
    for (auto const& benchmark : benchmarks) Out << benchmark.name << "\n";
  } else {
    internal::RunBenchmarks(benchmarks, console_reporter, file_reporter);
  }

  return benchmarks.size();
}
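
// Illustrative only: typical entry-point wiring, roughly what the
// BENCHMARK_MAIN() macro expands to:
//
//   int main(int argc, char** argv) {
//     benchmark::Initialize(&argc, argv);
//     benchmark::RunSpecifiedBenchmarks();
//     return 0;
//   }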

namespace internal {

void PrintUsageAndExit() {
  fprintf(stdout,
          "benchmark"
          " [--benchmark_list_tests={true|false}]\n"
          "          [--benchmark_filter=<regex>]\n"
          "          [--benchmark_min_time=<min_time>]\n"
          "          [--benchmark_repetitions=<num_repetitions>]\n"
          "          [--benchmark_report_aggregates_only={true|false}]\n"
          "          [--benchmark_format=<console|json|csv>]\n"
          "          [--benchmark_out=<filename>]\n"
          "          [--benchmark_out_format=<json|console|csv>]\n"
          "          [--benchmark_color={auto|true|false}]\n"
          "          [--v=<verbosity>]\n");
  exit(0);
}

void ParseCommandLineFlags(int* argc, char** argv) {
  using namespace benchmark;
  for (int i = 1; i < *argc; ++i) {
    if (ParseBoolFlag(argv[i], "benchmark_list_tests",
                      &FLAGS_benchmark_list_tests) ||
        ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
        ParseDoubleFlag(argv[i], "benchmark_min_time",
                        &FLAGS_benchmark_min_time) ||
        ParseInt32Flag(argv[i], "benchmark_repetitions",
                       &FLAGS_benchmark_repetitions) ||
        ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
                      &FLAGS_benchmark_report_aggregates_only) ||
        ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
        ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
        ParseStringFlag(argv[i], "benchmark_out_format",
                        &FLAGS_benchmark_out_format) ||
        ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
        // "color_print" is the deprecated name for "benchmark_color".
        // TODO: Remove this.
        ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
        ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
      // Shift the remaining arguments (including the terminating nullptr at
      // argv[*argc]) left by one to drop the consumed flag.
      for (int j = i; j != *argc; ++j) argv[j] = argv[j + 1];

      --(*argc);
      --i;
    } else if (IsFlag(argv[i], "help")) {
      PrintUsageAndExit();
    }
  }
  for (auto const* flag :
       {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format})
    if (*flag != "console" && *flag != "json" && *flag != "csv") {
      PrintUsageAndExit();
    }
  if (FLAGS_benchmark_color.empty()) {
    PrintUsageAndExit();
  }
}

int InitializeStreams() {
  static std::ios_base::Init init;
  return 0;
}

}  // end namespace internal

void Initialize(int* argc, char** argv) {
  internal::ParseCommandLineFlags(argc, argv);
  internal::LogLevel() = FLAGS_v;
}

}  // end namespace benchmark