// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "internal_macros.h"

#ifndef BENCHMARK_OS_WINDOWS
#include <sys/resource.h>
#include <sys/time.h>
#include <unistd.h>
#endif

#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <memory>
#include <thread>

#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "log.h"
#include "mutex.h"
#include "re.h"
#include "stat.h"
#include "string_util.h"
#include "sysinfo.h"
#include "timers.h"

DEFINE_bool(benchmark_list_tests, false,
            "Print a list of benchmarks. This option overrides all other "
            "options.");

DEFINE_string(benchmark_filter, ".",
              "A regular expression that specifies the set of benchmarks "
              "to execute.  If this flag is empty, no benchmarks are run.  "
              "If this flag is the string \"all\", all benchmarks linked "
              "into the process are run.");

DEFINE_double(benchmark_min_time, 0.5,
              "Minimum number of seconds we should run the benchmark before "
              "results are considered significant.  For cpu-time based "
              "tests, this is the lower bound on the total cpu time "
              "used by all threads that make up the test.  For real-time "
              "based tests, this is the lower bound on the elapsed time "
              "of the benchmark execution, regardless of number of "
              "threads.");

DEFINE_int32(benchmark_repetitions, 1,
             "The number of runs of each benchmark. If greater than 1, the "
             "mean and standard deviation of the runs will be reported.");

DEFINE_bool(benchmark_report_aggregates_only, false,
            "Whether to report the result of each individual benchmark "
            "repetition. When 'true' is specified, only the mean, standard "
            "deviation, and other statistics are reported for repeated "
            "benchmarks.");

DEFINE_string(benchmark_format, "console",
              "The format to use for console output. Valid values are "
              "'console', 'json', or 'csv'.");

DEFINE_string(benchmark_out_format, "json",
              "The format to use for file output. Valid values are "
              "'console', 'json', or 'csv'.");

DEFINE_string(benchmark_out, "", "The file to write additional output to");

DEFINE_string(benchmark_color, "auto",
              "Whether to use colors in the output.  Valid values: "
              "'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use "
              "colors if the output is being sent to a terminal and the TERM "
              "environment variable is set to a terminal type that supports "
              "colors.");

DEFINE_int32(v, 0, "The level of verbose logging to output");

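// Example invocation exercising the flags above (illustrative; the binary
// name "mybench" and the benchmark name pattern are hypothetical):
//
//   ./mybench --benchmark_filter=BM_memcpy.* \
//             --benchmark_min_time=2.0 \
//             --benchmark_repetitions=5 \
//             --benchmark_out=results.json --benchmark_out_format=json
//
// This runs every benchmark whose name matches BM_memcpy.* for at least two
// seconds per run, repeats each benchmark five times, prints results to the
// console, and additionally writes them as JSON to results.json.
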
namespace benchmark {
namespace internal {

void UseCharPointer(char const volatile*) {}

}  // end namespace internal

namespace {

static const size_t kMaxIterations = 1000000000;

}  // end namespace

namespace internal {

class ThreadManager {
 public:
  ThreadManager(int num_threads)
      : alive_threads_(num_threads), start_stop_barrier_(num_threads) {}

  Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
    return benchmark_mutex_;
  }

  bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
    return start_stop_barrier_.wait();
  }

  void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
    start_stop_barrier_.removeThread();
    if (--alive_threads_ == 0) {
      MutexLock lock(end_cond_mutex_);
      end_condition_.notify_all();
    }
  }

  void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
    MutexLock lock(end_cond_mutex_);
    end_condition_.wait(lock.native_handle(),
                        [this]() { return alive_threads_ == 0; });
  }

 public:
  struct Result {
    double real_time_used = 0;
    double cpu_time_used = 0;
    double manual_time_used = 0;
    int64_t bytes_processed = 0;
    int64_t items_processed = 0;
    int complexity_n = 0;
    std::string report_label_;
    std::string error_message_;
    bool has_error_ = false;
    UserCounters counters;
  };
  GUARDED_BY(GetBenchmarkMutex()) Result results;

 private:
  mutable Mutex benchmark_mutex_;
  std::atomic<int> alive_threads_;
  Barrier start_stop_barrier_;
  Mutex end_cond_mutex_;
  Condition end_condition_;
};

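// Sketch of the coordination protocol implemented by ThreadManager, as driven
// by RunBenchmark()/RunInThread() below (illustrative pseudocode, not part of
// the original source):
//
//   std::unique_ptr<ThreadManager> manager(new ThreadManager(num_threads));
//   // Each of the num_threads workers does:
//   //   manager->StartStopBarrier();   // all threads start timing together
//   //   ... run the benchmark iterations ...
//   //   manager->StartStopBarrier();   // all threads stop timing together
//   //   ... merge per-thread stats under GetBenchmarkMutex() ...
//   //   manager->NotifyThreadComplete();
//   manager->WaitForAllThreads();       // main thread waits for checkout
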
// Timer management class
class ThreadTimer {
 public:
  ThreadTimer() = default;

  // Called by each thread
  void StartTimer() {
    running_ = true;
    start_real_time_ = ChronoClockNow();
    start_cpu_time_ = ThreadCPUUsage();
  }

  // Called by each thread
  void StopTimer() {
    CHECK(running_);
    running_ = false;
    real_time_used_ += ChronoClockNow() - start_real_time_;
    cpu_time_used_ += ThreadCPUUsage() - start_cpu_time_;
  }

  // Called by each thread
  void SetIterationTime(double seconds) { manual_time_used_ += seconds; }

  bool running() const { return running_; }

  // REQUIRES: timer is not running
  double real_time_used() {
    CHECK(!running_);
    return real_time_used_;
  }

  // REQUIRES: timer is not running
  double cpu_time_used() {
    CHECK(!running_);
    return cpu_time_used_;
  }

  // REQUIRES: timer is not running
  double manual_time_used() {
    CHECK(!running_);
    return manual_time_used_;
  }

 private:
  bool running_ = false;        // Is the timer running
  double start_real_time_ = 0;  // If running_
  double start_cpu_time_ = 0;   // If running_

  // Accumulated time so far (does not contain current slice if running_)
  double real_time_used_ = 0;
  double cpu_time_used_ = 0;
  // Manually set iteration time. User sets this with SetIterationTime(seconds).
  double manual_time_used_ = 0;
};

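// Example of the manual-time path that feeds manual_time_used_ above
// (illustrative; the benchmark body is hypothetical). A benchmark registered
// with UseManualTime() reports its own per-iteration timing through
// State::SetIterationTime(), and that value, rather than the wall clock,
// becomes the reported real time:
//
//   static void BM_AsyncWork(benchmark::State& state) {
//     while (state.KeepRunning()) {
//       auto start = std::chrono::high_resolution_clock::now();
//       // ... launch the asynchronous work and wait for it to finish ...
//       auto end = std::chrono::high_resolution_clock::now();
//       state.SetIterationTime(
//           std::chrono::duration<double>(end - start).count());
//     }
//   }
//   BENCHMARK(BM_AsyncWork)->UseManualTime();
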
namespace {

BenchmarkReporter::Run CreateRunReport(
    const benchmark::internal::Benchmark::Instance& b,
    const internal::ThreadManager::Result& results, size_t iters,
    double seconds) {
  // Create report about this benchmark run.
  BenchmarkReporter::Run report;

  report.benchmark_name = b.name;
  report.error_occurred = results.has_error_;
  report.error_message = results.error_message_;
  report.report_label = results.report_label_;
  // Report the total iterations across all threads.
  report.iterations = static_cast<int64_t>(iters) * b.threads;
  report.time_unit = b.time_unit;

  if (!report.error_occurred) {
    double bytes_per_second = 0;
    if (results.bytes_processed > 0 && seconds > 0.0) {
      bytes_per_second = (results.bytes_processed / seconds);
    }
    double items_per_second = 0;
    if (results.items_processed > 0 && seconds > 0.0) {
      items_per_second = (results.items_processed / seconds);
    }

    if (b.use_manual_time) {
      report.real_accumulated_time = results.manual_time_used;
    } else {
      report.real_accumulated_time = results.real_time_used;
    }
    report.cpu_accumulated_time = results.cpu_time_used;
    report.bytes_per_second = bytes_per_second;
    report.items_per_second = items_per_second;
    report.complexity_n = results.complexity_n;
    report.complexity = b.complexity;
    report.complexity_lambda = b.complexity_lambda;
    report.counters = results.counters;
  }
  return report;
}

// Execute one thread of benchmark b for the specified number of iterations.
// Adds the stats collected for the thread into manager->results.
void RunInThread(const benchmark::internal::Benchmark::Instance* b,
                 size_t iters, int thread_id,
                 internal::ThreadManager* manager) {
  internal::ThreadTimer timer;
  State st(iters, b->arg, thread_id, b->threads, &timer, manager);
  b->benchmark->Run(st);
  CHECK(st.iterations() == st.max_iterations)
      << "Benchmark returned before State::KeepRunning() returned false!";
  {
    MutexLock l(manager->GetBenchmarkMutex());
    internal::ThreadManager::Result& results = manager->results;
    results.cpu_time_used += timer.cpu_time_used();
    results.real_time_used += timer.real_time_used();
    results.manual_time_used += timer.manual_time_used();
    results.bytes_processed += st.bytes_processed();
    results.items_processed += st.items_processed();
    results.complexity_n += st.complexity_length_n();
    internal::Increment(&results.counters, st.counters);
  }
  manager->NotifyThreadComplete();
}

std::vector<BenchmarkReporter::Run> RunBenchmark(
    const benchmark::internal::Benchmark::Instance& b,
    std::vector<BenchmarkReporter::Run>* complexity_reports) {
  std::vector<BenchmarkReporter::Run> reports;  // return value

  const bool has_explicit_iteration_count = b.iterations != 0;
  size_t iters = has_explicit_iteration_count ? b.iterations : 1;
  std::unique_ptr<internal::ThreadManager> manager;
  std::vector<std::thread> pool(b.threads - 1);
  const int repeats =
      b.repetitions != 0 ? b.repetitions : FLAGS_benchmark_repetitions;
  const bool report_aggregates_only =
      repeats != 1 &&
      (b.report_mode == internal::RM_Unspecified
           ? FLAGS_benchmark_report_aggregates_only
           : b.report_mode == internal::RM_ReportAggregatesOnly);
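  // For example (illustrative): with --benchmark_repetitions=3 and
  // --benchmark_report_aggregates_only=true, the three repetitions still run,
  // but only the aggregate *_mean and *_stddev rows produced by ComputeStats()
  // below are reported.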
  for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
    for (;;) {
      // Try benchmark
      VLOG(2) << "Running " << b.name << " for " << iters << "\n";

      manager.reset(new internal::ThreadManager(b.threads));
      for (std::size_t ti = 0; ti < pool.size(); ++ti) {
        pool[ti] = std::thread(&RunInThread, &b, iters,
                               static_cast<int>(ti + 1), manager.get());
      }
      RunInThread(&b, iters, 0, manager.get());
      manager->WaitForAllThreads();
      for (std::thread& thread : pool) thread.join();
      internal::ThreadManager::Result results;
      {
        MutexLock l(manager->GetBenchmarkMutex());
        results = manager->results;
      }
      manager.reset();
      // Adjust real/manual time stats since they were reported per thread.
      results.real_time_used /= b.threads;
      results.manual_time_used /= b.threads;

      VLOG(2) << "Ran in " << results.cpu_time_used << "/"
              << results.real_time_used << "\n";

      // Base decisions off of real time if requested by this benchmark.
      double seconds = results.cpu_time_used;
      if (b.use_manual_time) {
        seconds = results.manual_time_used;
      } else if (b.use_real_time) {
        seconds = results.real_time_used;
      }

      const double min_time =
          !IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time;

      // Determine if this run should be reported; either it has run for a
      // sufficient amount of time or an error was reported.
      const bool should_report = repetition_num > 0
        || has_explicit_iteration_count  // An exact iteration count was requested
        || results.has_error_
        || iters >= kMaxIterations
        || seconds >= min_time  // the elapsed time is large enough
        // CPU time is specified but the elapsed real time greatly exceeds the
        // minimum time. Note that user-provided timers are exempt from this
        // sanity check.
        || ((results.real_time_used >= 5 * min_time) && !b.use_manual_time);
    347 
    348       if (should_report) {
    349         BenchmarkReporter::Run report =
    350             CreateRunReport(b, results, iters, seconds);
    351         if (!report.error_occurred && b.complexity != oNone)
    352           complexity_reports->push_back(report);
    353         reports.push_back(report);
    354         break;
    355       }
    356 
      // Decide by how much the iteration count should be increased.
      // Note: Avoid division by zero with max(seconds, 1ns).
      double multiplier = min_time * 1.4 / std::max(seconds, 1e-9);
      // If our last run was at least 10% of FLAGS_benchmark_min_time then we
      // use the multiplier directly. Otherwise we use at most a 10x expansion.
      // NOTE: When the last run was at least 10% of the min time the max
      // expansion is 14x.
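      // Worked example (illustrative numbers, not from the original source):
      // with min_time = 0.5s, if the last run of iters = 1000 took
      // seconds = 0.01s, then multiplier = 0.5 * 1.4 / 0.01 = 70. Since
      // 0.01 / 0.5 = 0.02 is below the 10% significance threshold, the
      // multiplier is capped at 10, so the next run uses 10000 iterations.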
      bool is_significant = (seconds / min_time) > 0.1;
      multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
      if (multiplier <= 1.0) multiplier = 2.0;
      double next_iters = std::max(multiplier * iters, iters + 1.0);
      if (next_iters > kMaxIterations) {
        next_iters = kMaxIterations;
      }
      VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
      iters = static_cast<int>(next_iters + 0.5);
    }
  }
  // Calculate additional statistics
  auto stat_reports = ComputeStats(reports);
  if ((b.complexity != oNone) && b.last_benchmark_instance) {
    auto additional_run_stats = ComputeBigO(*complexity_reports);
    stat_reports.insert(stat_reports.end(), additional_run_stats.begin(),
                        additional_run_stats.end());
    complexity_reports->clear();
  }

  if (report_aggregates_only) reports.clear();
  reports.insert(reports.end(), stat_reports.begin(), stat_reports.end());
  return reports;
}

}  // namespace
}  // namespace internal

State::State(size_t max_iters, const std::vector<int>& ranges, int thread_i,
             int n_threads, internal::ThreadTimer* timer,
             internal::ThreadManager* manager)
    : started_(false),
      finished_(false),
      total_iterations_(0),
      range_(ranges),
      bytes_processed_(0),
      items_processed_(0),
      complexity_n_(0),
      error_occurred_(false),
      counters(),
      thread_index(thread_i),
      threads(n_threads),
      max_iterations(max_iters),
      timer_(timer),
      manager_(manager) {
  CHECK(max_iterations != 0) << "At least one iteration must be run";
  CHECK_LT(thread_index, threads) << "thread_index must be less than threads";
}

void State::PauseTiming() {
  // Add in time accumulated so far
  CHECK(started_ && !finished_ && !error_occurred_);
  timer_->StopTimer();
}

void State::ResumeTiming() {
  CHECK(started_ && !finished_ && !error_occurred_);
  timer_->StartTimer();
}

void State::SkipWithError(const char* msg) {
  CHECK(msg);
  error_occurred_ = true;
  {
    MutexLock l(manager_->GetBenchmarkMutex());
    if (manager_->results.has_error_ == false) {
      manager_->results.error_message_ = msg;
      manager_->results.has_error_ = true;
    }
  }
  total_iterations_ = max_iterations;
  if (timer_->running()) timer_->StopTimer();
}
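
// Example use of SkipWithError() from a benchmark body (illustrative; the
// file name and benchmark are hypothetical). Because SkipWithError() sets
// total_iterations_ to max_iterations, the KeepRunning() loop exits on its
// next check:
//
//   static void BM_ReadFile(benchmark::State& state) {
//     std::ifstream in("data.bin");
//     if (!in) state.SkipWithError("could not open data.bin");
//     while (state.KeepRunning()) {
//       // ... read from the file ...
//     }
//   }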

void State::SetIterationTime(double seconds) {
  timer_->SetIterationTime(seconds);
}

void State::SetLabel(const char* label) {
  MutexLock l(manager_->GetBenchmarkMutex());
  manager_->results.report_label_ = label;
}

void State::StartKeepRunning() {
  CHECK(!started_ && !finished_);
  started_ = true;
  manager_->StartStopBarrier();
  if (!error_occurred_) ResumeTiming();
}

void State::FinishKeepRunning() {
  CHECK(started_ && (!finished_ || error_occurred_));
  if (!error_occurred_) {
    PauseTiming();
  }
  // Total iterations is now one greater than max_iterations. Fix this.
  total_iterations_ = max_iterations;
  finished_ = true;
  manager_->StartStopBarrier();
}

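// The canonical loop that drives StartKeepRunning()/FinishKeepRunning() above
// (illustrative; BM_Example is hypothetical). Roughly: the first call to
// State::KeepRunning() triggers StartKeepRunning(), and the call that observes
// the iteration limit triggers FinishKeepRunning():
//
//   static void BM_Example(benchmark::State& state) {
//     while (state.KeepRunning()) {
//       // ... code under measurement ...
//     }
//   }
//   BENCHMARK(BM_Example);
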
namespace internal {
namespace {

void RunBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
                   BenchmarkReporter* console_reporter,
                   BenchmarkReporter* file_reporter) {
  // Note the file_reporter can be null.
  CHECK(console_reporter != nullptr);

  // Determine the width of the name field using a minimum width of 10.
  bool has_repetitions = FLAGS_benchmark_repetitions > 1;
  size_t name_field_width = 10;
  for (const Benchmark::Instance& benchmark : benchmarks) {
    name_field_width =
        std::max<size_t>(name_field_width, benchmark.name.size());
    has_repetitions |= benchmark.repetitions > 1;
  }
  if (has_repetitions) name_field_width += std::strlen("_stddev");

  // Print header here
  BenchmarkReporter::Context context;
  context.num_cpus = NumCPUs();
  context.mhz_per_cpu = CyclesPerSecond() / 1000000.0f;

  context.cpu_scaling_enabled = CpuScalingEnabled();
  context.name_field_width = name_field_width;

  // Keep track of running times of all instances of the current benchmark.
  std::vector<BenchmarkReporter::Run> complexity_reports;

  // We flush streams after invoking reporter methods that write to them. This
  // ensures users get timely updates even when streams are not line-buffered.
  auto flushStreams = [](BenchmarkReporter* reporter) {
    if (!reporter) return;
    std::flush(reporter->GetOutputStream());
    std::flush(reporter->GetErrorStream());
  };

  if (console_reporter->ReportContext(context) &&
      (!file_reporter || file_reporter->ReportContext(context))) {
    flushStreams(console_reporter);
    flushStreams(file_reporter);
    for (const auto& benchmark : benchmarks) {
      std::vector<BenchmarkReporter::Run> reports =
          RunBenchmark(benchmark, &complexity_reports);
      console_reporter->ReportRuns(reports);
      if (file_reporter) file_reporter->ReportRuns(reports);
      flushStreams(console_reporter);
      flushStreams(file_reporter);
    }
  }
  console_reporter->Finalize();
  if (file_reporter) file_reporter->Finalize();
  flushStreams(console_reporter);
  flushStreams(file_reporter);
}

std::unique_ptr<BenchmarkReporter> CreateReporter(
    std::string const& name, ConsoleReporter::OutputOptions allow_color) {
  typedef std::unique_ptr<BenchmarkReporter> PtrType;
  if (name == "console") {
    return PtrType(new ConsoleReporter(allow_color));
  } else if (name == "json") {
    return PtrType(new JSONReporter);
  } else if (name == "csv") {
    return PtrType(new CSVReporter);
  } else {
    std::cerr << "Unexpected format: '" << name << "'\n";
    std::exit(1);
  }
}

}  // end namespace
}  // end namespace internal

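// Typical wiring of the entry points below (illustrative; this mirrors what
// the BENCHMARK_MAIN() macro in benchmark.h expands to, modulo details):
//
//   int main(int argc, char** argv) {
//     benchmark::Initialize(&argc, argv);
//     if (benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
//     benchmark::RunSpecifiedBenchmarks();
//     return 0;
//   }
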
size_t RunSpecifiedBenchmarks() {
  return RunSpecifiedBenchmarks(nullptr, nullptr);
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter) {
  return RunSpecifiedBenchmarks(console_reporter, nullptr);
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
                              BenchmarkReporter* file_reporter) {
  std::string spec = FLAGS_benchmark_filter;
  if (spec.empty() || spec == "all")
    spec = ".";  // Regexp that matches all benchmarks

  // Setup the reporters
  std::ofstream output_file;
  std::unique_ptr<BenchmarkReporter> default_console_reporter;
  std::unique_ptr<BenchmarkReporter> default_file_reporter;
  if (!console_reporter) {
    auto output_opts = ConsoleReporter::OO_None;
    if (FLAGS_benchmark_color == "auto")
      output_opts = IsColorTerminal() ? ConsoleReporter::OO_Color
                                      : ConsoleReporter::OO_None;
    else
      output_opts = IsTruthyFlagValue(FLAGS_benchmark_color)
                        ? ConsoleReporter::OO_Color
                        : ConsoleReporter::OO_None;
    default_console_reporter =
        internal::CreateReporter(FLAGS_benchmark_format, output_opts);
    console_reporter = default_console_reporter.get();
  }
  auto& Out = console_reporter->GetOutputStream();
  auto& Err = console_reporter->GetErrorStream();

  std::string const& fname = FLAGS_benchmark_out;
  if (fname == "" && file_reporter) {
    Err << "A custom file reporter was provided but "
           "--benchmark_out=<file> was not specified."
        << std::endl;
    std::exit(1);
  }
  if (fname != "") {
    output_file.open(fname);
    if (!output_file.is_open()) {
      Err << "invalid file name: '" << fname << "'" << std::endl;
      std::exit(1);
    }
    if (!file_reporter) {
      default_file_reporter = internal::CreateReporter(
          FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
      file_reporter = default_file_reporter.get();
    }
    file_reporter->SetOutputStream(&output_file);
    file_reporter->SetErrorStream(&output_file);
  }

  std::vector<internal::Benchmark::Instance> benchmarks;
  if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;

  if (benchmarks.empty()) {
    Err << "Failed to match any benchmarks against regex: " << spec << "\n";
    return 0;
  }

  if (FLAGS_benchmark_list_tests) {
    for (auto const& benchmark : benchmarks) Out << benchmark.name << "\n";
  } else {
    internal::RunBenchmarks(benchmarks, console_reporter, file_reporter);
  }

  return benchmarks.size();
}

namespace internal {

void PrintUsageAndExit() {
  fprintf(stdout,
          "benchmark"
          " [--benchmark_list_tests={true|false}]\n"
          "          [--benchmark_filter=<regex>]\n"
          "          [--benchmark_min_time=<min_time>]\n"
          "          [--benchmark_repetitions=<num_repetitions>]\n"
          "          [--benchmark_report_aggregates_only={true|false}]\n"
          "          [--benchmark_format=<console|json|csv>]\n"
          "          [--benchmark_out=<filename>]\n"
          "          [--benchmark_out_format=<json|console|csv>]\n"
          "          [--benchmark_color={auto|true|false}]\n"
          "          [--v=<verbosity>]\n");
  exit(0);
}

void ParseCommandLineFlags(int* argc, char** argv) {
  using namespace benchmark;
  for (int i = 1; i < *argc; ++i) {
    if (ParseBoolFlag(argv[i], "benchmark_list_tests",
                      &FLAGS_benchmark_list_tests) ||
        ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
        ParseDoubleFlag(argv[i], "benchmark_min_time",
                        &FLAGS_benchmark_min_time) ||
        ParseInt32Flag(argv[i], "benchmark_repetitions",
                       &FLAGS_benchmark_repetitions) ||
        ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
                      &FLAGS_benchmark_report_aggregates_only) ||
        ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
        ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
        ParseStringFlag(argv[i], "benchmark_out_format",
                        &FLAGS_benchmark_out_format) ||
        ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
        // "color_print" is the deprecated name for "benchmark_color".
        // TODO: Remove this.
        ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
        ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
      for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];

      --(*argc);
      --i;
    } else if (IsFlag(argv[i], "help")) {
      PrintUsageAndExit();
    }
  }
  for (auto const* flag :
       {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format})
    if (*flag != "console" && *flag != "json" && *flag != "csv") {
      PrintUsageAndExit();
    }
  if (FLAGS_benchmark_color.empty()) {
    PrintUsageAndExit();
  }
}

int InitializeStreams() {
  static std::ios_base::Init init;
  return 0;
}

}  // end namespace internal

void Initialize(int* argc, char** argv) {
  internal::ParseCommandLineFlags(argc, argv);
  internal::LogLevel() = FLAGS_v;
}

bool ReportUnrecognizedArguments(int argc, char** argv) {
  for (int i = 1; i < argc; ++i) {
    fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0],
            argv[i]);
  }
  return argc > 1;
}

}  // end namespace benchmark