// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal (at) google.com (Sameer Agarwal)
//
// The National Institute of Standards and Technology has released a
// set of problems to test non-linear least squares solvers.
//
// More information about the background on these problems and
// suggested evaluation methodology can be found at:
//
//   http://www.itl.nist.gov/div898/strd/nls/nls_info.shtml
//
// The problem data themselves can be found at
//
//   http://www.itl.nist.gov/div898/strd/nls/nls_main.shtml
//
// The problems are divided into three levels of difficulty, Easy,
// Medium and Hard. For each problem there are two starting guesses,
// the first one far away from the global minimum and the second
// closer to it.
//
// A problem is considered successfully solved if every component of
// the solution matches the globally optimal solution to at least 4
// significant digits.
//
// This dataset was used for an evaluation of non-linear least squares
// solvers:
//
// P. F. Mondragon & B. Borchers, A Comparison of Nonlinear Regression
// Codes, Journal of Modern Applied Statistical Methods, 4(1):343-351,
// 2005.
//
// The results from Mondragon & Borchers can be summarized as
//               Excel  Gnuplot  GaussFit  HBN  MinPack
// Average LRE     2.3      4.3       4.0  6.8      4.4
//      Winner       1        5        12   29       12
//
// where the row Winner counts the number of problems for which the
// solver had the highest LRE.

// In this file, we implement the same evaluation methodology using
// Ceres. Currently using Levenberg-Marquardt with DENSE_QR, we get
//
//               Excel  Gnuplot  GaussFit  HBN  MinPack  Ceres
// Average LRE     2.3      4.3       4.0  6.8      4.4    9.4
//      Winner       0        0         5   11        2     41
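//
// Here LRE stands for log relative error. For a computed parameter
// value x and its certified value x*, the evaluation code below
// computes
//
//   LRE = -log10(|x - x*| / |x*|)
//
// which is, roughly, the number of significant digits in which x and
// x* agree. The LRE is clamped to the range [0, 11], since the
// certified values are only known to about 11 digits.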

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <iterator>
#include <string>
#include <vector>
#include "ceres/ceres.h"
#include "gflags/gflags.h"
#include "glog/logging.h"
#include "Eigen/Core"

DEFINE_string(nist_data_dir, "", "Directory containing the NIST non-linear "
              "regression examples");
DEFINE_string(minimizer, "trust_region",
              "Minimizer type to use, choices are: line_search & trust_region");
DEFINE_string(trust_region_strategy, "levenberg_marquardt",
              "Options are: levenberg_marquardt, dogleg");
DEFINE_string(dogleg, "traditional_dogleg",
              "Options are: traditional_dogleg, subspace_dogleg");
DEFINE_string(linear_solver, "dense_qr", "Options are: "
              "sparse_cholesky, dense_qr, dense_normal_cholesky and "
              "cgnr");
DEFINE_string(preconditioner, "jacobi", "Options are: "
              "identity, jacobi");
DEFINE_string(line_search, "armijo",
              "Line search algorithm to use, choices are: armijo and wolfe.");
DEFINE_string(line_search_direction, "lbfgs",
              "Line search direction algorithm to use, choices: lbfgs, bfgs");
DEFINE_int32(max_line_search_iterations, 20,
             "Maximum number of iterations for each line search.");
DEFINE_int32(max_line_search_restarts, 10,
             "Maximum number of restarts of line search direction algorithm.");
DEFINE_string(line_search_interpolation, "cubic",
              "Degree of polynomial approximation in line search, "
              "choices are: bisection, quadratic & cubic.");
DEFINE_int32(lbfgs_rank, 20,
             "Rank of L-BFGS inverse Hessian approximation in line search.");
DEFINE_bool(approximate_eigenvalue_bfgs_scaling, false,
            "Use approximate eigenvalue scaling in (L)BFGS line search.");
DEFINE_double(sufficient_decrease, 1.0e-4,
              "Line search Armijo sufficient (function) decrease factor.");
DEFINE_double(sufficient_curvature_decrease, 0.9,
              "Line search Wolfe sufficient curvature decrease factor.");
DEFINE_int32(num_iterations, 10000, "Number of iterations");
DEFINE_bool(nonmonotonic_steps, false, "Trust region algorithm can use"
            " nonmonotonic steps");
DEFINE_double(initial_trust_region_radius, 1e4, "Initial trust region radius");
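// Example invocation (binary name and data directory path are
// illustrative; adjust them to your build and data layout):
//
//   nist --nist_data_dir=/path/to/nist/data \
//        --trust_region_strategy=levenberg_marquardt \
//        --linear_solver=dense_qr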

namespace ceres {
namespace examples {

using Eigen::Dynamic;
using Eigen::RowMajor;
typedef Eigen::Matrix<double, Dynamic, 1> Vector;
typedef Eigen::Matrix<double, Dynamic, Dynamic, RowMajor> Matrix;

// Splits full on delim, appending the non-empty pieces to *result.
void SplitStringUsingChar(const std::string& full,
                          const char delim,
                          std::vector<std::string>* result) {
  std::back_insert_iterator<std::vector<std::string> > it(*result);

  const char* p = full.data();
  const char* end = p + full.size();
  while (p != end) {
    if (*p == delim) {
      ++p;
    } else {
      const char* start = p;
      while (++p != end && *p != delim) {
        // Skip to the next occurrence of the delimiter.
      }
      *it++ = std::string(start, p - start);
    }
  }
}

// Reads the next line from ifs and splits it on spaces into pieces.
bool GetAndSplitLine(std::ifstream& ifs, std::vector<std::string>* pieces) {
  pieces->clear();
  char buf[256];
  ifs.getline(buf, 256);
  SplitStringUsingChar(std::string(buf), ' ', pieces);
  return true;
}

// Skips over num_lines lines of ifs.
void SkipLines(std::ifstream& ifs, int num_lines) {
  char buf[256];
  for (int i = 0; i < num_lines; ++i) {
    ifs.getline(buf, 256);
  }
}

bool IsSuccessfulTermination(ceres::SolverTerminationType status) {
  return
      (status == ceres::FUNCTION_TOLERANCE) ||
      (status == ceres::GRADIENT_TOLERANCE) ||
      (status == ceres::PARAMETER_TOLERANCE) ||
      (status == ceres::USER_SUCCESS);
}

// Parses a NIST .dat file. The SkipLines calls below step over the
// fixed-format descriptive header to reach the problem sizes, the
// starting and certified parameter values, and the observations.
class NISTProblem {
 public:
  explicit NISTProblem(const std::string& filename) {
    std::ifstream ifs(filename.c_str(), std::ifstream::in);

    std::vector<std::string> pieces;
    SkipLines(ifs, 24);
    GetAndSplitLine(ifs, &pieces);
    const int kNumResponses = std::atoi(pieces[1].c_str());

    GetAndSplitLine(ifs, &pieces);
    const int kNumPredictors = std::atoi(pieces[0].c_str());

    GetAndSplitLine(ifs, &pieces);
    const int kNumObservations = std::atoi(pieces[0].c_str());

    SkipLines(ifs, 4);
    GetAndSplitLine(ifs, &pieces);
    const int kNumParameters = std::atoi(pieces[0].c_str());
    SkipLines(ifs, 8);

    // Get the first line of initial and final parameter values to
    // determine the number of tries.
    GetAndSplitLine(ifs, &pieces);
    const int kNumTries = pieces.size() - 4;

    predictor_.resize(kNumObservations, kNumPredictors);
    response_.resize(kNumObservations, kNumResponses);
    initial_parameters_.resize(kNumTries, kNumParameters);
    final_parameters_.resize(1, kNumParameters);

    // Parse the line for parameter b1.
    int parameter_id = 0;
    for (int i = 0; i < kNumTries; ++i) {
      initial_parameters_(i, parameter_id) = std::atof(pieces[i + 2].c_str());
    }
    final_parameters_(0, parameter_id) =
        std::atof(pieces[2 + kNumTries].c_str());

    // Parse the remaining parameter lines.
    for (int parameter_id = 1; parameter_id < kNumParameters; ++parameter_id) {
      GetAndSplitLine(ifs, &pieces);
      // b2, b3, ....
      for (int i = 0; i < kNumTries; ++i) {
        initial_parameters_(i, parameter_id) = std::atof(pieces[i + 2].c_str());
      }
      final_parameters_(0, parameter_id) =
          std::atof(pieces[2 + kNumTries].c_str());
    }

    // Certified cost.
    SkipLines(ifs, 1);
    GetAndSplitLine(ifs, &pieces);
    certified_cost_ = std::atof(pieces[4].c_str()) / 2.0;

    // Read the observations.
    SkipLines(ifs, 18 - kNumParameters);
    for (int i = 0; i < kNumObservations; ++i) {
      GetAndSplitLine(ifs, &pieces);
      // Response.
      for (int j = 0; j < kNumResponses; ++j) {
        response_(i, j) = std::atof(pieces[j].c_str());
      }

      // Predictor variables.
      for (int j = 0; j < kNumPredictors; ++j) {
        predictor_(i, j) = std::atof(pieces[j + kNumResponses].c_str());
      }
    }
  }

  Matrix initial_parameters(int start) const {
    return initial_parameters_.row(start);
  }
  Matrix final_parameters() const { return final_parameters_; }
  Matrix predictor()        const { return predictor_;        }
  Matrix response()         const { return response_;         }
  int predictor_size()      const { return predictor_.cols(); }
  int num_observations()    const { return predictor_.rows(); }
  int response_size()       const { return response_.cols();  }
  int num_parameters()      const { return initial_parameters_.cols(); }
  int num_starts()          const { return initial_parameters_.rows(); }
  double certified_cost()   const { return certified_cost_; }

 private:
  Matrix predictor_;
  Matrix response_;
  Matrix initial_parameters_;
  Matrix final_parameters_;
  double certified_cost_;
};

// The NIST_BEGIN / NIST_END macro pair generates a cost functor for a
// model with a single predictor variable x. The residual is
// y - f(x; b), where f is the expression written between the two
// macros.
#define NIST_BEGIN(CostFunctionName) \
  struct CostFunctionName { \
    CostFunctionName(const double* const x, \
                     const double* const y) \
        : x_(*x), y_(*y) {} \
    double x_; \
    double y_; \
    template <typename T> \
    bool operator()(const T* const b, T* residual) const { \
      const T y(y_); \
      const T x(x_); \
      residual[0] = y - (

#define NIST_END ); return true; }};
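// For example, the pair
//
//   NIST_BEGIN(Misra1a)
//     b[0] * (T(1.0) - exp(-b[1] * x))
//   NIST_END
//
// expands (roughly) to:
//
//   struct Misra1a {
//     Misra1a(const double* const x, const double* const y)
//         : x_(*x), y_(*y) {}
//     double x_;
//     double y_;
//     template <typename T>
//     bool operator()(const T* const b, T* residual) const {
//       const T y(y_);
//       const T x(x_);
//       residual[0] = y - (b[0] * (T(1.0) - exp(-b[1] * x)));
//       return true;
//     }
//   };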

// y = b1 * (b2+x)**(-1/b3)  +  e
NIST_BEGIN(Bennet5)
  b[0] * pow(b[1] + x, T(-1.0) / b[2])
NIST_END

// y = b1*(1-exp[-b2*x])  +  e
NIST_BEGIN(BoxBOD)
  b[0] * (T(1.0) - exp(-b[1] * x))
NIST_END

// y = exp[-b1*x]/(b2+b3*x)  +  e
NIST_BEGIN(Chwirut)
  exp(-b[0] * x) / (b[1] + b[2] * x)
NIST_END

// y = b1*x**b2  +  e
NIST_BEGIN(DanWood)
  b[0] * pow(x, b[1])
NIST_END

// y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
//     + b6*exp( -(x-b7)**2 / b8**2 ) + e
NIST_BEGIN(Gauss)
  b[0] * exp(-b[1] * x) +
  b[2] * exp(-pow((x - b[3])/b[4], 2)) +
  b[5] * exp(-pow((x - b[6])/b[7], 2))
NIST_END

// y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x)  +  e
NIST_BEGIN(Lanczos)
  b[0] * exp(-b[1] * x) + b[2] * exp(-b[3] * x) + b[4] * exp(-b[5] * x)
NIST_END

// y = (b1+b2*x+b3*x**2+b4*x**3) /
//     (1+b5*x+b6*x**2+b7*x**3)  +  e
NIST_BEGIN(Hahn1)
  (b[0] + b[1] * x + b[2] * x * x + b[3] * x * x * x) /
  (T(1.0) + b[4] * x + b[5] * x * x + b[6] * x * x * x)
NIST_END

// y = (b1 + b2*x + b3*x**2) /
//     (1 + b4*x + b5*x**2)  +  e
NIST_BEGIN(Kirby2)
  (b[0] + b[1] * x + b[2] * x * x) /
  (T(1.0) + b[3] * x + b[4] * x * x)
NIST_END

// y = b1*(x**2+x*b2) / (x**2+x*b3+b4)  +  e
NIST_BEGIN(MGH09)
  b[0] * (x * x + x * b[1]) / (x * x + x * b[2] + b[3])
NIST_END

// y = b1 * exp[b2/(x+b3)]  +  e
NIST_BEGIN(MGH10)
  b[0] * exp(b[1] / (x + b[2]))
NIST_END

// y = b1 + b2*exp[-x*b4] + b3*exp[-x*b5]
NIST_BEGIN(MGH17)
  b[0] + b[1] * exp(-x * b[3]) + b[2] * exp(-x * b[4])
NIST_END

// y = b1*(1-exp[-b2*x])  +  e
NIST_BEGIN(Misra1a)
  b[0] * (T(1.0) - exp(-b[1] * x))
NIST_END

// y = b1 * (1-(1+b2*x/2)**(-2))  +  e
NIST_BEGIN(Misra1b)
  b[0] * (T(1.0) - T(1.0) / ((T(1.0) + b[1] * x / 2.0) * (T(1.0) + b[1] * x / 2.0)))
NIST_END

// y = b1 * (1-(1+2*b2*x)**(-.5))  +  e
NIST_BEGIN(Misra1c)
  b[0] * (T(1.0) - pow(T(1.0) + T(2.0) * b[1] * x, -0.5))
NIST_END

// y = b1*b2*x*((1+b2*x)**(-1))  +  e
NIST_BEGIN(Misra1d)
  b[0] * b[1] * x / (T(1.0) + b[1] * x)
NIST_END

const double kPi = 3.141592653589793238462643383279;
// pi = 3.141592653589793238462643383279E0
// y = b1 - b2*x - arctan[b3/(x-b4)]/pi  +  e
NIST_BEGIN(Roszman1)
  b[0] - b[1] * x - atan2(b[2], (x - b[3])) / T(kPi)
NIST_END

// y = b1 / (1+exp[b2-b3*x])  +  e
NIST_BEGIN(Rat42)
  b[0] / (T(1.0) + exp(b[1] - b[2] * x))
NIST_END

// y = b1 / ((1+exp[b2-b3*x])**(1/b4))  +  e
NIST_BEGIN(Rat43)
  b[0] / pow(T(1.0) + exp(b[1] - b[2] * x), T(1.0) / b[3])
NIST_END

// y = (b1 + b2*x + b3*x**2 + b4*x**3) /
//     (1 + b5*x + b6*x**2 + b7*x**3)  +  e
NIST_BEGIN(Thurber)
  (b[0] + b[1] * x + b[2] * x * x + b[3] * x * x * x) /
  (T(1.0) + b[4] * x + b[5] * x * x + b[6] * x * x * x)
NIST_END

// y = b1 + b2*cos( 2*pi*x/12 ) + b3*sin( 2*pi*x/12 )
//        + b5*cos( 2*pi*x/b4 ) + b6*sin( 2*pi*x/b4 )
//        + b8*cos( 2*pi*x/b7 ) + b9*sin( 2*pi*x/b7 )  + e
NIST_BEGIN(ENSO)
  b[0] + b[1] * cos(T(2.0 * kPi) * x / T(12.0)) +
         b[2] * sin(T(2.0 * kPi) * x / T(12.0)) +
         b[4] * cos(T(2.0 * kPi) * x / b[3]) +
         b[5] * sin(T(2.0 * kPi) * x / b[3]) +
         b[7] * cos(T(2.0 * kPi) * x / b[6]) +
         b[8] * sin(T(2.0 * kPi) * x / b[6])
NIST_END

// y = (b1/b2) * exp[-0.5*((x-b3)/b2)**2]  +  e
NIST_BEGIN(Eckerle4)
  b[0] / b[1] * exp(T(-0.5) * pow((x - b[2])/b[1], 2))
NIST_END

// The Nelson model has two predictor variables and a log-transformed
// response, so it does not fit the single-predictor NIST_BEGIN /
// NIST_END pattern above and is written out by hand.
struct Nelson {
 public:
  Nelson(const double* const x, const double* const y)
      : x1_(x[0]), x2_(x[1]), y_(y[0]) {}

  template <typename T>
  bool operator()(const T* const b, T* residual) const {
    // log[y] = b1 - b2*x1 * exp[-b3*x2]  +  e
    residual[0] = T(log(y_)) - (b[0] - b[1] * T(x1_) * exp(-b[2] * T(x2_)));
    return true;
  }

 private:
  double x1_;
  double x2_;
  double y_;
};

// Loads the NIST problem in filename and, for each of its starting
// points, builds a Ceres problem, solves it and computes the LRE of
// the solution. Returns the number of starts that were solved to at
// least kMinNumMatchingDigits digits.
template <typename Model, int num_residuals, int num_parameters>
int RegressionDriver(const std::string& filename,
                     const ceres::Solver::Options& options) {
  NISTProblem nist_problem(FLAGS_nist_data_dir + filename);
  CHECK_EQ(num_residuals, nist_problem.response_size());
  CHECK_EQ(num_parameters, nist_problem.num_parameters());

  Matrix predictor = nist_problem.predictor();
  Matrix response = nist_problem.response();
  Matrix final_parameters = nist_problem.final_parameters();

  printf("%s\n", filename.c_str());

  // Each NIST problem comes with multiple starting points, so we
  // construct the problem from scratch for each case and solve it.
  int num_success = 0;
  for (int start = 0; start < nist_problem.num_starts(); ++start) {
    Matrix initial_parameters = nist_problem.initial_parameters(start);

    ceres::Problem problem;
    for (int i = 0; i < nist_problem.num_observations(); ++i) {
      // The AutoDiffCostFunction template arguments are the model
      // functor, the number of residuals and the number of parameters.
      problem.AddResidualBlock(
          new ceres::AutoDiffCostFunction<Model, num_residuals, num_parameters>(
              new Model(predictor.data() + nist_problem.predictor_size() * i,
                        response.data() + nist_problem.response_size() * i)),
          NULL,
          initial_parameters.data());
    }

    ceres::Solver::Summary summary;
    Solve(options, &problem, &summary);

    // Compute the LRE by comparing each component of the solution
    // with the ground truth, and taking the minimum. After Solve()
    // returns, initial_parameters holds the computed solution.
    Matrix final_parameters = nist_problem.final_parameters();
    const double kMaxNumSignificantDigits = 11;
    double log_relative_error = kMaxNumSignificantDigits + 1;
    for (int i = 0; i < num_parameters; ++i) {
      const double tmp_lre =
          -std::log10(std::fabs(final_parameters(i) - initial_parameters(i)) /
                      std::fabs(final_parameters(i)));
      // The maximum LRE is capped at 11 - the precision at which the
      // ground truth is known.
      //
      // The minimum LRE is capped at 0 - no digits match between the
      // computed solution and the ground truth.
      log_relative_error =
          std::min(log_relative_error,
                   std::max(0.0, std::min(kMaxNumSignificantDigits, tmp_lre)));
    }

    const int kMinNumMatchingDigits = 4;
    if (log_relative_error >= kMinNumMatchingDigits) {
      ++num_success;
    }

    printf("start: %d status: %s lre: %4.1f initial cost: %e final cost: %e "
           "certified cost: %e total iterations: %d\n",
           start + 1,
           log_relative_error < kMinNumMatchingDigits ? "FAILURE" : "SUCCESS",
           log_relative_error,
           summary.initial_cost,
           summary.final_cost,
           nist_problem.certified_cost(),
           (summary.num_successful_steps + summary.num_unsuccessful_steps));
  }
  return num_success;
}
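// Adding another model of the same single-predictor form would
// (hypothetically) just require defining it with the macros above and
// registering it in SolveNISTProblems() below, e.g.
//
//   // y = b1 + b2*x  +  e
//   NIST_BEGIN(MyLine)
//     b[0] + b[1] * x
//   NIST_END
//
//   ...
//   RegressionDriver<MyLine, 1, 2>("MyLine.dat", options);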

// Translates the command line flags into ceres::Solver::Options.
void SetMinimizerOptions(ceres::Solver::Options* options) {
  CHECK(ceres::StringToMinimizerType(FLAGS_minimizer,
                                     &options->minimizer_type));
  CHECK(ceres::StringToLinearSolverType(FLAGS_linear_solver,
                                        &options->linear_solver_type));
  CHECK(ceres::StringToPreconditionerType(FLAGS_preconditioner,
                                          &options->preconditioner_type));
  CHECK(ceres::StringToTrustRegionStrategyType(
            FLAGS_trust_region_strategy,
            &options->trust_region_strategy_type));
  CHECK(ceres::StringToDoglegType(FLAGS_dogleg, &options->dogleg_type));
  CHECK(ceres::StringToLineSearchDirectionType(
      FLAGS_line_search_direction,
      &options->line_search_direction_type));
  CHECK(ceres::StringToLineSearchType(FLAGS_line_search,
                                      &options->line_search_type));
  CHECK(ceres::StringToLineSearchInterpolationType(
      FLAGS_line_search_interpolation,
      &options->line_search_interpolation_type));

  options->max_num_iterations = FLAGS_num_iterations;
  options->use_nonmonotonic_steps = FLAGS_nonmonotonic_steps;
  options->initial_trust_region_radius = FLAGS_initial_trust_region_radius;
  options->max_lbfgs_rank = FLAGS_lbfgs_rank;
  options->line_search_sufficient_function_decrease = FLAGS_sufficient_decrease;
  options->line_search_sufficient_curvature_decrease =
      FLAGS_sufficient_curvature_decrease;
  options->max_num_line_search_step_size_iterations =
      FLAGS_max_line_search_iterations;
  options->max_num_line_search_direction_restarts =
      FLAGS_max_line_search_restarts;
  options->use_approximate_eigenvalue_bfgs_scaling =
      FLAGS_approximate_eigenvalue_bfgs_scaling;

  // Use very small tolerances so that convergence is not declared
  // prematurely.
  options->function_tolerance = 1e-18;
  options->gradient_tolerance = 1e-18;
  options->parameter_tolerance = 1e-18;
}

void SolveNISTProblems() {
  if (FLAGS_nist_data_dir.empty()) {
    LOG(FATAL) << "Must specify the directory containing the NIST problems";
  }

  ceres::Solver::Options options;
  SetMinimizerOptions(&options);

  std::cout << "Lower Difficulty\n";
  int easy_success = 0;
  easy_success += RegressionDriver<Misra1a,  1, 2>("Misra1a.dat",  options);
  easy_success += RegressionDriver<Chwirut,  1, 3>("Chwirut1.dat", options);
  easy_success += RegressionDriver<Chwirut,  1, 3>("Chwirut2.dat", options);
  easy_success += RegressionDriver<Lanczos,  1, 6>("Lanczos3.dat", options);
  easy_success += RegressionDriver<Gauss,    1, 8>("Gauss1.dat",   options);
  easy_success += RegressionDriver<Gauss,    1, 8>("Gauss2.dat",   options);
  easy_success += RegressionDriver<DanWood,  1, 2>("DanWood.dat",  options);
  easy_success += RegressionDriver<Misra1b,  1, 2>("Misra1b.dat",  options);

  std::cout << "\nMedium Difficulty\n";
  int medium_success = 0;
  medium_success += RegressionDriver<Kirby2,   1, 5>("Kirby2.dat",   options);
  medium_success += RegressionDriver<Hahn1,    1, 7>("Hahn1.dat",    options);
  medium_success += RegressionDriver<Nelson,   1, 3>("Nelson.dat",   options);
  medium_success += RegressionDriver<MGH17,    1, 5>("MGH17.dat",    options);
  medium_success += RegressionDriver<Lanczos,  1, 6>("Lanczos1.dat", options);
  medium_success += RegressionDriver<Lanczos,  1, 6>("Lanczos2.dat", options);
  medium_success += RegressionDriver<Gauss,    1, 8>("Gauss3.dat",   options);
  medium_success += RegressionDriver<Misra1c,  1, 2>("Misra1c.dat",  options);
  medium_success += RegressionDriver<Misra1d,  1, 2>("Misra1d.dat",  options);
  medium_success += RegressionDriver<Roszman1, 1, 4>("Roszman1.dat", options);
  medium_success += RegressionDriver<ENSO,     1, 9>("ENSO.dat",     options);

  std::cout << "\nHigher Difficulty\n";
  int hard_success = 0;
  hard_success += RegressionDriver<MGH09,    1, 4>("MGH09.dat",    options);
  hard_success += RegressionDriver<Thurber,  1, 7>("Thurber.dat",  options);
  hard_success += RegressionDriver<BoxBOD,   1, 2>("BoxBOD.dat",   options);
  hard_success += RegressionDriver<Rat42,    1, 3>("Rat42.dat",    options);
  hard_success += RegressionDriver<MGH10,    1, 3>("MGH10.dat",    options);
  hard_success += RegressionDriver<Eckerle4, 1, 3>("Eckerle4.dat", options);
  hard_success += RegressionDriver<Rat43,    1, 4>("Rat43.dat",    options);
  hard_success += RegressionDriver<Bennet5,  1, 3>("Bennett5.dat", options);

  std::cout << "\n";
  std::cout << "Easy    : " << easy_success << "/16\n";
  std::cout << "Medium  : " << medium_success << "/22\n";
  std::cout << "Hard    : " << hard_success << "/16\n";
  std::cout << "Total   : " << easy_success + medium_success + hard_success
            << "/54\n";
}

}  // namespace examples
}  // namespace ceres

int main(int argc, char** argv) {
  google::ParseCommandLineFlags(&argc, &argv, true);
  google::InitGoogleLogging(argv[0]);
  ceres::examples::SolveNISTProblems();
  return 0;
}