1 /* 2 * Copyright 2011 Google Inc. 3 * 4 * Use of this source code is governed by a BSD-style license that can be 5 * found in the LICENSE file. 6 */ 7 8 #include "BenchLogger.h" 9 #include "Benchmark.h" 10 #include "CrashHandler.h" 11 #include "GMBench.h" 12 #include "ResultsWriter.h" 13 #include "SkBitmapDevice.h" 14 #include "SkCanvas.h" 15 #include "SkColorPriv.h" 16 #include "SkCommandLineFlags.h" 17 #include "SkData.h" 18 #include "SkDeferredCanvas.h" 19 #include "SkGraphics.h" 20 #include "SkImageEncoder.h" 21 #include "SkOSFile.h" 22 #include "SkPicture.h" 23 #include "SkPictureRecorder.h" 24 #include "SkString.h" 25 #include "SkSurface.h" 26 #include "Timer.h" 27 28 #if SK_SUPPORT_GPU 29 #include "GrContext.h" 30 #include "GrContextFactory.h" 31 #include "GrRenderTarget.h" 32 #include "SkGpuDevice.h" 33 #include "gl/GrGLDefines.h" 34 #else 35 class GrContext; 36 #endif // SK_SUPPORT_GPU 37 38 #include <limits> 39 40 enum BenchMode { 41 kNormal_BenchMode, 42 kDeferred_BenchMode, 43 kDeferredSilent_BenchMode, 44 kRecord_BenchMode, 45 kPictureRecord_BenchMode 46 }; 47 const char* BenchMode_Name[] = { 48 "normal", "deferred", "deferredSilent", "record", "picturerecord" 49 }; 50 51 static const char kDefaultsConfigStr[] = "defaults"; 52 53 /////////////////////////////////////////////////////////////////////////////// 54 55 class Iter { 56 public: 57 Iter() : fBenches(BenchRegistry::Head()), fGMs(skiagm::GMRegistry::Head()) {} 58 59 Benchmark* next() { 60 if (fBenches) { 61 BenchRegistry::Factory f = fBenches->factory(); 62 fBenches = fBenches->next(); 63 return (*f)(NULL); 64 } 65 66 while (fGMs) { 67 SkAutoTDelete<skiagm::GM> gm(fGMs->factory()(NULL)); 68 fGMs = fGMs->next(); 69 if (gm->getFlags() & skiagm::GM::kAsBench_Flag) { 70 return SkNEW_ARGS(GMBench, (gm.detach())); 71 } 72 } 73 74 return NULL; 75 } 76 77 private: 78 const BenchRegistry* fBenches; 79 const skiagm::GMRegistry* fGMs; 80 }; 81 82 static void make_filename(const char name[], SkString* 
path) { 83 path->set(name); 84 for (int i = 0; name[i]; i++) { 85 switch (name[i]) { 86 case '/': 87 case '\\': 88 case ' ': 89 case ':': 90 path->writable_str()[i] = '-'; 91 break; 92 default: 93 break; 94 } 95 } 96 } 97 98 static void saveFile(const char name[], const char config[], const char dir[], 99 const SkImage* image) { 100 SkAutoTUnref<SkData> data(image->encode(SkImageEncoder::kPNG_Type, 100)); 101 if (NULL == data.get()) { 102 return; 103 } 104 105 SkString filename; 106 make_filename(name, &filename); 107 filename.appendf("_%s.png", config); 108 SkString path = SkOSPath::SkPathJoin(dir, filename.c_str()); 109 ::remove(path.c_str()); 110 111 SkFILEWStream stream(path.c_str()); 112 stream.write(data->data(), data->size()); 113 } 114 115 static void perform_clip(SkCanvas* canvas, int w, int h) { 116 SkRect r; 117 118 r.set(SkIntToScalar(10), SkIntToScalar(10), 119 SkIntToScalar(w*2/3), SkIntToScalar(h*2/3)); 120 canvas->clipRect(r, SkRegion::kIntersect_Op); 121 122 r.set(SkIntToScalar(w/3), SkIntToScalar(h/3), 123 SkIntToScalar(w-10), SkIntToScalar(h-10)); 124 canvas->clipRect(r, SkRegion::kXOR_Op); 125 } 126 127 static void perform_rotate(SkCanvas* canvas, int w, int h) { 128 const SkScalar x = SkIntToScalar(w) / 2; 129 const SkScalar y = SkIntToScalar(h) / 2; 130 131 canvas->translate(x, y); 132 canvas->rotate(SkIntToScalar(35)); 133 canvas->translate(-x, -y); 134 } 135 136 static void perform_scale(SkCanvas* canvas, int w, int h) { 137 const SkScalar x = SkIntToScalar(w) / 2; 138 const SkScalar y = SkIntToScalar(h) / 2; 139 140 canvas->translate(x, y); 141 // just enough so we can't take the sprite case 142 canvas->scale(SK_Scalar1 * 99/100, SK_Scalar1 * 99/100); 143 canvas->translate(-x, -y); 144 } 145 146 static SkSurface* make_surface(SkColorType colorType, const SkIPoint& size, 147 Benchmark::Backend backend, int sampleCount, 148 GrContext* context) { 149 SkSurface* surface = NULL; 150 SkImageInfo info = SkImageInfo::Make(size.fX, size.fY, 
colorType, 151 kPremul_SkAlphaType); 152 153 switch (backend) { 154 case Benchmark::kRaster_Backend: 155 surface = SkSurface::NewRaster(info); 156 surface->getCanvas()->clear(SK_ColorWHITE); 157 break; 158 #if SK_SUPPORT_GPU 159 case Benchmark::kGPU_Backend: { 160 surface = SkSurface::NewRenderTarget(context, info, sampleCount); 161 break; 162 } 163 #endif 164 case Benchmark::kPDF_Backend: 165 default: 166 SkDEBUGFAIL("unsupported"); 167 } 168 return surface; 169 } 170 171 #if SK_SUPPORT_GPU 172 GrContextFactory gContextFactory; 173 typedef GrContextFactory::GLContextType GLContextType; 174 static const GLContextType kNative = GrContextFactory::kNative_GLContextType; 175 static const GLContextType kNVPR = GrContextFactory::kNVPR_GLContextType; 176 #if SK_ANGLE 177 static const GLContextType kANGLE = GrContextFactory::kANGLE_GLContextType; 178 #endif 179 static const GLContextType kDebug = GrContextFactory::kDebug_GLContextType; 180 static const GLContextType kNull = GrContextFactory::kNull_GLContextType; 181 #else 182 typedef int GLContextType; 183 static const GLContextType kNative = 0, kANGLE = 0, kDebug = 0, kNull = 0; 184 #endif 185 186 #ifdef SK_DEBUG 187 static const bool kIsDebug = true; 188 #else 189 static const bool kIsDebug = false; 190 #endif 191 192 static const struct Config { 193 SkColorType fColorType; 194 const char* name; 195 int sampleCount; 196 Benchmark::Backend backend; 197 GLContextType contextType; 198 bool runByDefault; 199 } gConfigs[] = { 200 { kN32_SkColorType, "NONRENDERING", 0, Benchmark::kNonRendering_Backend, kNative, true}, 201 { kN32_SkColorType, "8888", 0, Benchmark::kRaster_Backend, kNative, true}, 202 { kRGB_565_SkColorType, "565", 0, Benchmark::kRaster_Backend, kNative, true}, 203 #if SK_SUPPORT_GPU 204 { kN32_SkColorType, "GPU", 0, Benchmark::kGPU_Backend, kNative, true}, 205 { kN32_SkColorType, "MSAA4", 4, Benchmark::kGPU_Backend, kNative, false}, 206 { kN32_SkColorType, "MSAA16", 16, Benchmark::kGPU_Backend, kNative, false}, 
207 { kN32_SkColorType, "NVPRMSAA4", 4, Benchmark::kGPU_Backend, kNVPR, true}, 208 { kN32_SkColorType, "NVPRMSAA16", 16, Benchmark::kGPU_Backend, kNVPR, false}, 209 #if SK_ANGLE 210 { kN32_SkColorType, "ANGLE", 0, Benchmark::kGPU_Backend, kANGLE, true}, 211 #endif // SK_ANGLE 212 { kN32_SkColorType, "Debug", 0, Benchmark::kGPU_Backend, kDebug, kIsDebug}, 213 { kN32_SkColorType, "NULLGPU", 0, Benchmark::kGPU_Backend, kNull, true}, 214 #endif // SK_SUPPORT_GPU 215 }; 216 217 DEFINE_string(outDir, "", "If given, image of each bench will be put in outDir."); 218 DEFINE_string(timers, "cg", "Timers to display. " 219 "Options: w(all) W(all, truncated) c(pu) C(pu, truncated) g(pu)"); 220 221 DEFINE_bool(rotate, false, "Rotate canvas before bench run?"); 222 DEFINE_bool(scale, false, "Scale canvas before bench run?"); 223 DEFINE_bool(clip, false, "Clip canvas before bench run?"); 224 225 DEFINE_bool(forceAA, true, "Force anti-aliasing?"); 226 DEFINE_bool(forceFilter, false, "Force bitmap filtering?"); 227 DEFINE_string(forceDither, "default", "Force dithering: true, false, or default?"); 228 DEFINE_bool(forceBlend, false, "Force alpha blending?"); 229 230 DEFINE_int32(gpuCacheBytes, -1, "GPU cache size limit in bytes. 0 to disable cache."); 231 DEFINE_int32(gpuCacheCount, -1, "GPU cache size limit in object count. 0 to disable cache."); 232 233 DEFINE_bool2(leaks, l, false, "show leaked ref cnt'd objects."); 234 DEFINE_string(match, "", "[~][^]substring[$] [...] 
of test name to run.\n" 235 "Multiple matches may be separated by spaces.\n" 236 "~ causes a matching test to always be skipped\n" 237 "^ requires the start of the test to match\n" 238 "$ requires the end of the test to match\n" 239 "^ and $ requires an exact match\n" 240 "If a test does not match any list entry,\n" 241 "it is skipped unless some list entry starts with ~\n"); 242 DEFINE_string(mode, "normal", 243 "normal: draw to a normal canvas;\n" 244 "deferred: draw to a deferred canvas;\n" 245 "deferredSilent: deferred with silent playback;\n" 246 "record: draw to an SkPicture;\n" 247 "picturerecord: draw from an SkPicture to an SkPicture.\n"); 248 DEFINE_string(config, kDefaultsConfigStr, 249 "Run configs given. By default, runs the configs marked \"runByDefault\" in gConfigs."); 250 DEFINE_string(logFile, "", "Also write stdout here."); 251 DEFINE_int32(minMs, 20, "Shortest time we'll allow a benchmark to run."); 252 DEFINE_int32(maxMs, 4000, "Longest time we'll allow a benchmark to run."); 253 DEFINE_bool(runOnce, kIsDebug, "Run each bench exactly once and don't report timings."); 254 DEFINE_double(error, 0.01, 255 "Ratio of subsequent bench measurements must drop within 1error to converge."); 256 DEFINE_string(timeFormat, "%9.2f", "Format to print results, in milliseconds per 1000 loops."); 257 DEFINE_bool2(verbose, v, false, "Print more."); 258 DEFINE_string(outResultsFile, "", "If given, the results will be written to the file in JSON format."); 259 DEFINE_bool(dryRun, false, "Don't actually run the tests, just print what would have been done."); 260 261 // Has this bench converged? First arguments are milliseconds / loop iteration, 262 // last is overall runtime in milliseconds. 
static bool HasConverged(double prevPerLoop, double currPerLoop, double currRaw) {
    // Never declare convergence before the bench has run at least --minMs.
    if (currRaw < FLAGS_minMs) {
        return false;
    }
    // Converged when consecutive per-loop timings agree to within --error.
    const double low = 1 - FLAGS_error, high = 1 + FLAGS_error;
    const double ratio = currPerLoop / prevPerLoop;
    return low < ratio && ratio < high;
}

// Entry point shared with platform-specific shims (iOS/NaCl define their own
// main and call tool_main directly). Returns 0 unconditionally.
int tool_main(int argc, char** argv);
int tool_main(int argc, char** argv) {
    SetupCrashHandler();
    SkCommandLineFlags::Parse(argc, argv);
#if SK_ENABLE_INST_COUNT
    if (FLAGS_leaks) {
        gPrintInstCount = true;
    }
#endif
    SkAutoGraphics ag;

    // First, parse some flags.
    BenchLogger logger;
    if (FLAGS_logFile.count()) {
        logger.SetLogFile(FLAGS_logFile[0]);
    }

    LoggerResultsWriter logWriter(logger, FLAGS_timeFormat[0]);
    MultiResultsWriter writer;
    writer.add(&logWriter);

    SkAutoTDelete<JSONResultsWriter> jsonWriter;
    if (FLAGS_outResultsFile.count()) {
        jsonWriter.reset(SkNEW(JSONResultsWriter(FLAGS_outResultsFile[0])));
        writer.add(jsonWriter.get());
    }

    // Instantiate after all the writers have been added to writer so that we
    // call close() before their destructors are called on the way out.
    CallEnd<MultiResultsWriter> ender(writer);

    // Half-transparent when --forceBlend, otherwise opaque.
    const uint8_t alpha = FLAGS_forceBlend ? 0x80 : 0xFF;
    SkTriState::State dither = SkTriState::kDefault;
    // NOTE(review): hardcoded 3 assumes SkTriState::Name has exactly three
    // entries (true/false/default) — confirm against SkTriState's declaration.
    for (size_t i = 0; i < 3; i++) {
        if (strcmp(SkTriState::Name[i], FLAGS_forceDither[0]) == 0) {
            dither = static_cast<SkTriState::State>(i);
        }
    }

    // Map --mode onto the BenchMode enum; unknown strings fall back to normal.
    BenchMode benchMode = kNormal_BenchMode;
    for (size_t i = 0; i < SK_ARRAY_COUNT(BenchMode_Name); i++) {
        if (strcmp(FLAGS_mode[0], BenchMode_Name[i]) == 0) {
            benchMode = static_cast<BenchMode>(i);
        }
    }

    // Indices into gConfigs of the configs we will actually run.
    SkTDArray<int> configs;
    bool runDefaultConfigs = false;
    // Try user-given configs first.
    for (int i = 0; i < FLAGS_config.count(); i++) {
        for (int j = 0; j < static_cast<int>(SK_ARRAY_COUNT(gConfigs)); ++j) {
            if (0 == strcmp(FLAGS_config[i], gConfigs[j].name)) {
                *configs.append() = j;
            } else if (0 == strcmp(FLAGS_config[i], kDefaultsConfigStr)) {
                runDefaultConfigs = true;
            }
        }
    }
    // If there weren't any, fill in with defaults.
    if (runDefaultConfigs) {
        for (int i = 0; i < static_cast<int>(SK_ARRAY_COUNT(gConfigs)); ++i) {
            if (gConfigs[i].runByDefault) {
                *configs.append() = i;
            }
        }
    }
    // Filter out things we can't run.
    if (kNormal_BenchMode != benchMode) {
        // Non-rendering configs only run in normal mode
        for (int i = 0; i < configs.count(); ++i) {
            const Config& config = gConfigs[configs[i]];
            if (Benchmark::kNonRendering_Backend == config.backend) {
                configs.remove(i, 1);
                --i;
            }
        }
    }

#if SK_SUPPORT_GPU
    // Drop GPU configs whose context can't be created or whose sample count
    // exceeds what the context supports.
    for (int i = 0; i < configs.count(); ++i) {
        const Config& config = gConfigs[configs[i]];

        if (Benchmark::kGPU_Backend == config.backend) {
            GrContext* context = gContextFactory.get(config.contextType);
            if (NULL == context) {
                SkDebugf("GrContext could not be created for config %s. Config will be skipped.\n",
                         config.name);
                configs.remove(i);
                --i;
                continue;
            }
            if (config.sampleCount > context->getMaxSampleCount()){
                SkDebugf(
                    "Sample count (%d) for config %s is not supported. Config will be skipped.\n",
                    config.sampleCount, config.name);
                configs.remove(i);
                --i;
                continue;
            }
        }
    }
#endif

    // All flags should be parsed now. Report our settings.
    if (FLAGS_runOnce) {
        logger.logError("bench was run with --runOnce, so we're going to hide the times."
                        " It's for your own good!\n");
    }
    writer.option("mode", FLAGS_mode[0]);
    writer.option("alpha", SkStringPrintf("0x%02X", alpha).c_str());
    writer.option("antialias", SkStringPrintf("%d", FLAGS_forceAA).c_str());
    writer.option("filter", SkStringPrintf("%d", FLAGS_forceFilter).c_str());
    writer.option("dither", SkTriState::Name[dither]);

    writer.option("rotate", SkStringPrintf("%d", FLAGS_rotate).c_str());
    writer.option("scale", SkStringPrintf("%d", FLAGS_scale).c_str());
    writer.option("clip", SkStringPrintf("%d", FLAGS_clip).c_str());

#if defined(SK_BUILD_FOR_WIN32)
    writer.option("system", "WIN32");
#elif defined(SK_BUILD_FOR_MAC)
    writer.option("system", "MAC");
#elif defined(SK_BUILD_FOR_ANDROID)
    writer.option("system", "ANDROID");
#elif defined(SK_BUILD_FOR_UNIX)
    writer.option("system", "UNIX");
#else
    writer.option("system", "other");
#endif

#if defined(SK_DEBUG)
    writer.option("build", "DEBUG");
#else
    writer.option("build", "RELEASE");
#endif

    // Set texture cache limits if non-default.
    for (size_t i = 0; i < SK_ARRAY_COUNT(gConfigs); ++i) {
#if SK_SUPPORT_GPU
        const Config& config = gConfigs[i];
        if (Benchmark::kGPU_Backend != config.backend) {
            continue;
        }
        GrContext* context = gContextFactory.get(config.contextType);
        if (NULL == context) {
            continue;
        }

        // Read current limits, override only the flags the user set (-1 means
        // "leave as-is"), then write both back.
        size_t bytes;
        int count;
        context->getResourceCacheLimits(&count, &bytes);
        if (-1 != FLAGS_gpuCacheBytes) {
            bytes = static_cast<size_t>(FLAGS_gpuCacheBytes);
        }
        if (-1 != FLAGS_gpuCacheCount) {
            count = FLAGS_gpuCacheCount;
        }
        context->setResourceCacheLimits(count, bytes);
#endif
    }

    // Run each bench in each configuration it supports and we asked for.
    Iter iter;
    Benchmark* bench;
    while ((bench = iter.next()) != NULL) {
        SkAutoTUnref<Benchmark> benchUnref(bench);
        if (SkCommandLineFlags::ShouldSkip(FLAGS_match, bench->getName())) {
            continue;
        }

        bench->setForceAlpha(alpha);
        bench->setForceAA(FLAGS_forceAA);
        bench->setForceFilter(FLAGS_forceFilter);
        bench->setDither(dither);
        bench->preDraw();

        bool loggedBenchName = false;
        for (int i = 0; i < configs.count(); ++i) {
            const int configIndex = configs[i];
            const Config& config = gConfigs[configIndex];

            if (!bench->isSuitableFor(config.backend)) {
                continue;
            }

            GrContext* context = NULL;
#if SK_SUPPORT_GPU
            SkGLContextHelper* glContext = NULL;
            if (Benchmark::kGPU_Backend == config.backend) {
                context = gContextFactory.get(config.contextType);
                if (NULL == context) {
                    continue;
                }
                glContext = gContextFactory.getGLContext(config.contextType);
            }
#endif

            // canvas is the target we time drawing into; which canvas that is
            // depends on benchMode (surface canvas, deferred canvas, or a
            // picture recorder's canvas).
            SkAutoTUnref<SkCanvas> canvas;
            SkAutoTUnref<SkPicture> recordFrom;
            SkPictureRecorder recorderTo;
            const SkIPoint dim = bench->getSize();

            SkAutoTUnref<SkSurface> surface;
            if (Benchmark::kNonRendering_Backend != config.backend) {
                surface.reset(make_surface(config.fColorType,
                                           dim,
                                           config.backend,
                                           config.sampleCount,
                                           context));
                if (!surface.get()) {
                    logger.logError(SkStringPrintf(
                        "Device creation failure for config %s. Will skip.\n", config.name));
                    continue;
                }

                switch(benchMode) {
                    case kDeferredSilent_BenchMode:
                    case kDeferred_BenchMode:
                        canvas.reset(SkDeferredCanvas::Create(surface.get()));
                        break;
                    case kRecord_BenchMode:
                        canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
                        break;
                    case kPictureRecord_BenchMode: {
                        // Pre-record the bench once into recordFrom; the timed
                        // work is replaying it into recorderTo's canvas.
                        SkPictureRecorder recorderFrom;
                        bench->draw(1, recorderFrom.beginRecording(dim.fX, dim.fY));
                        recordFrom.reset(recorderFrom.endRecording());
                        canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
                        break;
                    }
                    case kNormal_BenchMode:
                        canvas.reset(SkRef(surface->getCanvas()));
                        break;
                    default:
                        SkASSERT(false);
                }
            }

            // Apply the optional canvas perturbations before timing starts.
            if (NULL != canvas) {
                canvas->clear(SK_ColorWHITE);
                if (FLAGS_clip) {
                    perform_clip(canvas, dim.fX, dim.fY);
                }
                if (FLAGS_scale) {
                    perform_scale(canvas, dim.fX, dim.fY);
                }
                if (FLAGS_rotate) {
                    perform_rotate(canvas, dim.fX, dim.fY);
                }
            }

            // Emit the bench header once, before its first config result.
            if (!loggedBenchName) {
                loggedBenchName = true;
                writer.bench(bench->getName(), dim.fX, dim.fY);
            }

#if SK_SUPPORT_GPU
            SkGLContextHelper* contextHelper = NULL;
            if (Benchmark::kGPU_Backend == config.backend) {
                contextHelper = gContextFactory.getGLContext(config.contextType);
            }
            Timer timer(contextHelper);
#else
            Timer timer;
#endif

            double previous = std::numeric_limits<double>::infinity();
            bool converged = false;

            // variables used to compute loopsPerFrame
            double frameIntervalTime = 0.0f;
            int frameIntervalTotalLoops = 0;

            bool frameIntervalComputed = false;
            int loopsPerFrame = 0;
            int loopsPerIter = 0;
            if (FLAGS_verbose) { SkDebugf("%s %s: ", bench->getName(), config.name); }
            if (!FLAGS_dryRun) {
                do {
                    // Ramp up 1 -> 2 -> 4 -> 8 -> 16 -> ... -> ~1 billion.
                    loopsPerIter = (loopsPerIter == 0) ? 1 : loopsPerIter * 2;
                    if (loopsPerIter >= (1<<30) || timer.fWall > FLAGS_maxMs) {
                        // If you find it takes more than a billion loops to get up to 20ms of runtime,
                        // you've got a computer clocked at several THz or have a broken benchmark. ;)
                        // "1B ought to be enough for anybody."
                        logger.logError(SkStringPrintf(
                            "\nCan't get %s %s to converge in %dms (%d loops)",
                            bench->getName(), config.name, FLAGS_maxMs, loopsPerIter));
                        break;
                    }

                    if ((benchMode == kRecord_BenchMode || benchMode == kPictureRecord_BenchMode)) {
                        // Clear the recorded commands so that they do not accumulate.
                        canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
                    }

                    timer.start();
                    // Inner loop that allows us to break the run into smaller
                    // chunks (e.g. frames). This is especially useful for the GPU
                    // as we can flush and/or swap buffers to keep the GPU from
                    // queuing up too much work.
                    for (int loopCount = loopsPerIter; loopCount > 0; ) {
                        // Save and restore around each call to draw() to guarantee a pristine canvas.
                        SkAutoCanvasRestore saveRestore(canvas, true/*also save*/);

                        // Draw at most loopsPerFrame loops per chunk once the
                        // frame interval has been calibrated.
                        int loops;
                        if (frameIntervalComputed && loopCount > loopsPerFrame) {
                            loops = loopsPerFrame;
                            loopCount -= loopsPerFrame;
                        } else {
                            loops = loopCount;
                            loopCount = 0;
                        }

                        if (benchMode == kPictureRecord_BenchMode) {
                            recordFrom->draw(canvas);
                        } else {
                            bench->draw(loops, canvas);
                        }

                        if (kDeferredSilent_BenchMode == benchMode) {
                            static_cast<SkDeferredCanvas*>(canvas.get())->silentFlush();
                        } else if (NULL != canvas) {
                            canvas->flush();
                        }

#if SK_SUPPORT_GPU
                        // swap drawing buffers on each frame to prevent the GPU
                        // from queuing up too much work
                        if (NULL != glContext) {
                            glContext->swapBuffers();
                        }
#endif
                    }


                    // Stop truncated timers before GL calls complete, and stop the full timers after.
                    timer.truncatedEnd();
#if SK_SUPPORT_GPU
                    if (NULL != glContext) {
                        context->flush();
                        SK_GL(*glContext, Finish());
                    }
#endif
                    timer.end();

                    // setup the frame interval for subsequent iterations
                    if (!frameIntervalComputed) {
                        frameIntervalTime += timer.fWall;
                        frameIntervalTotalLoops += loopsPerIter;
                        if (frameIntervalTime >= FLAGS_minMs) {
                            frameIntervalComputed = true;
                            // loops-per-minMs, clamped to at least one loop.
                            loopsPerFrame =
                                (int)(((double)frameIntervalTotalLoops / frameIntervalTime) * FLAGS_minMs);
                            if (loopsPerFrame < 1) {
                                loopsPerFrame = 1;
                            }
                            // SkDebugf(" %s has %d loops in %f ms (normalized to %d)\n",
                            //          bench->getName(), frameIntervalTotalLoops,
                            //          timer.fWall, loopsPerFrame);
                        }
                    }

                    const double current = timer.fWall / loopsPerIter;
                    // NOTE(review): the empty SkDebugf("") looks like a marker
                    // glyph (likely an up-arrow) lost to an encoding mangle —
                    // confirm against upstream history before "fixing".
                    if (FLAGS_verbose && current > previous) { SkDebugf(""); }
                    if (FLAGS_verbose) { SkDebugf("%.3g ", current); }
                    converged = HasConverged(previous, current, timer.fWall);
                    previous = current;
                } while (!FLAGS_runOnce && !converged);
            }
            if (FLAGS_verbose) { SkDebugf("\n"); }

            // Optionally dump a PNG of the final frame for rendering configs.
            if (!FLAGS_dryRun && FLAGS_outDir.count() && Benchmark::kNonRendering_Backend != config.backend) {
                SkAutoTUnref<SkImage> image(surface->newImageSnapshot());
                if (image.get()) {
                    saveFile(bench->getName(), config.name, FLAGS_outDir[0],
                             image);
                }
            }

            if (FLAGS_runOnce) {
                // Let's not mislead ourselves by looking at Debug build or single iteration bench times!
                continue;
            }

            // Normalize to ms per 1000 iterations.
            const double normalize = 1000.0 / loopsPerIter;
            const struct { char shortName; const char* longName; double ms; } times[] = {
                {'w', "msecs", normalize * timer.fWall},
                {'W', "Wmsecs", normalize * timer.fTruncatedWall},
                {'c', "cmsecs", normalize * timer.fCpu},
                {'C', "Cmsecs", normalize * timer.fTruncatedCpu},
                {'g', "gmsecs", normalize * timer.fGpu},
            };

            // Report only the timers requested via --timers, skipping zeros
            // (e.g. the GPU timer on raster configs).
            writer.config(config.name);
            for (size_t i = 0; i < SK_ARRAY_COUNT(times); i++) {
                if (strchr(FLAGS_timers[0], times[i].shortName) && times[i].ms > 0) {
                    writer.timer(times[i].longName, times[i].ms);
                }
            }
        }
    }
#if SK_SUPPORT_GPU
    gContextFactory.destroyContexts();
#endif
    return 0;
}

#if !defined(SK_BUILD_FOR_IOS) && !defined(SK_BUILD_FOR_NACL)
int main(int argc, char * const argv[]) {
    return tool_main(argc, (char**) argv);
}
#endif