/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.android.tradefed.testtype;

import com.android.ddmlib.IDevice;
import com.android.ddmlib.Log;
import com.android.ddmlib.testrunner.IRemoteAndroidTestRunner;
import com.android.ddmlib.testrunner.IRemoteAndroidTestRunner.TestSize;
import com.android.ddmlib.testrunner.InstrumentationResultParser;
import com.android.ddmlib.testrunner.RemoteAndroidTestRunner;
import com.android.ddmlib.testrunner.TestIdentifier;
import com.android.ddmlib.testrunner.TestRunResult;
import com.android.tradefed.config.ConfigurationException;
import com.android.tradefed.config.Option;
import com.android.tradefed.config.Option.Importance;
import com.android.tradefed.config.OptionClass;
import com.android.tradefed.device.DeviceNotAvailableException;
import com.android.tradefed.device.ITestDevice;
import com.android.tradefed.log.LogUtil.CLog;
import com.android.tradefed.result.BugreportCollector;
import com.android.tradefed.result.CollectingTestListener;
import com.android.tradefed.result.ITestInvocationListener;
import com.android.tradefed.result.InputStreamSource;
import com.android.tradefed.result.LogDataType;
import com.android.tradefed.result.ResultForwarder;
import com.android.tradefed.util.AbiFormatter;
import com.android.tradefed.util.ListInstrumentationParser;
import com.android.tradefed.util.ListInstrumentationParser.InstrumentationTarget;
import com.android.tradefed.util.StreamUtil;
import com.android.tradefed.util.StringEscapeUtils;

import com.google.common.annotations.VisibleForTesting;

import org.junit.Assert;

import java.io.File;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

/**
 * A Test that runs an instrumentation test package on given device.
 */
@OptionClass(alias = "instrumentation")
public class InstrumentationTest implements IDeviceTest, IResumableTest, ITestCollector,
        IAbiReceiver {

    private static final String LOG_TAG = "InstrumentationTest";

    /** max number of attempts to collect list of tests in package */
    private static final int COLLECT_TESTS_ATTEMPTS = 3;
    /** instrumentation test runner argument key used for test execution using a file */
    private static final String TEST_FILE_INST_ARGS_KEY = "testFile";

    /** instrumentation test runner argument key used for individual test timeout */
    static final String TEST_TIMEOUT_INST_ARGS_KEY = "timeout_msec";

    /** default timeout for tests collection */
    static final long TEST_COLLECTION_TIMEOUT_MS = 2 * 60 * 1000;

    @Option(name = "package", shortName = 'p',
            description="The manifest package name of the Android test application to run.",
            importance = Importance.IF_UNSET)
    private String mPackageName = null;

    @Option(name = "runner",
            description="The instrumentation test runner class name to use. Will try to determine "
                    + "automatically if it is not specified.")
    private String mRunnerName = null;

    @Option(name = "class", shortName = 'c',
            description="The test class name to run.")
    private String mTestClassName = null;

    @Option(name = "method", shortName = 'm',
            description="The test method name to run.")
    private String mTestMethodName = null;

    @Option(name = "test-package",
            description="Only run tests within this specific java package. " +
            "Will be ignored if --class is set.")
    private String mTestPackageName = null;

    /**
     * @deprecated use shell-timeout or test-timeout option instead.
     */
    @Deprecated
    @Option(name = "timeout",
            description="Deprecated - Use \"shell-timeout\" or \"test-timeout\" instead.")
    private Integer mTimeout = null;

    @Option(name = "shell-timeout",
            description="The defined timeout (in milliseconds) is used as a maximum waiting time "
                    + "when expecting the command output from the device. At any time, if the "
                    + "shell command does not output anything for a period longer than defined "
                    + "timeout the TF run terminates. For no timeout, set to 0.")
    private long mShellTimeout = 10 * 60 * 1000; // default to 10 minutes

    @Option(name = "test-timeout",
            description="Sets timeout (in milliseconds) that will be applied to each test. In the "
                    + "event of a test timeout it will log the results and proceed with executing "
                    + "the next test. For no timeout, set to 0.")
    private int mTestTimeout = 5 * 60 * 1000; // default to 5 minutes

    @Option(name = "size",
            description="Restrict test to a specific test size.")
    private String mTestSize = null;

    @Option(name = "rerun",
            description = "Rerun unexecuted tests individually on same device if test run " +
            "fails to complete.")
    private boolean mIsRerunMode = true;

    @Option(name = "resume",
            description = "Schedule unexecuted tests for resumption on another device " +
            "if first device becomes unavailable.")
    private boolean mIsResumeMode = false;

    @Option(name = "install-file",
            description="Optional file path to apk file that contains the tests.")
    private File mInstallFile = null;

    @Option(name = "run-name",
            description="Optional custom test run name to pass to listener. " +
            "If unspecified, will use package name.")
    private String mRunName = null;

    @Option(name = "instrumentation-arg",
            description = "Additional instrumentation arguments to provide.")
    private Map<String, String> mInstrArgMap = new HashMap<String, String>();

    @Option(name = "bugreport-on-failure", description = "Sets which failed testcase events " +
            "cause a bugreport to be collected. a bugreport after failed testcases. Note that " +
            "there is _no feedback mechanism_ between the test runner and the bugreport " +
            "collector, so use the EACH setting with due caution.")
    private BugreportCollector.Freq mBugreportFrequency = null;

    @Option(name = "screenshot-on-failure", description = "Take a screenshot on every test failure")
    private boolean mScreenshotOnFailure = false;

    @Option(name = "logcat-on-failure", description =
            "take a logcat snapshot on every test failure.")
    private boolean mLogcatOnFailure = false;

    @Option(name = "logcat-on-failure-size", description =
            "The max number of logcat data in bytes to capture when --logcat-on-failure is on. " +
            "Should be an amount that can comfortably fit in memory.")
    private int mMaxLogcatBytes = 500 * 1024; // 500K

    @Option(name = "rerun-from-file", description =
            "Use test file instead of separate adb commands for each test " +
            "when re-running instrumentations for tests that failed to run in previous attempts. ")
    private boolean mReRunUsingTestFile = false;

    @Option(name = "rerun-from-file-attempts", description =
            "Max attempts to rerun tests from file. -1 means rerun from file infinitely.")
    private int mReRunUsingTestFileAttempts = -1;

    @Option(name = "fallback-to-serial-rerun", description =
            "Rerun tests serially after rerun from file failed.")
    private boolean mFallbackToSerialRerun = true;

    @Option(name = "reboot-before-rerun", description =
            "Reboot a device before re-running instrumentations.")
    private boolean mRebootBeforeReRun = false;

    @Option(name = AbiFormatter.FORCE_ABI_STRING,
            description = AbiFormatter.FORCE_ABI_DESCRIPTION,
            importance = Importance.IF_UNSET)
    private String mForceAbi = null;

    @Option(name = "collect-tests-only",
            description = "Only invoke the instrumentation to collect list of applicable test "
                    + "cases. All test run callbacks will be triggered, but test execution will "
                    + "not be actually carried out.")
    private boolean mCollectTestsOnly = false;

    @Option(
        name = "collect-tests-timeout",
        description = "Timeout for the tests collection operation.",
        isTimeVal = true
    )
    private long mCollectTestTimeout = TEST_COLLECTION_TIMEOUT_MS;

    @Option(name = "debug", description = "Wait for debugger before instrumentation starts. Note "
            + "that this should only be used for local debugging, not suitable for automated runs.")
    private boolean mDebug = false;

    @Option(
        name = "enforce-ajur-format",
        description = "Whether or not enforcing the AJUR instrumentation output format"
    )
    private boolean mShouldEnforceFormat = false;

    // ABI injected via IAbiReceiver; mutually exclusive with the force-abi option
    // (enforced in resolveAbiName()).
    private IAbi mAbi = null;

    // Extra arguments handed to the package installer when install-file is used
    // (e.g. "--abi <name>", appended in createRemoteAndroidTestRunner()).
    private Collection<String> mInstallArgs = new ArrayList<>();

    private ITestDevice mDevice = null;

    // Runner built in run(); reused by collection, execution and reruns.
    private IRemoteAndroidTestRunner mRunner;

    // Tests not yet completed. null until a run has been attempted; also settable
    // up-front via setTestsToRun() to drive a resumed/rerun invocation.
    private Collection<TestIdentifier> mRemainingTests = null;

    private String mCoverageTarget = null;

    private String mTestFilePathOnDevice = null;

    // Lazily created in getListInstrumentationParser(); injectable for tests.
    private ListInstrumentationParser mListInstrumentationParser = null;

    /**
     * {@inheritDoc}
     */
    @Override
    public void setDevice(ITestDevice device) {
        mDevice = device;
    }

    /**
     * Set the Android manifest package to run.
     */
    public void setPackageName(String packageName) {
        mPackageName = packageName;
    }

    /**
     * Optionally, set the Android instrumentation runner to use.
     */
    public void setRunnerName(String runnerName) {
        mRunnerName = runnerName;
    }

    /**
     * Gets the Android instrumentation runner to be used.
     */
    public String getRunnerName() {
        return mRunnerName;
    }

    /**
     * Optionally, set the test class name to run.
     */
    public void setClassName(String testClassName) {
        mTestClassName = testClassName;
    }

    /**
     * Optionally, set the test method to run.
     * <p/>
     * The name is shell-escaped here, since it is eventually embedded in an adb shell
     * command line.
     */
    public void setMethodName(String testMethodName) {
        mTestMethodName = StringEscapeUtils.escapeShell(testMethodName);
    }

    /**
     * Optionally, set the path to a file located on the device that should contain a list of line
     * separated test classes and methods (format: com.foo.Class#method) to be run.
     * If set, will automatically attempt to re-run tests using this test file
     * via {@link InstrumentationFileTest} instead of executing separate adb commands for each
     * remaining test via {@link InstrumentationSerialTest}"
     */
    public void setTestFilePathOnDevice(String testFilePathOnDevice) {
        mTestFilePathOnDevice = testFilePathOnDevice;
    }

    /**
     * Optionally, set the test size to run.
     */
    public void setTestSize(String size) {
        mTestSize = size;
    }

    /**
     * Get the Android manifest package to run.
     */
    public String getPackageName() {
        return mPackageName;
    }

    /**
     * Get the custom test run name that will be provided to listener
     */
    public String getRunName() {
        return mRunName;
    }

    /**
     * Set the custom test run name that will be provided to listener
     */
    public void setRunName(String runName) {
        mRunName = runName;
    }

    /**
     * Set the collection of tests that should be executed by this InstrumentationTest.
     *
     * @param tests the tests to run
     */
    public void setTestsToRun(Collection<TestIdentifier> tests) {
        mRemainingTests = tests;
    }

    /**
     * Get the class name to run.
     */
    String getClassName() {
        return mTestClassName;
    }

    /**
     * Get the test method to run.
     */
    String getMethodName() {
        return mTestMethodName;
    }

    /**
     * Get the path to a file that contains class#method combinations to be run
     */
    String getTestFilePathOnDevice() {
        return mTestFilePathOnDevice;
    }

    /**
     * Get the test java package to run.
     */
    String getTestPackageName() {
        return mTestPackageName;
    }

    /**
     * Sets the test package filter.
     * <p/>
     * If non-null, only tests within the given java package will be executed.
     * <p/>
     * Will be ignored if a non-null value has been provided to {@link #setClassName(String)}
     */
    public void setTestPackageName(String testPackageName) {
        mTestPackageName = testPackageName;
    }

    /**
     * Get the test size to run. Returns <code>null</code> if no size has been set.
     */
    String getTestSize() {
        return mTestSize;
    }

    /**
     * Optionally, set the maximum time (in milliseconds) expecting shell output from the device.
     * For no timeout, set to 0.
     */
    public void setShellTimeout(long timeout) {
        mShellTimeout = timeout;
    }

    /**
     * Optionally, set the maximum time (in milliseconds) for each individual test run.
     */
    public void setTestTimeout(int timeout) {
        mTestTimeout = timeout;
    }

    /**
     * Set the coverage target of this test.
     * <p/>
     * Currently unused. This method is just present so coverageTarget can be later retrieved via
     * {@link #getCoverageTarget()}
     */
    public void setCoverageTarget(String coverageTarget) {
        mCoverageTarget = coverageTarget;
    }

    /**
     * Get the coverageTarget previously set via {@link #setCoverageTarget(String)}.
     */
    public String getCoverageTarget() {
        return mCoverageTarget;
    }

    /**
     * Return <code>true</code> if rerun mode is on.
     */
    boolean isRerunMode() {
        return mIsRerunMode;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean isResumable() {
        // hack to not resume if tests were never run
        // TODO: fix this properly in TestInvocation
        if (mRemainingTests == null) {
            return false;
        }
        return mIsResumeMode;
    }

    /**
     * Optionally, set the rerun mode.
     */
    public void setRerunMode(boolean rerun) {
        mIsRerunMode = rerun;
    }

    /**
     * Optionally, set the resume mode.
     */
    public void setResumeMode(boolean resume) {
        mIsResumeMode = resume;
    }

    /**
     * Get the shell timeout in ms.
     */
    long getShellTimeout() {
        return mShellTimeout;
    }

    /**
     * Get the test timeout in ms.
     */
    int getTestTimeout() {
        return mTestTimeout;
    }

    /**
     * Set the optional file to install that contains the tests.
     *
     * @param installFile the installable {@link File}
     */
    public void setInstallFile(File installFile) {
        mInstallFile = installFile;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ITestDevice getDevice() {
        return mDevice;
    }

    /**
     * Set the max time in ms to allow for the 'max time to shell output response' when collecting
     * tests.
     * <p/>
     * @deprecated This method is a no-op
     */
    @Deprecated
    @SuppressWarnings("unused")
    public void setCollectsTestsShellTimeout(int timeout) {
        // no-op, kept only for backwards compatibility of the public API
    }

    /**
     * Set the frequency with which to automatically collect bugreports after test failures.
     * <p />
     * Note that there is _no feedback mechanism_ between the test runner and the bugreport
     * collector, so use the EACH setting with due caution: if a large quantity of failures happen
     * in rapid succession, the bugreport for a given one of the failures could end up being
     * collected tens of minutes or hours after the respective failure occurred.
479 */ 480 public void setBugreportFrequency(BugreportCollector.Freq freq) { 481 mBugreportFrequency = freq; 482 } 483 484 /** 485 * Add an argument to provide when running the instrumentation tests. 486 * 487 * @param key the argument name 488 * @param value the argument value 489 */ 490 public void addInstrumentationArg(String key, String value) { 491 mInstrArgMap.put(key, value); 492 } 493 494 /** 495 * Retrieve the value of an argument to provide when running the instrumentation tests. 496 * 497 * @param key the argument name 498 * <p/> 499 * Exposed for testing 500 */ 501 String getInstrumentationArg(String key) { 502 if (mInstrArgMap.containsKey(key)) { 503 return mInstrArgMap.get(key); 504 } 505 return null; 506 } 507 508 /** 509 * Sets force-abi option. 510 * @param abi 511 */ 512 public void setForceAbi(String abi) { 513 mForceAbi = abi; 514 } 515 516 public String getForceAbi() { 517 return mForceAbi; 518 } 519 520 public void setScreenshotOnFailure(boolean screenshotOnFailure) { 521 mScreenshotOnFailure = screenshotOnFailure; 522 } 523 524 public void setLogcatOnFailure(boolean logcatOnFailure) { 525 mLogcatOnFailure = logcatOnFailure; 526 } 527 528 public void setLogcatOnFailureSize(int logcatOnFailureSize) { 529 mMaxLogcatBytes = logcatOnFailureSize; 530 } 531 532 public void setReRunUsingTestFile(boolean reRunUsingTestFile) { 533 mReRunUsingTestFile = reRunUsingTestFile; 534 } 535 536 public void setFallbackToSerialRerun(boolean reRunSerially) { 537 mFallbackToSerialRerun = reRunSerially; 538 } 539 540 public void setRebootBeforeReRun(boolean rebootBeforeReRun) { 541 mRebootBeforeReRun = rebootBeforeReRun; 542 } 543 544 /** 545 * @return the {@link IRemoteAndroidTestRunner} to use. 
     * @throws DeviceNotAvailableException
     */
    IRemoteAndroidTestRunner createRemoteAndroidTestRunner(String packageName, String runnerName,
            IDevice device) throws DeviceNotAvailableException {
        RemoteAndroidTestRunner runner = new RemoteAndroidTestRunner(
                packageName, runnerName, device);
        String abiName = resolveAbiName();
        if (abiName != null) {
            // Propagate the resolved ABI to both the installer args and the am
            // instrument run options so install and execution agree.
            mInstallArgs.add(String.format("--abi %s", abiName));
            runner.setRunOptions(String.format("--abi %s", abiName));
        }
        runner.setEnforceTimeStamp(mShouldEnforceFormat);
        return runner;
    }

    /**
     * Resolve the ABI to run with, from either the injected {@link IAbi} or the force-abi
     * option. Returns null when neither is set.
     *
     * @throws IllegalArgumentException if both abi sources are set (they are mutually exclusive)
     * @throws RuntimeException if force-abi is set but no matching device abi is found
     */
    private String resolveAbiName() throws DeviceNotAvailableException {
        if (mAbi != null && mForceAbi != null) {
            throw new IllegalArgumentException("cannot specify both abi flags");
        }
        String abiName = null;
        if (mAbi != null) {
            abiName = mAbi.getName();
        } else if (mForceAbi != null && !mForceAbi.isEmpty()) {
            abiName = AbiFormatter.getDefaultAbi(mDevice, mForceAbi);
            if (abiName == null) {
                throw new RuntimeException(
                        String.format("Cannot find abi for force-abi %s", mForceAbi));
            }
        }
        return abiName;
    }

    /**
     * Set the {@link ListInstrumentationParser}.
     */
    @VisibleForTesting
    void setListInstrumentationParser(ListInstrumentationParser listInstrumentationParser) {
        mListInstrumentationParser = listInstrumentationParser;
    }

    /**
     * Get the {@link ListInstrumentationParser} used to parse 'pm list instrumentation' queries.
     * Lazily created on first use.
     */
    protected ListInstrumentationParser getListInstrumentationParser() {
        if (mListInstrumentationParser == null) {
            mListInstrumentationParser = new ListInstrumentationParser();
        }
        return mListInstrumentationParser;
    }

    /**
     * Query the device for a test runner to use.
     *
     * @return the first test runner name that matches the package or null if we don't find any.
     * @throws DeviceNotAvailableException
     */
    protected String queryRunnerName() throws DeviceNotAvailableException {
        ListInstrumentationParser parser = getListInstrumentationParser();
        getDevice().executeShellCommand("pm list instrumentation", parser);

        for (InstrumentationTarget target : parser.getInstrumentationTargets()) {
            if (mPackageName.equals(target.packageName)) {
                return target.runnerName;
            }
        }
        CLog.w("Unable to determine runner name for package: %s", mPackageName);
        return null;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void run(final ITestInvocationListener listener) throws DeviceNotAvailableException {
        if (mPackageName == null) {
            throw new IllegalArgumentException("package name has not been set");
        }
        if (mDevice == null) {
            throw new IllegalArgumentException("Device has not been set");
        }

        if (mRunnerName == null) {
            // No runner configured: ask the device which runner targets this package.
            String runnerName = queryRunnerName();
            if (runnerName == null) {
                throw new IllegalArgumentException(
                        "runner name has not been set and no matching instrumentations were found");
            }
            setRunnerName(runnerName);
            CLog.i("No runner name specified. Using: %s", mRunnerName);
        }

        mRunner = createRemoteAndroidTestRunner(mPackageName, mRunnerName, mDevice.getIDevice());
        setRunnerArgs(mRunner);
        if (mInstallFile != null) {
            // installPackage returns null on success, an error message otherwise.
            Assert.assertNull(mDevice.installPackage(mInstallFile, true,
                    mInstallArgs.toArray(new String[]{})));
        }
        doTestRun(listener);
        if (mInstallFile != null) {
            mDevice.uninstallPackage(mPackageName);
        }
    }

    /**
     * Transfer the configured options (class/method/package filters, test file, size,
     * timeouts, run name, extra instrumentation args) onto the given runner.
     */
    protected void setRunnerArgs(IRemoteAndroidTestRunner runner) {
        if (mTestClassName != null) {
            if (mTestMethodName != null) {
                runner.setMethodName(mTestClassName, mTestMethodName);
            } else {
                runner.setClassName(mTestClassName);
            }
        } else if (mTestPackageName != null) {
            // --test-package is ignored when --class is set (see option description).
            runner.setTestPackageName(mTestPackageName);
        }
        if (mTestFilePathOnDevice != null) {
            addInstrumentationArg(TEST_FILE_INST_ARGS_KEY, mTestFilePathOnDevice);
        }
        if (mTestSize != null) {
            runner.setTestSize(TestSize.getTestSize(mTestSize));
        }
        addTimeoutsToRunner(runner);
        if (mRunName != null) {
            runner.setRunName(mRunName);
        }
        for (Map.Entry<String, String> argEntry : mInstrArgMap.entrySet()) {
            runner.addInstrumentationArg(argEntry.getKey(), argEntry.getValue());
        }
    }

    /**
     * Helper method to add test-timeout & shell-timeout timeouts to given runner
     */
    private void addTimeoutsToRunner(IRemoteAndroidTestRunner runner) {
        if (mTimeout != null) {
            CLog.w("\"timeout\" argument is deprecated and should not be used! \"shell-timeout\""
                    + " argument value is overwritten with %d ms", mTimeout);
            setShellTimeout(mTimeout);
        }
        if (mTestTimeout < 0) {
            throw new IllegalArgumentException(
                    String.format("test-timeout %d cannot be negative", mTestTimeout));
        }
        if (mShellTimeout <= mTestTimeout) {
            // set shell timeout to 110% of test timeout
            mShellTimeout = mTestTimeout + mTestTimeout / 10;
            CLog.w(String.format("shell-timeout should be larger than test-timeout %d; "
                    + "NOTE: extending shell-timeout to %d, please consider fixing this!",
                    mTestTimeout, mShellTimeout));
        }
        runner.setMaxTimeToOutputResponse(mShellTimeout, TimeUnit.MILLISECONDS);
        addInstrumentationArg(TEST_TIMEOUT_INST_ARGS_KEY, Long.toString(mTestTimeout));
    }

    /**
     * Execute test run.
     *
     * @param listener the test result listener
     * @throws DeviceNotAvailableException if device stops communicating
     */
    private void doTestRun(ITestInvocationListener listener)
            throws DeviceNotAvailableException {

        if (mRemainingTests != null) {
            // have remaining tests! This must be a rerun - rerun them individually
            rerunTests(listener);
            return;
        }

        // If this is a dry-run, just collect the tests and return
        if (mCollectTestsOnly) {
            // Use the actual listener to collect the tests, and print a error if this fails
            Collection<TestIdentifier> collectedTests = collectTestsToRun(mRunner, listener);
            if (collectedTests == null) {
                CLog.e("Failed to collect tests for %s", mPackageName);
            } else {
                CLog.i("Collected %d tests for %s", collectedTests.size(), mPackageName);
            }
            return;
        }

        // Collect the tests to run, but don't notify the listener since it's not a real run
        mRemainingTests = collectTestsToRun(mRunner, null);

        // only set debug flag after collecting tests
        if (mDebug) {
            mRunner.setDebug(true);
        }
        listener = addBugreportListenerIfEnabled(listener);
        listener = addLogcatListenerIfEnabled(listener);
        listener = addScreenshotListenerIfEnabled(listener);

        if (mRemainingTests == null) {
            // Failed to collect the tests or collection is off. Just try to run them all.
            mDevice.runInstrumentationTests(mRunner, listener);
        } else if (!mRemainingTests.isEmpty()) {
            runWithRerun(listener, mRemainingTests);
        } else {
            CLog.i("No tests expected for %s, skipping", mPackageName);
        }
    }

    /**
     * Returns a listener that will collect bugreports, or the original {@code listener} if this
     * feature is disabled.
     */
    ITestInvocationListener addBugreportListenerIfEnabled(ITestInvocationListener listener) {
        if (mBugreportFrequency != null) {
            // Collect a bugreport after EACH/FIRST failed testcase
            BugreportCollector.Predicate pred = new BugreportCollector.Predicate(
                    BugreportCollector.Relation.AFTER,
                    mBugreportFrequency,
                    BugreportCollector.Noun.FAILED_TESTCASE);
            BugreportCollector collector = new BugreportCollector(listener, getDevice());
            collector.addPredicate(pred);
            listener = collector;
        }
        return listener;
    }

    /**
     * Returns a listener that will collect screenshots, or the original {@code listener} if this
     * feature is disabled.
     */
    ITestInvocationListener addScreenshotListenerIfEnabled(ITestInvocationListener listener) {
        if (mScreenshotOnFailure) {
            listener = new FailedTestScreenshotGenerator(listener, getDevice());
        }
        return listener;
    }

    /**
     * Returns a listener that will collect logcat logs, or the original {@code listener} if this
     * feature is disabled.
     */
    ITestInvocationListener addLogcatListenerIfEnabled(ITestInvocationListener listener) {
        if (mLogcatOnFailure) {
            listener = new FailedTestLogcatGenerator(listener, getDevice(), mMaxLogcatBytes);
        }
        return listener;
    }

    /**
     * Execute the test run, but re-run incomplete tests individually if run fails to complete.
     *
     * @param listener the {@link ITestInvocationListener}
     * @param expectedTests the full set of expected tests in this run.
     */
    private void runWithRerun(final ITestInvocationListener listener,
            Collection<TestIdentifier> expectedTests) throws DeviceNotAvailableException {
        CollectingTestListener testTracker = new CollectingTestListener();
        mRemainingTests = expectedTests;
        try {
            mDevice.runInstrumentationTests(
                    mRunner,
                    new ResultForwarder(listener, testTracker) {
                        @Override
                        public void testRunStarted(String runName, int testCount) {
                            // In case of crash, run will attempt to report with 0
                            if (testCount == 0 && !expectedTests.isEmpty()) {
                                CLog.e(
                                        "Run reported 0 tests while we collected %s",
                                        expectedTests.size());
                                super.testRunStarted(runName, expectedTests.size());
                            } else {
                                super.testRunStarted(runName, testCount);
                            }
                        }
                    });
        } finally {
            // Even if the run throws, trim the completed tests so a later resume/rerun
            // only picks up what is still outstanding.
            calculateRemainingTests(mRemainingTests, testTracker);
        }
        rerunTests(listener);
    }

    /**
     * Rerun any <var>mRemainingTests</var>
     *
     * @param listener the {@link ITestInvocationListener}
     * @throws DeviceNotAvailableException
     */
    private void rerunTests(final ITestInvocationListener listener)
            throws DeviceNotAvailableException {
        if (mRemainingTests.size() > 0) {
            if (mRebootBeforeReRun) {
                mDevice.reboot();
            }
            if (mReRunUsingTestFile) {
                reRunTestsFromFile(listener);
            } else {
                reRunTestsSerially(listener);
            }
        }
    }

    /**
     * re-runs tests from test file via {@link InstrumentationFileTest}
     */
    private void reRunTestsFromFile(final ITestInvocationListener listener)
            throws DeviceNotAvailableException {
        CLog.i("Running individual tests using a test file");
        try {
            InstrumentationFileTest testReRunner = new InstrumentationFileTest(this,
                    mRemainingTests, mFallbackToSerialRerun, mReRunUsingTestFileAttempts);
            CollectingTestListener testTracker = new CollectingTestListener();
            try {
                testReRunner.run(new ResultForwarder(listener, testTracker));
            } finally {
                calculateRemainingTests(mRemainingTests, testTracker);
            }
        } catch (ConfigurationException e) {
            CLog.e("Failed to create InstrumentationFileTest: %s", e.getMessage());
        }
    }

    /** re-runs tests one by one via {@link InstrumentationSerialTest} */
    @VisibleForTesting
    void reRunTestsSerially(final ITestInvocationListener listener)
            throws DeviceNotAvailableException {
        CLog.i("Running individual tests serially");
        // Since the same runner is reused we must ensure TEST_FILE_INST_ARGS_KEY is not set.
        // Otherwise, the runner will attempt to execute tests from file.
        if (mInstrArgMap != null && mInstrArgMap.containsKey(TEST_FILE_INST_ARGS_KEY)) {
            mInstrArgMap.remove(TEST_FILE_INST_ARGS_KEY);
        }
        try {
            InstrumentationSerialTest testReRunner = new InstrumentationSerialTest(this, mRemainingTests);
            CollectingTestListener testTracker = new CollectingTestListener();
            try {
                testReRunner.run(new ResultForwarder(listener, testTracker));
            } finally {
                calculateRemainingTests(mRemainingTests, testTracker);
            }
        } catch (ConfigurationException e) {
            CLog.e("Failed to create InstrumentationSerialTest: %s", e.getMessage());
        }
    }

    /**
     * Remove the set of tests collected by testTracker from the set of expectedTests
     *
     * @param expectedTests the set to trim in place
     * @param testTracker tracker whose completed tests are removed from expectedTests
     */
    private void calculateRemainingTests(Collection<TestIdentifier> expectedTests,
            CollectingTestListener testTracker) {
        expectedTests.removeAll(testTracker.getCurrentRunResults().getCompletedTests());
    }

    /**
     * Collect the list of tests that should be executed by this test run.
     * <p/>
     * This will be done by executing the test run in 'logOnly' mode, and recording the list of
     * tests.
     *
     * @param runner the {@link IRemoteAndroidTestRunner} to use to run the tests.
900 * @return a {@link Collection} of {@link TestIdentifier}s that represent all tests to be 901 * executed by this run 902 * @throws DeviceNotAvailableException 903 */ 904 private Collection<TestIdentifier> collectTestsToRun(final IRemoteAndroidTestRunner runner, 905 final ITestInvocationListener listener) throws DeviceNotAvailableException { 906 if (isRerunMode()) { 907 Log.d(LOG_TAG, String.format("Collecting test info for %s on device %s", 908 mPackageName, mDevice.getSerialNumber())); 909 runner.setTestCollection(true); 910 // always explicitly set debug to false when collecting tests 911 runner.setDebug(false); 912 // try to collect tests multiple times, in case device is temporarily not available 913 // on first attempt 914 Collection<TestIdentifier> tests = collectTestsAndRetry(runner, listener); 915 // done with "logOnly" mode, restore proper test timeout before real test execution 916 addTimeoutsToRunner(runner); 917 runner.setTestCollection(false); 918 return tests; 919 } 920 return null; 921 } 922 923 /** 924 * Performs the actual work of collecting tests, making multiple attempts if necessary 925 * 926 * @param runner the {@link IRemoteAndroidTestRunner} that will be used for the instrumentation 927 * @param listener the {ITestInvocationListener} where to report results, can be null if we are 928 * not reporting the results to the main invocation and simply collecting tests. 
     * @return the collection of tests, or <code>null</code> if tests could not be collected
     * @throws DeviceNotAvailableException if communication with the device was lost
     */
    @VisibleForTesting
    Collection<TestIdentifier> collectTestsAndRetry(
            final IRemoteAndroidTestRunner runner, final ITestInvocationListener listener)
            throws DeviceNotAvailableException {
        boolean communicationFailure = false;
        for (int i=0; i < COLLECT_TESTS_ATTEMPTS; i++) {
            CollectingTestListener collector = new CollectingTestListener();
            boolean instrResult = false;
            // We allow to override the ddmlib default timeout for collection of tests.
            runner.setMaxTimeToOutputResponse(mCollectTestTimeout, TimeUnit.MILLISECONDS);
            if (listener == null) {
                instrResult = mDevice.runInstrumentationTests(runner, collector);
            } else {
                instrResult = mDevice.runInstrumentationTests(runner, collector, listener);
            }
            TestRunResult runResults = collector.getCurrentRunResults();
            if (!instrResult || !runResults.isRunComplete()) {
                // communication failure with device, retry
                Log.w(LOG_TAG, String.format(
                        "No results when collecting tests to run for %s on device %s. Retrying",
                        mPackageName, mDevice.getSerialNumber()));
                communicationFailure = true;
            } else if (runResults.isRunFailure()) {
                // not a communication failure, but run still failed.
                // TODO: should retry be attempted
                CLog.w("Run failure %s when collecting tests to run for %s on device %s.",
                        runResults.getRunFailureMessage(), mPackageName,
                        mDevice.getSerialNumber());
                if (mShouldEnforceFormat
                        && InstrumentationResultParser.INVALID_OUTPUT_ERR_MSG.equals(
                                runResults.getRunFailureMessage())) {
                    // AJUR format enforcement: malformed instrumentation output is fatal.
                    throw new RuntimeException(InstrumentationResultParser.INVALID_OUTPUT_ERR_MSG);
                }
                return null;
            } else {
                // success! Return the tests reported by the collection run.
                return runResults.getCompletedTests();
            }
        }
        if (communicationFailure) {
            // TODO: find a better way to handle this
            // throwing DeviceUnresponsiveException is not always ideal because a misbehaving
            // instrumentation can hang, even though device is responsive. Would be nice to have
            // a louder signal for this situation though than just logging an error
            // throw new DeviceUnresponsiveException(String.format(
            //        "Communication failure when attempting to collect tests %s on device %s",
            //        mPackageName, mDevice.getSerialNumber()));
            CLog.w("Ignoring repeated communication failure when collecting tests %s for device %s",
                    mPackageName, mDevice.getSerialNumber());
        }
        CLog.e("Failed to collect tests to run for %s on device %s.",
                mPackageName, mDevice.getSerialNumber());
        return null;
    }

    /**
     * A {@link ResultForwarder} that will forward a screenshot on test failures.
     */
    private static class FailedTestScreenshotGenerator extends ResultForwarder {
        private ITestDevice mDevice;

        public FailedTestScreenshotGenerator(ITestInvocationListener listener,
                ITestDevice device) {
            super(listener);
            mDevice = device;
        }

        @Override
        public void testFailed(TestIdentifier test, String trace) {
            try {
                InputStreamSource screenSource = mDevice.getScreenshot();
                super.testLog(String.format("screenshot-%s_%s", test.getClassName(),
                        test.getTestName()), LogDataType.PNG, screenSource);
                // Release the stream's resources once the screenshot has been logged.
                StreamUtil.cancel(screenSource);
            } catch (DeviceNotAvailableException e) {
                // TODO: rethrow this somehow
                CLog.e("Device %s became unavailable while capturing screenshot, %s",
                        mDevice.getSerialNumber(), e.toString());
            }

            super.testFailed(test, trace);
        }
    }

    /**
     * A {@link ResultForwarder} that will forward a logcat snapshot on each failed test.
1018 */ 1019 private static class FailedTestLogcatGenerator extends ResultForwarder { 1020 private ITestDevice mDevice; 1021 private int mNumLogcatBytes; 1022 private Map<TestIdentifier, Long> mMapStartTime = new HashMap<TestIdentifier, Long>(); 1023 1024 public FailedTestLogcatGenerator(ITestInvocationListener listener, ITestDevice device, 1025 int maxLogcatBytes) { 1026 super(listener); 1027 mDevice = device; 1028 mNumLogcatBytes = maxLogcatBytes; 1029 } 1030 1031 @Override 1032 public void testStarted(TestIdentifier test) { 1033 super.testStarted(test); 1034 // capture the starting date of the tests. 1035 try { 1036 mMapStartTime.put(test, mDevice.getDeviceDate()); 1037 } catch (DeviceNotAvailableException e) { 1038 // For convenience of interface we catch here, test will mostlikely throw it again 1039 // and it will be properly handle (recovery, etc.) 1040 CLog.e(e); 1041 mMapStartTime.put(test, 0l); 1042 } 1043 } 1044 1045 @Override 1046 public void testFailed(TestIdentifier test, String trace) { 1047 super.testFailed(test, trace); 1048 captureLog(test); 1049 } 1050 1051 @Override 1052 public void testAssumptionFailure(TestIdentifier test, String trace) { 1053 super.testAssumptionFailure(test, trace); 1054 captureLog(test); 1055 } 1056 1057 private void captureLog(TestIdentifier test) { 1058 InputStreamSource logSource = null; 1059 // if we can, capture starting the beginning of the test only to be more precise 1060 long startTime = 0; 1061 if (mMapStartTime.containsKey(test)) { 1062 startTime = mMapStartTime.remove(test); 1063 } 1064 if (startTime != 0) { 1065 logSource = mDevice.getLogcatSince(startTime); 1066 } else { 1067 logSource = mDevice.getLogcat(mNumLogcatBytes); 1068 } 1069 super.testLog(String.format("logcat-%s_%s", test.getClassName(), test.getTestName()), 1070 LogDataType.TEXT, logSource); 1071 StreamUtil.cancel(logSource); 1072 } 1073 } 1074 1075 /** 1076 * {@inheritDoc} 1077 */ 1078 @Override 1079 public void setCollectTestsOnly(boolean 
shouldCollectTest) { 1080 mCollectTestsOnly = shouldCollectTest; 1081 } 1082 1083 @Override 1084 public void setAbi(IAbi abi) { 1085 mAbi = abi; 1086 } 1087 1088 @Override 1089 public IAbi getAbi() { 1090 return mAbi; 1091 } 1092 1093 /** Set True if we enforce the AJUR output format of instrumentation. */ 1094 public void setEnforceFormat(boolean enforce) { 1095 mShouldEnforceFormat = enforce; 1096 } 1097 } 1098