# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import glob
import logging
import os
import re
import urllib2
import urlparse

from autotest_lib.client.bin import utils
from autotest_lib.client.common_lib import error, global_config
from autotest_lib.client.common_lib.cros import dev_server
from autotest_lib.server import utils as server_utils
from chromite.lib import retry_util

try:
    from chromite.lib import metrics
except ImportError:
    metrics = utils.metrics_mock


# Local stateful update path is relative to the CrOS source directory.
LOCAL_STATEFUL_UPDATE_PATH = 'src/platform/dev/stateful_update'
LOCAL_CHROOT_STATEFUL_UPDATE_PATH = '/usr/bin/stateful_update'
UPDATER_IDLE = 'UPDATE_STATUS_IDLE'
UPDATER_NEED_REBOOT = 'UPDATE_STATUS_UPDATED_NEED_REBOOT'
# A list of update engine client states that occur after an update is
# triggered.
UPDATER_PROCESSING_UPDATE = ['UPDATE_STATUS_CHECKING_FOR_UPDATE',
                             'UPDATE_STATUS_UPDATE_AVAILABLE',
                             'UPDATE_STATUS_DOWNLOADING',
                             'UPDATE_STATUS_FINALIZING']


class ChromiumOSError(error.InstallError):
    """Generic error for ChromiumOS-specific exceptions."""


class BrilloError(error.InstallError):
    """Generic error for Brillo-specific exceptions."""


class RootFSUpdateError(ChromiumOSError):
    """Raised when the RootFS fails to update."""


class StatefulUpdateError(ChromiumOSError):
    """Raised when the stateful partition fails to update."""


def url_to_version(update_url):
    """Return the version based on update_url.

    @param update_url: url to the image to update to.

    """
    # The Chrome OS version is generally the last element in the URL. The only
    # exception is delta update URLs, which are rooted under the version; e.g.,
    # http://.../update/.../0.14.755.0/au/0.14.754.0. In this case we want to
    # strip off the au section of the path before reading the version.
    return re.sub('/au/.*', '',
                  urlparse.urlparse(update_url).path).split('/')[-1].strip()


def url_to_image_name(update_url):
    """Return the image name based on update_url.

    From a URL like:
        http://172.22.50.205:8082/update/lumpy-release/R27-3837.0.0
    return lumpy-release/R27-3837.0.0

    @param update_url: url to the image to update to.
    @returns a string representing the image name in the update_url.

    """
    return '/'.join(urlparse.urlparse(update_url).path.split('/')[-2:])
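

# A minimal, illustrative sketch of how the URL helpers above behave. It is
# not called anywhere by the updater, and the devserver address and builds
# below are made-up examples.
def _example_update_url_parsing():
    """Demonstrates url_to_version() and url_to_image_name()."""
    full_url = 'http://172.22.50.205:8082/update/lumpy-release/R27-3837.0.0'
    # The version is the last path element; the image name is the last two.
    assert url_to_version(full_url) == 'R27-3837.0.0'
    assert url_to_image_name(full_url) == 'lumpy-release/R27-3837.0.0'
    # Delta update URLs are rooted under the version, so the '/au/...' suffix
    # is stripped before the version is read.
    delta_url = ('http://172.22.50.205:8082/update/lumpy-release/'
                 'R27-3837.0.0/au/R27-3836.0.0')
    assert url_to_version(delta_url) == 'R27-3837.0.0'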
90 """ 91 pattern = global_config.global_config.get_config_value( 92 'CROS', 'image_url_pattern', type=str, default='') 93 if not pattern: 94 raise ValueError('Cannot parse update_url, the global config needs ' 95 'an image_url_pattern.') 96 re_pattern = pattern.replace('%s', '(\S+)') 97 parts = re.search(re_pattern, update_url) 98 if not parts or len(parts.groups()) < 2: 99 raise ValueError('%s is not an update url' % update_url) 100 return parts.groups() 101 102 103 def list_image_dir_contents(update_url): 104 """Lists the contents of the devserver for a given build/update_url. 105 106 @param update_url: An update url. Eg: http://devserver:port/update/build. 107 """ 108 if not update_url: 109 logging.warning('Need update_url to list contents of the devserver.') 110 return 111 error_msg = 'Cannot check contents of devserver, update url %s' % update_url 112 try: 113 devserver_url, build = _get_devserver_build_from_update_url(update_url) 114 except ValueError as e: 115 logging.warning('%s: %s', error_msg, e) 116 return 117 devserver = dev_server.ImageServer(devserver_url) 118 try: 119 devserver.list_image_dir(build) 120 # The devserver will retry on URLError to avoid flaky connections, but will 121 # eventually raise the URLError if it persists. All HTTPErrors get 122 # converted to DevServerExceptions. 123 except (dev_server.DevServerException, urllib2.URLError) as e: 124 logging.warning('%s: %s', error_msg, e) 125 126 127 # TODO(garnold) This implements shared updater functionality needed for 128 # supporting the autoupdate_EndToEnd server-side test. We should probably 129 # migrate more of the existing ChromiumOSUpdater functionality to it as we 130 # expand non-CrOS support in other tests. 131 class BaseUpdater(object): 132 """Platform-agnostic DUT update functionality.""" 133 134 def __init__(self, updater_ctrl_bin, update_url, host): 135 """Initializes the object. 136 137 @param updater_ctrl_bin: Path to update_engine_client. 138 @param update_url: The URL we want the update to use. 139 @param host: A client.common_lib.hosts.Host implementation. 140 """ 141 self.updater_ctrl_bin = updater_ctrl_bin 142 self.update_url = update_url 143 self.host = host 144 145 146 def check_update_status(self): 147 """Returns the current update engine state. 148 149 We use the `update_engine_client -status' command and parse the line 150 indicating the update state, e.g. "CURRENT_OP=UPDATE_STATUS_IDLE". 151 """ 152 update_status = self.host.run(command='%s -status | grep CURRENT_OP' % 153 self.updater_ctrl_bin) 154 return update_status.stdout.strip().split('=')[-1] 155 156 157 def get_last_update_error(self): 158 """Get the last autoupdate error code.""" 159 error_msg = self.host.run( 160 '%s --last_attempt_error' % self.updater_ctrl_bin) 161 error_msg = (error_msg.stdout.strip()).replace('\n', ', ') 162 return error_msg 163 164 165 def _base_update_handler_no_retry(self, run_args): 166 """Base function to handle a remote update ssh call. 167 168 @param run_args: Dictionary of args passed to ssh_host.run function. 169 170 @throws: intercepts and re-throws all exceptions 171 """ 172 try: 173 self.host.run(**run_args) 174 except Exception as e: 175 logging.debug('exception in update handler: %s', e) 176 raise e 177 178 179 def _base_update_handler(self, run_args, err_msg_prefix=None): 180 """Handle a remote update ssh call, possibly with retries. 181 182 @param run_args: Dictionary of args passed to ssh_host.run function. 183 @param err_msg_prefix: Prefix of the exception error message. 
184 """ 185 def exception_handler(e): 186 """Examines exceptions and returns True if the update handler 187 should be retried. 188 189 @param e: the exception intercepted by the retry util. 190 """ 191 return (isinstance(e, error.AutoservSSHTimeout) or 192 (isinstance(e, error.GenericHostRunError) and 193 hasattr(e, 'description') and 194 (re.search('ERROR_CODE=37', e.description) or 195 re.search('generic error .255.', e.description)))) 196 197 try: 198 # Try the update twice (arg 2 is max_retry, not including the first 199 # call). Some exceptions may be caught by the retry handler. 200 retry_util.GenericRetry(exception_handler, 1, 201 self._base_update_handler_no_retry, 202 run_args) 203 except Exception as e: 204 message = err_msg_prefix + ': ' + str(e) 205 raise RootFSUpdateError(message) 206 207 208 def _wait_for_update_service(self): 209 """Ensure that the update engine daemon is running, possibly 210 by waiting for it a bit in case the DUT just rebooted and the 211 service hasn't started yet. 212 """ 213 def handler(e): 214 """Retry exception handler. 215 216 Assumes that the error is due to the update service not having 217 started yet. 218 219 @param e: the exception intercepted by the retry util. 220 """ 221 if isinstance(e, error.AutoservRunError): 222 logging.debug('update service check exception: %s\n' 223 'retrying...', e) 224 return True 225 else: 226 return False 227 228 # Retry at most three times, every 5s. 229 status = retry_util.GenericRetry(handler, 3, 230 self.check_update_status, 231 sleep=5) 232 233 # Expect the update engine to be idle. 234 if status != UPDATER_IDLE: 235 raise ChromiumOSError('%s is not in an installable state' % 236 self.host.hostname) 237 238 239 def trigger_update(self): 240 """Triggers a background update. 241 242 @raise RootFSUpdateError or unknown Exception if anything went wrong. 243 """ 244 # If this function is called immediately after reboot (which it is at 245 # this time), there is no guarantee that the update service is up and 246 # running yet, so wait for it. 247 self._wait_for_update_service() 248 249 autoupdate_cmd = ('%s --check_for_update --omaha_url=%s' % 250 (self.updater_ctrl_bin, self.update_url)) 251 run_args = {'command': autoupdate_cmd} 252 err_prefix = 'Failed to trigger an update on %s. ' % self.host.hostname 253 logging.info('Triggering update via: %s', autoupdate_cmd) 254 try: 255 to_raise = None 256 self._base_update_handler(run_args, err_prefix) 257 except Exception as e: 258 to_raise = e 259 260 build_name = url_to_image_name(self.update_url) 261 try: 262 board, build_type, milestone, _ = server_utils.ParseBuildName( 263 build_name) 264 except server_utils.ParseBuildNameException: 265 logging.warning('Unable to parse build name %s for metrics. ' 266 'Continuing anyway.', build_name) 267 board, build_type, milestone = ('', '', '') 268 c = metrics.Counter('chromeos/autotest/autoupdater/trigger') 269 f = {'dev_server': 270 dev_server.get_hostname(self.update_url), 271 'success': to_raise is None, 272 'board': board, 273 'build_type': build_type, 274 'milestone': milestone} 275 c.increment(fields=f) 276 if to_raise: 277 raise to_raise 278 279 280 def _verify_update_completed(self): 281 """Verifies that an update has completed. 282 283 @raise RootFSUpdateError: if verification fails. 
284 """ 285 status = self.check_update_status() 286 if status != UPDATER_NEED_REBOOT: 287 error_msg = '' 288 if status == UPDATER_IDLE: 289 error_msg = 'Update error: %s' % self.get_last_update_error() 290 raise RootFSUpdateError('Update did not complete with correct ' 291 'status. Expecting %s, actual %s. %s' % 292 (UPDATER_NEED_REBOOT, status, error_msg)) 293 294 295 def update_image(self): 296 """Updates the device image and verifies success.""" 297 autoupdate_cmd = ('%s --update --omaha_url=%s' % 298 (self.updater_ctrl_bin, self.update_url)) 299 run_args = {'command': autoupdate_cmd, 'timeout': 3600} 300 err_prefix = ('Failed to install device image using payload at %s ' 301 'on %s. ' % (self.update_url, self.host.hostname)) 302 logging.info('Updating image via: %s', autoupdate_cmd) 303 try: 304 to_raise = None 305 self._base_update_handler(run_args, err_prefix) 306 except Exception as e: 307 to_raise = e 308 309 build_name = url_to_image_name(self.update_url) 310 try: 311 board, build_type, milestone, _ = server_utils.ParseBuildName( 312 build_name) 313 except server_utils.ParseBuildNameException: 314 logging.warning('Unable to parse build name %s for metrics. ' 315 'Continuing anyway.', build_name) 316 board, build_type, milestone = ('', '', '') 317 c = metrics.Counter('chromeos/autotest/autoupdater/update') 318 f = {'dev_server': 319 dev_server.get_hostname(self.update_url), 320 'success': to_raise is None, 321 'board': board, 322 'build_type': build_type, 323 'milestone': milestone} 324 c.increment(fields=f) 325 if to_raise: 326 raise to_raise 327 self._verify_update_completed() 328 329 330 class ChromiumOSUpdater(BaseUpdater): 331 """Helper class used to update DUT with image of desired version.""" 332 REMOTE_STATEUL_UPDATE_PATH = '/usr/local/bin/stateful_update' 333 UPDATER_BIN = '/usr/bin/update_engine_client' 334 STATEFUL_UPDATE = '/tmp/stateful_update' 335 UPDATED_MARKER = '/var/run/update_engine_autoupdate_completed' 336 UPDATER_LOGS = ['/var/log/messages', '/var/log/update_engine'] 337 338 KERNEL_A = {'name': 'KERN-A', 'kernel': 2, 'root': 3} 339 KERNEL_B = {'name': 'KERN-B', 'kernel': 4, 'root': 5} 340 # Time to wait for new kernel to be marked successful after 341 # auto update. 342 KERNEL_UPDATE_TIMEOUT = 120 343 344 def __init__(self, update_url, host=None, local_devserver=False): 345 super(ChromiumOSUpdater, self).__init__(self.UPDATER_BIN, update_url, 346 host) 347 self.local_devserver = local_devserver 348 if not local_devserver: 349 self.update_version = url_to_version(update_url) 350 else: 351 self.update_version = None 352 353 354 def reset_update_engine(self): 355 """Resets the host to prepare for a clean update regardless of state.""" 356 self._run('rm -f %s' % self.UPDATED_MARKER) 357 self._run('stop ui || true') 358 self._run('stop update-engine || true') 359 self._run('start update-engine') 360 361 # Wait for update engine to be ready. 362 self._wait_for_update_service() 363 364 365 def _run(self, cmd, *args, **kwargs): 366 """Abbreviated form of self.host.run(...)""" 367 return self.host.run(cmd, *args, **kwargs) 368 369 370 def rootdev(self, options=''): 371 """Returns the stripped output of rootdev <options>. 372 373 @param options: options to run rootdev. 


    def rootdev(self, options=''):
        """Returns the stripped output of rootdev <options>.

        @param options: options to run rootdev.

        """
        return self._run('rootdev %s' % options).stdout.strip()


    def get_kernel_state(self):
        """Returns the (<active>, <inactive>) kernel state as a pair."""
        active_root = int(re.findall(r'\d+\Z', self.rootdev('-s'))[0])
        if active_root == self.KERNEL_A['root']:
            return self.KERNEL_A, self.KERNEL_B
        elif active_root == self.KERNEL_B['root']:
            return self.KERNEL_B, self.KERNEL_A
        else:
            raise ChromiumOSError('Encountered unknown root partition: %s' %
                                  active_root)


    def _cgpt(self, flag, kernel, dev='$(rootdev -s -d)'):
        """Return numeric cgpt value for the specified flag, kernel, device."""
        return int(self._run('cgpt show -n -i %d %s %s' % (
                kernel['kernel'], flag, dev)).stdout.strip())


    def get_kernel_priority(self, kernel):
        """Return numeric priority for the specified kernel.

        @param kernel: information of the given kernel, KERNEL_A or KERNEL_B.

        """
        return self._cgpt('-P', kernel)


    def get_kernel_success(self, kernel):
        """Return boolean success flag for the specified kernel.

        @param kernel: information of the given kernel, KERNEL_A or KERNEL_B.

        """
        return self._cgpt('-S', kernel) != 0


    def get_kernel_tries(self, kernel):
        """Return tries count for the specified kernel.

        @param kernel: information of the given kernel, KERNEL_A or KERNEL_B.

        """
        return self._cgpt('-T', kernel)


    def get_stateful_update_script(self):
        """Returns the path to the stateful update script on the target."""
        # We attempt to load the local stateful update path in 3 different
        # ways. First we use the location specified in the autotest global
        # config. If this doesn't exist, we attempt to use the Chromium OS
        # chroot path to the installed script. If all else fails, we use the
        # stateful update script on the host.
        stateful_update_path = os.path.join(
                global_config.global_config.get_config_value(
                        'CROS', 'source_tree', default=''),
                LOCAL_STATEFUL_UPDATE_PATH)

        if not os.path.exists(stateful_update_path):
            logging.warning('Could not find Chrome OS source location for '
                            'stateful_update script at %s, falling back to '
                            'chroot copy.', stateful_update_path)
            stateful_update_path = LOCAL_CHROOT_STATEFUL_UPDATE_PATH

        if not os.path.exists(stateful_update_path):
            logging.warning('Could not find chroot stateful_update script, '
                            'falling back on client copy.')
            statefuldev_script = self.REMOTE_STATEFUL_UPDATE_PATH
        else:
            self.host.send_file(
                    stateful_update_path, self.STATEFUL_UPDATE,
                    delete_dest=True)
            statefuldev_script = self.STATEFUL_UPDATE

        return statefuldev_script


    def reset_stateful_partition(self):
        """Clear any pending stateful update request."""
        statefuldev_cmd = [self.get_stateful_update_script()]
        statefuldev_cmd += ['--stateful_change=reset', '2>&1']
        self._run(' '.join(statefuldev_cmd))


    def revert_boot_partition(self):
        """Revert the boot partition."""
        part = self.rootdev('-s')
        logging.warning('Reverting update; Boot partition will be %s', part)
        return self._run('/postinst %s 2>&1' % part)
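
    # Illustrative sketch (the device name is only an example; the real call
    # uses $(rootdev -s -d)): _cgpt() above shells out to a command such as
    #
    #     cgpt show -n -i 2 -S /dev/sda
    #
    # and parses the single integer it prints, so get_kernel_success(KERNEL_A)
    # is True whenever the KERN-A "successful" flag is non-zero, and
    # get_kernel_tries() / get_kernel_priority() read the '-T' and '-P' values
    # the same way.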


    def rollback_rootfs(self, powerwash):
        """Triggers rollback and waits for it to complete.

        @param powerwash: If true, powerwash as part of rollback.

        @raise RootFSUpdateError if anything went wrong.

        """
        version = self.host.get_release_version()
        # update_engine_client has supported --can_rollback since M36
        # (build 5772). /etc/lsb-release reports a version matching X.Y.Z;
        # this split just pulls out the first (major) part.
        try:
            build_number = int(version.split('.')[0])
        except ValueError:
            logging.error('Could not parse build number.')
            build_number = 0

        if build_number >= 5772:
            can_rollback_cmd = '%s --can_rollback' % self.UPDATER_BIN
            logging.info('Checking for rollback.')
            try:
                self._run(can_rollback_cmd)
            except error.AutoservRunError as e:
                raise RootFSUpdateError("Rollback isn't possible on %s: %s" %
                                        (self.host.hostname, str(e)))

        rollback_cmd = '%s --rollback --follow' % self.UPDATER_BIN
        if not powerwash:
            rollback_cmd += ' --nopowerwash'

        logging.info('Performing rollback.')
        try:
            self._run(rollback_cmd)
        except error.AutoservRunError as e:
            raise RootFSUpdateError('Rollback failed on %s: %s' %
                                    (self.host.hostname, str(e)))

        self._verify_update_completed()


    # TODO(garnold) This is here for backward compatibility and should be
    # deprecated once we shift to using update_image() everywhere.
    def update_rootfs(self):
        """Run the standard command to force an update."""
        return self.update_image()


    def update_stateful(self, clobber=True):
        """Updates the stateful partition.

        @param clobber: If True, force a clean stateful installation.
        """
        logging.info('Updating stateful partition...')
        statefuldev_url = self.update_url.replace('update', 'static')

        # Attempt stateful partition update; this must succeed so that the
        # newly installed host is testable after the update.
        statefuldev_cmd = [self.get_stateful_update_script(), statefuldev_url]
        if clobber:
            statefuldev_cmd.append('--stateful_change=clean')

        statefuldev_cmd.append('2>&1')
        try:
            self._run(' '.join(statefuldev_cmd), timeout=1200)
        except error.AutoservRunError:
            raise StatefulUpdateError(
                    'Failed to perform stateful update on %s' %
                    self.host.hostname)
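
    # Illustrative sketch (host, port and build are placeholders): with the
    # default clobber=True, update_stateful() above ends up running a command
    # of the form
    #
    #     /tmp/stateful_update http://<devserver>:<port>/static/<build> \
    #         --stateful_change=clean 2>&1
    #
    # on the DUT (the script may instead live at /usr/local/bin/stateful_update
    # if no local copy was pushed), while reset_stateful_partition() passes
    # '--stateful_change=reset' with no URL.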


    def run_update(self, update_root=True):
        """Update the DUT with an image of the specified version.

        @param update_root: True to force a rootfs update.
        """
        booted_version = self.host.get_release_version()
        if self.update_version:
            logging.info('Updating from version %s to %s.',
                         booted_version, self.update_version)

        # Check that the devserver is accepting connections (from autoserv's
        # host). If we can't talk to it, the machine host probably can't
        # either.
        auserver_host = 'http://%s' % urlparse.urlparse(self.update_url)[1]
        try:
            if not dev_server.ImageServer.devserver_healthy(auserver_host):
                raise ChromiumOSError(
                        'Update server at %s not healthy' % auserver_host)
        except Exception as e:
            logging.debug('Error occurred while connecting to devserver: %r', e)
            raise ChromiumOSError(
                    'Update server at %s not available' % auserver_host)

        logging.info('Installing from %s to %s', self.update_url,
                     self.host.hostname)

        # Reset update state.
        self.reset_update_engine()
        self.reset_stateful_partition()

        try:
            try:
                if not update_root:
                    logging.info('Root update is skipped.')
                else:
                    self.update_rootfs()

                self.update_stateful()
            except:
                self.revert_boot_partition()
                self.reset_stateful_partition()
                raise

            logging.info('Update complete.')
        except:
            # Collect update engine logs in the event of failure.
            if self.host.job:
                logging.info('Collecting update engine logs due to failure...')
                self.host.get_file(
                        self.UPDATER_LOGS, self.host.job.sysinfo.sysinfodir,
                        preserve_perm=False)
            list_image_dir_contents(self.update_url)
            raise
        finally:
            logging.info('Update engine logs have been downloaded to the '
                         'sysinfo/update_engine dir. Check the latest.')


    def check_version(self):
        """Check whether the image running on the DUT has the desired version.

        @returns: True if the DUT's image version matches the version that
                  the autoupdater tries to update to.

        """
        booted_version = self.host.get_release_version()
        return (self.update_version and
                self.update_version.endswith(booted_version))


    def check_version_to_confirm_install(self):
        """Check whether the DUT is running the version to be installed.

        This method should not be used to check if the DUT needs a full
        reimage. Only use it to confirm that an image is installed.

        The method is designed to verify the version for the following 6
        scenarios, with samples of the version to update to and the expected
        booted version:

        1. trybot paladin build.
           update version: trybot-lumpy-paladin/R27-3837.0.0-b123
           booted version: 3837.0.2013_03_21_1340

        2. trybot release build.
           update version: trybot-lumpy-release/R27-3837.0.0-b456
           booted version: 3837.0.0

        3. buildbot official release build.
           update version: lumpy-release/R27-3837.0.0
           booted version: 3837.0.0

        4. non-official paladin rc build.
           update version: lumpy-paladin/R27-3837.0.0-rc7
           booted version: 3837.0.0-rc7

        5. chrome-perf build.
           update version: lumpy-chrome-perf/R28-3837.0.0-b2996
           booted version: 3837.0.0

        6. pgo-generate build.
           update version: lumpy-release-pgo-generate/R28-3837.0.0-b2996
           booted version: 3837.0.0-pgo-generate

        When we are checking if a DUT needs a full install, we should NOT use
        this method to check if the DUT is running the same version, since it
        may return a false positive for a DUT running one trybot paladin build
        that is being updated to another trybot paladin build.

        TODO: This logic has a bug if a trybot paladin build failed to be
        installed on a DUT running an older trybot paladin build with the same
        platform number but a different build number (-b###). So to
        conclusively determine whether a trybot paladin build is imaged
        successfully, we may need to find out the date string from the update
        url.

        @returns: True if the DUT's image version (without the date string if
                  the image is a trybot build) matches the version that the
                  autoupdater is trying to update to.

        """
        # In the local_devserver case, we can't know the expected
        # build, so just pass.
        if not self.update_version:
            return True

        # Always try the default check_version method first; this prevents
        # any backward compatibility issues.
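        # For example, an update_version of 'lumpy-release/R27-3837.0.0' ends
        # with a booted version of '3837.0.0', so scenario 3 above is already
        # covered by check_version().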
        if self.check_version():
            return True

        return utils.version_match(self.update_version,
                                   self.host.get_release_version(),
                                   self.update_url)


    def verify_boot_expectations(self, expected_kernel_state, rollback_message):
        """Verifies that we fully booted into the expected kernel state.

        This method both verifies that we booted using the correct kernel
        state and that the OS has marked the kernel as good.

        @param expected_kernel_state: kernel state that we are verifying
                against, i.e. "I expect to be booted onto partition 4", etc.
                See the output of get_kernel_state.
        @param rollback_message: string to raise as a ChromiumOSError
                if we booted with the wrong partition.

        @raises ChromiumOSError: If we did not boot with the expected kernel
                state.
        """
        # Figure out the newly active kernel.
        active_kernel_state = self.get_kernel_state()[0]

        # Check for rollback due to a bad build.
        if (expected_kernel_state and
                active_kernel_state != expected_kernel_state):

            # Kernel crash reports should be wiped between test runs, but
            # may persist from earlier parts of the test, or from problems
            # with provisioning.
            #
            # Kernel crash reports will NOT be present if the crash happened
            # before encrypted stateful is mounted.
            #
            # TODO(dgarrett): Integrate with server/crashcollect.py at some
            # point.
            kernel_crashes = glob.glob('/var/spool/crash/kernel.*.kcrash')
            if kernel_crashes:
                rollback_message += ': kernel_crash'
                logging.debug('Found %d kernel crash reports:',
                              len(kernel_crashes))
                # The crash names contain timestamps that may be useful:
                #   kernel.20131207.005945.0.kcrash
                for crash in kernel_crashes:
                    logging.debug('  %s', os.path.basename(crash))

            # Print out some information to make it easier to debug
            # the rollback.
            logging.debug('Dumping partition table.')
            self._run('cgpt show $(rootdev -s -d)')
            logging.debug('Dumping crossystem for firmware debugging.')
            self._run('crossystem --all')
            raise ChromiumOSError(rollback_message)

        # Make sure chromeos-setgoodkernel runs.
        try:
            utils.poll_for_condition(
                    lambda: (self.get_kernel_tries(active_kernel_state) == 0
                             and self.get_kernel_success(active_kernel_state)),
                    exception=ChromiumOSError(),
                    timeout=self.KERNEL_UPDATE_TIMEOUT, sleep_interval=5)
        except ChromiumOSError:
            services_status = self._run('status system-services').stdout
            if services_status != 'system-services start/running\n':
                event = 'Chrome failed to reach the login screen'
            else:
                event = ('update-engine failed to call '
                         'chromeos-setgoodkernel')
            raise ChromiumOSError(
                    'After update and reboot, %s '
                    'within %d seconds' % (event, self.KERNEL_UPDATE_TIMEOUT))


class BrilloUpdater(BaseUpdater):
    """Helper class for updating a Brillo DUT."""

    def __init__(self, update_url, host=None):
        """Initialize the object.

        @param update_url: The URL we want the update to use.
        @param host: A client.common_lib.hosts.Host implementation.
        """
        super(BrilloUpdater, self).__init__(
                '/system/bin/update_engine_client', update_url, host)
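

# A minimal usage sketch (illustrative only, never called by autotest itself):
# shows how a server-side test might drive ChromiumOSUpdater. The `host`
# argument is assumed to be a client.common_lib.hosts.Host implementation with
# the usual run()/reboot() support, and `update_url` a plain devserver update
# URL.
def _example_full_update(host, update_url):
    """Updates `host` to the build served at `update_url` and verifies it."""
    updater = ChromiumOSUpdater(update_url, host=host)
    updater.run_update()    # rootfs + stateful update
    host.reboot()           # boot into the freshly installed image
    if not updater.check_version_to_confirm_install():
        raise ChromiumOSError('DUT did not boot into %s' %
                              updater.update_version)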