1 # SPDX-License-Identifier: Apache-2.0 2 # 3 # Copyright (C) 2015, ARM Limited and contributors. 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); you may 6 # not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 13 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 # 17 18 import datetime 19 import json 20 import logging 21 import os 22 import re 23 import shutil 24 import sys 25 import time 26 import unittest 27 28 import devlib 29 from devlib.utils.misc import memoized, which 30 from devlib import Platform, TargetError 31 from trappy.stats.Topology import Topology 32 33 from wlgen import RTA 34 from energy import EnergyMeter 35 from energy_model import EnergyModel 36 from conf import JsonConf 37 from platforms.juno_energy import juno_energy 38 from platforms.hikey_energy import hikey_energy 39 from platforms.pixel_energy import pixel_energy 40 41 USERNAME_DEFAULT = 'root' 42 PASSWORD_DEFAULT = '' 43 WORKING_DIR_DEFAULT = '/data/local/schedtest' 44 FTRACE_EVENTS_DEFAULT = ['sched:*'] 45 FTRACE_BUFSIZE_DEFAULT = 10240 46 OUT_PREFIX = 'results' 47 LATEST_LINK = 'results_latest' 48 49 basepath = os.path.dirname(os.path.realpath(__file__)) 50 basepath = basepath.replace('/libs/utils', '') 51 52 def os_which(file): 53 for path in os.environ["PATH"].split(os.pathsep): 54 if os.path.exists(os.path.join(path, file)): 55 return os.path.join(path, file) 56 57 return None 58 59 class ShareState(object): 60 __shared_state = {} 61 62 def __init__(self): 63 self.__dict__ = self.__shared_state 64 65 class TestEnv(ShareState): 66 """ 67 Represents the environment configuring LISA, 
the target, and the test setup 68 69 The test environment is defined by: 70 71 - a target configuration (target_conf) defining which HW platform we 72 want to use to run the experiments 73 - a test configuration (test_conf) defining which SW setups we need on 74 that HW target 75 - a folder to collect the experiments results, which can be specified 76 using the test_conf::results_dir option and is by default wiped from 77 all the previous contents (if wipe=True) 78 79 :param target_conf: 80 Configuration defining the target to run experiments on. May be 81 82 - A dict defining the values directly 83 - A path to a JSON file containing the configuration 84 - ``None``, in which case $LISA_HOME/target.config is used. 85 86 You need to provide the information needed to connect to the 87 target. For SSH targets that means "host", "username" and 88 either "password" or "keyfile". All other fields are optional if 89 the relevant features aren't needed. Has the following keys: 90 91 **host** 92 Target IP or MAC address for SSH access 93 **username** 94 For SSH access 95 **keyfile** 96 Path to SSH key (alternative to password) 97 **password** 98 SSH password (alternative to keyfile) 99 **device** 100 Target Android device ID if using ADB 101 **port** 102 Port for Android connection default port is 5555 103 **ANDROID_HOME** 104 Path to Android SDK. Defaults to ``$ANDROID_HOME`` from the 105 environment. 106 **ANDROID_BUILD_TOP** 107 Path to Android root directory. Defaults to ``$ANDROID_BUILD_TOP`` from the 108 environment. 109 **TARGET_PRODUCT** 110 Target product in the lunch target. Defaults to ``$TARGET_PRODUCT`` from the 111 environment. 112 **TARGET_BUILD_VARIANT** 113 Target build variant in the lunch target. Defaults to ``$TARGET_BUILD_VARIANT`` from the 114 environment. 115 **ANDROID_PRODUCT_OUT** 116 Path to Android output directory. Defaults to ``$ANDROID_PRODUCT_OUT`` from the 117 environment. 118 **DEVICE_LISA_HOME** 119 Path to device-specific LISA directory. 
Set to ``$DEVICE_LISA_HOME`` from the 120 environment. 121 **rtapp-calib** 122 Calibration values for RT-App. If unspecified, LISA will 123 calibrate RT-App on the target. A message will be logged with 124 a value that can be copied here to avoid having to re-run 125 calibration on subsequent tests. 126 **tftp** 127 Directory path containing kernels and DTB images for the 128 target. LISA does *not* manage this TFTP server, it must be 129 provided externally. Optional. 130 131 :param test_conf: Configuration of software for target experiments. Takes 132 the same form as target_conf. Fields are: 133 134 **modules** 135 Devlib modules to be enabled. Default is [] 136 **exclude_modules** 137 Devlib modules to be disabled. Default is []. 138 **tools** 139 List of tools (available under ./tools/$ARCH/) to install on 140 the target. Names, not paths (e.g. ['ftrace']). Default is []. 141 **ping_time**, **reboot_time** 142 Override parameters to :meth:`reboot` method 143 **__features__** 144 List of test environment features to enable. Options are: 145 146 "no-kernel" 147 do not deploy kernel/dtb images 148 "no-reboot" 149 do not force reboot the target at each configuration change 150 "debug" 151 enable debugging messages 152 153 **ftrace** 154 Configuration for ftrace. Dictionary with keys: 155 156 events 157 events to enable. 158 functions 159 functions to enable in the function tracer. Optional. 160 buffsize 161 Size of buffer. Default is 10240. 162 163 **systrace** 164 Configuration for systrace. 
Dictionary with keys: 165 categories: 166 overide the list of categories enabled 167 extra_categories: 168 append to the default list of categories 169 extra_events: 170 additional ftrace events to manually enable during systrac'ing 171 buffsize: 172 Size of ftrace buffer that systrace uses 173 174 **results_dir** 175 location of results of the experiments 176 177 :param wipe: set true to cleanup all previous content from the output 178 folder 179 :type wipe: bool 180 181 :param force_new: Create a new TestEnv object even if there is one available 182 for this session. By default, TestEnv only creates one 183 object per session, use this to override this behaviour. 184 :type force_new: bool 185 """ 186 187 _initialized = False 188 189 def __init__(self, target_conf=None, test_conf=None, wipe=True, 190 force_new=False): 191 super(TestEnv, self).__init__() 192 193 if self._initialized and not force_new: 194 return 195 196 self.conf = {} 197 self.test_conf = {} 198 self.target = None 199 self.ftrace = None 200 self.workdir = WORKING_DIR_DEFAULT 201 self.__installed_tools = set() 202 self.__modules = [] 203 self.__connection_settings = None 204 self._calib = None 205 206 # Keep track of target IP and MAC address 207 self.ip = None 208 self.mac = None 209 210 # Keep track of last installed kernel 211 self.kernel = None 212 self.dtb = None 213 214 # Energy meter configuration 215 self.emeter = None 216 217 # The platform descriptor to be saved into the results folder 218 self.platform = {} 219 220 # Keep track of android support 221 self.LISA_HOME = os.environ.get('LISA_HOME', '/vagrant') 222 self.ANDROID_HOME = os.environ.get('ANDROID_HOME', None) 223 self.ANDROID_BUILD_TOP = os.environ.get('ANDROID_BUILD_TOP', None) 224 self.TARGET_PRODUCT = os.environ.get('TARGET_PRODUCT', None) 225 self.TARGET_BUILD_VARIANT = os.environ.get('TARGET_BUILD_VARIANT', None) 226 self.ANDROID_PRODUCT_OUT = os.environ.get('ANDROID_PRODUCT_OUT', None) 227 self.DEVICE_LISA_HOME = 
os.environ.get('DEVICE_LISA_HOME', None) 228 self.CATAPULT_HOME = os.environ.get('CATAPULT_HOME', 229 os.path.join(self.LISA_HOME, 'tools', 'catapult')) 230 231 # Setup logging 232 self._log = logging.getLogger('TestEnv') 233 234 # Compute base installation path 235 self._log.info('Using base path: %s', basepath) 236 237 # Setup target configuration 238 if isinstance(target_conf, dict): 239 self._log.info('Loading custom (inline) target configuration') 240 self.conf = target_conf 241 elif isinstance(target_conf, str): 242 self._log.info('Loading custom (file) target configuration') 243 self.conf = self.loadTargetConfig(target_conf) 244 elif target_conf is None: 245 self._log.info('Loading default (file) target configuration') 246 self.conf = self.loadTargetConfig() 247 self._log.debug('Target configuration %s', self.conf) 248 249 # Setup test configuration 250 if test_conf: 251 if isinstance(test_conf, dict): 252 self._log.info('Loading custom (inline) test configuration') 253 self.test_conf = test_conf 254 elif isinstance(test_conf, str): 255 self._log.info('Loading custom (file) test configuration') 256 self.test_conf = self.loadTargetConfig(test_conf) 257 else: 258 raise ValueError('test_conf must be either a dictionary or a filepath') 259 self._log.debug('Test configuration %s', self.conf) 260 261 # Setup target working directory 262 if 'workdir' in self.conf: 263 self.workdir = self.conf['workdir'] 264 265 # Initialize binary tools to deploy 266 test_conf_tools = self.test_conf.get('tools', []) 267 target_conf_tools = self.conf.get('tools', []) 268 self.__tools = list(set(test_conf_tools + target_conf_tools)) 269 270 # Initialize ftrace events 271 # test configuration override target one 272 if 'ftrace' in self.test_conf: 273 self.conf['ftrace'] = self.test_conf['ftrace'] 274 if self.conf.get('ftrace'): 275 self.__tools.append('trace-cmd') 276 277 # Initialize features 278 if '__features__' not in self.conf: 279 self.conf['__features__'] = [] 280 281 # 
Initialize local results folder 282 # test configuration overrides target one 283 self.res_dir = (self.test_conf.get('results_dir') or 284 self.conf.get('results_dir')) 285 286 if self.res_dir and not os.path.isabs(self.res_dir): 287 self.res_dir = os.path.join(basepath, 'results', self.res_dir) 288 else: 289 self.res_dir = os.path.join(basepath, OUT_PREFIX) 290 self.res_dir = datetime.datetime.now()\ 291 .strftime(self.res_dir + '/%Y%m%d_%H%M%S') 292 293 if wipe and os.path.exists(self.res_dir): 294 self._log.warning('Wipe previous contents of the results folder:') 295 self._log.warning(' %s', self.res_dir) 296 shutil.rmtree(self.res_dir, ignore_errors=True) 297 if not os.path.exists(self.res_dir): 298 os.makedirs(self.res_dir) 299 300 res_lnk = os.path.join(basepath, LATEST_LINK) 301 if os.path.islink(res_lnk): 302 os.remove(res_lnk) 303 os.symlink(self.res_dir, res_lnk) 304 305 self._init() 306 307 # Initialize FTrace events collection 308 self._init_ftrace(True) 309 310 # Initialize RT-App calibration values 311 self.calibration() 312 313 # Initialize energy probe instrument 314 self._init_energy(True) 315 316 self._log.info('Set results folder to:') 317 self._log.info(' %s', self.res_dir) 318 self._log.info('Experiment results available also in:') 319 self._log.info(' %s', res_lnk) 320 321 self._initialized = True 322 323 def loadTargetConfig(self, filepath='target.config'): 324 """ 325 Load the target configuration from the specified file. 326 327 :param filepath: Path of the target configuration file. Relative to the 328 root folder of the test suite. 
        :type filepath: str

        """

        # Loading default target configuration
        conf_file = os.path.join(basepath, filepath)

        self._log.info('Loading target configuration [%s]...', conf_file)
        conf = JsonConf(conf_file)
        conf.load()
        return conf.json

    def _init(self, force = False):
        """
        Connect to the target and build its topology descriptor.

        :param force: re-initialize the target even if one is already
                      connected (forwarded to :meth:`_init_target`).
        """

        # Initialize target
        self._init_target(force)

        # Initialize target Topology for behavior analysis
        CLUSTERS = []

        # Build topology for a big.LITTLE systems
        if self.target.big_core and \
           (self.target.abi == 'arm64' or self.target.abi == 'armeabi'):
            # Populate cluster for a big.LITTLE platform
            if self.target.big_core:
                # Load cluster of LITTLE cores
                CLUSTERS.append(
                    [i for i,t in enumerate(self.target.core_names)
                     if t == self.target.little_core])
                # Load cluster of big cores
                CLUSTERS.append(
                    [i for i,t in enumerate(self.target.core_names)
                     if t == self.target.big_core])
        # Build topology for an SMP systems
        elif not self.target.big_core or \
             self.target.abi == 'x86_64':
            # One cluster entry per unique cluster id reported by devlib
            for c in set(self.target.core_clusters):
                CLUSTERS.append(
                    [i for i,v in enumerate(self.target.core_clusters)
                     if v == c])
        self.topology = Topology(clusters=CLUSTERS)
        self._log.info('Topology:')
        self._log.info('   %s', CLUSTERS)

        # Initialize the platform descriptor
        self._init_platform()


    def _init_target(self, force = False):
        """
        Build connection settings from self.conf and construct the devlib
        target (Linux, Android or local host), then verify its modules.

        :param force: reconnect even if a target is already available.
        :returns: the connected devlib target (when short-circuiting), else
                  None; also sets self.target / self.nrg_model as side effects.
        """

        # Reuse the already connected target unless forced
        if not force and self.target is not None:
            return self.target

        self.__connection_settings = {}

        # Configure username
        if 'username' in self.conf:
            self.__connection_settings['username'] = self.conf['username']
        else:
            self.__connection_settings['username'] = USERNAME_DEFAULT

        # Configure password or SSH keyfile
        # (keyfile wins over password when both are present)
        if 'keyfile' in self.conf:
            self.__connection_settings['keyfile'] = self.conf['keyfile']
        elif 'password' in self.conf:
            self.__connection_settings['password'] = self.conf['password']
        else:
            self.__connection_settings['password'] = PASSWORD_DEFAULT

        # Configure port
        if 'port' in self.conf:
            self.__connection_settings['port'] = self.conf['port']

        # Configure the host IP/MAC address
        # A "host" containing ':' is treated as a MAC address and resolved
        # to an IP via ARP; otherwise it is used as the IP directly.
        if 'host' in self.conf:
            try:
                if ':' in self.conf['host']:
                    (self.mac, self.ip) = self.resolv_host(self.conf['host'])
                else:
                    self.ip = self.conf['host']
                self.__connection_settings['host'] = self.ip
            except KeyError:
                raise ValueError('Config error: missing [host] parameter')

        try:
            platform_type = self.conf['platform']
        except KeyError:
            raise ValueError('Config error: missing [platform] parameter')

        # NOTE(review): indentation of this Android block reconstructed from a
        # mangled source — the SDK setup, tool logging and the adb existence
        # check are assumed to all live inside the 'android' branch; confirm.
        if platform_type.lower() == 'android':
            self.ANDROID_HOME = self.conf.get('ANDROID_HOME',
                                              self.ANDROID_HOME)
            if self.ANDROID_HOME:
                self._adb = os.path.join(self.ANDROID_HOME,
                                         'platform-tools', 'adb')
                self._fastboot = os.path.join(self.ANDROID_HOME,
                                              'platform-tools', 'fastboot')
                # Export the Android build environment for child processes.
                # NOTE(review): os.environ assignment raises TypeError if any
                # of these attributes is still None — verify callers always
                # provide them when using an Android target.
                os.environ['ANDROID_HOME'] = self.ANDROID_HOME
                os.environ['ANDROID_BUILD_TOP'] = self.ANDROID_BUILD_TOP
                os.environ['TARGET_PRODUCT'] = self.TARGET_PRODUCT
                os.environ['TARGET_BUILD_VARIANT'] = self.TARGET_BUILD_VARIANT
                os.environ['ANDROID_PRODUCT_OUT'] = self.ANDROID_PRODUCT_OUT
                os.environ['DEVICE_LISA_HOME'] = self.DEVICE_LISA_HOME
                os.environ['CATAPULT_HOME'] = self.CATAPULT_HOME
            else:
                self._log.info('Android SDK not found as ANDROID_HOME not defined, using PATH for platform tools')
                self._adb = os_which('adb')
                self._fastboot = os_which('fastboot')
                if self._adb:
                    self._log.info('Using adb from ' + self._adb)
                if self._fastboot:
                    self._log.info('Using fastboot from ' + self._fastboot)

            self._log.info('External tools using:')
            self._log.info('   ANDROID_HOME: %s', self.ANDROID_HOME)
            self._log.info('   ANDROID_BUILD_TOP: %s', self.ANDROID_BUILD_TOP)
            self._log.info('   TARGET_PRODUCT: %s', self.TARGET_PRODUCT)
            self._log.info('   TARGET_BUILD_VARIANT: %s', self.TARGET_BUILD_VARIANT)
            self._log.info('   ANDROID_PRODUCT_OUT: %s', self.ANDROID_PRODUCT_OUT)
            self._log.info('   DEVICE_LISA_HOME: %s', self.DEVICE_LISA_HOME)
            self._log.info('   CATAPULT_HOME: %s', self.CATAPULT_HOME)

            if not os.path.exists(self._adb):
                raise RuntimeError('\nADB binary not found\n\t{}\ndoes not exists!\n\n'
                                   'Please configure ANDROID_HOME to point to '
                                   'a valid Android SDK installation folder.'\
                                   .format(self._adb))

        ########################################################################
        # Board configuration
        ########################################################################

        # Setup board default if not specified by configuration
        self.nrg_model = None
        platform = None
        self.__modules = ['cpufreq', 'cpuidle']
        if 'board' not in self.conf:
            self.conf['board'] = 'UNKNOWN'

        # Initialize TC2 board
        if self.conf['board'].upper() == 'TC2':
            platform = devlib.platform.arm.TC2()
            self.__modules = ['bl', 'hwmon', 'cpufreq']

        # Initialize JUNO board
        elif self.conf['board'].upper() in ('JUNO', 'JUNO2'):
            platform = devlib.platform.arm.Juno()
            self.nrg_model = juno_energy
            self.__modules = ['bl', 'hwmon', 'cpufreq']

        # Initialize OAK board
        elif self.conf['board'].upper() == 'OAK':
            platform = Platform(model='MT8173')
            self.__modules = ['bl', 'cpufreq']

        # Initialize HiKey board
        elif self.conf['board'].upper() == 'HIKEY':
            self.nrg_model = hikey_energy
            self.__modules = [ "cpufreq", "cpuidle" ]
            platform = Platform(model='hikey')

        # Initialize HiKey960 board
        elif self.conf['board'].upper() == 'HIKEY960':
            self.__modules = ['bl', 'cpufreq', 'cpuidle']
            platform = Platform(model='hikey960')

        # Initialize Pixel phone
        elif self.conf['board'].upper() == 'PIXEL':
            self.nrg_model = pixel_energy
            self.__modules = ['bl', 'cpufreq']
            platform = Platform(model='pixel')

        # Initialize gem5 platform
        elif self.conf['board'].upper() == 'GEM5':
            self.nrg_model = None
            self.__modules=['cpufreq']
            platform = self._init_target_gem5()

        elif self.conf['board'] != 'UNKNOWN':
            # Initialize from platform descriptor (if available)
            board = self._load_board(self.conf['board'])
            if board:
                core_names=board['cores']
                platform = Platform(
                    model=self.conf['board'],
                    core_names=core_names,
                    core_clusters = self._get_clusters(core_names),
                    big_core=board.get('big_core', None)
                )
                if 'modules' in board:
                    self.__modules = board['modules']

        ########################################################################
        # Modules configuration
        ########################################################################

        modules = set(self.__modules)

        # Refine modules list based on target.conf
        modules.update(self.conf.get('modules', []))
        # Merge tests specific modules
        modules.update(self.test_conf.get('modules', []))

        # Exclusions (from either configuration) win over additions
        remove_modules = set(self.conf.get('exclude_modules', []) +
                             self.test_conf.get('exclude_modules', []))
        modules.difference_update(remove_modules)

        self.__modules = list(modules)
        self._log.info('Devlib modules to load: %s', self.__modules)

        ########################################################################
        # Devlib target setup (based on target.config::platform)
        ########################################################################

        # If the target is Android, we need just (eventually) the device
        if platform_type.lower() == 'android':
            self.__connection_settings = None
            device = 'DEFAULT'
            if 'device' in self.conf:
                device = self.conf['device']
                self.__connection_settings = {'device' : device}
            elif 'host' in self.conf:
                host = self.conf['host']
                port = '5555'
                if 'port' in self.conf:
                    port = str(self.conf['port'])
                device = '{}:{}'.format(host, port)
                self.__connection_settings = {'device' : device}
            self._log.info('Connecting Android target [%s]', device)
        else:
            self._log.info('Connecting %s target:', platform_type)
            for key in self.__connection_settings:
                self._log.info('%10s : %s', key,
                               self.__connection_settings[key])

        self._log.info('Connection settings:')
        self._log.info('   %s', self.__connection_settings)

        if platform_type.lower() == 'linux':
            self._log.debug('Setup LINUX target...')
            if "host" not in self.__connection_settings:
                raise ValueError('Missing "host" param in Linux target conf')

            self.target = devlib.LinuxTarget(
                    platform = platform,
                    connection_settings = self.__connection_settings,
                    load_default_modules = False,
                    modules = self.__modules)
        elif platform_type.lower() == 'android':
            self._log.debug('Setup ANDROID target...')
            self.target = devlib.AndroidTarget(
                    platform = platform,
                    connection_settings = self.__connection_settings,
                    load_default_modules = False,
                    modules = self.__modules)
        elif platform_type.lower() == 'host':
            self._log.debug('Setup HOST target...')
            self.target = devlib.LocalLinuxTarget(
                    platform = platform,
                    load_default_modules = False,
                    modules = self.__modules)
        else:
            raise ValueError('Config error: not supported [platform] type {}'\
                    .format(platform_type))

        self._log.debug('Checking target connection...')
        self._log.debug('Target info:')
        self._log.debug('      ABI: %s', self.target.abi)
        self._log.debug('     CPUs: %s', self.target.cpuinfo)
        self._log.debug(' Clusters: %s', self.target.core_clusters)

        self._log.info('Initializing target workdir:')
        self._log.info('   %s', self.target.working_directory)

        self.target.setup()
        self.install_tools(self.__tools)

        # Verify that all the required modules have been initialized
        for module in self.__modules:
            self._log.debug('Check for module [%s]...', module)
            if not hasattr(self.target, module):
                self._log.warning('Unable to initialize [%s] module', module)
                self._log.error('Fix your target kernel configuration or '
                                'disable module from configuration')
                raise RuntimeError('Failed to initialized [{}] module, '
                                   'update your kernel or test configurations'.format(module))

        # Optionally skip the (potentially slow) energy model read
        if ('skip_nrg_model' in self.conf) and self.conf['skip_nrg_model']:
            return
        if not self.nrg_model:
            try:
                self._log.info('Attempting to read energy model from target')
                self.nrg_model = EnergyModel.from_target(self.target)
            except (TargetError, RuntimeError, ValueError) as e:
                # Best-effort: an unreadable energy model is not fatal
                self._log.error("Couldn't read target energy model: %s", e)

    def _init_target_gem5(self):
        """
        Build the devlib Gem5SimulationPlatform from the 'gem5' section of
        the target configuration (simulator binary, system images, args).

        :returns: a configured Gem5SimulationPlatform instance.
        :raises RuntimeError: if the 'diod' 9p server is not installed on
                              the host.
        """
        system = self.conf['gem5']['system']
        simulator = self.conf['gem5']['simulator']

        # Get gem5 binary arguments
        args = simulator.get('args', [])
        args.append('--listener-mode=on')

        # Get platform description
        args.append(system['platform']['description'])

        # Get platform arguments
        args += system['platform'].get('args', [])
        args += ['--kernel {}'.format(system['kernel']),
                 '--dtb {}'.format(system['dtb']),
                 '--disk-image {}'.format(system['disk'])]

        # Gather all arguments
        args = ' '.join(args)

        diod_path = which('diod')
        if diod_path is None:
            raise RuntimeError('Failed to find "diod" on your host machine, '
                               'check your installation or your PATH variable')

        # Setup virtio
        # Brackets are there to let the output dir be created automatically
        virtio_args = '--which-diod={} --workload-automation-vio={{}}'.format(diod_path)

        # Change conf['board'] to include platform information
        suffix = os.path.splitext(os.path.basename(
            system['platform']['description']))[0]
        self.conf['board'] = self.conf['board'].lower() + suffix

        board = self._load_board(self.conf['board'])

        # Merge all arguments
        platform = devlib.platform.gem5.Gem5SimulationPlatform(
            name = 'gem5',
            gem5_bin = simulator['bin'],
            gem5_args = args,
            gem5_virtio = virtio_args,
            host_output_dir = self.res_dir,
            # Core/cluster layout comes from the board descriptor when one
            # exists for this board name, otherwise it is left to devlib
            core_names = board['cores'] if board else None,
            core_clusters = self._get_clusters(board['cores']) if board else None,
            big_core = board.get('big_core', None) if board else None,
        )

        return platform

    def install_tools(self, tools):
        """
        Install tools additional to those specified in the test config 'tools'
        field

        :param tools: The list of names of tools to install
        :type tools: list(str)
        """
        tools = set(tools)

        # Add tools dependencies
        if 'rt-app' in tools:
            tools.update(['taskset', 'trace-cmd', 'perf', 'cgroup_run_into.sh'])

        # Remove duplicates and already-installed tools
        tools.difference_update(self.__installed_tools)

        tools_to_install = []
        for tool in tools:
            # Prefer the arch-independent script, fall back to the
            # ABI-specific binary folder
            binary = '{}/tools/scripts/{}'.format(basepath, tool)
            if not os.path.isfile(binary):
                binary = '{}/tools/{}/{}'\
                         .format(basepath, self.target.abi, tool)
            tools_to_install.append(binary)

        for tool_to_install in tools_to_install:
            self.target.install(tool_to_install)

        self.__installed_tools.update(tools)

    def ftrace_conf(self, conf):
        """Force re-initialization of the ftrace collector with ``conf``."""
        self._init_ftrace(True, conf)

    def _init_ftrace(self, force=False, conf=None):
        """
        Create the devlib FtraceCollector from ``conf`` (or the 'ftrace'
        section of the target configuration).

        :param force: rebuild the collector even if one already exists.
        :param conf: optional ftrace configuration dict overriding self.conf.
        :returns: the FtraceCollector, or None if no configuration is given.
        """

        if not force and self.ftrace is not None:
            return self.ftrace

        if conf is None and 'ftrace' not in self.conf:
            return None

        if conf is not None:
            ftrace = conf
        else:
            ftrace = self.conf['ftrace']

        events = FTRACE_EVENTS_DEFAULT
        if 'events' in ftrace:
            events = ftrace['events']

        functions = None
        if 'functions' in ftrace:
            functions = ftrace['functions']

        buffsize = FTRACE_BUFSIZE_DEFAULT
        if 'buffsize' in ftrace:
            buffsize = ftrace['buffsize']

        self.ftrace = devlib.FtraceCollector(
            self.target,
            events      = events,
            functions   = functions,
            buffer_size = buffsize,
            autoreport  = False,
            autoview    = False
        )

        if events:
            self._log.info('Enabled tracepoints:')
            for event in events:
                self._log.info('   %s', event)
        if functions:
            self._log.info('Kernel functions profiled:')
            for function in functions:
                self._log.info('   %s', function)

        return self.ftrace

    def _init_energy(self, force):
        """Build the energy meter for this target/board configuration."""

        # Initialize energy probe to board default
        self.emeter = EnergyMeter.getInstance(self.target, self.conf, force,
                                              self.res_dir)

    def _init_platform_bl(self):
        """Build the platform descriptor for a big.LITTLE target (devlib
        'bl' module provides core lists and per-cluster frequencies)."""
        self.platform = {
            'clusters' : {
                'little'    : self.target.bl.littles,
                'big'       : self.target.bl.bigs
            },
            'freqs' : {
                'little'    : self.target.bl.list_littles_frequencies(),
                'big'       : self.target.bl.list_bigs_frequencies()
            }
        }
        self.platform['cpus_count'] = \
            len(self.platform['clusters']['little']) + \
            len(self.platform['clusters']['big'])

    def _init_platform_smp(self):
        """Build the platform descriptor for an SMP target, grouping CPUs
        by the cluster ids reported by devlib."""
        self.platform = {
            'clusters' : {},
            'freqs' : {}
        }
        for cpu_id,node_id in enumerate(self.target.core_clusters):
            if node_id not in self.platform['clusters']:
                self.platform['clusters'][node_id] = []
            self.platform['clusters'][node_id].append(cpu_id)

        if 'cpufreq' in self.target.modules:
            # Try loading frequencies using the cpufreq module
            # (assumes all CPUs in a cluster share the same OPPs)
            for cluster_id in self.platform['clusters']:
                core_id = self.platform['clusters'][cluster_id][0]
                self.platform['freqs'][cluster_id] = \
                    self.target.cpufreq.list_frequencies(core_id)
        else:
            self._log.warning('Unable to identify cluster frequencies')

        # TODO: get the performance boundaries in case of intel_pstate driver

        self.platform['cpus_count'] = len(self.target.core_clusters)

    def _load_em(self, board):
        """
        Load the default energy model for ``board`` from its JSON platform
        descriptor, if one exists.

        :param board: board name (case-insensitive).
        :returns: the 'nrg_model' dict, or None if missing.
        """
        em_path = os.path.join(basepath,
                               'libs/utils/platforms', board.lower() + '.json')
        self._log.debug('Trying to load default EM from %s', em_path)
        if not os.path.exists(em_path):
            return None
        self._log.info('Loading default EM:')
        self._log.info('   %s', em_path)
        board = JsonConf(em_path)
        board.load()
        if 'nrg_model' not in board.json:
            return None
        return board.json['nrg_model']

    def _load_board(self, board):
        """
        Load the board descriptor ('board' key of the JSON platform file).

        :param board: board name (case-insensitive).
        :returns: the 'board' dict, or None if the file or key is missing.
        """
        board_path = os.path.join(basepath,
                                  'libs/utils/platforms', board.lower() + '.json')
        self._log.debug('Trying to load board descriptor from %s', board_path)
        if not os.path.exists(board_path):
            return None
        self._log.info('Loading board:')
        self._log.info('   %s', board_path)
        board = JsonConf(board_path)
        board.load()
        if 'board' not in board.json:
            return None
        return board.json['board']

    def _get_clusters(self, core_names):
        """
        Map a list of core names to per-CPU cluster ids: CPUs with the same
        core name share a cluster id, assigned in order of first appearance.

        :param core_names: list of core name strings, one per CPU.
        :returns: list of integer cluster ids, one per CPU.
        """
        idx = 0
        clusters = []
        ids_map = { core_names[0] : 0 }
        for name in core_names:
            # Known name keeps its id; a new name gets the next id
            idx = ids_map.get(name, idx+1)
            ids_map[name] = idx
            clusters.append(idx)
        return clusters

    def _init_platform(self):
        """Fill self.platform with clusters/freqs, energy model, topology
        and kernel build information for the results folder."""
        if 'bl' in self.target.modules:
            self._init_platform_bl()
        else:
            self._init_platform_smp()

        # Adding energy model information
        if 'nrg_model' in self.conf:
            self.platform['nrg_model'] = self.conf['nrg_model']
        # Try to load the default energy model (if available)
        else:
            self.platform['nrg_model'] = self._load_em(self.conf['board'])

        # Adding topology information
        self.platform['topology'] = self.topology.get_level("cluster")

        # Adding kernel build information
        kver = self.target.kernel_version
        self.platform['kernel'] = {t: getattr(kver, t, None)
                                   for t in [
                                       'release', 'version',
                                       'version_number', 'major', 'minor',
                                       'rc', 'sha1', 'parts'
                                   ]
                                  }
        self.platform['abi'] = self.target.abi
        self.platform['os'] = self.target.os

        self._log.debug('Platform descriptor initialized\n%s', self.platform)
        # self.platform_dump('./')

    def platform_dump(self, dest_dir, dest_file='platform.json'):
        """
        Dump the platform descriptor as JSON into ``dest_dir``.

        :param dest_dir: destination folder (must exist).
        :param dest_file: output file name, 'platform.json' by default.
        :returns: tuple (platform dict, path of the written file).
        """
        plt_file = os.path.join(dest_dir, dest_file)
        self._log.debug('Dump platform descriptor in [%s]', plt_file)
        with open(plt_file, 'w') as ofile:
            json.dump(self.platform, ofile, sort_keys=True, indent=4)
        return (self.platform, plt_file)

    def calibration(self, force=False):
        """
        Get rt-app calibration. Run calibration on target if necessary.

        :param force: Always run calibration on target, even if we have not
                      installed rt-app or have already run calibration.

        :returns: A dict with calibration results, which can be passed as the
                  ``calibration`` parameter to :class:`RTA`, or ``None`` if
                  force=False and we have not installed rt-app.
        """

        if not force and self._calib:
            return self._calib

        required = force or 'rt-app' in self.__installed_tools

        if not required:
            self._log.debug('No RT-App workloads, skipping calibration')
            return

        # Prefer calibration values provided by the configuration, to avoid
        # re-running the (slow) on-target calibration
        if not force and 'rtapp-calib' in self.conf:
            self._log.warning('Using configuration provided RTApp calibration')
            self._calib = {
                int(key): int(value)
                for key, value in self.conf['rtapp-calib'].items()
            }
        else:
            self._log.info('Calibrating RTApp...')
            self._calib = RTA.calibrate(self.target)

        self._log.info('Using RT-App calibration values:')
        self._log.info('   %s',
                       "{" + ", ".join('"%r": %r' % (key, self._calib[key])
                                       for key in sorted(self._calib)) + "}")
        return self._calib

    def resolv_host(self, host=None):
        """
        Resolve a host name or IP address to a MAC address

        .. TODO Is my networking terminology correct here?

        :param host: IP address or host name to resolve. If None, use 'host'
                     value from target_config.
        :type host: str
        """
        if host is None:
            host = self.conf['host']

        # Refresh ARP for local network IPs
        # NOTE(review): relies on a module-level IFCFG_BCAST_RE compiled
        # regex to extract broadcast addresses from `ifconfig` output —
        # verify it is defined in this module.
        self._log.debug('Collecting all Bcast address')
        output = os.popen(r'ifconfig').read().split('\n')
        for line in output:
            match = IFCFG_BCAST_RE.search(line)
            if not match:
                continue
            baddr = match.group(1)
            try:
                # Ping-scan the /24 to populate the ARP cache
                cmd = r'nmap -T4 -sP {}/24 &>/dev/null'.format(baddr.strip())
                self._log.debug(cmd)
                os.popen(cmd)
            except RuntimeError:
                # Fall back to a broadcast ping when nmap is unavailable
                self._log.warning('Nmap not available, try IP lookup using broadcast ping')
                cmd = r'ping -b -c1 {} &>/dev/null'.format(baddr)
                self._log.debug(cmd)
                os.popen(cmd)

        return self.parse_arp_cache(host)

    def parse_arp_cache(self, host):
        """
        Look up ``host`` (a MAC address if it contains ':', otherwise an IP)
        in the output of ``arp -n`` and return the (MAC, IP) pair.

        :param host: MAC or IP address to look up.
        :returns: tuple (macaddr, ipaddr); macaddr is 'UNKNOWN' when arp
                  reports no HW address (e.g. WiFi-attached targets).
        :raises ValueError: if the address cannot be resolved.
        """
        output = os.popen(r'arp -n')
        if ':' in host:
            # Assuming this is a MAC address
            # TODO add a suitable check on MAC address format
            # Query ARP for the specified HW address
            ARP_RE = re.compile(
                r'([^ ]*).*({}|{})'.format(host.lower(), host.upper())
            )
            macaddr = host
            ipaddr = None
            for line in output:
                match = ARP_RE.search(line)
                if not match:
                    continue
                ipaddr = match.group(1)
                break
        else:
            # Assuming this is an IP address
            # TODO add a suitable check on IP address format
            # Query ARP for the specified IP address
            ARP_RE = re.compile(
                r'{}.*ether *([0-9a-fA-F:]*)'.format(host)
            )
            macaddr = None
            ipaddr = host
            for line in output:
                match = ARP_RE.search(line)
                if not match:
                    continue
                macaddr = match.group(1)
                break
            else:
                # When target is accessed via WiFi, there is not MAC address
                # reported by arp. In these cases we can know only the IP
                # of the remote target.
                macaddr = 'UNKNOWN'

        if not ipaddr or not macaddr:
            raise ValueError('Unable to lookup for target IP/MAC address')
        self._log.info('Target (%s) at IP address: %s', macaddr, ipaddr)
        return (macaddr, ipaddr)

    def reboot(self, reboot_time=120, ping_time=15):
        """
        Reboot target.

        :param reboot_time: Time to wait for the target to become available
                            after reboot before declaring failure.
        :param ping_time: Period between attempts to ping the target while
                          waiting for reboot.
        """
        # Send remote target a reboot command
        if self._feature('no-reboot'):
            self._log.warning('Reboot disabled by conf features')
        else:
            # Configuration values override the method's defaults
            if 'reboot_time' in self.conf:
                reboot_time = int(self.conf['reboot_time'])

            if 'ping_time' in self.conf:
                ping_time = int(self.conf['ping_time'])

            # Before rebooting make sure to have IP and MAC addresses
            # of the target
            (self.mac, self.ip) = self.parse_arp_cache(self.ip)

            self.target.execute('sleep 2 && reboot -f &', as_root=True)

            # Wait for the target to complete the reboot
            self._log.info('Waiting up to %s[s] for target [%s] to reboot...',
                           reboot_time, self.ip)

            ping_cmd = "ping -c 1 {} >/dev/null".format(self.ip)
            elapsed = 0
            start = time.time()
            while elapsed <= reboot_time:
                time.sleep(ping_time)
                self._log.debug('Trying to connect to [%s] target...', self.ip)
                if os.system(ping_cmd) == 0:
                    break
                elapsed = time.time() - start
            if elapsed > reboot_time:
                if self.mac:
                    # The IP may have changed across the reboot: try to
                    # re-resolve it from the known MAC address
                    self._log.warning('target [%s] not responding to PINGs, '
                                      'trying to resolve MAC address...',
                                      self.ip)
                    (self.mac, self.ip) = self.resolv_host(self.mac)
                else:
                    self._log.warning('target [%s] not responding to PINGs, '
                                      'trying to continue...',
                                      self.ip)

        # Force re-initialization of all the devlib modules
        force = True

        # Reset the connection to the target
        # NOTE(review): method continues beyond this visible chunk
self._init(force) 1039 1040 # Initialize FTrace events collection 1041 self._init_ftrace(force) 1042 1043 # Initialize energy probe instrument 1044 self._init_energy(force) 1045 1046 def install_kernel(self, tc, reboot=False): 1047 """ 1048 Deploy kernel and DTB via TFTP, optionally rebooting 1049 1050 :param tc: Dicionary containing optional keys 'kernel' and 'dtb'. Values 1051 are paths to the binaries to deploy. 1052 :type tc: dict 1053 1054 :param reboot: Reboot thet target after deployment 1055 :type reboot: bool 1056 """ 1057 1058 # Default initialize the kernel/dtb settings 1059 tc.setdefault('kernel', None) 1060 tc.setdefault('dtb', None) 1061 1062 if self.kernel == tc['kernel'] and self.dtb == tc['dtb']: 1063 return 1064 1065 self._log.info('Install kernel [%s] on target...', tc['kernel']) 1066 1067 # Install kernel/dtb via FTFP 1068 if self._feature('no-kernel'): 1069 self._log.warning('Kernel deploy disabled by conf features') 1070 1071 elif 'tftp' in self.conf: 1072 self._log.info('Deploy kernel via TFTP...') 1073 1074 # Deploy kernel in TFTP folder (mandatory) 1075 if 'kernel' not in tc or not tc['kernel']: 1076 raise ValueError('Missing "kernel" parameter in conf: %s', 1077 'KernelSetup', tc) 1078 self.tftp_deploy(tc['kernel']) 1079 1080 # Deploy DTB in TFTP folder (if provided) 1081 if 'dtb' not in tc or not tc['dtb']: 1082 self._log.debug('DTB not provided, using existing one') 1083 self._log.debug('Current conf:\n%s', tc) 1084 self._log.warning('Using pre-installed DTB') 1085 else: 1086 self.tftp_deploy(tc['dtb']) 1087 1088 else: 1089 raise ValueError('Kernel installation method not supported') 1090 1091 # Keep track of last installed kernel 1092 self.kernel = tc['kernel'] 1093 if 'dtb' in tc: 1094 self.dtb = tc['dtb'] 1095 1096 if not reboot: 1097 return 1098 1099 # Reboot target 1100 self._log.info('Rebooting taget...') 1101 self.reboot() 1102 1103 1104 def tftp_deploy(self, src): 1105 """ 1106 .. 
TODO 1107 """ 1108 1109 tftp = self.conf['tftp'] 1110 1111 dst = tftp['folder'] 1112 if 'kernel' in src: 1113 dst = os.path.join(dst, tftp['kernel']) 1114 elif 'dtb' in src: 1115 dst = os.path.join(dst, tftp['dtb']) 1116 else: 1117 dst = os.path.join(dst, os.path.basename(src)) 1118 1119 cmd = 'cp {} {} && sync'.format(src, dst) 1120 self._log.info('Deploy %s into %s', src, dst) 1121 result = os.system(cmd) 1122 if result != 0: 1123 self._log.error('Failed to deploy image: %s', src) 1124 raise ValueError('copy error') 1125 1126 def _feature(self, feature): 1127 return feature in self.conf['__features__'] 1128 1129 IFCFG_BCAST_RE = re.compile( 1130 r'Bcast:(.*) ' 1131 ) 1132 1133 # vim :set tabstop=4 shiftwidth=4 expandtab 1134