1 # Copyright 2012 the V8 project authors. All rights reserved. 2 # Redistribution and use in source and binary forms, with or without 3 # modification, are permitted provided that the following conditions are 4 # met: 5 # 6 # * Redistributions of source code must retain the above copyright 7 # notice, this list of conditions and the following disclaimer. 8 # * Redistributions in binary form must reproduce the above 9 # copyright notice, this list of conditions and the following 10 # disclaimer in the documentation and/or other materials provided 11 # with the distribution. 12 # * Neither the name of Google Inc. nor the names of its 13 # contributors may be used to endorse or promote products derived 14 # from this software without specific prior written permission. 15 # 16 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 28 29 import collections 30 import os 31 import re 32 import shutil 33 import sys 34 import time 35 36 from pool import Pool 37 from . import commands 38 from . import perfdata 39 from . import statusfile 40 from . import testsuite 41 from . import utils 42 from ..objects import output 43 44 45 # Base dir of the v8 checkout. 
# Base dir of the v8 checkout.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
    os.path.abspath(__file__)))))
TEST_DIR = os.path.join(BASE_DIR, "test")


class Instructions(object):
  """Plain data holder: everything needed to execute one test run."""
  def __init__(self, command, test_id, timeout, verbose):
    self.command = command
    self.id = test_id
    self.timeout = timeout
    self.verbose = verbose


# Structure that keeps global information per worker process.
ProcessContext = collections.namedtuple(
    "process_context", ["suites", "context"])


def MakeProcessContext(context):
  """Generate a process-local context.

  This reloads all suites per process and stores the global context.

  Args:
    context: The global context from the test runner.
  """
  suite_paths = utils.GetSuitePaths(TEST_DIR)
  suites = {}
  for root in suite_paths:
    # Don't reinitialize global state as this is concurrently called from
    # different processes.
    suite = testsuite.TestSuite.LoadTestSuite(
        os.path.join(TEST_DIR, root), global_init=False)
    if suite:
      suites[suite.name] = suite
  return ProcessContext(suites, context)


def GetCommand(test, context):
  """Assemble the full command line (shell binary + flags) for one test."""
  d8testflag = []
  shell = test.shell()
  if shell == "d8":
    d8testflag = ["--test"]
  if utils.IsWindows():
    shell += ".exe"
  if context.random_seed:
    d8testflag += ["--random-seed=%s" % context.random_seed]
  cmd = (context.command_prefix +
         [os.path.abspath(os.path.join(context.shell_dir, shell))] +
         d8testflag +
         test.suite.GetFlagsForTestCase(test, context) +
         context.extra_flags)
  return cmd


def _GetInstructions(test, context):
  """Build Instructions for a test, scaling the timeout for slow configs."""
  command = GetCommand(test, context)
  timeout = context.timeout
  if ("--stress-opt" in test.flags or
      "--stress-opt" in context.mode_flags or
      "--stress-opt" in context.extra_flags):
    timeout *= 4
  if "--noenable-vfp3" in context.extra_flags:
    timeout *= 2
  # FIXME(machenbach): Make this more OO. Don't expose default outcomes or
  # the like.
  if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
    timeout *= 2
  return Instructions(command, test.id, timeout, context.verbose)


class Job(object):
  """Stores data to be sent over the multi-process boundary.

  All contained fields will be pickled/unpickled.
  """

  def Run(self, process_context):
    """Executes the job.

    Args:
      process_context: Process-local information that is initialized by the
        executing worker.
    """
    raise NotImplementedError()


def SetupProblem(exception, test):
  """Create a synthetic failure result for a test that failed during setup.

  Returns a (test id, Output, duration) tuple shaped like a regular result.
  """
  stderr = ">>> EXCEPTION: %s\n" % exception
  match = re.match(r"^.*No such file or directory: '(.*)'$", str(exception))
  if match:
    # Extra debugging information when files are claimed missing.
    f = match.group(1)
    stderr += ">>> File %s exists? -> %s\n" % (f, os.path.exists(f))
  return test.id, output.Output(1, False, "", stderr, None), 0


class TestJob(Job):
  """Job that runs a single test case in a worker process."""
  def __init__(self, test):
    self.test = test

  def _rename_coverage_data(self, output, context):
    """Rename coverage data.

    Rename files with PIDs to files with unique test IDs, because the number
    of tests might be higher than pid_max. E.g.:
    d8.1234.sancov -> d8.test.1.sancov, where 1234 was the process' PID
    and 1 is the test ID.
    """
    if context.sancov_dir and output.pid is not None:
      sancov_file = os.path.join(
          context.sancov_dir, "%s.%d.sancov" % (self.test.shell(), output.pid))

      # Some tests are expected to fail and don't produce coverage data.
      if os.path.exists(sancov_file):
        parts = sancov_file.split(".")
        new_sancov_file = ".".join(
            parts[:-2] + ["test", str(self.test.id)] + parts[-1:])
        assert not os.path.exists(new_sancov_file)
        os.rename(sancov_file, new_sancov_file)

  def Run(self, process_context):
    try:
      # Retrieve a new suite object on the worker-process side. The original
      # suite object isn't pickled.
      self.test.SetSuiteObject(process_context.suites)
      instr = _GetInstructions(self.test, process_context.context)
    except Exception as e:
      return SetupProblem(e, self.test)

    start_time = time.time()
    output = commands.Execute(instr.command, instr.verbose, instr.timeout)
    self._rename_coverage_data(output, process_context.context)
    return (instr.id, output, time.time() - start_time)


def RunTest(job, process_context):
  """Entry point handed to the pool; unpickles and executes one job."""
  return job.Run(process_context)


class Runner(object):
  """Schedules tests over a process pool and aggregates the results."""

  def __init__(self, suites, progress_indicator, context):
    self.datapath = os.path.join("out", "testrunner_data")
    self.perf_data_manager = perfdata.GetPerfDataManager(
        context, self.datapath)
    self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
    self.perf_failures = False
    self.printed_allocations = False
    self.tests = [ t for s in suites for t in s.tests ]
    if not context.no_sorting:
      for t in self.tests:
        t.duration = self.perfdata.FetchPerfData(t) or 1.0
      # Slowest tests first (slow-marked, then by recorded duration) so the
      # pool's tail latency is minimized.
      slow_key = lambda t: statusfile.IsSlow(t.outcomes)
      self.tests.sort(key=slow_key, reverse=True)
      self.tests.sort(key=lambda t: t.duration, reverse=True)
    self._CommonInit(suites, progress_indicator, context)

  def _CommonInit(self, suites, progress_indicator, context):
    self.total = 0
    for s in suites:
      for t in s.tests:
        t.id = self.total
        self.total += 1
    self.indicator = progress_indicator
    progress_indicator.SetRunner(self)
    self.context = context
    self.succeeded = 0
    self.remaining = self.total
    self.failed = []
    self.crashed = 0
    self.reran_tests = 0

  def _RunPerfSafe(self, fun):
    """Run a perf-database operation, downgrading failures to a flag."""
    try:
      fun()
    except Exception as e:
      print("PerfData exception: %s" % e)
      self.perf_failures = True

  def _MaybeRerun(self, pool, test):
    if test.run <= self.context.rerun_failures_count:
      # Possibly rerun this test if its run count is below the maximum per
      # test. <= as the flag controls reruns not including the first run.
      if test.run == 1:
        # Count the overall number of reran tests on the first rerun.
        if self.reran_tests < self.context.rerun_failures_max:
          self.reran_tests += 1
        else:
          # Don't rerun this if the overall number of rerun tests has been
          # reached.
          return
      if test.run >= 2 and test.duration > self.context.timeout / 20.0:
        # Rerun slow tests at most once.
        return

      # Rerun this test.
      test.duration = None
      test.output = None
      test.run += 1
      pool.add([TestJob(test)])
      self.remaining += 1
      self.total += 1

  def _ProcessTestNormal(self, test, result, pool):
    """Record one result; returns True iff the perf DB should be updated."""
    test.output = result[1]
    test.duration = result[2]
    has_unexpected_output = test.suite.HasUnexpectedOutput(test)
    if has_unexpected_output:
      self.failed.append(test)
      if test.output.HasCrashed():
        self.crashed += 1
    else:
      self.succeeded += 1
    self.remaining -= 1
    # For the indicator, everything that happens after the first run is treated
    # as unexpected even if it flakily passes in order to include it in the
    # output.
    self.indicator.HasRun(test, has_unexpected_output or test.run > 1)
    if has_unexpected_output:
      # Rerun test failures after the indicator has processed the results.
      self._VerbosePrint("Attempting to rerun test after failure.")
      self._MaybeRerun(pool, test)
    # Update the perf database if the test succeeded.
    return not has_unexpected_output

  def _ProcessTestPredictable(self, test, result, pool):
    """Compare repeated runs for identical allocation output.

    Returns True so the perf database is always updated.
    """
    def HasDifferentAllocations(output1, output2):
      def AllocationStr(stdout):
        for line in reversed((stdout or "").splitlines()):
          if line.startswith("### Allocations = "):
            self.printed_allocations = True
            return line
        return ""
      return (AllocationStr(output1.stdout) != AllocationStr(output2.stdout))

    # Always pass the test duration for the database update.
    test.duration = result[2]
    if test.run == 1 and result[1].HasTimedOut():
      # If we get a timeout in the first run, we are already in an
      # unpredictable state. Just report it as a failure and don't rerun.
      # BUG FIX: this branch previously fell through to the trailing else,
      # which scheduled a rerun and decremented self.remaining a second time,
      # contradicting the comment above. The elif chain below makes all
      # branches mutually exclusive.
      test.output = result[1]
      self.remaining -= 1
      self.failed.append(test)
      self.indicator.HasRun(test, True)
    elif test.run > 1 and HasDifferentAllocations(test.output, result[1]):
      # From the second run on, check for different allocations. If a
      # difference is found, call the indicator twice to report both tests.
      # All runs of each test are counted as one for the statistic.
      self.remaining -= 1
      self.failed.append(test)
      self.indicator.HasRun(test, True)
      test.output = result[1]
      self.indicator.HasRun(test, True)
    elif test.run >= 3:
      # No difference on the third run -> report a success.
      self.remaining -= 1
      self.succeeded += 1
      test.output = result[1]
      self.indicator.HasRun(test, False)
    else:
      # No difference yet and less than three runs -> add another run and
      # remember the output for comparison.
      test.run += 1
      test.output = result[1]
      pool.add([TestJob(test)])
    # Always update the perf database.
    return True

  def Run(self, jobs):
    """Run all tests on `jobs` worker processes; returns an exit code."""
    self.indicator.Starting()
    self._RunInternal(jobs)
    self.indicator.Done()
    if self.failed:
      return 1
    elif self.remaining:
      return 2
    return 0

  def _RunInternal(self, jobs):
    pool = Pool(jobs)
    test_map = {}
    queued_exception = [None]
    def gen_tests():
      for test in self.tests:
        assert test.id >= 0
        test_map[test.id] = test
        try:
          yield [TestJob(test)]
        except Exception as e:
          # If this failed, save the exception and re-raise it later (after
          # all other tests have had a chance to run).
          queued_exception[0] = e
          continue
    try:
      it = pool.imap_unordered(
          fn=RunTest,
          gen=gen_tests(),
          process_context_fn=MakeProcessContext,
          process_context_args=[self.context],
      )
      for result in it:
        if result.heartbeat:
          self.indicator.Heartbeat()
          continue
        test = test_map[result.value[0]]
        if self.context.predictable:
          update_perf = self._ProcessTestPredictable(test, result.value, pool)
        else:
          update_perf = self._ProcessTestNormal(test, result.value, pool)
        if update_perf:
          self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(test))
    finally:
      self._VerbosePrint("Closing process pool.")
      pool.terminate()
      self._VerbosePrint("Closing database connection.")
      self._RunPerfSafe(lambda: self.perf_data_manager.close())
      if self.perf_failures:
        # Nuke perf data in case of failures. This might not work on windows as
        # some files might still be open.
        print("Deleting perf test data due to db corruption.")
        shutil.rmtree(self.datapath)
    if queued_exception[0]:
      raise queued_exception[0]

    # Make sure that any allocations were printed in predictable mode (if we
    # ran any tests).
    assert (
        not self.total or
        not self.context.predictable or
        self.printed_allocations
    )

  def _VerbosePrint(self, text):
    if self.context.verbose:
      print(text)
      sys.stdout.flush()


class BreakNowException(Exception):
  """Raised to abort the test run; wraps an arbitrary payload value."""
  def __init__(self, value):
    self.value = value
  def __str__(self):
    return repr(self.value)