# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. at startup, for each
# executed test, and at shutdown. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.
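#
# A typical invocation selects a board type and requests a build first, e.g.
# (illustrative command line, run from the source tree):
#   ./test/py/test.py --bd sandbox --build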

import atexit
import errno
import os
import os.path
import pytest
from _pytest.runner import runtestprotocol
import re
import StringIO
import sys

try:
    import configparser
except ImportError:
    import ConfigParser as configparser

# Globals: The HTML log file, and the connection to the U-Boot console.
log = None
console = None

def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused by directories that already exist are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """

    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
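
# Note: on Python 3.2+, mkdir_p() is essentially equivalent to
# os.makedirs(path, exist_ok=True); the explicit EEXIST handling above keeps
# this helper compatible with Python 2.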

def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')
    parser.addoption('--gdbserver', default=None,
        help='Run sandbox under gdbserver. The argument is the channel '+
        'over which gdbserver should communicate, e.g. localhost:1234')

def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """

    global log
    global console
    global ubconfig

    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and not board_type.startswith('sandbox'):
        raise Exception('--gdbserver only supported with sandbox targets')

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        if build_dir != source_dir:
            o_opt = 'O=%s' % build_dir
        else:
            o_opt = ''
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j8'],
        )
        with log.section('make'):
            runner = log.get_runner('make', sys.stdout)
            for cmd in cmds:
                runner.run(cmd, cwd=source_dir)
            runner.close()
            log.status_pass('OK')

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)

    ubconfig.buildconfig = dict()

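    # Kconfig output has no INI section headers, so a dummy '[root]' section
    # is prepended below to make it parseable by configparser. A line such as
    # CONFIG_CMD_MEMORY=y (illustrative) then appears as the item
    # ('config_cmd_memory', 'y'), since configparser lower-cases option
    # names; setup_buildconfigspec() relies on that key format.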
    for conf_file in ('.config', 'include/autoconf.mk'):
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = StringIO.StringIO(ini_str)
            parser = configparser.RawConfigParser()
            parser.readfp(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))

    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'

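    # The loop below exports each value to the process environment, e.g.
    # board_type is published as U_BOOT_BOARD_TYPE (with the value 'sandbox'
    # in the default configuration).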
    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    if board_type.startswith('sandbox'):
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)

re_ut_test_list = re.compile(r'_u_boot_list_2_(.*)_test_2_\1_test_(.*)\s*$')
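# For example, a symbol line containing '_u_boot_list_2_dm_test_2_dm_test_gpio'
# (illustrative name) matches with group(1) == 'dm' and group(2) == 'gpio',
# which generate_ut_subtest() below turns into the parametrized value
# 'dm gpio' and test ID 'ut_dm_gpio'.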
def generate_ut_subtest(metafunc, fixture_name):
    """Provide parametrization for a ut_subtest fixture.

    Determines the set of unit tests built into a U-Boot binary by parsing the
    list of symbols generated by the build process. Provides this information
    to test functions by parameterizing their ut_subtest fixture parameter.

    Args:
        metafunc: The pytest test function.
        fixture_name: The name of the fixture to parametrize.

    Returns:
        Nothing.
    """

    fn = console.config.build_dir + '/u-boot.sym'
    try:
        with open(fn, 'rt') as f:
            lines = f.readlines()
    except IOError:
        lines = []
    lines.sort()

    vals = []
    for l in lines:
        m = re_ut_test_list.search(l)
        if not m:
            continue
        vals.append(m.group(1) + ' ' + m.group(2))

    ids = ['ut_' + s.replace(' ', '_') for s in vals]
    metafunc.parametrize(fixture_name, vals, ids=ids)

def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.
        fixture_name: The name of the fixture to parametrize.

    Returns:
        Nothing.
    """

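    # Illustrative example (hypothetical data): a board environment file that
    # defines
    #     env__foo = {'fixture_id': 'abc', ...}
    # causes a test taking an env__foo parameter to run once with that dict
    # as the value, under the test ID 'abc'. If instead a key env__foos
    # (note the trailing 's') holds a list, the test runs once per entry.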
    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    parts = fixture_name.split('__')
    if len(parts) < 2:
        return
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]
    vals = []
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])
    def fixture_id(index, val):
        try:
            return val['fixture_id']
        except (KeyError, TypeError):
            return fixture_name + str(index)
    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)

def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """

    for fn in metafunc.fixturenames:
        if fn == 'ut_subtest':
            generate_ut_subtest(metafunc, fn)
            continue
        generate_config(metafunc, fn)

@pytest.fixture(scope='session')
def u_boot_log(request):
    """Generate the value of a test's u_boot_log fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.log

@pytest.fixture(scope='session')
def u_boot_config(request):
    """Generate the value of a test's u_boot_config fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.config

@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    console.ensure_spawned()
    return console

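# Tests consume the fixtures above by naming them as parameters, e.g. a
# hypothetical test:
#   def test_version(u_boot_console):
#       u_boot_console.run_command('version')
# (run_command() is assumed here from the console object's API.)
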
anchors = {}
tests_not_run = []
tests_failed = []
tests_xpassed = []
tests_xfailed = []
tests_skipped = []
tests_warning = []
tests_passed = []

def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    tests_not_run.append(item.name)

def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if console:
        console.close()
    if log:
        with log.section('Status Report', 'status_report'):
            log.status_pass('%d passed' % len(tests_passed))
            if tests_warning:
                log.status_warning('%d passed with warning' % len(tests_warning))
                for test in tests_warning:
                    anchor = anchors.get(test, None)
                    log.status_warning('... ' + test, anchor)
            if tests_skipped:
                log.status_skipped('%d skipped' % len(tests_skipped))
                for test in tests_skipped:
                    anchor = anchors.get(test, None)
                    log.status_skipped('... ' + test, anchor)
            if tests_xpassed:
                log.status_xpass('%d xpass' % len(tests_xpassed))
                for test in tests_xpassed:
                    anchor = anchors.get(test, None)
                    log.status_xpass('... ' + test, anchor)
            if tests_xfailed:
                log.status_xfail('%d xfail' % len(tests_xfailed))
                for test in tests_xfailed:
                    anchor = anchors.get(test, None)
                    log.status_xfail('... ' + test, anchor)
            if tests_failed:
                log.status_fail('%d failed' % len(tests_failed))
                for test in tests_failed:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
            if tests_not_run:
                log.status_fail('%d not run' % len(tests_not_run))
                for test in tests_not_run:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
        log.close()
atexit.register(cleanup)

def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

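    # For example, @pytest.mark.boardspec('sandbox') restricts a test to the
    # sandbox board type, while @pytest.mark.boardspec('!sandbox') would
    # exclude it there instead (illustrative marker arguments).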
    mark = item.get_marker('boardspec')
    if not mark:
        return
    required_boards = []
    for board in mark.args:
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board "%s" not supported' % ubconfig.board_type)
                return
        else:
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board "%s" not supported' % ubconfig.board_type)

def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped. The
    companion 'notbuildconfigspec' marker inverts this: the test is skipped
    if the named feature is enabled.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

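    # For example, @pytest.mark.buildconfigspec('cmd_memory') skips the test
    # unless CONFIG_CMD_MEMORY=y in the build's .config (illustrative option
    # name).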
    mark = item.get_marker('buildconfigspec')
    if mark:
        for option in mark.args:
            if not ubconfig.buildconfig.get('config_' + option.lower(), None):
                pytest.skip('.config feature "%s" not enabled' % option.lower())
    notmark = item.get_marker('notbuildconfigspec')
    if notmark:
        for option in notmark.args:
            if ubconfig.buildconfig.get('config_' + option.lower(), None):
                pytest.skip('.config feature "%s" enabled' % option.lower())

def tool_is_in_path(tool):
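    """Determine whether the given tool is installed and executable.

    Searches each directory in $PATH for an executable file with the given
    name.

    Args:
        tool: The name of the tool to search for.

    Returns:
        True if the tool was found in $PATH, False otherwise.
    """
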
    for path in os.environ["PATH"].split(os.pathsep):
        fn = os.path.join(path, tool)
        if os.path.isfile(fn) and os.access(fn, os.X_OK):
            return True
    return False

def setup_requiredtool(item):
    """Process any 'requiredtool' marker for a test.

    Such a marker lists some external tool (binary, executable, application)
    that the test requires. If tests are being executed on a system that
    doesn't have the required tool, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

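    # For example, @pytest.mark.requiredtool('dtc') skips the test when no
    # 'dtc' executable is found in $PATH (illustrative tool name).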
    mark = item.get_marker('requiredtool')
    if not mark:
        return
    for tool in mark.args:
        if not tool_is_in_path(tool):
            pytest.skip('tool "%s" not in $PATH' % tool)

def start_test_section(item):
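    """Open a log file section for the test and record its anchor."""
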
    anchors[item.name] = log.start_section(item.name)

def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    start_test_section(item)
    setup_boardspec(item)
    setup_buildconfigspec(item)
    setup_requiredtool(item)

def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    log.get_and_reset_warning()
    reports = runtestprotocol(item, nextitem=nextitem)
    was_warning = log.get_and_reset_warning()

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() requires that the log
    # contain a section for this test. Create a section for the test if it
    # doesn't already exist.
    if item.name not in anchors:
        start_test_section(item)

    failure_cleanup = False
    if not was_warning:
        test_list = tests_passed
        msg = 'OK'
        msg_log = log.status_pass
    else:
        test_list = tests_warning
        msg = 'OK (with warning)'
        msg_log = log.status_warning
    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    if failure_cleanup:
        console.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. a syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print('Exception occurred while logging runtest status:')
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        console.cleanup_spawn()

    return reports