"""
Copyright (c) 2011-2014 ARM Limited

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
"""
import os
import re
import sys
import json
import uuid
import pprint
import random
import optparse
import datetime
import threading

from types import ListType
from colorama import Fore, Back, Style
from prettytable import PrettyTable

from time import sleep, time
from Queue import Queue, Empty
from os.path import join, exists, basename
from threading import Thread, Lock
from subprocess import Popen, PIPE

# Imports related to mbed build api
from workspace_tools.tests import TESTS
from workspace_tools.tests import TEST_MAP
from workspace_tools.paths import BUILD_DIR
from workspace_tools.paths import HOST_TESTS
from workspace_tools.utils import ToolException
from workspace_tools.utils import construct_enum
from workspace_tools.targets import TARGET_MAP
from workspace_tools.test_db import BaseDBAccess
from workspace_tools.build_api import build_project, build_mbed_libs, build_lib
from workspace_tools.build_api import get_target_supported_toolchains
from workspace_tools.build_api import write_build_report
from workspace_tools.libraries import LIBRARIES, LIBRARY_MAP
from workspace_tools.toolchains import TOOLCHAIN_BIN_PATH
from workspace_tools.test_exporters import ReportExporter, ResultExporterType

import workspace_tools.host_tests.host_tests_plugins as host_tests_plugins
class ProcessObserver(Thread):
    def __init__(self, proc):
        self.proc = proc
        self.queue = Queue()

    def run(self):
        c = self.proc.stdout.read(1)
        self.queue.put(c)
class SingleTestExecutor(threading.Thread):
    """ Example: Single test class in separate thread usage
    """
    def __init__(self, single_test):
        self.single_test = single_test
        threading.Thread.__init__(self)

    def run(self):
        start = time()
        # Execute tests depending on options and filter applied
        test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext = self.single_test.execute()
        elapsed_time = time() - start

        # Human readable summary
        if not self.single_test.opts_suppress_summary:
            # prints well-formed summary with results (SQL table like)
            print self.single_test.generate_test_summary(test_summary, shuffle_seed)
        if self.single_test.opts_test_x_toolchain_summary:
            # prints well-formed summary with results (SQL table like)
            # table shows test x toolchain test result matrix
            print self.single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
        print "Completed in %.2f sec"% (elapsed_time)
class SingleTestRunner(object):
    """ Object wrapper for single test run which may involve multiple MUTs
    """
    RE_DETECT_TESTCASE_RESULT = None

    # Return codes for test script
    TEST_RESULT_OK = "OK"
    TEST_RESULT_FAIL = "FAIL"
    TEST_RESULT_ERROR = "ERROR"
    TEST_RESULT_UNDEF = "UNDEF"
    TEST_RESULT_IOERR_COPY = "IOERR_COPY"
    TEST_RESULT_IOERR_DISK = "IOERR_DISK"
    TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
    TEST_RESULT_TIMEOUT = "TIMEOUT"
    TEST_RESULT_NO_IMAGE = "NO_IMAGE"
    TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"

    GLOBAL_LOOPS_COUNT = 1  # How many times each test should be repeated
    TEST_LOOPS_LIST = []    # We redefine no. of loops per test_id
    TEST_LOOPS_DICT = {}    # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count }

    muts = {}       # MUTs descriptor (from external file)
    test_spec = {}  # Test specification (from external file)

    # mbed test suite -> SingleTestRunner
    TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
                           "failure" : TEST_RESULT_FAIL,
                           "error" : TEST_RESULT_ERROR,
                           "ioerr_copy" : TEST_RESULT_IOERR_COPY,
                           "ioerr_disk" : TEST_RESULT_IOERR_DISK,
                           "ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
                           "timeout" : TEST_RESULT_TIMEOUT,
                           "no_image" : TEST_RESULT_NO_IMAGE,
                           "end" : TEST_RESULT_UNDEF,
                           "mbed_assert" : TEST_RESULT_MBED_ASSERT}
    def __init__(self,
                 _global_loops_count=1,
                 _test_loops_list=None,
                 _muts={},
                 _clean=False,
                 _opts_db_url=None,
                 _opts_log_file_name=None,
                 _opts_report_html_file_name=None,
                 _opts_report_junit_file_name=None,
                 _opts_report_build_file_name=None,
                 _test_spec={},
                 _opts_goanna_for_mbed_sdk=None,
                 _opts_goanna_for_tests=None,
                 _opts_shuffle_test_order=False,
                 _opts_shuffle_test_seed=None,
                 _opts_test_by_names=None,
                 _opts_peripheral_by_names=None,
                 _opts_test_only_peripheral=False,
                 _opts_test_only_common=False,
                 _opts_verbose_skipped_tests=False,
                 _opts_verbose_test_result_only=False,
                 _opts_verbose=False,
                 _opts_firmware_global_name=None,
                 _opts_only_build_tests=False,
                 _opts_parallel_test_exec=False,
                 _opts_suppress_summary=False,
                 _opts_test_x_toolchain_summary=False,
                 _opts_copy_method=None,
                 _opts_mut_reset_type=None,
                 _opts_jobs=None,
                 _opts_waterfall_test=None,
                 _opts_extend_test_timeout=None):
180 """ Let's try hard to init this object
182 from colorama import init
185 PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
186 self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
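        # Illustrative note: a result token printed by the host test, e.g. "{success}",
        # is matched by RE_DETECT_TESTCASE_RESULT and translated via TEST_RESULT_MAPPING:
        #   m = self.RE_DETECT_TESTCASE_RESULT.search("{success}")
        #   m.group(1)                           # -> "success"
        #   self.TEST_RESULT_MAPPING["success"]  # -> TEST_RESULT_OK ("OK")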
        # Settings related to test loops counters
        try:
            _global_loops_count = int(_global_loops_count)
        except:
            _global_loops_count = 1
        if _global_loops_count < 1:
            _global_loops_count = 1
        self.GLOBAL_LOOPS_COUNT = _global_loops_count
        self.TEST_LOOPS_LIST = _test_loops_list if _test_loops_list else []
        self.TEST_LOOPS_DICT = self.test_loop_list_to_dict(_test_loops_list)
        self.shuffle_random_seed = 0.0
        self.SHUFFLE_SEED_ROUND = 10

        # MUT list and test specification storage
        self.muts = _muts
        self.test_spec = _test_spec

        # Settings passed e.g. from command line
        self.opts_db_url = _opts_db_url
        self.opts_log_file_name = _opts_log_file_name
        self.opts_report_html_file_name = _opts_report_html_file_name
        self.opts_report_junit_file_name = _opts_report_junit_file_name
        self.opts_report_build_file_name = _opts_report_build_file_name
        self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk
        self.opts_goanna_for_tests = _opts_goanna_for_tests
        self.opts_shuffle_test_order = _opts_shuffle_test_order
        self.opts_shuffle_test_seed = _opts_shuffle_test_seed
        self.opts_test_by_names = _opts_test_by_names
        self.opts_peripheral_by_names = _opts_peripheral_by_names
        self.opts_test_only_peripheral = _opts_test_only_peripheral
        self.opts_test_only_common = _opts_test_only_common
        self.opts_verbose_skipped_tests = _opts_verbose_skipped_tests
        self.opts_verbose_test_result_only = _opts_verbose_test_result_only
        self.opts_verbose = _opts_verbose
        self.opts_firmware_global_name = _opts_firmware_global_name
        self.opts_only_build_tests = _opts_only_build_tests
        self.opts_parallel_test_exec = _opts_parallel_test_exec
        self.opts_suppress_summary = _opts_suppress_summary
        self.opts_test_x_toolchain_summary = _opts_test_x_toolchain_summary
        self.opts_copy_method = _opts_copy_method
        self.opts_mut_reset_type = _opts_mut_reset_type
        self.opts_jobs = _opts_jobs if _opts_jobs is not None else 1
        self.opts_waterfall_test = _opts_waterfall_test
        self.opts_extend_test_timeout = _opts_extend_test_timeout
        self.opts_clean = _clean
        # File / screen logger initialization
        self.logger = CLITestLogger(file_name=self.opts_log_file_name)  # Default test logger

        # Database related initializations
        self.db_logger = factory_db_logger(self.opts_db_url)
        self.db_logger_build_id = None  # Build ID (database index of build_id table)
        # Let's connect to database to set up credentials and confirm database is ready
        if self.db_logger:
            self.db_logger.connect_url(self.opts_db_url)  # Save db access info inside db_logger object
            if self.db_logger.is_connected():
                # Get hostname and uname so we can use it as build description
                # when creating new build_id in external database
                (_hostname, _uname) = self.db_logger.get_hostname()
                _host_location = os.path.dirname(os.path.abspath(__file__))
                build_id_type = None if self.opts_only_build_tests is None else self.db_logger.BUILD_ID_TYPE_BUILD_ONLY
                self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
                self.db_logger.disconnect()
    def dump_options(self):
        """ Returns a data structure with common settings passed to SingleTestRunner.
            It can be used for example to fill _extra fields in database storing test suite single run data
            Example:
            data = self.dump_options()
            or
            data_str = json.dumps(self.dump_options())
        """
        result = {"db_url" : str(self.opts_db_url),
                  "log_file_name" : str(self.opts_log_file_name),
                  "shuffle_test_order" : str(self.opts_shuffle_test_order),
                  "shuffle_test_seed" : str(self.opts_shuffle_test_seed),
                  "test_by_names" : str(self.opts_test_by_names),
                  "peripheral_by_names" : str(self.opts_peripheral_by_names),
                  "test_only_peripheral" : str(self.opts_test_only_peripheral),
                  "test_only_common" : str(self.opts_test_only_common),
                  "verbose" : str(self.opts_verbose),
                  "firmware_global_name" : str(self.opts_firmware_global_name),
                  "only_build_tests" : str(self.opts_only_build_tests),
                  "copy_method" : str(self.opts_copy_method),
                  "mut_reset_type" : str(self.opts_mut_reset_type),
                  "jobs" : str(self.opts_jobs),
                  "extend_test_timeout" : str(self.opts_extend_test_timeout)}
        return result
    def shuffle_random_func(self):
        return self.shuffle_random_seed

    def is_shuffle_seed_float(self):
        """ Returns True if the shuffle seed can be converted to float
        """
        result = True
        try:
            float(self.shuffle_random_seed)
        except ValueError:
            result = False
        return result
    # This will store target / toolchain specific properties
    test_suite_properties_ext = {}  # target : toolchain
    # Here we store test results
    test_summary = []
    # Here we store test results in extended data structure
    test_summary_ext = {}
    execute_thread_slice_lock = Lock()
    def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report):
        for toolchain in toolchains:
            # Toolchain specific build successes and failures
            build_report[toolchain] = {
                "mbed_failure": False,
                "library_failure": False,
                "library_build_passing": [],
                "library_build_failing": [],
                "test_build_passing": [],
                "test_build_failing": []
            }
            # print target, toolchain
            # Test suite properties returned to external tools like CI
            test_suite_properties = {}
            test_suite_properties['jobs'] = self.opts_jobs
            test_suite_properties['clean'] = clean
            test_suite_properties['target'] = target
            test_suite_properties['test_ids'] = ', '.join(test_ids)
            test_suite_properties['toolchain'] = toolchain
            test_suite_properties['shuffle_random_seed'] = self.shuffle_random_seed

            # print '=== %s::%s ===' % (target, toolchain)
            # Let's build our test
            if target not in TARGET_MAP:
                print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Target platform not found'% (target))
                continue

            T = TARGET_MAP[target]
            build_mbed_libs_options = ["analyze"] if self.opts_goanna_for_mbed_sdk else None
            clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk or clean or self.opts_clean else None

            try:
                build_mbed_libs_result = build_mbed_libs(T,
                                                         toolchain,
                                                         options=build_mbed_libs_options,
                                                         clean=clean_mbed_libs_options,
                                                         jobs=self.opts_jobs)

                if not build_mbed_libs_result:
                    print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target'% (T.name, toolchain))
                    continue
            except ToolException:
                print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
                build_report[toolchain]["mbed_failure"] = True
                #return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
                continue
            build_dir = join(BUILD_DIR, "test", target, toolchain)

            test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
            test_suite_properties['build_dir'] = build_dir
            test_suite_properties['skipped'] = []

            # Enumerate through all tests and shuffle test order if requested
            test_map_keys = sorted(TEST_MAP.keys())

            if self.opts_shuffle_test_order:
                random.shuffle(test_map_keys, self.shuffle_random_func)
                # Update database with shuffle seed if applicable
                if self.db_logger:
                    self.db_logger.reconnect()
                    if self.db_logger.is_connected():
                        self.db_logger.update_build_id_info(self.db_logger_build_id, _shuffle_seed=self.shuffle_random_func())
                        self.db_logger.disconnect()

            if self.db_logger:
                self.db_logger.reconnect()
                if self.db_logger.is_connected():
                    # Update MUTs and Test Specification in database
                    self.db_logger.update_build_id_info(self.db_logger_build_id, _muts=self.muts, _test_spec=self.test_spec)
                    # Update Extra information in database (some options passed to test suite)
                    self.db_logger.update_build_id_info(self.db_logger_build_id, _extra=json.dumps(self.dump_options()))
                    self.db_logger.disconnect()

            valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids)
            skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)

            for skipped_test_id in skipped_test_map_keys:
                test_suite_properties['skipped'].append(skipped_test_id)
            # First pass through all tests and determine which libraries need to be built
            libraries = set()
            for test_id in valid_test_map_keys:
                test = TEST_MAP[test_id]

                # Detect which lib should be added to test
                # Some libs, like RTOS or ETH, have to be compiled separately
                for lib in LIBRARIES:
                    if lib['build_dir'] in test.dependencies:
                        libraries.add(lib['id'])

            build_project_options = ["analyze"] if self.opts_goanna_for_tests else None
            clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None

            # Build all required libraries
            for lib_id in libraries:
                try:
                    build_lib(lib_id,
                              T,
                              toolchain,
                              options=build_project_options,
                              verbose=self.opts_verbose,
                              clean=clean_mbed_libs_options,
                              jobs=self.opts_jobs)

                    build_report[toolchain]["library_build_passing"].append(lib_id)
                except ToolException:
                    print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building library %s'% (lib_id))
                    build_report[toolchain]["library_failure"] = True
                    build_report[toolchain]["library_build_failing"].append(lib_id)
                    #return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
                    continue
            for test_id in valid_test_map_keys:
                test = TEST_MAP[test_id]

                test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)

                # TODO: move these two loops below to a separate function
                INC_DIRS = []
                for lib_id in libraries:
                    if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
                        INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])

                MACROS = []
                for lib_id in libraries:
                    if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
                        MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
                MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
                MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
                test_uuid = uuid.uuid4()
                MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))

                project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
                try:
                    path = build_project(test.source_dir,
                                         join(build_dir, test_id),
                                         T,
                                         toolchain,
                                         test.dependencies,
                                         options=build_project_options,
                                         clean=clean_project_options,
                                         verbose=self.opts_verbose,
                                         name=project_name,
                                         macros=MACROS,
                                         inc_dirs=INC_DIRS,
                                         jobs=self.opts_jobs)

                    build_report[toolchain]["test_build_passing"].append(test_id)
                except ToolException:
                    project_name_str = project_name if project_name is not None else test_id
                    print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
                    build_report[toolchain]["test_build_failing"].append(test_id)
                    # return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
                    continue
                if self.opts_only_build_tests:
                    # With this option we are skipping testing phase
                    continue

                # Test duration can be increased by global value
                test_duration = test.duration
                if self.opts_extend_test_timeout is not None:
                    test_duration += self.opts_extend_test_timeout

                # For an automated test the duration acts as a timeout after
                # which the test gets interrupted
                test_spec = self.shape_test_request(target, path, test_id, test_duration)
                test_loops = self.get_test_loop_count(test_id)

                test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
                test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
                test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path

                # read MUTs, test specification and perform tests
                single_test_result, detailed_test_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)

                # Append test results to global test summary
                if single_test_result is not None:
                    self.test_summary.append(single_test_result)

                # Prepare extended test results data structure (it can be used to generate detailed test report)
                if toolchain not in self.test_summary_ext:
                    self.test_summary_ext[toolchain] = {}  # test_summary_ext : toolchain
                if target not in self.test_summary_ext[toolchain]:
                    self.test_summary_ext[toolchain][target] = {}  # test_summary_ext : toolchain : target
                self.test_summary_ext[toolchain][target][test_id] = detailed_test_results  # test_summary_ext : toolchain : target : test_id

            test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
            self.test_suite_properties_ext[target][toolchain] = test_suite_properties

        # return self.test_summary, self.shuffle_random_seed, test_summary_ext, self.test_suite_properties_ext
        q.put(target + '_'.join(toolchains))
    def execute(self):
        clean = self.test_spec.get('clean', False)
        test_ids = self.test_spec.get('test_ids', [])
        q = Queue()
        build_reports = []

        # Generate seed for shuffle if seed is not provided in options
        self.shuffle_random_seed = round(random.random(), self.SHUFFLE_SEED_ROUND)
        if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
            self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)

        if self.opts_parallel_test_exec:
            ###################################################################
            # Experimental, parallel test execution per singletest instance.
            ###################################################################
            execute_threads = []  # Threads used to build mbed SDK, libs, test cases and execute tests
            # Note: We are building here in parallel for each target separately!
            # So we are not building the same thing multiple times and compilers
            # in separate threads do not collide.
            # Inside the execute_thread_slice() function, handle() will be called to
            # get information about available MUTs (per target).
            for target, toolchains in self.test_spec['targets'].iteritems():
                self.test_suite_properties_ext[target] = {}
                cur_build_report = {}
                t = threading.Thread(target=self.execute_thread_slice, args=(q, target, toolchains, clean, test_ids, cur_build_report))
                build_reports.append({"target": target, "report": cur_build_report})
                t.daemon = True
                t.start()
                execute_threads.append(t)

            for t in execute_threads:
                q.get()  # q.get() blocks until any thread finishes; t.join() would impose an arbitrary wait order
        else:
            # Serialized (not parallel) test execution
            for target, toolchains in self.test_spec['targets'].iteritems():
                if target not in self.test_suite_properties_ext:
                    self.test_suite_properties_ext[target] = {}

                cur_build_report = {}
                self.execute_thread_slice(q, target, toolchains, clean, test_ids, cur_build_report)
                build_reports.append({"target": target, "report": cur_build_report})
        build_report = []
        for target_build_report in build_reports:
            cur_report = {
                "target": target_build_report["target"],
                "passing": [],
                "failing": []
            }

            for toolchain in sorted(target_build_report["report"], key=target_build_report["report"].get):
                print "%s - %s" % (target_build_report["target"], toolchain)
                report = target_build_report["report"][toolchain]

                if report["mbed_failure"]:
                    cur_report["failing"].append({
                        "toolchain": toolchain,
                        "project": "mbed library"
                    })

                for failing_library in report["library_build_failing"]:
                    cur_report["failing"].append({
                        "toolchain": toolchain,
                        "project": "Library::%s" % (failing_library)
                    })

                for passing_library in report["library_build_passing"]:
                    cur_report["passing"].append({
                        "toolchain": toolchain,
                        "project": "Library::%s" % (passing_library)
                    })

                for passing_test in report["test_build_passing"]:
                    cur_report["passing"].append({
                        "toolchain": toolchain,
                        "project": "Test::%s" % (passing_test)
                    })

                for failing_test in report["test_build_failing"]:
                    cur_report["failing"].append({
                        "toolchain": toolchain,
                        "project": "Test::%s" % (failing_test)
                    })

            build_report.append(cur_report)

        if self.db_logger:
            self.db_logger.reconnect()
            if self.db_logger.is_connected():
                self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
                self.db_logger.disconnect()

        return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, build_report
    def get_valid_tests(self, test_map_keys, target, toolchain, test_ids):
        valid_test_map_keys = []

        for test_id in test_map_keys:
            test = TEST_MAP[test_id]
            if self.opts_test_by_names and test_id not in self.opts_test_by_names.split(','):
                continue

            if test_ids and test_id not in test_ids:
                continue

            if self.opts_test_only_peripheral and not test.peripherals:
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
                continue

            if self.opts_peripheral_by_names and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names.split(',')]):
                # We will skip tests not forced with -p option
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s'% (target))
                continue

            if self.opts_test_only_common and test.peripherals:
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s'% (target))
                continue

            if test.automated and test.is_supported(target, toolchain):
                if test.peripherals is None and self.opts_only_build_tests:
                    # When users are using 'build only flag' and test do not have
                    # specified peripherals we can allow test building by default
                    pass
                elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names.split(','):
                    # If we force peripheral with option -p we expect test
                    # to pass even if peripheral is not in MUTs file.
                    pass
                elif not self.is_peripherals_available(target, test.peripherals):
                    if self.opts_verbose_skipped_tests:
                        if test.peripherals:
                            print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target))
                        else:
                            print self.logger.log_line(self.logger.LogType.INFO, 'Test %s skipped for target %s'% (test_id, target))
                    continue

                # The test has made it through all the filters, so add it to the valid tests list
                valid_test_map_keys.append(test_id)

        return valid_test_map_keys
654 def generate_test_summary_by_target(self, test_summary, shuffle_seed=None):
655 """ Prints well-formed summary with results (SQL table like)
656 table shows text x toolchain test result matrix
664 unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
665 unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
666 unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
667 unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)
669 result = "Test summary:\n"
670 for target in unique_targets:
671 result_dict = {} # test : { toolchain : result }
672 unique_target_toolchains = []
673 for test in test_summary:
674 if test[TARGET_INDEX] == target:
675 if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
676 unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
677 if test[TEST_INDEX] not in result_dict:
678 result_dict[test[TEST_INDEX]] = {}
679 result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
681 pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
682 pt = PrettyTable(pt_cols)
685 pt.padding_width = 1 # One space between column edges and contents (default)
687 for test in unique_tests:
688 if test in result_dict:
689 test_results = result_dict[test]
690 if test in unique_test_desc:
691 row = [target, test, unique_test_desc[test]]
692 for toolchain in unique_toolchains:
693 if toolchain in test_results:
694 row.append(test_results[toolchain])
696 result += pt.get_string()
697 shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND,
698 shuffle_seed if shuffle_seed else self.shuffle_random_seed)
699 result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
    def generate_test_summary(self, test_summary, shuffle_seed=None):
        """ Prints well-formed summary with results (SQL table like)
            table shows target x test result matrix
        """
        result = "Test summary:\n"
        # Pretty table package is used to print results
        pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
                          "Elapsed Time (sec)", "Timeout (sec)", "Loops"])
        pt.align["Result"] = "l"  # Left align
        pt.align["Target"] = "l"  # Left align
        pt.align["Toolchain"] = "l"  # Left align
        pt.align["Test ID"] = "l"  # Left align
        pt.align["Test Description"] = "l"  # Left align
        pt.padding_width = 1  # One space between column edges and contents (default)

        result_dict = {self.TEST_RESULT_OK : 0,
                       self.TEST_RESULT_FAIL : 0,
                       self.TEST_RESULT_ERROR : 0,
                       self.TEST_RESULT_UNDEF : 0,
                       self.TEST_RESULT_IOERR_COPY : 0,
                       self.TEST_RESULT_IOERR_DISK : 0,
                       self.TEST_RESULT_IOERR_SERIAL : 0,
                       self.TEST_RESULT_NO_IMAGE : 0,
                       self.TEST_RESULT_TIMEOUT : 0,
                       self.TEST_RESULT_MBED_ASSERT : 0}

        for test in test_summary:
            if test[0] in result_dict:
                result_dict[test[0]] += 1
            pt.add_row(test)
        result += pt.get_string()
        result += "\n"

        result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
        shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND,
                                                     shuffle_seed if shuffle_seed else self.shuffle_random_seed)
        result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
        return result
    def test_loop_list_to_dict(self, test_loops_str):
        """ Transforms test_id=X,test_id=Y,test_id=Z into dictionary {test_id : test_id_loops_count}
        """
        result = {}
        if test_loops_str:
            test_loops = test_loops_str.split(',')
            for test_loop in test_loops:
                test_loop_count = test_loop.split('=')
                if len(test_loop_count) == 2:
                    _test_id, _test_loops = test_loop_count
                    try:
                        _test_loops = int(_test_loops)
                    except:
                        continue
                    result[_test_id] = _test_loops
        return result
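    # Illustrative example (hypothetical test IDs):
    #   self.test_loop_list_to_dict("MBED_1=5,MBED_2=3")
    #   returns {'MBED_1': 5, 'MBED_2': 3}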
    def get_test_loop_count(self, test_id):
        """ Returns the number of loops to run for a given test (looked up by test_id).
            If the test is not in the list of redefined loop counts, the default value is used.
        """
        result = self.GLOBAL_LOOPS_COUNT
        if test_id in self.TEST_LOOPS_DICT:
            result = self.TEST_LOOPS_DICT[test_id]
        return result

    def delete_file(self, file_path):
        """ Remove file from the system
        """
        result = True
        result_msg = ""
        try:
            os.remove(file_path)
        except Exception, e:
            result = False
            result_msg = e
        return result, result_msg
    def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
        """ Function determines MUT's mbed disk/port and copies binary to disk.
            Test is invoked afterwards.
        """
        data = json.loads(test_spec)
        # Get test information, image and test timeout
        test_id = data['test_id']
        test = TEST_MAP[test_id]
        test_description = TEST_MAP[test_id].get_description()
        image = data["image"]
        duration = data.get("duration", 10)

        # Find a suitable MUT:
        mut = None
        for id, m in self.muts.iteritems():
            if m['mcu'] == data['mcu']:
                mut = m
                break

        if mut is None:
            print "Error: No Mbed available: MUT[%s]" % data['mcu']
            return None, None

        disk = mut.get('disk')
        port = mut.get('port')

        if disk is None or port is None:
            return None, None

        target_by_mcu = TARGET_MAP[mut['mcu']]
        # Some extra stuff can be declared in MUTs structure
        reset_type = mut.get('reset_type')  # reboot.txt, reset.txt, shutdown.txt
        reset_tout = mut.get('reset_tout')  # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
        image_dest = mut.get('image_dest')  # Image file destination: DISK + IMAGE_DEST + BINARY_NAME
        images_config = mut.get('images_config')  # Available images selection via config file
        mobo_config = mut.get('mobo_config')  # Available board configuration selection e.g. core selection etc.
        copy_method = mut.get('copy_method')  # Copy method used to flash this MUT

        # When the build and test system were separate, this was relative to a
        # base network folder: join(NETWORK_BASE_PATH, ...)
        image_path = image

        if self.db_logger:
            self.db_logger.reconnect()

        selected_copy_method = self.opts_copy_method if copy_method is None else copy_method

        # Tests can be looped so test results must be stored for the same test
        test_all_result = []
        # Test results for one test ran few times
        detailed_test_results = {}  # { Loop_number: { results ... } }
        for test_index in range(test_loops):
            # Host test execution
            start_host_exec_time = time()

            single_test_result = self.TEST_RESULT_UNDEF  # single test run result
            _copy_method = selected_copy_method

            if not exists(image_path):
                single_test_result = self.TEST_RESULT_NO_IMAGE
                elapsed_time = 0
                single_test_output = self.logger.log_line(self.logger.LogType.ERROR, 'Image file does not exist: %s'% image_path)
                print single_test_output
            else:
                # Host test execution
                start_host_exec_time = time()

                host_test_verbose = self.opts_verbose_test_result_only or self.opts_verbose
                host_test_reset = self.opts_mut_reset_type if reset_type is None else reset_type
                host_test_result = self.run_host_test(test.host_test,
                                                      image_path, disk, port, duration,
                                                      micro=target_name,
                                                      verbose=host_test_verbose,
                                                      reset=host_test_reset,
                                                      reset_tout=reset_tout,
                                                      copy_method=selected_copy_method,
                                                      program_cycle_s=target_by_mcu.program_cycle_s())
                single_test_result, single_test_output, single_testduration, single_timeout = host_test_result

            # Store test result
            test_all_result.append(single_test_result)
            total_elapsed_time = time() - start_host_exec_time  # Test time with copy (flashing) / reset
            elapsed_time = single_testduration  # Time of single test case execution after reset

            detailed_test_results[test_index] = {
                'single_test_result' : single_test_result,
                'single_test_output' : single_test_output,
                'target_name' : target_name,
                'toolchain_name' : toolchain_name,
                'test_id' : test_id,
                'test_description' : test_description,
                'elapsed_time' : round(elapsed_time, 2),
                'duration' : single_timeout,
                'copy_method' : _copy_method,
            }

            print self.print_test_result(single_test_result, target_name, toolchain_name,
                                         test_id, test_description, elapsed_time, single_timeout)
            # Update database entries for ongoing test
            if self.db_logger and self.db_logger.is_connected():
                test_type = 'SingleTest'
                self.db_logger.insert_test_entry(self.db_logger_build_id,
                                                 target_name,
                                                 toolchain_name,
                                                 test_type,
                                                 test_id,
                                                 single_test_result,
                                                 single_test_output,
                                                 elapsed_time,
                                                 single_timeout,
                                                 test_index)

            # If we perform waterfall test we test until we get OK and then stop testing
            if self.opts_waterfall_test and single_test_result == self.TEST_RESULT_OK:
                break

        if self.db_logger:
            self.db_logger.disconnect()

        return (self.shape_global_test_loop_result(test_all_result),
                target_name,
                toolchain_name,
                test_id,
                test_description,
                round(elapsed_time, 2),
                single_timeout,
                self.shape_test_loop_ok_result_count(test_all_result)), detailed_test_results
    def print_test_result(self, test_result, target_name, toolchain_name,
                          test_id, test_description, elapsed_time, duration):
        """ Uses a specific convention to print test result and related data
        """
        tokens = []
        tokens.append("TargetTest")
        tokens.append(target_name)
        tokens.append(toolchain_name)
        tokens.append(test_id)
        tokens.append(test_description)
        separator = "::"
        time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
        result = separator.join(tokens) + " [" + test_result + "]" + time_info
        return Fore.MAGENTA + result + Fore.RESET

    def shape_test_loop_ok_result_count(self, test_all_result):
        """ Reformats list of results to a simple "ok count / total count" string
        """
        test_loop_count = len(test_all_result)
        test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK)
        return "%d/%d"% (test_loop_ok_result, test_loop_count)

    def shape_global_test_loop_result(self, test_all_result):
        """ Reformats list of results to a single overall result string
        """
        result = self.TEST_RESULT_FAIL
        if all(test_all_result[0] == res for res in test_all_result):
            result = test_all_result[0]
        return result
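    # Illustrative examples for the two shaping helpers above (hypothetical results):
    #   shape_test_loop_ok_result_count(["OK", "FAIL", "OK"])  returns "2/3"
    #   shape_global_test_loop_result(["OK", "OK"])            returns "OK" (all loops agree)
    #   shape_global_test_loop_result(["OK", "FAIL"])          returns "FAIL" (mixed results)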
    def run_host_test(self, name, image_path, disk, port, duration,
                      micro=None, reset=None, reset_tout=None,
                      verbose=False, copy_method=None, program_cycle_s=None):
        """ Creates a new process with a host test configured for a particular test case.
            The function also polls the process for serial port activity to catch all data
            printed by the test runner and the host test during test execution.
        """
        def get_char_from_queue(obs):
            """ Get character from queue in a safe way
            """
            try:
                c = obs.queue.get(block=True, timeout=0.5)
            except Empty:
                c = None
            return c

        def filter_queue_char(c):
            """ Filters out non-ASCII characters from serial port
            """
            if ord(c) not in range(128):
                c = ' '
            return c

        def get_test_result(output):
            """ Parse test 'output' data
            """
            result = self.TEST_RESULT_TIMEOUT
            for line in "".join(output).splitlines():
                search_result = self.RE_DETECT_TESTCASE_RESULT.search(line)
                if search_result and len(search_result.groups()):
                    result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
                    break
            return result
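        # Illustrative note (hypothetical output): if the collected output contains a
        # line with "{success}", get_test_result() returns TEST_RESULT_OK; when no
        # known result token appears, it falls back to TEST_RESULT_TIMEOUT.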
        def get_auto_property_value(property_name, line):
            """ Scans auto detection line from MUT and returns scanned parameter 'property_name'
                Returns the string value or None if the property was not found
            """
            result = None
            if re.search("HOST: Property '%s'"% property_name, line) is not None:
                property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
                if property is not None and len(property.groups()) == 1:
                    result = property.groups()[0]
            return result
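        # Illustrative example (hypothetical line received from a MUT):
        #   get_auto_property_value('timeout', "HOST: Property 'timeout' = '30'")
        #   returns '30'; for a non-matching line it returns None.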
        # print "{%s} port:%s disk:%s" % (name, port, disk),
        cmd = ["python",
               '%s.py'% name,
               '-d', disk,
               '-f', '"%s"'% image_path,
               '-p', port,
               '-t', str(duration),
               '-C', str(program_cycle_s)]

        # Add extra parameters to host_test
        if copy_method is not None:
            cmd += ["-c", copy_method]
        if micro is not None:
            cmd += ["-m", micro]
        if reset is not None:
            cmd += ["-r", reset]
        if reset_tout is not None:
            cmd += ["-R", str(reset_tout)]
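        # Illustrative example (hypothetical values): for name='host_test', disk='E:',
        # port='COM3', duration=10 and copy_method='shutil', the assembled command
        # resembles:
        #   python host_test.py -d E: -f "<image path>" -p COM3 -t 10 -C <cycle> -c shutil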
        if verbose:
            print Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET
            print "Test::Output::Start"

        proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
        obs = ProcessObserver(proc)
        update_once_flag = {}  # Stores flags checking if some auto-parameter was already set
        line = ''
        output = []
        start_time = time()
        while (time() - start_time) < (2 * duration):
            c = get_char_from_queue(obs)

            if c:
                if verbose:
                    sys.stdout.write(c)
                c = filter_queue_char(c)
                output.append(c)
                # Give the mbed under test a way to communicate the end of the test
                if c in ['\n', '\r']:
                    # Checking for auto-detection information from the test about MUT reset moment
                    if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
                        # We will update this marker only once to prevent multiple time resets
                        update_once_flag['reset_target'] = True
                        start_time = time()

                    # Checking for auto-detection information from the test about timeout
                    auto_timeout_val = get_auto_property_value('timeout', line)
                    if 'timeout' not in update_once_flag and auto_timeout_val is not None:
                        # We will update this marker only once to prevent multiple time resets
                        update_once_flag['timeout'] = True
                        duration = int(auto_timeout_val)

                    # Detect mbed assert:
                    if 'mbed assertation failed: ' in line:
                        output.append('{{mbed_assert}}')
                        break

                    # Check for test end
                    if '{end}' in line:
                        break
                    line = ''
                else:
                    line += c
        end_time = time()
        testcase_duration = end_time - start_time  # Test case duration from reset to {end}

        c = get_char_from_queue(obs)

        if c:
            if verbose:
                sys.stdout.write(c)
            c = filter_queue_char(c)
            output.append(c)

        if verbose:
            print "Test::Output::Finish"
        # Stop test process
        obs.stop()

        result = get_test_result(output)
        return (result, "".join(output), testcase_duration, duration)
    def is_peripherals_available(self, target_mcu_name, peripherals=None):
        """ Checks if the specified target should run a specific peripheral test case defined in the MUTs file
        """
        if peripherals is not None:
            peripherals = set(peripherals)
        for id, mut in self.muts.iteritems():
            # Target MCU name check
            if mut["mcu"] != target_mcu_name:
                continue
            if peripherals is not None:
                if 'peripherals' not in mut:
                    continue
                if not peripherals.issubset(set(mut['peripherals'])):
                    continue
            return True
        return False

    def shape_test_request(self, mcu, image_path, test_id, duration=10):
        """ Prepares a JSON structure describing a test specification
        """
        test_spec = {"mcu": mcu,
                     "image": image_path,
                     "duration": duration,
                     "test_id": test_id}
        return json.dumps(test_spec)
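    # Illustrative example (hypothetical arguments):
    #   shape_test_request('LPC1768', './build/test.bin', 'MBED_11', duration=10)
    # returns a JSON string similar to:
    #   '{"mcu": "LPC1768", "image": "./build/test.bin", "duration": 10, "test_id": "MBED_11"}'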
def get_unique_value_from_summary(test_summary, index):
    """ Gets list of unique target names
    """
    result = []
    for test in test_summary:
        target_name = test[index]
        if target_name not in result:
            result.append(target_name)
    return sorted(result)


def get_unique_value_from_summary_ext(test_summary, index_key, index_val):
    """ Gets list of unique target names and returns a dictionary
    """
    result = {}
    for test in test_summary:
        key = test[index_key]
        val = test[index_val]
        if key not in result:
            result[key] = val
    return result
def show_json_file_format_error(json_spec_filename, line, column):
    """ Prints JSON broken content
    """
    with open(json_spec_filename) as data_file:
        line_no = 1
        for json_line in data_file:
            if line_no + 5 >= line:  # Print last few lines before error
                print 'Line %d:\t'%line_no + json_line,  # Prints line
            if line_no == line:  # Sets pointer to error column
                print ' ' * len('Line %d:'%line_no) + '\t', '-' * (column - 1) + '^'
                break
            line_no += 1


def json_format_error_defect_pos(json_error_msg):
    """ Gets first error line and column in JSON file format.
        Parsed from exception thrown by json.loads() string
    """
    result = None
    line = None
    column = None
    # Line position search
    line_search = re.search('line [0-9]+', json_error_msg)
    if line_search is not None:
        ls = line_search.group().split(' ')
        if len(ls) == 2:
            line = int(ls[1])
            # Column position search
            column_search = re.search('column [0-9]+', json_error_msg)
            if column_search is not None:
                cs = column_search.group().split(' ')
                if len(cs) == 2:
                    column = int(cs[1])
                    result = [line, column]
    return result
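# Illustrative example (hypothetical json.loads() error message):
#   json_format_error_defect_pos("Expecting ',' delimiter: line 7 column 23 (char 210)")
#   returns [7, 23]; messages without line/column information yield None.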
def get_json_data_from_file(json_spec_filename, verbose=False):
    """ Loads JSON formatted data from file into a data structure
    """
    result = None
    try:
        with open(json_spec_filename) as data_file:
            try:
                result = json.load(data_file)
            except ValueError as json_error_msg:
                result = None
                print 'JSON file %s parsing failed. Reason: %s' % (json_spec_filename, json_error_msg)
                # We can print where error occurred inside JSON file if we can parse exception msg
                json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
                if json_format_defect_pos is not None:
                    line = json_format_defect_pos[0]
                    column = json_format_defect_pos[1]
                    show_json_file_format_error(json_spec_filename, line, column)
    except IOError as fileopen_error_msg:
        print 'JSON file %s not opened. Reason: %s'% (json_spec_filename, fileopen_error_msg)

    if verbose and result:
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(result)
    return result
def print_muts_configuration_from_json(json_data, join_delim=", ", platform_filter=None):
    """ Prints MUTs configuration passed to test script for verboseness
    """
    muts_info_cols = []
    # We need to check all unique properties for each defined MUT
    for k in json_data:
        mut_info = json_data[k]
        for mut_property in mut_info:
            if mut_property not in muts_info_cols:
                muts_info_cols.append(mut_property)

    # Prepare pretty table object to display all MUTs
    pt_cols = ["index"] + muts_info_cols
    pt = PrettyTable(pt_cols)
    for col in pt_cols:
        pt.align[col] = "l"

    # Add rows to pretty print object
    for k in json_data:
        row = [k]
        mut_info = json_data[k]

        add_row = True
        if platform_filter and 'mcu' in mut_info:
            add_row = re.search(platform_filter, mut_info['mcu']) is not None
        if add_row:
            for col in muts_info_cols:
                cell_val = mut_info[col] if col in mut_info else None
                if type(cell_val) == ListType:
                    cell_val = join_delim.join(cell_val)
                row.append(cell_val)
            pt.add_row(row)
    return pt.get_string()
def print_test_configuration_from_json(json_data, join_delim=", "):
    """ Prints test specification configuration passed to test script for verboseness
    """
    toolchains_info_cols = []
    # We need to check all toolchains for each device
    for k in json_data:
        # k should be 'targets'
        targets = json_data[k]
        for target in targets:
            toolchains = targets[target]
            for toolchain in toolchains:
                if toolchain not in toolchains_info_cols:
                    toolchains_info_cols.append(toolchain)

    # Prepare pretty table object to display test specification
    pt_cols = ["mcu"] + sorted(toolchains_info_cols)
    pt = PrettyTable(pt_cols)
    for col in pt_cols:
        pt.align[col] = "l"

    # { target : [conflicted toolchains] }
    toolchain_conflicts = {}
    toolchain_path_conflicts = []
    for k in json_data:
        # k should be 'targets'
        targets = json_data[k]
        for target in targets:
            target_supported_toolchains = get_target_supported_toolchains(target)
            if not target_supported_toolchains:
                target_supported_toolchains = []
            target_name = target if target in TARGET_MAP else "%s*"% target
            row = [target_name]
            toolchains = targets[target]

            for toolchain in sorted(toolchains_info_cols):
                # Check for conflicts: target vs toolchain
                conflict = False
                conflict_path = False
                if toolchain in toolchains:
                    if toolchain not in target_supported_toolchains:
                        conflict = True
                        if target not in toolchain_conflicts:
                            toolchain_conflicts[target] = []
                        toolchain_conflicts[target].append(toolchain)
                # Add marker inside table about target usage / conflict
                cell_val = 'Yes' if toolchain in toolchains else '-'
                if conflict:
                    cell_val += '*'
                # Check for conflicts: toolchain vs toolchain path
                if toolchain in TOOLCHAIN_BIN_PATH:
                    toolchain_path = TOOLCHAIN_BIN_PATH[toolchain]
                    if not os.path.isdir(toolchain_path):
                        conflict_path = True
                        if toolchain not in toolchain_path_conflicts:
                            toolchain_path_conflicts.append(toolchain)
                if conflict_path:
                    cell_val += '#'
                row.append(cell_val)
            pt.add_row(row)

    # generate result string
    result = pt.get_string()  # Test specification table
    if toolchain_conflicts or toolchain_path_conflicts:
        result += "\n"
        result += "Toolchain conflicts:\n"
        for target in toolchain_conflicts:
            if target not in TARGET_MAP:
                result += "\t* Target %s unknown\n"% (target)
            conflict_target_list = join_delim.join(toolchain_conflicts[target])
            suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
            result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, suffix)

        for toolchain in toolchain_path_conflicts:
            # Let's check toolchain configuration
            if toolchain in TOOLCHAIN_BIN_PATH:
                toolchain_path = TOOLCHAIN_BIN_PATH[toolchain]
                if not os.path.isdir(toolchain_path):
                    result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
    return result
def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=',', platform_filter=None):
    """ Generates table summary with all test cases and additional test cases
        information using pretty print functionality. Allows test suite user to
        see all test cases
    """
    # get all unique test ID prefixes
    unique_test_id = []
    for test in TESTS:
        split = test['id'].split('_')[:-1]
        test_id_prefix = '_'.join(split)
        if test_id_prefix not in unique_test_id:
            unique_test_id.append(test_id_prefix)
    unique_test_id.sort()
    counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
    counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)

    test_properties = ['id',
                       'automated',
                       'description',
                       'peripherals',
                       'host_test',
                       'duration'] if cols is None else cols
    # All tests status table print
    pt = PrettyTable(test_properties)
    for col in test_properties:
        pt.align[col] = "l"
    pt.align['duration'] = "r"

    counter_all = 0
    counter_automated = 0
    pt.padding_width = 1  # One space between column edges and contents (default)

    for test_id in sorted(TEST_MAP.keys()):
        if platform_filter is not None:
            # Filter out platforms using regex
            if re.search(platform_filter, test_id) is None:
                continue
        row = []
        test = TEST_MAP[test_id]
        split = test_id.split('_')[:-1]
        test_id_prefix = '_'.join(split)

        for col in test_properties:
            col_value = test[col]
            if type(test[col]) == ListType:
                col_value = join_delim.join(test[col])
            elif test[col] is None:
                col_value = "-"

            row.append(col_value)
        if test['automated'] == True:
            counter_dict_test_id_types[test_id_prefix] += 1
            counter_automated += 1
        pt.add_row(row)
        # Update counters
        counter_all += 1
        counter_dict_test_id_types_all[test_id_prefix] += 1
    result = pt.get_string()
    result += "\n\n"

    if result_summary and not platform_filter:
        # Automation result summary
        test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
        pt = PrettyTable(test_id_cols)
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
        pt.align['percent [%]'] = "r"

        percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
        str_progress = progress_bar(percent_progress, 75)
        pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
        result += "Automation coverage:\n"
        result += pt.get_string()
        result += "\n\n"

        # Test automation coverage table print
        test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
        pt = PrettyTable(test_id_cols)
        pt.align['id'] = "l"
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
        pt.align['percent [%]'] = "r"
        for unique_id in unique_test_id:
            # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
            percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
            str_progress = progress_bar(percent_progress, 75)
            row = [unique_id,
                   counter_dict_test_id_types[unique_id],
                   counter_dict_test_id_types_all[unique_id],
                   percent_progress,
                   "[" + str_progress + "]"]
            pt.add_row(row)
        result += "Test automation coverage:\n"
        result += pt.get_string()
        result += "\n\n"
    return result
def progress_bar(percent_progress, saturation=0):
    """ This function creates progress bar with optional simple saturation mark
    """
    step = int(percent_progress / 2)  # Scale percentage to a 0-50 character bar
    str_progress = '#' * step + '.' * int(50 - step)
    c = '!' if str_progress[38] == '.' else '|'
    if saturation > 0:
        saturation = saturation / 2
        str_progress = str_progress[:saturation] + c + str_progress[saturation:]
    return str_progress
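# Illustrative example: progress_bar(50.0, 75) renders a 50-character bar of
# 25 '#' and 25 '.' characters; because character 38 of that bar is '.',
# the saturation mark '!' is inserted at position 75/2 = 37.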
def singletest_in_cli_mode(single_test):
    """ Runs SingleTestRunner object in CLI (Command line interface) mode
    """
    start = time()
    # Execute tests depending on options and filter applied
    test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report = single_test.execute()
    elapsed_time = time() - start

    # Human readable summary
    if not single_test.opts_suppress_summary:
        # prints well-formed summary with results (SQL table like)
        print single_test.generate_test_summary(test_summary, shuffle_seed)
    if single_test.opts_test_x_toolchain_summary:
        # prints well-formed summary with results (SQL table like)
        # table shows test x toolchain test result matrix
        print single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
    print "Completed in %.2f sec"% (elapsed_time)

    # Store extra reports in files
    if single_test.opts_report_html_file_name:
        # Export results in form of HTML report to separate file
        report_exporter = ReportExporter(ResultExporterType.HTML)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_junit_file_name:
        # Export results in form of JUnit XML report to separate file
        report_exporter = ReportExporter(ResultExporterType.JUNIT)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_build_file_name:
        # Export build results as HTML report to separate file
        write_build_report(build_report, 'tests_build/report.html', single_test.opts_report_build_file_name)
1447 """ Super-class for logging and printing ongoing events for test suite pass
1449 def __init__(self, store_log=True):
1450 """ We can control if logger actually stores log in memory
1451 or just handled all log entries immediately
1454 self.log_to_file = False
1455 self.log_file_name = None
1456 self.store_log = store_log
1458 self.LogType = construct_enum(INFO='Info',
1460 NOTIF='Notification',
1464 self.LogToFileAttr = construct_enum(CREATE=1, # Create or overwrite existing log file
1465 APPEND=2) # Append to existing log file
1467 def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
1468 """ Log one line of text
1470 log_timestamp = time()
1471 log_entry = {'log_type' : LogType,
1472 'log_timestamp' : log_timestamp,
1473 'log_line' : log_line,
1476 # Store log in memory
1478 self.log.append(log_entry)
class CLITestLogger(TestLogger):
    """ Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
    """
    def __init__(self, store_log=True, file_name=None):
        TestLogger.__init__(self)
        self.log_file_name = file_name
        #self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S'  # Full date and time
        self.TIMESTAMP_FORMAT = '%H:%M:%S'  # Time only

    def log_print(self, log_entry, timestamp=True):
        """ Prints on screen formatted log entry
        """
        ts = log_entry['log_timestamp']
        timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT) if timestamp else ''
        log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
        return timestamp_str + log_line_str
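    # Illustrative example (hypothetical log entry): an entry created at 12:34:56
    # with log_type 'Info' and log_line 'Detecting database...' is rendered as:
    #   [12:34:56] Info: Detecting database...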
    def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
        """ Logs line; if log file output was specified, the log line will be appended
            at the end of the log file
        """
        log_entry = TestLogger.log_line(self, LogType, log_line)
        log_line_str = self.log_print(log_entry, timestamp)
        if self.log_file_name is not None:
            try:
                with open(self.log_file_name, 'a') as f:
                    f.write(log_line_str + line_delim)
            except IOError:
                pass
        return log_line_str
def factory_db_logger(db_url):
    """ Factory for a database driver, depending on the database type supplied in the connection string db_url
    """
    if db_url is not None:
        from workspace_tools.test_mysql import MySQLDBAccess
        connection_info = BaseDBAccess().parse_db_connection_string(db_url)
        if connection_info is not None:
            (db_type, username, password, host, db_name) = BaseDBAccess().parse_db_connection_string(db_url)
            if db_type == 'mysql':
                return MySQLDBAccess()
    return None
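# Illustrative usage (hypothetical credentials, matching the --db option format):
#   db = factory_db_logger('mysql://username:password@127.0.0.1/db_name')  # -> MySQLDBAccess instance
#   db = factory_db_logger(None)                                           # -> None, no database configured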
def detect_database_verbose(db_url):
    """ Uses verbose mode (prints) to check whether the database connection string is valid
    """
    result = BaseDBAccess().parse_db_connection_string(db_url)
    if result is not None:
        (db_type, username, password, host, db_name) = result
        #print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
        # Let's try to connect
        db_ = factory_db_logger(db_url)
        if db_ is not None:
            print "Connecting to database '%s'..."% db_url,
            db_.connect(host, username, password, db_name)
            if db_.is_connected():
                print "ok"
                print "Detecting database..."
                print db_.detect_database(verbose=True)
                print "Disconnecting...",
                db_.disconnect()
                print "done"
        else:
            print "Database type '%s' unknown"% db_type
    else:
        print "Parse error: '%s' - DB Url error"% (db_url)
def get_module_avail(module_name):
    """ Returns True if module_name is an already imported module
    """
    return module_name in sys.modules.keys()


def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
    """ Detects all mbed-enabled devices connected to the host and generates an artificial MUTs file.
        If the function fails to auto-detect devices it will return an empty dictionary.

        if get_module_avail('mbed_lstools'):
            mbeds = mbed_lstools.create()
            mbeds_list = mbeds.list_mbeds()

        @param mbeds_list list of mbeds captured from mbed_lstools
        @param platform_name You can filter 'platform_name' with list of filtered targets from 'platform_name_filter'
    """
    result = {}  # Should be in muts_all.json format
    # Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
    # mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
    index = 1
    for mut in mbeds_list:
        m = {'mcu' : mut['platform_name'],
             'port' : mut['serial_port'],
             'disk' : mut['mount_point'],
             'peripherals' : []  # No peripheral detection
            }
        if index not in result:
            result[index] = {}
        result[index] = m
        index += 1
    return result
def get_autodetected_TEST_SPEC(mbeds_list,
                               use_default_toolchain=True,
                               use_supported_toolchains=False,
                               toolchain_filter=None,
                               platform_name_filter=None):
    """ Detects all mbed-enabled devices connected to the host and generates an artificial test_spec file.
        If the function fails to auto-detect devices it will return an empty 'targets' test_spec description.

        use_default_toolchain - if True add default toolchain to test_spec
        use_supported_toolchains - if True add all supported toolchains to test_spec
        toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
    """
    result = {'targets': {} }

    for mut in mbeds_list:
        mcu = mut['platform_name']
        if platform_name_filter is None or (platform_name_filter and mut['platform_name'] in platform_name_filter):
            if mcu in TARGET_MAP:
                default_toolchain = TARGET_MAP[mcu].default_toolchain
                supported_toolchains = TARGET_MAP[mcu].supported_toolchains

                # Decide which toolchains should be added to test specification toolchain pool for each target
                toolchains = []
                if use_default_toolchain:
                    toolchains.append(default_toolchain)
                if use_supported_toolchains:
                    toolchains += supported_toolchains
                if toolchain_filter is not None:
                    all_toolchains = supported_toolchains + [default_toolchain]
                    for toolchain in toolchain_filter.split(','):
                        if toolchain in all_toolchains:
                            toolchains.append(toolchain)

                result['targets'][mcu] = list(set(toolchains))
    return result
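# Illustrative example (hypothetical detected board): for a single connected
# LPC1768 whose default toolchain is ARM, called with use_default_toolchain=True
# and no filters, the returned structure resembles:
#   {'targets': {'LPC1768': ['ARM']}}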
def get_default_test_options_parser():
    """ Get common test script options used by CLI, web services etc.
    """
    parser = optparse.OptionParser()
    parser.add_option('-i', '--tests',
                      dest='test_spec_filename',
                      help='Points to file with test specification')

    parser.add_option('-M', '--MUTS',
                      dest='muts_spec_filename',
                      help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')

    parser.add_option("-j", "--jobs",
                      help="Define number of compilation jobs. Default value is 1")

    if get_module_avail('mbed_lstools'):
        # Additional features available when mbed_lstools is installed on host and imported
        # mbed_lstools allows users to detect mbed-enabled devices connected to the host
        parser.add_option('', '--auto',
                          action="store_true",
                          help='Use mbed-ls module to detect all connected mbed devices')

        parser.add_option('', '--tc',
                          dest='toolchains_filter',
                          help="Toolchain filter for --auto option. Use toolchain names separated by comma, 'default' or 'all' to select toolchains")

    parser.add_option('', '--clean',
                      action="store_true",
                      help='Clean the build directory')

    parser.add_option('-P', '--only-peripherals',
                      dest='test_only_peripheral',
                      action="store_true",
                      help='Test only peripherals declared for MUT and skip common tests')

    parser.add_option('-C', '--only-commons',
                      dest='test_only_common',
                      action="store_true",
                      help='Test only board internals. Skip peripheral tests and perform common tests.')

    parser.add_option('-n', '--test-by-names',
                      dest='test_by_names',
                      help='Runs only tests enumerated in this switch. Use comma to separate test case names.')

    parser.add_option('-p', '--peripheral-by-names',
                      dest='peripheral_by_names',
                      help='Forces discovery of particular peripherals. Use comma to separate peripheral names.')

    copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
    copy_methods_str = "Plugin support: " + ', '.join(copy_methods)

    parser.add_option('-c', '--copy-method',
                      help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)

    reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
    reset_methods_str = "Plugin support: " + ', '.join(reset_methods)

    parser.add_option('-r', '--reset-type',
                      dest='mut_reset_type',
                      help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)

    parser.add_option('-g', '--goanna-for-tests',
                      dest='goanna_for_tests',
                      action="store_true",
                      help='Run Goanna static analysis tool for tests (projects will be rebuilt)')

    parser.add_option('-G', '--goanna-for-sdk',
                      dest='goanna_for_mbed_sdk',
                      action="store_true",
                      help='Run Goanna static analysis tool for mbed SDK (project will be rebuilt)')

    parser.add_option('-s', '--suppress-summary',
                      dest='suppress_summary',
                      action="store_true",
                      help='Suppresses display of well-formatted table with test results')

    parser.add_option('-t', '--test-summary',
                      dest='test_x_toolchain_summary',
                      action="store_true",
                      help='Displays well-formatted table with test x toolchain test result per target')

    parser.add_option('-A', '--test-automation-report',
                      dest='test_automation_report',
                      action="store_true",
                      help='Prints information about all tests and exits')

    parser.add_option('-R', '--test-case-report',
                      dest='test_case_report',
                      action="store_true",
                      help='Prints information about all test cases and exits')

    parser.add_option("-S", "--supported-toolchains",
                      action="store_true",
                      dest="supported_toolchains",
                      help="Displays supported matrix of MCUs and toolchains")

    parser.add_option("-O", "--only-build",
                      action="store_true",
                      dest="only_build_tests",
                      help="Only build tests, skips actual test procedures (flashing etc.)")

    parser.add_option('', '--parallel',
                      dest='parallel_test_exec',
                      action="store_true",
                      help='Experimental: executes test runners in parallel for MUTs connected to your host (speeds up test result collection)')

    parser.add_option('', '--config',
                      dest='verbose_test_configuration_only',
                      action="store_true",
                      help='Displays full test specification and MUTs configuration and exits')

    parser.add_option('', '--loops',
                      dest='test_loops_list',
                      help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')

    parser.add_option('', '--global-loops',
                      dest='test_global_loops_value',
                      help='Set global number of test loops per test. Default value is 1')

    parser.add_option('-W', '--waterfall',
                      dest='waterfall_test',
                      action="store_true",
                      help='Used with --loops or --global-loops options. Tests until an OK result occurs and assumes the test passed.')

    parser.add_option('-N', '--firmware-name',
                      dest='firmware_global_name',
                      help='Set global name for all produced projects. Note: proper file extension will be added by build scripts.')

    parser.add_option('-u', '--shuffle',
                      dest='shuffle_test_order',
                      action="store_true",
                      help='Shuffles test execution order')

    parser.add_option('', '--shuffle-seed',
                      dest='shuffle_test_seed',
                      help='Shuffle seed (if you want to reproduce your shuffle order please use seed provided in test summary)')

    parser.add_option('-f', '--filter',
                      dest='general_filter_regex',
                      help='For some commands you can use filter to filter out results')

    parser.add_option('', '--inc-timeout',
                      dest='extend_test_timeout',
                      help='You can increase global timeout for each test by specifying additional test timeout in seconds')

    parser.add_option('', '--db',
                      dest='db_url',
                      help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'')

    parser.add_option('-l', '--log',
                      dest='log_file_name',
                      help='Log events to external file (note not all console entries may be visible in log file)')

    parser.add_option('', '--report-html',
                      dest='report_html_file_name',
                      help='You can log test suite results in form of HTML report')

    parser.add_option('', '--report-junit',
                      dest='report_junit_file_name',
                      help='You can log test suite results in form of JUnit compliant XML report')

    parser.add_option("", "--report-build",
                      dest="report_build_file_name",
                      help="Output the build results to an HTML file")

    parser.add_option('', '--verbose-skipped',
                      dest='verbose_skipped_tests',
                      action="store_true",
                      help='Prints some extra information about skipped tests')

    parser.add_option('-V', '--verbose-test-result',
                      dest='verbose_test_result_only',
                      action="store_true",
                      help='Prints test serial output')

    parser.add_option('-v', '--verbose',
                      action="store_true",
                      help='Verbose mode (prints some extra information)')

    parser.add_option('', '--version',
                      action="store_true",
                      help='Prints script version and exits')
    return parser