analyze_outcomes.py: Add test coverage regression analysis for driver-only builds

Signed-off-by: Przemek Stekiel <przemyslaw.stekiel@mobica.com>
diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
index d06a059..f5d2ac1 100755
--- a/tests/scripts/analyze_outcomes.py
+++ b/tests/scripts/analyze_outcomes.py
@@ -60,6 +60,41 @@
             # fixed this branch to have full coverage of test cases.
             results.warning('Test case not executed: {}', key)
 
+def analyze_driver_vs_reference(outcomes, components, ignored_tests):
+    """Check that all tests executed in the reference component are also
+    executed in the corresponding driver component.
+    Skip test suites provided in the ignored_tests list.
+    """
+    driver_component = components[0]
+    reference_component = components[1]
+    available = check_test_cases.collect_available_test_cases()
+    result = True
+
+    for key in available:
+        # Skip ignored test suites
+        test_suit = key.split(';')[0] # retrieve test suite name
+        test_suit = test_suit.split('.')[0] # retrieve main part of test suite name
+        if(test_suit in ignored_tests):
+            continue
+        # Continue if test was not executed by any component
+        hits = outcomes[key].hits() if key in outcomes else 0
+        if(hits == 0):
+            continue
+        # Search for tests that run in reference component and not in driver component
+        driver_test_passed = False
+        reference_test_passed = False
+        for entry in outcomes[key].successes:
+            if(driver_component in entry):
+                driver_test_passed = True
+            if(reference_component in entry):
+                reference_test_passed = True
+        #if(driver_test_passed == True and reference_test_passed == False):
+        #    print('{}: driver: passed; reference: skipped'.format(key))
+        if(driver_test_passed == False and reference_test_passed == True):
+            print('{}: driver: skipped/failed; reference: passed'.format(key))
+            result = False
+    return result
+
 def analyze_outcomes(outcomes):
     """Run all analyses on the given outcome collection."""
     results = Results()
@@ -87,20 +122,50 @@
                 outcomes[key].failures.append(setup)
     return outcomes
 
-def analyze_outcome_file(outcome_file):
-    """Analyze the given outcome file."""
+def do_analyze_coverage(outcome_file):
+    """Perform coverage analyze."""
     outcomes = read_outcome_file(outcome_file)
-    return analyze_outcomes(outcomes)
+    results = analyze_outcomes(outcomes)
+    return (True if results.error_count == 0 else False)
+
+def do_analyze_driver_vs_reference(outcome_file, components, ignored_tests):
+    """Perform driver vs reference analyze."""
+    # We need exactly 2 components to analyze (first driver and second reference)
+    if(len(components) != 2 or "accel" not in components[0] or "reference" not in components[1]):
+        print('Error: Wrong component list. Exactly 2 components are required (driver,reference). ')
+        return False
+    outcomes = read_outcome_file(outcome_file)
+    return analyze_driver_vs_reference(outcomes, components, ignored_tests)
 
 def main():
     try:
         parser = argparse.ArgumentParser(description=__doc__)
-        parser.add_argument('outcomes', metavar='OUTCOMES.CSV',
+        parser.add_argument('--outcomes', metavar='OUTCOMES.CSV',
                             help='Outcome file to analyze')
+        parser.add_argument('--task',
+                            help='Analyze to be done: analyze_coverage or analyze_driver_vs_reference')
+        parser.add_argument('--components',
+                            help='List of test components to compare. Must be exactly 2 in valid order: driver,reference. '
+                            'Apply only for analyze_driver_vs_reference task.')
+        parser.add_argument('--ignore',
+                            help='List of test suits to ignore. Apply only for analyze_driver_vs_reference task.')
         options = parser.parse_args()
-        results = analyze_outcome_file(options.outcomes)
-        if results.error_count > 0:
+
+        result = False
+
+        if(options.task == 'analyze_coverage'):
+            result = do_analyze_coverage(options.outcomes)
+        elif(options.task == 'analyze_driver_vs_reference'):
+            components_list = options.components.split(',')
+            ignored_tests_list = options.ignore.split(',')
+            ignored_tests_list = ['test_suite_' + x for x in ignored_tests_list]
+            result = do_analyze_driver_vs_reference(options.outcomes, components_list, ignored_tests_list)
+        else:
+            print('Error: Unknown task: {}'.format(options.task))
+
+        if(result == False):
             sys.exit(1)
+        print("SUCCESS :-)")
     except Exception: # pylint: disable=broad-except
         # Print the backtrace and exit explicitly with our chosen status.
         traceback.print_exc()