analyze_outcomes: escape {} in string format for test description

{ and } are valid characters in a test description, but they are not
escaped before the description is passed to Python's str.format(), which
raises a KeyError when it encounters an unexpected {XXX} replacement
field. To fix this, we replace each { with {{ and each } with }} so that
braces in the description are treated literally by str.format().

In addition, other calls to Results.log() are updated to pass the format
string and its arguments separately, instead of pre-formatted or
concatenated strings, to avoid similar problems.

Signed-off-by: Yanray Wang <yanray.wang@arm.com>
diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
index 119dbb5..085bff2 100755
--- a/tests/scripts/analyze_outcomes.py
+++ b/tests/scripts/analyze_outcomes.py
@@ -60,13 +60,13 @@
     # If the outcome file already exists, we assume that the user wants to
     # perform the comparison analysis again without repeating the tests.
     if os.path.exists(outcome_file):
-        Results.log("Outcome file (" + outcome_file + ") already exists. " + \
-                    "Tests will be skipped.")
+        Results.log("Outcome file {} already exists. Tests will be skipped.",
+                    outcome_file)
         return
 
     shell_command = "tests/scripts/all.sh --outcome-file " + outcome_file + \
                     " " + ref_component + " " + driver_component
-    Results.log("Running: " + shell_command)
+    Results.log("Running: {}", shell_command)
     ret_val = subprocess.run(shell_command.split(), check=False).returncode
 
     if ret_val != 0:
@@ -101,6 +101,7 @@
     """
     available = check_test_cases.collect_available_test_cases()
     result = True
+    escape_curly_brace = lambda x: x.replace('{', '{{').replace('}', '}}')
 
     for key in available:
         # Continue if test was not executed by any component
@@ -125,7 +126,7 @@
             if component_ref in entry:
                 reference_test_passed = True
         if(reference_test_passed and not driver_test_passed):
-            Results.log(key)
+            Results.log(escape_curly_brace(key))
             result = False
     return result
 
@@ -172,8 +173,8 @@
     ignored_suites = ['test_suite_' + x for x in args['ignored_suites']]
 
     outcomes = read_outcome_file(outcome_file)
-    Results.log("\n*** Analyze driver {} vs reference {} ***\n".format(
-        args['component_driver'], args['component_ref']))
+    Results.log("\n*** Analyze driver {} vs reference {} ***\n",
+                args['component_driver'], args['component_ref'])
     return analyze_driver_vs_reference(outcomes, args['component_ref'],
                                        args['component_driver'], ignored_suites,
                                        args['ignored_tests'])
@@ -652,7 +653,7 @@
 
             for task in tasks:
                 if task not in TASKS:
-                    Results.log('Error: invalid task: {}'.format(task))
+                    Results.log('Error: invalid task: {}', task)
                     sys.exit(1)
 
         TASKS['analyze_coverage']['args']['full_coverage'] = \