#!/usr/bin/env python3

""" report_parser.py:

    Report parser parses openci json reports and conveys the information in
    one or more standard formats (To be implemented)

    After all information is captured it validates the success/failure status
    and can change the script exit code for integration with standard CI
    executors.
    """

from __future__ import print_function

__copyright__ = """
/*
 * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */
"""
__author__ = "Minos Galanakis"
__email__ = "minos.galanakis@linaro.org"
__project__ = "Trusted Firmware-M Open CI"
__status__ = "stable"
__version__ = "1.1"


import os
import re
import sys
import json
import argparse
from pprint import pprint

try:
    from tfm_ci_pylib.utils import load_json, get_local_git_info, \
        save_json, list_subdirs, get_remote_git_info, \
        convert_git_ref_path, xml_read
except ImportError:
    dir_path = os.path.dirname(os.path.realpath(__file__))
    sys.path.append(os.path.join(dir_path, "../"))

    from tfm_ci_pylib.utils import load_json, get_local_git_info, \
        save_json, list_subdirs, get_remote_git_info, \
        convert_git_ref_path, xml_read


def split_keys(joint_arg, sep="="):
    """ Split two keys spread by a separator, and return them as a list
    with whitespace removed """

    keys = joint_arg.split(sep)

    # Remove whitespace
    keys = list(map(str.strip, keys))
    # If a key contains the word True/False convert it to a boolean;
    # a lookup table avoids the eval() call used previously
    keys = [{"true": True, "false": False}.get(x.lower(), x) for x in keys]
    return keys
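
# Illustrative examples (not part of the CI flow):
#   split_keys("status = Success") -> ["status", "Success"]
#   split_keys("success = True")   -> ["success", True]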


def dependencies_mdt_collect(path_list,
                             out_f=None,
                             known_content_types=["mbedcrypto",
                                                  "cmsis",
                                                  "checkpatch",
                                                  "fpga",
                                                  "fastmodel"],
                             expected_paths=["mbedcrypto",
                                             "cmsis",
                                             "checkpatch"]):
    """ Collect dependencies checkout metadata. It creates a json report which
    can be optionally exported to a file """

    cpaths = {k: v for k, v in [n.split("=") for n in path_list]}
    cwd = os.path.abspath(os.getcwd())

    # Test that all the required paths are present
    intsec_set = set(expected_paths).intersection(set(cpaths.keys()))
    if len(intsec_set) != len(set(expected_paths)):
        _missing = set(expected_paths).difference(intsec_set)
        err_msg = "Error missing core paths.\nRequired: %s\nPresent: %s" % (
            ",".join(_missing), ",".join(cpaths.keys())
        )
        print(err_msg)
        raise Exception(err_msg)

    # Create a dataset for the entries of known data format
    known_data = {n: {} for n in
                  set(known_content_types).intersection(set(cpaths.keys()))}

    # Create a dataset for unexpected data entries of unknown format
    extra_data = {n: {}
                  for n in set(cpaths.keys()).difference(set(known_data))}

    for d in list_subdirs(cpaths["mbedcrypto"]):
        print("mbed-crypto dir: ", d)
        # If the checkout directory name contains a git reference, convert
        # it to the short format
        d = convert_git_ref_path(d)

        git_info = get_local_git_info(d)
        tag = os.path.split(git_info["dir"])[-1].split("-")[-1]

        # Absolute paths will not work in Jenkins since it changes the
        # workspace directory between stages; convert to a relative path
        git_info["dir"] = os.path.relpath(git_info["dir"], cwd)
        known_data["mbedcrypto"][tag] = git_info

    for d in list_subdirs(cpaths["cmsis"]):
        print("CMSIS subdir: ", d)
        d = convert_git_ref_path(d)
        git_info = get_local_git_info(d)
        tag = os.path.split(git_info["dir"])[-1]

        # Absolute paths will not work in Jenkins since it changes the
        # workspace directory between stages; convert to a relative path
        git_info["dir"] = os.path.relpath(git_info["dir"], cwd)
        known_data["cmsis"][tag] = git_info

    for d in list_subdirs(cpaths["checkpatch"]):
        print("Checkpatch subdir:", d)

        with open(os.path.join(d, "version.info"), "r") as F:
            url = F.readline().strip()

        git_info = get_remote_git_info(url)
        d = convert_git_ref_path(d)
        git_info['dir'] = d
        tag = os.path.split(git_info["dir"])[-1].split("_")[-1]

        # Absolute paths will not work in Jenkins since it changes the
        # workspace directory between stages; convert to a relative path
        git_info["dir"] = os.path.relpath(git_info["dir"], cwd)
        known_data["checkpatch"][tag] = git_info

139 if "fastmodel" in cpaths:
140 for d in list_subdirs(cpaths["fastmodel"]):
141 print("Fastmodel subdir:", d)
142 json_info = load_json(os.path.join(d, "version.info"))
143 json_info["dir"] = os.path.relpath(d, cwd)
144
145 tag = json_info["version"]
146 # Absolute paths will not work in jenkins since it will change the
147 # workspaace directory between stages convert to relative path
148 known_data["fastmodel"][tag] = json_info
149
Minos Galanakisea421232019-06-20 17:11:28 +0100150 if "fpga" in cpaths:
151 for d in os.listdir(cpaths["fpga"]):
152 print("FPGA imagefile:", d)
153 if ".tar.gz" in d:
154 name = d.split(".tar.gz")[0]
155 platform, subsys, ver = name.split("_")
Minos Galanakisc8859352020-03-10 16:55:30 +0000156 known_data["fpga"][name] = {"platform": platform,
157 "subsys": subsys,
158 "version": ver,
159 "recovery": os.path.join(
160 cpaths["fpga"],
161 d)}
Minos Galanakis27046222019-11-06 15:58:48 +0000162
    # Attempt to detect what the unexpected paths contain
    for e_path in extra_data.keys():
        for d in list_subdirs(cpaths[e_path]):
            print("%s subdir: %s" % (e_path, d))
            # If it contains a version.info
            if os.path.isfile(os.path.join(d, "version.info")):
                json_info = load_json(os.path.join(d, "version.info"))
                json_info["dir"] = os.path.relpath(d, cwd)

                tag = json_info["version"]
                # Absolute paths will not work in Jenkins since it changes
                # the workspace directory between stages; convert to rel-path
                extra_data[e_path][tag] = json_info
            # If it contains git information
            elif os.path.exists(os.path.join(d, ".git")):
                d = convert_git_ref_path(d)

                git_info = get_local_git_info(d)
                tag = os.path.split(git_info["dir"])[-1].split("-")[-1]

                # Absolute paths will not work in Jenkins since it changes
                # the workspace directory between stages; convert to rel-path
                git_info["dir"] = os.path.relpath(git_info["dir"], cwd)
                extra_data[e_path][tag] = git_info
            # Do not break flow if detection fails
            else:
                print("Error determining contents of directory: %s/%s for "
                      "indexing purposes" % (e_path, d))
                # Fall back to the directory name as tag; previously `tag`
                # could be stale or undefined at this point
                tag = os.path.split(d)[-1]
                extra_data[e_path][tag] = {"info": "N.A"}

    # Add the extra paths to the expected ones
    for k, v in extra_data.items():
        known_data[k] = v
    if out_f:
        print("Exporting metadata to", out_f)
        save_json(out_f, known_data)
    else:
        pprint(known_data)
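
# Illustrative call (hypothetical paths), as wired up by the
# --dependencies-checkout/--content-paths options below:
#   dependencies_mdt_collect(["mbedcrypto=deps/mbedcrypto",
#                             "cmsis=deps/cmsis",
#                             "checkpatch=deps/checkpatch"],
#                            out_f="dependencies_report.json")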


def cppcheck_mdt_collect(file_list, out_f=None):
    """ XML parse multiple cppcheck output files and create a json report """

    xml_files = list(map(os.path.abspath, file_list))

    dict_data = []
    version = None
    for xf in xml_files:
        data = xml_read(xf)

        version = data["results"]["cppcheck"]["@version"]
        # If nothing is found the errors dictionary will be a NoneType object
        if data["results"]["errors"] is not None:
            # Use json to flatten the ordered dict
            str_data = json.dumps(data["results"]["errors"]["error"])
            # Remove the @ prefix added to attribute names by the XML
            # conversion
            str_data = str_data.replace("@", '')

            # Convert to dict again (xml to json will have added an array)
            _dt = json.loads(str_data)

            if isinstance(_dt, list):
                dict_data += _dt
            # If only one error is found it will be given as a single item
            elif isinstance(_dt, dict):
                dict_data += [_dt]
            else:
                print("Ignoring cpp entry %s of type %s" % (_dt, type(_dt)))

    out_data = {"_metadata_": {"cppcheck-version": version},
                "report": {}}

    for E in dict_data:

        sever = E.pop("severity")

        # Sort it based on severity
        try:
            out_data["report"][sever].append(E)
        except KeyError:
            out_data["report"][sever] = [E]

    _errors = 0
    for msg_sever, msg_sever_entries in out_data["report"].items():
        out_data["_metadata_"][msg_sever] = str(len(msg_sever_entries))
        if msg_sever == "error":
            _errors = len(msg_sever_entries)

    out_data["_metadata_"]["success"] = True if not int(_errors) else False

    if out_f:
        save_json(out_f, out_data)
    else:
        pprint(out_data)
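
# Sketch of the cppcheck XML input (version 2 format, assumed) and the
# flattened shape this collector produces:
#   <results><cppcheck version="1.88"/>
#     <errors><error id="..." severity="error" msg="..."/></errors></results>
#   -> {"_metadata_": {"cppcheck-version": "1.88", "error": "1",
#                      "success": False},
#       "report": {"error": [{"id": "...", "msg": "..."}]}}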


def checkpatch_mdt_collect(file_name, out_f=None):
    """ Regex parse a checkpatch output file and create a report """

    out_data = {"_metadata_": {"errors": 0,
                               "warnings": 0,
                               "lines": 0,
                               "success": True},
                "report": {}
                }
    with open(file_name, "r") as F:
        cpatch_data = F.read().strip()

    # checkpatch will not report anything when no issues are found
    if len(cpatch_data):
        stat_rex = re.compile(r'^total: (\d+) errors, '
                              r'(\d+) warnings, (\d+) lines',
                              re.MULTILINE)
        line_rex = re.compile(r'([\S]+:)\s([\S]+:)\s([\S ]+)\n', re.MULTILINE)
        ewl = stat_rex.search(cpatch_data)
        try:
            _errors, _warnings, _lines = ewl.groups()
        except Exception as E:
            print("Exception parsing checkpatch file.", E)
            # If there is text but not in a known format return -1 and fail
            # the job
            _errors = _warnings = _lines = "-1"
        checkpath_entries = line_rex.findall(cpatch_data)

        # Store the overall statistics once, rather than once per entry
        out_data["_metadata_"] = {"errors": _errors,
                                  "warnings": _warnings,
                                  "lines": _lines,
                                  "success": True if not int(_errors)
                                  else False}

        for en in checkpath_entries:
            _file, _line, _ = en[0].split(":")
            try:
                _type, _subtype, _ = en[1].split(":")
            except Exception:
                print("WARNING: Ignoring malformed checkpatch line: %s" %
                      "".join(en))
                continue
            _msg = en[2]

            E = {"id": _subtype,
                 "verbose": _subtype,
                 "msg": _msg,
                 "location": {"file": _file, "line": _line}
                 }
            try:
                out_data["report"][_type.lower()].append(E)
            except KeyError:
                out_data["report"][_type.lower()] = [E]

    if out_f:
        save_json(out_f, out_data)
    else:
        pprint(out_data)
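
# Sketch of the checkpatch output this parser expects (assuming checkpatch.pl
# is run with --terse --show-types), e.g.:
#   drivers/foo.c:42: WARNING:LONG_LINE: line over 80 characters
#   total: 0 errors, 1 warnings, 120 lines checked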


def jenkins_mdt_collect(out_f):
    """ Collects Jenkins environment information and stores
    it in a key value list """

    # Jenkins environment parameters are always valid
    jenkins_env_keys = ["BUILD_ID",
                        "BUILD_URL",
                        "JOB_BASE_NAME",
                        "GERRIT_URL",
                        "GERRIT_PROJECT"]
    # The following Gerrit parameters only exist when
    # a job is triggered by a web hook
    gerrit_trigger_keys = ["GERRIT_CHANGE_NUMBER",
                           "GERRIT_CHANGE_SUBJECT",
                           "GERRIT_CHANGE_ID",
                           "GERRIT_PATCHSET_REVISION",
                           "GERRIT_PATCHSET_NUMBER",
                           "GERRIT_REFSPEC",
                           "GERRIT_CHANGE_URL",
                           "GERRIT_BRANCH",
                           "GERRIT_CHANGE_OWNER_EMAIL",
                           "GERRIT_PATCHSET_UPLOADER_EMAIL"]

    # Find as many of the variables as possible in the environment
    el = set(os.environ).intersection(set(jenkins_env_keys +
                                          gerrit_trigger_keys))
    # Format it in key:value pairs
    out_data = {n: os.environ[n] for n in el}
    if out_f:
        save_json(out_f, out_data)
    else:
        pprint(out_data)
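
# Illustrative output for a Gerrit-triggered job (hypothetical values):
#   {"BUILD_ID": "42", "GERRIT_CHANGE_NUMBER": "1234",
#    "GERRIT_BRANCH": "master", ...}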


def metadata_collect(user_args):
    """ Logic for information collection during different stages of
    the build """

    if user_args.dependencies_checkout and user_args.content_paths:
        dependencies_mdt_collect(user_args.content_paths,
                                 user_args.out_f)
    elif user_args.git_info:
        git_info = get_local_git_info(os.path.abspath(user_args.git_info))

        if user_args.out_f:
            save_json(user_args.out_f, git_info)
        else:
            pprint(git_info)
    elif user_args.cppcheck_files:
        cppcheck_mdt_collect(user_args.cppcheck_files, user_args.out_f)
    elif user_args.checkpatch_file:
        checkpatch_mdt_collect(user_args.checkpatch_file, user_args.out_f)
    elif user_args.jenkins_info:
        jenkins_mdt_collect(user_args.out_f)
    else:
        print("Invalid Metadata collection arguments")
        print(user_args)
        sys.exit(1)


def collate_report(key_file_list, output_f=None, stdout=True):
    """ Join different types of json formatted reports into one """

    out_data = {"_metadata_": {}, "report": {}}
    for kf in key_file_list:
        try:
            key, fl = kf.split("=")
            data = load_json(fl)
            # If data is a standard report (_metadata_/report), parse it
            if ("_metadata_" in data.keys() and "report" in data.keys()):
                out_data["_metadata_"][key] = data["_metadata_"]
                out_data["report"][key] = data["report"]
            # Else treat it as a raw information passing dataset
            else:
                try:
                    out_data["info"][key] = data
                except KeyError:
                    out_data["info"] = {key: data}
        except Exception as E:
            print("Exception parsing argument", kf, E)
            continue
    if output_f:
        save_json(output_f, out_data)
    elif stdout:
        pprint(out_data)
    return out_data
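
# Illustrative call (hypothetical file names):
#   collate_report(["cppcheck=cppcheck_report.json",
#                   "checkpatch=checkpatch_report.json"],
#                  "full_report.json")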


def filter_report(key_value_list, input_f, output_f):
    """ Generates a subset of the data contained in
    input_f, by selecting only the values defined in key_value list """

    try:
        rep_data = load_json(input_f)
    except Exception as E:
        print("Exception parsing ", input_f, E)
        sys.exit(1)

    out_data = {}
    for kf in key_value_list:
        try:
            tag, value = kf.split("=")
            # if multiple selection
            if "," in value:
                out_data[tag] = {}
                for v in value.split(","):
                    data = rep_data[tag][v]
                    out_data[tag][v] = data
            else:
                data = rep_data[tag][value]
                out_data[tag] = {value: data}
        except Exception as E:
            print("Could not extract data-set for k: %s v: %s" % (tag, value))
            print(E)
            continue
    if output_f:
        save_json(output_f, out_data)
    else:
        pprint(out_data)
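
# Illustrative call (hypothetical keys): keep only two tagged entries from an
# input report:
#   filter_report(["mbedcrypto=tag1,tag2"], "in_report.json",
#                 "out_report.json")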


def parse_report(user_args):
    """ Parse a report and attempt to determine if it is overall successful or
    not. It will set the script's exit code accordingly """

    # Parse Mode
    in_rep = load_json(user_args.report)
    report_eval = None

    # Extract the required condition for evaluation to pass
    pass_key, pass_val = split_keys(user_args.set_pass)

    print("Evaluation will succeed if \"%s\" is \"%s\"" % (pass_key,
                                                           pass_val))
    try:
        report_eval = in_rep["_metadata_"][pass_key] == pass_val
        print("Evaluating detected '%s' field in _metadata_. " % pass_key)
    except Exception:
        pass

    if report_eval is None:
        if isinstance(in_rep, dict):
            # The report does not contain an overall success field in its
            # metadata, so parse the individual report items
            in_rep = in_rep["report"]
            ev_list = in_rep.values()
        elif isinstance(in_rep, list):
            ev_list = in_rep
        else:
            print("Invalid data type: %s" % type(in_rep))
            return

        if user_args.onepass:
            try:
                report_eval = in_rep[user_args.onepass][pass_key] == pass_val
            except Exception:
                report_eval = False

        # If every single field needs to be successful, invert the check and
        # look for the ones which are not
        elif user_args.allpass:
            try:
                if list(filter(lambda x: x[pass_key] != pass_val, ev_list)):
                    pass
                else:
                    report_eval = True
            except Exception as e:
                print(e)
                report_eval = False
        else:
            print("Evaluation condition not set. Please use -a or -o. Launch "
                  "help (-h) for more information")

    print("Evaluation %s" % ("passed" if report_eval else "failed"))
    if user_args.eif:
        print("Setting script exit status")
        sys.exit(0 if report_eval else 1)
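
# Typical invocation (hypothetical report file): fail the job unless every
# entry's "status" field reads "Success":
#   ./report_parser.py -r report.json -s "status=Success" -a -e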


def main(user_args):
    """ Main logic """

    # Metadata Collect Mode
    if user_args.collect:
        metadata_collect(user_args)
        return
    elif user_args.filter_report:
        filter_report(user_args.filter_report,
                      user_args.report,
                      user_args.out_f)
    elif user_args.collate_report:
        collate_report(user_args.collate_report, user_args.out_f)
    else:
        parse_report(user_args)


def get_cmd_args():
    """ Parse command line arguments """

    # Parse command line arguments to override config
    parser = argparse.ArgumentParser(description="TFM Report Parser.")
    parser.add_argument("-e", "--error_if_failed",
                        dest="eif",
                        action="store_true",
                        help="If set will change the script exit code")
    parser.add_argument("-s", "--set-success-field",
                        dest="set_pass",
                        default="status = Success",
                        action="store",
                        help="Set the key which the script will use to "
                             "assert success/failure")
    parser.add_argument("-a", "--all-fields-must-pass",
                        dest="allpass",
                        action="store_true",
                        help="When set and a list is provided, all entries "
                             "must be successful for evaluation to pass")
    parser.add_argument("-o", "--one-field-must-pass",
                        dest="onepass",
                        action="store",
                        help="Only the user defined field must pass")
    parser.add_argument("-r", "--report",
                        dest="report",
                        action="store",
                        help="JSON file containing input report")
    parser.add_argument("-c", "--collect",
                        dest="collect",
                        action="store_true",
                        help="When set, the parser will attempt to collect "
                             "information and produce a report")
    parser.add_argument("-d", "--dependencies-checkout",
                        dest="dependencies_checkout",
                        action="store_true",
                        help="Collect information from a dependencies "
                             "checkout job")
    parser.add_argument("-f", "--output-file",
                        dest="out_f",
                        action="store",
                        help="Output file to store captured information")
    parser.add_argument('-p', '--content-paths',
                        dest="content_paths",
                        nargs='*',
                        help=("Pass a space separated list of paths in the "
                              "following format: -p mbedtls=/yourpath/ "
                              "fpv=/another/path. Used in conjunction "
                              "with -d"))
    parser.add_argument("-g", "--git-info",
                        dest="git_info",
                        action="store",
                        help="Extract git information from given path. "
                             "Requires --collect directive. Optional "
                             "parameter --output-file")
    parser.add_argument("-x", "--cpp-check-xml",
                        dest="cppcheck_files",
                        nargs='*',
                        action="store",
                        help="Extract cppcheck static analysis information "
                             "from output files, provided as a space "
                             "separated list. Requires --collect directive. "
                             "Optional parameter --output-file")
    parser.add_argument("-z", "--checkpatch-parse-f",
                        dest="checkpatch_file",
                        action="store",
                        help="Extract checkpatch static analysis information "
                             "from an output file. Requires --collect "
                             "directive. Optional parameter --output-file")
    parser.add_argument("-j", "--jenkins-info",
                        dest="jenkins_info",
                        action="store_true",
                        help="Extract Jenkins and Gerrit trigger environment "
                             "information. Requires --collect directive. "
                             "Optional parameter --output-file")
    parser.add_argument("-l", "--collate-report",
                        dest="collate_report",
                        action="store",
                        nargs='*',
                        help="Pass a space separated list of key-value pairs "
                             "in the following format: -l "
                             "report_key_0=report_file_0 "
                             "report_key_1=report_file_1. Collate will "
                             "generate a joint dataset and print it to "
                             "stdout. Optional parameter --output-file")
    parser.add_argument("-t", "--filter-report",
                        dest="filter_report",
                        action="store",
                        nargs='*',
                        help="Requires --report parameter for input file. "
                             "Pass a space separated list of key-value pairs "
                             "in the following format: -t "
                             "report_key_0=value_0 report_key_1=value_1. "
                             "Filter will remove all entries of the original "
                             "report but the ones matching the key:value "
                             "pairs defined, and print the result to stdout. "
                             "Optional parameter --output-file")
    return parser.parse_args()


if __name__ == "__main__":
    main(get_cmd_args())
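
# Example invocations (illustrative paths and file names):
#   Collect git metadata:  ./report_parser.py -c -g tf-m/ -f git_info.json
#   Collate reports:       ./report_parser.py -l build=build.json \
#                              test=test.json -f full_report.json
#   Evaluate a report:     ./report_parser.py -r full_report.json \
#                              -s "status=Success" -a -e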