#!/usr/bin/env python3

""" report_parser.py:

    Report parser parses Open CI json reports and conveys the information in
    one or more standard formats (to be implemented).

    After all information is captured it validates the success/failure status
    and can change the script exit code for integration with standard CI
    executors.
    """

from __future__ import print_function

__copyright__ = """
/*
 * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */
"""
__author__ = "Minos Galanakis"
__email__ = "minos.galanakis@linaro.org"
__project__ = "Trusted Firmware-M Open CI"
__status__ = "stable"
__version__ = "1.1"


import os
import re
import sys
import json
import argparse
from pprint import pprint

try:
    from tfm_ci_pylib.utils import load_json, get_local_git_info, \
        save_json, list_subdirs, get_remote_git_info, \
        convert_git_ref_path, xml_read
except ImportError:
    dir_path = os.path.dirname(os.path.realpath(__file__))
    sys.path.append(os.path.join(dir_path, "../"))

    from tfm_ci_pylib.utils import load_json, get_local_git_info, \
        save_json, list_subdirs, get_remote_git_info, \
        convert_git_ref_path, xml_read


def split_keys(joint_arg, sep="="):
    """ Split a string of two keys joined by a separator, and return them as
    a list with whitespace removed """

    keys = joint_arg.split(sep)

    # Remove whitespace
    keys = [k.strip() for k in keys]
    # If a key is the word True/False convert it to a boolean
    keys = [k.lower() == "true" if k.lower() in ["true", "false"] else k
            for k in keys]
    return keys

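# Example use of split_keys() (illustrative, not executed):
#   split_keys("status = Success")  ->  ["status", "Success"]
#   split_keys("success = True")    ->  ["success", True]  # boolean conversion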

def dependencies_mdt_collect(path_list,
                             out_f=None,
                             expected_paths=["mbedtls",
                                             "mbedcrypto",
                                             "cmsis",
                                             "checkpatch"]):
    """ Collect dependencies checkout metadata. It creates a json report which
    can be optionally exported to a file """

    cpaths = {k: v for k, v in [n.split("=") for n in path_list]}
    cwd = os.path.abspath(os.getcwd())

    # Create an empty dataset
    data = {n: {} for n in set(expected_paths).union(set(cpaths.keys()))}

    # Perform a basic sanity check
    if not set(data.keys()).issubset(set(cpaths.keys())):
        err_msg = "Error locating required paths.\nNeeded: %s\nHas: %s" % (
            ",".join(data.keys()), ",".join(cpaths.keys())
        )
        print(err_msg)
        raise Exception(err_msg)

    for d in list_subdirs(cpaths["mbedtls"]):
        print("mbedtls dir: ", d)
        # If the checkout directory name contains a git reference, convert it
        # to its short form
        d = convert_git_ref_path(d)

        git_info = get_local_git_info(d)
        tag = os.path.split(git_info["dir"])[-1].split("-")[-1]

        # Absolute paths will not work in Jenkins since it changes the
        # workspace directory between stages; convert to a relative path
        git_info["dir"] = os.path.relpath(git_info["dir"], cwd)
        data["mbedtls"][tag] = git_info

    for d in list_subdirs(cpaths["mbedcrypto"]):
        print("mbed-crypto dir: ", d)
        # If the checkout directory name contains a git reference, convert it
        # to its short form
        d = convert_git_ref_path(d)

        git_info = get_local_git_info(d)
        tag = os.path.split(git_info["dir"])[-1].split("-")[-1]

        # Absolute paths will not work in Jenkins since it changes the
        # workspace directory between stages; convert to a relative path
        git_info["dir"] = os.path.relpath(git_info["dir"], cwd)
        data["mbedcrypto"][tag] = git_info

    for d in list_subdirs(cpaths["cmsis"]):
        print("CMSIS subdir: ", d)
        d = convert_git_ref_path(d)
        git_info = get_local_git_info(d)
        tag = os.path.split(git_info["dir"])[-1]

        # Absolute paths will not work in Jenkins since it changes the
        # workspace directory between stages; convert to a relative path
        git_info["dir"] = os.path.relpath(git_info["dir"], cwd)
        data["cmsis"][tag] = git_info

    if "fastmodel" in cpaths:
        for d in list_subdirs(cpaths["fastmodel"]):
            print("Fastmodel subdir:", d)
            json_info = load_json(os.path.join(d, "version.info"))
            # Absolute paths will not work in Jenkins since it changes the
            # workspace directory between stages; convert to a relative path
            json_info["dir"] = os.path.relpath(d, cwd)

            tag = json_info["version"]
            data["fastmodel"][tag] = json_info

    for d in list_subdirs(cpaths["checkpatch"]):
        print("Checkpatch subdir:", d)

        with open(os.path.join(d, "version.info"), "r") as F:
            url = F.readline().strip()

        git_info = get_remote_git_info(url)
        d = convert_git_ref_path(d)
        git_info['dir'] = d
        tag = os.path.split(git_info["dir"])[-1].split("_")[-1]

        # Absolute paths will not work in Jenkins since it changes the
        # workspace directory between stages; convert to a relative path
        git_info["dir"] = os.path.relpath(git_info["dir"], cwd)
        data["checkpatch"][tag] = git_info

    if "fpga" in cpaths:
        for d in os.listdir(cpaths["fpga"]):
            print("FPGA imagefile:", d)
            if ".tar.gz" in d:
                name = d.split(".tar.gz")[0]
                platform, subsys, ver = name.split("_")
                data["fpga"][name] = {"platform": platform,
                                      "subsys": subsys,
                                      "version": ver,
                                      "recovery": os.path.join(cpaths["fpga"],
                                                               d)}

    if out_f:
        print("Exporting metadata to", out_f)
        save_json(out_f, data)
    else:
        pprint(data)

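# Illustrative invocation of dependencies_mdt_collect(); the paths below are
# hypothetical placeholders for a real checkout layout:
#   dependencies_mdt_collect(["mbedtls=deps/mbedtls",
#                             "mbedcrypto=deps/mbedcrypto",
#                             "cmsis=deps/cmsis",
#                             "checkpatch=deps/checkpatch"],
#                            out_f="dependencies_metadata.json")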

def cppcheck_mdt_collect(file_list, out_f=None):
    """ XML parse multiple cppcheck output files and create a json report """

    xml_files = list(map(os.path.abspath, file_list))

    dict_data = []
    version = None
    for xf in xml_files:
        data = xml_read(xf)

        version = data["results"]["cppcheck"]["@version"]
        # If nothing is found the errors dictionary will be a None-type object
        if data["results"]["errors"] is not None:
            # Use json to flatten the ordered dict
            str_data = json.dumps(data["results"]["errors"]["error"])
            # Remove the '@' prefix added to XML attributes by the conversion
            str_data = str_data.replace("@", '')

            # Convert back to dict (the XML to JSON conversion may have
            # produced a list)
            _dt = json.loads(str_data)

            if isinstance(_dt, list):
                dict_data += _dt
            # If only one error is found it will be given as a single item
            elif isinstance(_dt, dict):
                dict_data += [_dt]
            else:
                print("Ignoring cpp entry %s of type %s" % (_dt, type(_dt)))

    out_data = {"_metadata_": {"cppcheck-version": version},
                "report": {}}

    for E in dict_data:

        sever = E.pop("severity")

        # Group the entries by severity
        try:
            out_data["report"][sever].append(E)
        except KeyError:
            out_data["report"][sever] = [E]

    _errors = 0
    for msg_sever, msg_sever_entries in out_data["report"].items():
        out_data["_metadata_"][msg_sever] = str(len(msg_sever_entries))
        if msg_sever == "error":
            _errors = len(msg_sever_entries)

    out_data["_metadata_"]["success"] = not _errors

    if out_f:
        save_json(out_f, out_data)
    else:
        pprint(out_data)

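# Sketch of the report shape produced by cppcheck_mdt_collect(); the version
# and counts shown are hypothetical:
#   {"_metadata_": {"cppcheck-version": "1.87", "error": "2",
#                   "success": False},
#    "report": {"error": [{"id": "...", "msg": "...", "location": ...}]}}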

def checkpatch_mdt_collect(file_name, out_f=None):
    """ Regex parse a checkpatch output file and create a report """

    out_data = {"_metadata_": {"errors": 0,
                               "warnings": 0,
                               "lines": 0,
                               "success": True},
                "report": {}
                }
    with open(file_name, "r") as F:
        cpatch_data = F.read().strip()

    # checkpatch will not report anything when no issues are found
    if len(cpatch_data):
        stat_rex = re.compile(r'^total: (\d+) errors, '
                              r'(\d+) warnings, (\d+) lines',
                              re.MULTILINE)
        line_rex = re.compile(r'([\S]+:)\s([\S]+:)\s([\S ]+)\n', re.MULTILINE)
        ewl = stat_rex.search(cpatch_data)
        try:
            _errors, _warnings, _lines = ewl.groups()
        except Exception as E:
            print("Exception parsing checkpatch file.", E)
            # If there is text but not in a known format, return -1 and fail
            # the job
            _errors = _warnings = _lines = "-1"

        # Update the metadata once, based on the summary line
        out_data["_metadata_"] = {"errors": _errors,
                                  "warnings": _warnings,
                                  "lines": _lines,
                                  "success": not int(_errors)}

        checkpatch_entries = line_rex.findall(cpatch_data)

        for en in checkpatch_entries:
            _file, _line, _ = en[0].split(":")
            _type, _subtype, _ = en[1].split(":")
            _msg = en[2]

            E = {"id": _subtype,
                 "verbose": _subtype,
                 "msg": _msg,
                 "location": {"file": _file, "line": _line}
                 }
            try:
                out_data["report"][_type.lower()].append(E)
            except KeyError:
                out_data["report"][_type.lower()] = [E]

    if out_f:
        save_json(out_f, out_data)
    else:
        pprint(out_data)

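# checkpatch_mdt_collect() expects the per-issue and summary line formats
# matched by line_rex/stat_rex above, e.g. (illustrative):
#   drivers/foo.c:101: ERROR:SPACING: space required after ','
#   total: 1 errors, 0 warnings, 250 lines checked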

def jenkins_mdt_collect(out_f):
    """ Collect Jenkins environment information and store it
    as key-value pairs """

    # Jenkins environment parameters are always valid
    jenkins_env_keys = ["BUILD_ID",
                        "BUILD_URL",
                        "JOB_BASE_NAME",
                        "GERRIT_URL",
                        "GERRIT_PROJECT"]
    # The following Gerrit parameters only exist when
    # a job is triggered by a web hook
    gerrit_trigger_keys = ["GERRIT_CHANGE_NUMBER",
                           "GERRIT_CHANGE_SUBJECT",
                           "GERRIT_CHANGE_ID",
                           "GERRIT_PATCHSET_REVISION",
                           "GERRIT_PATCHSET_NUMBER",
                           "GERRIT_REFSPEC",
                           "GERRIT_CHANGE_URL",
                           "GERRIT_BRANCH",
                           "GERRIT_CHANGE_OWNER_EMAIL",
                           "GERRIT_PATCHSET_UPLOADER_EMAIL"]

    # Find as many of the variables as exist in the environment
    el = set(os.environ).intersection(set(jenkins_env_keys +
                                          gerrit_trigger_keys))
    # Format them as key:value pairs
    out_data = {n: os.environ[n] for n in el}
    if out_f:
        save_json(out_f, out_data)
    else:
        pprint(out_data)

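# Illustrative output of jenkins_mdt_collect() inside a Gerrit-triggered job;
# all values are hypothetical:
#   {"BUILD_ID": "42", "BUILD_URL": "https://jenkins.example/job/tf-m/42/",
#    "GERRIT_CHANGE_NUMBER": "1234", ...}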

def metadata_collect(user_args):
    """ Logic for information collection during different stages of
    the build """

    if user_args.dependencies_checkout and user_args.content_paths:
        dependencies_mdt_collect(user_args.content_paths,
                                 user_args.out_f)
    elif user_args.git_info:
        git_info = get_local_git_info(os.path.abspath(user_args.git_info))

        if user_args.out_f:
            save_json(user_args.out_f, git_info)
        else:
            pprint(git_info)
    elif user_args.cppcheck_files:
        cppcheck_mdt_collect(user_args.cppcheck_files, user_args.out_f)
    elif user_args.checkpatch_file:
        checkpatch_mdt_collect(user_args.checkpatch_file, user_args.out_f)
    elif user_args.jenkins_info:
        jenkins_mdt_collect(user_args.out_f)
    else:
        print("Invalid metadata collection arguments")
        print(user_args)
        sys.exit(1)


def collate_report(key_file_list, output_f=None, stdout=True):
    """ Join different types of json formatted reports into one """

    out_data = {"_metadata_": {}, "report": {}}
    for kf in key_file_list:
        try:
            key, fl = kf.split("=")
            data = load_json(fl)
            # If the data is a standard report (contains _metadata_ and
            # report keys) parse it
            if ("_metadata_" in data.keys() and "report" in data.keys()):
                out_data["_metadata_"][key] = data["_metadata_"]
                out_data["report"][key] = data["report"]
            # Else treat it as a raw information-passing dataset
            else:
                try:
                    out_data["info"][key] = data
                except KeyError:
                    out_data["info"] = {key: data}
        except Exception as E:
            print("Exception parsing argument", kf, E)
            continue

    if output_f:
        save_json(output_f, out_data)
    elif stdout:
        pprint(out_data)
    return out_data

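# Illustrative invocation of collate_report(); keys and file names are
# hypothetical:
#   collate_report(["cppcheck=cppcheck_report.json",
#                   "checkpatch=checkpatch_report.json"],
#                  output_f="joint_report.json")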

def filter_report(key_value_list, input_f, output_f):
    """ Generate a subset of the data contained in input_f, by selecting
    only the values defined in key_value_list """

    try:
        rep_data = load_json(input_f)
    except Exception as E:
        print("Exception parsing ", input_f, E)
        sys.exit(1)

    out_data = {}
    for kf in key_value_list:
        try:
            tag, value = kf.split("=")
            # If multiple values are selected
            if "," in value:
                out_data[tag] = {}
                for v in value.split(","):
                    data = rep_data[tag][v]
                    out_data[tag][v] = data
            else:
                data = rep_data[tag][value]
                out_data[tag] = {value: data}
        except Exception as E:
            print("Could not extract data-set for k: %s v: %s" % (tag, value))
            print(E)
            continue

    if output_f:
        save_json(output_f, out_data)
    else:
        pprint(out_data)

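# Illustrative invocation of filter_report(); the key/value names are
# hypothetical:
#   filter_report(["mbedtls=mbedtls-2.7.9,mbedtls-2.16.0"],
#                 "dependencies_metadata.json",
#                 "filtered_report.json")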

def parse_report(user_args):
    """ Parse a report and attempt to determine if it is overall successful or
    not. It will set the script's exit code accordingly """

    # Parse mode
    in_rep = load_json(user_args.report)
    report_eval = None

    # Extract the required condition for the evaluation to pass
    pass_key, pass_val = split_keys(user_args.set_pass)

    print("Evaluation will succeed if \"%s\" is \"%s\"" % (pass_key,
                                                           pass_val))
    try:
        report_eval = in_rep["_metadata_"][pass_key] == pass_val
        print("Evaluating detected '%s' field in _metadata_. " % pass_key)
    except Exception:
        pass

    if report_eval is None:
        if isinstance(in_rep, dict):
            # The metadata check above did not produce a verdict, so
            # evaluate the individual report entries instead
            in_rep = in_rep["report"]
            ev_list = in_rep.values()
        elif isinstance(in_rep, list):
            ev_list = in_rep
        else:
            print("Invalid data type: %s" % type(in_rep))
            return

        if user_args.onepass:
            try:
                report_eval = in_rep[user_args.onepass][pass_key] == pass_val
            except Exception:
                report_eval = False

        # If every single entry needs to be successful, invert the check and
        # look for the ones that are not
        elif user_args.allpass:
            try:
                report_eval = not [x for x in ev_list
                                   if x[pass_key] != pass_val]
            except Exception as e:
                print(e)
                report_eval = False
        else:
            print("Evaluation condition not set. Please use -a or -o. "
                  "See help (-h) for more information")

    print("Evaluation %s" % ("passed" if report_eval else "failed"))
    if user_args.eif:
        print("Setting script exit status")
        sys.exit(0 if report_eval else 1)

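# Illustrative example: a cppcheck report stores
# {"_metadata_": {"success": True}, ...}, so invoking the script with
# -s "success = True" makes split_keys() yield the boolean True and the
# metadata check above sets report_eval without iterating the entries.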

def main(user_args):
    """ Main logic """

    # Metadata collection mode
    if user_args.collect:
        metadata_collect(user_args)
        return
    elif user_args.filter_report:
        filter_report(user_args.filter_report,
                      user_args.report,
                      user_args.out_f)
    elif user_args.collate_report:
        collate_report(user_args.collate_report, user_args.out_f)
    else:
        parse_report(user_args)


def get_cmd_args():
    """ Parse command line arguments """

    # Parse command line arguments to override config
    parser = argparse.ArgumentParser(description="TFM Report Parser.")
    parser.add_argument("-e", "--error_if_failed",
                        dest="eif",
                        action="store_true",
                        help="If set, the script exit code will reflect "
                             "the evaluation result")
    parser.add_argument("-s", "--set-success-field",
                        dest="set_pass",
                        default="status = Success",
                        action="store",
                        help="Set the key which the script will use to "
                             "assert success/failure")
    parser.add_argument("-a", "--all-fields-must-pass",
                        dest="allpass",
                        action="store_true",
                        help="When set and a list is provided, all entries "
                             "must be successful for the evaluation to pass")
    parser.add_argument("-o", "--one-field-must-pass",
                        dest="onepass",
                        action="store",
                        help="Only the user defined field must pass")
    parser.add_argument("-r", "--report",
                        dest="report",
                        action="store",
                        help="JSON file containing the input report")
    parser.add_argument("-c", "--collect",
                        dest="collect",
                        action="store_true",
                        help="When set, the parser will attempt to collect "
                             "information and produce a report")
    parser.add_argument("-d", "--dependencies-checkout",
                        dest="dependencies_checkout",
                        action="store_true",
                        help="Collect information from a dependencies "
                             "checkout job")
    parser.add_argument("-f", "--output-file",
                        dest="out_f",
                        action="store",
                        help="Output file to store the captured information")
    parser.add_argument('-p', '--content-paths',
                        dest="content_paths",
                        nargs='*',
                        help=("Pass a space separated list of paths in the "
                              "following format: -p mbedtls=/yourpath/ "
                              "fpv=/another/path. Used in conjunction "
                              "with -d"))
    parser.add_argument("-g", "--git-info",
                        dest="git_info",
                        action="store",
                        help="Extract git information from the given path. "
                             "Requires the --collect directive. Optional "
                             "parameter: --output-file")
    parser.add_argument("-x", "--cpp-check-xml",
                        dest="cppcheck_files",
                        nargs='*',
                        action="store",
                        help="Extract cppcheck static analysis information "
                             "from output files, provided as a space "
                             "separated list. Requires the --collect "
                             "directive. Optional parameter: --output-file")
    parser.add_argument("-z", "--checkpatch-parse-f",
                        dest="checkpatch_file",
                        action="store",
                        help="Extract checkpatch static analysis information "
                             "from an output file. Requires the --collect "
                             "directive. Optional parameter: --output-file")
    parser.add_argument("-j", "--jenkins-info",
                        dest="jenkins_info",
                        action="store_true",
                        help="Extract Jenkins and Gerrit trigger environment "
                             "information. Requires the --collect directive. "
                             "Optional parameter: --output-file")
    parser.add_argument("-l", "--collate-report",
                        dest="collate_report",
                        action="store",
                        nargs='*',
                        help="Pass a space separated list of key-value pairs "
                             "in the following format: -l "
                             "report_key_0=report_file_0 "
                             "report_key_1=report_file_1. Collate will "
                             "generate a joint dataset and print it to "
                             "stdout. Optional parameter: --output-file")
    parser.add_argument("-t", "--filter-report",
                        dest="filter_report",
                        action="store",
                        nargs='*',
                        help="Requires the --report parameter for the input "
                             "file. Pass a space separated list of key-value "
                             "pairs in the following format: -t "
                             "report_key_0=value_0 report_key_1=value_1. "
                             "Filter will remove all entries of the original "
                             "report except the ones matching the key:value "
                             "pairs defined, and print the result to stdout. "
                             "Optional parameter: --output-file")
    return parser.parse_args()

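# Illustrative command lines; all file names are hypothetical:
#   Collect dependency checkout metadata:
#     ./report_parser.py -c -d -f meta.json \
#         -p mbedtls=deps/mbedtls mbedcrypto=deps/mbedcrypto \
#            cmsis=deps/cmsis checkpatch=deps/checkpatch
#   Collect cppcheck XML output:
#     ./report_parser.py -c -x cppcheck.xml -f cppcheck_report.json
#   Evaluate a report and set the exit code on failure:
#     ./report_parser.py -r cppcheck_report.json -s "success = True" -e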

if __name__ == "__main__":
    main(get_cmd_args())