#!/usr/bin/env python3
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

19"""
20Make fuzz like testing for pkcs7 tests
21Given a valid DER pkcs7 file add tests to the test_suite_pkcs7.data file
22 - It is expected that the pkcs7_asn1_fail( data_t *pkcs7_buf )
23 function is defined in test_suite_pkcs7.function
24 - This is not meant to be portable code, if anything it is meant to serve as
25 documentation for showing how those ugly tests in test_suite_pkcs7.data were created
26"""
27
28
import sys
from os.path import exists

# Relative path to the existing test data file; assumes the script is run from a
# directory that sits next to the suites/ directory (e.g. tests/scripts).
pkcs7_test_file = "../suites/test_suite_pkcs7.data"

class Test:
    def __init__(self, name, depends, func_call):
        self.name = name
        self.depends = depends
        self.func_call = func_call

    def to_string(self):
        return f"\n{self.name}\n{self.depends}\n{self.func_call}\n"

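# Each entry appended by TestData follows the layout of the existing entries in
# test_suite_pkcs7.data. Illustrative example only (the hex payload and the test
# number <n> depend on the input file and on how many tests already exist):
#
#   PKCS7 Parse Failure Invalid ASN1: Add null byte to start #<n>
#   depends_on:MBEDTLS_SHA256_C
#   pkcs7_asn1_fail:"0030..."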
class TestData:
    mandatory_dep = "MBEDTLS_SHA256_C"
    test_name = "PKCS7 Parse Failure Invalid ASN1"
    test_function = "pkcs7_asn1_fail:"
    def __init__(self, file_name):
        self.file_name = file_name
        self.last_test_num, self.old_tests = self.read_test_file(file_name)
        self.new_tests = []

    def read_test_file(self, file):
        tests = []
        if not exists(file):
            print(f"{file} does not exist")
            sys.exit(1)
        with open(file, "r") as f:
            data = f.read()
        lines = [line.strip() for line in data.split('\n') if len(line.strip()) > 1]
        # Existing entries are either 3 lines (name, depends_on line, function call)
        # or 2 lines (name, function call) once blank lines are filtered out.
        i = 0
        while i < len(lines):
            if "depends" in lines[i+1]:
                tests.append(Test(lines[i], lines[i+1], lines[i+2]))
                i += 3
            else:
                tests.append(Test(lines[i], None, lines[i+1]))
                i += 2
        # continue numbering from the last existing test name (e.g. "... #15")
        latest_test_num = float(tests[-1].name.split('#')[1])
        return latest_test_num, tests

    def add(self, name, func_call):
        self.last_test_num += 1
        self.new_tests.append(Test(f"{self.test_name}: {name} #{self.last_test_num}",
                                   f"depends_on:{self.mandatory_dep}",
                                   f'{self.test_function}"{func_call}"'))

    def write_changes(self):
        with open(self.file_name, 'a') as f:
            f.write("\n")
            for t in self.new_tests:
                f.write(t.to_string())


def asn1_mutate(data):
    mutations = []
    reasons = []
    # We have been given an ASN.1 structure representing a PKCS7 blob.
    # We want to return an array of slightly modified versions of this data,
    # modified in ways which make the structure invalid.

    # We know that ASN.1 structures are laid out as:
    # |---1 byte showing data type---|----byte(s) for length of data---|---data content--|
    # and that some data types can contain other data types.
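    # For example, in DER (bytes shown here are illustrative, not taken from a
    # real PKCS7 file):
    #   02 01 05          INTEGER, length 1, value 0x05
    #   30 03 02 01 05    SEQUENCE, length 3, containing the INTEGER above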

    # off the bat just add bytes to start and end of the buffer
    mutations.append(["00"] + data)
    reasons.append("Add null byte to start")
    mutations.append(data + ["00"])
    reasons.append("Add null byte to end")
    # for every asn1 entry we should attempt to:
    # - change the data type tag
    # - make the length longer than actual
    # - make the length shorter than actual
    i = 0
    while i < len(data):
        tag_i = i
        leng_i = tag_i + 1
        data_i = leng_i + 1 + (int(data[leng_i][1], 16) if data[leng_i][0] == '8' else 0)
        if data[leng_i][0] == '8':
            length = int(''.join(data[leng_i + 1: data_i]), 16)
        else:
            length = int(data[leng_i], 16)
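        # Long-form example (illustrative bytes): in 30 82 01 00, the leading
        # length byte 82 says the next two bytes hold the length, so this header
        # declares a SEQUENCE with 0x0100 = 256 content bytes; the low nibble of
        # the 8x byte is how the code above finds the number of length bytes.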

        tag = data[tag_i]
        print(f"Looking at asn1: offset {i}, tag = {tag}, length = {length}:")
        print(f"{''.join(data[data_i:data_i+length])}")
        # change tag to something else
        if tag == "02":
            # turn integers into octet strings
            new_tag = "04"
        else:
            # turn everything else into an integer
            new_tag = "02"
        mutations.append(data[:tag_i] + [new_tag] + data[leng_i:])
        reasons.append(f"Change tag {tag} to {new_tag}")
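        # For example (illustrative header bytes): an entry starting 30 0d ... is
        # rewritten to start 02 0d ..., so a SEQUENCE header becomes an INTEGER
        # header while its length bytes are left untouched.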

        # change lengths to too big
        # skip any edge cases which would cause carry over
        if int(data[data_i - 1], 16) < 255:
            # bump the last length byte by one, keeping it two hex characters wide
            new_length = f"{int(data[data_i - 1], 16) + 1:02x}"
            mutations.append(data[:data_i - 1] + [new_length] + data[data_i:])
            reasons.append(f"Change length from {length} to {length + 1}")
            # we can add another test here for tags that contain other tags, where
            # they have more data than their containing tags account for
            if tag in ["30", "a0", "31"]:
                mutations.append(data[:data_i - 1] + [new_length]
                                 + data[data_i:data_i + length] + ["00"]
                                 + data[data_i + length:])
                reasons.append(f"Change contents of tag {tag} to contain one unaccounted extra byte")
        # change lengths to too small
        if int(data[data_i - 1], 16) > 0:
            new_length = f"{int(data[data_i - 1], 16) - 1:02x}"
            mutations.append(data[:data_i - 1] + [new_length] + data[data_i:])
            reasons.append(f"Change length from {length} to {length - 1}")

        # some tag types contain other tag types, so we should iterate into the data
        # (0x30 SEQUENCE, 0x31 SET and 0xa0 context-specific constructed tags are
        # the constructed types we expect to see here)
        if tag in ["30", "a0", "31"]:
            i = data_i
        else:
            i = data_i + length

    # each element pairs a human-readable reason with the mutated byte-string list
    return list(zip(reasons, mutations))

if len(sys.argv) < 2:
    print(f"USAGE: {sys.argv[0]} <pkcs7_der_file>")
    sys.exit(1)

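# Example invocation (the input file name is illustrative; any DER-encoded PKCS7
# file will do):
#   python3 <this script> pkcs7_data.der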
data_file = sys.argv[1]
test_data = TestData(pkcs7_test_file)
with open(data_file, 'rb') as f:
    data = f.read().hex()
# make data an array of byte strings eg ['de','ad','be','ef']
data = [data[i:i + 2] for i in range(0, len(data), 2)]
# returns tuples of test_names and modified data buffers
mutations = asn1_mutate(data)

print(f"made {len(mutations)} new tests")
for reason, mutated in mutations:
    test_data.add(reason, ''.join(mutated))


test_data.write_changes()