/*
 * Message Processing Stack, Reader implementation
 *
 * Copyright The Mbed TLS Contributors
 * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
 *
 * This file is part of Mbed TLS (https://tls.mbed.org)
 */

#include "common.h"

#if defined(MBEDTLS_SSL_PROTO_TLS1_3)

#include "mps_reader.h"
#include "mps_common.h"
#include "mps_trace.h"

#include <string.h>

#if defined(MBEDTLS_MPS_ENABLE_TRACE)
static int mbedtls_mps_trace_id = MBEDTLS_MPS_TRACE_BIT_READER;
#endif /* MBEDTLS_MPS_ENABLE_TRACE */

/*
 * GENERAL NOTE ON CODING STYLE
 *
 * The following code intentionally separates memory loads
 * and stores from other operations (arithmetic or branches).
 * This leads to the introduction of many local variables
 * and significantly increases the C-code line count, but
 * should not increase the size of generated assembly.
 *
 * The reason for this is twofold:
 * (1) It will ease verification efforts using the VST
 *     (Verified Software Toolchain)
 *     whose program logic cannot directly reason
 *     about instructions containing a load or store in
 *     addition to other operations (e.g. *p = *q or
 *     tmp = *p + 42).
 * (2) Operating on local variables and writing the results
 *     back to the target contexts on success only
 *     makes it possible to maintain structure invariants even
 *     on failure - this in turn has two benefits:
 *     (2.a) If for some reason an error code is not caught
 *           and operation continues, functions are nonetheless
 *           called with sane contexts, reducing the risk
 *           of dangerous behavior.
 *     (2.b) Randomized testing is easier if structures
 *           remain intact even in the face of failing
 *           and/or nonsensical calls.
 *           Moreover, it might even reduce code-size because
 *           the compiler need not write back temporary results
 *           to memory in case of failure.
 *
 */

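/*
 * As a minimal illustration of this style (schematic sketch only; the
 * snippet below does not appear verbatim in this file): instead of
 * updating the context in place, as in
 *
 *     if (rd->frag_len - rd->end < desired)
 *         return MBEDTLS_ERR_MPS_READER_OUT_OF_DATA;
 *     rd->end += desired;
 *
 * the functions below are written in the shape
 *
 *     mbedtls_mps_size_t end      = rd->end;        // loads only
 *     mbedtls_mps_size_t frag_len = rd->frag_len;
 *     if (frag_len - end < desired)                 // branch on locals
 *         return MBEDTLS_ERR_MPS_READER_OUT_OF_DATA;
 *     end += desired;
 *     rd->end = end;                                // store on success only
 *
 * so that no field of *rd has been modified when a function fails.
 */
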
static inline int mps_reader_is_accumulating(
    mbedtls_mps_reader const *rd)
{
    mbedtls_mps_size_t acc_remaining;
    if (rd->acc == NULL) {
        return 0;
    }

    acc_remaining = rd->acc_share.acc_remaining;
    return acc_remaining > 0;
}

static inline int mps_reader_is_producing(
    mbedtls_mps_reader const *rd)
{
    unsigned char *frag = rd->frag;
    return frag == NULL;
}

static inline int mps_reader_is_consuming(
    mbedtls_mps_reader const *rd)
{
    return !mps_reader_is_producing(rd);
}

static inline mbedtls_mps_size_t mps_reader_get_fragment_offset(
    mbedtls_mps_reader const *rd)
{
    unsigned char *acc = rd->acc;
    mbedtls_mps_size_t frag_offset;

    if (acc == NULL) {
        return 0;
    }

    frag_offset = rd->acc_share.frag_offset;
    return frag_offset;
}

static inline mbedtls_mps_size_t mps_reader_serving_from_accumulator(
    mbedtls_mps_reader const *rd)
{
    mbedtls_mps_size_t frag_offset, end;

    frag_offset = mps_reader_get_fragment_offset(rd);
    end = rd->end;

    return end < frag_offset;
}

static inline void mps_reader_zero(mbedtls_mps_reader *rd)
{
    /* A plain memset() would likely be more efficient,
     * but the current way of zeroing makes it harder
     * to overlook fields which should not be zero-initialized.
     * It's also more suitable for formal verification efforts since it
     * doesn't require reasoning about structs being
     * interpreted as unstructured binary blobs. */
    static mbedtls_mps_reader const zero =
    { .frag          = NULL,
      .frag_len      = 0,
      .commit        = 0,
      .end           = 0,
      .pending       = 0,
      .acc           = NULL,
      .acc_len       = 0,
      .acc_available = 0,
      .acc_share     = { .acc_remaining = 0 } };
    *rd = zero;
}

int mbedtls_mps_reader_init(mbedtls_mps_reader *rd,
                            unsigned char *acc,
                            mbedtls_mps_size_t acc_len)
{
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_init");
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "* Accumulator size: %u bytes", (unsigned) acc_len);
    mps_reader_zero(rd);
    rd->acc = acc;
    rd->acc_len = acc_len;
    MBEDTLS_MPS_TRACE_RETURN(0);
}

int mbedtls_mps_reader_free(mbedtls_mps_reader *rd)
{
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_free");
    mps_reader_zero(rd);
    MBEDTLS_MPS_TRACE_RETURN(0);
}

149 unsigned char *new_frag,
150 mbedtls_mps_size_t new_frag_len)
Hanno Becker13cd7842021-01-12 07:08:33 +0000151{
Hanno Becker13cd7842021-01-12 07:08:33 +0000152 mbedtls_mps_size_t copy_to_acc;
Gilles Peskine449bd832023-01-11 14:50:10 +0100153 MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_feed");
154 MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
155 "* Fragment length: %u bytes", (unsigned) new_frag_len);
Hanno Becker13cd7842021-01-12 07:08:33 +0000156
Gilles Peskine449bd832023-01-11 14:50:10 +0100157 if (new_frag == NULL) {
158 MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_INVALID_ARG);
159 }
Hanno Becker13cd7842021-01-12 07:08:33 +0000160
Gilles Peskine449bd832023-01-11 14:50:10 +0100161 MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_producing(
162 rd),
163 "mbedtls_mps_reader_feed() requires reader to be in producing mode");
Hanno Becker13cd7842021-01-12 07:08:33 +0000164
Gilles Peskine449bd832023-01-11 14:50:10 +0100165 if (mps_reader_is_accumulating(rd)) {
Hanno Beckerf81e41f2021-02-08 08:04:01 +0000166 unsigned char *acc = rd->acc;
Hanno Becker032b3522021-03-08 16:23:26 +0000167 mbedtls_mps_size_t acc_remaining = rd->acc_share.acc_remaining;
168 mbedtls_mps_size_t acc_available = rd->acc_available;
Hanno Becker13cd7842021-01-12 07:08:33 +0000169
Hanno Beckerf81e41f2021-02-08 08:04:01 +0000170 /* Skip over parts of the accumulator that have already been filled. */
Hanno Becker032b3522021-03-08 16:23:26 +0000171 acc += acc_available;
Hanno Becker13cd7842021-01-12 07:08:33 +0000172
Hanno Becker032b3522021-03-08 16:23:26 +0000173 copy_to_acc = acc_remaining;
Gilles Peskine449bd832023-01-11 14:50:10 +0100174 if (copy_to_acc > new_frag_len) {
Hanno Becker13cd7842021-01-12 07:08:33 +0000175 copy_to_acc = new_frag_len;
Gilles Peskine449bd832023-01-11 14:50:10 +0100176 }
Hanno Becker13cd7842021-01-12 07:08:33 +0000177
Hanno Beckerf81e41f2021-02-08 08:04:01 +0000178 /* Copy new contents to accumulator. */
Gilles Peskine449bd832023-01-11 14:50:10 +0100179 memcpy(acc, new_frag, copy_to_acc);
Hanno Becker13cd7842021-01-12 07:08:33 +0000180
Gilles Peskine449bd832023-01-11 14:50:10 +0100181 MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
182 "Copy new data of size %u of %u into accumulator at offset %u",
183 (unsigned) copy_to_acc, (unsigned) new_frag_len,
184 (unsigned) acc_available);
Hanno Becker13cd7842021-01-12 07:08:33 +0000185
186 /* Check if, with the new fragment, we have enough data. */
Hanno Becker032b3522021-03-08 16:23:26 +0000187 acc_remaining -= copy_to_acc;
Gilles Peskine449bd832023-01-11 14:50:10 +0100188 if (acc_remaining > 0) {
Hanno Beckerf81e41f2021-02-08 08:04:01 +0000189 /* We need to accumulate more data. Stay in producing mode. */
Hanno Becker032b3522021-03-08 16:23:26 +0000190 acc_available += copy_to_acc;
191 rd->acc_share.acc_remaining = acc_remaining;
192 rd->acc_available = acc_available;
Gilles Peskine449bd832023-01-11 14:50:10 +0100193 MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_NEED_MORE);
Hanno Becker13cd7842021-01-12 07:08:33 +0000194 }
195
Hanno Beckerf81e41f2021-02-08 08:04:01 +0000196 /* We have filled the accumulator: Move to consuming mode. */
197
Gilles Peskine449bd832023-01-11 14:50:10 +0100198 MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
199 "Enough data available to serve user request");
Hanno Becker13cd7842021-01-12 07:08:33 +0000200
Hanno Beckerf81e41f2021-02-08 08:04:01 +0000201 /* Remember overlap of accumulator and fragment. */
Hanno Becker032b3522021-03-08 16:23:26 +0000202 rd->acc_share.frag_offset = acc_available;
203 acc_available += copy_to_acc;
204 rd->acc_available = acc_available;
Gilles Peskine449bd832023-01-11 14:50:10 +0100205 } else { /* Not accumulating */
Hanno Becker13cd7842021-01-12 07:08:33 +0000206 rd->acc_share.frag_offset = 0;
207 }
208
209 rd->frag = new_frag;
210 rd->frag_len = new_frag_len;
211 rd->commit = 0;
212 rd->end = 0;
Gilles Peskine449bd832023-01-11 14:50:10 +0100213 MBEDTLS_MPS_TRACE_RETURN(0);
Hanno Becker13cd7842021-01-12 07:08:33 +0000214}
215
216
int mbedtls_mps_reader_get(mbedtls_mps_reader *rd,
                           mbedtls_mps_size_t desired,
                           unsigned char **buffer,
                           mbedtls_mps_size_t *buflen)
{
    unsigned char *frag;
    mbedtls_mps_size_t frag_len, frag_offset, end, frag_fetched, frag_remaining;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_get");
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "* Bytes requested: %u", (unsigned) desired);

    MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_consuming(
                                       rd),
                                   "mbedtls_mps_reader_get() requires reader to be in consuming mode");

    end = rd->end;
    frag_offset = mps_reader_get_fragment_offset(rd);

    /* Check if we're still serving from the accumulator. */
    if (mps_reader_serving_from_accumulator(rd)) {
        /* Illustration of supported and unsupported cases:
         *
         * - Allowed #1
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *             end end+desired
         *              |       |
         *        +-----v-------v-------------+
         *        |            acc            |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * - Allowed #2
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *                  end          end+desired
         *                   |                |
         *        +----------v----------------v
         *        |            acc            |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * - Not allowed #1 (could be served, but we don't actually use it):
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *              end       end+desired
         *               |             |
         *        +------v-------------v------+
         *        |            acc            |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         *
         * - Not allowed #2 (can't be served with a contiguous buffer):
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *              end               end + desired
         *               |                      |
         *        +------v--------------------+ v
         *        |            acc            |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * In case of Allowed #2 we're switching to serve from
         * `frag` starting from the next call to mbedtls_mps_reader_get().
         */

        unsigned char *acc;

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Serve the request from the accumulator");
        if (frag_offset - end < desired) {
            mbedtls_mps_size_t acc_available;
            acc_available = rd->acc_available;
            if (acc_available - end != desired) {
                /* It might be possible to serve some of these situations by
                 * making additional space in the accumulator, removing those
                 * parts that have already been committed.
                 * On the other hand, this brings additional complexity and
                 * enlarges the code size, while there doesn't seem to be a use
                 * case where we don't attempt exactly the same `get` calls when
                 * resuming a reader as we tried before pausing it.
                 * If we believe we adhere to this restricted usage throughout
                 * the library, this check is a good opportunity to
                 * validate this. */
                MBEDTLS_MPS_TRACE_RETURN(
                    MBEDTLS_ERR_MPS_READER_INCONSISTENT_REQUESTS);
            }
        }

        acc = rd->acc;
        acc += end;

        *buffer = acc;
        if (buflen != NULL) {
            *buflen = desired;
        }

        end += desired;
        rd->end = end;
        rd->pending = 0;

        MBEDTLS_MPS_TRACE_RETURN(0);
    }

    /* Attempt to serve the request from the current fragment */
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "Serve the request from the current fragment.");

    frag_len = rd->frag_len;
    frag_fetched = end - frag_offset; /* The amount of data from the current
                                       * fragment that has already been passed
                                       * to the user. */
    frag_remaining = frag_len - frag_fetched; /* Remaining data in fragment */

    /* Check if we can serve the read request from the fragment. */
    if (frag_remaining < desired) {
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "There's not enough data in the current fragment "
                          "to serve the request.");
        /* There's not enough data in the current fragment,
         * so either just return what we have or fail. */
        if (buflen == NULL) {
            if (frag_remaining > 0) {
                rd->pending = desired - frag_remaining;
                MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                                  "Remember to collect %u bytes before re-opening",
                                  (unsigned) rd->pending);
            }
            MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_OUT_OF_DATA);
        }

        desired = frag_remaining;
    }

    /* There's enough data in the current fragment to serve the
     * (potentially modified) read request. */

    frag = rd->frag;
    frag += frag_fetched;

    *buffer = frag;
    if (buflen != NULL) {
        *buflen = desired;
    }

    end += desired;
    rd->end = end;
    rd->pending = 0;
    MBEDTLS_MPS_TRACE_RETURN(0);
}

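/*
 * Note on the `buflen` parameter of mbedtls_mps_reader_get(), shown as a
 * rough sketch (the variable names and the request size of 50 bytes are
 * made up for this example):
 *
 *     // All-or-nothing request: either `chunk` points to 50 bytes, or the
 *     // call fails with MBEDTLS_ERR_MPS_READER_OUT_OF_DATA (and, if the
 *     // fragment could serve part of the request, the shortfall is noted
 *     // in rd->pending for a later pause/resume cycle).
 *     ret = mbedtls_mps_reader_get(&rd, 50, &chunk, NULL);
 *
 *     // Best-effort request: if the current fragment holds fewer than
 *     // 50 bytes, the call still succeeds and reports the actual number
 *     // of returned bytes in chunk_len.
 *     ret = mbedtls_mps_reader_get(&rd, 50, &chunk, &chunk_len);
 */
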
int mbedtls_mps_reader_commit(mbedtls_mps_reader *rd)
{
    mbedtls_mps_size_t end;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_commit");
    MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_consuming(
                                       rd),
                                   "mbedtls_mps_reader_commit() requires reader to be in consuming mode");

    end = rd->end;
    rd->commit = end;

    MBEDTLS_MPS_TRACE_RETURN(0);
}

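/*
 * Sketch of the pause-and-resume protocol around reclaim/feed (rough
 * illustration only; `frag`/`frag_len` and the way new fragments are
 * obtained are hypothetical, and error handling is omitted):
 *
 *     // Consuming mode: an all-or-nothing request that the current
 *     // fragment cannot satisfy.
 *     ret = mbedtls_mps_reader_get(&rd, 100, &chunk, NULL);
 *     // ret == MBEDTLS_ERR_MPS_READER_OUT_OF_DATA
 *
 *     mbedtls_mps_reader_commit(&rd);           // keep what was processed
 *     mbedtls_mps_reader_reclaim(&rd, &paused); // paused == 1: the pending
 *                                               // request and any uncommitted
 *                                               // data were backed up into
 *                                               // the accumulator
 *
 *     // Producing mode: feed fragments until the accumulator holds the
 *     // outstanding 100 bytes.
 *     while (mbedtls_mps_reader_feed(&rd, frag, frag_len) ==
 *            MBEDTLS_ERR_MPS_READER_NEED_MORE) {
 *         // ... obtain the next fragment into frag/frag_len ...
 *     }
 *
 *     // Consuming mode again: the caller must repeat the same get calls
 *     // it attempted before pausing (otherwise mbedtls_mps_reader_get()
 *     // may fail with MBEDTLS_ERR_MPS_READER_INCONSISTENT_REQUESTS);
 *     // this time the request is served from the accumulator.
 *     mbedtls_mps_reader_get(&rd, 100, &chunk, NULL);
 */
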
int mbedtls_mps_reader_reclaim(mbedtls_mps_reader *rd,
                               int *paused)
{
    unsigned char *frag, *acc;
    mbedtls_mps_size_t pending, commit;
    mbedtls_mps_size_t acc_len, frag_offset, frag_len;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_reclaim");

    if (paused != NULL) {
        *paused = 0;
    }

    MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_consuming(
                                       rd),
                                   "mbedtls_mps_reader_reclaim() requires reader to be in consuming mode");

    frag = rd->frag;
    acc = rd->acc;
    pending = rd->pending;
    commit = rd->commit;
    frag_len = rd->frag_len;

    frag_offset = mps_reader_get_fragment_offset(rd);

    if (pending == 0) {
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "No unsatisfied read-request has been logged.");

        /* Check if there's data left to be consumed. */
        if (commit < frag_offset || commit - frag_offset < frag_len) {
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                              "There is data left to be consumed.");
            rd->end = commit;
            MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_DATA_LEFT);
        }

        rd->acc_available = 0;
        rd->acc_share.acc_remaining = 0;

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Fragment has been fully processed and committed.");
    } else {
        int overflow;

        mbedtls_mps_size_t acc_backup_offset;
        mbedtls_mps_size_t acc_backup_len;
        mbedtls_mps_size_t frag_backup_offset;
        mbedtls_mps_size_t frag_backup_len;

        mbedtls_mps_size_t backup_len;
        mbedtls_mps_size_t acc_len_needed;

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "There has been an unsatisfied read with %u bytes overhead.",
                          (unsigned) pending);

        if (acc == NULL) {
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                              "No accumulator present");
            MBEDTLS_MPS_TRACE_RETURN(
                MBEDTLS_ERR_MPS_READER_NEED_ACCUMULATOR);
        }
        acc_len = rd->acc_len;

        /* Check if the upper layer has already fetched
         * and committed the contents of the accumulator. */
        if (commit < frag_offset) {
            /* No, accumulator is still being processed. */
            frag_backup_offset = 0;
            frag_backup_len = frag_len;
            acc_backup_offset = commit;
            acc_backup_len = frag_offset - commit;
        } else {
            /* Yes, the accumulator is already processed. */
            frag_backup_offset = commit - frag_offset;
            frag_backup_len = frag_len - frag_backup_offset;
            acc_backup_offset = 0;
            acc_backup_len = 0;
        }

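        /* Worked example with made-up numbers: if frag_len = 100, no
         * accumulator data was in use (frag_offset = 0), commit = 70 and
         * pending = 30, the second branch above yields
         * frag_backup_offset = 70, frag_backup_len = 30, acc_backup_len = 0,
         * so backup_len = 30 and acc_len_needed = 60: the 30 uncommitted
         * bytes are preserved, plus room for the 30 bytes still missing. */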
        backup_len = acc_backup_len + frag_backup_len;
        acc_len_needed = backup_len + pending;

        overflow = 0;
        overflow |= (backup_len < acc_backup_len);
        overflow |= (acc_len_needed < backup_len);

        if (overflow || acc_len < acc_len_needed) {
            /* Except for the different return code, we behave as if
             * there hadn't been a call to mbedtls_mps_reader_get()
             * since the last commit. */
            rd->end = commit;
            rd->pending = 0;
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_ERROR,
                              "The accumulator is too small to handle the backup.");
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_ERROR,
                              "* Size: %u", (unsigned) acc_len);
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_ERROR,
                              "* Needed: %u (%u + %u)",
                              (unsigned) acc_len_needed,
                              (unsigned) backup_len, (unsigned) pending);
            MBEDTLS_MPS_TRACE_RETURN(
                MBEDTLS_ERR_MPS_READER_ACCUMULATOR_TOO_SMALL);
        }

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Fragment backup: %u", (unsigned) frag_backup_len);
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Accumulator backup: %u", (unsigned) acc_backup_len);

        /* Move uncommitted parts from the accumulator to the front
         * of the accumulator. */
        memmove(acc, acc + acc_backup_offset, acc_backup_len);

        /* Copy uncommitted parts of the current fragment to the
         * accumulator. */
        memcpy(acc + acc_backup_len,
               frag + frag_backup_offset, frag_backup_len);

        rd->acc_available = backup_len;
        rd->acc_share.acc_remaining = pending;

        if (paused != NULL) {
            *paused = 1;
        }
    }

    rd->frag = NULL;
    rd->frag_len = 0;

    rd->commit = 0;
    rd->end = 0;
    rd->pending = 0;

    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "Final state: aa %u, al %u, ar %u",
                      (unsigned) rd->acc_available, (unsigned) rd->acc_len,
                      (unsigned) rd->acc_share.acc_remaining);
    MBEDTLS_MPS_TRACE_RETURN(0);
}

#endif /* MBEDTLS_SSL_PROTO_TLS1_3 */