/*
 * Message Processing Stack, Reader implementation
 *
 * Copyright The Mbed TLS Contributors
 * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
 */

#include "ssl_misc.h"

#if defined(MBEDTLS_SSL_PROTO_TLS1_3)

#include "mps_reader.h"
#include "mps_common.h"
#include "mps_trace.h"

#include <string.h>

#if defined(MBEDTLS_MPS_ENABLE_TRACE)
static int mbedtls_mps_trace_id = MBEDTLS_MPS_TRACE_BIT_READER;
#endif /* MBEDTLS_MPS_ENABLE_TRACE */

/*
 * GENERAL NOTE ON CODING STYLE
 *
 * The following code intentionally separates memory loads
 * and stores from other operations (arithmetic or branches).
 * This leads to the introduction of many local variables
 * and significantly increases the C-code line count, but
 * should not increase the size of the generated assembly.
 *
 * The reason for this is twofold:
 * (1) It eases verification efforts using the VST
 *     (Verified Software Toolchain), whose program logic
 *     cannot directly reason about instructions that combine
 *     a load or store with other operations (e.g. *p = *q or
 *     tmp = *p + 42).
 * (2) Operating on local variables and writing the results
 *     back to the target contexts on success only makes it
 *     possible to maintain structure invariants even on
 *     failure - this in turn has two benefits:
 *     (2.a) If for some reason an error code is not caught
 *           and operation continues, functions are nonetheless
 *           called with sane contexts, reducing the risk
 *           of dangerous behavior.
 *     (2.b) Randomized testing is easier if structures
 *           remain intact even in the face of failing
 *           and/or nonsensical calls.
 *           Moreover, it might even reduce code size because
 *           the compiler need not write back temporary results
 *           to memory in case of failure.
 *
 */

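/* Illustrative sketch of the style described above (not part of the reader
 * implementation, kept in a non-compiled block): a statement mixing a load,
 * arithmetic and a store is split into explicit load / compute / store steps
 * on a local, and the context is only written back once the result is known.
 * The helper names below are hypothetical. */
#if 0
/* Combined form: load, addition and store in a single statement. */
static void bump_end_combined(mbedtls_mps_reader *rd, mbedtls_mps_size_t n)
{
    rd->end = rd->end + n;
}

/* Style used in this file: separate the load, the arithmetic and the store. */
static void bump_end_separated(mbedtls_mps_reader *rd, mbedtls_mps_size_t n)
{
    mbedtls_mps_size_t end = rd->end; /* load    */
    end += n;                         /* compute */
    rd->end = end;                    /* store   */
}
#endif
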
static inline int mps_reader_is_accumulating(
    mbedtls_mps_reader const *rd)
{
    mbedtls_mps_size_t acc_remaining;
    if (rd->acc == NULL) {
        return 0;
    }

    acc_remaining = rd->acc_share.acc_remaining;
    return acc_remaining > 0;
}

static inline int mps_reader_is_producing(
    mbedtls_mps_reader const *rd)
{
    unsigned char *frag = rd->frag;
    return frag == NULL;
}

static inline int mps_reader_is_consuming(
    mbedtls_mps_reader const *rd)
{
    return !mps_reader_is_producing(rd);
}

static inline mbedtls_mps_size_t mps_reader_get_fragment_offset(
    mbedtls_mps_reader const *rd)
{
    unsigned char *acc = rd->acc;
    mbedtls_mps_size_t frag_offset;

    if (acc == NULL) {
        return 0;
    }

    frag_offset = rd->acc_share.frag_offset;
    return frag_offset;
}

static inline mbedtls_mps_size_t mps_reader_serving_from_accumulator(
    mbedtls_mps_reader const *rd)
{
    mbedtls_mps_size_t frag_offset, end;

    frag_offset = mps_reader_get_fragment_offset(rd);
    end = rd->end;

    return end < frag_offset;
}

static inline void mps_reader_zero(mbedtls_mps_reader *rd)
{
    /* A plain memset() would likely be more efficient,
     * but the current way of zeroing makes it harder
     * to overlook fields which should not be zero-initialized.
     * It's also more suitable for FV efforts since it
     * doesn't require reasoning about structs being
     * interpreted as unstructured binary blobs. */
    static mbedtls_mps_reader const zero =
    { .frag = NULL,
      .frag_len = 0,
      .commit = 0,
      .end = 0,
      .pending = 0,
      .acc = NULL,
      .acc_len = 0,
      .acc_available = 0,
      .acc_share = { .acc_remaining = 0 } };
    *rd = zero;
}

int mbedtls_mps_reader_init(mbedtls_mps_reader *rd,
                            unsigned char *acc,
                            mbedtls_mps_size_t acc_len)
{
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_init");
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "* Accumulator size: %u bytes", (unsigned) acc_len);
    mps_reader_zero(rd);
    rd->acc = acc;
    rd->acc_len = acc_len;
    MBEDTLS_MPS_TRACE_RETURN(0);
}

int mbedtls_mps_reader_free(mbedtls_mps_reader *rd)
{
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_free");
    mps_reader_zero(rd);
    MBEDTLS_MPS_TRACE_RETURN(0);
}

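/* Illustrative sketch of the reader lifecycle (non-compiled, not part of this
 * file's API): the producer feeds fragments, the consumer fetches and commits
 * data, and mbedtls_mps_reader_reclaim() switches the reader back to producing
 * mode. The accumulator size and the fragment/fragment_len parameters are
 * hypothetical. */
#if 0
static unsigned char example_acc[100];

static void example_lifecycle(unsigned char *fragment,
                              mbedtls_mps_size_t fragment_len)
{
    mbedtls_mps_reader rd;
    unsigned char *chunk;
    int paused;

    mbedtls_mps_reader_init(&rd, example_acc, sizeof(example_acc));

    /* Producing mode: hand a fragment to the reader. */
    if (mbedtls_mps_reader_feed(&rd, fragment, fragment_len) != 0) {
        return;
    }

    /* Consuming mode: fetch and commit the entire fragment. If only part of
     * it were committed, reclaim() below would report
     * MBEDTLS_ERR_MPS_READER_DATA_LEFT instead of succeeding. */
    if (mbedtls_mps_reader_get(&rd, fragment_len, &chunk, NULL) == 0) {
        /* ... parse fragment_len bytes at chunk ... */
        mbedtls_mps_reader_commit(&rd);
    }

    /* Back to producing mode; *paused tells us whether an unsatisfied
     * request was backed up into the accumulator. */
    mbedtls_mps_reader_reclaim(&rd, &paused);

    mbedtls_mps_reader_free(&rd);
}
#endif
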
int mbedtls_mps_reader_feed(mbedtls_mps_reader *rd,
                            unsigned char *new_frag,
                            mbedtls_mps_size_t new_frag_len)
{
    mbedtls_mps_size_t copy_to_acc;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_feed");
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "* Fragment length: %u bytes", (unsigned) new_frag_len);

    if (new_frag == NULL) {
        MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_INVALID_ARG);
    }

    MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_producing(
                                       rd),
                                   "mbedtls_mps_reader_feed() requires reader to be in producing mode");

    if (mps_reader_is_accumulating(rd)) {
        unsigned char *acc = rd->acc;
        mbedtls_mps_size_t acc_remaining = rd->acc_share.acc_remaining;
        mbedtls_mps_size_t acc_available = rd->acc_available;

        /* Skip over parts of the accumulator that have already been filled. */
        acc += acc_available;

        copy_to_acc = acc_remaining;
        if (copy_to_acc > new_frag_len) {
            copy_to_acc = new_frag_len;
        }

        /* Copy new contents to accumulator. */
        memcpy(acc, new_frag, copy_to_acc);

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Copy new data of size %u of %u into accumulator at offset %u",
                          (unsigned) copy_to_acc, (unsigned) new_frag_len,
                          (unsigned) acc_available);

        /* Check if, with the new fragment, we have enough data. */
        acc_remaining -= copy_to_acc;
        if (acc_remaining > 0) {
            /* We need to accumulate more data. Stay in producing mode. */
            acc_available += copy_to_acc;
            rd->acc_share.acc_remaining = acc_remaining;
            rd->acc_available = acc_available;
            MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_NEED_MORE);
        }

        /* We have filled the accumulator: Move to consuming mode. */

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Enough data available to serve user request");

        /* Remember overlap of accumulator and fragment. */
        rd->acc_share.frag_offset = acc_available;
        acc_available += copy_to_acc;
        rd->acc_available = acc_available;
    } else { /* Not accumulating */
        rd->acc_share.frag_offset = 0;
    }

    rd->frag = new_frag;
    rd->frag_len = new_frag_len;
    rd->commit = 0;
    rd->end = 0;
    MBEDTLS_MPS_TRACE_RETURN(0);
}

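/* Illustrative sketch (non-compiled; the fragment pointers and lengths are
 * hypothetical): once a reader has been paused with an unsatisfied request,
 * mbedtls_mps_reader_feed() keeps returning MBEDTLS_ERR_MPS_READER_NEED_MORE
 * - absorbing each fragment into the accumulator in full - until the
 * outstanding request is covered, at which point it returns 0 and the reader
 * is back in consuming mode. */
#if 0
static void example_refill(mbedtls_mps_reader *rd,
                           unsigned char *frag1, mbedtls_mps_size_t len1,
                           unsigned char *frag2, mbedtls_mps_size_t len2)
{
    int ret = mbedtls_mps_reader_feed(rd, frag1, len1);
    if (ret == MBEDTLS_ERR_MPS_READER_NEED_MORE) {
        /* frag1 was copied into the accumulator in full; keep feeding. */
        ret = mbedtls_mps_reader_feed(rd, frag2, len2);
    }
    if (ret == 0) {
        /* Consuming mode again: repeat the mbedtls_mps_reader_get() calls
         * that were attempted before pausing. */
    }
}
#endif
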
int mbedtls_mps_reader_get(mbedtls_mps_reader *rd,
                           mbedtls_mps_size_t desired,
                           unsigned char **buffer,
                           mbedtls_mps_size_t *buflen)
{
    unsigned char *frag;
    mbedtls_mps_size_t frag_len, frag_offset, end, frag_fetched, frag_remaining;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_get");
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "* Bytes requested: %u", (unsigned) desired);

    MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_consuming(
                                       rd),
                                   "mbedtls_mps_reader_get() requires reader to be in consuming mode");

    end = rd->end;
    frag_offset = mps_reader_get_fragment_offset(rd);

    /* Check if we're still serving from the accumulator. */
    if (mps_reader_serving_from_accumulator(rd)) {
        /* Illustration of supported and unsupported cases:
         *
         * - Allowed #1
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *             end end+desired
         *              |       |
         *        +-----v-------v-------------+
         *        |            acc            |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * - Allowed #2
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *                  end          end+desired
         *                   |                |
         *        +----------v----------------v
         *        |            acc            |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * - Not allowed #1 (could be served, but we don't actually use it):
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *             end      end+desired
         *              |             |
         *        +------v-------------v------+
         *        |            acc            |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * - Not allowed #2 (can't be served with a contiguous buffer):
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *             end                end + desired
         *              |                        |
         *        +------v--------------------+ v
         *        |            acc            |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * In case of Allowed #2 we're switching to serve from
         * `frag` starting from the next call to mbedtls_mps_reader_get().
         */

        unsigned char *acc;

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Serve the request from the accumulator");
        if (frag_offset - end < desired) {
            mbedtls_mps_size_t acc_available;
            acc_available = rd->acc_available;
            if (acc_available - end != desired) {
                /* It might be possible to serve some of these situations by
                 * making additional space in the accumulator, removing those
                 * parts that have already been committed.
                 * On the other hand, this brings additional complexity and
                 * enlarges the code size, while there doesn't seem to be a use
                 * case where, upon resuming a reader, we don't attempt exactly
                 * the same `get` calls as before pausing it.
                 * If we believe we adhere to this restricted usage throughout
                 * the library, this check is a good opportunity to
                 * validate this. */
                MBEDTLS_MPS_TRACE_RETURN(
                    MBEDTLS_ERR_MPS_READER_INCONSISTENT_REQUESTS);
            }
        }

        acc = rd->acc;
        acc += end;

        *buffer = acc;
        if (buflen != NULL) {
            *buflen = desired;
        }

        end += desired;
        rd->end = end;
        rd->pending = 0;

        MBEDTLS_MPS_TRACE_RETURN(0);
    }

    /* Attempt to serve the request from the current fragment */
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "Serve the request from the current fragment.");

    frag_len = rd->frag_len;
    frag_fetched = end - frag_offset; /* The amount of data from the current
                                       * fragment that has already been passed
                                       * to the user. */
    frag_remaining = frag_len - frag_fetched; /* Remaining data in fragment */

    /* Check if we can serve the read request from the fragment. */
    if (frag_remaining < desired) {
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "There's not enough data in the current fragment "
                          "to serve the request.");
        /* There's not enough data in the current fragment,
         * so either just RETURN what we have or fail. */
        if (buflen == NULL) {
            if (frag_remaining > 0) {
                rd->pending = desired - frag_remaining;
                MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                                  "Remember to collect %u bytes before re-opening",
                                  (unsigned) rd->pending);
            }
            MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_OUT_OF_DATA);
        }

        desired = frag_remaining;
    }

    /* There's enough data in the current fragment to serve the
     * (potentially modified) read request. */

    frag = rd->frag;
    frag += frag_fetched;

    *buffer = frag;
    if (buflen != NULL) {
        *buflen = desired;
    }

    end += desired;
    rd->end = end;
    rd->pending = 0;
    MBEDTLS_MPS_TRACE_RETURN(0);
}

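/* Illustrative sketch (non-compiled; rd is assumed to be a reader in
 * consuming mode): the two request flavours of mbedtls_mps_reader_get().
 * With buflen == NULL the request is all-or-nothing and fails with
 * MBEDTLS_ERR_MPS_READER_OUT_OF_DATA if the data at hand is insufficient;
 * with a non-NULL buflen the reader may hand out fewer bytes than
 * requested. */
#if 0
static void example_get_flavours(mbedtls_mps_reader *rd)
{
    unsigned char *buf;
    mbedtls_mps_size_t buf_len;

    /* Exact request for 16 bytes. */
    if (mbedtls_mps_reader_get(rd, 16, &buf, NULL) == 0) {
        /* buf points to exactly 16 bytes. */
    }

    /* Best-effort request: on success, buf_len <= 16 bytes are available. */
    if (mbedtls_mps_reader_get(rd, 16, &buf, &buf_len) == 0) {
        /* buf points to buf_len bytes. */
    }
}
#endif
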
int mbedtls_mps_reader_commit(mbedtls_mps_reader *rd)
{
    mbedtls_mps_size_t end;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_commit");
    MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_consuming(
                                       rd),
                                   "mbedtls_mps_reader_commit() requires reader to be in consuming mode");

    end = rd->end;
    rd->commit = end;

    MBEDTLS_MPS_TRACE_RETURN(0);
}

int mbedtls_mps_reader_reclaim(mbedtls_mps_reader *rd,
                               int *paused)
{
    unsigned char *frag, *acc;
    mbedtls_mps_size_t pending, commit;
    mbedtls_mps_size_t acc_len, frag_offset, frag_len;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_reclaim");

    if (paused != NULL) {
        *paused = 0;
    }

    MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_consuming(
                                       rd),
                                   "mbedtls_mps_reader_reclaim() requires reader to be in consuming mode");

    frag = rd->frag;
    acc = rd->acc;
    pending = rd->pending;
    commit = rd->commit;
    frag_len = rd->frag_len;

    frag_offset = mps_reader_get_fragment_offset(rd);

    if (pending == 0) {
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "No unsatisfied read-request has been logged.");

        /* Check if there's data left to be consumed. */
        if (commit < frag_offset || commit - frag_offset < frag_len) {
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                              "There is data left to be consumed.");
            rd->end = commit;
            MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_DATA_LEFT);
        }

        rd->acc_available = 0;
        rd->acc_share.acc_remaining = 0;

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Fragment has been fully processed and committed.");
    } else {
        int overflow;

        mbedtls_mps_size_t acc_backup_offset;
        mbedtls_mps_size_t acc_backup_len;
        mbedtls_mps_size_t frag_backup_offset;
        mbedtls_mps_size_t frag_backup_len;

        mbedtls_mps_size_t backup_len;
        mbedtls_mps_size_t acc_len_needed;

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "There has been an unsatisfied read with %u bytes overhead.",
                          (unsigned) pending);

        if (acc == NULL) {
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                              "No accumulator present");
            MBEDTLS_MPS_TRACE_RETURN(
                MBEDTLS_ERR_MPS_READER_NEED_ACCUMULATOR);
        }
        acc_len = rd->acc_len;

        /* Check if the upper layer has already fetched
         * and committed the contents of the accumulator. */
        if (commit < frag_offset) {
            /* No, the accumulator is still being processed. */
            frag_backup_offset = 0;
            frag_backup_len = frag_len;
            acc_backup_offset = commit;
            acc_backup_len = frag_offset - commit;
        } else {
            /* Yes, the accumulator has already been processed. */
            frag_backup_offset = commit - frag_offset;
            frag_backup_len = frag_len - frag_backup_offset;
            acc_backup_offset = 0;
            acc_backup_len = 0;
        }

        backup_len = acc_backup_len + frag_backup_len;
        acc_len_needed = backup_len + pending;

        overflow = 0;
        overflow |= (backup_len < acc_backup_len);
        overflow |= (acc_len_needed < backup_len);

        if (overflow || acc_len < acc_len_needed) {
            /* Except for the different return code, we behave as if
             * there hadn't been a call to mbedtls_mps_reader_get()
             * since the last commit. */
            rd->end = commit;
            rd->pending = 0;
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_ERROR,
                              "The accumulator is too small to handle the backup.");
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_ERROR,
                              "* Size: %u", (unsigned) acc_len);
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_ERROR,
                              "* Needed: %u (%u + %u)",
                              (unsigned) acc_len_needed,
                              (unsigned) backup_len, (unsigned) pending);
            MBEDTLS_MPS_TRACE_RETURN(
                MBEDTLS_ERR_MPS_READER_ACCUMULATOR_TOO_SMALL);
        }

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Fragment backup: %u", (unsigned) frag_backup_len);
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Accumulator backup: %u", (unsigned) acc_backup_len);

        /* Move the uncommitted parts of the accumulator to the front
         * of the accumulator. */
        memmove(acc, acc + acc_backup_offset, acc_backup_len);

        /* Copy the uncommitted parts of the current fragment to the
         * accumulator. */
        memcpy(acc + acc_backup_len,
               frag + frag_backup_offset, frag_backup_len);

        rd->acc_available = backup_len;
        rd->acc_share.acc_remaining = pending;

        if (paused != NULL) {
            *paused = 1;
        }
    }

    rd->frag = NULL;
    rd->frag_len = 0;

    rd->commit = 0;
    rd->end = 0;
    rd->pending = 0;

    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "Final state: aa %u, al %u, ar %u",
                      (unsigned) rd->acc_available, (unsigned) rd->acc_len,
                      (unsigned) rd->acc_share.acc_remaining);
    MBEDTLS_MPS_TRACE_RETURN(0);
}

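/* Illustrative sketch of the three reclaim outcomes (non-compiled; the caller
 * is hypothetical): with everything committed and no pending request the call
 * returns 0; with uncommitted data and no pending request it returns
 * MBEDTLS_ERR_MPS_READER_DATA_LEFT and keeps the fragment; with a pending
 * request it backs up the uncommitted data into the accumulator and sets
 * *paused to 1, or fails if the accumulator is absent or too small. */
#if 0
static void example_reclaim(mbedtls_mps_reader *rd)
{
    int paused = 0;
    int ret = mbedtls_mps_reader_reclaim(rd, &paused);

    if (ret == MBEDTLS_ERR_MPS_READER_DATA_LEFT) {
        /* Consume or commit the remaining data before reclaiming again. */
    } else if (ret == 0 && paused == 1) {
        /* The next mbedtls_mps_reader_feed() call(s) must provide the data
         * needed to complete the interrupted request. */
    }
}
#endif
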
#endif /* MBEDTLS_SSL_PROTO_TLS1_3 */