Hanno Becker | 13cd784 | 2021-01-12 07:08:33 +0000 | [diff] [blame] | 1 | /* |
| 2 | * Message Processing Stack, Reader implementation |
| 3 | * |
| 4 | * Copyright The Mbed TLS Contributors |
| 5 | * SPDX-License-Identifier: Apache-2.0 |
| 6 | * |
| 7 | * Licensed under the Apache License, Version 2.0 (the "License"); you may |
| 8 | * not use this file except in compliance with the License. |
| 9 | * You may obtain a copy of the License at |
| 10 | * |
| 11 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 12 | * |
| 13 | * Unless required by applicable law or agreed to in writing, software |
| 14 | * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
| 15 | * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 16 | * See the License for the specific language governing permissions and |
| 17 | * limitations under the License. |
| 18 | * |
| 19 | * This file is part of Mbed TLS (https://tls.mbed.org) |
| 20 | */ |
| 21 | |
Hanno Becker | c518c3b | 2021-01-28 07:08:08 +0000 | [diff] [blame^] | 22 | #include "mps_reader.h" |
| 23 | #include "mps_common.h" |
| 24 | #include "mps_trace.h" |
Hanno Becker | 13cd784 | 2021-01-12 07:08:33 +0000 | [diff] [blame] | 25 | |
| 26 | #include <string.h> |
| 27 | |
| 28 | #if ( defined(__ARMCC_VERSION) || defined(_MSC_VER) ) && \ |
| 29 | !defined(inline) && !defined(__cplusplus) |
| 30 | #define inline __inline |
| 31 | #endif |
| 32 | |
#if defined(MBEDTLS_MPS_TRACE)
/* Identifier used by the MPS trace module to tag trace output
 * originating from this translation unit. */
static int trace_id = TRACE_BIT_READER;
#endif /* MBEDTLS_MPS_TRACE */
| 36 | |
Hanno Becker | 13cd784 | 2021-01-12 07:08:33 +0000 | [diff] [blame] | 37 | /* |
| 38 | * GENERAL NOTE ON CODING STYLE |
| 39 | * |
| 40 | * The following code intentionally separates memory loads |
| 41 | * and stores from other operations (arithmetic or branches). |
| 42 | * This leads to the introduction of many local variables |
| 43 | * and significantly increases the C-code line count, but |
| 44 | * should not increase the size of generated assembly. |
| 45 | * |
 * The reason for this is twofold:
| 47 | * (1) It will ease verification efforts using the VST |
| 48 | * whose program logic cannot directly reason |
| 49 | * about instructions containing a load or store in |
| 50 | * addition to other operations (e.g. *p = *q or |
| 51 | * tmp = *p + 42). |
| 52 | * (2) Operating on local variables and writing the results |
| 53 | * back to the target contexts on success only |
| 54 | * allows to maintain structure invariants even |
| 55 | * on failure - this in turn has two benefits: |
| 56 | * (2.a) If for some reason an error code is not caught |
| 57 | * and operation continues, functions are nonetheless |
| 58 | * called with sane contexts, reducing the risk |
| 59 | * of dangerous behavior. |
| 60 | * (2.b) Randomized testing is easier if structures |
| 61 | * remain intact even in the face of failing |
| 62 | * and/or non-sensical calls. |
| 63 | * Moreover, it might even reduce code-size because |
| 64 | * the compiler need not write back temporary results |
| 65 | * to memory in case of failure. |
| 66 | * |
| 67 | */ |
| 68 | |
| 69 | static inline void mps_reader_zero( mbedtls_reader *rd ) |
| 70 | { |
| 71 | /* A plain memset() would likely be more efficient, |
| 72 | * but the current way of zeroing makes it harder |
| 73 | * to overlook fields which should not be zero-initialized. |
| 74 | * It's also more suitable for VF efforts since it |
| 75 | * doesn't require reasoning about structs being |
| 76 | * interpreted as unstructured binary blobs. */ |
| 77 | static mbedtls_reader const zero = |
| 78 | { .frag = NULL, |
| 79 | .frag_len = 0, |
| 80 | .commit = 0, |
| 81 | .end = 0, |
| 82 | .pending = 0, |
| 83 | .acc = NULL, |
| 84 | .acc_len = 0, |
| 85 | .acc_avail = 0, |
| 86 | .acc_share = { .acc_remaining = 0 } |
| 87 | }; |
| 88 | *rd = zero; |
| 89 | } |
| 90 | |
| 91 | int mbedtls_reader_init( mbedtls_reader *rd, |
| 92 | unsigned char *acc, |
| 93 | mbedtls_mps_size_t acc_len ) |
| 94 | { |
| 95 | TRACE_INIT( "reader_init, acc len %u", (unsigned) acc_len ); |
| 96 | mps_reader_zero( rd ); |
| 97 | rd->acc = acc; |
| 98 | rd->acc_len = acc_len; |
| 99 | RETURN( 0 ); |
| 100 | } |
| 101 | |
/* Free a reader context. This only wipes the context structure;
 * ownership of the accumulator buffer remains with the caller. */
int mbedtls_reader_free( mbedtls_reader *rd )
{
    TRACE_INIT( "reader_free" );
    mps_reader_zero( rd );
    RETURN( 0 );
}
| 108 | |
/* Feed a new fragment of incoming data to the reader, moving it from
 * producing to consuming mode.
 *
 * If the reader was paused with an unsatisfied read request, the head
 * of the new fragment is first copied into the accumulator. If even
 * then not enough data is available to satisfy the pending request,
 * MBEDTLS_ERR_MPS_READER_NEED_MORE is returned and the reader stays
 * in producing mode, awaiting another fragment.
 *
 * Returns 0 on success, MBEDTLS_ERR_MPS_READER_INVALID_ARG if
 * new_frag is NULL, or MBEDTLS_ERR_MPS_READER_NEED_MORE as above. */
int mbedtls_reader_feed( mbedtls_reader *rd,
                         unsigned char *new_frag,
                         mbedtls_mps_size_t new_frag_len )
{
    unsigned char *acc;
    mbedtls_mps_size_t copy_to_acc;
    TRACE_INIT( "reader_feed, frag %p, len %u",
                (void*) new_frag, (unsigned) new_frag_len );

    if( new_frag == NULL )
        RETURN( MBEDTLS_ERR_MPS_READER_INVALID_ARG );

    MBEDTLS_MPS_STATE_VALIDATE_RAW( rd->frag == NULL,
        "mbedtls_reader_feed() requires reader to be in producing mode" );

    acc = rd->acc;
    if( acc != NULL )
    {
        /* aa: bytes currently stored in the accumulator.
         * ar: bytes still needed to satisfy the pending request. */
        mbedtls_mps_size_t aa, ar;

        ar = rd->acc_share.acc_remaining;
        aa = rd->acc_avail;

        /* Copy at most the missing amount, capped by the fragment size. */
        copy_to_acc = ar;
        if( copy_to_acc > new_frag_len )
            copy_to_acc = new_frag_len;

        acc += aa;

        if( copy_to_acc > 0 )
            memcpy( acc, new_frag, copy_to_acc );

        TRACE( trace_comment, "Copy new data of size %u of %u into accumulator at offset %u",
               (unsigned) copy_to_acc, (unsigned) new_frag_len, (unsigned) aa );

        /* Check if, with the new fragment, we have enough data. */
        ar -= copy_to_acc;
        if( ar > 0 )
        {
            /* Need more data: record progress and stay in producing mode. */
            aa += copy_to_acc;
            rd->acc_share.acc_remaining = ar;
            rd->acc_avail = aa;
            RETURN( MBEDTLS_ERR_MPS_READER_NEED_MORE );
        }

        TRACE( trace_comment, "Enough data available to serve user request" );

        /* The fragment logically starts at the old accumulator fill level:
         * its first copy_to_acc bytes are duplicated in the accumulator,
         * so frag_offset must be set BEFORE aa is advanced. */
        rd->acc_share.frag_offset = aa;
        aa += copy_to_acc;
        rd->acc_avail = aa;
    }
    else
    {
        /* No accumulator: the fragment starts at logical offset 0. */
        rd->acc_share.frag_offset = 0;
    }

    /* Switch to consuming mode on the new fragment. */
    rd->frag = new_frag;
    rd->frag_len = new_frag_len;
    rd->commit = 0;
    rd->end = 0;
    RETURN( 0 );
}
| 172 | |
| 173 | |
/* Request a contiguous buffer of `desired` bytes from the reader.
 *
 * The request is served either from the accumulator (data backed up
 * from earlier fragments at pausing time) or from the current
 * fragment, depending on where the logical read position rd->end
 * falls. On success, *buffer points to the data. If buflen is
 * non-NULL, the reader may serve fewer bytes than requested and
 * reports the actual amount in *buflen; if buflen is NULL, the
 * request must be served in full or fails with
 * MBEDTLS_ERR_MPS_READER_OUT_OF_DATA (recording rd->pending so that
 * a subsequent reclaim can back up the partial data). */
int mbedtls_reader_get( mbedtls_reader *rd,
                        mbedtls_mps_size_t desired,
                        unsigned char **buffer,
                        mbedtls_mps_size_t *buflen )
{
    unsigned char *frag, *acc;
    mbedtls_mps_size_t end, fo, fl, frag_fetched, frag_remaining;
    TRACE_INIT( "reader_get %p, desired %u", (void*) rd, (unsigned) desired );

    frag = rd->frag;
    MBEDTLS_MPS_STATE_VALIDATE_RAW( frag != NULL,
        "mbedtls_reader_get() requires reader to be in consuming mode" );

    /* The fragment offset indicates the offset of the fragment
     * from the accumulator, if the latter is present. Use an offset
     * of \c 0 if no accumulator is used. */
    acc = rd->acc;
    if( acc == NULL )
        fo = 0;
    else
        fo = rd->acc_share.frag_offset;

    TRACE( trace_comment, "frag_off %u, end %u, acc_avail %d",
           (unsigned) fo, (unsigned) rd->end,
           acc == NULL ? -1 : (int) rd->acc_avail );

    /* Check if we're still serving from the accumulator. */
    end = rd->end;
    if( end < fo )
    {
        TRACE( trace_comment, "Serve the request from the accumulator" );
        if( fo - end < desired )
        {
            /* Illustration of supported and unsupported cases:
             *
             * - Allowed #1
             *
             *                          +-----------------------------------+
             *                          |               frag                |
             *                          +-----------------------------------+
             *
             *             end end+desired
             *              |       |
             *        +-----v-------v-------------+
             *        |          acc              |
             *        +---------------------------+
             *                          |         |
             *                     fo/frag_offset aa/acc_avail
             *
             * - Allowed #2
             *
             *                          +-----------------------------------+
             *                          |               frag                |
             *                          +-----------------------------------+
             *
             *                  end          end+desired
             *                   |                |
             *        +----------v----------------v
             *        |          acc              |
             *        +---------------------------+
             *                          |         |
             *                     fo/frag_offset aa/acc_avail
             *
             * - Not allowed #1 (could be served, but we don't actually use it):
             *
             *                      +-----------------------------------+
             *                      |               frag                |
             *                      +-----------------------------------+
             *
             *              end       end+desired
             *               |            |
             *        +------v-------------v------+
             *        |          acc              |
             *        +---------------------------+
             *                      |             |
             *                 fo/frag_offset aa/acc_avail
             *
             *
             * - Not allowed #2 (can't be served with a contiguous buffer):
             *
             *                      +-----------------------------------+
             *                      |               frag                |
             *                      +-----------------------------------+
             *
             *              end                 end + desired
             *               |                        |
             *        +------v--------------------+   v
             *        |            acc            |
             *        +---------------------------+
             *                      |             |
             *                 fo/frag_offset aa/acc_avail
             *
             * In case of Allowed #1 and #2 we're switching to serve from
             * `frag` starting from the next call to mbedtls_reader_get().
             */

            mbedtls_mps_size_t aa;
            aa = rd->acc_avail;
            if( aa - end != desired )
            {
                /* It might be possible to serve some of these situations by
                 * making additional space in the accumulator, removing those
                 * parts that have already been committed.
                 * On the other hand, this brings additional complexity and
                 * enlarges the code size, while there doesn't seem to be a use
                 * case where we don't attempt exactly the same `get` calls when
                 * resuming on a reader than what we tried before pausing it.
                 * If we believe we adhere to this restricted usage throughout
                 * the library, this check is a good opportunity to
                 * validate this. */
                RETURN( MBEDTLS_ERR_MPS_READER_INCONSISTENT_REQUESTS );
            }
        }

        /* Serve the request from the accumulator at position `end`. */
        acc += end;
        *buffer = acc;
        if( buflen != NULL )
            *buflen = desired;

        /* Advance the logical read position; the request is uncommitted
         * until mbedtls_reader_commit() is called. */
        end += desired;
        rd->end = end;
        rd->pending = 0;

        RETURN( 0 );
    }

    /* Attempt to serve the request from the current fragment */
    TRACE( trace_comment, "Serve the request from the current fragment." );

    fl = rd->frag_len;
    frag_fetched = end - fo; /* The amount of data from the current fragment
                              * that has already been passed to the user. */
    frag += frag_fetched;
    frag_remaining = fl - frag_fetched; /* Remaining data in fragment */

    /* Check if we can serve the read request from the fragment. */
    if( frag_remaining < desired )
    {
        TRACE( trace_comment, "There's not enough data in the current fragment to serve the request." );
        /* There's not enough data in the current fragment,
         * so either just RETURN what we have or fail. */
        if( buflen == NULL )
        {
            if( frag_remaining > 0 )
            {
                /* Remember the shortfall so that reclaim can size the
                 * accumulator backup and know how much more to collect. */
                rd->pending = desired - frag_remaining;
                TRACE( trace_comment, "Remember to collect %u bytes before re-opening",
                       (unsigned) rd->pending );
            }
            RETURN( MBEDTLS_ERR_MPS_READER_OUT_OF_DATA );
        }

        /* Partial reads are permitted: shrink the request. */
        desired = frag_remaining;
    }

    /* There's enough data in the current fragment to serve the
     * (potentially modified) read request. */
    *buffer = frag;
    if( buflen != NULL )
        *buflen = desired;

    end += desired;
    rd->end = end;
    rd->pending = 0;
    RETURN( 0 );
}
| 340 | |
/* Commit all data fetched so far: mark everything up to the current
 * read position rd->end as processed, so it will not be backed up or
 * re-served after a reclaim.
 *
 * If the commit point has moved past the accumulator contents, the
 * accumulator is emptied; otherwise the still-uncommitted tail of the
 * accumulator is shifted down to its start. In both cases, offsets
 * (end, frag_offset) are renormalized by the amount discarded. */
int mbedtls_reader_commit( mbedtls_reader *rd )
{
    unsigned char *acc;
    mbedtls_mps_size_t aa, end, fo, shift;
    TRACE_INIT( "reader_commit" );

    MBEDTLS_MPS_STATE_VALIDATE_RAW( rd->frag != NULL,
        "mbedtls_reader_commit() requires reader to be in consuming mode" );

    acc = rd->acc;
    end = rd->end;

    if( acc == NULL )
    {
        TRACE( trace_comment, "No accumulator, just shift end" );
        rd->commit = end;
        RETURN( 0 );
    }

    fo = rd->acc_share.frag_offset;
    if( end >= fo )
    {
        /* The read position is within the fragment: all accumulator
         * data is consumed, so discard it entirely (fo bytes). */
        TRACE( trace_comment, "Started to serve fragment, get rid of accumulator" );
        shift = fo;
        aa = 0;
    }
    else
    {
        /* Still within the accumulator: drop the committed prefix
         * (end bytes) and move the remainder to the buffer start.
         * memmove must run before aa is decremented. */
        TRACE( trace_comment, "Still serving from accumulator" );
        aa = rd->acc_avail;
        shift = end;
        memmove( acc, acc + shift, aa - shift );
        aa -= shift;
    }

    /* Renormalize offsets relative to the discarded prefix. */
    end -= shift;
    fo -= shift;

    rd->acc_share.frag_offset = fo;
    rd->acc_avail = aa;
    rd->commit = end;
    rd->end = end;

    TRACE( trace_comment, "Final state: (end=commit,fo,avail) = (%u,%u,%u)",
           (unsigned) end, (unsigned) fo, (unsigned) aa );
    RETURN( 0 );
}
| 388 | |
/* Reclaim the current fragment from the reader, returning it to
 * producing mode.
 *
 * If all data was committed and no read request failed, this simply
 * detaches the fragment. If a read request could not be satisfied
 * (rd->pending != 0), the uncommitted tail of the fragment is backed
 * up into the accumulator so that the request can be resumed once
 * more data is fed; *paused is then set to 1 (if paused != NULL).
 *
 * Returns 0 on success;
 * MBEDTLS_ERR_MPS_READER_DATA_LEFT if uncommitted data remains and no
 * request is pending; MBEDTLS_ERR_MPS_READER_NEED_ACCUMULATOR if
 * pausing is needed but no accumulator was configured;
 * MBEDTLS_ERR_MPS_READER_ACCUMULATOR_TOO_SMALL if the accumulator
 * cannot hold the backup plus the pending amount. */
int mbedtls_reader_reclaim( mbedtls_reader *rd,
                            mbedtls_mps_size_t *paused )
{
    unsigned char *frag, *acc;
    mbedtls_mps_size_t pending, commit;
    mbedtls_mps_size_t al, fo, fl;
    TRACE_INIT( "reader_reclaim" );

    if( paused != NULL )
        *paused = 0;

    frag = rd->frag;
    MBEDTLS_MPS_STATE_VALIDATE_RAW( frag != NULL,
        "mbedtls_reader_reclaim() requires reader to be in consuming mode" );

    acc = rd->acc;
    pending = rd->pending;
    commit = rd->commit;
    fl = rd->frag_len;

    if( acc == NULL )
        fo = 0;
    else
        fo = rd->acc_share.frag_offset;

    if( pending == 0 )
    {
        TRACE( trace_comment, "No unsatisfied read-request has been logged." );
        /* Check if there's data left to be consumed: either the commit
         * point is still inside the accumulator, or it hasn't reached
         * the end of the fragment. */
        if( commit < fo || commit - fo < fl )
        {
            TRACE( trace_comment, "There is data left to be consumed." );
            /* Roll the read position back to the commit point so the
             * unconsumed data can be re-fetched. */
            rd->end = commit;
            RETURN( MBEDTLS_ERR_MPS_READER_DATA_LEFT );
        }
        TRACE( trace_comment, "The fragment has been completely processed and committed." );
    }
    else
    {
        mbedtls_mps_size_t frag_backup_offset;
        mbedtls_mps_size_t frag_backup_len;
        TRACE( trace_comment, "There has been an unsatisfied read-request with %u bytes overhead.",
               (unsigned) pending );

        if( acc == NULL )
        {
            TRACE( trace_comment, "No accumulator present" );
            RETURN( MBEDTLS_ERR_MPS_READER_NEED_ACCUMULATOR );
        }
        al = rd->acc_len;

        /* Check if the upper layer has already fetched
         * and committed the contents of the accumulator. */
        if( commit < fo )
        {
            /* No, accumulator is still being processed:
             * back up the entire fragment behind the existing
             * accumulator data. */
            int overflow;
            TRACE( trace_comment, "Still processing data from the accumulator" );

            /* Guard against wraparound in fo + fl + pending before
             * using it as a size bound. */
            overflow =
                ( fo + fl < fo ) || ( fo + fl + pending < fo + fl );
            if( overflow || al < fo + fl + pending )
            {
                rd->end = commit;
                rd->pending = 0;
                TRACE( trace_error, "The accumulator is too small to handle the backup." );
                TRACE( trace_error, "* Remaining size: %u", (unsigned) al );
                TRACE( trace_error, "* Needed: %u (%u + %u + %u)",
                       (unsigned) ( fo + fl + pending ),
                       (unsigned) fo, (unsigned) fl, (unsigned) pending );
                RETURN( MBEDTLS_ERR_MPS_READER_ACCUMULATOR_TOO_SMALL );
            }
            frag_backup_offset = 0;
            frag_backup_len = fl;
        }
        else
        {
            /* Yes, the accumulator is already processed:
             * only the uncommitted tail of the fragment needs backup.
             * NOTE(review): in this branch fo appears to always be 0
             * (commit() resets it when the commit point crosses the
             * accumulator), making commit the in-fragment offset. */
            int overflow;
            TRACE( trace_comment, "The accumulator has already been processed" );

            frag_backup_offset = commit;
            frag_backup_len = fl - commit;
            overflow = ( frag_backup_len + pending < pending );

            if( overflow ||
                al - fo < frag_backup_len + pending )
            {
                rd->end = commit;
                rd->pending = 0;
                TRACE( trace_error, "The accumulator is too small to handle the backup." );
                TRACE( trace_error, "* Remaining size: %u", (unsigned) ( al - fo ) );
                TRACE( trace_error, "* Needed: %u (%u + %u)",
                       (unsigned) ( frag_backup_len + pending ),
                       (unsigned) frag_backup_len, (unsigned) pending );
                RETURN( MBEDTLS_ERR_MPS_READER_ACCUMULATOR_TOO_SMALL );
            }
        }

        /* Copy the selected fragment range behind the accumulator data. */
        frag += frag_backup_offset;
        acc += fo;
        memcpy( acc, frag, frag_backup_len );

        TRACE( trace_comment, "Backup %u bytes into accumulator",
               (unsigned) frag_backup_len );

        rd->acc_avail = fo + frag_backup_len;
        rd->acc_share.acc_remaining = pending;

        if( paused != NULL )
            *paused = 1;
    }

    /* Return to producing mode with a clean per-fragment state. */
    rd->frag = NULL;
    rd->frag_len = 0;

    rd->commit = 0;
    rd->end = 0;
    rd->pending = 0;

    TRACE( trace_comment, "Final state: aa %u, al %u, ar %u",
           (unsigned) rd->acc_avail, (unsigned) rd->acc_len,
           (unsigned) rd->acc_share.acc_remaining );
    RETURN( 0 );
}