/*
 * Copyright The Transfer List Library Contributors
 *
 * SPDX-License-Identifier: MIT OR GPL-2.0-or-later
 */

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

#include <logging.h>
#include <private/math_utils.h>
#include <transfer_list.h>

void transfer_list_dump(struct transfer_list_header *tl)
{
	struct transfer_list_entry *te = NULL;
	int i = 0;

	if (!tl) {
		return;
	}
	info("Dump transfer list:\n");
	info("signature 0x%x\n", tl->signature);
	info("checksum 0x%x\n", tl->checksum);
	info("version 0x%x\n", tl->version);
	info("hdr_size 0x%x\n", tl->hdr_size);
	info("alignment 0x%x\n", tl->alignment);
	info("size 0x%x\n", tl->size);
	info("max_size 0x%x\n", tl->max_size);
	info("flags 0x%x\n", tl->flags);
	while (true) {
		te = transfer_list_next(tl, te);
		if (!te) {
			break;
		}

		info("Entry %d:\n", i++);
		transfer_entry_dump(te);
	}
}

void transfer_entry_dump(struct transfer_list_entry *te)
{
	if (te) {
		info("tag_id 0x%x\n", te->tag_id);
		info("hdr_size 0x%x\n", te->hdr_size);
		info("data_size 0x%x\n", te->data_size);
		info("data_addr 0x%lx\n",
		     (unsigned long)transfer_list_entry_data(te));
	}
}

/*******************************************************************************
 * Create a transfer list in a specified reserved memory region.
 * Compliant with section 2.4.5 of the Firmware Handoff specification (v0.9).
 * Return a pointer to the created transfer list or NULL on error.
 ******************************************************************************/
struct transfer_list_header *transfer_list_init(void *addr, size_t max_size)
{
	struct transfer_list_header *tl = addr;

	if (!addr || max_size == 0) {
		return NULL;
	}

	if (!libtl_is_aligned((uintptr_t)addr,
			      1 << TRANSFER_LIST_INIT_MAX_ALIGN) ||
	    !libtl_is_aligned(max_size, 1 << TRANSFER_LIST_INIT_MAX_ALIGN) ||
	    max_size < sizeof(*tl)) {
		return NULL;
	}

	memset(tl, 0, max_size);
	tl->signature = TRANSFER_LIST_SIGNATURE;
	tl->version = TRANSFER_LIST_VERSION;
	tl->hdr_size = sizeof(*tl);
	tl->alignment = TRANSFER_LIST_INIT_MAX_ALIGN; /* initial max align */
	tl->size = sizeof(*tl); /* initial size is the size of header */
	tl->max_size = max_size;
	tl->flags = TL_FLAGS_HAS_CHECKSUM;

	transfer_list_update_checksum(tl);

	return tl;
}
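
/*
 * Example (illustrative sketch, not part of the library): initializing a
 * transfer list in a platform-reserved carveout. HANDOFF_BASE and
 * HANDOFF_SIZE are hypothetical platform-provided values.
 *
 *	struct transfer_list_header *tl;
 *
 *	tl = transfer_list_init((void *)HANDOFF_BASE, HANDOFF_SIZE);
 *	if (!tl) {
 *		// region unusable: misaligned or smaller than the header
 *	}
 */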

/*******************************************************************************
 * Relocate a transfer list to a specified reserved memory region.
 * Compliant with section 2.4.6 of the Firmware Handoff specification (v0.9).
 * Return a pointer to the relocated transfer list or NULL on error.
 ******************************************************************************/
struct transfer_list_header *
transfer_list_relocate(struct transfer_list_header *tl, void *addr,
		       size_t max_size)
{
	uintptr_t new_addr, align_mask, align_off;
	struct transfer_list_header *new_tl;
	uint32_t new_max_size;

	if (!tl || !addr || max_size == 0) {
		return NULL;
	}

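	/*
	 * Preserve the list's offset within its alignment granule so that the
	 * alignment of every entry's data is maintained after the move: keep
	 * the low bits of the old base, graft them onto the new base, and bump
	 * forward by one granule if that lands before the new region.
	 */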
	align_mask = (1 << tl->alignment) - 1;
	align_off = (uintptr_t)tl & align_mask;
	new_addr = ((uintptr_t)addr & ~align_mask) + align_off;

	if (new_addr < (uintptr_t)addr) {
		new_addr += (1 << tl->alignment);
	}

	new_max_size = max_size - (new_addr - (uintptr_t)addr);

	/* the new space is not sufficient for the tl */
	if (tl->size > new_max_size) {
		return NULL;
	}

	new_tl = (struct transfer_list_header *)new_addr;
	memmove(new_tl, tl, tl->size);
	new_tl->max_size = new_max_size;

	transfer_list_update_checksum(new_tl);

	return new_tl;
}

/*******************************************************************************
 * Verify the header of a transfer list.
 * Compliant with section 2.4.1 of the Firmware Handoff specification (v0.9).
 * Return a transfer list operation status code.
 ******************************************************************************/
enum transfer_list_ops
transfer_list_check_header(const struct transfer_list_header *tl)
{
	if (!tl) {
		return TL_OPS_NON;
	}

	if (tl->signature != TRANSFER_LIST_SIGNATURE) {
		warn("Bad transfer list signature %#" PRIx32 "\n",
		     tl->signature);
		return TL_OPS_NON;
	}

	if (!tl->max_size) {
		warn("Bad transfer list max size %#" PRIx32 "\n", tl->max_size);
		return TL_OPS_NON;
	}

	if (tl->size > tl->max_size) {
		warn("Bad transfer list size %#" PRIx32 "\n", tl->size);
		return TL_OPS_NON;
	}

	if (tl->hdr_size != sizeof(struct transfer_list_header)) {
		warn("Bad transfer list header size %#" PRIx32 "\n",
		     tl->hdr_size);
		return TL_OPS_NON;
	}

	if (!transfer_list_verify_checksum(tl)) {
		warn("Bad transfer list checksum %#" PRIx32 "\n", tl->checksum);
		return TL_OPS_NON;
	}

	if (tl->version == 0) {
		warn("Transfer list version is invalid\n");
		return TL_OPS_NON;
	} else if (tl->version == TRANSFER_LIST_VERSION) {
		info("Transfer list version is valid for all operations\n");
		return TL_OPS_ALL;
	} else if (tl->version > TRANSFER_LIST_VERSION) {
		info("Transfer list version is valid for read-only\n");
		return TL_OPS_RO;
	}

	info("Older transfer list version detected\n");
	return TL_OPS_CUS;
}
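
/*
 * Example (illustrative sketch): gating write operations on the result of the
 * header check. "tl_base" is a hypothetical pointer to a received transfer
 * list.
 *
 *	enum transfer_list_ops ops = transfer_list_check_header(tl_base);
 *
 *	if (ops == TL_OPS_ALL) {
 *		// safe to add, resize or remove entries
 *	} else if (ops == TL_OPS_RO) {
 *		// newer list version: read entries only
 *	}
 */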

/*******************************************************************************
 * Enumerate the next transfer entry.
 * Return a pointer to the next transfer entry or NULL on error.
 ******************************************************************************/
struct transfer_list_entry *transfer_list_next(struct transfer_list_header *tl,
					       struct transfer_list_entry *last)
{
	struct transfer_list_entry *te = NULL;
	uintptr_t tl_ev = 0;
	uintptr_t va = 0;
	uintptr_t ev = 0;
	size_t sz = 0;

	if (!tl) {
		return NULL;
	}

	tl_ev = (uintptr_t)tl + tl->size;

	if (last) {
		va = (uintptr_t)last;
		/* check if the total size overflows */
		if (libtl_add_overflow(last->hdr_size, last->data_size, &sz)) {
			return NULL;
		}
		/* round up to the next entry */
		if (libtl_add_with_round_up_overflow(
			    va, sz, TRANSFER_LIST_GRANULE, &va)) {
			return NULL;
		}
	} else {
		va = (uintptr_t)tl + tl->hdr_size;
	}

	te = (struct transfer_list_entry *)va;

	if (va + sizeof(*te) > tl_ev || te->hdr_size < sizeof(*te) ||
	    libtl_add_overflow(te->hdr_size, te->data_size, &sz) ||
	    libtl_add_overflow(va, sz, &ev) || ev > tl_ev) {
		return NULL;
	}

	return te;
}
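
/*
 * Example (illustrative sketch): walking every entry in a list, as
 * transfer_list_dump() does above. Passing NULL as "last" yields the first
 * entry; NULL is returned once the end of the list is reached.
 *
 *	struct transfer_list_entry *te = NULL;
 *
 *	while ((te = transfer_list_next(tl, te)) != NULL) {
 *		// inspect te->tag_id, te->data_size, ...
 *	}
 */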

/*******************************************************************************
 * Enumerate the previous transfer entry.
 * Return a pointer to the previous transfer entry or NULL on error.
 ******************************************************************************/
struct transfer_list_entry *transfer_list_prev(struct transfer_list_header *tl,
					       struct transfer_list_entry *last)
{
	struct transfer_list_entry *prev;
	struct transfer_list_entry *te = NULL;

	if (!last || !tl ||
	    ((uintptr_t)tl + tl->hdr_size == (uintptr_t)last)) {
		return NULL;
	}

	do {
		prev = te;
		te = transfer_list_next(tl, te);
	} while (te && te != last);

	return (te != NULL) ? prev : NULL;
}

/*******************************************************************************
 * Calculate the byte sum of a transfer list.
 * Return the byte sum of the transfer list.
 ******************************************************************************/
static uint8_t calc_byte_sum(const struct transfer_list_header *tl)
{
	uint8_t *b = (uint8_t *)tl;
	uint8_t cs = 0;
	size_t n = 0;

	for (n = 0; n < tl->size; n++) {
		cs += b[n];
	}

	return cs;
}

/*******************************************************************************
 * Update the checksum of a transfer list.
 * The checksum field is rewritten in place; nothing is returned.
 ******************************************************************************/
void transfer_list_update_checksum(struct transfer_list_header *tl)
{
	uint8_t cs;

	if (!tl || !(tl->flags & TL_FLAGS_HAS_CHECKSUM)) {
		return;
	}

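	/*
	 * Choose the checksum so that the byte sum of the whole list is zero
	 * modulo 256: take the current sum, remove the stale checksum byte's
	 * contribution, and store the two's-complement negation of the rest.
	 */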
	cs = calc_byte_sum(tl);
	cs -= tl->checksum;
	cs = 256 - cs;
	tl->checksum = cs;
	assert(transfer_list_verify_checksum(tl));
}

/*******************************************************************************
 * Verify the checksum of a transfer list.
 * Return true if the checksum is valid, false otherwise.
 ******************************************************************************/
bool transfer_list_verify_checksum(const struct transfer_list_header *tl)
{
	if (!tl) {
		return false;
	}

	if (!(tl->flags & TL_FLAGS_HAS_CHECKSUM)) {
		return true;
	}

	return !calc_byte_sum(tl);
}

/*******************************************************************************
 * Update the data size of a transfer entry.
 * Return true on success or false on error.
 ******************************************************************************/
bool transfer_list_set_data_size(struct transfer_list_header *tl,
				 struct transfer_list_entry *te,
				 uint32_t new_data_size)
{
	uintptr_t tl_old_ev, new_ev = 0, old_ev = 0, merge_ev, ru_new_ev;
	struct transfer_list_entry *dummy_te = NULL;
	size_t gap = 0;
	size_t mov_dis = 0;
	size_t sz = 0;

	if (!tl || !te) {
		return false;
	}
	tl_old_ev = (uintptr_t)tl + tl->size;

	/*
	 * Calculate the old and new end of the TE.
	 * Both must be rounded up to align with TRANSFER_LIST_GRANULE.
	 */
	if (libtl_add_overflow(te->hdr_size, te->data_size, &sz) ||
	    libtl_add_with_round_up_overflow((uintptr_t)te, sz,
					     TRANSFER_LIST_GRANULE, &old_ev)) {
		return false;
	}
	if (libtl_add_overflow(te->hdr_size, new_data_size, &sz) ||
	    libtl_add_with_round_up_overflow((uintptr_t)te, sz,
					     TRANSFER_LIST_GRANULE, &new_ev)) {
		return false;
	}

	if (new_ev > old_ev) {
		/*
		 * When the next transfer entry is a dummy (empty) entry:
		 * - if the TE can be extended within the bounds of the dummy
		 *   entry, extend it into the dummy entry and create a new,
		 *   smaller dummy entry after it;
		 * - otherwise, merge the dummy entry with the existing TE and
		 *   grow the transfer list by as much as is still required.
		 */
		dummy_te = transfer_list_next(tl, te);
		if (dummy_te && (dummy_te->tag_id == TL_TAG_EMPTY)) {
			merge_ev = libtl_align_up(old_ev + dummy_te->hdr_size +
						  dummy_te->data_size,
						  TRANSFER_LIST_GRANULE);
			if (merge_ev >= new_ev) {
				gap = merge_ev - new_ev;
				goto set_dummy;
			} else {
				old_ev = merge_ev;
			}
		}

		/*
		 * The move distance must be rounded up to satisfy the maximum
		 * TE data alignment, and the increased size must not exceed
		 * the maximum size of the transfer list.
		 */
		mov_dis = new_ev - old_ev;
		if (libtl_round_up_overflow(mov_dis, 1 << tl->alignment,
					    &mov_dis) ||
		    tl->size + mov_dis > tl->max_size) {
			return false;
		}
		ru_new_ev = old_ev + mov_dis;
		memmove((void *)ru_new_ev, (void *)old_ev, tl_old_ev - old_ev);
		tl->size += mov_dis;
		gap = ru_new_ev - new_ev;
	} else {
		gap = old_ev - new_ev;
	}

set_dummy:
	if (gap >= sizeof(*dummy_te)) {
		/* create a dummy TE to fill up the gap */
		dummy_te = (struct transfer_list_entry *)new_ev;
		dummy_te->tag_id = TL_TAG_EMPTY;
		dummy_te->hdr_size = sizeof(*dummy_te);
		dummy_te->data_size = gap - sizeof(*dummy_te);
	}

	te->data_size = new_data_size;

	transfer_list_update_checksum(tl);
	return true;
}
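
/*
 * Example (illustrative sketch): growing an existing entry's payload.
 * TAG_EXAMPLE is a hypothetical tag id. Growing an entry may move the entries
 * that follow it, so cached pointers into later entries should be re-fetched
 * afterwards.
 *
 *	struct transfer_list_entry *te = transfer_list_find(tl, TAG_EXAMPLE);
 *
 *	if (te && transfer_list_set_data_size(tl, te, 256)) {
 *		void *data = transfer_list_entry_data(te);
 *		// 256 bytes are now available at data
 *	}
 */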

/*******************************************************************************
 * Remove a specified transfer entry from a transfer list.
 * Return true on success or false on error.
 ******************************************************************************/
bool transfer_list_rem(struct transfer_list_header *tl,
		       struct transfer_list_entry *te)
{
	struct transfer_list_entry *prev;
	struct transfer_list_entry *next;

	if (!tl || !te || (uintptr_t)te > (uintptr_t)tl + tl->size) {
		return false;
	}

	prev = transfer_list_prev(tl, te);
	next = transfer_list_next(tl, te);

	if (prev && prev->tag_id == TL_TAG_EMPTY) {
		prev->data_size += libtl_align_up(te->hdr_size + te->data_size,
						  TRANSFER_LIST_GRANULE);
		te = prev;
	}

	if (next && next->tag_id == TL_TAG_EMPTY) {
		te->data_size +=
			libtl_align_up(next->hdr_size + next->data_size,
				       TRANSFER_LIST_GRANULE);
	}

	te->tag_id = TL_TAG_EMPTY;
	transfer_list_update_checksum(tl);
	return true;
}

/*******************************************************************************
 * Add a new transfer entry into a transfer list.
 * Compliant with section 2.4.3 of the Firmware Handoff specification (v0.9).
 * Return a pointer to the added transfer entry or NULL on error.
 ******************************************************************************/
struct transfer_list_entry *transfer_list_add(struct transfer_list_header *tl,
					      uint32_t tag_id,
					      uint32_t data_size,
					      const void *data)
{
	uintptr_t tl_ev;
	struct transfer_list_entry *te = NULL;
	uint8_t *te_data = NULL;
	uintptr_t te_end;

	if (!tl || (tag_id & (1 << 24))) {
		return NULL;
	}

	/*
	 * Skip step 1 (an optional step);
	 * the new TE is appended at the tail of the list.
	 */
	tl_ev = (uintptr_t)tl + tl->size;
	te = (struct transfer_list_entry *)libtl_align_up(
		tl_ev, TRANSFER_LIST_GRANULE);

	te_end = libtl_align_up((uintptr_t)te + sizeof(*te) + data_size,
				TRANSFER_LIST_GRANULE);

	if (te_end > (uintptr_t)tl + tl->max_size) {
		return NULL;
	}

	te->tag_id = tag_id;
	te->hdr_size = sizeof(*te);
	te->data_size = data_size;
	tl->size += te_end - tl_ev;

	if (data) {
		/* get the TE data pointer */
		te_data = transfer_list_entry_data(te);
		if (!te_data) {
			return NULL;
		}
		memmove(te_data, data, data_size);
	}

	transfer_list_update_checksum(tl);

	return te;
}
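
/*
 * Example (illustrative sketch): adding an entry that carries a small payload.
 * TAG_EXAMPLE is a hypothetical tag id; any tag and payload of the caller's
 * choosing can be used.
 *
 *	uint64_t value = 0xcafe;
 *	struct transfer_list_entry *te;
 *
 *	te = transfer_list_add(tl, TAG_EXAMPLE, sizeof(value), &value);
 *	if (!te) {
 *		// not enough room left in the transfer list
 *	}
 */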

/*******************************************************************************
 * Add a new transfer entry into a transfer list with a specified alignment
 * requirement for the new entry's data.
 * Compliant with section 2.4.4 of the Firmware Handoff specification (v0.9).
 * Return a pointer to the added transfer entry or NULL on error.
 ******************************************************************************/
struct transfer_list_entry *
transfer_list_add_with_align(struct transfer_list_header *tl, uint32_t tag_id,
			     uint32_t data_size, const void *data,
			     uint8_t alignment)
{
	struct transfer_list_entry *te = NULL;
	uintptr_t tl_ev, ev, new_tl_ev;
	size_t dummy_te_data_sz = 0;

	if (!tl) {
		return NULL;
	}

	tl_ev = (uintptr_t)tl + tl->size;
	ev = tl_ev + sizeof(struct transfer_list_entry);

	if (!libtl_is_aligned(ev, 1 << alignment)) {
		/*
		 * The TE data address is not aligned to the new alignment;
		 * fill the gap with an empty TE as a placeholder before
		 * adding the desired TE.
		 */
		new_tl_ev = libtl_align_up(ev, 1 << alignment) -
			    sizeof(struct transfer_list_entry);
		dummy_te_data_sz =
			new_tl_ev - tl_ev - sizeof(struct transfer_list_entry);
		if (!transfer_list_add(tl, TL_TAG_EMPTY, dummy_te_data_sz,
				       NULL)) {
			return NULL;
		}
	}

	te = transfer_list_add(tl, tag_id, data_size, data);

	if (alignment > tl->alignment) {
		tl->alignment = alignment;
		transfer_list_update_checksum(tl);
	}

	return te;
}

/*******************************************************************************
 * Search a transfer list for an existing transfer entry with the specified
 * tag id.
 * Return a pointer to the found transfer entry or NULL on error.
 ******************************************************************************/
struct transfer_list_entry *transfer_list_find(struct transfer_list_header *tl,
					       uint32_t tag_id)
{
	struct transfer_list_entry *te = NULL;

	do {
		te = transfer_list_next(tl, te);
	} while (te && (te->tag_id != tag_id));

	return te;
}

/*******************************************************************************
 * Retrieve the data pointer of a specified transfer entry.
 * Return a pointer to the transfer entry data or NULL on error.
 ******************************************************************************/
void *transfer_list_entry_data(struct transfer_list_entry *entry)
{
	if (!entry) {
		return NULL;
	}
	return (uint8_t *)entry + entry->hdr_size;
}
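
/*
 * Example (illustrative sketch): locating an entry by tag and reading its
 * payload. TAG_EXAMPLE is a hypothetical tag id.
 *
 *	struct transfer_list_entry *te = transfer_list_find(tl, TAG_EXAMPLE);
 *
 *	if (te) {
 *		void *data = transfer_list_entry_data(te);
 *		// te->data_size bytes are available at data
 *	}
 */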

/*******************************************************************************
 * Check whether a valid transfer list already exists at the specified memory
 * location and return it; otherwise initialize a new one there.
 *
 * Return a pointer to the transfer list or NULL on error.
 ******************************************************************************/
struct transfer_list_header *transfer_list_ensure(void *addr, size_t size)
{
	struct transfer_list_header *tl = NULL;

	if (transfer_list_check_header(addr) == TL_OPS_ALL) {
		return (struct transfer_list_header *)addr;
	}

	tl = transfer_list_init((void *)addr, size);

	return tl;
}
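
/*
 * Example (illustrative sketch): a later boot stage reusing a list handed over
 * by an earlier stage, or creating one if nothing valid is present yet.
 * HANDOFF_BASE and HANDOFF_SIZE are hypothetical platform-provided values.
 *
 *	struct transfer_list_header *tl;
 *
 *	tl = transfer_list_ensure((void *)HANDOFF_BASE, HANDOFF_SIZE);
 *	if (!tl) {
 *		// region unusable: misaligned or too small
 *	}
 */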