/*
 * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <errno.h>
#include <object_pool.h>
#include <platform_def.h>
#include <platform.h>
#include <sp_res_desc.h>
#include <spm_svc.h>
#include <string.h>
#include <utils.h>
#include <xlat_tables_v2.h>

#include "spm_private.h"
#include "spm_shim_private.h"

/*******************************************************************************
 * Instantiation of translation table context
 ******************************************************************************/

/* Place translation tables by default along with the ones used by BL31. */
#ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
#define PLAT_SP_IMAGE_XLAT_SECTION_NAME "xlat_table"
#endif

/* Allocate and initialise the translation context for the secure partitions. */
REGISTER_XLAT_CONTEXT2(sp,
                       PLAT_SP_IMAGE_MMAP_REGIONS,
                       PLAT_SP_IMAGE_MAX_XLAT_TABLES,
                       PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE,
                       EL1_EL0_REGIME, PLAT_SP_IMAGE_XLAT_SECTION_NAME);
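
/*
 * Note: REGISTER_XLAT_CONTEXT2() derives the name of the translation context
 * it defines from its first argument, so the call above creates "sp_xlat_ctx",
 * the object returned by spm_get_sp_xlat_context() below.
 */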

/* Lock used for SP_MEMORY_ATTRIBUTES_GET and SP_MEMORY_ATTRIBUTES_SET */
static spinlock_t mem_attr_smc_lock;

/* Get handle of Secure Partition translation context */
xlat_ctx_t *spm_get_sp_xlat_context(void)
{
        return &sp_xlat_ctx;
}

/*******************************************************************************
 * Functions to allocate memory for regions.
 ******************************************************************************/

/*
 * The region with base PLAT_SPM_HEAP_BASE and size PLAT_SPM_HEAP_SIZE is
 * reserved for SPM to use as a heap from which to allocate the memory regions
 * of Secure Partitions. This is only done at boot.
 */
static OBJECT_POOL(spm_heap_mem, (void *)PLAT_SPM_HEAP_BASE, 1U,
                   PLAT_SPM_HEAP_SIZE);

static uintptr_t spm_alloc_heap(size_t size)
{
        return (uintptr_t)pool_alloc_n(&spm_heap_mem, size);
}
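
/*
 * Note: pool_alloc_n() from object_pool.h is expected to panic rather than
 * return NULL when the pool is exhausted, which is why callers of
 * spm_alloc_heap() do not check the returned address for failure.
 */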

/*******************************************************************************
 * Functions to map memory regions described in the resource description.
 ******************************************************************************/
static unsigned int rdmem_attr_to_mmap_attr(uint32_t attr)
{
        unsigned int index = attr & RD_MEM_MASK;

        const unsigned int mmap_attr_arr[8] = {
                MT_DEVICE | MT_RW | MT_SECURE,  /* RD_MEM_DEVICE */
                MT_CODE | MT_SECURE,            /* RD_MEM_NORMAL_CODE */
                MT_MEMORY | MT_RW | MT_SECURE,  /* RD_MEM_NORMAL_DATA */
                MT_MEMORY | MT_RW | MT_SECURE,  /* RD_MEM_NORMAL_BSS */
                MT_RO_DATA | MT_SECURE,         /* RD_MEM_NORMAL_RODATA */
                MT_MEMORY | MT_RW | MT_SECURE,  /* RD_MEM_NORMAL_SPM_SP_SHARED_MEM */
                MT_MEMORY | MT_RW | MT_SECURE,  /* RD_MEM_NORMAL_CLIENT_SHARED_MEM */
                MT_MEMORY | MT_RW | MT_SECURE   /* RD_MEM_NORMAL_MISCELLANEOUS */
        };

        if (index >= ARRAY_SIZE(mmap_attr_arr)) {
                ERROR("Unsupported RD memory attributes 0x%x\n", attr);
                panic();
        }

        return mmap_attr_arr[index];
}
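
/*
 * For example, assuming the RD_MEM_* constants follow the order of the array
 * above (as the per-entry comments suggest), a region tagged
 * RD_MEM_NORMAL_CODE converts to MT_CODE | MT_SECURE: a read-only, executable
 * mapping of secure memory.
 */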

/*
 * The data provided in the resource description structure is not directly
 * compatible with a mmap_region structure. This function handles the
 * conversion and maps the resulting region in the translation context of the
 * Secure Partition.
 */
static void map_rdmem(sp_context_t *sp_ctx, struct sp_rd_sect_mem_region *rdmem)
{
        int rc;
        mmap_region_t mmap;

        /* Location of the SP image */
        uintptr_t sp_size = sp_ctx->image_size;
        uintptr_t sp_base_va = sp_ctx->rd.attribute.load_address;
        unsigned long long sp_base_pa = sp_ctx->image_base;

        /* Location of the memory region to map */
        size_t rd_size = rdmem->size;
        uintptr_t rd_base_va = rdmem->base;
        unsigned long long rd_base_pa;

        unsigned int memtype = rdmem->attr & RD_MEM_MASK;

        VERBOSE("Adding memory region '%s'\n", rdmem->name);

        mmap.granularity = REGION_DEFAULT_GRANULARITY;

        /* Check whether the RD region lies completely outside the SP image */
        int is_outside = (rd_base_va + rd_size <= sp_base_va) ||
                         (sp_base_va + sp_size <= rd_base_va);

        /* Set to 1 if this region needs to be zeroed */
        int zero_region = 0;

        switch (memtype) {
        case RD_MEM_DEVICE:
                /* Device regions are mapped 1:1 */
                rd_base_pa = rd_base_va;
                break;

        case RD_MEM_NORMAL_CODE:
        case RD_MEM_NORMAL_RODATA:
        {
                if (is_outside == 1) {
                        ERROR("Code and rodata sections must be fully contained in the image.\n");
                        panic();
                }

                /* Get offset into the image */
                rd_base_pa = sp_base_pa + rd_base_va - sp_base_va;
                break;
        }
        case RD_MEM_NORMAL_DATA:
        {
                if (is_outside == 1) {
                        ERROR("Data sections must be fully contained in the image.\n");
                        panic();
                }

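                /*
                 * A fresh copy of the data section is placed in SPM heap
                 * memory (presumably so that the original image contents are
                 * left untouched at run time): the heap memory is mapped
                 * temporarily at EL3, filled with the initial data from the
                 * image, and then unmapped again.
                 */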
                rd_base_pa = spm_alloc_heap(rd_size);

                /* Get offset into the image */
                void *img_pa = (void *)(sp_base_pa + rd_base_va - sp_base_va);

                VERBOSE("  Copying data from %p to 0x%llx\n", img_pa, rd_base_pa);

                /* Map destination */
                rc = mmap_add_dynamic_region(rd_base_pa, rd_base_pa,
                                rd_size, MT_MEMORY | MT_RW | MT_SECURE);
                if (rc != 0) {
                        ERROR("Unable to map data region at EL3: %d\n", rc);
                        panic();
                }

                /* Copy original data to destination */
                memcpy((void *)rd_base_pa, img_pa, rd_size);

                /* Unmap destination region */
                rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
                if (rc != 0) {
                        ERROR("Unable to remove data region at EL3: %d\n", rc);
                        panic();
                }

                break;
        }
        case RD_MEM_NORMAL_MISCELLANEOUS:
                /* Allow SPM to change the attributes of the region. */
                mmap.granularity = PAGE_SIZE;
                rd_base_pa = spm_alloc_heap(rd_size);
                zero_region = 1;
                break;

        case RD_MEM_NORMAL_SPM_SP_SHARED_MEM:
                if ((sp_ctx->spm_sp_buffer_base != 0) ||
                    (sp_ctx->spm_sp_buffer_size != 0)) {
                        ERROR("A partition must have only one SPM<->SP buffer.\n");
                        panic();
                }
                rd_base_pa = spm_alloc_heap(rd_size);
                zero_region = 1;
                /* Save location of this buffer, it is needed by SPM */
                sp_ctx->spm_sp_buffer_base = rd_base_pa;
                sp_ctx->spm_sp_buffer_size = rd_size;
                break;

        case RD_MEM_NORMAL_CLIENT_SHARED_MEM:
                /* Fallthrough */
        case RD_MEM_NORMAL_BSS:
                rd_base_pa = spm_alloc_heap(rd_size);
                zero_region = 1;
                break;

        default:
                panic();
        }

        mmap.base_pa = rd_base_pa;
        mmap.base_va = rd_base_va;
        mmap.size = rd_size;

        /* Only S-EL0 mappings supported for now */
        mmap.attr = rdmem_attr_to_mmap_attr(rdmem->attr) | MT_USER;

        VERBOSE("  VA: 0x%lx PA: 0x%llx (0x%lx, attr: 0x%x)\n",
                mmap.base_va, mmap.base_pa, mmap.size, mmap.attr);

        /* Map region in the context of the Secure Partition */
        mmap_add_region_ctx(sp_ctx->xlat_ctx_handle, &mmap);

        if (zero_region == 1) {
                VERBOSE("  Zeroing region...\n");

                rc = mmap_add_dynamic_region(mmap.base_pa, mmap.base_pa,
                                mmap.size, MT_MEMORY | MT_RW | MT_SECURE);
                if (rc != 0) {
                        ERROR("Unable to map memory at EL3 to zero: %d\n",
                              rc);
                        panic();
                }

                zeromem((void *)mmap.base_pa, mmap.size);

                /*
                 * Unmap destination region unless it is the SPM<->SP buffer,
                 * which SPM needs to keep mapped.
                 */
                if (memtype != RD_MEM_NORMAL_SPM_SP_SHARED_MEM) {
                        rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
                        if (rc != 0) {
                                ERROR("Unable to remove region at EL3: %d\n", rc);
                                panic();
                        }
                }
        }
}

void sp_map_memory_regions(sp_context_t *sp_ctx)
{
        /* This region contains the exception vectors used at S-EL1. */
        const mmap_region_t sel1_exception_vectors =
                MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
                                SPM_SHIM_EXCEPTIONS_SIZE,
                                MT_CODE | MT_SECURE | MT_PRIVILEGED);

        mmap_add_region_ctx(sp_ctx->xlat_ctx_handle,
                            &sel1_exception_vectors);

        struct sp_rd_sect_mem_region *rdmem;

        for (rdmem = sp_ctx->rd.mem_region; rdmem != NULL; rdmem = rdmem->next) {
                map_rdmem(sp_ctx, rdmem);
        }

        init_xlat_tables_ctx(sp_ctx->xlat_ctx_handle);
}
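
/*
 * Note: mmap_add_region_ctx() only records each region in the context's mmap
 * array; the translation tables themselves are not populated until the final
 * init_xlat_tables_ctx() call, so all regions of a partition are validated
 * and committed together.
 */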

/*******************************************************************************
 * Functions to manipulate memory regions
 ******************************************************************************/

/*
 * Attributes are encoded using a different format in the SMC interface than in
 * the Trusted Firmware, where the mmap_attr_t enum type is used. This function
 * converts an attributes value from the SMC format to the mmap_attr_t format by
 * setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and MT_EXECUTE/MT_EXECUTE_NEVER.
 * The other fields are left as 0 because they are ignored by the function
 * xlat_change_mem_attributes_ctx().
 */
static unsigned int smc_attr_to_mmap_attr(unsigned int attributes)
{
        unsigned int tf_attr = 0U;

        unsigned int access = (attributes & SP_MEMORY_ATTRIBUTES_ACCESS_MASK)
                              >> SP_MEMORY_ATTRIBUTES_ACCESS_SHIFT;

        if (access == SP_MEMORY_ATTRIBUTES_ACCESS_RW) {
                tf_attr |= MT_RW | MT_USER;
        } else if (access == SP_MEMORY_ATTRIBUTES_ACCESS_RO) {
                tf_attr |= MT_RO | MT_USER;
        } else {
                /* Other values are reserved. */
                assert(access == SP_MEMORY_ATTRIBUTES_ACCESS_NOACCESS);
                /* The only requirement is that there's no access from EL0 */
                tf_attr |= MT_RO | MT_PRIVILEGED;
        }

        if ((attributes & SP_MEMORY_ATTRIBUTES_NON_EXEC) == 0) {
                tf_attr |= MT_EXECUTE;
        } else {
                tf_attr |= MT_EXECUTE_NEVER;
        }

        return tf_attr;
}
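
/*
 * For example, an SMC attributes value with the access field set to
 * SP_MEMORY_ATTRIBUTES_ACCESS_RW and the SP_MEMORY_ATTRIBUTES_NON_EXEC flag
 * set converts to MT_RW | MT_USER | MT_EXECUTE_NEVER: read-write,
 * non-executable memory accessible from S-EL0.
 */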

/*
 * This function converts attributes from the Trusted Firmware format into the
 * SMC interface format.
 */
static unsigned int smc_mmap_to_smc_attr(unsigned int attr)
{
        unsigned int smc_attr = 0U;

        unsigned int data_access;

        if ((attr & MT_USER) == 0) {
                /* No access from EL0. */
                data_access = SP_MEMORY_ATTRIBUTES_ACCESS_NOACCESS;
        } else {
                if ((attr & MT_RW) != 0) {
                        assert(MT_TYPE(attr) != MT_DEVICE);
                        data_access = SP_MEMORY_ATTRIBUTES_ACCESS_RW;
                } else {
                        data_access = SP_MEMORY_ATTRIBUTES_ACCESS_RO;
                }
        }

        smc_attr |= (data_access & SP_MEMORY_ATTRIBUTES_ACCESS_MASK)
                    << SP_MEMORY_ATTRIBUTES_ACCESS_SHIFT;

        if ((attr & MT_EXECUTE_NEVER) != 0U) {
                smc_attr |= SP_MEMORY_ATTRIBUTES_NON_EXEC;
        }

        return smc_attr;
}
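
/*
 * Note: the two conversions above are not exact inverses. The SMC format only
 * encodes data access permissions and executability, so the memory type and
 * security state of a mapping are dropped by smc_mmap_to_smc_attr().
 */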

int32_t spm_memory_attributes_get_smc_handler(sp_context_t *sp_ctx,
                                              uintptr_t base_va)
{
        uint32_t attributes;

        spin_lock(&mem_attr_smc_lock);

        int rc = xlat_get_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
                                             base_va, &attributes);

        spin_unlock(&mem_attr_smc_lock);

        /* Convert error codes of xlat_get_mem_attributes_ctx() into SPM ones. */
        assert((rc == 0) || (rc == -EINVAL));

        if (rc == 0) {
                return (int32_t) smc_mmap_to_smc_attr(attributes);
        } else {
                return SPM_INVALID_PARAMETER;
        }
}

int spm_memory_attributes_set_smc_handler(sp_context_t *sp_ctx,
                                          u_register_t page_address,
                                          u_register_t pages_count,
                                          u_register_t smc_attributes)
{
        uintptr_t base_va = (uintptr_t) page_address;
        size_t size = (size_t) (pages_count * PAGE_SIZE);
        uint32_t attributes = (uint32_t) smc_attributes;

        INFO("  Start address  : 0x%lx\n", base_va);
        INFO("  Number of pages: %i (%zu bytes)\n", (int) pages_count, size);
        INFO("  Attributes     : 0x%x\n", attributes);

        spin_lock(&mem_attr_smc_lock);

        int ret = xlat_change_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
                                                 base_va, size,
                                                 smc_attr_to_mmap_attr(attributes));

        spin_unlock(&mem_attr_smc_lock);

        /* Convert error codes of xlat_change_mem_attributes_ctx() into SPM ones. */
        assert((ret == 0) || (ret == -EINVAL));

        return (ret == 0) ? SPM_SUCCESS : SPM_INVALID_PARAMETER;
}