/*
 * Copyright (c) 2015-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl2u_entrypoint)

MEMORY {
    RAM (rwx): ORIGIN = BL2U_BASE, LENGTH = BL2U_LIMIT - BL2U_BASE
}

SECTIONS {
    RAM_REGION_START = ORIGIN(RAM);
    RAM_REGION_LENGTH = LENGTH(RAM);
    . = BL2U_BASE;

    ASSERT(. == ALIGN(PAGE_SIZE),
           "BL2U_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;

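        /*
         * Place the entry point code first so that it is located at the very
         * start of the image, i.e. at BL2U_BASE.
         */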
        *bl2u_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(.vectors)
        __TEXT_END_UNALIGNED__ = .;

        . = ALIGN(PAGE_SIZE);

        __TEXT_END__ = .;
    } >RAM

    /* .ARM.extab and .ARM.exidx are only added because Clang needs them */
    .ARM.extab . : {
        *(.ARM.extab* .gnu.linkonce.armextab.*)
    } >RAM

    .ARM.exidx . : {
        *(.ARM.exidx* .gnu.linkonce.armexidx.*)
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;
        *(SORT_BY_ALIGNMENT(.rodata*))

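        /*
         * RODATA_COMMON collects the read-only data common to all BL images;
         * it is provided by common/bl_common.ld.h.
         */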
        RODATA_COMMON

        __RODATA_END_UNALIGNED__ = .;
        . = ALIGN(PAGE_SIZE);
        __RODATA_END__ = .;
    } >RAM
#else /* SEPARATE_CODE_AND_RODATA */
    .ro . : {
        __RO_START__ = .;

        *bl2u_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        RODATA_COMMON

        *(.vectors)

        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable. No RW data from the next section must creep in. Ensure
         * that the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);

        __RO_END__ = .;
    } >RAM
#endif /* SEPARATE_CODE_AND_RODATA */

    __RW_START__ = .;

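    /*
     * The layouts of the data, stack, bss and translation table sections are
     * the common ones provided by common/bl_common.ld.h.
     */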
    DATA_SECTION >RAM
    STACK_SECTION >RAM
    BSS_SECTION >RAM
    XLAT_TABLE_SECTION >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned to
     * guarantee that the coherent data are stored on their own pages and are
     * not mixed with normal data. This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;
        *(.tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as device
         * memory. No other unexpected data must creep in. Ensure the rest of
         * the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);

        __COHERENT_RAM_END__ = .;
    } >RAM
#endif /* USE_COHERENT_MEM */

    __RW_END__ = .;
    __BL2U_END__ = .;

    __BSS_SIZE__ = SIZEOF(.bss);

    ASSERT(. <= BL2U_LIMIT, "BL2U image has exceeded its limit.")
    RAM_REGION_END = .;
}