/*
 * Copyright (c) 2014-2021, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef CPU_DATA_H
#define CPU_DATA_H

#include <platform_def.h>	/* CACHE_WRITEBACK_GRANULE required */

#include <bl31/ehf.h>

/* Size of psci_cpu_data structure */
#define PSCI_CPU_DATA_SIZE		12

#ifdef __aarch64__

/* 8-byte aligned size of psci_cpu_data structure */
#define PSCI_CPU_DATA_SIZE_ALIGNED	((PSCI_CPU_DATA_SIZE + 7) & ~7)

#if ENABLE_RME
/* Size of cpu_context array */
#define CPU_DATA_CONTEXT_NUM		3
/* Offset of cpu_ops_ptr, size 8 bytes */
#define CPU_DATA_CPU_OPS_PTR		0x18
#else /* ENABLE_RME */
#define CPU_DATA_CONTEXT_NUM		2
#define CPU_DATA_CPU_OPS_PTR		0x10
#endif /* ENABLE_RME */
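
/*
 * Note: on AArch64, CPU_DATA_CPU_OPS_PTR is simply CPU_DATA_CONTEXT_NUM * 8,
 * i.e. the size of the cpu_context pointer array that precedes cpu_ops_ptr
 * in cpu_data_t below (3 * 8 = 0x18 with RME, 2 * 8 = 0x10 without).
 */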

#if ENABLE_PAUTH
/* 8-byte aligned offset of apiakey[2], size 16 bytes */
#define	CPU_DATA_APIAKEY_OFFSET		(0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \
					 + CPU_DATA_CPU_OPS_PTR)
#define CPU_DATA_CRASH_BUF_OFFSET	(0x10 + CPU_DATA_APIAKEY_OFFSET)
#else /* ENABLE_PAUTH */
#define CPU_DATA_CRASH_BUF_OFFSET	(0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \
					 + CPU_DATA_CPU_OPS_PTR)
#endif /* ENABLE_PAUTH */

/* The crash buffer needs enough space to save 8 registers (8 x 8 bytes) */
#define CPU_DATA_CRASH_BUF_SIZE		64

#else	/* !__aarch64__ */

#if CRASH_REPORTING
#error "Crash reporting is not supported in AArch32"
#endif
#define CPU_DATA_CPU_OPS_PTR		0x0
#define CPU_DATA_CRASH_BUF_OFFSET	(0x4 + PSCI_CPU_DATA_SIZE)

#endif	/* __aarch64__ */

#if CRASH_REPORTING
#define CPU_DATA_CRASH_BUF_END	(CPU_DATA_CRASH_BUF_OFFSET + \
				 CPU_DATA_CRASH_BUF_SIZE)
#else
#define CPU_DATA_CRASH_BUF_END	CPU_DATA_CRASH_BUF_OFFSET
#endif

/* cpu_data size is the data size rounded up to the platform cache line size */
#define CPU_DATA_SIZE		(((CPU_DATA_CRASH_BUF_END + \
				CACHE_WRITEBACK_GRANULE - 1) / \
				CACHE_WRITEBACK_GRANULE) * \
				CACHE_WRITEBACK_GRANULE)
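
/*
 * Worked example (hypothetical values, for illustration only): with
 * CPU_DATA_CRASH_BUF_END at 0x90 and a 64-byte CACHE_WRITEBACK_GRANULE,
 * (0x90 + 63) / 64 = 3, so CPU_DATA_SIZE rounds up to 3 cache lines (0xc0).
 */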
67
dp-arm872be882016-09-19 11:18:44 +010068#if ENABLE_RUNTIME_INSTRUMENTATION
69/* Temporary space to store PMF timestamps from assembly code */
70#define CPU_DATA_PMF_TS_COUNT 1
71#define CPU_DATA_PMF_TS0_OFFSET CPU_DATA_CRASH_BUF_END
72#define CPU_DATA_PMF_TS0_IDX 0
Soby Mathew626ed512014-06-25 10:07:40 +010073#endif
Soby Mathew9b476842014-08-14 11:33:56 +010074
Julius Wernerd5dfdeb2019-07-09 13:49:11 -070075#ifndef __ASSEMBLER__
Andrew Thoelke5e910072014-06-02 11:40:35 +010076
Zelalem Awekec5ea4f82021-07-09 17:54:30 -050077#include <assert.h>
78#include <stdint.h>
79
Andrew Thoelke5e910072014-06-02 11:40:35 +010080#include <arch_helpers.h>
Antonio Nino Diaz09d40e02018-12-14 00:18:21 +000081#include <lib/cassert.h>
82#include <lib/psci/psci.h>
Zelalem Awekec5ea4f82021-07-09 17:54:30 -050083
Andrew Thoelke5e910072014-06-02 11:40:35 +010084#include <platform_def.h>
Andrew Thoelke5e910072014-06-02 11:40:35 +010085
Soby Mathew8c5fe0b2015-01-08 18:02:19 +000086/* Offsets for the cpu_data structure */
87#define CPU_DATA_PSCI_LOCK_OFFSET __builtin_offsetof\
88 (cpu_data_t, psci_svc_cpu_data.pcpu_bakery_info)
89
90#if PLAT_PCPU_DATA_SIZE
91#define CPU_DATA_PLAT_PCPU_OFFSET __builtin_offsetof\
92 (cpu_data_t, platform_cpu_data)
93#endif
94
Zelalem Awekec5ea4f82021-07-09 17:54:30 -050095typedef enum context_pas {
96 CPU_CONTEXT_SECURE = 0,
97 CPU_CONTEXT_NS,
98#if ENABLE_RME
99 CPU_CONTEXT_REALM,
100#endif
101 CPU_CONTEXT_NUM
102} context_pas_t;
103
Andrew Thoelke5e910072014-06-02 11:40:35 +0100104/*******************************************************************************
105 * Function & variable prototypes
106 ******************************************************************************/
107
/*******************************************************************************
 * Cache of frequently used per-cpu data:
 *   Pointers to non-secure, realm, and secure security state contexts
 *   Address of the crash stack
 * It is aligned to the cache line boundary to allow efficient concurrent
 * manipulation of these pointers on different CPUs.
 *
 * The data structure and the _cpu_data accessors should not be used directly
 * by components that have per-cpu members. The member access macros should be
 * used for this.
 ******************************************************************************/
typedef struct cpu_data {
#ifdef __aarch64__
	void *cpu_context[CPU_DATA_CONTEXT_NUM];
#endif /* __aarch64__ */
	uintptr_t cpu_ops_ptr;
	struct psci_cpu_data psci_svc_cpu_data;
#if ENABLE_PAUTH
	uint64_t apiakey[2];
#endif
#if CRASH_REPORTING
	u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
#endif
#if ENABLE_RUNTIME_INSTRUMENTATION
	uint64_t cpu_data_pmf_ts[CPU_DATA_PMF_TS_COUNT];
#endif
#if PLAT_PCPU_DATA_SIZE
	uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE];
#endif
#if defined(IMAGE_BL31) && EL3_EXCEPTION_HANDLING
	pe_exc_data_t ehf_data;
#endif
} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;

extern cpu_data_t percpu_data[PLATFORM_CORE_COUNT];

#ifdef __aarch64__
CASSERT(CPU_DATA_CONTEXT_NUM == CPU_CONTEXT_NUM,
	assert_cpu_data_context_num_mismatch);
#endif

#if ENABLE_PAUTH
CASSERT(CPU_DATA_APIAKEY_OFFSET == __builtin_offsetof
	(cpu_data_t, apiakey),
	assert_cpu_data_pauth_stack_offset_mismatch);
#endif

#if CRASH_REPORTING
/* verify assembler offsets match data structures */
CASSERT(CPU_DATA_CRASH_BUF_OFFSET == __builtin_offsetof
	(cpu_data_t, crash_buf),
	assert_cpu_data_crash_stack_offset_mismatch);
#endif

CASSERT(CPU_DATA_SIZE == sizeof(cpu_data_t),
	assert_cpu_data_size_mismatch);

CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof
	(cpu_data_t, cpu_ops_ptr),
	assert_cpu_data_cpu_ops_ptr_offset_mismatch);

#if ENABLE_RUNTIME_INSTRUMENTATION
CASSERT(CPU_DATA_PMF_TS0_OFFSET == __builtin_offsetof
	(cpu_data_t, cpu_data_pmf_ts[0]),
	assert_cpu_data_pmf_ts0_offset_mismatch);
#endif

struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);

#ifdef __aarch64__
/*
 * Return the cpu_data structure for the current CPU. On AArch64, tpidr_el3
 * holds the pointer to this CPU's entry; it is set up by init_cpu_data_ptr().
 */
static inline struct cpu_data *_cpu_data(void)
{
	return (cpu_data_t *)read_tpidr_el3();
}
#else
struct cpu_data *_cpu_data(void);
#endif

/*
 * Returns the index into the cpu_context array for the given security state.
 * All accesses to cpu_context should go through this helper to make sure
 * they are not out-of-bounds. The function assumes security_state is valid.
 */
static inline context_pas_t get_cpu_context_index(uint32_t security_state)
{
	if (security_state == SECURE) {
		return CPU_CONTEXT_SECURE;
	} else {
#if ENABLE_RME
		if (security_state == NON_SECURE) {
			return CPU_CONTEXT_NS;
		} else {
			assert(security_state == REALM);
			return CPU_CONTEXT_REALM;
		}
#else
		assert(security_state == NON_SECURE);
		return CPU_CONTEXT_NS;
#endif
	}
}
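
/*
 * Illustrative usage (not a definition from this file): on AArch64 the per-CPU
 * context pointer for a security state is typically reached through the
 * accessor macros below together with this helper, e.g.
 *
 *   void *ctx = get_cpu_data(cpu_context[get_cpu_context_index(NON_SECURE)]);
 *
 * which resolves to _cpu_data()->cpu_context[CPU_CONTEXT_NS].
 */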

/**************************************************************************
 * APIs for initialising and accessing per-cpu data
 *************************************************************************/

void init_cpu_data_ptr(void);
void init_cpu_ops(void);

#define get_cpu_data(_m)		   _cpu_data()->_m
#define set_cpu_data(_m, _v)		   _cpu_data()->_m = (_v)
#define get_cpu_data_by_index(_ix, _m)	   _cpu_data_by_index(_ix)->_m
#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = (_v)
/* ((cpu_data_t *)0)->_m is a dummy to get the sizeof the struct member _m */
#define flush_cpu_data(_m)	flush_dcache_range((uintptr_t)	  \
					&(_cpu_data()->_m), \
					sizeof(((cpu_data_t *)0)->_m))
#define inv_cpu_data(_m)	inv_dcache_range((uintptr_t)	  \
					&(_cpu_data()->_m), \
					sizeof(((cpu_data_t *)0)->_m))
#define flush_cpu_data_by_index(_ix, _m)	\
	flush_dcache_range((uintptr_t)	  \
		&(_cpu_data_by_index(_ix)->_m),  \
		sizeof(((cpu_data_t *)0)->_m))
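
/*
 * Illustrative usage of the accessors above ('idx' and 'ops' are hypothetical
 * locals, not part of this interface):
 *
 *   set_cpu_data(cpu_ops_ptr, (uintptr_t)ops);      - store on the current CPU
 *   ops = get_cpu_data_by_index(idx, cpu_ops_ptr);  - read another CPU's copy
 *   flush_cpu_data(cpu_ops_ptr);                    - write the member back to
 *                                                     memory
 */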

#endif /* __ASSEMBLER__ */
#endif /* CPU_DATA_H */