/*
 * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef CPU_MACROS_S
#define CPU_MACROS_S

#include <assert_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>

	/*
	 * Write given expressions as quad words
	 *
	 * _count:
	 *	Write at least _count quad words. If the given number of
	 *	expressions is less than _count, repeat the last expression to
	 *	fill _count quad words in total
	 * _rest:
	 *	Optional list of expressions. _this is for parameter extraction
	 *	only, and has no significance to the caller
	 *
	 * Invoked as:
	 *	fill_constants 2, foo, bar, blah, ...
	 */
	.macro fill_constants _count:req, _this, _rest:vararg
	  .ifgt \_count
	    /* Write the current expression */
	    .ifb \_this
	      .error "Nothing to fill"
	    .endif
	    .quad \_this

	    /* Invoke recursively for remaining expressions */
	    .ifnb \_rest
	      fill_constants \_count-1, \_rest
	    .else
	      fill_constants \_count-1, \_this
	    .endif
	  .endif
	.endm

	/*
	 * Declare CPU operations
	 *
	 * _name:
	 *	Name of the CPU for which operations are being specified
	 * _midr:
	 *	Numeric value expected to be read from the CPU's MIDR
	 * _resetfunc:
	 *	Reset function for the CPU.
	 * _extra1:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2017-5715 needs to be applied or not.
	 * _extra2:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a function to disable the
	 *	workaround for CVE-2018-3639.
	 * _extra3:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2022-23960 needs to be applied or not.
	 * _extra4:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2024-7881 needs to be applied or not.
	 * _e_handler:
	 *	This is a placeholder for future per CPU exception handlers.
	 * _power_down_ops:
	 *	Comma-separated list of functions to perform power-down
	 *	operations on the CPU. At least one, and up to
	 *	CPU_MAX_PWR_DWN_OPS number of functions may be specified.
	 *	Starting at power level 0, these functions shall handle power
	 *	down at subsequent power levels. If there aren't exactly
	 *	CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
	 *	used to handle power down at subsequent levels
	 */
	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
		_extra1:req, _extra2:req, _extra3:req, _extra4:req, \
		_e_handler:req, _power_down_ops:vararg
	.section .cpu_ops, "a"
	.align 3
	.type cpu_ops_\_name, %object
	.quad \_midr
#if defined(IMAGE_AT_EL3)
	.quad \_resetfunc
#endif
	.quad \_extra1
	.quad \_extra2
	.quad \_extra3
	.quad \_extra4
	.quad \_e_handler
#ifdef IMAGE_BL31
	/* Insert list of functions */
	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
#endif
	/*
	 * It is possible (although unlikely) that a cpu may have no errata in
	 * code. In that case the start label will not be defined. The list is
	 * intended to be used in a loop, so define it as zero-length for
	 * predictable behaviour. Since this macro is always called at the end
	 * of the cpu file (after all errata have been parsed) we can be sure
	 * that we are at the end of the list. Some cpus call declare_cpu_ops
	 * twice, so only do this once.
	 */
	.pushsection .rodata.errata_entries
	.ifndef \_name\()_errata_list_start
	\_name\()_errata_list_start:
	.endif
	.ifndef \_name\()_errata_list_end
	\_name\()_errata_list_end:
	.endif
	.popsection

	/* and now put them in cpu_ops */
	.quad \_name\()_errata_list_start
	.quad \_name\()_errata_list_end

#if REPORT_ERRATA
	.ifndef \_name\()_cpu_str
	/*
	 * Place errata reported flag, and the spinlock to arbitrate access to
	 * it in the data section.
	 */
	.pushsection .data
	define_asm_spinlock \_name\()_errata_lock
	\_name\()_errata_reported:
	.word 0
	.popsection

	/* Place CPU string in rodata */
	.pushsection .rodata
	\_name\()_cpu_str:
	.asciz "\_name"
	.popsection
	.endif

	.quad \_name\()_cpu_str

#ifdef IMAGE_BL31
	/* Pointers to errata lock and reported flag */
	.quad \_name\()_errata_lock
	.quad \_name\()_errata_reported
#endif /* IMAGE_BL31 */
#endif /* REPORT_ERRATA */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	.quad \_name\()_cpu_reg_dump
#endif
	.endm

	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
		_power_down_ops:vararg
	declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, 0, \
		\_power_down_ops
	.endm
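
	/*
	 * For illustration only: a minimal sketch of how a CPU file might
	 * invoke the macro above. The CPU name, MIDR constant and function
	 * names below are hypothetical and not defined anywhere in this file.
	 *
	 *	declare_cpu_ops foo_cpu, FOO_CPU_MIDR, foo_cpu_reset_func, \
	 *		foo_cpu_core_pwr_dwn, \
	 *		foo_cpu_cluster_pwr_dwn
	 */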

	.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
		_e_handler:req, _power_down_ops:vararg
	declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
		0, 0, 0, 0, \_e_handler, \_power_down_ops
	.endm

	.macro declare_cpu_ops_wa _name:req, _midr:req, \
		_resetfunc:req, _extra1:req, _extra2:req, \
		_extra3:req, _power_down_ops:vararg
	declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
		\_extra1, \_extra2, \_extra3, 0, 0, \_power_down_ops
	.endm

	.macro declare_cpu_ops_wa_4 _name:req, _midr:req, \
		_resetfunc:req, _extra1:req, _extra2:req, \
		_extra3:req, _extra4:req, _power_down_ops:vararg
	declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
		\_extra1, \_extra2, \_extra3, \_extra4, 0, \_power_down_ops
	.endm

	/*
	 * This macro is used on some CPUs to detect if they are vulnerable
	 * to CVE-2017-5715.
	 */
	.macro cpu_check_csv2 _reg _label
	mrs	\_reg, id_aa64pfr0_el1
	ubfx	\_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
	/*
	 * If the field equals 1, branch targets trained in one context cannot
	 * affect speculative execution in a different context.
	 *
	 * If the field equals 2, it means that the system is also aware of
	 * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
	 * expect users of the registers to do the right thing.
	 *
	 * Only apply mitigations if the value of this field is 0.
	 */
#if ENABLE_ASSERTIONS
	cmp	\_reg, #3 /* Only values 0 to 2 are expected */
	ASM_ASSERT(lo)
#endif

	cmp	\_reg, #0
	bne	\_label
	.endm
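
	/*
	 * Illustrative sketch only (the register and local label are chosen
	 * arbitrarily): skip the mitigation when CSV2 reports it is not
	 * needed.
	 *
	 *	cpu_check_csv2	x0, 1f
	 *	<apply the CVE-2017-5715 mitigation here>
	 * 1:
	 */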

	/*
	 * Helper macro that reads the part number of the current
	 * CPU and jumps to the given label if it matches the CPU
	 * MIDR provided.
	 *
	 * Clobbers x0.
	 */
	.macro jump_if_cpu_midr _cpu_midr, _label
	mrs	x0, midr_el1
	ubfx	x0, x0, MIDR_PN_SHIFT, #12
	cmp	w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
	b.eq	\_label
	.endm
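
	/*
	 * Illustrative sketch only; the MIDR constant and label below are
	 * hypothetical:
	 *
	 *	jump_if_cpu_midr FOO_CPU_MIDR, skip_wa
	 */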


/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _apply_at_reset:
 *	Whether the erratum should be automatically applied at reset
 */
.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
	.pushsection .rodata.errata_entries
		.align	3
		.ifndef \_cpu\()_errata_list_start
		\_cpu\()_errata_list_start:
		.endif

		/* check if unused and compile out if no references */
		.if \_apply_at_reset && \_chosen
			.quad	erratum_\_cpu\()_\_id\()_wa
		.else
			.quad	0
		.endif
		/* TODO(errata ABI): this prevents all checker functions from
		 * being optimised away. Can be done away with unless the ABI
		 * needs them */
		.quad	check_erratum_\_cpu\()_\_id
		/* Will fit CVEs with up to 10 characters in the ID field */
		.word	\_id
		.hword	\_cve
		.byte	\_chosen
		/* TODO(errata ABI): mitigated field for known but unmitigated
		 * errata */
		.byte	0x1
	.popsection
.endm

.macro _workaround_start _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
	add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_apply_at_reset

	func erratum_\_cpu\()_\_id\()_wa
		mov	x8, x30

		/* save rev_var for workarounds that might need it but don't
		 * restore to x0 because few will care */
		mov	x7, x0
		bl	check_erratum_\_cpu\()_\_id
		cbz	x0, erratum_\_cpu\()_\_id\()_skip
.endm

.macro _workaround_end _cpu:req, _id:req
	erratum_\_cpu\()_\_id\()_skip:
		ret	x8
	endfunc erratum_\_cpu\()_\_id\()_wa
.endm

/*******************************************************************************
 * Errata workaround wrappers
 ******************************************************************************/
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * in body:
 *	clobber x0 to x7 (please only use those)
 *	argument x7 - cpu_rev_var
 *
 * _wa clobbers: x0-x8 (PCS compliant)
 */
.macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req
	_workaround_start \_cpu, \_cve, \_id, \_chosen, 1
.endm

/*
 * See `workaround_reset_start` for usage info. Additional arguments:
 *
 * _midr:
 *	Check if CPU's MIDR matches the CPU it's meant for. Must be specified
 *	for errata applied in generic code
 */
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
	/*
	 * Let errata specify if they need MIDR checking. Sadly, storing the
	 * MIDR in an .equ to retrieve automatically blows up as it stores some
	 * brackets in the symbol
	 */
	.ifnb \_midr
		jump_if_cpu_midr \_midr, 1f
		b	erratum_\_cpu\()_\_id\()_skip

	1:
	.endif
	_workaround_start \_cpu, \_cve, \_id, \_chosen, 0
.endm

/*
 * Usage and arguments identical to `workaround_reset_start`. The _cve argument
 * is kept here so that the same #define can be used as for that macro
 */
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
	_workaround_end \_cpu, \_id
.endm

/*
 * See `workaround_reset_start` for usage info. The _cve argument is kept here
 * so that the same #define can be used as for that macro. Additional
 * arguments:
 *
 * _no_isb:
 *	Optionally do not include the trailing isb. Please disable with the
 *	NO_ISB macro
 */
.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
	/*
	 * Runtime errata do not have a reset function to issue the isb for
	 * them, and a missing isb could be very problematic. Missing it is
	 * also likely, as runtime errata tend to be scattered in generic code.
	 */
	.ifb \_no_isb
		isb
	.endif
	_workaround_end \_cpu, \_id
.endm
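
/*
 * For illustration only: a sketch of how a CPU file might wrap a reset-time
 * erratum with the wrappers above. The CPU name, erratum number, build flag
 * and register define are hypothetical; ERRATUM() and CPU_REV() are assumed
 * to come from errata.h, and sysreg_bit_set is defined further below.
 *
 * workaround_reset_start foo_cpu, ERRATUM(123456), ERRATA_FOO_123456
 *	sysreg_bit_set FOO_CPU_CPUACTLR_EL1, BIT(3)
 * workaround_reset_end foo_cpu, ERRATUM(123456)
 *
 * check_erratum_ls foo_cpu, ERRATUM(123456), CPU_REV(1, 0)
 */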

/*******************************************************************************
 * Errata workaround helpers
 ******************************************************************************/
/*
 * Set a bit in a system register. Can set multiple bits but is limited by the
 * way the ORR instruction encodes them.
 *
 * _reg:
 *	Register to write to
 *
 * _bit:
 *	Bit to set. Please use a descriptive #define
 *
 * _assert:
 *	Optionally whether to read back and assert that the bit has been
 *	written. Please disable with NO_ASSERT macro
 *
 * clobbers: x1
 */
.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	orr	x1, x1, #\_bit
	msr	\_reg, x1
.endm
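
/*
 * For illustration, a typical (hypothetical) use from an erratum workaround
 * body; the register define and bit position below are not real:
 *
 *	sysreg_bit_set FOO_CPU_CPUACTLR_EL1, BIT(22)
 */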

/*
 * Clear a bit in a system register. Can clear multiple bits but is limited by
 * the way the BIC instruction encodes them.
 *
 * see sysreg_bit_set for usage
 */
.macro sysreg_bit_clear _reg:req, _bit:req
	mrs	x1, \_reg
	bic	x1, x1, #\_bit
	msr	\_reg, x1
.endm

.macro override_vector_table _table:req
	adr	x1, \_table
	msr	vbar_el3, x1
.endm

/*
 * BFI : Inserts bitfield into a system register.
 *
 * BFI{cond} Rd, Rn, #lsb, #width
 */
.macro sysreg_bitfield_insert _reg:req, _src:req, _lsb:req, _width:req
	/* Source value for BFI */
	mov	x1, #\_src
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm
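
/*
 * Illustrative sketch only; the register define and field values below are
 * hypothetical (a 2-bit field at bit 4 set to 0x2):
 *
 *	sysreg_bitfield_insert FOO_CPU_CPUECTLR_EL1, 0x2, 4, 2
 */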

/*
 * Extract CPU revision and variant, and combine them into a single numeric
 * value for easier comparison.
 *
 * _res:
 *	register where the result will be placed
 * _tmp:
 *	register to clobber for temporaries
 */
.macro get_rev_var _res:req, _tmp:req
	mrs	\_tmp, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack
	 * them as variant[7:4] and revision[3:0] of \_res.
	 *
	 * First extract \_tmp[23:16] to \_res[7:0] and zero fill the rest.
	 * Then extract \_tmp[3:0] into \_res[3:0] retaining other bits.
	 */
	ubfx	\_res, \_tmp, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	\_res, \_tmp, #MIDR_REV_SHIFT, #MIDR_REV_BITS
.endm

/*
 * Apply erratum
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _get_rev:
 *	Optional parameter that determines whether to insert a call to the CPU
 *	revision fetching procedure. Stores the result of this in the
 *	temporary register x10 to allow for chaining
 *
 * clobbers: x0-x10 (PCS compliant)
 */
.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
	.if (\_chosen && \_get_rev)
		mov	x9, x30
		bl	cpu_get_rev_var
		mov	x10, x0
	.elseif (\_chosen)
		mov	x9, x30
		mov	x0, x10
	.endif

	.if \_chosen
		bl	erratum_\_cpu\()_\_id\()_wa
		mov	x30, x9
	.endif
.endm
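
/*
 * For illustration only, a hypothetical call from generic (non reset) code;
 * the CPU name, erratum number and build flag below are not real:
 *
 *	apply_erratum foo_cpu, ERRATUM(123456), ERRATA_FOO_123456
 */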

/*
 * Helpers to report if an erratum applies. Compares the CPU's combined
 * revision/variant (cpu_rev_var, in x0) to the given value. Returns
 * ERRATA_APPLIES or ERRATA_NOT_APPLIES accordingly.
 *
 * _rev_num: the given revision variant. Or
 * _rev_num_lo,_rev_num_hi: the lower and upper bounds of the revision variant
 *
 * in body:
 *	clobber: x0 to x1
 *	argument: x0 - cpu_rev_var
 */
.macro cpu_rev_var_ls _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, ls
.endm

.macro cpu_rev_var_hs _rev_num:req
	cmp	x0, #\_rev_num
	cset	x0, hs
.endm

.macro cpu_rev_var_range _rev_num_lo:req, _rev_num_hi:req
	cmp	x0, #\_rev_num_lo
	mov	x1, #\_rev_num_hi
	ccmp	x0, x1, #2, hs
	cset	x0, ls
.endm

/*
 * Helpers to select which revisions errata apply to.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _rev_num:
 *	Revision to apply to
 *
 * in body:
 *	clobber: x0 to x1
 *	argument: x0 - cpu_rev_var
 */
.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
		cpu_rev_var_ls \_rev_num
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
		cpu_rev_var_hs \_rev_num
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
	func check_erratum_\_cpu\()_\_id
		cpu_rev_var_range \_rev_num_lo, \_rev_num_hi
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
	func check_erratum_\_cpu\()_\_id
		.if \_chosen
			mov	x0, #ERRATA_APPLIES
		.else
			mov	x0, #ERRATA_MISSING
		.endif
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

/*
 * Provide a shorthand for the name format for annoying errata
 * body: clobber x0 to x4
 */
.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
	func check_erratum_\_cpu\()_\_id
.endm

.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
	endfunc check_erratum_\_cpu\()_\_id
.endm


Boyan Karatotev3f4c1e12023-01-27 09:35:10 +0000576/*******************************************************************************
577 * CPU reset function wrapper
578 ******************************************************************************/
579
580/*
581 * Wrapper to automatically apply all reset-time errata. Will end with an isb.
582 *
583 * _cpu:
584 * Name of cpu as given to declare_cpu_ops
585 *
586 * in body:
587 * clobber x8 to x14
588 * argument x14 - cpu_rev_var
589 */
590.macro cpu_reset_func_start _cpu:req
591 func \_cpu\()_reset_func
592 mov x15, x30
Boyan Karatotevadc2f6d2024-12-04 15:25:27 +0000593 get_rev_var x14, x0
Boyan Karatotev3f4c1e12023-01-27 09:35:10 +0000594
595 /* short circuit the location to avoid searching the list */
596 adrp x12, \_cpu\()_errata_list_start
597 add x12, x12, :lo12:\_cpu\()_errata_list_start
598 adrp x13, \_cpu\()_errata_list_end
599 add x13, x13, :lo12:\_cpu\()_errata_list_end
600
601 errata_begin:
602 /* if head catches up with end of list, exit */
603 cmp x12, x13
604 b.eq errata_end
605
606 ldr x10, [x12, #ERRATUM_WA_FUNC]
607 /* TODO(errata ABI): check mitigated and checker function fields
608 * for 0 */
609 ldrb w11, [x12, #ERRATUM_CHOSEN]
610
611 /* skip if not chosen */
612 cbz x11, 1f
613 /* skip if runtime erratum */
614 cbz x10, 1f
615
616 /* put cpu revision in x0 and call workaround */
617 mov x0, x14
618 blr x10
619 1:
620 add x12, x12, #ERRATUM_ENTRY_SIZE
621 b errata_begin
622 errata_end:
623.endm
624
625.macro cpu_reset_func_end _cpu:req
626 isb
627 ret x15
628 endfunc \_cpu\()_reset_func
629.endm
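
/*
 * For illustration only: a sketch of how a CPU file might use the wrappers
 * above. The CPU name and register define are hypothetical. Reset-time errata
 * registered via workaround_reset_start/end are applied automatically before
 * the body below runs.
 *
 * cpu_reset_func_start foo_cpu
 *	sysreg_bit_set FOO_CPU_CPUECTLR_EL1, FOO_CPU_CPUECTLR_SMPEN_BIT
 * cpu_reset_func_end foo_cpu
 */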

#endif /* CPU_MACROS_S */