/*
 * Copyright (c) 2013, ARM Limited. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>

	.globl	enable_irq
	.globl	disable_irq

	.globl	enable_fiq
	.globl	disable_fiq

	.globl	enable_serror
	.globl	disable_serror

	.globl	enable_debug_exceptions
	.globl	disable_debug_exceptions

	.globl	read_daif
	.globl	write_daif

	.globl	read_spsr
	.globl	read_spsr_el1
	.globl	read_spsr_el2
	.globl	read_spsr_el3

	.globl	write_spsr
	.globl	write_spsr_el1
	.globl	write_spsr_el2
	.globl	write_spsr_el3

	.globl	read_elr
	.globl	read_elr_el1
	.globl	read_elr_el2
	.globl	read_elr_el3

	.globl	write_elr
	.globl	write_elr_el1
	.globl	write_elr_el2
	.globl	write_elr_el3

	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	dsb
	.globl	isb
	.globl	sev
	.globl	wfe
	.globl	wfi
	.globl	eret
	.globl	smc

	.globl	zeromem16
	.globl	memcpy16

	.section	.text, "ax"

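	/* -----------------------------------------------------
	 * Takes an affinity level (0 to 3) in x0 and returns,
	 * in x0, the left shift of the corresponding affinity
	 * field in an MPIDR value. Affinity level 3 sits one
	 * field beyond level 2, hence the conditional
	 * increment.
	 * -----------------------------------------------------
	 */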
get_afflvl_shift:; .type get_afflvl_shift, %function
	cmp	x0, #3
	cinc	x0, x0, eq
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1
	ret

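	/* -----------------------------------------------------
	 * Takes an MPIDR in x0 and an affinity level in x1.
	 * Returns, in x0, the MPIDR with all affinity fields
	 * below that level cleared, by shifting them out and
	 * back in.
	 * -----------------------------------------------------
	 */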
mpidr_mask_lower_afflvls:; .type mpidr_mask_lower_afflvls, %function
	cmp	x1, #3
	cinc	x1, x1, eq
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2
	lsr	x0, x0, x2
	lsl	x0, x0, x2
	ret

	/* -----------------------------------------------------
	 * Asynchronous exception manipulation accessors
	 * -----------------------------------------------------
	 */
enable_irq:; .type enable_irq, %function
	msr	daifclr, #DAIF_IRQ_BIT
	ret


enable_fiq:; .type enable_fiq, %function
	msr	daifclr, #DAIF_FIQ_BIT
	ret


enable_serror:; .type enable_serror, %function
	msr	daifclr, #DAIF_ABT_BIT
	ret


enable_debug_exceptions:; .type enable_debug_exceptions, %function
	msr	daifclr, #DAIF_DBG_BIT
	ret


disable_irq:; .type disable_irq, %function
	msr	daifset, #DAIF_IRQ_BIT
	ret


disable_fiq:; .type disable_fiq, %function
	msr	daifset, #DAIF_FIQ_BIT
	ret


disable_serror:; .type disable_serror, %function
	msr	daifset, #DAIF_ABT_BIT
	ret


disable_debug_exceptions:; .type disable_debug_exceptions, %function
	msr	daifset, #DAIF_DBG_BIT
	ret


read_daif:; .type read_daif, %function
	mrs	x0, daif
	ret


write_daif:; .type write_daif, %function
	msr	daif, x0
	ret


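	/* -----------------------------------------------------
	 * SPSR accessors. read_spsr and write_spsr inspect
	 * CurrentEL and tail-branch to the accessor for the
	 * executing exception level; the _elX variants access
	 * a specific level directly. CurrentEL is assumed to
	 * be EL1, EL2 or EL3.
	 * -----------------------------------------------------
	 */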
read_spsr:; .type read_spsr, %function
	mrs	x0, CurrentEl
	cmp	x0, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	read_spsr_el1
	cmp	x0, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	read_spsr_el2
	cmp	x0, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	read_spsr_el3


read_spsr_el1:; .type read_spsr_el1, %function
	mrs	x0, spsr_el1
	ret


read_spsr_el2:; .type read_spsr_el2, %function
	mrs	x0, spsr_el2
	ret


read_spsr_el3:; .type read_spsr_el3, %function
	mrs	x0, spsr_el3
	ret


write_spsr:; .type write_spsr, %function
	mrs	x1, CurrentEl
	cmp	x1, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	write_spsr_el1
	cmp	x1, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	write_spsr_el2
	cmp	x1, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	write_spsr_el3


write_spsr_el1:; .type write_spsr_el1, %function
	msr	spsr_el1, x0
	isb
	ret


write_spsr_el2:; .type write_spsr_el2, %function
	msr	spsr_el2, x0
	isb
	ret


write_spsr_el3:; .type write_spsr_el3, %function
	msr	spsr_el3, x0
	isb
	ret


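	/* -----------------------------------------------------
	 * ELR accessors, following the same CurrentEL dispatch
	 * pattern as the SPSR accessors above.
	 * -----------------------------------------------------
	 */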
read_elr:; .type read_elr, %function
	mrs	x0, CurrentEl
	cmp	x0, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	read_elr_el1
	cmp	x0, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	read_elr_el2
	cmp	x0, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	read_elr_el3


read_elr_el1:; .type read_elr_el1, %function
	mrs	x0, elr_el1
	ret


read_elr_el2:; .type read_elr_el2, %function
	mrs	x0, elr_el2
	ret


read_elr_el3:; .type read_elr_el3, %function
	mrs	x0, elr_el3
	ret


write_elr:; .type write_elr, %function
	mrs	x1, CurrentEl
	cmp	x1, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	write_elr_el1
	cmp	x1, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	write_elr_el2
	cmp	x1, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	write_elr_el3


write_elr_el1:; .type write_elr_el1, %function
	msr	elr_el1, x0
	isb
	ret


write_elr_el2:; .type write_elr_el2, %function
	msr	elr_el2, x0
	isb
	ret


write_elr_el3:; .type write_elr_el3, %function
	msr	elr_el3, x0
	isb
	ret


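	/* -----------------------------------------------------
	 * Thin wrappers exposing individual instructions to C
	 * callers. Note that eret and smc are not expected to
	 * return to their caller: neither ends with a ret.
	 * -----------------------------------------------------
	 */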
dsb:; .type dsb, %function
	dsb	sy
	ret


isb:; .type isb, %function
	isb
	ret


sev:; .type sev, %function
	sev
	ret


wfe:; .type wfe, %function
	wfe
	ret


wfi:; .type wfi, %function
	wfi
	ret


eret:; .type eret, %function
	eret


smc:; .type smc, %function
	smc	#0

/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Initialise a memory region to 0.
 * The memory address must be 16-byte aligned.
 * -----------------------------------------------------------------------
 */
zeromem16:; .type zeromem16, %function
	add	x2, x0, x1
/* zero 16 bytes at a time */
z_loop16:
	sub	x3, x2, x0
	cmp	x3, #16
	b.lt	z_loop1
	stp	xzr, xzr, [x0], #16
	b	z_loop16
/* zero byte per byte */
z_loop1:
	cmp	x0, x2
	b.eq	z_end
	strb	wzr, [x0], #1
	b	z_loop1
z_end:	ret


/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length);
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
memcpy16:; .type memcpy16, %function
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lt	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy byte per byte */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:	ret
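
/* --------------------------------------------------------------------------
 * Illustrative C usage of the two helpers above (a sketch; the symbol
 * names are hypothetical, only the prototypes come from the comments
 * above). The addresses must be 16-byte aligned; any length works:
 *
 *     zeromem16(bss_base, bss_size);
 *     memcpy16(data_base, init_data_base, data_size);
 * --------------------------------------------------------------------------
 */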