/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <psci.h>

	.globl	psci_aff_on_finish_entry
	.globl	psci_aff_suspend_finish_entry
	.globl	__psci_cpu_off
	.globl	__psci_cpu_suspend
	.globl	psci_power_down_wfi

	/* -----------------------------------------------------
	 * This cpu has been physically powered up. Depending
	 * upon whether it was resumed from suspend or simply
	 * turned on, call the common power on finisher with
	 * the handlers (chosen depending upon original state).
	 * For ease, the finisher is called with coherent
	 * stacks. This allows the cluster/cpu finishers to
	 * enter coherency and enable the mmu without running
	 * into issues. We switch back to normal stacks once
	 * all this is done.
	 * -----------------------------------------------------
	 */
func psci_aff_on_finish_entry
	adr	x23, psci_afflvl_on_finishers
	b	psci_aff_common_finish_entry

psci_aff_suspend_finish_entry:
	adr	x23, psci_afflvl_suspend_finishers
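	/* x23 holds the table of suspend finishers; fall through
	 * to the common power on finish path below.
	 */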

psci_aff_common_finish_entry:
	adr	x22, psci_afflvl_power_on_finish

	/* ---------------------------------------------
	 * Initialise the pcpu cache pointer for the CPU
	 * ---------------------------------------------
	 */
	bl	init_cpu_data_ptr

	/* ---------------------------------------------
	 * Set the exception vectors
	 * ---------------------------------------------
	 */
	adr	x0, runtime_exceptions
	msr	vbar_el3, x0
	isb

	/* ---------------------------------------------
	 * Use SP_EL0 for the C runtime stack.
	 * ---------------------------------------------
	 */
	msr	spsel, #0

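	/* ---------------------------------------------
	 * Give ourselves a coherent stack, indexed by
	 * this cpu's mpidr, to run the finishers on.
	 * ---------------------------------------------
	 */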
	mrs	x0, mpidr_el1
	bl	platform_set_coherent_stack

	/* ---------------------------------------------
	 * Call the finishers starting from affinity
	 * level 0.
	 * ---------------------------------------------
	 */
	mrs	x0, mpidr_el1
	bl	get_power_on_target_afflvl
	cmp	x0, xzr
	b.lt	_panic
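	/* ---------------------------------------------
	 * Arguments for the finisher in x22:
	 * x0 = mpidr, x1 = lowest affinity level,
	 * x2 = target affinity level, x3 = table of
	 * level specific handlers.
	 * ---------------------------------------------
	 */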
	mov	x3, x23
	mov	x2, x0
	mov	x1, #MPIDR_AFFLVL0
	mrs	x0, mpidr_el1
	blr	x22

	/* --------------------------------------------
	 * Give ourselves a stack allocated in Normal
	 * -IS-WBWA memory
	 * --------------------------------------------
	 */
	mrs	x0, mpidr_el1
	bl	platform_set_stack

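	/* --------------------------------------------
	 * Jump to el3_exit to restore the saved lower
	 * EL context and leave EL3 via an eret.
	 * --------------------------------------------
	 */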
	b	el3_exit
_panic:
	b	_panic

	/* -----------------------------------------------------
	 * The following two stubs give the calling cpu a
	 * coherent stack to allow flushing of caches without
	 * suffering from stack coherency issues
	 * -----------------------------------------------------
	 */
func __psci_cpu_off
	func_prologue
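	/* ---------------------------------------------
	 * Stash x19/x20 and the current stack pointer
	 * (in x19) so the normal stack can be restored
	 * if psci_cpu_off returns.
	 * ---------------------------------------------
	 */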
	sub	sp, sp, #0x10
	stp	x19, x20, [sp, #0]
	mov	x19, sp
	mrs	x0, mpidr_el1
	bl	platform_set_coherent_stack
	bl	psci_cpu_off
	mov	sp, x19
	ldp	x19, x20, [sp,#0]
	add	sp, sp, #0x10
	func_epilogue
	ret

func __psci_cpu_suspend
	func_prologue
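	/* ---------------------------------------------
	 * Preserve the incoming arguments in x20-x22
	 * across the switch to the coherent stack and
	 * keep the original stack pointer in x19.
	 * ---------------------------------------------
	 */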
	sub	sp, sp, #0x20
	stp	x19, x20, [sp, #0]
	stp	x21, x22, [sp, #0x10]
	mov	x19, sp
	mov	x20, x0
	mov	x21, x1
	mov	x22, x2
	mrs	x0, mpidr_el1
	bl	platform_set_coherent_stack
	mov	x0, x20
	mov	x1, x21
	mov	x2, x22
	bl	psci_cpu_suspend
	mov	sp, x19
	ldp	x21, x22, [sp,#0x10]
	ldp	x19, x20, [sp,#0]
	add	sp, sp, #0x20
	func_epilogue
	ret

	/* --------------------------------------------
	 * This function is called to indicate to the
	 * power controller that it is safe to power
	 * down this cpu. It should not exit the wfi
	 * and will be released from reset upon power
	 * up. 'wfi_spill' is used to catch erroneous
	 * exits from wfi.
	 * --------------------------------------------
	 */
func psci_power_down_wfi
	dsb	sy		// ensure write buffer empty
	wfi
wfi_spill:
	b	wfi_spill