blob: e77d0e63003b99b6568db9496d0be408fc856864 [file] [log] [blame]
/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
30
#include <arch.h>
#include <asm_macros.S>
#include <psci.h>
#include <xlat_tables.h>

	/* Symbols exported by this file. The __psci_cpu_off and
	 * __psci_cpu_suspend implementations are not in this chunk. */
	.globl	psci_aff_on_finish_entry
	.globl	psci_aff_suspend_finish_entry
	.globl	__psci_cpu_off
	.globl	__psci_cpu_suspend
	.globl	psci_power_down_wfi
Achin Gupta4f6ad662013-10-25 09:08:21 +010041
	/* -----------------------------------------------------
	 * Warm-boot entry points: this cpu has been physically
	 * powered up. Depending upon whether it was resumed
	 * from suspend or simply turned on, call the common
	 * power on finisher with the handlers (chosen depending
	 * upon original state). For ease, the finisher is
	 * called with coherent stacks. This allows the
	 * cluster/cpu finishers to enter coherency and enable
	 * the mmu without running into issues. We switch back
	 * to normal stacks once all this is done.
	 *
	 * Register protocol within this block: x23 carries the
	 * chosen finisher table and must stay live across the
	 * bl calls below until it is moved into x2.
	 * -----------------------------------------------------
	 */
func psci_aff_on_finish_entry
	/* Entered after a CPU_ON: select the "on" finisher table */
	adr	x23, psci_afflvl_on_finishers
	b	psci_aff_common_finish_entry

	/* NOTE(review): bare label while the sibling entry uses the
	 * `func` macro — confirm against asm_macros.S whether `func`
	 * only sets symbol type (in which case this should match). */
psci_aff_suspend_finish_entry:
	/* Entered after resume from suspend: select the "suspend"
	 * finisher table, then fall through to the common path */
	adr	x23, psci_afflvl_suspend_finishers

psci_aff_common_finish_entry:
	/* ---------------------------------------------
	 * Initialise the pcpu cache pointer for the CPU
	 * ---------------------------------------------
	 */
	bl	init_cpu_data_ptr

	/* ---------------------------------------------
	 * Set the EL3 exception vectors; isb so the new
	 * vbar_el3 takes effect before proceeding
	 * ---------------------------------------------
	 */
	adr	x0, runtime_exceptions
	msr	vbar_el3, x0
	isb

	/* ---------------------------------------------
	 * Use SP_EL0 for the C runtime stack.
	 * ---------------------------------------------
	 */
	msr	spsel, #0

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	mrs	x0, mpidr_el1
	bl	platform_set_stack

	/* --------------------------------------------
	 * Enable the MMU with the DCache disabled. It
	 * is safe to use stacks allocated in normal
	 * memory as a result. All memory accesses are
	 * marked nGnRnE when the MMU is disabled. So
	 * all the stack writes will make it to memory.
	 * All memory accesses are marked Non-cacheable
	 * when the MMU is enabled but D$ is disabled.
	 * So used stack memory is guaranteed to be
	 * visible immediately after the MMU is enabled
	 * Enabling the DCache at the same time as the
	 * MMU can lead to speculatively fetched and
	 * possibly stale stack memory being read from
	 * other caches. This can lead to coherency
	 * issues.
	 * --------------------------------------------
	 */
	mov	x0, #DISABLE_DCACHE
	bl	bl31_plat_enable_mmu

	/* ---------------------------------------------
	 * Call the finishers starting from affinity
	 * level 0. A negative return from
	 * get_power_on_target_afflvl indicates an
	 * invalid state and is fatal.
	 * ---------------------------------------------
	 */
	mrs	x0, mpidr_el1
	bl	get_power_on_target_afflvl
	cmp	x0, xzr
	b.lt	_panic
	mov	x2, x23			// finisher table chosen at entry
	mov	x1, x0			// end affinity level (from helper above)
	mov	x0, #MPIDR_AFFLVL0	// start affinity level
	bl	psci_afflvl_power_on_finish

	b	el3_exit
_panic:
	/* Fatal: spin forever */
	b	_panic
128
	/* --------------------------------------------
	 * This function is called to indicate to the
	 * power controller that it is safe to power
	 * down this cpu. It should not exit the wfi
	 * and will be released from reset upon power
	 * up. 'wfi_spill' is used to catch erroneous
	 * exits from wfi.
	 * Clobbers: none. Does not return.
	 * --------------------------------------------
	 */
func psci_power_down_wfi
	dsb	sy		// ensure write buffer empty before sleeping
	wfi			// power controller takes the cpu down here
wfi_spill:
	/* Should be unreachable: trap spurious wakeups */
	b	wfi_spill
143