/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <bl_common.h>
#include <arch.h>
#include <arch_helpers.h>
#include <context.h>
#include <context_mgmt.h>
#include <cpu_data.h>
#include <debug.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"

/*******************************************************************************
 * This function saves the power state parameter passed in the current PSCI
 * cpu_suspend call in the per-cpu data array.
 ******************************************************************************/
void psci_set_suspend_power_state(unsigned int power_state)
{
	set_cpu_data(psci_svc_cpu_data.power_state, power_state);
	flush_cpu_data(psci_svc_cpu_data.power_state);
}

/*******************************************************************************
 * This function gets the affinity level up to which the current cpu could be
 * powered down during a cpu_suspend call. Returns PSCI_INVALID_DATA if the
 * power state is invalid.
 ******************************************************************************/
int psci_get_suspend_afflvl(void)
{
	unsigned int power_state;

	power_state = get_cpu_data(psci_svc_cpu_data.power_state);

	return ((power_state == PSCI_INVALID_DATA) ?
		power_state : psci_get_pstate_afflvl(power_state));
}

/*******************************************************************************
 * This function gets the state id of the current cpu from the power state
 * parameter saved in the per-cpu data array. Returns PSCI_INVALID_DATA if the
 * power state saved is invalid.
 ******************************************************************************/
int psci_get_suspend_stateid(void)
{
	unsigned int power_state;

	power_state = get_cpu_data(psci_svc_cpu_data.power_state);

	return ((power_state == PSCI_INVALID_DATA) ?
		power_state : psci_get_pstate_id(power_state));
}

/*******************************************************************************
 * This function gets the state id of the cpu specified by the 'mpidr' parameter
 * from the power state parameter saved in the per-cpu data array. Returns
 * PSCI_INVALID_DATA if the power state saved is invalid.
 ******************************************************************************/
int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
{
	unsigned int power_state;

	power_state = get_cpu_data_by_mpidr(mpidr,
					    psci_svc_cpu_data.power_state);

	return ((power_state == PSCI_INVALID_DATA) ?
		power_state : psci_get_pstate_id(power_state));
}

/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with suspending the cpu, higher affinity levels up
 * to the target affinity level will be suspended as well. It finds the highest
 * level to be suspended by traversing the node information and then performs
 * the generic, architectural, platform setup and state management required to
 * suspend that affinity level and the affinity levels below it. e.g. For a cpu
 * that is to be suspended, this could mean programming the power controller,
 * whereas for a cluster that is to be suspended, it will call the platform
 * specific code which will disable coherency at the interconnect level if the
 * cpu is the last in the cluster and also program the power controller.
 *
 * All the required parameter checks are performed at the beginning. After the
 * state transition has been done, no further errors are expected and it is not
 * possible to undo any of the actions taken beyond that point.
 ******************************************************************************/
void psci_afflvl_suspend(entry_point_info_t *ep,
			 int end_afflvl)
{
	int skip_wfi = 0;
	mpidr_aff_map_nodes_t mpidr_nodes;
	unsigned int max_phys_off_afflvl;
	unsigned long psci_entrypoint;

	/*
	 * This function must only be called on platforms where the
	 * CPU_SUSPEND platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->affinst_suspend &&
	       psci_plat_pm_ops->affinst_suspend_finish);

	/*
	 * Collect the pointers to the nodes in the topology tree for
	 * each affinity instance in the mpidr. If this function does
	 * not return successfully then either the mpidr or the affinity
	 * levels are incorrect. Either way, this is an internal TF error,
	 * therefore assert.
	 */
	if (psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
			MPIDR_AFFLVL0, end_afflvl, mpidr_nodes) != PSCI_E_SUCCESS)
		assert(0);

	/*
	 * This function acquires the lock corresponding to each affinity
	 * level so that by the time all locks are taken, a consistent
	 * snapshot of the system topology has been captured and state
	 * management can be done safely.
	 */
	psci_acquire_afflvl_locks(MPIDR_AFFLVL0,
				  end_afflvl,
				  mpidr_nodes);

	/*
	 * We check if there are any pending interrupts after the delay
	 * introduced by lock contention to increase the chances of early
	 * detection that a wake-up interrupt has fired.
	 */
	if (read_isr_el1()) {
		skip_wfi = 1;
		goto exit;
	}

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend)
		psci_spd_pm->svc_suspend(0);

	/*
	 * This function updates the state of each affinity instance
	 * corresponding to the mpidr in the range of affinity levels
	 * specified.
	 */
	psci_do_afflvl_state_mgmt(MPIDR_AFFLVL0,
				  end_afflvl,
				  mpidr_nodes,
				  PSCI_STATE_SUSPEND);

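	/*
	 * Find the highest affinity level that will be physically powered
	 * down by this suspend request, so that the architectural and
	 * platform handlers below operate on the correct level.
	 */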
	max_phys_off_afflvl = psci_find_max_phys_off_afflvl(MPIDR_AFFLVL0,
							    end_afflvl,
							    mpidr_nodes);
	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);

	/*
	 * Store the re-entry information for the non-secure world.
	 */
	cm_init_context(read_mpidr_el1(), ep);

	/* Set the secure world (EL3) re-entry point after BL1 */
	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;

	/*
	 * Arch. management: Perform the necessary steps to flush all
	 * cpu caches.
	 */
	psci_do_pwrdown_cache_maintenance(max_phys_off_afflvl);

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to suspend this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
					  max_phys_off_afflvl);

exit:
	/*
	 * Release the locks corresponding to each affinity level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_afflvl_locks(MPIDR_AFFLVL0,
				  end_afflvl,
				  mpidr_nodes);
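
	/*
	 * The wfi is skipped if a pending wake-up interrupt was detected
	 * earlier while the affinity level locks were held.
	 */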
	if (!skip_wfi)
		psci_power_down_wfi();
}

/*******************************************************************************
 * The following function finishes an earlier affinity suspend request. It is
 * called by the common finisher routine in psci_common.c.
 ******************************************************************************/
void psci_afflvl_suspend_finisher(aff_map_node_t *node[], int afflvl)
{
	int32_t suspend_level;
	uint64_t counter_freq;

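	/* The node must correspond to the affinity level being finished */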
	assert(node[afflvl]->level == afflvl);

	/* Ensure we have been woken up from a suspended state */
	assert(psci_get_state(node[MPIDR_AFFLVL0]) == PSCI_STATE_SUSPEND);

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	psci_plat_pm_ops->affinst_suspend_finish(afflvl);

	/*
	 * Arch. management: Enable the data cache, manage stack memory and
	 * restore the stashed EL3 architectural context from the 'cpu_context'
	 * structure for this cpu.
	 */
	psci_do_pwrup_cache_maintenance();

	/* Re-init the cntfrq_el0 register */
	counter_freq = plat_get_syscnt_freq();
	write_cntfrq_el0(counter_freq);

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend) {
		suspend_level = psci_get_suspend_afflvl();
		assert(suspend_level != PSCI_INVALID_DATA);
		psci_spd_pm->svc_suspend_finish(suspend_level);
	}

	/* Invalidate the suspend context for the node */
	psci_set_suspend_power_state(PSCI_INVALID_DATA);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);

	/* Clean caches before re-entering normal world */
	dcsw_op_louis(DCCSW);
}