/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <platform.h>
#include <runtime_svc.h>
#include <std_svc.h>
#include "psci_private.h"

/*******************************************************************************
 * PSCI frontend API for servicing SMCs. Described in the PSCI spec.
 ******************************************************************************/
int psci_cpu_on(unsigned long target_cpu,
		unsigned long entrypoint,
		unsigned long context_id)
{
	int rc;
	unsigned int end_afflvl;
	entry_point_info_t ep;

	/* Determine if the cpu exists or not */
	rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
	if (rc != PSCI_E_SUCCESS) {
		return PSCI_E_INVALID_PARAMS;
	}

	/* Validate the entrypoint using platform pm_ops */
	if (psci_plat_pm_ops->validate_ns_entrypoint) {
		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
		if (rc != PSCI_E_SUCCESS) {
			assert(rc == PSCI_E_INVALID_PARAMS);
			return PSCI_E_INVALID_PARAMS;
		}
	}

	/*
	 * Verify and derive the re-entry information for the non-secure
	 * world from the non-secure state from which this call originated.
	 */
	rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/*
	 * To turn this cpu on, specify which affinity
	 * levels need to be turned on
	 */
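	/*
	 * Note: passing PLATFORM_MAX_AFFLVL as the end level asks the
	 * generic affinity level code to consider every level up to the
	 * highest one implemented by the platform when bringing the
	 * target cpu online.
	 */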
	end_afflvl = PLATFORM_MAX_AFFLVL;
	rc = psci_afflvl_on(target_cpu,
			    &ep,
			    end_afflvl);
	return rc;
}

unsigned int psci_version(void)
{
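	/*
	 * The PSCI version word carries the major version in bits[31:16]
	 * and the minor version in bits[15:0]; PSCI_MAJOR_VER and
	 * PSCI_MINOR_VER are expected to be pre-shifted into those
	 * positions, so a bitwise OR yields the full version value.
	 */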
	return PSCI_MAJOR_VER | PSCI_MINOR_VER;
}

int psci_cpu_suspend(unsigned int power_state,
		     unsigned long entrypoint,
		     unsigned long context_id)
{
	int rc;
	unsigned int target_afflvl, pstate_type;
	entry_point_info_t ep;

	/* Check SBZ bits in power state are zero */
	if (psci_validate_power_state(power_state))
		return PSCI_E_INVALID_PARAMS;

	/* Sanity check the requested state */
	target_afflvl = psci_get_pstate_afflvl(power_state);
	if (target_afflvl > PLATFORM_MAX_AFFLVL)
		return PSCI_E_INVALID_PARAMS;

	/* Validate the power_state using platform pm_ops */
	if (psci_plat_pm_ops->validate_power_state) {
		rc = psci_plat_pm_ops->validate_power_state(power_state);
		if (rc != PSCI_E_SUCCESS) {
			assert(rc == PSCI_E_INVALID_PARAMS);
			return PSCI_E_INVALID_PARAMS;
		}
	}

	/* Validate the entrypoint using platform pm_ops */
	if (psci_plat_pm_ops->validate_ns_entrypoint) {
		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
		if (rc != PSCI_E_SUCCESS) {
			assert(rc == PSCI_E_INVALID_PARAMS);
			return PSCI_E_INVALID_PARAMS;
		}
	}

	/* Determine the 'state type' in the 'power_state' parameter */
	pstate_type = psci_get_pstate_type(power_state);

	/*
	 * Ensure that we have a platform specific handler for entering
	 * a standby state.
	 */
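	/*
	 * A standby request is handled entirely here: the platform hook is
	 * expected to place the cpu in a standby state and return once it
	 * wakes up, so the caller sees a normal return with the cpu still
	 * on. Only powerdown requests go through the affinity level
	 * suspend path below.
	 */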
	if (pstate_type == PSTATE_TYPE_STANDBY) {
		if (!psci_plat_pm_ops->affinst_standby)
			return PSCI_E_INVALID_PARAMS;

		psci_plat_pm_ops->affinst_standby(power_state);
		return PSCI_E_SUCCESS;
	}

	/*
	 * Verify and derive the re-entry information for the non-secure
	 * world from the non-secure state from which this call originated.
	 */
	rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/* Save PSCI power state parameter for the core in suspend context */
	psci_set_suspend_power_state(power_state);

	/*
	 * Do what is needed to enter the power down state. Upon success,
	 * enter the final wfi which will power down this CPU.
	 */
	psci_afflvl_suspend(&ep,
			    target_afflvl);

	/* Reset PSCI power state parameter for the core. */
	psci_set_suspend_power_state(PSCI_INVALID_DATA);
	return PSCI_E_SUCCESS;
}

int psci_system_suspend(unsigned long entrypoint,
			unsigned long context_id)
{
	int rc;
	unsigned int power_state;
	entry_point_info_t ep;

	/* Validate the entrypoint using platform pm_ops */
	if (psci_plat_pm_ops->validate_ns_entrypoint) {
		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
		if (rc != PSCI_E_SUCCESS) {
			assert(rc == PSCI_E_INVALID_PARAMS);
			return PSCI_E_INVALID_PARAMS;
		}
	}

	/* Check if the current CPU is the last ON CPU in the system */
	if (!psci_is_last_on_cpu())
		return PSCI_E_DENIED;

	/*
	 * Verify and derive the re-entry information for the non-secure
	 * world from the non-secure state from which this call originated.
	 */
	rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/*
	 * Assert that the required pm_ops hook is implemented to ensure that
	 * the capability detected during psci_setup() is valid.
	 */
	assert(psci_plat_pm_ops->get_sys_suspend_power_state);

	/*
	 * Query the platform for the power_state required for system suspend
	 */
	power_state = psci_plat_pm_ops->get_sys_suspend_power_state();

	/* Save PSCI power state parameter for the core in suspend context */
	psci_set_suspend_power_state(power_state);

	/*
	 * Do what is needed to enter the power down state. Upon success,
	 * enter the final wfi which will power down this cpu.
	 */
	psci_afflvl_suspend(&ep,
			    PLATFORM_MAX_AFFLVL);

	/* Reset PSCI power state parameter for the core. */
	psci_set_suspend_power_state(PSCI_INVALID_DATA);
	return PSCI_E_SUCCESS;
}

int psci_cpu_off(void)
{
	int rc;
	int target_afflvl = PLATFORM_MAX_AFFLVL;

	/*
	 * Traverse from the highest to the lowest affinity level. When the
	 * lowest affinity level is hit, all the locks are acquired. State
	 * management is done immediately, followed by the cpu, cluster and
	 * higher (up to target_afflvl) specific actions as this function
	 * unwinds back.
	 */
	rc = psci_afflvl_off(target_afflvl);

	/*
	 * The only error cpu_off can return is E_DENIED. So check if that's
	 * indeed the case.
	 */
	assert(rc == PSCI_E_DENIED);

	return rc;
}

int psci_affinity_info(unsigned long target_affinity,
		       unsigned int lowest_affinity_level)
{
	int rc = PSCI_E_INVALID_PARAMS;
	unsigned int aff_state;
	aff_map_node_t *node;

	if (lowest_affinity_level > PLATFORM_MAX_AFFLVL)
		return rc;

	node = psci_get_aff_map_node(target_affinity, lowest_affinity_level);
	if (node && (node->state & PSCI_AFF_PRESENT)) {

		/*
		 * TODO: For affinity levels higher than 0 (i.e. above the
		 * cpu level), the state will always be either ON or OFF.
		 * Need to investigate how critical it is to support
		 * ON_PENDING here.
		 */
		aff_state = psci_get_state(node);

		/* A suspended cpu is available & on for the OS */
		if (aff_state == PSCI_STATE_SUSPEND) {
			aff_state = PSCI_STATE_ON;
		}

		rc = aff_state;
	}

	return rc;
}

int psci_migrate(unsigned long target_cpu)
{
	int rc;
	unsigned long resident_cpu_mpidr;

	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
	if (rc != PSCI_TOS_UP_MIG_CAP)
		return (rc == PSCI_TOS_NOT_UP_MIG_CAP) ?
			PSCI_E_DENIED : PSCI_E_NOT_SUPPORTED;

	/*
	 * Migrate should only be invoked on the CPU where
	 * the Secure OS is resident.
	 */
	if (resident_cpu_mpidr != read_mpidr_el1())
		return PSCI_E_NOT_PRESENT;

	/* Check the validity of the specified target cpu */
	rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
	if (rc != PSCI_E_SUCCESS)
		return PSCI_E_INVALID_PARAMS;

	assert(psci_spd_pm && psci_spd_pm->svc_migrate);

	rc = psci_spd_pm->svc_migrate(read_mpidr_el1(), target_cpu);
	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);

	return rc;
}

int psci_migrate_info_type(void)
{
	unsigned long resident_cpu_mpidr;

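	/*
	 * The SPD's migrate_info hook returns the value reported to the
	 * caller: per the PSCI spec this is 0 for a migratable UP Trusted
	 * OS, 1 for a non-migratable UP Trusted OS, and 2 when the Trusted
	 * OS is either not present or does not require migration.
	 */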
	return psci_spd_migrate_info(&resident_cpu_mpidr);
}

long psci_migrate_info_up_cpu(void)
{
	unsigned long resident_cpu_mpidr;
	int rc;

	/*
	 * The return value of this function depends upon what
	 * psci_spd_migrate_info() returns.
	 */
	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
	if (rc != PSCI_TOS_NOT_UP_MIG_CAP && rc != PSCI_TOS_UP_MIG_CAP)
		return PSCI_E_INVALID_PARAMS;

	return resident_cpu_mpidr;
}

int psci_features(unsigned int psci_fid)
{
	uint32_t local_caps = psci_caps;

	/* Check if it is a 64-bit function */
	if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)
		local_caps &= PSCI_CAP_64BIT_MASK;

	/* Check for invalid fid */
	if (!(is_std_svc_call(psci_fid) && is_valid_fast_smc(psci_fid)
			&& is_psci_fid(psci_fid)))
		return PSCI_E_NOT_SUPPORTED;

	/* Check if the psci fid is supported or not */
	if (!(local_caps & define_psci_cap(psci_fid)))
		return PSCI_E_NOT_SUPPORTED;

	/* Format the feature flags */
	if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 ||
			psci_fid == PSCI_CPU_SUSPEND_AARCH64) {
		/*
		 * The trusted firmware uses the original power state format
		 * and does not support OS Initiated Mode.
		 */
		return (FF_PSTATE_ORIG << FF_PSTATE_SHIFT) |
			((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT);
	}

	/* Return 0 for all other fids */
	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * PSCI top level handler for servicing SMCs.
 ******************************************************************************/
uint64_t psci_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
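	/*
	 * PSCI is only exposed to the non-secure world, so calls that
	 * originate from the secure side are rejected with SMC_UNK.
	 */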
	if (is_caller_secure(flags))
		SMC_RET1(handle, SMC_UNK);

	/* Check the fid against the capabilities */
	if (!(psci_caps & define_psci_cap(smc_fid)))
		SMC_RET1(handle, SMC_UNK);

	if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
		/* 32-bit PSCI function, clear top parameter bits */
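		/*
		 * For an SMC32 call only the lower 32 bits of each argument
		 * register are meaningful, so the arguments are truncated
		 * before being passed to the PSCI handlers.
		 */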

		x1 = (uint32_t)x1;
		x2 = (uint32_t)x2;
		x3 = (uint32_t)x3;

		switch (smc_fid) {
		case PSCI_VERSION:
			SMC_RET1(handle, psci_version());

		case PSCI_CPU_OFF:
			SMC_RET1(handle, psci_cpu_off());

		case PSCI_CPU_SUSPEND_AARCH32:
			SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));

		case PSCI_CPU_ON_AARCH32:
			SMC_RET1(handle, psci_cpu_on(x1, x2, x3));

		case PSCI_AFFINITY_INFO_AARCH32:
			SMC_RET1(handle, psci_affinity_info(x1, x2));

		case PSCI_MIG_AARCH32:
			SMC_RET1(handle, psci_migrate(x1));

		case PSCI_MIG_INFO_TYPE:
			SMC_RET1(handle, psci_migrate_info_type());

		case PSCI_MIG_INFO_UP_CPU_AARCH32:
			SMC_RET1(handle, psci_migrate_info_up_cpu());

		case PSCI_SYSTEM_SUSPEND_AARCH32:
			SMC_RET1(handle, psci_system_suspend(x1, x2));

		case PSCI_SYSTEM_OFF:
			psci_system_off();
			/* We should never return from psci_system_off() */

		case PSCI_SYSTEM_RESET:
			psci_system_reset();
			/* We should never return from psci_system_reset() */

		case PSCI_FEATURES:
			SMC_RET1(handle, psci_features(x1));

		default:
			break;
		}
	} else {
		/* 64-bit PSCI function */

		switch (smc_fid) {
		case PSCI_CPU_SUSPEND_AARCH64:
			SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));

		case PSCI_CPU_ON_AARCH64:
			SMC_RET1(handle, psci_cpu_on(x1, x2, x3));

		case PSCI_AFFINITY_INFO_AARCH64:
			SMC_RET1(handle, psci_affinity_info(x1, x2));

		case PSCI_MIG_AARCH64:
			SMC_RET1(handle, psci_migrate(x1));

		case PSCI_MIG_INFO_UP_CPU_AARCH64:
			SMC_RET1(handle, psci_migrate_info_up_cpu());

		case PSCI_SYSTEM_SUSPEND_AARCH64:
			SMC_RET1(handle, psci_system_suspend(x1, x2));

		default:
			break;
		}
	}

	WARN("Unimplemented PSCI Call: 0x%x \n", smc_fid);
	SMC_RET1(handle, SMC_UNK);
}