/*
 * Copyright (c) 2018, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <drivers/arm/arm_gic.h>
#include <errno.h>
#include <irq.h>
#include <mmio.h>
#include <platform.h>
#include <platform_def.h>
#include <power_management.h>
#include <sgi.h>
#include <spinlock.h>
#include <stddef.h>
#include <stdint.h>
#include <tftf.h>
#include <timer.h>


/* Helper macros */
#define TIMER_STEP_VALUE        (plat_timer_info->timer_step_value)
#define TIMER_IRQ               (plat_timer_info->timer_irq)
#define PROGRAM_TIMER(a)        plat_timer_info->program(a)
#define INVALID_CORE            UINT32_MAX
#define INVALID_TIME            UINT64_MAX
#define MAX_TIME_OUT_MS         10000

/*
 * Pointer to the platform timer information.
 */
static const plat_timer_t *plat_timer_info;
/*
 * Interrupt request time for each core, expressed as an absolute time.
 */
static volatile unsigned long long interrupt_req_time[PLATFORM_CORE_COUNT];
/*
 * Core number the timer interrupt is currently targeted to.
 */
static unsigned int current_prog_core = INVALID_CORE;
/*
 * Lock to get a consistent view for programming the timer.
 */
static spinlock_t timer_lock;
/*
 * Number of system ticks per millisecond.
 */
static unsigned int systicks_per_ms;

/*
 * Per-CPU timer handlers invoked on expiry of the requested timeout.
 */
static irq_handler_t timer_handler[PLATFORM_CORE_COUNT];

/* Helper functions */
static inline unsigned long long get_current_time_ms(void)
{
        assert(systicks_per_ms);
        return mmio_read_64(SYS_CNT_BASE1 + CNTPCT_LO) / systicks_per_ms;
}
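
/*
 * For example, with a 100 MHz system counter (CNTFRQ_EL0 = 100000000),
 * systicks_per_ms is 100000, so a raw count of 250000000 corresponds to
 * 2500 ms.
 */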

static inline unsigned long long get_current_prog_time(void)
{
        return current_prog_core == INVALID_CORE ?
                0 : interrupt_req_time[current_prog_core];
}

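/*
 * Detect the platform timer, register the framework handler for its
 * interrupt and enable that interrupt at the GIC with the highest
 * non-secure priority. Returns 0 on success, the platform error code
 * otherwise.
 */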
int tftf_initialise_timer(void)
{
        /*
         * Get the platform-specific timer information.
         */
        int rc = plat_initialise_timer_ops(&plat_timer_info);
        if (rc != 0) {
                return rc;
        }

        /* The platform timer must report a non-zero step value */
        assert(TIMER_STEP_VALUE);

        /* Initialise the array to the maximum possible time */
        for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; i++)
                interrupt_req_time[i] = INVALID_TIME;

        tftf_irq_register_handler(TIMER_IRQ, tftf_timer_framework_handler);
        arm_gic_set_intr_priority(TIMER_IRQ, GIC_HIGHEST_NS_PRIORITY);
        arm_gic_intr_enable(TIMER_IRQ);

        /* Save the number of system ticks per millisecond */
        systicks_per_ms = read_cntfrq_el0() / 1000;

        return 0;
}

/*
 * Returns the core number of the next timer request to be serviced, i.e.
 * the core whose interrupt needs to fire first, or INVALID_CORE if no core
 * has a pending request.
 */
static inline unsigned int get_lowest_req_core(void)
{
        unsigned long long lowest_timer = INVALID_TIME;
        unsigned int lowest_core_req = INVALID_CORE;
        unsigned int i;

        /*
         * If two cores requested the same time, the core with the lowest
         * core number takes precedence.
         */
        for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
                if (interrupt_req_time[i] < lowest_timer) {
                        lowest_timer = interrupt_req_time[i];
                        lowest_core_req = i;
                }
        }

        return lowest_core_req;
}

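/*
 * Request a timer interrupt for the calling core to fire time_out_ms
 * milliseconds from now. Requests of 0 or of more than MAX_TIME_OUT_MS are
 * rejected with -1; requests shorter than TIMER_STEP_VALUE are rounded up
 * to TIMER_STEP_VALUE. The hardware timer is re-programmed only if no
 * request is currently programmed or if this request expires more than
 * TIMER_STEP_VALUE earlier than the programmed one. Returns 0 on success.
 */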
int tftf_program_timer(unsigned long time_out_ms)
{
        unsigned int core_pos;
        unsigned long long current_time;
        u_register_t flags;
        int rc = 0;

        /*
         * Some timer implementations support only a very small maximum
         * timeout. A request larger than that maximum would have to be
         * broken down into pieces and re-programmed, or handled with some
         * other technique. Since that use case is not needed here, and to
         * keep the timer framework simple, timeout requests accepted by
         * timer implementations can't be greater than 10 seconds. Hence,
         * all timer peripherals used in the timer framework have to support
         * a timeout interval of at least MAX_TIME_OUT_MS.
         */
        if ((time_out_ms > MAX_TIME_OUT_MS) || (time_out_ms == 0)) {
                ERROR("%s : Invalid timeout request\n", __func__);
                return -1;
        } else if (time_out_ms < TIMER_STEP_VALUE) {
                time_out_ms = TIMER_STEP_VALUE;
        }

        core_pos = platform_get_core_pos(read_mpidr_el1());
        /* The core must not already have a pending timer request */
        assert(interrupt_req_time[core_pos] == INVALID_TIME);

        flags = read_daif();
        disable_irq();
        spin_lock(&timer_lock);

        assert((current_prog_core < PLATFORM_CORE_COUNT) ||
                (current_prog_core == INVALID_CORE));

        /*
         * Read the time after acquiring timer_lock to account for any time
         * spent in lock contention.
         */
        current_time = get_current_time_ms();

        /* Update the requested time */
        interrupt_req_time[core_pos] = current_time + time_out_ms;

        VERBOSE("Need timer interrupt at: %lld current_prog_time:%lld\n"
                        " current time: %lld\n", interrupt_req_time[core_pos],
                                get_current_prog_time(),
                                get_current_time_ms());

        /*
         * If the timer is not programmed yet, or if the requested interrupt
         * time precedes the currently programmed time by more than
         * TIMER_STEP_VALUE, program the timer with the requested time and
         * retarget the timer interrupt to the current core.
         */
        if ((!get_current_prog_time()) || (interrupt_req_time[core_pos] <
                        (get_current_prog_time() - TIMER_STEP_VALUE))) {

                arm_gic_set_intr_target(TIMER_IRQ, core_pos);

                rc = PROGRAM_TIMER(time_out_ms);
                /* We don't expect timer programming to fail */
                if (rc)
                        ERROR("%s %d: rc = %d\n", __func__, __LINE__, rc);

                current_prog_core = core_pos;
        }

        spin_unlock(&timer_lock);
        /* Restore DAIF flags */
        write_daif(flags);
        isb();

        return rc;
}

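/*
 * Program the timer for 'milli_secs' milliseconds and suspend the calling
 * core into the given PSCI power state. If non-NULL, *timer_rc and
 * *suspend_rc receive the individual return codes. Returns 0 only if both
 * the timer programming and the suspend succeeded.
 */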
int tftf_program_timer_and_suspend(unsigned long milli_secs,
                                   unsigned int pwr_state,
                                   int *timer_rc, int *suspend_rc)
{
        int rc = 0;
        u_register_t flags;

        /* Default to successful return codes */
        int timer_rc_val = 0;
        int suspend_rc_val = PSCI_E_SUCCESS;

        /* Preserve DAIF flags. IRQs need to be disabled for this to work. */
        flags = read_daif();
        disable_irq();

        /*
         * Even with IRQs masked, the timer IRQ will wake the CPU up.
         *
         * If the timer IRQ fires before suspend mode is entered (because
         * programming the timer took too long, for example), the pending
         * IRQ will prevent the CPU from entering suspend mode, so it cannot
         * get stuck waiting for a wake-up event that has already happened.
         */
        timer_rc_val = tftf_program_timer(milli_secs);
        if (timer_rc_val == 0) {
                suspend_rc_val = tftf_cpu_suspend(pwr_state);
                if (suspend_rc_val != PSCI_E_SUCCESS) {
                        rc = -1;
                        INFO("%s %d: suspend_rc = %d\n", __func__, __LINE__,
                                suspend_rc_val);
                }
        } else {
                rc = -1;
                INFO("%s %d: timer_rc = %d\n", __func__, __LINE__, timer_rc_val);
        }

        /* Restore previous DAIF flags */
        write_daif(flags);
        isb();

        if (timer_rc)
                *timer_rc = timer_rc_val;
        if (suspend_rc)
                *suspend_rc = suspend_rc_val;
        /*
         * If IRQs were disabled when calling this function, the timer IRQ
         * handler won't be called and the timer interrupt will be pending, but
         * that isn't necessarily a problem.
         */

        return rc;
}

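/*
 * Same as tftf_program_timer_and_suspend(), except that the timer is used
 * to wake the system from PSCI SYSTEM_SUSPEND rather than from a per-CPU
 * suspend.
 */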
int tftf_program_timer_and_sys_suspend(unsigned long milli_secs,
                                       int *timer_rc, int *suspend_rc)
{
        int rc = 0;
        u_register_t flags;

        /* Default to successful return codes */
        int timer_rc_val = 0;
        int suspend_rc_val = PSCI_E_SUCCESS;

        /* Preserve DAIF flags. IRQs need to be disabled for this to work. */
        flags = read_daif();
        disable_irq();

        /*
         * Even with IRQs masked, the timer IRQ will wake the CPU up.
         *
         * If the timer IRQ fires before suspend mode is entered (because
         * programming the timer took too long, for example), the pending
         * IRQ will prevent the CPU from entering suspend mode, so it cannot
         * get stuck waiting for a wake-up event that has already happened.
         */
        timer_rc_val = tftf_program_timer(milli_secs);
        if (timer_rc_val == 0) {
                suspend_rc_val = tftf_system_suspend();
                if (suspend_rc_val != PSCI_E_SUCCESS) {
                        rc = -1;
                        INFO("%s %d: suspend_rc = %d\n", __func__, __LINE__,
                                suspend_rc_val);
                }
        } else {
                rc = -1;
                INFO("%s %d: timer_rc = %d\n", __func__, __LINE__, timer_rc_val);
        }

        /* Restore previous DAIF flags */
        write_daif(flags);
        isb();

        /*
         * If IRQs were disabled when calling this function, the timer IRQ
         * handler won't be called and the timer interrupt will be pending, but
         * that isn't necessarily a problem.
         */
        if (timer_rc)
                *timer_rc = timer_rc_val;
        if (suspend_rc)
                *suspend_rc = suspend_rc_val;

        return rc;
}

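/*
 * Program the timer for 'milli_secs' milliseconds and place the calling
 * core in a CPU standby state until it is woken up again. Returns 0 on
 * success, -1 otherwise.
 *
 * As an illustration only, a test that needs a wake-up callback might use
 * the framework as follows ('my_wakeup_handler' is a hypothetical
 * irq_handler_t provided by the test, not part of this file):
 *
 *      tftf_timer_register_handler(my_wakeup_handler);
 *      tftf_timer_sleep(100);          // sleep for roughly 100 ms
 *      tftf_timer_unregister_handler();
 */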
int tftf_timer_sleep(unsigned long milli_secs)
{
        int ret, power_state;
        uint32_t stateid;

        ret = tftf_psci_make_composite_state_id(MPIDR_AFFLVL0,
                        PSTATE_TYPE_STANDBY, &stateid);
        if (ret != PSCI_E_SUCCESS)
                return -1;

        power_state = tftf_make_psci_pstate(MPIDR_AFFLVL0, PSTATE_TYPE_STANDBY,
                                            stateid);
        ret = tftf_program_timer_and_suspend(milli_secs, power_state,
                                             NULL, NULL);
        if (ret != 0)
                return -1;

        return 0;
}

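/*
 * Cancel the calling core's pending timer request. If the timer is
 * currently programmed on behalf of this core, the peripheral is cancelled,
 * any already-pending timer IRQ is cleared and, if another core still has
 * an outstanding request, the timer is re-programmed and re-targeted to
 * that core. Returns 0 on success.
 */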
int tftf_cancel_timer(void)
{
        unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
        unsigned int next_timer_req_core_pos;
        unsigned long long current_time;
        u_register_t flags;
        int rc = 0;

        /*
         * IRQs are disabled so that if the timer fires after the lock is
         * taken, the interrupt remains pending and this core does not enter
         * the IRQ handler and try to acquire the already-held spinlock,
         * which would deadlock.
         */
        flags = read_daif();
        disable_irq();
        spin_lock(&timer_lock);

        interrupt_req_time[core_pos] = INVALID_TIME;

        if (core_pos == current_prog_core) {
                /*
                 * Cancel the programmed interrupt at the peripheral. If the
                 * timer interrupt is level-triggered and has fired, this also
                 * deactivates the pending interrupt.
                 */
                rc = plat_timer_info->cancel();
                /* We don't expect cancelling the timer to fail */
                if (rc) {
                        ERROR("%s %d: rc = %d\n", __func__, __LINE__, rc);
                        goto exit;
                }

                /*
                 * For edge-triggered interrupts, if the IRQ fired before the
                 * cancel was executed, the signal remains pending. So, clear
                 * the timer IRQ if it is already pending.
                 */
                if (arm_gic_is_intr_pending(TIMER_IRQ))
                        arm_gic_intr_clear(TIMER_IRQ);

                /* Get the next timer consumer */
                next_timer_req_core_pos = get_lowest_req_core();
                if (next_timer_req_core_pos != INVALID_CORE) {

                        /* Retarget to the next_timer_req_core_pos */
                        arm_gic_set_intr_target(TIMER_IRQ, next_timer_req_core_pos);
                        current_prog_core = next_timer_req_core_pos;

                        current_time = get_current_time_ms();

                        /*
                         * If the next timer request is earlier than, or
                         * within TIMER_STEP_VALUE of, the current time,
                         * program it to fire after TIMER_STEP_VALUE.
                         */
                        if (interrupt_req_time[next_timer_req_core_pos] >
                                        current_time + TIMER_STEP_VALUE)
                                rc = PROGRAM_TIMER(interrupt_req_time[next_timer_req_core_pos] - current_time);
                        else
                                rc = PROGRAM_TIMER(TIMER_STEP_VALUE);
                        VERBOSE("Cancel and program new timer for core_pos: "
                                                        "%d %lld\n",
                                                next_timer_req_core_pos,
                                                get_current_prog_time());
                        /* We don't expect timer programming to fail */
                        if (rc)
                                ERROR("%s %d: rc = %d\n", __func__, __LINE__, rc);
                } else {
                        current_prog_core = INVALID_CORE;
                        VERBOSE("Cancelling timer : %d\n", core_pos);
                }
        }
exit:
        spin_unlock(&timer_lock);

        /* Restore DAIF flags */
        write_daif(flags);
        isb();

        return rc;
}

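/*
 * Framework handler for the timer interrupt. It invokes the platform
 * driver handler, runs the handler registered by the interrupted core,
 * wakes up (via IRQ_WAKE_SGI) every other core whose request falls within
 * TIMER_STEP_VALUE of the current time, and re-programs the timer for the
 * next pending request, if any.
 */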
int tftf_timer_framework_handler(void *data)
{
        unsigned int handler_core_pos = platform_get_core_pos(read_mpidr_el1());
        unsigned int next_timer_req_core_pos;
        unsigned long long current_time;
        int rc = 0;

        assert(interrupt_req_time[handler_core_pos] != INVALID_TIME);
        spin_lock(&timer_lock);

        current_time = get_current_time_ms();
        /* Check that the interrupt is targeted to the correct core */
        assert(handler_core_pos == current_prog_core);

        interrupt_req_time[handler_core_pos] = INVALID_TIME;

        /* Execute the driver handler */
        if (plat_timer_info->handler)
                plat_timer_info->handler();

        if (arm_gic_is_intr_pending(TIMER_IRQ)) {
                /*
                 * We might never manage to acquire the printf lock here
                 * (because we are in ISR context) but we are going to panic
                 * right after anyway so it doesn't really matter.
                 */
                ERROR("Timer IRQ still pending. Fatal error.\n");
                panic();
        }

        /*
         * Execute the handler requested by the core; the handlers for the
         * other cores will be executed as part of handling IRQ_WAKE_SGI.
         */
        if (timer_handler[handler_core_pos])
                timer_handler[handler_core_pos](data);

        /* Wake all CPUs whose requests fall within TIMER_STEP_VALUE of now */
        for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
                if ((interrupt_req_time[i] <=
                                (current_time + TIMER_STEP_VALUE))) {
                        interrupt_req_time[i] = INVALID_TIME;
                        tftf_send_sgi(IRQ_WAKE_SGI, i);
                }
        }

        /* Get the next lowest requested core and program the timer for it */
        next_timer_req_core_pos = get_lowest_req_core();
        if (next_timer_req_core_pos != INVALID_CORE) {
                /* Check we have not already passed the next core's time */
                assert(interrupt_req_time[next_timer_req_core_pos] >
                                current_time);
                arm_gic_set_intr_target(TIMER_IRQ, next_timer_req_core_pos);
                rc = PROGRAM_TIMER(interrupt_req_time[next_timer_req_core_pos]
                                - current_time);
        }
        /* Update the currently programmed core */
        current_prog_core = next_timer_req_core_pos;

        spin_unlock(&timer_lock);

        return rc;
}

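/*
 * Register a handler to be invoked on the calling core when its requested
 * timeout expires. The same handler is also registered for IRQ_WAKE_SGI,
 * since the core may be woken by that SGI rather than by the timer IRQ
 * itself.
 */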
int tftf_timer_register_handler(irq_handler_t irq_handler)
{
        unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
        int ret;

        /* Validate that no handler is already registered */
        assert(!timer_handler[core_pos]);
        timer_handler[core_pos] = irq_handler;

        /*
         * Also register the same handler for IRQ_WAKE_SGI, as the core can
         * be woken up by it.
         */
        ret = tftf_irq_register_handler(IRQ_WAKE_SGI, irq_handler);
        assert(!ret);

        return ret;
}

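/*
 * Remove the handler previously installed on the calling core with
 * tftf_timer_register_handler(), along with the matching IRQ_WAKE_SGI
 * handler.
 */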
int tftf_timer_unregister_handler(void)
{
        unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
        int ret;

        /*
         * Also unregister the handler for IRQ_WAKE_SGI.
         */
        ret = tftf_irq_unregister_handler(IRQ_WAKE_SGI);
        assert(!ret);
        /* Validate that a handler is registered */
        assert(timer_handler[core_pos]);
        timer_handler[core_pos] = 0;

        return ret;
}

unsigned int tftf_get_timer_irq(void)
{
        /*
         * Check if the timer info is initialised.
         */
        assert(TIMER_IRQ);
        return TIMER_IRQ;
}

unsigned int tftf_get_timer_step_value(void)
{
        assert(TIMER_STEP_VALUE);

        return TIMER_STEP_VALUE;
}

/*
 * There are 4 cases that can happen when a system is resuming from system
 * suspend. The cases are:
 * 1. The resumed core is the last core to have powered down and the timer
 *    interrupt was targeted to it. In this case, target the interrupt to
 *    our core, set the appropriate priority and enable it.
 *
 * 2. The resumed core was the last core to have powered down but the timer
 *    interrupt is targeted to another core because of timer request grouping
 *    within TIMER_STEP_VALUE. In this case, re-target the interrupt to our
 *    core, set the appropriate priority and enable it.
 *
 * 3. The system suspend request was downgraded by firmware and the timer
 *    interrupt is targeted to another core which woke up first. That core
 *    will wake us up and the interrupt_req_time[] entry corresponding to
 *    our core will be cleared. In this case there is nothing to do, as the
 *    GIC state is preserved.
 *
 * 4. The system is woken from suspend by an external interrupt other than
 *    the timer framework interrupt. In this case, just enable the timer
 *    interrupt and set the correct priority at the GIC Distributor.
 */
void tftf_timer_gic_state_restore(void)
{
        unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
        spin_lock(&timer_lock);

        arm_gic_set_intr_priority(TIMER_IRQ, GIC_HIGHEST_NS_PRIORITY);
        arm_gic_intr_enable(TIMER_IRQ);

        /* Check if the programmed core is the woken up core */
        if (interrupt_req_time[core_pos] == INVALID_TIME) {
                INFO("The programmed core is not the one woken up\n");
        } else {
                current_prog_core = core_pos;
                arm_gic_set_intr_target(TIMER_IRQ, core_pos);
        }

        spin_unlock(&timer_lock);
}