blob: 46fe9871bf39f3089fbe4fbf100348d4ce7fbc84 [file] [log] [blame]
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +02001/*
Varun Wadekar5904da42020-05-22 10:52:23 -07002 * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +02003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <arch.h>
8#include <arch_helpers.h>
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +02009#include <debug.h>
Antonio Nino Diaz09a00ef2019-01-11 13:12:58 +000010#include <drivers/arm/arm_gic.h>
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020011#include <errno.h>
12#include <irq.h>
13#include <mmio.h>
14#include <platform.h>
15#include <platform_def.h>
16#include <power_management.h>
17#include <sgi.h>
18#include <spinlock.h>
19#include <stddef.h>
Ambroise Vincent602b7f52019-02-11 14:13:43 +000020#include <stdint.h>
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020021#include <tftf.h>
22#include <timer.h>
23
24
/* Helper macros */
#define TIMER_STEP_VALUE	(plat_timer_info->timer_step_value)
#define TIMER_IRQ		(plat_timer_info->timer_irq)
#define PROGRAM_TIMER(a)	plat_timer_info->program(a)
/* Sentinel: no core currently owns the programmed timer interrupt */
#define INVALID_CORE		UINT32_MAX
/* Sentinel: no timer request outstanding in interrupt_req_time[] */
#define INVALID_TIME		UINT64_MAX
/* Maximum timeout (in ms) accepted by tftf_program_timer() */
#define MAX_TIME_OUT_MS		10000

/*
 * Pointer containing available timer information for the platform.
 */
static const plat_timer_t *plat_timer_info;
/*
 * Interrupt requested time by cores in terms of absolute time (in ms,
 * derived from the system counter). INVALID_TIME means no request.
 */
static volatile unsigned long long interrupt_req_time[PLATFORM_CORE_COUNT];
/*
 * Contains the target core number of the timer interrupt.
 */
static unsigned int current_prog_core = INVALID_CORE;
/*
 * Lock to get a consistent view for programming the timer
 */
static spinlock_t timer_lock;
/*
 * Number of system ticks per millisec
 */
static unsigned int systicks_per_ms;

/*
 * Stores per CPU timer handler invoked on expiration of the requested timeout.
 */
static irq_handler_t timer_handler[PLATFORM_CORE_COUNT];
58
59/* Helper function */
60static inline unsigned long long get_current_time_ms(void)
61{
62 assert(systicks_per_ms);
Varun Wadekar5904da42020-05-22 10:52:23 -070063 return syscounter_read() / systicks_per_ms;
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020064}
65
66static inline unsigned long long get_current_prog_time(void)
67{
68 return current_prog_core == INVALID_CORE ?
69 0 : interrupt_req_time[current_prog_core];
70}
71
72int tftf_initialise_timer(void)
73{
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020074 /*
75 * Get platform specific timer information
76 */
Sandrine Bailleuxdfa5ed92018-12-13 17:08:50 +010077 int rc = plat_initialise_timer_ops(&plat_timer_info);
78 if (rc != 0) {
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020079 return rc;
80 }
81
82 /* Systems can't support single tick as a step value */
83 assert(TIMER_STEP_VALUE);
84
85 /* Initialise the array to max possible time */
Sandrine Bailleuxdfa5ed92018-12-13 17:08:50 +010086 for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; i++)
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020087 interrupt_req_time[i] = INVALID_TIME;
88
89 tftf_irq_register_handler(TIMER_IRQ, tftf_timer_framework_handler);
90 arm_gic_set_intr_priority(TIMER_IRQ, GIC_HIGHEST_NS_PRIORITY);
91 arm_gic_intr_enable(TIMER_IRQ);
92
93 /* Save the systicks per millisecond */
94 systicks_per_ms = read_cntfrq_el0() / 1000;
95
96 return 0;
97}
98
Jens Wiklander5a440782024-06-25 12:36:20 +020099void tftf_initialise_timer_secondary_core(void)
100{
101 if (!IS_SPI(TIMER_IRQ)) {
102 arm_gic_set_intr_priority(TIMER_IRQ, GIC_HIGHEST_NS_PRIORITY);
103 arm_gic_intr_enable(TIMER_IRQ);
104 }
105}
106
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200107/*
108 * It returns the core number of next timer request to be serviced or
109 * -1 if there is no request from any core. The next service request
110 * is the core whose interrupt needs to be fired first.
111 */
112static inline unsigned int get_lowest_req_core(void)
113{
114 unsigned long long lowest_timer = INVALID_TIME;
115 unsigned int lowest_core_req = INVALID_CORE;
116 unsigned int i;
117
118 /*
119 * If 2 cores requested same value, give precedence
120 * to the core with lowest core number
121 */
122 for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
123 if (interrupt_req_time[i] < lowest_timer) {
124 lowest_timer = interrupt_req_time[i];
125 lowest_core_req = i;
126 }
127 }
128
129 return lowest_core_req;
130}
131
132int tftf_program_timer(unsigned long time_out_ms)
133{
134 unsigned int core_pos;
135 unsigned long long current_time;
136 u_register_t flags;
137 int rc = 0;
138
139 /*
140 * Some timer implementations have a very small max timeouts due to
141 * this if a request is asked for greater than the max time supported
142 * by them either it has to be broken down and remembered or use
143 * some other technique. Since that use case is not intended and
144 * and to make the timer framework simple, max timeout requests
145 * accepted by timer implementations can't be greater than
146 * 10 seconds. Hence, all timer peripherals used in timer framework
147 * has to support a timeout with interval of at least MAX_TIMEOUT.
148 */
149 if ((time_out_ms > MAX_TIME_OUT_MS) || (time_out_ms == 0)) {
150 ERROR("%s : Greater than max timeout request\n", __func__);
151 return -1;
152 } else if (time_out_ms < TIMER_STEP_VALUE) {
153 time_out_ms = TIMER_STEP_VALUE;
154 }
155
156 core_pos = platform_get_core_pos(read_mpidr_el1());
157 /* A timer interrupt request is already available for the core */
158 assert(interrupt_req_time[core_pos] == INVALID_TIME);
159
160 flags = read_daif();
161 disable_irq();
162 spin_lock(&timer_lock);
163
164 assert((current_prog_core < PLATFORM_CORE_COUNT) ||
165 (current_prog_core == INVALID_CORE));
166
167 /*
168 * Read time after acquiring timer_lock to account for any time taken
169 * by lock contention.
170 */
171 current_time = get_current_time_ms();
172
173 /* Update the requested time */
174 interrupt_req_time[core_pos] = current_time + time_out_ms;
175
176 VERBOSE("Need timer interrupt at: %lld current_prog_time:%lld\n"
177 " current time: %lld\n", interrupt_req_time[core_pos],
178 get_current_prog_time(),
179 get_current_time_ms());
180
181 /*
182 * If the interrupt request time is less than the current programmed
183 * by timer_step_value or timer is not programmed. Program it with
184 * requested time and retarget the timer interrupt to the current
185 * core.
186 */
187 if ((!get_current_prog_time()) || (interrupt_req_time[core_pos] <
188 (get_current_prog_time() - TIMER_STEP_VALUE))) {
189
Jens Wiklander5a440782024-06-25 12:36:20 +0200190 if (IS_SPI(TIMER_IRQ)) {
191 arm_gic_set_intr_target(TIMER_IRQ, core_pos);
192 }
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200193
194 rc = PROGRAM_TIMER(time_out_ms);
195 /* We don't expect timer programming to fail */
196 if (rc)
197 ERROR("%s %d: rc = %d\n", __func__, __LINE__, rc);
198
199 current_prog_core = core_pos;
200 }
201
202 spin_unlock(&timer_lock);
203 /* Restore DAIF flags */
204 write_daif(flags);
205 isb();
206
207 return rc;
208}
209
210int tftf_program_timer_and_suspend(unsigned long milli_secs,
211 unsigned int pwr_state,
212 int *timer_rc, int *suspend_rc)
213{
214 int rc = 0;
215 u_register_t flags;
216
217 /* Default to successful return codes */
218 int timer_rc_val = 0;
219 int suspend_rc_val = PSCI_E_SUCCESS;
220
221 /* Preserve DAIF flags. IRQs need to be disabled for this to work. */
222 flags = read_daif();
223 disable_irq();
224
225 /*
226 * Even with IRQs masked, the timer IRQ will wake the CPU up.
227 *
228 * If the timer IRQ happens before entering suspend mode (because the
229 * timer took too long to program, for example) the fact that the IRQ is
230 * pending will prevent the CPU from entering suspend mode and not being
231 * able to wake up.
232 */
233 timer_rc_val = tftf_program_timer(milli_secs);
234 if (timer_rc_val == 0) {
235 suspend_rc_val = tftf_cpu_suspend(pwr_state);
236 if (suspend_rc_val != PSCI_E_SUCCESS) {
237 rc = -1;
238 INFO("%s %d: suspend_rc = %d\n", __func__, __LINE__,
239 suspend_rc_val);
240 }
241 } else {
242 rc = -1;
243 INFO("%s %d: timer_rc = %d\n", __func__, __LINE__, timer_rc_val);
244 }
245
246 /* Restore previous DAIF flags */
247 write_daif(flags);
248 isb();
249
250 if (timer_rc)
251 *timer_rc = timer_rc_val;
252 if (suspend_rc)
253 *suspend_rc = suspend_rc_val;
254 /*
255 * If IRQs were disabled when calling this function, the timer IRQ
256 * handler won't be called and the timer interrupt will be pending, but
257 * that isn't necessarily a problem.
258 */
259
260 return rc;
261}
262
263int tftf_program_timer_and_sys_suspend(unsigned long milli_secs,
264 int *timer_rc, int *suspend_rc)
265{
266 int rc = 0;
267 u_register_t flags;
268
269 /* Default to successful return codes */
270 int timer_rc_val = 0;
271 int suspend_rc_val = PSCI_E_SUCCESS;
272
273 /* Preserve DAIF flags. IRQs need to be disabled for this to work. */
274 flags = read_daif();
275 disable_irq();
276
277 /*
278 * Even with IRQs masked, the timer IRQ will wake the CPU up.
279 *
280 * If the timer IRQ happens before entering suspend mode (because the
281 * timer took too long to program, for example) the fact that the IRQ is
282 * pending will prevent the CPU from entering suspend mode and not being
283 * able to wake up.
284 */
285 timer_rc_val = tftf_program_timer(milli_secs);
286 if (timer_rc_val == 0) {
287 suspend_rc_val = tftf_system_suspend();
288 if (suspend_rc_val != PSCI_E_SUCCESS) {
289 rc = -1;
290 INFO("%s %d: suspend_rc = %d\n", __func__, __LINE__,
291 suspend_rc_val);
292 }
293 } else {
294 rc = -1;
295 INFO("%s %d: timer_rc = %d\n", __func__, __LINE__, timer_rc_val);
296 }
297
298 /* Restore previous DAIF flags */
299 write_daif(flags);
300 isb();
301
302 /*
303 * If IRQs were disabled when calling this function, the timer IRQ
304 * handler won't be called and the timer interrupt will be pending, but
305 * that isn't necessarily a problem.
306 */
307 if (timer_rc)
308 *timer_rc = timer_rc_val;
309 if (suspend_rc)
310 *suspend_rc = suspend_rc_val;
311
312 return rc;
313}
314
315int tftf_timer_sleep(unsigned long milli_secs)
316{
317 int ret, power_state;
318 uint32_t stateid;
319
320 ret = tftf_psci_make_composite_state_id(MPIDR_AFFLVL0,
321 PSTATE_TYPE_STANDBY, &stateid);
322 if (ret != PSCI_E_SUCCESS)
323 return -1;
324
325 power_state = tftf_make_psci_pstate(MPIDR_AFFLVL0, PSTATE_TYPE_STANDBY,
326 stateid);
327 ret = tftf_program_timer_and_suspend(milli_secs, power_state,
328 NULL, NULL);
329 if (ret != 0)
330 return -1;
331
332 return 0;
333}
334
335int tftf_cancel_timer(void)
336{
337 unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
338 unsigned int next_timer_req_core_pos;
339 unsigned long long current_time;
340 u_register_t flags;
341 int rc = 0;
342
343 /*
344 * IRQ is disabled so that if a timer is fired after taking a lock,
345 * it will remain pending and a core does not hit IRQ handler trying
346 * to acquire an already locked spin_lock causing dead lock.
347 */
348 flags = read_daif();
349 disable_irq();
350 spin_lock(&timer_lock);
351
352 interrupt_req_time[core_pos] = INVALID_TIME;
353
354 if (core_pos == current_prog_core) {
355 /*
356 * Cancel the programmed interrupt at the peripheral. If the
357 * timer interrupt is level triggered and fired this also
358 * deactivates the pending interrupt.
359 */
360 rc = plat_timer_info->cancel();
361 /* We don't expect cancel timer to fail */
362 if (rc) {
363 ERROR("%s %d: rc = %d\n", __func__, __LINE__, rc);
364 goto exit;
365 }
366
367 /*
368 * For edge triggered interrupts, if an IRQ is fired before
369 * cancel timer is executed, the signal remains pending. So,
370 * clear the Timer IRQ if it is already pending.
371 */
372 if (arm_gic_is_intr_pending(TIMER_IRQ))
373 arm_gic_intr_clear(TIMER_IRQ);
374
375 /* Get next timer consumer */
376 next_timer_req_core_pos = get_lowest_req_core();
377 if (next_timer_req_core_pos != INVALID_CORE) {
378
379 /* Retarget to the next_timer_req_core_pos */
380 arm_gic_set_intr_target(TIMER_IRQ, next_timer_req_core_pos);
381 current_prog_core = next_timer_req_core_pos;
382
383 current_time = get_current_time_ms();
384
385 /*
386 * If the next timer request is lesser than or in a
387 * window of TIMER_STEP_VALUE from current time,
388 * program it to fire after TIMER_STEP_VALUE.
389 */
390 if (interrupt_req_time[next_timer_req_core_pos] >
391 current_time + TIMER_STEP_VALUE)
392 rc = PROGRAM_TIMER(interrupt_req_time[next_timer_req_core_pos] - current_time);
393 else
394 rc = PROGRAM_TIMER(TIMER_STEP_VALUE);
395 VERBOSE("Cancel and program new timer for core_pos: "
396 "%d %lld\n",
397 next_timer_req_core_pos,
398 get_current_prog_time());
399 /* We don't expect timer programming to fail */
400 if (rc)
401 ERROR("%s %d: rc = %d\n", __func__, __LINE__, rc);
402 } else {
403 current_prog_core = INVALID_CORE;
404 VERBOSE("Cancelling timer : %d\n", core_pos);
405 }
406 }
407exit:
408 spin_unlock(&timer_lock);
409
410 /* Restore DAIF flags */
411 write_daif(flags);
412 isb();
413
414 return rc;
415}
416
417int tftf_timer_framework_handler(void *data)
418{
419 unsigned int handler_core_pos = platform_get_core_pos(read_mpidr_el1());
420 unsigned int next_timer_req_core_pos;
421 unsigned long long current_time;
422 int rc = 0;
423
424 assert(interrupt_req_time[handler_core_pos] != INVALID_TIME);
425 spin_lock(&timer_lock);
426
427 current_time = get_current_time_ms();
428 /* Check if we interrupt is targeted correctly */
429 assert(handler_core_pos == current_prog_core);
430
431 interrupt_req_time[handler_core_pos] = INVALID_TIME;
432
433 /* Execute the driver handler */
434 if (plat_timer_info->handler)
435 plat_timer_info->handler();
436
437 if (arm_gic_is_intr_pending(TIMER_IRQ)) {
438 /*
439 * We might never manage to acquire the printf lock here
440 * (because we are in ISR context) but we're gonna panic right
441 * after anyway so it doesn't really matter.
442 */
443 ERROR("Timer IRQ still pending. Fatal error.\n");
444 panic();
445 }
446
447 /*
448 * Execute the handler requested by the core, the handlers for the
449 * other cores will be executed as part of handling IRQ_WAKE_SGI.
450 */
451 if (timer_handler[handler_core_pos])
452 timer_handler[handler_core_pos](data);
453
454 /* Send interrupts to all the CPUS in the min time block */
455 for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
456 if ((interrupt_req_time[i] <=
457 (current_time + TIMER_STEP_VALUE))) {
458 interrupt_req_time[i] = INVALID_TIME;
459 tftf_send_sgi(IRQ_WAKE_SGI, i);
460 }
461 }
462
463 /* Get the next lowest requested timer core and program it */
464 next_timer_req_core_pos = get_lowest_req_core();
465 if (next_timer_req_core_pos != INVALID_CORE) {
466 /* Check we have not exceeded the time for next core */
467 assert(interrupt_req_time[next_timer_req_core_pos] >
468 current_time);
469 arm_gic_set_intr_target(TIMER_IRQ, next_timer_req_core_pos);
470 rc = PROGRAM_TIMER(interrupt_req_time[next_timer_req_core_pos]
471 - current_time);
472 }
473 /* Update current program core to the newer one */
474 current_prog_core = next_timer_req_core_pos;
475
476 spin_unlock(&timer_lock);
477
478 return rc;
479}
480
481int tftf_timer_register_handler(irq_handler_t irq_handler)
482{
483 unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
484 int ret;
485
486 /* Validate no handler is registered */
487 assert(!timer_handler[core_pos]);
488 timer_handler[core_pos] = irq_handler;
489
490 /*
491 * Also register same handler to IRQ_WAKE_SGI, as it can be waken
492 * by it.
493 */
494 ret = tftf_irq_register_handler(IRQ_WAKE_SGI, irq_handler);
495 assert(!ret);
496
497 return ret;
498}
499
500int tftf_timer_unregister_handler(void)
501{
502 unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
503 int ret;
504
505 /*
506 * Unregister the handler for IRQ_WAKE_SGI also
507 */
508 ret = tftf_irq_unregister_handler(IRQ_WAKE_SGI);
509 assert(!ret);
510 /* Validate a handler is registered */
511 assert(timer_handler[core_pos]);
512 timer_handler[core_pos] = 0;
513
514 return ret;
515}
516
/*
 * Return the platform timer IRQ number.
 * The timer framework must have been initialised beforehand
 * (plat_timer_info populated by tftf_initialise_timer()).
 */
unsigned int tftf_get_timer_irq(void)
{
	/*
	 * Check if the timer info is initialised
	 */
	assert(TIMER_IRQ);
	return TIMER_IRQ;
}
525
/*
 * Return the platform timer step value (minimum programmable interval,
 * in ms). The timer framework must have been initialised beforehand.
 */
unsigned int tftf_get_timer_step_value(void)
{
	assert(TIMER_STEP_VALUE);

	return TIMER_STEP_VALUE;
}
532
533/*
534 * There are 4 cases that could happen when a system is resuming from system
535 * suspend. The cases are:
536 * 1. The resumed core is the last core to power down and the
537 * timer interrupt was targeted to it. In this case, target the
538 * interrupt to our core and set the appropriate priority and enable it.
539 *
540 * 2. The resumed core was the last core to power down but the timer interrupt
541 * is targeted to another core because of timer request grouping within
542 * TIMER_STEP_VALUE. In this case, re-target the interrupt to our core
543 * and set the appropriate priority and enable it
544 *
545 * 3. The system suspend request was down-graded by firmware and the timer
546 * interrupt is targeted to another core which woke up first. In this case,
547 * that core will wake us up and the interrupt_req_time[] corresponding to
548 * our core will be cleared. In this case, no need to do anything as GIC
549 * state is preserved.
550 *
551 * 4. The system suspend is woken up by another external interrupt other
552 * than the timer framework interrupt. In this case, just enable the
553 * timer interrupt and set the correct priority at GICD.
554 */
555void tftf_timer_gic_state_restore(void)
556{
557 unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
558 spin_lock(&timer_lock);
559
560 arm_gic_set_intr_priority(TIMER_IRQ, GIC_HIGHEST_NS_PRIORITY);
561 arm_gic_intr_enable(TIMER_IRQ);
562
563 /* Check if the programmed core is the woken up core */
564 if (interrupt_req_time[core_pos] == INVALID_TIME) {
565 INFO("The programmed core is not the one woken up\n");
566 } else {
567 current_prog_core = core_pos;
568 arm_gic_set_intr_target(TIMER_IRQ, core_pos);
569 }
570
571 spin_unlock(&timer_lock);
572}
573