/*
 * Copyright (c) 2018, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <assert_macros.S>
#include <asm_macros.S>

	.globl	amu_group0_cnt_read_internal
	.globl	amu_group1_cnt_read_internal

/*
 * uint64_t amu_group0_cnt_read_internal(int idx);
 *
 * Given `idx`, read the corresponding AMU group 0 counter
 * and return it in `r0` (low half) and `r1` (high half).
 */
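/*
 * Illustrative call sequence (a sketch, not part of the build): per the
 * AAPCS, the 64-bit result of these helpers comes back split across r0
 * (bits[31:0]) and r1 (bits[63:32]), so an assembly caller would look
 * roughly like:
 *
 *	mov	r0, #2				@ idx = 2
 *	bl	amu_group0_cnt_read_internal
 *	@ counter value now in r0 (low word) and r1 (high word)
 */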
func amu_group0_cnt_read_internal
#if ENABLE_ASSERTIONS
	/* `idx` must be in the range [0, 3] */
	mov	r1, r0
	lsr	r1, r1, #2	/* any bit above bit 1 set means idx > 3 */
	cmp	r1, #0
	ASM_ASSERT(eq)
#endif

	/*
	 * Given `idx`, calculate the address of the ldcopr16/bx lr
	 * instruction pair in the table below and branch to it.
	 */
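	/*
	 * Worked example (for illustration only): with idx = 2 the shift
	 * below gives an offset of 2 << 3 = 16 bytes, so the computed
	 * branch lands on the third 8-byte pair, i.e. the
	 * `ldcopr16 r0, r1, AMEVCNTR02` / `bx lr` sequence.
	 */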
	adr	r1, 1f
	lsl	r0, r0, #3	/* each ldcopr16/bx lr sequence is 8 bytes */
	add	r1, r1, r0
	bx	r1
1:
	ldcopr16	r0, r1, AMEVCNTR00	/* index 0 */
	bx	lr
	ldcopr16	r0, r1, AMEVCNTR01	/* index 1 */
	bx	lr
	ldcopr16	r0, r1, AMEVCNTR02	/* index 2 */
	bx	lr
	ldcopr16	r0, r1, AMEVCNTR03	/* index 3 */
	bx	lr
endfunc amu_group0_cnt_read_internal

/*
 * uint64_t amu_group1_cnt_read_internal(int idx);
 *
 * Given `idx`, read the corresponding AMU group 1 counter
 * and return it in `r0` (low half) and `r1` (high half).
 */
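/*
 * Illustrative note only: the calling convention matches
 * amu_group0_cnt_read_internal above, i.e. pass the counter index in r0
 * and read the 64-bit result back from r0 (low word) and r1 (high word).
 */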
func amu_group1_cnt_read_internal
#if ENABLE_ASSERTIONS
	/* `idx` must be in the range [0, 15] */
	mov	r1, r0
	lsr	r1, r1, #4	/* any bit above bit 3 set means idx > 15 */
	cmp	r1, #0
	ASM_ASSERT(eq)
#endif

	/*
	 * Given `idx`, calculate the address of the ldcopr16/bx lr
	 * instruction pair in the table below and branch to it.
	 */
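	/*
	 * Worked example (for illustration only): with idx = 10 the offset
	 * is 10 << 3 = 80 bytes, so the computed branch lands on the
	 * `ldcopr16 r0, r1, AMEVCNTR1A` / `bx lr` pair.
	 */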
	adr	r1, 1f
	lsl	r0, r0, #3	/* each ldcopr16/bx lr sequence is 8 bytes */
	add	r1, r1, r0
	bx	r1

1:
	ldcopr16	r0, r1, AMEVCNTR10	/* index 0 */
	bx	lr
	ldcopr16	r0, r1, AMEVCNTR11	/* index 1 */
	bx	lr
	ldcopr16	r0, r1, AMEVCNTR12	/* index 2 */
	bx	lr
	ldcopr16	r0, r1, AMEVCNTR13	/* index 3 */
	bx	lr
	ldcopr16	r0, r1, AMEVCNTR14	/* index 4 */
	bx	lr
	ldcopr16	r0, r1, AMEVCNTR15	/* index 5 */
	bx	lr
	ldcopr16	r0, r1, AMEVCNTR16	/* index 6 */
	bx	lr
	ldcopr16	r0, r1, AMEVCNTR17	/* index 7 */
	bx	lr
	ldcopr16	r0, r1, AMEVCNTR18	/* index 8 */
	bx	lr
	ldcopr16	r0, r1, AMEVCNTR19	/* index 9 */
	bx	lr
	ldcopr16	r0, r1, AMEVCNTR1A	/* index 10 */
	bx	lr
	ldcopr16	r0, r1, AMEVCNTR1B	/* index 11 */
	bx	lr
	ldcopr16	r0, r1, AMEVCNTR1C	/* index 12 */
	bx	lr
	ldcopr16	r0, r1, AMEVCNTR1D	/* index 13 */
	bx	lr
	ldcopr16	r0, r1, AMEVCNTR1E	/* index 14 */
	bx	lr
	ldcopr16	r0, r1, AMEVCNTR1F	/* index 15 */
	bx	lr
endfunc amu_group1_cnt_read_internal