test(pmu): check if PMUv3 is functional
The PMU is already tested for secure world leakage, but there are no
checks that it works in the first place.
The cycle counter and the event counters are exercised separately, as
the functionality of one does not imply the functionality of the other
(EL3 has separate controls for each). This additionally catches a corner
case where FEAT_HPMN0 is missing, without failing all tests.
Signed-off-by: Boyan Karatotev <boyan.karatotev@arm.com>
Change-Id: I966d3155cdd6edfde01af32f7c50c3bb3644274a
diff --git a/include/common/test_helpers.h b/include/common/test_helpers.h
index 9fc7579..43203a7 100644
--- a/include/common/test_helpers.h
+++ b/include/common/test_helpers.h
@@ -343,6 +343,14 @@
} \
} while (false)
+#define SKIP_TEST_IF_PMUV3_NOT_SUPPORTED() \
+ do { \
+ if (!get_feat_pmuv3_supported()) { \
+ tftf_testcase_printf("FEAT_PMUv3 not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
/* Helper macro to verify if system suspend API is supported */
#define is_psci_sys_susp_supported() \
(tftf_get_psci_feature_info(SMC_PSCI_SYSTEM_SUSPEND) \
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index d0b7566..e48e51c 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -173,6 +173,11 @@
#define ID_AA64DFR0_V8_2_DEBUG_ARCH_SUPPORTED U(8)
#define ID_AA64DFR0_V8_4_DEBUG_ARCH_SUPPORTED U(9)
+/* ID_AA64DFR0_EL1.HPMN0 definitions */
+#define ID_AA64DFR0_HPMN0_SHIFT U(60)
+#define ID_AA64DFR0_HPMN0_MASK ULL(0xf)
+#define ID_AA64DFR0_HPMN0_SUPPORTED ULL(1)
+
/* ID_AA64DFR0_EL1.BRBE definitions */
#define ID_AA64DFR0_BRBE_SHIFT U(52)
#define ID_AA64DFR0_BRBE_MASK ULL(0xf)
@@ -188,6 +193,11 @@
#define ID_AA64DFR0_TRACEFILT_MASK U(0xf)
#define ID_AA64DFR0_TRACEFILT_SUPPORTED U(1)
+/* ID_AA64DFR0_EL1.PMUVer definitions */
+#define ID_AA64DFR0_PMUVER_SHIFT U(8)
+#define ID_AA64DFR0_PMUVER_MASK ULL(0xf)
+#define ID_AA64DFR0_PMUVER_NOT_SUPPORTED ULL(0)
+
/* ID_AA64DFR0_EL1.TraceVer definitions */
#define ID_AA64DFR0_TRACEVER_SHIFT U(4)
#define ID_AA64DFR0_TRACEVER_MASK ULL(0xf)
@@ -416,6 +426,8 @@
#define MDCR_EL2_HPME_BIT (U(1) << 7)
#define MDCR_EL2_TPM_BIT (U(1) << 6)
#define MDCR_EL2_TPMCR_BIT (U(1) << 5)
+#define MDCR_EL2_HPMN_SHIFT U(0)
+#define MDCR_EL2_HPMN_MASK ULL(0x1f)
#define MDCR_EL2_RESET_VAL U(0x0)
/* HSTR_EL2 definitions */
@@ -786,6 +798,8 @@
#define PMCR_EL0_DP_BIT (U(1) << 5)
#define PMCR_EL0_X_BIT (U(1) << 4)
#define PMCR_EL0_D_BIT (U(1) << 3)
+#define PMCR_EL0_C_BIT (U(1) << 2)
+#define PMCR_EL0_P_BIT (U(1) << 1)
#define PMCR_EL0_E_BIT (U(1) << 0)
/* PMCNTENSET_EL0 definitions */
@@ -809,6 +823,10 @@
#define PMCCFILTR_EL0_MT_BIT (U(1) << 25)
#define PMCCFILTR_EL0_SH_BIT (U(1) << 24)
+/* PMSELR_EL0 definitions */
+#define PMSELR_EL0_SEL_SHIFT U(0)
+#define PMSELR_EL0_SEL_MASK U(0x1f)
+
/* PMU event counter ID definitions */
#define PMU_EV_PC_WRITE_RETIRED U(0x000C)
diff --git a/include/lib/aarch64/arch_features.h b/include/lib/aarch64/arch_features.h
index 705d98f..a6ce5ae 100644
--- a/include/lib/aarch64/arch_features.h
+++ b/include/lib/aarch64/arch_features.h
@@ -204,4 +204,15 @@
ID_AA64DFR0_PMS_MASK);
}
+static inline bool get_feat_pmuv3_supported(void)
+{
+ return (((read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMUVER_SHIFT) &
+ ID_AA64DFR0_PMUVER_MASK) != ID_AA64DFR0_PMUVER_NOT_SUPPORTED);
+}
+
+static inline bool get_feat_hpmn0_supported(void)
+{
+ return (((read_id_aa64dfr0_el1() >> ID_AA64DFR0_HPMN0_SHIFT) &
+ ID_AA64DFR0_HPMN0_MASK) == ID_AA64DFR0_HPMN0_SUPPORTED);
+}
#endif /* ARCH_FEATURES_H */
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index e10ddab..f79174e 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -416,6 +416,34 @@
DEFINE_SYSREG_RW_FUNCS(pmevtyper0_el0)
DEFINE_SYSREG_READ_FUNC(pmevcntr0_el0)
+DEFINE_SYSREG_RW_FUNCS(pmselr_el0)
+DEFINE_SYSREG_RW_FUNCS(pmxevtyper_el0)
+DEFINE_SYSREG_RW_FUNCS(pmxevcntr_el0)
+
+/*
+ * parameterised event counter accessors: select the counter with PMSELR_EL0,
+ * then access it indirectly through PMXEVCNTR_EL0/PMXEVTYPER_EL0
+ */
+static inline u_register_t read_pmevcntrn_el0(int ctr_num)
+{
+ write_pmselr_el0(ctr_num & PMSELR_EL0_SEL_MASK);
+ return read_pmxevcntr_el0();
+}
+
+static inline void write_pmevcntrn_el0(int ctr_num, u_register_t val)
+{
+ write_pmselr_el0(ctr_num & PMSELR_EL0_SEL_MASK);
+ write_pmxevcntr_el0(val);
+}
+
+static inline u_register_t read_pmevtypern_el0(int ctr_num)
+{
+ write_pmselr_el0(ctr_num & PMSELR_EL0_SEL_MASK);
+ return read_pmxevtyper_el0();
+}
+
+static inline void write_pmevtypern_el0(int ctr_num, u_register_t val)
+{
+ write_pmselr_el0(ctr_num & PMSELR_EL0_SEL_MASK);
+ write_pmxevtyper_el0(val);
+}
/* Armv8.5 FEAT_RNG Registers */
DEFINE_SYSREG_READ_FUNC(rndr)
diff --git a/tftf/tests/extensions/pmuv3/test_pmuv3.c b/tftf/tests/extensions/pmuv3/test_pmuv3.c
new file mode 100644
index 0000000..fd7a077
--- /dev/null
+++ b/tftf/tests/extensions/pmuv3/test_pmuv3.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <test_helpers.h>
+
+/* the tests target AArch64; AArch32 is too different to even build */
+#if defined(__aarch64__)
+
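+/* 0x0008 is the architectural event number for INST_RETIRED */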
+#define PMU_EVT_INST_RETIRED 0x0008
+#define NOP_REPETITIONS 50
+
+static inline void enable_counting(void)
+{
+ write_pmcr_el0(read_pmcr_el0() | PMCR_EL0_E_BIT);
+ /* calling this means the PMU is about to be used, so synchronize the enable */
+ isb();
+}
+
+static inline void disable_counting(void)
+{
+ write_pmcr_el0(read_pmcr_el0() & ~PMCR_EL0_E_BIT);
+ /* we also rely on the disable having actually taken effect */
+ isb();
+}
+
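+/* PMCR_EL0.C resets the cycle counter, PMCR_EL0.P resets all event counters */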
+static inline void clear_counters(void)
+{
+ write_pmcr_el0(read_pmcr_el0() | PMCR_EL0_C_BIT | PMCR_EL0_P_BIT);
+}
+
+/*
+ * tftf runs in EL2, so don't bother enabling counting at lower ELs or in the
+ * secure world. TF-A has separate controls for those and counting there does
+ * not affect us
+ */
+static inline void enable_cycle_counter(void)
+{
+ write_pmccfiltr_el0(PMCCFILTR_EL0_NSH_BIT);
+ write_pmcntenset_el0(read_pmcntenset_el0() | PMCNTENSET_EL0_C_BIT);
+}
+
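+/* count retired instructions on counter <ctr_num>, with EL2 counting allowed (NSH) */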
+static inline void enable_event_counter(int ctr_num)
+{
+ write_pmevtypern_el0(ctr_num, PMEVTYPER_EL0_NSH_BIT |
+ (PMU_EVT_INST_RETIRED & PMEVTYPER_EL0_EVTCOUNT_BITS));
+ write_pmcntenset_el0(read_pmcntenset_el0() |
+ PMCNTENSET_EL0_P_BIT(ctr_num));
+}
+
+/* it doesn't really matter what is executed, only that a lot of it is */
+static inline void execute_nops(void)
+{
+ for (int i = 0; i < NOP_REPETITIONS; i++) {
+ __asm__ ("orr x0, x0, x0\n");
+ }
+}
+
+#endif /* defined(__aarch64__) */
+
+/*
+ * try the cycle counter with some NOPs to see if it works
+ */
+test_result_t test_pmuv3_cycle_works_ns(void)
+{
+ SKIP_TEST_IF_AARCH32();
+#if defined(__aarch64__)
+ u_register_t ccounter_start;
+ u_register_t ccounter_end;
+
+ SKIP_TEST_IF_PMUV3_NOT_SUPPORTED();
+
+ enable_cycle_counter();
+ enable_counting();
+
+ ccounter_start = read_pmccntr_el0();
+ execute_nops();
+ ccounter_end = read_pmccntr_el0();
+ disable_counting();
+ clear_counters();
+
+ tftf_testcase_printf("Counted from %ld to %ld\n",
+ ccounter_start, ccounter_end);
+ if (ccounter_start != ccounter_end) {
+ return TEST_RESULT_SUCCESS;
+ }
+ return TEST_RESULT_FAIL;
+#endif /* defined(__aarch64__) */
+}
+
+/*
+ * try an event counter with some NOPs to see if it works. MDCR_EL2.HPMN can
+ * make this tricky so take extra care.
+ */
+test_result_t test_pmuv3_event_works_ns(void)
+{
+ SKIP_TEST_IF_AARCH32();
+#if defined(__aarch64__)
+ u_register_t evcounter_start;
+ u_register_t evcounter_end;
+ u_register_t mdcr_el2 = ~0;
+
+ SKIP_TEST_IF_PMUV3_NOT_SUPPORTED();
+
+ /* use the real MDCR_EL2 value, or keep the all-ones dummy to skip the checks below */
+ if (IS_IN_EL2()) {
+ mdcr_el2 = read_mdcr_el2();
+ }
+
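+ /* PMCR_EL0.N reports how many event counters are implemented */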
+ if (((read_pmcr_el0() >> PMCR_EL0_N_SHIFT) & PMCR_EL0_N_MASK) == 0) {
+ tftf_testcase_printf("No event counters implemented\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* FEAT_HPMN0 only affects event counters */
+ if ((mdcr_el2 & MDCR_EL2_HPMN_MASK) == 0) {
+ if (!get_feat_hpmn0_supported()) {
+ tftf_testcase_printf(
+ "FEAT_HPMN0 not implemented but HPMN is 0\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* counter 0 will be disabled, so the test will fail; say why up front */
+ if ((mdcr_el2 & MDCR_EL2_HPME_BIT) == 0) {
+ tftf_testcase_printf(
+ "HPMN is 0 and HPME is not set!\n");
+ }
+ }
+
+ enable_event_counter(0);
+ enable_counting();
+
+ /*
+ * if any counter is accessible it will be the very first one: HPME can
+ * disable the upper range of counters and HPMN can put the partition
+ * boundary anywhere
+ */
+ evcounter_start = read_pmevcntrn_el0(0);
+ execute_nops();
+ evcounter_end = read_pmevcntrn_el0(0);
+ disable_counting();
+ clear_counters();
+
+ tftf_testcase_printf("Counted from %ld to %ld\n",
+ evcounter_start, evcounter_end);
+ if (evcounter_start != evcounter_end) {
+ return TEST_RESULT_SUCCESS;
+ }
+ return TEST_RESULT_FAIL;
+#endif /* defined(__aarch64__) */
+}
diff --git a/tftf/tests/tests-cpu-extensions.mk b/tftf/tests/tests-cpu-extensions.mk
index 3616cf4..3b37673 100644
--- a/tftf/tests/tests-cpu-extensions.mk
+++ b/tftf/tests/tests-cpu-extensions.mk
@@ -10,6 +10,7 @@
extensions/brbe/test_brbe.c \
extensions/ecv/test_ecv.c \
extensions/fgt/test_fgt.c \
+ extensions/pmuv3/test_pmuv3.c \
extensions/mte/test_mte.c \
extensions/pauth/test_pauth.c \
extensions/sve/sve_operations.S \
diff --git a/tftf/tests/tests-cpu-extensions.xml b/tftf/tests/tests-cpu-extensions.xml
index d45cf62..e0c1b20 100644
--- a/tftf/tests/tests-cpu-extensions.xml
+++ b/tftf/tests/tests-cpu-extensions.xml
@@ -29,6 +29,8 @@
<testcase name="AFP support" function="test_afp_support" />
<testcase name="Test wfit instruction" function="test_wfit_instruction" />
<testcase name="Test wfet instruction" function="test_wfet_instruction" />
+ <testcase name="PMUv3 cycle counter functional in NS" function="test_pmuv3_cycle_works_ns" />
+ <testcase name="PMUv3 event counter functional in NS" function="test_pmuv3_event_works_ns" />
</testsuite>
<testsuite name="ARM_ARCH_SVC" description="Arm Architecture Service tests">