cactus: add memory sharing tests
Added tests for the FF-A memory sharing interfaces. To enable them:
- The message loop was refactored to support multiple requests/commands
arriving through direct messaging, leveraging the command wrappers
from the previous commit;
- Enabled adding dynamic memory regions to the translation tables, as
described in the xlat_tables_v2 library documentation [1] (see the
sketch below);
- Mapped the translation tables into the cactus memory map so that they
can be updated after a memory retrieve.
[1] https://trustedfirmware-a.readthedocs.io/en/latest/components/xlat-tables-lib-v2-design.html?highlight=xlat
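
For reference, the dynamic mapping flow exercised by the new test is
roughly as follows (a minimal sketch, not part of this patch; the helper
name is hypothetical and the attributes are illustrative):

    #include <lib/xlat_tables/xlat_tables_v2.h>

    /* Identity-map a retrieved region, use it, then unmap it. */
    static int access_retrieved_region(uint64_t address, uint32_t page_count)
    {
    	size_t size = page_count * PAGE_SIZE;
    	int ret;

    	/* Needs PLAT_XLAT_TABLES_DYNAMIC, defined in cactus.mk below. */
    	ret = mmap_add_dynamic_region(address, (uintptr_t)address,
    				      size, MT_RW_DATA | MT_NS);
    	if (ret != 0)
    		return ret;

    	/* ... read/write the memory here ... */

    	return mmap_remove_dynamic_region((uintptr_t)address, size);
    }
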
Signed-off-by: J-Alves <joao.alves@arm.com>
Change-Id: I73c9ba66dd89c53c50eca6ba6aca2e0ad115cb35
diff --git a/spm/cactus/cactus.ld.S b/spm/cactus/cactus.ld.S
index 30ad0da..11b28ba 100644
--- a/spm/cactus/cactus.ld.S
+++ b/spm/cactus/cactus.ld.S
@@ -72,6 +72,7 @@
__BSS_START__ = .;
*(SORT_BY_ALIGNMENT(.bss*))
*(COMMON)
+ *(xlat_table*)
. = NEXT(PAGE_SIZE);
__BSS_END__ = .;
}
diff --git a/spm/cactus/cactus.mk b/spm/cactus/cactus.mk
index 779fd38..4b3f0bd 100644
--- a/spm/cactus/cactus.mk
+++ b/spm/cactus/cactus.mk
@@ -74,6 +74,7 @@
$(eval $(call add_define,CACTUS_DEFINES,FVP_MAX_PE_PER_CPU))
$(eval $(call add_define,CACTUS_DEFINES,LOG_LEVEL))
$(eval $(call add_define,CACTUS_DEFINES,PLAT_${PLAT}))
+$(eval $(call add_define,CACTUS_DEFINES,PLAT_XLAT_TABLES_DYNAMIC))
$(CACTUS_DTB) : $(BUILD_PLAT)/cactus $(BUILD_PLAT)/cactus/cactus.elf
$(CACTUS_DTB) : $(CACTUS_DTS)
diff --git a/spm/cactus/cactus_ffa_tests.c b/spm/cactus/cactus_ffa_tests.c
index 28555af..1b12fb7 100644
--- a/spm/cactus/cactus_ffa_tests.c
+++ b/spm/cactus/cactus_ffa_tests.c
@@ -11,6 +11,9 @@
#include <ffa_helpers.h>
#include <sp_helpers.h>
+#include <lib/libc/string.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
/* FFA version test helpers */
#define FFA_MAJOR 1U
#define FFA_MINOR 0U
@@ -189,6 +192,161 @@
announce_test_end(test_ffa_version);
}
+bool ffa_memory_retrieve_test(struct mailbox_buffers *mb,
+ struct ffa_memory_region *retrieved,
+ uint64_t handle, ffa_vm_id_t sender,
+ ffa_vm_id_t receiver, uint32_t mem_func)
+{
+ smc_ret_values ret;
+ uint32_t fragment_size;
+ uint32_t total_size;
+ uint32_t descriptor_size;
+
+ if (retrieved == NULL || mb == NULL) {
+ ERROR("Invalid parameters!\n");
+ return false;
+ }
+
+ /*
+ * TODO: Revise shareability attribute in function call
+ * below.
+ * https://lists.trustedfirmware.org/pipermail/hafnium/2020-June/000023.html
+ */
+ descriptor_size = ffa_memory_retrieve_request_init(
+ mb->send, handle, sender, receiver, 0, 0,
+ FFA_DATA_ACCESS_RW,
+ FFA_INSTRUCTION_ACCESS_NX,
+ FFA_MEMORY_NORMAL_MEM,
+ FFA_MEMORY_CACHE_WRITE_BACK,
+ FFA_MEMORY_OUTER_SHAREABLE);
+
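+	/*
+	 * The whole request fits in a single fragment, so the total and
+	 * fragment lengths passed to the retrieve request are both the
+	 * descriptor size.
+	 */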
+ ret = ffa_mem_retrieve_req(descriptor_size, descriptor_size);
+
+ if (ret.ret0 != FFA_MEM_RETRIEVE_RESP) {
+ ERROR("Couldn't retrieve the memory page!\n");
+ return false;
+ }
+
+	/*
+	 * total_size and fragment_size are used to keep track of the state
+	 * of the transaction: when the sum of fragment_size across all
+	 * fragments equals total_size, the memory transaction is complete.
+	 * This is a simple test with a single fragment, so upon a
+	 * successful ffa_mem_retrieve_req, total_size must equal
+	 * fragment_size.
+	 */
+ total_size = ret.ret1;
+ fragment_size = ret.ret2;
+
+ if (total_size != fragment_size) {
+		ERROR("Expected only one memory fragment!\n");
+ return false;
+ }
+
+ if (fragment_size > PAGE_SIZE) {
+		ERROR("Fragment should not be larger than the RX buffer!\n");
+ return false;
+ }
+
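+	/* Copy the descriptor out of the RX buffer before releasing it. */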
+ memcpy((void *)retrieved, mb->recv, fragment_size);
+
+ if (ffa_rx_release().ret0 != FFA_SUCCESS_SMC32) {
+ ERROR("Failed to release Rx buffer!\n");
+ return false;
+ }
+
+ if (retrieved->receiver_count != 1) {
+ VERBOSE("This memory has been shared with multiple"
+ " receivers!\n");
+ }
+
+ NOTICE("Memory Retrieved!\n");
+
+ return true;
+}
+
+bool ffa_memory_relinquish_test(struct ffa_mem_relinquish *m,
+ uint64_t handle,
+ ffa_vm_id_t id)
+{
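+	/* Prepare the relinquish descriptor in the caller-provided buffer. */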
+ ffa_mem_relinquish_init(m, handle, 0, id);
+
+ if (ffa_mem_relinquish().ret0 != FFA_SUCCESS_SMC32) {
+ ERROR("%s failed to relinquish memory!\n", __func__);
+ return false;
+ }
+
+ NOTICE("Memory Relinquished!\n");
+ return true;
+}
+
+void ffa_memory_management_test(struct mailbox_buffers *mb, ffa_vm_id_t vm_id,
+ ffa_vm_id_t sender, uint32_t mem_func,
+ uint64_t handle)
+{
+ const char *test_ffa = "Memory Management";
+ struct ffa_memory_region m;
+ struct ffa_composite_memory_region *composite;
+ int ret;
+ unsigned int mem_attrs;
+ uint32_t *ptr;
+
+ announce_test_section_start(test_ffa);
+
+ expect(ffa_memory_retrieve_test(
+ mb, &m, handle, sender, vm_id, mem_func),
+ true);
+
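+	/* Get the composite memory region descriptor (receiver index 0). */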
+ composite = ffa_memory_region_get_composite(&m, 0);
+
+	NOTICE("Address: %p; page_count: %x; page_size: %x\n",
+ composite->constituents[0].address,
+ composite->constituents[0].page_count, PAGE_SIZE);
+
+ /* This test is only concerned with RW permissions. */
+ expect(ffa_get_data_access_attr(
+ m.receivers[0].receiver_permissions.permissions),
+ FFA_DATA_ACCESS_RW);
+
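+	/* Map the retrieved region as non-secure, read-write, non-executable. */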
+ mem_attrs = MT_RW_DATA | MT_NS | MT_EXECUTE_NEVER;
+
+ ret = mmap_add_dynamic_region(
+ (uint64_t)composite->constituents[0].address,
+ (uint64_t)composite->constituents[0].address,
+ composite->constituents[0].page_count * PAGE_SIZE,
+ mem_attrs);
+ expect(ret, 0);
+
+ VERBOSE("Memory has been mapped\n");
+
+ ptr = (uint32_t *) composite->constituents[0].address;
+
+ /* Write mem_func to retrieved memory region for validation purposes. */
+ VERBOSE("Writing: %x\n", mem_func);
+ for (unsigned int i = 0U; i < 5U; i++)
+ ptr[i] = mem_func;
+
+	/*
+	 * An FFA_MEM_DONATE changes the ownership of the page; as such, no
+	 * relinquish is needed.
+	 */
+ if (mem_func != FFA_MEM_DONATE_SMC32) {
+ ret = mmap_remove_dynamic_region(
+ (uint64_t)composite->constituents[0].address,
+ composite->constituents[0].page_count * PAGE_SIZE);
+ expect(ret, 0);
+
+ expect(ffa_memory_relinquish_test(
+ (struct ffa_mem_relinquish *)mb->send,
+ m.handle, vm_id),
+ true);
+ }
+
+ announce_test_section_end(test_ffa);
+}
+
void ffa_tests(struct mailbox_buffers *mb)
{
const char *test_ffa = "FFA Interfaces";
diff --git a/spm/cactus/cactus_main.c b/spm/cactus/cactus_main.c
index acbe2af..7c70d67 100644
--- a/spm/cactus/cactus_main.c
+++ b/spm/cactus/cactus_main.c
@@ -24,6 +24,8 @@
#include <plat_arm.h>
#include <platform_def.h>
+#include <cactus_test_cmds.h>
+
/* Host machine information injected by the build system in the ELF file. */
extern const char build_message[];
extern const char version_string[];
@@ -36,21 +38,25 @@
* but rather through Hafnium print hypercall.
*
*/
-static void __dead2 message_loop(ffa_vm_id_t vm_id)
+static void __dead2 message_loop(ffa_vm_id_t vm_id, struct mailbox_buffers *mb)
{
smc_ret_values ffa_ret;
uint32_t sp_response;
+ ffa_vm_id_t source;
/*
- * This initial wait call is necessary to inform SPMD that
- * SP initialization has completed. It blocks until receiving
- * a direct message request.
- */
+ * This initial wait call is necessary to inform SPMD that
+ * SP initialization has completed. It blocks until receiving
+ * a direct message request.
+ */
+
ffa_ret = ffa_msg_wait();
for (;;) {
+ VERBOSE("Woke up with func id: %lx\n", ffa_ret.ret0);
- if (ffa_ret.ret0 != FFA_MSG_SEND_DIRECT_REQ_SMC32) {
+ if (ffa_ret.ret0 != FFA_MSG_SEND_DIRECT_REQ_SMC32 &&
+ ffa_ret.ret0 != FFA_MSG_SEND_DIRECT_REQ_SMC64) {
ERROR("%s(%u) unknown func id 0x%lx\n",
__func__, vm_id, ffa_ret.ret0);
break;
@@ -61,24 +67,46 @@
__func__, vm_id, ffa_ret.ret1);
break;
}
+ source = ffa_ret.ret2;
- if (ffa_ret.ret2 != HYP_ID) {
+ if (source != HYP_ID) {
ERROR("%s(%u) invalid hyp id 0x%lx\n",
__func__, vm_id, ffa_ret.ret2);
break;
}
- /*
- * For the sake of testing, add the vm id to the
- * received message.
- */
- sp_response = ffa_ret.ret3 | vm_id;
+ PRINT_CMD(ffa_ret);
- /*
- * Send a response through direct messaging then block
- * until receiving a new message request.
- */
- ffa_ret = ffa_msg_send_direct_resp(vm_id, HYP_ID, sp_response);
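+		/*
+		 * Dispatch based on the test command encoded in the direct
+		 * message, using the wrappers from cactus_test_cmds.h.
+		 */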
+ switch (CACTUS_GET_CMD(ffa_ret)) {
+ case FFA_MEM_SHARE_SMC32:
+ case FFA_MEM_LEND_SMC32:
+ case FFA_MEM_DONATE_SMC32:
+ ffa_memory_management_test(
+ mb, vm_id, source,
+ CACTUS_GET_CMD(ffa_ret),
+ CACTUS_MEM_SEND_GET_HANDLE(ffa_ret));
+
+			/*
+			 * If execution reaches this point, all memory
+			 * retrieval operations went well; reply with
+			 * success.
+			 */
+ ffa_ret = CACTUS_SUCCESS_RESP(vm_id, source);
+ break;
+ default:
+			/*
+			 * The direct messaging test is currently handled
+			 * here.
+			 * TODO: create a dedicated case for it in the
+			 * switch above.
+			 * For the sake of testing, add the vm id to the
+			 * received message.
+			 */
+ NOTICE("Replying to Direct MSG test\n");
+ sp_response = ffa_ret.ret3 | vm_id;
+ ffa_ret = ffa_msg_send_direct_resp(vm_id,
+ HYP_ID,
+ sp_response);
+
+ break;
+ }
}
panic();
@@ -227,7 +255,7 @@
ffa_tests(&mb);
/* End up to message loop */
- message_loop(ffa_id);
+ message_loop(ffa_id, &mb);
/* Not reached */
}
diff --git a/spm/cactus/cactus_tests.h b/spm/cactus/cactus_tests.h
index 2e13a6f..fd229bf 100644
--- a/spm/cactus/cactus_tests.h
+++ b/spm/cactus/cactus_tests.h
@@ -16,6 +16,10 @@
/*
* Test to FFA interfaces.
*/
+void ffa_memory_management_test(struct mailbox_buffers *mb, ffa_vm_id_t vm_id,
+ ffa_vm_id_t sender, uint32_t mem_func,
+ uint64_t handle);
+
void ffa_tests(struct mailbox_buffers *mb);
/*