fix: skip CPU cycle allocation for SP vCPUs to reach message loop

With the support added in the Hafnium SPMC for secondary CPU cold boot,
secondary execution contexts of SPs no longer need a round of CPU cycles
through ffa_run to reach the message loop.

Signed-off-by: Madhukar Pappireddy <madhukar.pappireddy@arm.com>
Change-Id: Ib02f51abb31d30329d43f0937ba30d721504bc53
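
For reference, a minimal sketch of the pattern this change makes
redundant (the helper name below is illustrative; the calls mirror the
deleted spm_core_sp_init helper): before the SPMC cold booted secondary
ECs itself, a CPU_ON handler had to donate cycles through ffa_run and
check that the EC reached its message loop:

    /*
     * Old pattern, no longer required now that the SPMC brings up
     * secondary ECs during secondary CPU cold boot.
     */
    static bool secondary_ec_reach_message_loop(ffa_id_t sp_id)
    {
    	unsigned int core_pos = get_current_core_id();
    	struct ffa_value ret = ffa_run(sp_id, core_pos);

    	/* The EC signals it reached the message loop via FFA_MSG_WAIT. */
    	return ffa_func_id(ret) == FFA_MSG_WAIT;
    }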
diff --git a/include/runtime_services/spm_test_helpers.h b/include/runtime_services/spm_test_helpers.h
index 93027d6..14b2fb5 100644
--- a/include/runtime_services/spm_test_helpers.h
+++ b/include/runtime_services/spm_test_helpers.h
@@ -104,12 +104,6 @@
                                       event_t *cpu_booted);
 
 /**
- * Call FFA_RUN in the designated SP to make it reach the message loop.
- * Used within CPU_ON handlers, to bring up the SP in the current core.
- */
-bool spm_core_sp_init(ffa_id_t sp_id);
-
-/**
  * Initializes the Mailbox for other SPM related tests that need to use
  * RXTX buffers.
  */
diff --git a/lib/power_management/hotplug/hotplug.c b/lib/power_management/hotplug/hotplug.c
index 76fa287..aac7b0f 100644
--- a/lib/power_management/hotplug/hotplug.c
+++ b/lib/power_management/hotplug/hotplug.c
@@ -254,7 +254,7 @@
 	tftf_prepare_cpu_off();
 	tftf_set_cpu_offline();
 
-	INFO("Powering off\n");
+	INFO("Powering off CPU:%lx\n", read_mpidr_el1());
 
 	/* Flush console before the last CPU is powered off. */
 	if (tftf_get_ref_cnt() == 0)
@@ -306,7 +306,7 @@
 
 	enable_irq();
 
-	INFO("Booting\n");
+	INFO("Booting CPU:%lx\n", read_mpidr_el1());
 
 	tftf_set_cpu_online();
 
diff --git a/spm/cactus/cactus_main.c b/spm/cactus/cactus_main.c
index b608d7b..c15ee4e 100644
--- a/spm/cactus/cactus_main.c
+++ b/spm/cactus/cactus_main.c
@@ -61,8 +61,9 @@
 	ffa_ret = ffa_msg_wait();
 
 	for (;;) {
-		VERBOSE("Woke up with func id: %s\n",
-			ffa_func_name(ffa_func_id(ffa_ret)));
+		VERBOSE("Woke up with func: %s (id: %x)\n",
+			ffa_func_name(ffa_func_id(ffa_ret)),
+			ffa_func_id(ffa_ret));
 
 		if (ffa_func_id(ffa_ret) == FFA_ERROR) {
 			ERROR("Error: %s\n",
@@ -74,7 +75,7 @@
 		    ffa_func_id(ffa_ret) != FFA_MSG_SEND_DIRECT_REQ_SMC64 &&
 		    ffa_func_id(ffa_ret) != FFA_INTERRUPT &&
 		    ffa_func_id(ffa_ret) != FFA_RUN) {
-			ERROR("%s(%u) unknown func id %s\n", __func__, vm_id,
+			ERROR("%s(%x) unknown func id %s\n", __func__, vm_id,
 			      ffa_func_name(ffa_func_id(ffa_ret)));
 			break;
 		}
@@ -87,10 +88,15 @@
 			 * informational as we're running with virtual
 			 * interrupts unmasked and the interrupt is processed
 			 * by the interrupt handler.
-			 *
-			 * Received FFA_RUN in waiting state, the endpoint
-			 * simply returns by FFA_MSG_WAIT.
 			 */
+			if (ffa_func_id(ffa_ret) == FFA_RUN) {
+				/*
+				 * Received FFA_RUN in waiting state, the
+				 * endpoint simply returns by FFA_MSG_WAIT.
+				 */
+				VERBOSE("Nothing to do. Exit to NWd\n");
+			}
+
 			ffa_ret = ffa_msg_wait();
 			continue;
 		}
diff --git a/tftf/tests/runtime_services/secure_service/spm_test_helpers.c b/tftf/tests/runtime_services/secure_service/spm_test_helpers.c
index 054e774..09482f2 100644
--- a/tftf/tests/runtime_services/secure_service/spm_test_helpers.c
+++ b/tftf/tests/runtime_services/secure_service/spm_test_helpers.c
@@ -106,23 +106,3 @@
 
 	return TEST_RESULT_SUCCESS;
 }
-
-bool spm_core_sp_init(ffa_id_t sp_id)
-{
-	/*
-	 * Secure Partitions secondary ECs need one round of ffa_run to reach
-	 * the message loop.
-	 */
-	if (sp_id != SP_ID(1)) {
-		uint32_t core_pos = get_current_core_id();
-		struct ffa_value ret = ffa_run(sp_id, core_pos);
-
-		if (ffa_func_id(ret) != FFA_MSG_WAIT) {
-			ERROR("Failed to run SP%x on core %u\n",
-			      sp_id, core_pos);
-			return false;
-		}
-	}
-
-	return true;
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_direct_messaging.c b/tftf/tests/runtime_services/secure_service/test_ffa_direct_messaging.c
index 1f8e81c..4686e4c 100644
--- a/tftf/tests/runtime_services/secure_service/test_ffa_direct_messaging.c
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_direct_messaging.c
@@ -185,19 +185,6 @@
 	}
 
 	/*
-	 * Secure Partitions beyond the first SP only have their first
-	 * EC (or vCPU0) woken up at boot time by the SPMC.
-	 * Other ECs need one round of ffa_run to reach the message loop.
-	 */
-	ffa_ret = ffa_run(SP_ID(2), core_pos);
-	if (ffa_func_id(ffa_ret) != FFA_MSG_WAIT) {
-		ERROR("Failed to run SP%x on core %u\n", SP_ID(2),
-				core_pos);
-		ret = TEST_RESULT_FAIL;
-		goto out;
-	}
-
-	/*
 	 * Send a direct message request to SP2 (MP SP) from current physical
 	 * CPU. The SPMC uses the MP pinned context corresponding to the
 	 * physical CPU emitting the request.
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_notifications.c b/tftf/tests/runtime_services/secure_service/test_ffa_notifications.c
index 4b61565..fe04751 100644
--- a/tftf/tests/runtime_services/secure_service/test_ffa_notifications.c
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_notifications.c
@@ -981,14 +981,6 @@
 		 per_vcpu_receiver, core_pos);
 
 	/*
-	 * Secure Partitions secondary ECs need one round of ffa_run to reach
-	 * the message loop.
-	 */
-	if (!spm_core_sp_init(per_vcpu_receiver)) {
-		goto out;
-	}
-
-	/*
 	 * Request to get notifications sent to the respective vCPU.
 	 * Check also if NPI was handled by the receiver. It should have been
 	 * pended at notifications set, in the respective vCPU.
@@ -1002,6 +994,8 @@
 	result = TEST_RESULT_SUCCESS;
 
 out:
+	INFO("Requested get of per-vCPU notifications from %x, core: %u.\n",
+		 per_vcpu_receiver, core_pos);
 	/* Tell the lead CPU that the calling CPU has completed the test. */
 	tftf_send_event(&per_vcpu_finished[core_pos]);
 
@@ -1016,17 +1010,8 @@
 	VERBOSE("Request SP %x to enable NPI in core %u\n",
 		 per_vcpu_receiver, core_pos);
 
-	/*
-	 * Secure Partitions secondary ECs need one round of ffa_run to reach
-	 * the message loop.
-	 */
-	if (!spm_core_sp_init(per_vcpu_receiver)) {
-		goto out;
-	}
-
 	result = TEST_RESULT_SUCCESS;
 
-out:
 	/* Tell the lead CPU that the calling CPU has completed the test. */
 	tftf_send_event(&per_vcpu_finished[core_pos]);
 
@@ -1083,6 +1068,7 @@
 	per_vcpu_receiver = receiver;
 	per_vcpu_sender = sender;
 
+	INFO("Execute npi_enable_per_vcpu_on_handler\n");
 	/* Boot all cores and enable the NPI in all of them. */
 	if (spm_run_multi_core_test(
 		(uintptr_t)npi_enable_per_vcpu_on_handler,
@@ -1134,6 +1120,7 @@
 		result = TEST_RESULT_FAIL;
 	}
 
+	INFO("Execute request_notification_get_per_vcpu_on_handler\n");
 	/*
 	 * Bring up all the cores, and request the receiver to get notifications
 	 * in each one of them.
@@ -1145,6 +1132,7 @@
 	}
 
 out:
+	INFO("Unbind notifications on CPU:%lx\n", read_mpidr_el1());
 	/* As a clean-up, unbind notifications. */
 	if (!request_notification_unbind(receiver, receiver,
 					 sender,
@@ -1153,6 +1141,7 @@
 		result = TEST_RESULT_FAIL;
 	}
 
+	INFO("Execute npi_disable_per_vcpu_on_handler\n");
 	/* Boot all cores and DISABLE the NPI in all of them. */
 	if (spm_run_multi_core_test(
 		(uintptr_t)npi_disable_per_vcpu_on_handler,
@@ -1187,10 +1176,6 @@
 	VERBOSE("Getting per-vCPU notifications from %x, core: %u.\n",
 		 per_vcpu_receiver, core_pos);
 
-	if (!spm_core_sp_init(per_vcpu_sender)) {
-		goto out;
-	}
-
 	if (!notification_get_and_validate(per_vcpu_receiver,
 					   FFA_NOTIFICATION(core_pos), 0,
 					   core_pos,
@@ -1199,7 +1184,6 @@
 		result = TEST_RESULT_FAIL;
 	}
 
-out:
 	/* Tell the lead CPU that the calling CPU has completed the test. */
 	tftf_send_event(&per_vcpu_finished[core_pos]);
 
@@ -1498,10 +1482,6 @@
 	unsigned int core_pos = get_current_core_id();
 	test_result_t result = TEST_RESULT_FAIL;
 
-	if (!spm_core_sp_init(per_vcpu_sender)) {
-		goto out;
-	}
-
 	if (!notification_set(per_vcpu_receiver, per_vcpu_sender,
 			      FFA_NOTIFICATIONS_FLAG_DELAY_SRI |
 			      FFA_NOTIFICATIONS_FLAG_PER_VCPU  |