@@ -69,6 +69,7 @@ void ffa_handle_notification_info_get(struct cpu_user_regs *regs)
{
struct domain *d = current->domain;
struct ffa_ctx *ctx = d->arch.tee;
+ bool notif_pending;
if ( !notif_enabled )
{
@@ -76,7 +77,11 @@ void ffa_handle_notification_info_get(struct cpu_user_regs *regs)
return;
}
- if ( test_and_clear_bool(ctx->notif.secure_pending) )
+ notif_pending = test_and_clear_bool(ctx->notif.secure_pending);
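+ /* With VM-to-VM enabled, notifications pending from other VMs are reported too */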
+ if ( IS_ENABLED(CONFIG_FFA_VM_TO_VM) )
+ notif_pending |= test_and_clear_bool(ctx->notif.vm_pending);
+
+ if ( notif_pending )
{
/* A pending global notification for the guest */
ffa_set_regs(regs, FFA_SUCCESS_64, 0,
@@ -93,6 +98,7 @@ void ffa_handle_notification_info_get(struct cpu_user_regs *regs)
void ffa_handle_notification_get(struct cpu_user_regs *regs)
{
struct domain *d = current->domain;
+ struct ffa_ctx *ctx = d->arch.tee;
uint32_t recv = get_user_reg(regs, 1);
uint32_t flags = get_user_reg(regs, 2);
uint32_t w2 = 0;
@@ -132,11 +138,7 @@ void ffa_handle_notification_get(struct cpu_user_regs *regs)
*/
if ( ( flags & FFA_NOTIF_FLAG_BITMAP_SP ) &&
( flags & FFA_NOTIF_FLAG_BITMAP_SPM ) )
- {
- struct ffa_ctx *ctx = d->arch.tee;
-
- ACCESS_ONCE(ctx->notif.secure_pending) = false;
- }
+ ACCESS_ONCE(ctx->notif.secure_pending) = false;
arm_smccc_1_2_smc(&arg, &resp);
e = ffa_get_ret_code(&resp);
@@ -156,6 +158,14 @@ void ffa_handle_notification_get(struct cpu_user_regs *regs)
w6 = resp.a6;
}
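+ /*
+ * If the guest asked for hypervisor framework notifications and an
+ * "RX buffer full" event is pending, report it in w7 and clear the
+ * VM pending state.
+ */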
+ if ( IS_ENABLED(CONFIG_FFA_VM_TO_VM) &&
+ (flags & FFA_NOTIF_FLAG_BITMAP_HYP) &&
+ test_and_clear_bool(ctx->notif.buff_full_pending) )
+ {
+ ACCESS_ONCE(ctx->notif.vm_pending) = false;
+ w7 = FFA_NOTIF_RX_BUFFER_FULL;
+ }
+
ffa_set_regs(regs, FFA_SUCCESS_32, 0, w2, w3, w4, w5, w6, w7);
}
@@ -178,6 +188,20 @@ int ffa_handle_notification_set(struct cpu_user_regs *regs)
bitmap_hi);
}
+#ifdef CONFIG_FFA_VM_TO_VM
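+/*
+ * Raise the "RX buffer full" framework notification for the domain and
+ * let the guest know it has notifications pending.
+ */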
+void ffa_raise_rx_buffer_full(struct domain *d)
+{
+ struct ffa_ctx *ctx = d->arch.tee;
+
+ if ( !ctx )
+ return;
+
+ ACCESS_ONCE(ctx->notif.buff_full_pending) = true;
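+ /* Only inject the notification pending interrupt on the first pending notification */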
+ if ( !test_and_set_bool(ctx->notif.vm_pending) )
+ vgic_inject_irq(d, d->vcpu[0], GUEST_FFA_NOTIF_PEND_INTR_ID, true);
+}
+#endif
+
/*
* Extract a 16-bit ID (index n) from the successful return value from
* FFA_NOTIFICATION_INFO_GET_64 or FFA_NOTIFICATION_INFO_GET_32. IDs are
@@ -210,6 +210,8 @@
#define FFA_NOTIF_INFO_GET_ID_COUNT_SHIFT 7
#define FFA_NOTIF_INFO_GET_ID_COUNT_MASK 0x1F
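+/*
+ * Framework notification "RX buffer full", reported in w7 of
+ * FFA_NOTIFICATION_GET.
+ */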
+#define FFA_NOTIF_RX_BUFFER_FULL BIT(0, U)
+
/* Feature IDs used with FFA_FEATURES */
#define FFA_FEATURE_NOTIF_PEND_INTR 0x1U
#define FFA_FEATURE_SCHEDULE_RECV_INTR 0x2U
@@ -295,9 +297,20 @@ struct ffa_mem_region {
struct ffa_ctx_notif {
/*
* True if domain is reported by FFA_NOTIFICATION_INFO_GET to have
- * pending global notifications.
+ * pending notifications from the secure world.
*/
bool secure_pending;
+
+ /*
+ * True if domain is reported by FFA_NOTIFICATION_INFO_GET to have
+ * pending notifications from VMs (including framework ones).
+ */
+ bool vm_pending;
+
+ /*
+ * True if domain has an "RX buffer full" framework notification pending.
+ */
+ bool buff_full_pending;
};
struct ffa_ctx {
@@ -369,6 +382,14 @@ void ffa_handle_notification_info_get(struct cpu_user_regs *regs);
void ffa_handle_notification_get(struct cpu_user_regs *regs);
int ffa_handle_notification_set(struct cpu_user_regs *regs);
+#ifdef CONFIG_FFA_VM_TO_VM
+void ffa_raise_rx_buffer_full(struct domain *d);
+#else
+static inline void ffa_raise_rx_buffer_full(struct domain *d)
+{
+}
+#endif
+
void ffa_handle_msg_send_direct_req(struct cpu_user_regs *regs, uint32_t fid);
int32_t ffa_handle_msg_send2(struct cpu_user_regs *regs);
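
The hunks above implement a small producer/consumer handshake around the two new
flags. The stand-alone sketch below models that handshake outside of Xen so it can
be compiled and run on its own: the struct and the two helpers mirror the patch,
but the names, the plain bool fields and the printf()-based driver are illustrative
stand-ins, not Xen code (the patch uses test_and_set_bool()/test_and_clear_bool(),
ACCESS_ONCE() and vgic_inject_irq()).

/* Stand-alone model of the "RX buffer full" notification handshake. */
#include <stdbool.h>
#include <stdio.h>

#define FFA_NOTIF_RX_BUFFER_FULL 0x1U

struct ffa_ctx_notif {
    bool secure_pending;
    bool vm_pending;
    bool buff_full_pending;
};

/* Producer side: mirrors ffa_raise_rx_buffer_full() */
static void raise_rx_buffer_full(struct ffa_ctx_notif *n)
{
    n->buff_full_pending = true;
    if ( !n->vm_pending )               /* test_and_set_bool() in the patch */
    {
        n->vm_pending = true;
        printf("inject notification pending interrupt\n");
    }
}

/*
 * Consumer side: mirrors the FFA_NOTIFICATION_GET change, assuming the guest
 * set FFA_NOTIF_FLAG_BITMAP_HYP in the request flags.
 */
static unsigned int notification_get_hyp(struct ffa_ctx_notif *n)
{
    unsigned int w7 = 0;

    if ( n->buff_full_pending )         /* test_and_clear_bool() in the patch */
    {
        n->buff_full_pending = false;
        n->vm_pending = false;
        w7 = FFA_NOTIF_RX_BUFFER_FULL;
    }

    return w7;
}

int main(void)
{
    struct ffa_ctx_notif n = { 0 };

    raise_rx_buffer_full(&n);              /* injects the interrupt */
    raise_rx_buffer_full(&n);              /* already pending: no second injection */
    printf("w7 = %#x\n", notification_get_hyp(&n));  /* 0x1 */
    printf("w7 = %#x\n", notification_get_hyp(&n));  /* 0 once drained */

    return 0;
}

Running the model shows the interrupt being reported only for the first raise and
FFA_NOTIF_RX_BUFFER_FULL being returned exactly once until the notification is
raised again, which matches the intent of the vm_pending/buff_full_pending pair.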