[25/28] ARM: vITS: handle INVALL command

Message ID 20170130183153.28566-26-andre.przywara@arm.com (mailing list archive)
State New, archived

Commit Message

Andre Przywara Jan. 30, 2017, 6:31 p.m. UTC
The INVALL command instructs an ITS to invalidate the configuration
data for all LPIs associated with a given redistributor (read: VCPU).
This is nasty to emulate exactly with our architecture, so we just scan
the virtual pending table and inject _every_ LPI found there whose enable
bit is set.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
 xen/arch/arm/vgic-v3-its.c | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)
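
To make the approach from the commit message concrete, here is a minimal
standalone sketch of the "scan the pending table, check the enable bit,
inject" idea. It is illustrative only and not taken from this series: it
assumes the GICv3 convention that each LPI has one configuration byte
whose bit 0 is the enable bit, and all function and parameter names below
are made up for the example.

#include <stdint.h>
#include <stdbool.h>

/* Bit 0 of an LPI's configuration byte is the enable bit (GICv3). */
#define LPI_PROP_ENABLED  0x01

/*
 * Walk a pending bitmap and hand every LPI that is both pending and
 * enabled in the property table to the inject callback.
 */
static unsigned int scan_pending_lpis(const unsigned long *pending,
                                      const uint8_t *props,
                                      unsigned int nr_lpis,
                                      void (*inject)(unsigned int lpi))
{
    unsigned int bits = 8 * sizeof(unsigned long);
    unsigned int lpi, count = 0;

    for ( lpi = 0; lpi < nr_lpis; lpi++ )
    {
        bool pend = pending[lpi / bits] & (1UL << (lpi % bits));

        if ( pend && (props[lpi] & LPI_PROP_ENABLED) )
        {
            inject(lpi);
            count++;
        }
    }

    return count;
}

The handler in the patch below avoids the full linear walk by calling
find_next_bit() on the virtual pending table, but the per-LPI decision is
the same.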

Patch

diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
index 82f7bcc..75d1e12 100644
--- a/xen/arch/arm/vgic-v3-its.c
+++ b/xen/arch/arm/vgic-v3-its.c
@@ -316,6 +316,40 @@ static int its_handle_inv(struct virt_its *its, uint64_t *cmdptr)
     return 0;
 }
 
+/*
+ * INVALL updates the per-LPI configuration status for every LPI mapped to
+ * a particular redistributor. Since our property table is referenced when
+ * needed, we don't need to sync anything here. But we have to take care
+ * of LPIs that got enabled while an interrupt for them was already pending.
+ * To catch every LPI without iterating through the device table we just
+ * look for set bits in our virtual pending table and check the status of
+ * the enabled bit in the respective property table entry.
+ * This actually covers every (pending) LPI from every redistributor,
+ * but update_lpi_enabled_status() is a NOP for LPIs that are not mapped
+ * to the redistributor/VCPU we are interested in.
+ */
+static int its_handle_invall(struct virt_its *its, uint64_t *cmdptr)
+{
+    uint32_t collid = its_cmd_get_collection(cmdptr);
+    struct vcpu *vcpu;
+    int vlpi = 0;
+
+    spin_lock(&its->its_lock);
+    vcpu = get_vcpu_from_collection(its, collid);
+    spin_unlock(&its->its_lock);
+
+    do {
+        vlpi = find_next_bit(vcpu->arch.vgic.pendtable,
+                             its->d->arch.vgic.nr_lpis, vlpi);
+        if ( vlpi >= its->d->arch.vgic.nr_lpis )
+            break;
+        update_lpi_enabled_status(its, vcpu, vlpi);
+        vlpi++;    /* otherwise find_next_bit() would return it again */
+    } while (1);
+
+    return 0;
+}
+
 static int its_handle_mapc(struct virt_its *its, uint64_t *cmdptr)
 {
     uint32_t collid = its_cmd_get_collection(cmdptr);
@@ -462,6 +496,9 @@ static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its,
         case GITS_CMD_INV:
             its_handle_inv(its, cmdptr);
             break;
+        case GITS_CMD_INVALL:
+            its_handle_invall(its, cmdptr);
+            break;
         case GITS_CMD_MAPC:
             its_handle_mapc(its, cmdptr);
             break;
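
For reference, decoding an ITS command for the dispatcher above comes down
to pulling fixed-width fields out of the 32-byte command (four 64-bit
doublewords). The sketch below follows the architected layout, with the
command number in bits [7:0] of doubleword 0 and the collection ID (ICID)
used by MAPC and INVALL in bits [15:0] of doubleword 2; the actual helpers
in this series may differ in detail.

#include <stdint.h>

/* Architected GICv3 ITS command numbers. */
#define GITS_CMD_MAPC    0x09
#define GITS_CMD_INV     0x0c
#define GITS_CMD_INVALL  0x0d

static uint8_t its_cmd_get_command(const uint64_t *cmdptr)
{
    return cmdptr[0] & 0xff;          /* DW0[7:0]: command number */
}

static uint32_t its_cmd_get_collection(const uint64_t *cmdptr)
{
    return cmdptr[2] & 0xffff;        /* DW2[15:0]: collection ID (ICID) */
}

With helpers of this shape, vgic_its_handle_cmds() can switch on the
command number and pass cmdptr on to the per-command handlers, which is
exactly the structure of the hunk above.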