@@ -40,6 +40,7 @@ struct its_device {
/* the head for the list of ITTEs */
struct list_head itt;
u32 device_id;
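+ /* ITT Size field from the MAPD command: number of EventID bits minus one */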
+ u32 itt_size;
};
#define COLLECTION_NOT_MAPPED ((u32)-1)
@@ -77,15 +78,11 @@ static struct its_device *find_its_device(struct kvm *kvm, u32 device_id)
return NULL;
}
-static struct its_itte *find_itte(struct kvm *kvm, u32 device_id, u32 event_id)
+static struct its_itte *find_itte_in_device(struct its_device *device,
+ u32 event_id)
{
- struct its_device *device;
struct its_itte *itte;
- device = find_its_device(kvm, device_id);
- if (device == NULL)
- return NULL;
-
list_for_each_entry(itte, &device->itt, itte_list)
if (itte->event_id == event_id)
return itte;
@@ -93,6 +90,28 @@ static struct its_itte *find_itte(struct kvm *kvm, u32 device_id, u32 event_id)
return NULL;
}
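+/*
+ * Resolve a (device ID, event ID) pair to its ITTE.
+ * Returns NULL on failure and stores the ITS error code in *err;
+ * on success *err is set to 0.
+ */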
+static struct its_itte *find_itte(struct kvm *kvm, u32 device_id,
+ u32 event_id, int *err)
+{
+ struct its_device *device;
+ struct its_itte *itte;
+
+ device = find_its_device(kvm, device_id);
+ if (device == NULL) {
+ *err = E_ITS_UNMAPPED_DEVICE;
+ return NULL;
+ }
+
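+ /* The ITT covers 2^(Size + 1) event IDs, so reject anything beyond that. */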
+ if (event_id >= (1ULL << (device->itt_size + 1))) {
+ *err = E_ITS_ID_OOR;
+ return NULL;
+ }
+
+ itte = find_itte_in_device(device, event_id);
+ *err = itte ? 0 : E_ITS_UNMAPPED_INTERRUPT;
+ return itte;
+}
+
/* To be used as an iterator this macro misses the enclosing parentheses */
#define for_each_lpi(dev, itte, kvm) \
list_for_each_entry(dev, &(kvm)->arch.vgic.its.device_list, dev_list) \
@@ -359,10 +378,12 @@ int vits_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
goto out_unlock;
}
- itte = find_itte(kvm, msi->devid, msi->data);
+ itte = find_itte(kvm, msi->devid, msi->data, &ret);
/* Triggering an unmapped IRQ gets silently dropped. */
- if (!itte || !its_is_collection_mapped(itte->collection))
+ if (!itte || !its_is_collection_mapped(itte->collection)) {
+ ret = 0;
goto out_unlock;
+ }
cpuid = itte->collection->target_addr;
__set_bit(cpuid, itte->pending);
@@ -476,6 +497,7 @@ static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
#define its_cmd_get_command(cmd) its_cmd_mask_field(cmd, 0, 0, 8)
#define its_cmd_get_deviceid(cmd) its_cmd_mask_field(cmd, 0, 32, 32)
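+/* The Size field of MAPD lives in bits[4:0] of the second command doubleword. */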
+#define its_cmd_get_size(cmd) its_cmd_mask_field(cmd, 1, 0, 5)
#define its_cmd_get_id(cmd) its_cmd_mask_field(cmd, 1, 0, 32)
#define its_cmd_get_physical_id(cmd) its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd) its_cmd_mask_field(cmd, 2, 0, 16)
@@ -489,21 +511,23 @@ static int vits_cmd_handle_discard(struct kvm *kvm, u64 *its_cmd)
u32 device_id;
u32 event_id;
struct its_itte *itte;
- int ret = E_ITS_DISCARD_UNMAPPED_INTERRUPT;
+ int ret;
device_id = its_cmd_get_deviceid(its_cmd);
event_id = its_cmd_get_id(its_cmd);
spin_lock(&its->lock);
- itte = find_itte(kvm, device_id, event_id);
- if (itte && itte->collection) {
+ itte = find_itte(kvm, device_id, event_id, &ret);
+ if (itte) {
/*
* Though the spec talks about removing the pending state, we
* don't bother here since we clear the ITTE anyway and the
* pending state is a property of the ITTE struct.
*/
- its_free_itte(itte);
- ret = 0;
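+ /* An ITTE without a collection still counts as an unmapped interrupt. */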
+ if (itte->collection)
+ its_free_itte(itte);
+ else
+ ret = E_ITS_UNMAPPED_INTERRUPT;
}
spin_unlock(&its->lock);
@@ -522,19 +546,18 @@ static int vits_cmd_handle_movi(struct kvm *kvm, u64 *its_cmd)
int ret;
spin_lock(&its->lock);
- itte = find_itte(kvm, device_id, event_id);
- if (!itte) {
- ret = E_ITS_MOVI_UNMAPPED_INTERRUPT;
+ itte = find_itte(kvm, device_id, event_id, &ret);
+ if (!itte)
goto out_unlock;
- }
+
if (!its_is_collection_mapped(itte->collection)) {
- ret = E_ITS_MOVI_UNMAPPED_COLLECTION;
+ ret = E_ITS_UNMAPPED_COLLECTION;
goto out_unlock;
}
collection = find_collection(kvm, coll_id);
if (!its_is_collection_mapped(collection)) {
- ret = E_ITS_MOVI_UNMAPPED_COLLECTION;
+ ret = E_ITS_UNMAPPED_COLLECTION;
goto out_unlock;
}
@@ -582,7 +605,7 @@ static int vits_cmd_handle_mapi(struct kvm *kvm, u64 *its_cmd, u8 cmd)
device = find_its_device(kvm, device_id);
if (!device) {
- ret = E_ITS_MAPTI_UNMAPPED_DEVICE;
+ ret = E_ITS_UNMAPPED_DEVICE;
goto out_unlock;
}
@@ -598,11 +621,11 @@ static int vits_cmd_handle_mapi(struct kvm *kvm, u64 *its_cmd, u8 cmd)
lpi_nr = event_id;
if (lpi_nr < GIC_LPI_OFFSET ||
lpi_nr >= nr_idbits_propbase(dist->propbaser)) {
- ret = E_ITS_MAPTI_PHYSICALID_OOR;
+ ret = E_ITS_PHYSICALID_OOR;
goto out_unlock;
}
- itte = find_itte(kvm, device_id, event_id);
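+ /* The device was already looked up above, so search its ITT directly. */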
+ itte = find_itte_in_device(device, event_id);
if (!itte) {
if (!new_itte || !new_itte->pending) {
ret = -ENOMEM;
@@ -686,6 +709,7 @@ static int vits_cmd_handle_mapd(struct kvm *kvm, u64 *its_cmd)
device = new_device;
device->device_id = device_id;
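+ /* Remember the ITT size so event ID range checks can use it later. */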
+ device->itt_size = its_cmd_get_size(its_cmd);
INIT_LIST_HEAD(&device->itt);
list_add_tail(&device->dev_list,
@@ -710,7 +734,7 @@ static int vits_cmd_handle_mapc(struct kvm *kvm, u64 *its_cmd)
target_addr = its_cmd_get_target_addr(its_cmd);
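+ /* The unified error codes have no 'processor number out of range' value, so report a plain errno. */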
if (target_addr >= atomic_read(&kvm->online_vcpus))
- return E_ITS_MAPC_PROCNUM_OOR;
+ return -EINVAL;
/* We preallocate memory outside of the lock here */
if (valid) {
@@ -769,11 +793,9 @@ static int vits_cmd_handle_clear(struct kvm *kvm, u64 *its_cmd)
spin_lock(&its->lock);
- itte = find_itte(kvm, device_id, event_id);
- if (!itte) {
- ret = E_ITS_CLEAR_UNMAPPED_INTERRUPT;
+ itte = find_itte(kvm, device_id, event_id, &ret);
+ if (!itte)
goto out_unlock;
- }
if (its_is_collection_mapped(itte->collection))
__clear_bit(itte->collection->target_addr, itte->pending);
@@ -798,10 +820,10 @@ static int vits_cmd_handle_inv(struct kvm *kvm, u64 *its_cmd)
event_id = its_cmd_get_id(its_cmd);
spin_lock(&dist->its.lock);
- itte = find_itte(kvm, device_id, event_id);
+ itte = find_itte(kvm, device_id, event_id, &ret);
spin_unlock(&dist->its.lock);
if (!itte)
- return E_ITS_INV_UNMAPPED_INTERRUPT;
+ return ret;
/*
* We cannot read from guest memory inside the spinlock, so we
@@ -816,7 +838,7 @@ static int vits_cmd_handle_inv(struct kvm *kvm, u64 *its_cmd)
return ret;
spin_lock(&dist->its.lock);
- new_itte = find_itte(kvm, device_id, event_id);
+ new_itte = find_itte(kvm, device_id, event_id, &ret);
if (new_itte->lpi != itte->lpi) {
itte = new_itte;
spin_unlock(&dist->its.lock);
@@ -839,7 +861,7 @@ static int vits_cmd_handle_invall(struct kvm *kvm, u64 *its_cmd)
collection = find_collection(kvm, coll_id);
if (!its_is_collection_mapped(collection))
- return E_ITS_INVALL_UNMAPPED_COLLECTION;
+ return E_ITS_UNMAPPED_COLLECTION;
vcpu = kvm_get_vcpu(kvm, collection->target_addr);
@@ -866,7 +888,7 @@ static int vits_cmd_handle_movall(struct kvm *kvm, u64 *its_cmd)
if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
target2_addr >= atomic_read(&kvm->online_vcpus))
- return E_ITS_MOVALL_PROCNUM_OOR;
+ return -EINVAL;
if (target1_addr == target2_addr)
return 0;
@@ -43,15 +43,10 @@ bool vits_queue_lpis(struct kvm_vcpu *vcpu);
void vits_unqueue_lpi(struct kvm_vcpu *vcpu, int irq);
bool vits_check_lpis(struct kvm_vcpu *vcpu);
-#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107
-#define E_ITS_MOVI_UNMAPPED_COLLECTION 0x010109
-#define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507
-#define E_ITS_MAPC_PROCNUM_OOR 0x010902
-#define E_ITS_MAPTI_UNMAPPED_DEVICE 0x010a04
-#define E_ITS_MAPTI_PHYSICALID_OOR 0x010a06
-#define E_ITS_INV_UNMAPPED_INTERRUPT 0x010c07
-#define E_ITS_INVALL_UNMAPPED_COLLECTION 0x010d09
-#define E_ITS_MOVALL_PROCNUM_OOR 0x010e01
-#define E_ITS_DISCARD_UNMAPPED_INTERRUPT 0x010f07
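+/*
+ * Command-independent ITS error values. The old per-command encodings
+ * differed only in the command number byte.
+ */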
+#define E_ITS_UNMAPPED_DEVICE 0x010004
+#define E_ITS_ID_OOR 0x010005
+#define E_ITS_PHYSICALID_OOR 0x010006
+#define E_ITS_UNMAPPED_INTERRUPT 0x010007
+#define E_ITS_UNMAPPED_COLLECTION 0x010009
#endif