@@ -256,6 +256,6 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu,
struct kvm_run *run,
struct kvm_exit_mmio *mmio);
-bool irqchip_in_kernel(struct kvm *kvm);
+#define irqchip_in_kernel(k)	(!!((k)->arch.vgic.vctrl_base))
#define vgic_initialized(k) ((k)->arch.vgic.ready)
#define vgic_active_irq(v) (atomic_read(&(v)->arch.vgic_cpu.irq_active_count) == 0)
@@ -422,7 +422,7 @@ static void stage2_clear_pte(struct kvm *kvm,
phys_addr_t addr)
}
static void stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
- phys_addr_t addr, const pte_t *new_pte)
+ phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
pgd_t *pgd;
pud_t *pud;
@@ -454,6 +454,9 @@ static void stage2_set_pte(struct kvm *kvm, struct
kvm_mmu_memory_cache *cache,
} else
pte = pte_offset_kernel(pmd, addr);
+	if (iomap && pte_present(*pte))
+		return;
+
/* Create 2nd stage page table mapping - Level 3 */
old_pte = *pte;
set_pte_ext(pte, *new_pte, 0);
@@ -489,7 +492,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm,
phys_addr_t guest_ipa,
if (ret)
goto out;
spin_lock(&kvm->mmu_lock);
- stage2_set_pte(kvm, &cache, addr, &pte);
+ stage2_set_pte(kvm, &cache, addr, &pte, true);
spin_unlock(&kvm->mmu_lock);
pfn++;
@@ -565,7 +568,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu,
phys_addr_t fault_ipa,
pte_val(new_pte) |= L_PTE_S2_RDWR;
kvm_set_pfn_dirty(pfn);
}
- stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte);
+ stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
@@ -716,7 +719,7 @@ static void kvm_set_spte_handler(struct kvm *kvm,
gpa_t gpa, void *data)
{
pte_t *pte = (pte_t *)data;
- stage2_set_pte(kvm, NULL, gpa, pte);
+ stage2_set_pte(kvm, NULL, gpa, pte, false);
}
@@ -1054,6 +1054,7 @@ int kvm_vgic_hyp_init(void)
int ret;
unsigned int irq;
struct resource vctrl_res;
+ struct resource vcpu_res;
vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
if (!vgic_node)
@@ -1094,6 +1095,13 @@ int kvm_vgic_hyp_init(void)
kvm_info("%s@%llx IRQ%d\n", vgic_node->name, vctrl_res.start, irq);
on_each_cpu(vgic_init_maintenance_interrupt, &irq, 1);
+ if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
+ kvm_err("Cannot obtain VCPU resource\n");
+ ret = -ENXIO;
+ goto out_unmap;
+ }
+ vgic_vcpu_base = vcpu_res.start;
+
return 0;
out_unmap:
@@ -1138,24 +1146,12 @@ out:
return ret;
}
-bool irqchip_in_kernel(struct kvm *kvm)
-{
- return !(IS_VGIC_ADDR_UNDEF(vgic_vcpu_base));
-}
-
int kvm_vgic_create(struct kvm *kvm)
{
int ret;
- struct resource vcpu_res;
mutex_lock(&kvm->lock);
- if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
- kvm_err("Cannot obtain VCPU resource\n");
- ret = -ENXIO;
- goto out;
- }
-
if (atomic_read(&kvm->online_vcpus) || kvm->arch.vgic.vctrl_base) {
ret = -EEXIST;
goto out;
@@ -1166,36 +1162,70 @@ int kvm_vgic_create(struct kvm *kvm)
kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
-	vgic_vcpu_base = vcpu_res.start;
ret = 0;
out:
mutex_unlock(&kvm->lock);
return ret;
}
+static bool vgic_ioaddr_overlap(struct kvm *kvm)
+{
+	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
+	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;
+
+	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
+		return false;
+	if ((dist <= cpu && dist + VGIC_DIST_SIZE > cpu) ||
+	    (cpu <= dist && cpu + VGIC_CPU_SIZE > dist))
+		return true;
+	return false;
+}
+
int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
{
int r = 0;
+	struct vgic_dist *vgic = &kvm->arch.vgic;
if (addr & ~KVM_PHYS_MASK)
return -E2BIG;
+	if (addr & ~PAGE_MASK)
+		return -EINVAL;
+
mutex_lock(&kvm->lock);
switch (type) {
case KVM_VGIC_V2_ADDR_TYPE_DIST:
-		if (!IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base))
-			return -EEXIST;
+		if (!IS_VGIC_ADDR_UNDEF(vgic->vgic_dist_base)) {
+			r = -EEXIST;
+			break;
+		}
+		if (addr + VGIC_DIST_SIZE < addr) {
+			r = -EINVAL;
+			break;
+		}
kvm->arch.vgic.vgic_dist_base = addr;
break;
case KVM_VGIC_V2_ADDR_TYPE_CPU:
-		if (!IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base))
-			return -EEXIST;
+		if (!IS_VGIC_ADDR_UNDEF(vgic->vgic_cpu_base)) {
+			r = -EEXIST;
+			break;
+		}
+		if (addr + VGIC_CPU_SIZE < addr) {
+			r = -EINVAL;
+			break;
+		}
kvm->arch.vgic.vgic_cpu_base = addr;
break;
default:
r = -ENODEV;
}
+	if (r == 0 && vgic_ioaddr_overlap(kvm)) {
+		kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
+		kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
+		r = -EINVAL;
+	}
+
mutex_unlock(&kvm->lock);
return r;
}