@@ -675,6 +675,8 @@ struct kvm_ppc_cpu_char {
/* POWER9 XIVE Native Interrupt Controller */
#define KVM_DEV_XIVE_GRP_SOURCES 1 /* 64-bit source attributes */
+#define KVM_DEV_XIVE_GRP_CTRL 2 /* global device controls */
+#define KVM_DEV_XIVE_GET_ESB_FD 1 /* returns an fd mmap'able for the ESB pages */
/* Layout of 64-bit XIVE source attribute values */
#define KVM_XIVE_LEVEL_SENSITIVE (1ULL << 0)
@@ -249,6 +249,78 @@ static int kvmppc_xive_native_set_source(struct kvmppc_xive *xive, long irq,
return 0;
}
+/*
+ * Fault handler for the ESB file mapping.
+ *
+ * Each interrupt source is exposed as a pair of pages in the mapping:
+ * the first/even page is the trigger page, the second/odd page is the
+ * EOI and management page.  Resolve the faulting page offset to the
+ * matching hardware ESB page and insert its pfn.
+ */
+static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct kvmppc_xive *xive = vma->vm_file->private_data;
+	struct kvmppc_xive_src_block *sb;
+	struct kvmppc_xive_irq_state *state;
+	struct xive_irq_data *xd;
+	u32 hw_num;
+	u16 src;
+	u64 page;
+	unsigned long irq;
+
+	/* There are two pages (trigger and EOI) per IRQ */
+	irq = vmf->pgoff / 2;
+
+	sb = kvmppc_xive_find_source(xive, irq, &src);
+	if (!sb) {
+		/* pr_devel: userspace can fault at will, don't flood the log */
+		pr_devel("%s: source %lx not found !\n", __func__, irq);
+		return VM_FAULT_SIGBUS;
+	}
+
+	state = &sb->irq_state[src];
+	kvmppc_xive_select_irq(state, &hw_num, &xd);
+
+	arch_spin_lock(&sb->lock);
+
+	/*
+	 * First/even page is the trigger page, second/odd page is the
+	 * EOI and management page.
+	 */
+	page = vmf->pgoff % 2 ? xd->eoi_page : xd->trig_page;
+
+	arch_spin_unlock(&sb->lock);
+
+	/* A source without an ESB page cannot be mapped */
+	if (WARN_ON(!page))
+		return VM_FAULT_SIGBUS;
+
+	/* Propagate vmf_insert_pfn()'s status instead of dropping it */
+	return vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT);
+}
+
+/* VMA operations for the ESB mapping: pages are inserted lazily on fault */
+static const struct vm_operations_struct xive_native_esb_vmops = {
+	.fault = xive_native_esb_fault,
+};
+
+/*
+ * mmap handler for the ESB file descriptor.
+ *
+ * Validates the requested length against the ESB space and sets the
+ * VMA up for non-cached PFN-mapped I/O; the actual pages are inserted
+ * on demand by xive_native_esb_fault().
+ */
+static int xive_native_esb_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	unsigned long len = vma->vm_end - vma->vm_start;
+
+	/*
+	 * There are two pages (trigger and EOI) per IRQ, so a mapping
+	 * of exactly KVMPPC_XIVE_NR_IRQS * 2 pages covers the whole
+	 * ESB space and must be accepted: only reject mappings that
+	 * extend beyond it ('>' and not '>=').
+	 */
+	if ((len >> PAGE_SHIFT) > KVMPPC_XIVE_NR_IRQS * 2)
+		return -EINVAL;
+
+	vma->vm_flags |= VM_IO | VM_PFNMAP;
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vma->vm_ops = &xive_native_esb_vmops;
+	return 0;
+}
+
+/* File operations for the anonymous ESB fd handed out to userspace */
+static const struct file_operations xive_native_esb_fops = {
+	.mmap = xive_native_esb_mmap,
+};
+
+/*
+ * Create an anonymous file exposing the ESB pages and copy its file
+ * descriptor out to userspace at @addr.
+ *
+ * Reserve the fd number first and only fd_install() the file once the
+ * descriptor value has been successfully copied out: installing before
+ * put_user() (as anon_inode_getfd() does) would leak a live fd — and a
+ * reference to the XIVE device — into the process's fd table if the
+ * copy-out faulted.
+ */
+static int kvmppc_xive_native_get_esb_fd(struct kvmppc_xive *xive, u64 addr)
+{
+	u64 __user *ubufp = (u64 __user *) addr;
+	struct file *file;
+	int fd;
+
+	fd = get_unused_fd_flags(O_CLOEXEC);
+	if (fd < 0)
+		return fd;
+
+	file = anon_inode_getfile("esb", &xive_native_esb_fops, xive,
+				  O_RDWR | O_CLOEXEC);
+	if (IS_ERR(file)) {
+		put_unused_fd(fd);
+		return PTR_ERR(file);
+	}
+
+	if (put_user(fd, ubufp)) {
+		put_unused_fd(fd);
+		fput(file);
+		return -EFAULT;
+	}
+
+	fd_install(fd, file);
+	return 0;
+}
+
static int kvmppc_xive_native_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
@@ -265,6 +337,16 @@ static int kvmppc_xive_native_set_attr(struct kvm_device *dev,
static int kvmppc_xive_native_get_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
+	struct kvmppc_xive *xive = dev->private;
+
+	switch (attr->group) {
+	case KVM_DEV_XIVE_GRP_CTRL:
+		if (attr->attr == KVM_DEV_XIVE_GET_ESB_FD)
+			return kvmppc_xive_native_get_esb_fd(xive, attr->addr);
+		break;
+	default:
+		break;
+	}
return -ENXIO;
}
@@ -277,6 +359,12 @@ static int kvmppc_xive_native_has_attr(struct kvm_device *dev,
attr->attr < KVMPPC_XIVE_NR_IRQS)
return 0;
break;
+	case KVM_DEV_XIVE_GRP_CTRL:
+		/*
+		 * KVM_DEV_XIVE_GET_ESB_FD is the only control attribute.
+		 */
+		if (attr->attr == KVM_DEV_XIVE_GET_ESB_FD)
+			return 0;
}
return -ENXIO;
}
This will let the guest create a memory mapping to expose the ESB MMIO
regions used to control the interrupt sources, to trigger events, to
EOI or to turn off the sources.

Signed-off-by: Cédric Le Goater <clg@kaod.org>
---
 arch/powerpc/include/uapi/asm/kvm.h   |  2 +
 arch/powerpc/kvm/book3s_xive_native.c | 88 +++++++++++++++++++++++++++++++++++
 2 files changed, 90 insertions(+)