arch/x86/kvm/debugfs.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 90 insertions(+)
@@ -10,6 +10,11 @@
#include "mmu.h"
#include "mmu/mmu_internal.h"
+#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#include <linux/kvm_irqfd.h>
+#include <asm/irq_remapping.h>
+#endif
+
static int vcpu_get_timer_advance_ns(void *data, u64 *val)
{
struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
@@ -181,9 +186,94 @@ static int kvm_mmu_rmaps_stat_release(struct inode *inode, struct file *file)
.release = kvm_mmu_rmaps_stat_release,
};
+#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+/*
+ * seq_file show handler: for every irqfd with a registered IRQ bypass
+ * producer, dump the guest GSI -> host IRQ -> vector -> vCPU mapping of
+ * each MSI route that targets exactly one vCPU, i.e. the interrupts
+ * eligible for VT-d posted delivery.
+ *
+ * Emits nothing when the VM has no assigned device or the platform's
+ * IRQ remapping hardware lacks the posted-interrupt capability.
+ */
+static int kvm_vfio_intr_stat_show(struct seq_file *m, void *v)
+{
+	struct kvm_kernel_irq_routing_entry *e;
+	struct kvm_irq_routing_table *irq_rt;
+	unsigned int host_irq, guest_irq;
+	struct kvm_kernel_irqfd *irqfd;
+	struct kvm *kvm = m->private;
+	struct kvm_lapic_irq irq;
+	struct kvm_vcpu *vcpu;
+	int idx;
+
+	if (!kvm_arch_has_assigned_device(kvm) ||
+	    !irq_remapping_cap(IRQ_POSTING_CAP)) {
+		return 0;
+	}
+
+	seq_printf(m, "%12s %12s %12s %12s\n",
+		   "guest_irq", "host_irq", "vector", "vcpu");
+
+	/*
+	 * irqfds.lock stabilizes the irqfds.items list (and each irqfd's
+	 * producer pointer); irq_srcu protects the routing table lookup.
+	 */
+	spin_lock_irq(&kvm->irqfds.lock);
+	idx = srcu_read_lock(&kvm->irq_srcu);
+	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
+
+	list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
+		/* Only irqfds bound to a bypass producer (e.g. VFIO) matter. */
+		if (!irqfd->producer)
+			continue;
+
+		host_irq = irqfd->producer->irq;
+		guest_irq = irqfd->gsi;
+
+		if (guest_irq >= irq_rt->nr_rt_entries ||
+		    hlist_empty(&irq_rt->map[guest_irq])) {
+			pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
+				     guest_irq, irq_rt->nr_rt_entries);
+			continue;
+		}
+
+		hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
+			if (e->type != KVM_IRQ_ROUTING_MSI)
+				continue;
+
+			kvm_set_msi_irq(kvm, e, &irq);
+			/* Only single-vCPU interrupts can be posted directly. */
+			if (kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
+				/* vcpu_id is a signed int: print with %d, not %u. */
+				seq_printf(m, "%12u %12u %12u %12d\n",
+					   guest_irq, host_irq, irq.vector, vcpu->vcpu_id);
+			}
+		}
+	}
+	srcu_read_unlock(&kvm->irq_srcu, idx);
+	spin_unlock_irq(&kvm->irqfds.lock);
+	return 0;
+}
+
+static int kvm_vfio_intr_stat_open(struct inode *inode, struct file *file)
+{
+	struct kvm *kvm = inode->i_private;
+	int r;
+
+	/*
+	 * Pin the VM for the lifetime of the open file so the show()
+	 * callback can safely dereference it; fails if the VM is already
+	 * on its way out.
+	 */
+	if (!kvm_get_kvm_safe(kvm))
+		return -ENOENT;
+
+	r = single_open(file, kvm_vfio_intr_stat_show, kvm);
+	if (r < 0)
+		kvm_put_kvm(kvm);	/* don't leak the reference on failure */
+
+	return r;
+}
+
+/* Drop the VM reference taken at open time, then tear down the seq_file. */
+static int kvm_vfio_intr_stat_release(struct inode *inode, struct file *file)
+{
+	kvm_put_kvm(inode->i_private);
+
+	return single_release(inode, file);
+}
+
+/*
+ * fops for the per-VM debugfs "vfio_intr_stat" file: a read-only
+ * seq_file backed by kvm_vfio_intr_stat_show(); open/release manage the
+ * VM reference count.
+ */
+static const struct file_operations vfio_intr_stat_fops = {
+	.open = kvm_vfio_intr_stat_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = kvm_vfio_intr_stat_release,
+};
+#endif
+
int kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	debugfs_create_file("mmu_rmaps_stat", 0644, kvm->debugfs_dentry, kvm,
			    &mmu_rmaps_stat_fops);
+
+#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+	/* Read-only dump of posted-interrupt routes for assigned devices. */
+	debugfs_create_file("vfio_intr_stat", 0444, kvm->debugfs_dentry, kvm,
+			    &vfio_intr_stat_fops);
+#endif
	return 0;
}