
[1/4] KVM: count number of assigned devices

Message ID 1436368710-5452-2-git-send-email-pbonzini@redhat.com (mailing list archive)
State New, archived

Commit Message

Paolo Bonzini July 8, 2015, 3:18 p.m. UTC
If there are no assigned devices, the guest PAT is not providing
any useful information and can be overridden to writeback; VMX
always does this because it has the "IPAT" bit in its extended
page table entries, but SVM does not have anything similar.
Hook into VFIO and legacy device assignment so that they
provide this information to KVM.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/iommu.c            |  2 ++
 arch/x86/kvm/x86.c              | 18 ++++++++++++++++++
 include/linux/kvm_host.h        | 18 ++++++++++++++++++
 virt/kvm/vfio.c                 |  5 +++++
 5 files changed, 45 insertions(+)
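
As a rough sketch of how the counter is meant to be consumed (the helper below is invented for illustration and is not part of this patch; the real users presumably arrive later in the series), an arch memory-type decision could check it like this:

static u8 kvm_guest_memtype(struct kvm *kvm, u8 guest_pat_type)
{
	/*
	 * Without assigned devices there is no device DMA that could
	 * observe the guest's caching choices, so forcing writeback is
	 * safe and gives the best performance.
	 */
	if (!kvm_arch_has_assigned_device(kvm) &&
	    !kvm_arch_has_noncoherent_dma(kvm))
		return MTRR_TYPE_WRBACK;

	/* Otherwise honor whatever the guest programmed into its PAT. */
	return guest_pat_type;
}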

Comments

Alex Williamson July 8, 2015, 3:29 p.m. UTC | #1
On Wed, 2015-07-08 at 17:18 +0200, Paolo Bonzini wrote:
> If there are no assigned devices, the guest PAT is not providing
> any useful information and can be overridden to writeback; VMX
> always does this because it has the "IPAT" bit in its extended
> page table entries, but SVM does not have anything similar.
> Hook into VFIO and legacy device assignment so that they
> provide this information to KVM.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/include/asm/kvm_host.h |  2 ++
>  arch/x86/kvm/iommu.c            |  2 ++
>  arch/x86/kvm/x86.c              | 18 ++++++++++++++++++
>  include/linux/kvm_host.h        | 18 ++++++++++++++++++
>  virt/kvm/vfio.c                 |  5 +++++
>  5 files changed, 45 insertions(+)


Reviewed-by: Alex Williamson <alex.williamson@redhat.com>



Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2a7f5d782c33..49ec9038ec14 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -604,6 +604,8 @@ struct kvm_arch {
 	bool iommu_noncoherent;
 #define __KVM_HAVE_ARCH_NONCOHERENT_DMA
 	atomic_t noncoherent_dma_count;
+#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
+	atomic_t assigned_device_count;
 	struct kvm_pic *vpic;
 	struct kvm_ioapic *vioapic;
 	struct kvm_pit *vpit;
diff --git a/arch/x86/kvm/iommu.c b/arch/x86/kvm/iommu.c
index 7dbced309ddb..5c520ebf6343 100644
--- a/arch/x86/kvm/iommu.c
+++ b/arch/x86/kvm/iommu.c
@@ -200,6 +200,7 @@ int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
 			goto out_unmap;
 	}
 
+	kvm_arch_start_assignment(kvm);
 	pci_set_dev_assigned(pdev);
 
 	dev_info(&pdev->dev, "kvm assign device\n");
@@ -224,6 +225,7 @@ int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
 	iommu_detach_device(domain, &pdev->dev);
 
 	pci_clear_dev_assigned(pdev);
+	kvm_arch_end_assignment(kvm);
 
 	dev_info(&pdev->dev, "kvm deassign device\n");
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6bd19c7abc65..0024968b342d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8213,6 +8213,24 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
 			kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
+void kvm_arch_start_assignment(struct kvm *kvm)
+{
+	atomic_inc(&kvm->arch.assigned_device_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
+
+void kvm_arch_end_assignment(struct kvm *kvm)
+{
+	atomic_dec(&kvm->arch.assigned_device_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
+
+bool kvm_arch_has_assigned_device(struct kvm *kvm)
+{
+	return atomic_read(&kvm->arch.assigned_device_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
+
 void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
 {
 	atomic_inc(&kvm->arch.noncoherent_dma_count);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9564fd78c547..05e99b8ef465 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -734,6 +734,24 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
 	return false;
 }
 #endif
+#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
+void kvm_arch_start_assignment(struct kvm *kvm);
+void kvm_arch_end_assignment(struct kvm *kvm);
+bool kvm_arch_has_assigned_device(struct kvm *kvm);
+#else
+static inline void kvm_arch_start_assignment(struct kvm *kvm)
+{
+}
+
+static inline void kvm_arch_end_assignment(struct kvm *kvm)
+{
+}
+
+static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
+{
+	return false;
+}
+#endif
 
 static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
 {
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 620e37f741b8..1dd087da6f31 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -155,6 +155,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
 		list_add_tail(&kvg->node, &kv->group_list);
 		kvg->vfio_group = vfio_group;
 
+		kvm_arch_start_assignment(dev->kvm);
+
 		mutex_unlock(&kv->lock);
 
 		kvm_vfio_update_coherency(dev);
@@ -190,6 +192,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
 			break;
 		}
 
+		kvm_arch_end_assignment(dev->kvm);
+
 		mutex_unlock(&kv->lock);
 
 		kvm_vfio_group_put_external_user(vfio_group);
@@ -239,6 +243,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
 		kvm_vfio_group_put_external_user(kvg->vfio_group);
 		list_del(&kvg->node);
 		kfree(kvg);
+		kvm_arch_end_assignment(dev->kvm);
 	}
 
 	kvm_vfio_update_coherency(dev);
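
From userspace's point of view, the counter moves whenever a VFIO group is registered with or unregistered from the KVM VFIO pseudo-device. A minimal sketch of the add path (error handling trimmed; the helper name is made up for illustration):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Each successful KVM_DEV_VFIO_GROUP_ADD ends up in
 * kvm_arch_start_assignment(); KVM_DEV_VFIO_GROUP_DEL, or destroying
 * the device, ends up in kvm_arch_end_assignment().
 */
static int attach_vfio_group(int vm_fd, int vfio_group_fd)
{
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
	struct kvm_device_attr attr = {
		.group = KVM_DEV_VFIO_GROUP,
		.attr  = KVM_DEV_VFIO_GROUP_ADD,
		.addr  = (__u64)(unsigned long)&vfio_group_fd,
	};

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;
	return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
}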