diff mbox series

[v5,17/16] KVM: x86/xen: Fix initialisation of gfn caches for Xen shared pages

Message ID 4df34bfb46e5c545dea3517fc6475f56ef2cb358.camel@infradead.org (mailing list archive)
State New, archived
Headers show
Series KVM: Add minimal support for Xen HVM guests | expand

Commit Message

David Woodhouse Jan. 21, 2021, 11:56 a.m. UTC
From: David Woodhouse <dwmw@amazon.co.uk>

When kvm_gfn_to_hva_cache_init() is used to cache the address of the
guest pages which KVM needs to access, it uses kvm_memslots(), for
which an SRCU read lock is required. Take that lock around the whole
of the kvm_xen_hvm_set_attr() function.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
This should fix the RCU warning reported by the 'kernel test robot'.

Paolo, please let me know if you'd prefer to fold this into the
original patch or whether it's OK as a 17th patch in the series.

I've pushed this on top for now, at
 https://git.infradead.org/users/dwmw2/linux.git/shortlog/refs/heads/xenpv

 arch/x86/kvm/xen.c | 39 ++++++++++++++++++++++++++-------------
 1 file changed, 26 insertions(+), 13 deletions(-)
diff mbox series

Patch

diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 4bc9da9fcfb8..3041f774493e 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -219,11 +219,14 @@  int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 {
 	struct kvm_vcpu *v;
 	int r = -ENOENT;
+	int idx = srcu_read_lock(&kvm->srcu);
 
 	switch (data->type) {
 	case KVM_XEN_ATTR_TYPE_LONG_MODE:
-		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode)
-			return -EINVAL;
+		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
+			r = -EINVAL;
+			break;
+		}
 
 		kvm->arch.xen.long_mode = !!data->u.long_mode;
 		r = 0;
@@ -235,8 +238,11 @@  int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 
 	case KVM_XEN_ATTR_TYPE_VCPU_INFO:
 		v = kvm_get_vcpu_by_id(kvm, data->u.vcpu_attr.vcpu_id);
-		if (!v)
-			return -EINVAL;
+		if (!v) {
+			r = -EINVAL;
+			break;
+		}
+
 		/* No compat necessary here. */
 		BUILD_BUG_ON(sizeof(struct vcpu_info) !=
 			     sizeof(struct compat_vcpu_info));
@@ -247,7 +253,7 @@  int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 					      data->u.vcpu_attr.gpa,
 					      sizeof(struct vcpu_info));
 		if (r)
-			return r;
+			break;
 
 		v->arch.xen.vcpu_info_set = true;
 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
@@ -255,14 +261,16 @@  int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 
 	case KVM_XEN_ATTR_TYPE_VCPU_TIME_INFO:
 		v = kvm_get_vcpu_by_id(kvm, data->u.vcpu_attr.vcpu_id);
-		if (!v)
-			return -EINVAL;
+		if (!v) {
+			r = -EINVAL;
+			break;
+		}
 
 		r = kvm_gfn_to_hva_cache_init(kvm, &v->arch.xen.vcpu_time_info_cache,
 					      data->u.vcpu_attr.gpa,
 					      sizeof(struct pvclock_vcpu_time_info));
 		if (r)
-			return r;
+			break;
 
 		v->arch.xen.vcpu_time_info_set = true;
 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
@@ -270,14 +278,16 @@  int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 
 	case KVM_XEN_ATTR_TYPE_VCPU_RUNSTATE:
 		v = kvm_get_vcpu_by_id(kvm, data->u.vcpu_attr.vcpu_id);
-		if (!v)
-			return -EINVAL;
+		if (!v) {
+			r = -EINVAL;
+			break;
+		}
 
 		r = kvm_gfn_to_hva_cache_init(kvm, &v->arch.xen.runstate_cache,
 					      data->u.vcpu_attr.gpa,
 					      sizeof(struct vcpu_runstate_info));
 		if (r)
-			return r;
+			break;
 
 		v->arch.xen.runstate_set = true;
 		v->arch.xen.current_runstate = RUNSTATE_blocked;
@@ -285,8 +295,10 @@  int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 		break;
 
 	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
-		if (data->u.vector < 0x10)
-			return -EINVAL;
+		if (data->u.vector < 0x10) {
+			r = -EINVAL;
+			break;
+		}
 
 		kvm->arch.xen.upcall_vector = data->u.vector;
 		r = 0;
@@ -296,6 +308,7 @@  int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 		break;
 	}
 
+	srcu_read_unlock(&kvm->srcu, idx);
 	return r;
 }