[RFC,5/9] kvm_main.c: split __kvm_set_memory_region logic in kvm_check_mem and kvm_prepare_batch

Message ID 20220909104506.738478-6-eesposit@redhat.com (mailing list archive)
State New, archived
Series kvm: implement atomic memslot updates

Commit Message

Emanuele Giuseppe Esposito Sept. 9, 2022, 10:45 a.m. UTC
Just a function split. No functional change intended,
except for the fact that kvm_prepare_batch() does not
immediately call kvm_set_memslot() if batch->change is
KVM_MR_DELETE, but delegates that to the caller
(__kvm_set_memory_region).

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 virt/kvm/kvm_main.c | 120 +++++++++++++++++++++++++++++---------------
 1 file changed, 79 insertions(+), 41 deletions(-)
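
The one behavioral nuance is easiest to see side by side. Condensed
from the two hunks in the diff below, the deletion path changes like
this:

	/* Before: the deletion path committed the change itself. */
	batch->change = KVM_MR_DELETE;
	batch->new = NULL;
	return kvm_set_memslot(kvm, batch);

	/* After: kvm_prepare_batch() only fills in the batch; the single
	 * kvm_set_memslot() call now lives in __kvm_set_memory_region(). */
	batch->change = KVM_MR_DELETE;
	batch->new = NULL;
	return 0;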

Comments

Yang, Weijiang Sept. 13, 2022, 2:56 a.m. UTC | #1
On 9/9/2022 6:45 PM, Emanuele Giuseppe Esposito wrote:
> Just a function split. No functional change intended,
> except for the fact that kvm_prepare_batch() does not
> immediately call kvm_set_memslot() if batch->change is
> KVM_MR_DELETE, but delegates that to the caller
> (__kvm_set_memory_region).
>
>
> +static int kvm_check_memory_region(struct kvm *kvm,
> +				const struct kvm_userspace_memory_region *mem,
> +				struct kvm_internal_memory_region_list *batch)
> +{
> +	int r;
> +
> +	r = check_memory_region_flags(mem);
> +	if (r)
> +		return r;
>   
> -	r = kvm_set_memslot(kvm, batch);
> +	r = kvm_check_mem(mem);
>   	if (r)
> -		kfree(new);
> +		return r;
> +
> +	r = kvm_prepare_batch(kvm, mem, batch);
> +	if (r && batch->new)
> +		kfree(batch->new);
From the patch, r != 0 and batch->new != NULL are mutually exclusive,
so the kfree() here is not reachable (see the sketch after this
message).
> +
>   	return r;
>   }
[...]
>
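
For reference, a condensed model of kvm_prepare_batch()'s return paths
(conditions reduced to plain booleans; an illustration distilled from
the patch, not the kernel code itself) shows why a non-zero return and
a non-NULL batch->new cannot coincide:

	static int prepare_batch_model(bool is_delete, bool nothing_to_change,
				       bool overlaps,
				       struct kvm_memory_slot **out_new)
	{
		struct kvm_memory_slot *new;

		if (is_delete) {
			*out_new = NULL;  /* KVM_MR_DELETE: new explicitly NULL */
			return 0;
		}
		if (nothing_to_change)
			return 1;         /* *out_new never assigned */
		if (overlaps)
			return -EEXIST;   /* *out_new never assigned */

		new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
		if (!new)
			return -ENOMEM;   /* allocation failed: nothing to free */

		*out_new = new;           /* the only path that sets the slot */
		return 0;
	}

Every failing return happens before the slot is assigned, so the
error-path kfree() in kvm_check_memory_region() can never run.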
Emanuele Giuseppe Esposito Sept. 18, 2022, 4:22 p.m. UTC | #2
On 13/09/2022 04:56, Yang, Weijiang wrote:
> 
> On 9/9/2022 6:45 PM, Emanuele Giuseppe Esposito wrote:
>> Just a function split. No functional change intended,
>> except for the fact that kvm_prepare_batch() does not
>> immediately call kvm_set_memslot() if batch->change is
>> KVM_MR_DELETE, but delegates that to the caller
>> (__kvm_set_memory_region).
>>
>>
>> +static int kvm_check_memory_region(struct kvm *kvm,
>> +                const struct kvm_userspace_memory_region *mem,
>> +                struct kvm_internal_memory_region_list *batch)
>> +{
>> +    int r;
>> +
>> +    r = check_memory_region_flags(mem);
>> +    if (r)
>> +        return r;
>>
>> -    r = kvm_set_memslot(kvm, batch);
>> +    r = kvm_check_mem(mem);
>>       if (r)
>> -        kfree(new);
>> +        return r;
>> +
>> +    r = kvm_prepare_batch(kvm, mem, batch);
>> +    if (r && batch->new)
>> +        kfree(batch->new);
> From the patch, r != 0 and batch->new != NULL are mutually exclusive,
> so the kfree() here is not reachable.

Good point, I'll get rid of this (see the sketch after this message).

Thank you,
Emanuele
>> +
>>       return r;
>>   }
> [...]
>>   
>
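
Dropping the unreachable kfree() would leave the helper looking roughly
like this (a sketch of the agreed fix, not code from the posted
series):

	static int kvm_check_memory_region(struct kvm *kvm,
					   const struct kvm_userspace_memory_region *mem,
					   struct kvm_internal_memory_region_list *batch)
	{
		int r;

		r = check_memory_region_flags(mem);
		if (r)
			return r;

		r = kvm_check_mem(mem);
		if (r)
			return r;

		/* On failure batch->new is still NULL, nothing to free. */
		return kvm_prepare_batch(kvm, mem, batch);
	}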
Paolo Bonzini Sept. 28, 2022, 5:11 p.m. UTC | #3
On 9/9/22 12:45, Emanuele Giuseppe Esposito wrote:
> Just a function split. No functional change intended,
> except for the fact that kvm_prepare_batch() does not
> immediately call kvm_set_memslot() if batch->change is
> KVM_MR_DELETE, but delegates that to the caller
> (__kvm_set_memory_region).
> 
> Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
> ---
>   virt/kvm/kvm_main.c | 120 +++++++++++++++++++++++++++++---------------
>   1 file changed, 79 insertions(+), 41 deletions(-)
> 
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 17f07546d591..9d917af30593 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -1927,19 +1927,9 @@ static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
>   	return false;
>   }
>   
> -/*
> - * Allocate some memory and give it an address in the guest physical address
> - * space.
> - *
> - * Discontiguous memory is allowed, mostly for framebuffers.
> - * This function takes also care of initializing batch->new/old/invalid/change
> - * fields.
> - *
> - * Must be called holding kvm->slots_lock for write.
> - */
> -int __kvm_set_memory_region(struct kvm *kvm,
> -			    const struct kvm_userspace_memory_region *mem,
> -			    struct kvm_internal_memory_region_list *batch)
> +static int kvm_prepare_batch(struct kvm *kvm,
> +			     const struct kvm_userspace_memory_region *mem,
> +			     struct kvm_internal_memory_region_list *batch)
>   {
>   	struct kvm_memory_slot *old, *new;
>   	struct kvm_memslots *slots;
> @@ -1947,34 +1937,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
>   	unsigned long npages;
>   	gfn_t base_gfn;
>   	int as_id, id;
> -	int r;
> -
> -	r = check_memory_region_flags(mem);
> -	if (r)
> -		return r;
>   
>   	as_id = mem->slot >> 16;
>   	id = (u16)mem->slot;
>   
> -	/* General sanity checks */
> -	if ((mem->memory_size & (PAGE_SIZE - 1)) ||
> -	    (mem->memory_size != (unsigned long)mem->memory_size))
> -		return -EINVAL;
> -	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
> -		return -EINVAL;
> -	/* We can read the guest memory with __xxx_user() later on. */
> -	if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
> -	    (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
> -	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
> -			mem->memory_size))
> -		return -EINVAL;
> -	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
> -		return -EINVAL;
> -	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
> -		return -EINVAL;
> -	if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
> -		return -EINVAL;
> -
>   	slots = __kvm_memslots(kvm, as_id);
>   
>   	/*
> @@ -1993,7 +1959,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
>   
>   		batch->change = KVM_MR_DELETE;
>   		batch->new = NULL;
> -		return kvm_set_memslot(kvm, batch);
> +		return 0;
>   	}
>   
>   	base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
> @@ -2020,7 +1986,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
>   		else if (mem->flags != old->flags)
>   			change = KVM_MR_FLAGS_ONLY;
>   		else /* Nothing to change. */
> -			return 0;
> +			return 1;
>   	}
>   
>   	if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
> @@ -2041,12 +2007,81 @@ int __kvm_set_memory_region(struct kvm *kvm,
>   
>   	batch->new = new;
>   	batch->change = change;
> +	return 0;
> +}
> +
> +static int kvm_check_mem(const struct kvm_userspace_memory_region *mem)
> +{
> +	int as_id, id;
> +
> +	as_id = mem->slot >> 16;
> +	id = (u16)mem->slot;
> +
> +	/* General sanity checks */
> +	if ((mem->memory_size & (PAGE_SIZE - 1)) ||
> +	    (mem->memory_size != (unsigned long)mem->memory_size))
> +		return -EINVAL;
> +	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
> +		return -EINVAL;
> +	/* We can read the guest memory with __xxx_user() later on. */
> +	if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
> +	    (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
> +	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
> +			mem->memory_size))
> +		return -EINVAL;
> +	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
> +		return -EINVAL;
> +	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
> +		return -EINVAL;
> +	if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
> +		return -EINVAL;
> +
> +	return 0;
> +}
> +
> +static int kvm_check_memory_region(struct kvm *kvm,
> +				const struct kvm_userspace_memory_region *mem,
> +				struct kvm_internal_memory_region_list *batch)
> +{
> +	int r;
> +
> +	r = check_memory_region_flags(mem);
> +	if (r)
> +		return r;
>   
> -	r = kvm_set_memslot(kvm, batch);
> +	r = kvm_check_mem(mem);
>   	if (r)
> -		kfree(new);
> +		return r;
> +
> +	r = kvm_prepare_batch(kvm, mem, batch);
> +	if (r && batch->new)
> +		kfree(batch->new);
> +
>   	return r;
>   }
> +
> +/*
> + * Allocate some memory and give it an address in the guest physical address
> + * space.
> + *
> + * Discontiguous memory is allowed, mostly for framebuffers.
> + * This function takes also care of initializing batch->new/old/invalid/change
> + * fields.
> + *
> + * Must be called holding kvm->slots_lock for write.
> + */
> +int __kvm_set_memory_region(struct kvm *kvm,
> +			    const struct kvm_userspace_memory_region *mem,
> +			    struct kvm_internal_memory_region_list *batch)
> +{
> +	int r;
> +
> +	r = kvm_check_memory_region(kvm, mem, batch);
> +	if (r)
> +		return r;
> +
> +	return kvm_set_memslot(kvm, batch);
> +}
>   EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
>   
>   static int kvm_set_memory_region(struct kvm *kvm,
> @@ -2061,6 +2096,9 @@ static int kvm_set_memory_region(struct kvm *kvm,
>   	mutex_lock(&kvm->slots_lock);
>   	r = __kvm_set_memory_region(kvm, mem, batch);
>   	mutex_unlock(&kvm->slots_lock);
> +	/* r == 1 means empty request, nothing to do but no error */
> +	if (r == 1)
> +		r = 0;

This is weird...  I think you have the order of the split slightly 
messed up.  Doing this check earlier, roughly at the same time as the 
introduction of the new struct, is probably clearer.

After having reviewed more of the code, I do think you should
disaggregate __kvm_set_memory_region() into separate functions (check,
build entry, prepare, commit) completely: kvm_set_memory_region() calls
them and __kvm_set_memory_region() disappears (see the sketch after
this message).

Paolo

>   	return r;
>   }
>
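
A minimal sketch of the shape Paolo suggests, assuming
kvm_check_memory_region() is reduced to pure checks, kvm_prepare_batch()
builds the batch entry, and kvm_set_memslot() keeps handling the
prepare/commit phases:

	static int kvm_set_memory_region(struct kvm *kvm,
					 const struct kvm_userspace_memory_region *mem,
					 struct kvm_internal_memory_region_list *batch)
	{
		int r;

		mutex_lock(&kvm->slots_lock);

		r = kvm_check_memory_region(kvm, mem, batch);	/* check */
		if (r)
			goto out;

		r = kvm_prepare_batch(kvm, mem, batch);		/* build entry */
		if (r)
			goto out;	/* r == 1: valid request, nothing to do */

		r = kvm_set_memslot(kvm, batch);		/* prepare + commit */
	out:
		mutex_unlock(&kvm->slots_lock);
		/* Fold "empty request" (r == 1) into success in one place. */
		return r == 1 ? 0 : r;
	}

With this shape __kvm_set_memory_region() disappears and the r == 1
special case is absorbed at the single call site, addressing the
"weird" check flagged above.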

Patch

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 17f07546d591..9d917af30593 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1927,19 +1927,9 @@  static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
 	return false;
 }
 
-/*
- * Allocate some memory and give it an address in the guest physical address
- * space.
- *
- * Discontiguous memory is allowed, mostly for framebuffers.
- * This function takes also care of initializing batch->new/old/invalid/change
- * fields.
- *
- * Must be called holding kvm->slots_lock for write.
- */
-int __kvm_set_memory_region(struct kvm *kvm,
-			    const struct kvm_userspace_memory_region *mem,
-			    struct kvm_internal_memory_region_list *batch)
+static int kvm_prepare_batch(struct kvm *kvm,
+			     const struct kvm_userspace_memory_region *mem,
+			     struct kvm_internal_memory_region_list *batch)
 {
 	struct kvm_memory_slot *old, *new;
 	struct kvm_memslots *slots;
@@ -1947,34 +1937,10 @@  int __kvm_set_memory_region(struct kvm *kvm,
 	unsigned long npages;
 	gfn_t base_gfn;
 	int as_id, id;
-	int r;
-
-	r = check_memory_region_flags(mem);
-	if (r)
-		return r;
 
 	as_id = mem->slot >> 16;
 	id = (u16)mem->slot;
 
-	/* General sanity checks */
-	if ((mem->memory_size & (PAGE_SIZE - 1)) ||
-	    (mem->memory_size != (unsigned long)mem->memory_size))
-		return -EINVAL;
-	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
-		return -EINVAL;
-	/* We can read the guest memory with __xxx_user() later on. */
-	if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
-	    (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
-	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
-			mem->memory_size))
-		return -EINVAL;
-	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
-		return -EINVAL;
-	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
-		return -EINVAL;
-	if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
-		return -EINVAL;
-
 	slots = __kvm_memslots(kvm, as_id);
 
 	/*
@@ -1993,7 +1959,7 @@  int __kvm_set_memory_region(struct kvm *kvm,
 
 		batch->change = KVM_MR_DELETE;
 		batch->new = NULL;
-		return kvm_set_memslot(kvm, batch);
+		return 0;
 	}
 
 	base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
@@ -2020,7 +1986,7 @@  int __kvm_set_memory_region(struct kvm *kvm,
 		else if (mem->flags != old->flags)
 			change = KVM_MR_FLAGS_ONLY;
 		else /* Nothing to change. */
-			return 0;
+			return 1;
 	}
 
 	if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
@@ -2041,12 +2007,81 @@  int __kvm_set_memory_region(struct kvm *kvm,
 
 	batch->new = new;
 	batch->change = change;
+	return 0;
+}
+
+static int kvm_check_mem(const struct kvm_userspace_memory_region *mem)
+{
+	int as_id, id;
+
+	as_id = mem->slot >> 16;
+	id = (u16)mem->slot;
+
+	/* General sanity checks */
+	if ((mem->memory_size & (PAGE_SIZE - 1)) ||
+	    (mem->memory_size != (unsigned long)mem->memory_size))
+		return -EINVAL;
+	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
+		return -EINVAL;
+	/* We can read the guest memory with __xxx_user() later on. */
+	if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
+	    (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
+	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
+			mem->memory_size))
+		return -EINVAL;
+	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
+		return -EINVAL;
+	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
+		return -EINVAL;
+	if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int kvm_check_memory_region(struct kvm *kvm,
+				const struct kvm_userspace_memory_region *mem,
+				struct kvm_internal_memory_region_list *batch)
+{
+	int r;
+
+	r = check_memory_region_flags(mem);
+	if (r)
+		return r;
 
-	r = kvm_set_memslot(kvm, batch);
+	r = kvm_check_mem(mem);
 	if (r)
-		kfree(new);
+		return r;
+
+	r = kvm_prepare_batch(kvm, mem, batch);
+	if (r && batch->new)
+		kfree(batch->new);
+
 	return r;
 }
+
+/*
+ * Allocate some memory and give it an address in the guest physical address
+ * space.
+ *
+ * Discontiguous memory is allowed, mostly for framebuffers.
+ * This function takes also care of initializing batch->new/old/invalid/change
+ * fields.
+ *
+ * Must be called holding kvm->slots_lock for write.
+ */
+int __kvm_set_memory_region(struct kvm *kvm,
+			    const struct kvm_userspace_memory_region *mem,
+			    struct kvm_internal_memory_region_list *batch)
+{
+	int r;
+
+	r = kvm_check_memory_region(kvm, mem, batch);
+	if (r)
+		return r;
+
+	return kvm_set_memslot(kvm, batch);
+}
 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
 
 static int kvm_set_memory_region(struct kvm *kvm,
@@ -2061,6 +2096,9 @@  static int kvm_set_memory_region(struct kvm *kvm,
 	mutex_lock(&kvm->slots_lock);
 	r = __kvm_set_memory_region(kvm, mem, batch);
 	mutex_unlock(&kvm->slots_lock);
+	/* r == 1 means empty request, nothing to do but no error */
+	if (r == 1)
+		r = 0;
 	return r;
 }