diff mbox series

[RFC,3/9] kvm_main.c: introduce kvm_internal_memory_region_list

Message ID 20220909104506.738478-4-eesposit@redhat.com (mailing list archive)
State New, archived
Headers show
Series kvm: implement atomic memslot updates | expand

Commit Message

Emanuele Giuseppe Esposito Sept. 9, 2022, 10:45 a.m. UTC
For now this struct is only used to pass the new, old and change
variables in a single parameter instead of three.

In the future, it will also be used to carry additional information
and to handle atomic memslot updates.
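
As an illustration, here is a minimal sketch of the calling convention,
using the declarations added by this patch (example_set_region() is a
hypothetical caller, not part of the series):

static int example_set_region(struct kvm *kvm,
			      const struct kvm_userspace_memory_region *mem)
{
	/* The list must always be zeroed by the caller. */
	struct kvm_internal_memory_region_list batch = { 0 };

	/* __kvm_set_memory_region() fills in old/new/invalid/change. */
	return __kvm_set_memory_region(kvm, mem, &batch);
}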

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 arch/x86/kvm/x86.c       |  3 ++-
 include/linux/kvm_host.h | 15 +++++++++++-
 virt/kvm/kvm_main.c      | 52 +++++++++++++++++++++++++++-------------
 3 files changed, 51 insertions(+), 19 deletions(-)

Comments

Paolo Bonzini Sept. 28, 2022, 4:48 p.m. UTC | #1
On 9/9/22 12:45, Emanuele Giuseppe Esposito wrote:
> +struct kvm_internal_memory_region_list {
> +	/* Fields initialized in __kvm_set_memory_region() */
> +	struct kvm_memory_slot *old;
> +	struct kvm_memory_slot *new;
> +	struct kvm_memory_slot *invalid;
> +	enum kvm_mr_change change;
> +};
> +

Alternative name: kvm_memslot_change

>  int __kvm_set_memory_region(struct kvm *kvm,
> -			    const struct kvm_userspace_memory_region *mem);
> +			    const struct kvm_userspace_memory_region *mem,
> +			    struct kvm_internal_memory_region_list *batch);

It's a bit weird to have the caller pass this in.  I'd rather keep the
*batch argument on __kvm_set_memory_region (though I'd call it *change
instead), and design kvm_set_memory_region so that, at the end of the
series, it looks something like:

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem)
{
	struct kvm_memslot_change change = {};
	int r;

	r = __kvm_set_memory_region(kvm, mem, &change);
	if (r < 0)
		return r;
	r = invalidate_memslot_for_change(kvm, &change);
	if (r < 0)
		return r;
	return commit_memslot_change(kvm, &change);
}

Paolo
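
To make the suggested naming concrete, the declarations under that scheme
would read roughly as follows (kvm_memslot_change and *change are the names
proposed above; the patch below still uses kvm_internal_memory_region_list
and *batch):

struct kvm_memslot_change {
	/* Same fields as kvm_internal_memory_region_list below. */
	struct kvm_memory_slot *old;
	struct kvm_memory_slot *new;
	struct kvm_memory_slot *invalid;
	enum kvm_mr_change change;
};

int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem,
			    struct kvm_memslot_change *change);
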
diff mbox series

Patch

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 567d13405445..da5a5dd3d4bf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12155,13 +12155,14 @@  void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
 
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
 		struct kvm_userspace_memory_region m;
+		struct kvm_internal_memory_region_list b = { 0 };
 
 		m.slot = id | (i << 16);
 		m.flags = 0;
 		m.guest_phys_addr = gpa;
 		m.userspace_addr = hva;
 		m.memory_size = size;
-		r = __kvm_set_memory_region(kvm, &m);
+		r = __kvm_set_memory_region(kvm, &m, &b);
 		if (r < 0)
 			return ERR_PTR_USR(r);
 	}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1c5b7b2e35dd..69af94472b39 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1108,8 +1108,21 @@  enum kvm_mr_change {
 	KVM_MR_FLAGS_ONLY,
 };
 
+/*
+ * Internally used to atomically update multiple memslots.
+ * Must always be zeroed by the caller.
+ */
+struct kvm_internal_memory_region_list {
+	/* Fields initialized in __kvm_set_memory_region() */
+	struct kvm_memory_slot *old;
+	struct kvm_memory_slot *new;
+	struct kvm_memory_slot *invalid;
+	enum kvm_mr_change change;
+};
+
 int __kvm_set_memory_region(struct kvm *kvm,
-			    const struct kvm_userspace_memory_region *mem);
+			    const struct kvm_userspace_memory_region *mem,
+			    struct kvm_internal_memory_region_list *batch);
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 339de0ed4557..e4fab15d0d4b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1583,10 +1583,11 @@  static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
 }
 
 static int kvm_prepare_memory_region(struct kvm *kvm,
-				     const struct kvm_memory_slot *old,
-				     struct kvm_memory_slot *new,
-				     enum kvm_mr_change change)
+				     struct kvm_internal_memory_region_list *batch)
 {
+	struct kvm_memory_slot *old = batch->old;
+	struct kvm_memory_slot *new = batch->new;
+	enum kvm_mr_change change = batch->change;
 	int r;
 
 	/*
@@ -1621,10 +1622,12 @@  static int kvm_prepare_memory_region(struct kvm *kvm,
 }
 
 static void kvm_commit_memory_region(struct kvm *kvm,
-				     struct kvm_memory_slot *old,
-				     const struct kvm_memory_slot *new,
-				     enum kvm_mr_change change)
+				     struct kvm_internal_memory_region_list *batch)
 {
+	struct kvm_memory_slot *old = batch->old;
+	struct kvm_memory_slot *new = batch->new;
+	enum kvm_mr_change change = batch->change;
+
 	/*
 	 * Update the total number of memslot pages before calling the arch
 	 * hook so that architectures can consume the result directly.
@@ -1788,11 +1791,12 @@  static void kvm_update_flags_memslot(struct kvm *kvm,
 }
 
 static int kvm_set_memslot(struct kvm *kvm,
-			   struct kvm_memory_slot *old,
-			   struct kvm_memory_slot *new,
-			   enum kvm_mr_change change)
+			   struct kvm_internal_memory_region_list *batch)
 {
 	struct kvm_memory_slot *invalid_slot;
+	struct kvm_memory_slot *old = batch->old;
+	struct kvm_memory_slot *new = batch->new;
+	enum kvm_mr_change change = batch->change;
 	int r;
 
 	/*
@@ -1830,10 +1834,11 @@  static int kvm_set_memslot(struct kvm *kvm,
 			mutex_unlock(&kvm->slots_arch_lock);
 			return -ENOMEM;
 		}
+		batch->invalid = invalid_slot;
 		kvm_invalidate_memslot(kvm, old, invalid_slot);
 	}
 
-	r = kvm_prepare_memory_region(kvm, old, new, change);
+	r = kvm_prepare_memory_region(kvm, batch);
 	if (r) {
 		/*
 		 * For DELETE/MOVE, revert the above INVALID change.  No
@@ -1877,7 +1882,7 @@  static int kvm_set_memslot(struct kvm *kvm,
 	 * will directly hit the final, active memslot.  Architectures are
 	 * responsible for knowing that new->arch may be stale.
 	 */
-	kvm_commit_memory_region(kvm, old, new, change);
+	kvm_commit_memory_region(kvm, batch);
 
 	return 0;
 }
@@ -1900,11 +1905,14 @@  static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
  * space.
  *
  * Discontiguous memory is allowed, mostly for framebuffers.
+ * This function also takes care of initializing the
+ * batch->new/old/invalid/change fields.
  *
  * Must be called holding kvm->slots_lock for write.
  */
 int __kvm_set_memory_region(struct kvm *kvm,
-			    const struct kvm_userspace_memory_region *mem)
+			    const struct kvm_userspace_memory_region *mem,
+			    struct kvm_internal_memory_region_list *batch)
 {
 	struct kvm_memory_slot *old, *new;
 	struct kvm_memslots *slots;
@@ -1947,6 +1955,7 @@  int __kvm_set_memory_region(struct kvm *kvm,
 	 * and/or destroyed by kvm_set_memslot().
 	 */
 	old = id_to_memslot(slots, id);
+	batch->old = old;
 
 	if (!mem->memory_size) {
 		if (!old || !old->npages)
@@ -1955,7 +1964,9 @@  int __kvm_set_memory_region(struct kvm *kvm,
 		if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
 			return -EIO;
 
-		return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE);
+		batch->change = KVM_MR_DELETE;
+		batch->new = NULL;
+		return kvm_set_memslot(kvm, batch);
 	}
 
 	base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
@@ -1963,6 +1974,7 @@  int __kvm_set_memory_region(struct kvm *kvm,
 
 	if (!old || !old->npages) {
 		change = KVM_MR_CREATE;
+		batch->old = NULL;
 
 		/*
 		 * To simplify KVM internals, the total number of pages across
@@ -2000,7 +2012,10 @@  int __kvm_set_memory_region(struct kvm *kvm,
 	new->flags = mem->flags;
 	new->userspace_addr = mem->userspace_addr;
 
-	r = kvm_set_memslot(kvm, old, new, change);
+	batch->new = new;
+	batch->change = change;
+
+	r = kvm_set_memslot(kvm, batch);
 	if (r)
 		kfree(new);
 	return r;
@@ -2008,7 +2023,8 @@  int __kvm_set_memory_region(struct kvm *kvm,
 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
 
 static int kvm_set_memory_region(struct kvm *kvm,
-				 const struct kvm_userspace_memory_region *mem)
+				 const struct kvm_userspace_memory_region *mem,
+				 struct kvm_internal_memory_region_list *batch)
 {
 	int r;
 
@@ -2016,7 +2032,7 @@  static int kvm_set_memory_region(struct kvm *kvm,
 		return -EINVAL;
 
 	mutex_lock(&kvm->slots_lock);
-	r = __kvm_set_memory_region(kvm, mem);
+	r = __kvm_set_memory_region(kvm, mem, batch);
 	mutex_unlock(&kvm->slots_lock);
 	return r;
 }
@@ -2024,7 +2040,9 @@  static int kvm_set_memory_region(struct kvm *kvm,
 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 					  struct kvm_userspace_memory_region *mem)
 {
-	return kvm_set_memory_region(kvm, mem);
+	struct kvm_internal_memory_region_list batch = { 0 };
+
+	return kvm_set_memory_region(kvm, mem, &batch);
 }
 
 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT