
[RFC,6/9] kvm_main.c: simplify change-specific callbacks

Message ID 20220909104506.738478-7-eesposit@redhat.com (mailing list archive)
State New, archived
Series kvm: implement atomic memslot updates

Commit Message

Emanuele Giuseppe Esposito Sept. 9, 2022, 10:45 a.m. UTC
Instead of having each "change"-specific function (CREATE, DELETE,
MOVE, ...) replace the memslot in the inactive slots and then activate
it, make kvm_set_memslot() replace the memslot in the current inactive
list and swap the lists; kvm_finish_memslot() then only has to update
the new inactive list (formerly the active one).
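
The resulting flow is roughly the following (a sketch of the call
chain after this patch, with arguments simplified; the exact contents
of kvm_prepare_memslot() are defined in the earlier patches of this
series):

  kvm_set_memslot(kvm, batch)
    kvm_prepare_memslot(kvm, batch)
    kvm_replace_memslot(kvm, old, new)    /* update the inactive set */
    kvm_swap_active_memslots(kvm, as_id)  /* releases slots_arch_lock */
    kvm_finish_memslot(kvm, batch)        /* fix up new inactive set */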

The pre-swap replacement can be generalized to replace(old, new)
because, even for a DELETE or MOVE operation, old always stays in the
inactive list, which is the one kvm_replace_memslot() operates on.
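
Per change type, the pre-swap call then amounts to (a sketch; it
assumes old is NULL for CREATE and new is NULL for DELETE, as in the
code being removed):

  CREATE:     replace(NULL, new)  /* add new to the inactive set */
  DELETE:     replace(old, NULL)  /* invalid copy stays active until the swap */
  MOVE:       replace(old, new)   /* invalid copy stays active until the swap */
  FLAGS_ONLY: replace(old, new)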

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 virt/kvm/kvm_main.c | 44 +++++++++++++++++++++++---------------------
 1 file changed, 23 insertions(+), 21 deletions(-)

Patch

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9d917af30593..6b73615891f0 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1747,34 +1747,26 @@  static void kvm_invalidate_memslot(struct kvm *kvm,
 static void kvm_create_memslot(struct kvm *kvm,
 			       struct kvm_memory_slot *new)
 {
-	/* Add the new memslot to the inactive set and activate. */
+	/* Update the now-inactive memslots by adding the new slot */
 	kvm_replace_memslot(kvm, NULL, new);
-	kvm_activate_memslot(kvm, NULL, new);
 }
 
 static void kvm_delete_memslot(struct kvm *kvm,
-			       struct kvm_memory_slot *old,
 			       struct kvm_memory_slot *invalid_slot)
 {
-	/*
-	 * Remove the old memslot (in the inactive memslots) by passing NULL as
-	 * the "new" slot, and for the invalid version in the active slots.
-	 */
-	kvm_replace_memslot(kvm, old, NULL);
-	kvm_activate_memslot(kvm, invalid_slot, NULL);
+	/* Update the now-inactive memslots by removing the invalid slot */
+	kvm_replace_memslot(kvm, invalid_slot, NULL);
 }
 
 static void kvm_move_memslot(struct kvm *kvm,
-			     struct kvm_memory_slot *old,
 			     struct kvm_memory_slot *new,
 			     struct kvm_memory_slot *invalid_slot)
 {
 	/*
-	 * Replace the old memslot in the inactive slots, and then swap slots
-	 * and replace the current INVALID with the new as well.
+	 * Update the now-inactive memslots by removing the invalid slot
+	 * and adding the new one.
 	 */
-	kvm_replace_memslot(kvm, old, new);
-	kvm_activate_memslot(kvm, invalid_slot, new);
+	kvm_replace_memslot(kvm, invalid_slot, new);
 }
 
 static void kvm_update_flags_memslot(struct kvm *kvm,
@@ -1782,12 +1774,10 @@  static void kvm_update_flags_memslot(struct kvm *kvm,
 				     struct kvm_memory_slot *new)
 {
 	/*
-	 * Similar to the MOVE case, but the slot doesn't need to be zapped as
-	 * an intermediate step. Instead, the old memslot is simply replaced
-	 * with a new, updated copy in both memslot sets.
+	 * Update the now-inactive memslots by removing the old slot
+	 * and adding the new one.
 	 */
 	kvm_replace_memslot(kvm, old, new);
-	kvm_activate_memslot(kvm, old, new);
 }
 
 /*
@@ -1880,9 +1870,9 @@  static void kvm_finish_memslot(struct kvm *kvm,
 	if (change == KVM_MR_CREATE)
 		kvm_create_memslot(kvm, new);
 	else if (change == KVM_MR_DELETE)
-		kvm_delete_memslot(kvm, old, invalid_slot);
+		kvm_delete_memslot(kvm, invalid_slot);
 	else if (change == KVM_MR_MOVE)
-		kvm_move_memslot(kvm, old, new, invalid_slot);
+		kvm_move_memslot(kvm, new, invalid_slot);
 	else if (change == KVM_MR_FLAGS_ONLY)
 		kvm_update_flags_memslot(kvm, old, new);
 	else
@@ -1903,12 +1893,24 @@  static void kvm_finish_memslot(struct kvm *kvm,
 static int kvm_set_memslot(struct kvm *kvm,
 			   struct kvm_internal_memory_region_list *batch)
 {
-	int r;
+	int r, as_id;
 
 	r = kvm_prepare_memslot(kvm, batch);
 	if (r)
 		return r;
 
+	/*
+	 * Even for a DELETE or MOVE, invalid is in the active memslots
+	 * and old in the inactive ones, so replacing old with new is correct.
+	 */
+	kvm_replace_memslot(kvm, batch->old, batch->new);
+
+	/* old and invalid are interchangeable: invalid is old's copy */
+	as_id = kvm_memslots_get_as_id(batch->old, batch->new);
+
+	/* releases kvm->slots_arch_lock */
+	kvm_swap_active_memslots(kvm, as_id);
+
 	kvm_finish_memslot(kvm, batch);
 
 	return 0;