[RFC,4/9] kvm_main.c: split logic in kvm_set_memslots

Message ID: 20220909104506.738478-5-eesposit@redhat.com (mailing list archive)
State: New, archived
Series: kvm: implement atomic memslot updates

Commit Message

Emanuele Giuseppe Esposito Sept. 9, 2022, 10:45 a.m. UTC
At this point this too is just a split, but later it will handle atomic
memslot updates (thus avoiding swapping the memslot list every time).
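
After this change, kvm_set_memslot() itself becomes a thin wrapper around the
two new helpers (see the diff below):

	r = kvm_prepare_memslot(kvm, batch);	/* takes slots_arch_lock, may fail */
	if (r)
		return r;

	kvm_finish_memslot(kvm, batch);		/* commits and drops the lock */

	return 0;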

No functional change intended.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 virt/kvm/kvm_main.c | 37 ++++++++++++++++++++++++++++++++-----
 1 file changed, 32 insertions(+), 5 deletions(-)

Comments

Paolo Bonzini Sept. 28, 2022, 5:04 p.m. UTC | #1
On 9/9/22 12:45, Emanuele Giuseppe Esposito wrote:
> +/*
> + * Takes kvm->slots_arch_lock, and releases it only if
> + * invalid_slot allocation or kvm_prepare_memory_region failed.
> +*/

Much simpler: "kvm->slots_arch_lock is taken on a successful return."

This is a small change in phrasing, but it makes a lot more sense: on 
success you are preparing for the final commit operation, otherwise you 
just want the caller to return your errno value.
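
Concretely, the comment above kvm_prepare_memslot() would then read:

	/* kvm->slots_arch_lock is taken on a successful return. */
	static int kvm_prepare_memslot(struct kvm *kvm,
				       struct kvm_internal_memory_region_list *batch)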

[...]

> +/* Must be called with kvm->slots_arch_lock held, but releases it. */

s/but/and/.  Even better, "and releases it before returning".  "But"
tells the reader that something strange is going on, "and" tells them that
something simple is going on.
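
Applied here, that would give, for example:

	/*
	 * Must be called with kvm->slots_arch_lock held, and releases it
	 * before returning.
	 */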

I would also rename the functions along the lines of my review to patch
3, to highlight that these functions prepare/commit a *change* to a memslot.
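
For example (hypothetical names, the actual suggestion is in the review of
patch 3), something along these lines:

	static int kvm_prepare_memslot_change(struct kvm *kvm,
					      struct kvm_internal_memory_region_list *batch);
	static void kvm_commit_memslot_change(struct kvm *kvm,
					      struct kvm_internal_memory_region_list *batch);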

> +static void kvm_finish_memslot(struct kvm *kvm,
> +			       struct kvm_internal_memory_region_list *batch)
> +{
> +	struct kvm_memory_slot *invalid_slot = batch->invalid;
> +	struct kvm_memory_slot *old = batch->old;
> +	struct kvm_memory_slot *new = batch->new;
> +	enum kvm_mr_change change = batch->change;

lockdep_assert_held(&kvm->slots_arch_lock);

>   
>   	/*
>   	 * For DELETE and MOVE, the working slot is now active as the INVALID
> @@ -1883,6 +1898,18 @@ static int kvm_set_memslot(struct kvm *kvm,
>   	 * responsible for knowing that new->arch may be stale.
>   	 */
>   	kvm_commit_memory_region(kvm, batch);
> +}
> +
> +static int kvm_set_memslot(struct kvm *kvm,
> +			   struct kvm_internal_memory_region_list *batch)
> +{
> +	int r;
> +
> +	r = kvm_prepare_memslot(kvm, batch);
> +	if (r)
> +		return r;
> +
> +	kvm_finish_memslot(kvm, batch);
>   
>   	return 0;

Ok, these are the functions I hinted at in the review of the previous 
patch, so we're not far away.  You should be able to move the 
kvm_set_memslot call to kvm_set_memory_region in patch 3, and then 
replace it with the two calls here directly in kvm_set_memory_region.
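
A rough sketch of what the tail of kvm_set_memory_region() could then look
like (only an illustration; the rest of that function, including its existing
error handling, is not shown in this patch):

	r = kvm_prepare_memslot(kvm, batch);
	if (r)
		return r;	/* or the caller's existing cleanup path */

	kvm_finish_memslot(kvm, batch);
	return 0;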

Paolo

Patch

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e4fab15d0d4b..17f07546d591 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1790,12 +1790,15 @@  static void kvm_update_flags_memslot(struct kvm *kvm,
 	kvm_activate_memslot(kvm, old, new);
 }
 
-static int kvm_set_memslot(struct kvm *kvm,
-			   struct kvm_internal_memory_region_list *batch)
+/*
+ * Takes kvm->slots_arch_lock, and releases it only if
+ * invalid_slot allocation or kvm_prepare_memory_region failed.
+ */
+static int kvm_prepare_memslot(struct kvm *kvm,
+			       struct kvm_internal_memory_region_list *batch)
 {
 	struct kvm_memory_slot *invalid_slot;
 	struct kvm_memory_slot *old = batch->old;
-	struct kvm_memory_slot *new = batch->new;
 	enum kvm_mr_change change = batch->change;
 	int r;
 
@@ -1829,7 +1832,8 @@  static int kvm_set_memslot(struct kvm *kvm,
 	 * invalidation needs to be reverted.
 	 */
 	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
-		invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
+		invalid_slot = kzalloc(sizeof(*invalid_slot),
+				       GFP_KERNEL_ACCOUNT);
 		if (!invalid_slot) {
 			mutex_unlock(&kvm->slots_arch_lock);
 			return -ENOMEM;
@@ -1847,13 +1851,24 @@  static int kvm_set_memslot(struct kvm *kvm,
 		 * release slots_arch_lock.
 		 */
 		if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
+			/* kvm_activate_memslot releases kvm->slots_arch_lock */
 			kvm_activate_memslot(kvm, invalid_slot, old);
 			kfree(invalid_slot);
 		} else {
 			mutex_unlock(&kvm->slots_arch_lock);
 		}
-		return r;
 	}
+	return r;
+}
+
+/* Must be called with kvm->slots_arch_lock held, but releases it. */
+static void kvm_finish_memslot(struct kvm *kvm,
+			       struct kvm_internal_memory_region_list *batch)
+{
+	struct kvm_memory_slot *invalid_slot = batch->invalid;
+	struct kvm_memory_slot *old = batch->old;
+	struct kvm_memory_slot *new = batch->new;
+	enum kvm_mr_change change = batch->change;
 
 	/*
 	 * For DELETE and MOVE, the working slot is now active as the INVALID
@@ -1883,6 +1898,18 @@  static int kvm_set_memslot(struct kvm *kvm,
 	 * responsible for knowing that new->arch may be stale.
 	 */
 	kvm_commit_memory_region(kvm, batch);
+}
+
+static int kvm_set_memslot(struct kvm *kvm,
+			   struct kvm_internal_memory_region_list *batch)
+{
+	int r;
+
+	r = kvm_prepare_memslot(kvm, batch);
+	if (r)
+		return r;
+
+	kvm_finish_memslot(kvm, batch);
 
 	return 0;
 }