[v4,37/40] KVM: PPC: Book3s HV: drop locking around kvmppc_uvmem_bitmap

Message ID 20240620175703.605111-38-yury.norov@gmail.com (mailing list archive)
State New, archived
Series lib/find: add atomic find_bit() primitives

Commit Message

Yury Norov June 20, 2024, 5:57 p.m. UTC
The driver operates on individual bits of kvmppc_uvmem_bitmap. Now that
an atomic search API for bitmaps is available, use find_and_set_bit() to
claim a free bit and clear_bit() to release it, and drop the spinlock
that serialized access to the bitmap entirely.

Signed-off-by: Yury Norov <yury.norov@gmail.com>
---
 arch/powerpc/kvm/book3s_hv_uvmem.c | 33 ++++++++++--------------------
 1 file changed, 11 insertions(+), 22 deletions(-)
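
For context, the conversion relies on find_and_set_bit() introduced earlier
in this series (the patch pulls in <linux/find_atomic.h>). As a rough
illustration only, not the series' actual implementation, its expected
semantics can be sketched with existing bitmap primitives as a find plus
atomic test-and-set retry loop; find_and_set_bit_sketch() is a hypothetical
name used purely for this sketch:

/*
 * Illustrative sketch only (not the helper added by this series): the
 * behaviour kvmppc_uvmem_get_page() now depends on is "return the first
 * zero bit you managed to atomically set, or >= nbits if none is free".
 */
#include <linux/bitops.h>
#include <linux/find.h>

static unsigned long find_and_set_bit_sketch(unsigned long *addr,
					     unsigned long nbits)
{
	unsigned long bit;

	do {
		/* Candidate free bit; may race with another CPU. */
		bit = find_first_zero_bit(addr, nbits);
		if (bit >= nbits)
			return nbits;	/* bitmap is full */
		/*
		 * Claim the bit atomically. If someone else set it between
		 * the search and here, test_and_set_bit() returns true and
		 * the search is retried.
		 */
	} while (test_and_set_bit(bit, addr));

	return bit;
}

With allocation reduced to that single atomic claim and freeing reduced to
clear_bit(), no multi-step read-modify-write sequence on the bitmap is left,
which is why kvmppc_uvmem_bitmap_lock can be removed.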

Patch

diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 92f33115144b..93d09137cb23 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -86,6 +86,7 @@ 
  * page-sizes, we need to break this assumption.
  */
 
+#include <linux/find_atomic.h>
 #include <linux/pagemap.h>
 #include <linux/migrate.h>
 #include <linux/kvm_host.h>
@@ -99,7 +100,6 @@ 
 
 static struct dev_pagemap kvmppc_uvmem_pgmap;
 static unsigned long *kvmppc_uvmem_bitmap;
-static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);
 
 /*
  * States of a GFN
@@ -697,23 +697,20 @@  static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
 	struct page *dpage = NULL;
 	unsigned long bit, uvmem_pfn;
 	struct kvmppc_uvmem_page_pvt *pvt;
-	unsigned long pfn_last, pfn_first;
+	unsigned long num_pfns, pfn_first;
 
 	pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT;
-	pfn_last = pfn_first +
-		   (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT);
+	num_pfns = range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT;
 
-	spin_lock(&kvmppc_uvmem_bitmap_lock);
-	bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
-				  pfn_last - pfn_first);
-	if (bit >= (pfn_last - pfn_first))
-		goto out;
-	bitmap_set(kvmppc_uvmem_bitmap, bit, 1);
-	spin_unlock(&kvmppc_uvmem_bitmap_lock);
+	bit = find_and_set_bit(kvmppc_uvmem_bitmap, num_pfns);
+	if (bit >= num_pfns)
+		return NULL;
 
 	pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
-	if (!pvt)
-		goto out_clear;
+	if (!pvt) {
+		clear_bit(bit, kvmppc_uvmem_bitmap);
+		return NULL;
+	}
 
 	uvmem_pfn = bit + pfn_first;
 	kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);
@@ -725,12 +722,6 @@  static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
 	dpage->zone_device_data = pvt;
 	zone_device_page_init(dpage);
 	return dpage;
-out_clear:
-	spin_lock(&kvmppc_uvmem_bitmap_lock);
-	bitmap_clear(kvmppc_uvmem_bitmap, bit, 1);
-out:
-	spin_unlock(&kvmppc_uvmem_bitmap_lock);
-	return NULL;
 }
 
 /*
@@ -1021,9 +1012,7 @@  static void kvmppc_uvmem_page_free(struct page *page)
 			(kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
 	struct kvmppc_uvmem_page_pvt *pvt;
 
-	spin_lock(&kvmppc_uvmem_bitmap_lock);
-	bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1);
-	spin_unlock(&kvmppc_uvmem_bitmap_lock);
+	clear_bit(pfn, kvmppc_uvmem_bitmap);
 
 	pvt = page->zone_device_data;
 	page->zone_device_data = NULL;