
[v2,1/3] brd: extend the rcu regions to cover read and write

Message ID 7d99fa-9c13-ab2a-acde-1f8bbc63bf3@redhat.com (mailing list archive)
State New, archived
Series: brd discard patches

Commit Message

Mikulas Patocka July 21, 2023, 1:49 p.m. UTC
This patch extends the RCU regions so that the lookup of a page and the
subsequent read from or write to that page happen inside a single RCU
read-side critical section. This is needed by the following patch, which
enables discard.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>

---
 drivers/block/brd.c |    8 ++++++++
 1 file changed, 8 insertions(+)
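
For context: the discard patch later in this series removes pages from
brd->brd_pages and frees them after an RCU grace period, so a reader must
hold the rcu read lock from the lookup until it has finished touching the
page. A minimal sketch of such a freeing side (the helper name and the
per-page synchronize_rcu() are assumptions for illustration, not the
actual 2/3 patch):

/*
 * Hypothetical sketch, not the actual discard patch: erase the page
 * from the xarray, then wait out all RCU readers (the regions extended
 * by this patch) before freeing it.
 */
static void brd_discard_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;

	page = xa_erase(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT);
	if (!page)
		return;

	/*
	 * Anyone who looked this page up under rcu_read_lock() may still
	 * be copying to or from it; the grace period makes the free safe.
	 * A real implementation would batch pages rather than call
	 * synchronize_rcu() once per page.
	 */
	synchronize_rcu();
	__free_page(page);
}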

Comments

Pankaj Raghav July 27, 2023, 8:31 a.m. UTC | #1
> Index: linux-2.6/drivers/block/brd.c
> ===================================================================
> --- linux-2.6.orig/drivers/block/brd.c
> +++ linux-2.6/drivers/block/brd.c
> @@ -150,23 +150,27 @@ static void copy_to_brd(struct brd_devic
>  	size_t copy;
>  
>  	copy = min_t(size_t, n, PAGE_SIZE - offset);
> +	rcu_read_lock();
>  	page = brd_lookup_page(brd, sector);

xa_load() inside brd_lookup_page() also takes the rcu read lock. Instead of
nesting rcu read locks, could we modify brd_lookup_page() to use:

diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 970bd6ff38c4..acc37bfdd181 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -55,7 +55,9 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
        struct page *page;
 
        idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
-       page = xa_load(&brd->brd_pages, idx);
+
+       XA_STATE(xas, &brd->brd_pages, idx);
+       page = xas_load(&xas);
 
        BUG_ON(page && page->index != idx);

>  	BUG_ON(!page);
>  
>  	dst = kmap_atomic(page);
>  	memcpy(dst + offset, src, copy);
>  	kunmap_atomic(dst);
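
For reference, the nesting Pankaj points out comes from xa_load() itself,
which in lib/xarray.c is approximately:

void *xa_load(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
		if (xa_is_zero(entry))
			entry = NULL;
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}

Nested rcu_read_lock() calls are legal and cheap, so the suggestion is
about removing a redundant pair rather than fixing a bug: xas_load() does
not take the lock itself and relies on the caller's read-side critical
section.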

Patch

Index: linux-2.6/drivers/block/brd.c
===================================================================
--- linux-2.6.orig/drivers/block/brd.c
+++ linux-2.6/drivers/block/brd.c
@@ -150,23 +150,27 @@  static void copy_to_brd(struct brd_devic
 	size_t copy;
 
 	copy = min_t(size_t, n, PAGE_SIZE - offset);
+	rcu_read_lock();
 	page = brd_lookup_page(brd, sector);
 	BUG_ON(!page);
 
 	dst = kmap_atomic(page);
 	memcpy(dst + offset, src, copy);
 	kunmap_atomic(dst);
+	rcu_read_unlock();
 
 	if (copy < n) {
 		src += copy;
 		sector += copy >> SECTOR_SHIFT;
 		copy = n - copy;
+		rcu_read_lock();
 		page = brd_lookup_page(brd, sector);
 		BUG_ON(!page);
 
 		dst = kmap_atomic(page);
 		memcpy(dst, src, copy);
 		kunmap_atomic(dst);
+		rcu_read_unlock();
 	}
 }
 
@@ -182,6 +186,7 @@  static void copy_from_brd(void *dst, str
 	size_t copy;
 
 	copy = min_t(size_t, n, PAGE_SIZE - offset);
+	rcu_read_lock();
 	page = brd_lookup_page(brd, sector);
 	if (page) {
 		src = kmap_atomic(page);
@@ -189,11 +194,13 @@  static void copy_from_brd(void *dst, str
 		kunmap_atomic(src);
 	} else
 		memset(dst, 0, copy);
+	rcu_read_unlock();
 
 	if (copy < n) {
 		dst += copy;
 		sector += copy >> SECTOR_SHIFT;
 		copy = n - copy;
+		rcu_read_lock();
 		page = brd_lookup_page(brd, sector);
 		if (page) {
 			src = kmap_atomic(page);
@@ -201,6 +208,7 @@  static void copy_from_brd(void *dst, str
 			kunmap_atomic(src);
 		} else
 			memset(dst, 0, copy);
+		rcu_read_unlock();
 	}
 }