[2/2] scsi: target: tcmu: Replace radix_tree with XArray

Message ID 20210224185335.13844-3-bostroesser@gmail.com (mailing list archive)
State Accepted
Series scsi: target: tcmu: Replace IDR and radix_tree with XArray

Commit Message

Bodo Stroesser Feb. 24, 2021, 6:53 p.m. UTC
An attempt by Matthew Wilcox to replace the radix-tree usage in tcmu
with XArray more than a year ago unfortunately got lost.

I rebased that work onto the latest tcmu code and tested it.

Signed-off-by: Bodo Stroesser <bostroesser@gmail.com>
---
 drivers/target/target_core_user.c | 34 ++++++++++++++++------------------
 1 file changed, 16 insertions(+), 18 deletions(-)
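For readers less familiar with the XArray API, the conversion below is essentially a one-to-one mapping of the old radix-tree calls onto their XArray equivalents. The sketch that follows is illustrative only and not part of the patch; the names blocks, dbi and demo_lookup_or_insert() are placeholders, and the error handling is simplified:

#include <linux/xarray.h>
#include <linux/mm.h>

/* xa_init()/DEFINE_XARRAY() replaces INIT_RADIX_TREE(); the XArray does not
 * store GFP flags itself, they are passed per call to xa_store() instead. */
static DEFINE_XARRAY(blocks);

static struct page *demo_lookup_or_insert(unsigned long dbi)
{
	struct page *page;

	/* radix_tree_lookup() becomes xa_load() */
	page = xa_load(&blocks, dbi);
	if (page)
		return page;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;

	/* radix_tree_insert() becomes xa_store(). xa_store() returns the
	 * previous entry, or an xa_err() pointer if allocation failed; since
	 * this slot is known to be empty, any non-NULL return is treated as
	 * failure, mirroring the check in the patch. */
	if (xa_store(&blocks, dbi, page, GFP_KERNEL)) {
		__free_page(page);
		return NULL;
	}

	return page;
}

The same xa_load()/xa_store() pattern appears in the tcmu_get_empty_block() and tcmu_get_block_page() hunks below.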

Patch

diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 1fbfb307d5e5..067e00e37673 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -14,7 +14,6 @@ 
 #include <linux/vmalloc.h>
 #include <linux/uio_driver.h>
 #include <linux/xarray.h>
-#include <linux/radix-tree.h>
 #include <linux/stringify.h>
 #include <linux/bitops.h>
 #include <linux/highmem.h>
@@ -143,7 +142,7 @@  struct tcmu_dev {
 	uint32_t dbi_max;
 	uint32_t dbi_thresh;
 	unsigned long *data_bitmap;
-	struct radix_tree_root data_blocks;
+	struct xarray data_blocks;
 
 	struct xarray commands;
 
@@ -500,13 +499,13 @@  static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
 				       int prev_dbi, int *iov_cnt)
 {
 	struct page *page;
-	int ret, dbi;
+	int dbi;
 
 	dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
 	if (dbi == udev->dbi_thresh)
 		return -1;
 
-	page = radix_tree_lookup(&udev->data_blocks, dbi);
+	page = xa_load(&udev->data_blocks, dbi);
 	if (!page) {
 		if (atomic_add_return(1, &global_db_count) >
 				      tcmu_global_max_blocks)
@@ -517,8 +516,7 @@  static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
 		if (!page)
 			goto err_alloc;
 
-		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
-		if (ret)
+		if (xa_store(&udev->data_blocks, dbi, page, GFP_KERNEL))
 			goto err_insert;
 	}
 
@@ -557,7 +555,7 @@  static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
 static inline struct page *
 tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
 {
-	return radix_tree_lookup(&udev->data_blocks, dbi);
+	return xa_load(&udev->data_blocks, dbi);
 }
 
 static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
@@ -1561,7 +1559,7 @@  static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
 	timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
 	timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
 
-	INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
+	xa_init(&udev->data_blocks);
 
 	return &udev->se_dev;
 }
@@ -1585,19 +1583,19 @@  static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
 	return -EINVAL;
 }
 
-static void tcmu_blocks_release(struct radix_tree_root *blocks,
-				int start, int end)
+static void tcmu_blocks_release(struct xarray *blocks, unsigned long first,
+				unsigned long last)
 {
-	int i;
+	XA_STATE(xas, blocks, first);
 	struct page *page;
 
-	for (i = start; i < end; i++) {
-		page = radix_tree_delete(blocks, i);
-		if (page) {
-			__free_page(page);
-			atomic_dec(&global_db_count);
-		}
+	xas_lock(&xas);
+	xas_for_each(&xas, page, last) {
+		xas_store(&xas, NULL);
+		__free_page(page);
+		atomic_dec(&global_db_count);
 	}
+	xas_unlock(&xas);
 }
 
 static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
@@ -2923,7 +2921,7 @@  static void find_free_blocks(void)
 		unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
 
 		/* Release the block pages */
-		tcmu_blocks_release(&udev->data_blocks, start, end);
+		tcmu_blocks_release(&udev->data_blocks, start, end - 1);
 		mutex_unlock(&udev->cmdr_lock);
 
 		total_freed += end - start;
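One semantic detail worth noting: the reworked tcmu_blocks_release() takes an inclusive last index, which is why find_free_blocks() now passes end - 1 where the old code passed the exclusive end. The xas_*-based loop in the patch walks and erases the whole range while holding the xa_lock once. Purely for illustration, and assuming nothing beyond the generic XArray API (demo_blocks_release() is a placeholder name, and the tcmu-specific global_db_count accounting is omitted), the same job could be done with the plain API at the cost of taking the internal lock once per entry:

#include <linux/xarray.h>
#include <linux/mm.h>

/* Illustrative only, not part of the patch: a plain-API equivalent of
 * tcmu_blocks_release(). xa_for_each_range() iterates over present entries
 * in [first, last] inclusive, and xa_erase() takes the xa_lock internally
 * for each entry, whereas the XA_STATE()/xas_for_each() form in the patch
 * holds the lock once across the whole range. */
static void demo_blocks_release(struct xarray *blocks, unsigned long first,
				unsigned long last)
{
	struct page *page;
	unsigned long dbi;

	xa_for_each_range(blocks, dbi, page, first, last) {
		xa_erase(blocks, dbi);
		__free_page(page);
	}
}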