diff mbox series

[09/32] mlx4: Convert pv_id_table to XArray

Message ID 20190221002107.22625-10-willy@infradead.org (mailing list archive)
State Accepted
Delegated to: Jason Gunthorpe
Headers show
Series Convert the Infiniband subsystem to XArray | expand

Commit Message

Matthew Wilcox Feb. 21, 2019, 12:20 a.m. UTC
Signed-off-by: Matthew Wilcox <willy@infradead.org>
---
 drivers/infiniband/hw/mlx4/cm.c      | 36 ++++++++++------------------
 drivers/infiniband/hw/mlx4/mlx4_ib.h |  5 ++--
 2 files changed, 16 insertions(+), 25 deletions(-)

Comments

Jason Gunthorpe Feb. 21, 2019, 6:28 p.m. UTC | #1
On Wed, Feb 20, 2019 at 04:20:44PM -0800, Matthew Wilcox wrote:

> @@ -196,13 +193,12 @@ static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
>  	struct id_map_entry *ent, *found_ent;
>  
>  	spin_lock(&sriov->id_map_lock);
> -	ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id);
> +	ent = xa_erase(&sriov->pv_id_table, pv_cm_id);
>  	if (!ent)
>  		goto out;
>  	found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
>  	if (found_ent && found_ent == ent)
>  		rb_erase(&found_ent->node, sl_id_map);
> -	idr_remove(&sriov->pv_id_table, pv_cm_id);
>  out:
>  	spin_unlock(&sriov->id_map_lock);
>  }
> @@ -256,25 +252,19 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
>  	ent->dev = to_mdev(ibdev);
>  	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
>  
> -	idr_preload(GFP_KERNEL);
> -	spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
> -
> -	ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT);
> +	ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
> +			xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);

Why drop the NO_WAIT? Explain in the commit message?

Jason
Matthew Wilcox Feb. 21, 2019, 6:52 p.m. UTC | #2
On Thu, Feb 21, 2019 at 11:28:49AM -0700, Jason Gunthorpe wrote:
> On Wed, Feb 20, 2019 at 04:20:44PM -0800, Matthew Wilcox wrote:
> > @@ -256,25 +252,19 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
> >  	ent->dev = to_mdev(ibdev);
> >  	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
> >  
> > -	idr_preload(GFP_KERNEL);
> > -	spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
> > -
> > -	ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT);
> > +	ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
> > +			xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
> 
> Why drop the NO_WAIT? Explain in the commit message?

That's the standard pattern.

idr_preload(GFP_KERNEL);
spin_lock()
idr_alloc(..., GFP_NOWAIT);

becomes just

xa_alloc(..., GFP_KERNEL);
Leon Romanovsky March 26, 2019, 9:04 a.m. UTC | #3
On Wed, Feb 20, 2019 at 04:20:44PM -0800, Matthew Wilcox wrote:
> Signed-off-by: Matthew Wilcox <willy@infradead.org>
> ---
>  drivers/infiniband/hw/mlx4/cm.c      | 36 ++++++++++------------------
>  drivers/infiniband/hw/mlx4/mlx4_ib.h |  5 ++--
>  2 files changed, 16 insertions(+), 25 deletions(-)
>

This "id_map_lock" is worth revising, but it is not relevant to
this patch.

Thanks,
Acked-by: Leon Romanovsky <leonro@mellanox.com>
diff mbox series

Patch

diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index fedaf8260105..c0e53d8b7c95 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -168,20 +168,17 @@  static void id_map_ent_timeout(struct work_struct *work)
 {
 	struct delayed_work *delay = to_delayed_work(work);
 	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
-	struct id_map_entry *db_ent, *found_ent;
+	struct id_map_entry *found_ent;
 	struct mlx4_ib_dev *dev = ent->dev;
 	struct mlx4_ib_sriov *sriov = &dev->sriov;
 	struct rb_root *sl_id_map = &sriov->sl_id_map;
-	int pv_id = (int) ent->pv_cm_id;
 
 	spin_lock(&sriov->id_map_lock);
-	db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
-	if (!db_ent)
+	if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
 		goto out;
 	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
 	if (found_ent && found_ent == ent)
 		rb_erase(&found_ent->node, sl_id_map);
-	idr_remove(&sriov->pv_id_table, pv_id);
 
 out:
 	list_del(&ent->list);
@@ -196,13 +193,12 @@  static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
 	struct id_map_entry *ent, *found_ent;
 
 	spin_lock(&sriov->id_map_lock);
-	ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id);
+	ent = xa_erase(&sriov->pv_id_table, pv_cm_id);
 	if (!ent)
 		goto out;
 	found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
 	if (found_ent && found_ent == ent)
 		rb_erase(&found_ent->node, sl_id_map);
-	idr_remove(&sriov->pv_id_table, pv_cm_id);
 out:
 	spin_unlock(&sriov->id_map_lock);
 }
@@ -256,25 +252,19 @@  id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
 	ent->dev = to_mdev(ibdev);
 	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
 
-	idr_preload(GFP_KERNEL);
-	spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
-
-	ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT);
+	ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
+			xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
 	if (ret >= 0) {
-		ent->pv_cm_id = (u32)ret;
+		spin_lock(&sriov->id_map_lock);
 		sl_id_map_add(ibdev, ent);
 		list_add_tail(&ent->list, &sriov->cm_list);
-	}
-
-	spin_unlock(&sriov->id_map_lock);
-	idr_preload_end();
-
-	if (ret >= 0)
+		spin_unlock(&sriov->id_map_lock);
 		return ent;
+	}
 
 	/*error flow*/
 	kfree(ent);
-	mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
+	mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", ret);
 	return ERR_PTR(-ENOMEM);
 }
 
@@ -290,7 +280,7 @@  id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
 		if (ent)
 			*pv_cm_id = (int) ent->pv_cm_id;
 	} else
-		ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id);
+		ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
 	spin_unlock(&sriov->id_map_lock);
 
 	return ent;
@@ -407,7 +397,7 @@  void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
 	spin_lock_init(&dev->sriov.id_map_lock);
 	INIT_LIST_HEAD(&dev->sriov.cm_list);
 	dev->sriov.sl_id_map = RB_ROOT;
-	idr_init(&dev->sriov.pv_id_table);
+	xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
 }
 
 /* slave = -1 ==> all slaves */
@@ -444,7 +434,7 @@  void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
 					 struct id_map_entry, node);
 
 			rb_erase(&ent->node, sl_id_map);
-			idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id);
+			xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
 		}
 		list_splice_init(&dev->sriov.cm_list, &lh);
 	} else {
@@ -460,7 +450,7 @@  void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
 		/* remove those nodes from databases */
 		list_for_each_entry_safe(map, tmp_map, &lh, list) {
 			rb_erase(&map->node, sl_id_map);
-			idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id);
+			xa_erase(&sriov->pv_id_table, map->pv_cm_id);
 		}
 
 		/* add remaining nodes from cm_list */
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e491f3eda6e7..4c774304cc63 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -492,10 +492,11 @@  struct mlx4_ib_sriov {
 	struct mlx4_sriov_alias_guid alias_guid;
 
 	/* CM paravirtualization fields */
-	struct list_head cm_list;
+	struct xarray pv_id_table;
+	u32 pv_id_next;
 	spinlock_t id_map_lock;
 	struct rb_root sl_id_map;
-	struct idr pv_id_table;
+	struct list_head cm_list;
 };
 
 struct gid_cache_context {