
[v2,1/2] RDMA/rxe: Remove struct rxe_phys_buf

Message ID: 1668153085-15-1-git-send-email-yangx.jy@fujitsu.com (mailing list archive)
State: Changes Requested
Delegated to: Jason Gunthorpe
Series: [v2,1/2] RDMA/rxe: Remove struct rxe_phys_buf

Commit Message

Xiao Yang Nov. 11, 2022, 7:51 a.m. UTC
1) Remove rxe_phys_buf[n].size and use ibmr.page_size instead, since every buffer in a map covers one page of the same size.
2) Replace struct rxe_map's buf[] array of struct rxe_phys_buf with a plain u64 addrs[] array.

Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
---
 drivers/infiniband/sw/rxe/rxe_mr.c    | 45 +++++++++------------------
 drivers/infiniband/sw/rxe/rxe_verbs.c |  6 +---
 drivers/infiniband/sw/rxe/rxe_verbs.h |  9 ++----
 3 files changed, 18 insertions(+), 42 deletions(-)
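
For illustration only (this snippet is not part of the patch): the change shrinks each map entry from an (addr, size) pair to a bare page address, which with 4 KiB pages doubles RXE_BUF_PER_MAP from 256 to 512. Side by side, with _old/_new suffixes added here so both versions compile together; the patch itself keeps the name rxe_map:

typedef unsigned long long u64;
#define PAGE_SIZE 4096UL

struct rxe_phys_buf {			/* removed by the patch */
	u64 addr;
	u64 size;			/* always one page; now taken from ibmr.page_size */
};

struct rxe_map_old {
	struct rxe_phys_buf buf[PAGE_SIZE / sizeof(struct rxe_phys_buf)];	/* 256 */
};

struct rxe_map_new {
	u64 addrs[PAGE_SIZE / sizeof(u64)];					/* 512 */
};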

Comments

Xiao Yang Nov. 18, 2022, 5:22 a.m. UTC | #1
Hi Jason, Ira and others,

Kindly ping. I hope you can review these patches.
Thank you in advance.

Best Regards,
Xiao Yang

On 2022/11/11 15:51, Xiao Yang wrote:
> 1) Remove rxe_phys_buf[n].size and use ibmr.page_size instead, since every buffer in a map covers one page of the same size.
> 2) Replace struct rxe_map's buf[] array of struct rxe_phys_buf with a plain u64 addrs[] array.
> 
> Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
Jason Gunthorpe Nov. 19, 2022, 1:20 a.m. UTC | #2
On Fri, Nov 11, 2022 at 07:51:24AM +0000, Xiao Yang wrote:
> 1) Remove rxe_phys_buf[n].size and use ibmr.page_size instead, since every buffer in a map covers one page of the same size.
> 2) Replace struct rxe_map's buf[] array of struct rxe_phys_buf with a plain u64 addrs[] array.

This almost certainly doesn't work, but here is a general sketch of how
all of this really should look:

diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index a22476d27b3843..7539cf3e00db55 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -68,7 +68,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
 		enum rxe_mr_copy_dir dir);
 int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
 	      void *addr, int length, enum rxe_mr_copy_dir dir);
-void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
 struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
 			 enum rxe_mr_lookup_type type);
 int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index b1423000e4bcda..7cd76f0213c265 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -65,41 +65,23 @@ static void rxe_mr_init(int access, struct rxe_mr *mr)
 
 static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
 {
-	int i;
-	int num_map;
-	struct rxe_map **map = mr->map;
+	XA_STATE(xas, &mr->pages, 0);
+	int i = 0;
 
-	num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
+	xa_init(&mr->pages);
 
-	mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
-	if (!mr->map)
-		goto err1;
-
-	for (i = 0; i < num_map; i++) {
-		mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
-		if (!mr->map[i])
-			goto err2;
-	}
-
-	BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
-
-	mr->map_shift = ilog2(RXE_BUF_PER_MAP);
-	mr->map_mask = RXE_BUF_PER_MAP - 1;
-
-	mr->num_buf = num_buf;
-	mr->num_map = num_map;
-	mr->max_buf = num_map * RXE_BUF_PER_MAP;
-
-	return 0;
-
-err2:
-	for (i--; i >= 0; i--)
-		kfree(mr->map[i]);
-
-	kfree(mr->map);
-	mr->map = NULL;
-err1:
-	return -ENOMEM;
+	do {
+		xas_lock(&xas);
+		while (i != num_buf) {
+			xas_store(&xas, XA_ZERO_ENTRY);
+			if (xas_error(&xas))
+				break;
+			xas_next(&xas);
+			i++;
+		}
+		xas_unlock(&xas);
+	} while (xas_nomem(&xas, GFP_KERNEL));
+	return xas_error(&xas);
 }
 
 void rxe_mr_init_dma(int access, struct rxe_mr *mr)
@@ -111,75 +93,66 @@ void rxe_mr_init_dma(int access, struct rxe_mr *mr)
 	mr->ibmr.type = IB_MR_TYPE_DMA;
 }
 
+static int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt)
+{
+	XA_STATE(xas, &mr->pages, 0);
+	struct sg_page_iter sg_iter;
+
+	__sg_page_iter_start(&sg_iter, sgt->sgl, sgt->orig_nents, 0);
+	if (!__sg_page_iter_next(&sg_iter))
+		return 0;
+	do {
+		xas_lock(&xas);
+		while (true) {
+			if (xas.xa_index &&
+			    WARN_ON(sg_iter.sg_pgoffset % PAGE_SIZE)) {
+				xas_set_err(&xas, -EINVAL);
+				break;
+			}
+			xas_store(&xas, sg_page_iter_page(&sg_iter));
+			if (xas_error(&xas))
+				break;
+			xas_next(&xas);
+			if (!__sg_page_iter_next(&sg_iter))
+				break;
+		}
+		xas_unlock(&xas);
+	} while (xas_nomem(&xas, GFP_KERNEL));
+
+	return xas_error(&xas);
+}
+
 int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
 		     int access, struct rxe_mr *mr)
 {
-	struct rxe_map		**map;
-	struct rxe_phys_buf	*buf = NULL;
-	struct ib_umem		*umem;
-	struct sg_page_iter	sg_iter;
-	int			num_buf;
-	void			*vaddr;
+	struct ib_umem *umem;
 	int err;
 
+	xa_init(&mr->pages);
+
 	umem = ib_umem_get(&rxe->ib_dev, start, length, access);
 	if (IS_ERR(umem)) {
 		rxe_dbg_mr(mr, "Unable to pin memory region err = %d\n",
 			(int)PTR_ERR(umem));
-		err = PTR_ERR(umem);
-		goto err_out;
-	}
-
-	num_buf = ib_umem_num_pages(umem);
-
-	rxe_mr_init(access, mr);
-
-	err = rxe_mr_alloc(mr, num_buf);
-	if (err) {
-		rxe_dbg_mr(mr, "Unable to allocate memory for map\n");
-		goto err_release_umem;
+		return PTR_ERR(umem);
 	}
 
 	mr->page_shift = PAGE_SHIFT;
 	mr->page_mask = PAGE_SIZE - 1;
+	err = rxe_mr_fill_pages_from_sgt(mr, &umem->sgt_append.sgt);
+	if (err)
+		goto err_release_umem;
 
-	num_buf			= 0;
-	map = mr->map;
-	if (length > 0) {
-		buf = map[0]->buf;
-
-		for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {
-			if (num_buf >= RXE_BUF_PER_MAP) {
-				map++;
-				buf = map[0]->buf;
-				num_buf = 0;
-			}
-
-			vaddr = page_address(sg_page_iter_page(&sg_iter));
-			if (!vaddr) {
-				rxe_dbg_mr(mr, "Unable to get virtual address\n");
-				err = -ENOMEM;
-				goto err_release_umem;
-			}
-			buf->addr = (uintptr_t)vaddr;
-			buf->size = PAGE_SIZE;
-			num_buf++;
-			buf++;
-
-		}
-	}
-
+	rxe_mr_init(access, mr);
 	mr->umem = umem;
 	mr->access = access;
 	mr->offset = ib_umem_offset(umem);
 	mr->state = RXE_MR_STATE_VALID;
 	mr->ibmr.type = IB_MR_TYPE_USER;
-
 	return 0;
 
 err_release_umem:
 	ib_umem_release(umem);
-err_out:
 	return err;
 }
 
@@ -204,77 +177,44 @@ int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
 	return err;
 }
 
-static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
-			size_t *offset_out)
+static int rxe_mr_copy_xarray(struct rxe_mr *mr, void *mem,
+			      unsigned long start_index,
+			      unsigned int start_offset, unsigned int length,
+			      enum rxe_mr_copy_dir dir)
 {
-	size_t offset = iova - mr->ibmr.iova + mr->offset;
-	int			map_index;
-	int			buf_index;
-	u64			length;
-
-	if (likely(mr->page_shift)) {
-		*offset_out = offset & mr->page_mask;
-		offset >>= mr->page_shift;
-		*n_out = offset & mr->map_mask;
-		*m_out = offset >> mr->map_shift;
-	} else {
-		map_index = 0;
-		buf_index = 0;
-
-		length = mr->map[map_index]->buf[buf_index].size;
-
-		while (offset >= length) {
-			offset -= length;
-			buf_index++;
-
-			if (buf_index == RXE_BUF_PER_MAP) {
-				map_index++;
-				buf_index = 0;
-			}
-			length = mr->map[map_index]->buf[buf_index].size;
-		}
+	XA_STATE(xas, &mr->pages, start_index);
+	struct page *entry;
 
-		*m_out = map_index;
-		*n_out = buf_index;
-		*offset_out = offset;
-	}
-}
+	rcu_read_lock();
+	while (length) {
+		unsigned int nbytes;
+		void *vpage;
 
-void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
-{
-	size_t offset;
-	int m, n;
-	void *addr;
-
-	if (mr->state != RXE_MR_STATE_VALID) {
-		rxe_dbg_mr(mr, "Not in valid state\n");
-		addr = NULL;
-		goto out;
-	}
+		entry = xas_next(&xas);
+		if (xas_retry(&xas, entry))
+			continue;
 
-	if (!mr->map) {
-		addr = (void *)(uintptr_t)iova;
-		goto out;
-	}
+		/* Walked past the end of the array */
+		if (WARN_ON(!entry)) {
+			rcu_read_unlock();
+			return -1;
+		}
 
-	if (mr_check_range(mr, iova, length)) {
-		rxe_dbg_mr(mr, "Range violation\n");
-		addr = NULL;
-		goto out;
-	}
+		nbytes = min_t(unsigned int, length, PAGE_SIZE - start_offset);
 
-	lookup_iova(mr, iova, &m, &n, &offset);
+		vpage = kmap_local_page(entry);
+		if (dir == RXE_FROM_MR_OBJ)
+			memcpy(mem, vpage + start_offset, nbytes);
+		else
+			memcpy(vpage + start_offset, mem, nbytes);
+		kunmap_local(vpage);
 
-	if (offset + length > mr->map[m]->buf[n].size) {
-		rxe_dbg_mr(mr, "Crosses page boundary\n");
-		addr = NULL;
-		goto out;
+		mem += nbytes;
+		start_offset = 0;
+		length -= nbytes;
 	}
-
-	addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
-
-out:
-	return addr;
+	rcu_read_unlock();
+	return 0;
 }
 
 /* copy data from a range (vaddr, vaddr+length-1) to or from
@@ -283,75 +223,9 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
 int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
 		enum rxe_mr_copy_dir dir)
 {
-	int			err;
-	int			bytes;
-	u8			*va;
-	struct rxe_map		**map;
-	struct rxe_phys_buf	*buf;
-	int			m;
-	int			i;
-	size_t			offset;
-
-	if (length == 0)
-		return 0;
-
-	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
-		u8 *src, *dest;
-
-		src = (dir == RXE_TO_MR_OBJ) ? addr : ((void *)(uintptr_t)iova);
-
-		dest = (dir == RXE_TO_MR_OBJ) ? ((void *)(uintptr_t)iova) : addr;
-
-		memcpy(dest, src, length);
-
-		return 0;
-	}
-
-	WARN_ON_ONCE(!mr->map);
-
-	err = mr_check_range(mr, iova, length);
-	if (err) {
-		err = -EFAULT;
-		goto err1;
-	}
-
-	lookup_iova(mr, iova, &m, &i, &offset);
-
-	map = mr->map + m;
-	buf	= map[0]->buf + i;
-
-	while (length > 0) {
-		u8 *src, *dest;
-
-		va	= (u8 *)(uintptr_t)buf->addr + offset;
-		src = (dir == RXE_TO_MR_OBJ) ? addr : va;
-		dest = (dir == RXE_TO_MR_OBJ) ? va : addr;
-
-		bytes	= buf->size - offset;
-
-		if (bytes > length)
-			bytes = length;
-
-		memcpy(dest, src, bytes);
-
-		length	-= bytes;
-		addr	+= bytes;
-
-		offset	= 0;
-		buf++;
-		i++;
-
-		if (i == RXE_BUF_PER_MAP) {
-			i = 0;
-			map++;
-			buf = map[0]->buf;
-		}
-	}
-
-	return 0;
-
-err1:
-	return err;
+	/* FIXME: Check that IOVA & length are valid, permissions, etc */
+	return rxe_mr_copy_xarray(mr, addr, rxe_mr_iova_to_index(iova),
+				  iova % PAGE_SIZE, length, dir);
 }
 
 /* copy data in or out of a wqe, i.e. sg list
@@ -609,15 +483,9 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 void rxe_mr_cleanup(struct rxe_pool_elem *elem)
 {
 	struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
-	int i;
 
 	rxe_put(mr_pd(mr));
 	ib_umem_release(mr->umem);
 
-	if (mr->map) {
-		for (i = 0; i < mr->num_map; i++)
-			kfree(mr->map[i]);
-
-		kfree(mr->map);
-	}
+	xa_destroy(&mr->pages);
 }
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 6761bcd1d4d8f7..c1ed200e797779 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -631,22 +631,30 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
 	}
 
 	if (!res->replay) {
+		u64 iova = qp->resp.va + qp->resp.offset;
+		unsigned int page_offset = iova % PAGE_SIZE;
+		struct page *page;
+
 		if (mr->state != RXE_MR_STATE_VALID) {
 			ret = RESPST_ERR_RKEY_VIOLATION;
 			goto out;
 		}
 
-		vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset,
-					sizeof(u64));
-
 		/* check vaddr is 8 bytes aligned. */
-		if (!vaddr || (uintptr_t)vaddr & 7) {
+		if (iova & 7) {
 			ret = RESPST_ERR_MISALIGNED_ATOMIC;
 			goto out;
 		}
 
+		/*
+		 * FIXME: Need to ensure the xarray isn't changing while
+		 * this is happening
+		 */
+		page = xa_load(&mr->pages, rxe_mr_iova_to_index(iova));
+
+		vaddr = kmap_local_page(page);
 		spin_lock_bh(&atomic_ops_lock);
-		res->atomic.orig_val = value = *vaddr;
+		res->atomic.orig_val = value = *(vaddr + page_offset);
 
 		if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
 			if (value == atmeth_comp(pkt))
@@ -655,8 +663,9 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
 			value += atmeth_swap_add(pkt);
 		}
 
-		*vaddr = value;
+		*(vaddr + page_offset) = value;
 		spin_unlock_bh(&atomic_ops_lock);
+		kunmap_local(vaddr);
 
 		qp->resp.msn++;
 
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 025b35bf014e2a..092994a0ec947a 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -948,23 +948,44 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 	return ERR_PTR(err);
 }
 
-static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
+static int rxe_mr_fill_pages_from_sgl_prefix(struct rxe_mr *mr,
+					     struct scatterlist *sgl,
+					     unsigned int sg_nents,
+					     unsigned int *sg_offset)
 {
-	struct rxe_mr *mr = to_rmr(ibmr);
-	struct rxe_map *map;
-	struct rxe_phys_buf *buf;
-
-	if (unlikely(mr->nbuf == mr->num_buf))
-		return -ENOMEM;
-
-	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
-	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];
+	XA_STATE(xas, &mr->pages, 0);
+	struct sg_page_iter sg_iter;
+	struct scatterlist *cur_sg;
+	unsigned int done_sg = 1;
 
-	buf->addr = addr;
-	buf->size = ibmr->page_size;
-	mr->nbuf++;
+	__sg_page_iter_start(&sg_iter, sgl, sg_nents, *sg_offset);
+	if (!__sg_page_iter_next(&sg_iter))
+		return 0;
+	cur_sg = sg_iter.sg;
+	do {
+		xas_lock(&xas);
+		while (true) {
+			if (xas.xa_index && sg_iter.sg_pgoffset % PAGE_SIZE) {
+				*sg_offset = sg_iter.sg_pgoffset;
+				break;
+			}
+			xas_store(&xas, sg_page_iter_page(&sg_iter));
+			if (xas_error(&xas))
+				break;
+			xas_next(&xas);
+			if (!__sg_page_iter_next(&sg_iter))
+				break;
+			if (cur_sg != sg_iter.sg) {
+				done_sg++;
+				cur_sg = sg_iter.sg;
+			}
+		}
+		xas_unlock(&xas);
+	} while (xas_nomem(&xas, GFP_KERNEL));
 
-	return 0;
+	if (xas_error(&xas))
+		return xas_error(&xas);
+	return done_sg;
 }
 
 static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
@@ -974,8 +995,7 @@ static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 	int n;
 
 	mr->nbuf = 0;
-
-	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
+	n = rxe_mr_fill_pages_from_sgl_prefix(mr, sg, sg_nents, sg_offset);
 
 	mr->page_shift = ilog2(ibmr->page_size);
 	mr->page_mask = ibmr->page_size - 1;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 22a299b0a9f0a8..6eebbd7b91a687 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -320,7 +320,7 @@ struct rxe_mr {
 
 	atomic_t		num_mw;
 
-	struct rxe_map		**map;
+	struct xarray		pages;
 };
 
 enum rxe_mw_state {
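
The rxe_mr_alloc() rewrite above leans on the advanced xarray API's retry idiom: store entries while holding the xa_lock, and when a store fails for lack of memory, drop the lock, let xas_nomem() preallocate with GFP_KERNEL, and retry. That idiom isolated, as a minimal sketch (the function name here is illustrative, not from the sketch):

#include <linux/xarray.h>

/* Reserve slots 0..nr-1 up front so that later xas_store() calls in
 * atomic context cannot fail with -ENOMEM. XA_ZERO_ENTRY marks a slot
 * as reserved while xa_load() still returns NULL for it.
 */
static int reserve_entries(struct xarray *xa, unsigned long nr)
{
	XA_STATE(xas, xa, 0);
	unsigned long i = 0;

	do {
		xas_lock(&xas);
		while (i != nr) {
			xas_store(&xas, XA_ZERO_ENTRY);
			if (xas_error(&xas))
				break;
			xas_next(&xas);
			i++;
		}
		xas_unlock(&xas);
		/* xas_nomem() allocates outside the lock; true means retry */
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}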
Xiao Yang Nov. 20, 2022, 1:36 p.m. UTC | #3
On 2022/11/19 9:20, Jason Gunthorpe wrote:
> This almost certainly doesn't work, but here is a general sketch of
> how all of this really should look:

Hi Jason,

Thank you very much for the sketch. I will try to understand it.

Best Regards,
Xiao Yang
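
The atomic_reply() hunk in the sketch also shows the access pattern that replaces iova_to_vaddr(): look the page up in the xarray, map it with kmap_local_page() only for the duration of the update, then unmap. A hedged sketch of that pattern (assuming pages are indexed by iova >> PAGE_SHIFT, which is presumably what the sketch's undefined rxe_mr_iova_to_index() helper computes, and ignoring the locking FIXME noted in the sketch):

#include <linux/highmem.h>
#include <linux/xarray.h>

/* Read-modify-write a u64 that lives inside an xarray-tracked page.
 * Illustrative only: no range or alignment checks, and nothing stops
 * the xarray from changing underneath us.
 */
static u64 fetch_add_u64(struct xarray *pages, u64 iova, u64 add)
{
	unsigned int page_offset = iova & (PAGE_SIZE - 1);
	struct page *page = xa_load(pages, iova >> PAGE_SHIFT);
	u64 *vaddr, orig;

	vaddr = kmap_local_page(page) + page_offset;
	orig = *vaddr;
	*vaddr = orig + add;
	kunmap_local(vaddr);
	return orig;
}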

Patch

diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index bc081002bddc..4438eb8a3727 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -115,7 +115,6 @@  int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
 		     int access, struct rxe_mr *mr)
 {
 	struct rxe_map		**map;
-	struct rxe_phys_buf	*buf = NULL;
 	struct ib_umem		*umem;
 	struct sg_page_iter	sg_iter;
 	int			num_buf;
@@ -144,16 +143,14 @@  int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
 
 	mr->page_shift = PAGE_SHIFT;
 	mr->page_mask = PAGE_SIZE - 1;
+	mr->ibmr.page_size = PAGE_SIZE;
 
-	num_buf			= 0;
+	num_buf	= 0;
 	map = mr->map;
 	if (length > 0) {
-		buf = map[0]->buf;
-
 		for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {
 			if (num_buf >= RXE_BUF_PER_MAP) {
 				map++;
-				buf = map[0]->buf;
 				num_buf = 0;
 			}
 
@@ -165,10 +162,8 @@  int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
 				goto err_cleanup_map;
 			}
 
-			buf->addr = (uintptr_t)vaddr;
-			buf->size = PAGE_SIZE;
+			map[0]->addrs[num_buf] = (uintptr_t)vaddr;
 			num_buf++;
-			buf++;
 
 		}
 	}
@@ -216,9 +211,9 @@  static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
 			size_t *offset_out)
 {
 	size_t offset = iova - mr->ibmr.iova + mr->offset;
+	u64 length = mr->ibmr.page_size;
 	int			map_index;
-	int			buf_index;
-	u64			length;
+	int			addr_index;
 
 	if (likely(mr->page_shift)) {
 		*offset_out = offset & mr->page_mask;
@@ -227,23 +222,20 @@  static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
 		*m_out = offset >> mr->map_shift;
 	} else {
 		map_index = 0;
-		buf_index = 0;
-
-		length = mr->map[map_index]->buf[buf_index].size;
+		addr_index = 0;
 
 		while (offset >= length) {
 			offset -= length;
-			buf_index++;
+			addr_index++;
 
-			if (buf_index == RXE_BUF_PER_MAP) {
+			if (addr_index == RXE_BUF_PER_MAP) {
 				map_index++;
-				buf_index = 0;
+				addr_index = 0;
 			}
-			length = mr->map[map_index]->buf[buf_index].size;
 		}
 
 		*m_out = map_index;
-		*n_out = buf_index;
+		*n_out = addr_index;
 		*offset_out = offset;
 	}
 }
@@ -273,13 +265,13 @@  void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
 
 	lookup_iova(mr, iova, &m, &n, &offset);
 
-	if (offset + length > mr->map[m]->buf[n].size) {
+	if (offset + length > mr->ibmr.page_size) {
 		pr_warn("crosses page boundary\n");
 		addr = NULL;
 		goto out;
 	}
 
-	addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
+	addr = (void *)(uintptr_t)mr->map[m]->addrs[n] + offset;
 
 out:
 	return addr;
@@ -294,8 +286,6 @@  int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
 	int			err;
 	int			bytes;
 	u8			*va;
-	struct rxe_map		**map;
-	struct rxe_phys_buf	*buf;
 	int			m;
 	int			i;
 	size_t			offset;
@@ -325,17 +315,14 @@  int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
 
 	lookup_iova(mr, iova, &m, &i, &offset);
 
-	map = mr->map + m;
-	buf	= map[0]->buf + i;
-
 	while (length > 0) {
 		u8 *src, *dest;
 
-		va	= (u8 *)(uintptr_t)buf->addr + offset;
+		va	= (u8 *)(uintptr_t)mr->map[m]->addrs[i] + offset;
 		src = (dir == RXE_TO_MR_OBJ) ? addr : va;
 		dest = (dir == RXE_TO_MR_OBJ) ? va : addr;
 
-		bytes	= buf->size - offset;
+		bytes	= mr->ibmr.page_size - offset;
 
 		if (bytes > length)
 			bytes = length;
@@ -346,13 +333,11 @@  int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
 		addr	+= bytes;
 
 		offset	= 0;
-		buf++;
 		i++;
 
 		if (i == RXE_BUF_PER_MAP) {
 			i = 0;
-			map++;
-			buf = map[0]->buf;
+			m++;
 		}
 	}
 
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index bcdfdadaebbc..13e4d660cb02 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -948,16 +948,12 @@  static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
 {
 	struct rxe_mr *mr = to_rmr(ibmr);
 	struct rxe_map *map;
-	struct rxe_phys_buf *buf;
 
 	if (unlikely(mr->nbuf == mr->num_buf))
 		return -ENOMEM;
 
 	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
-	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];
-
-	buf->addr = addr;
-	buf->size = ibmr->page_size;
+	map->addrs[mr->nbuf % RXE_BUF_PER_MAP] = addr;
 	mr->nbuf++;
 
 	return 0;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 22a299b0a9f0..d136f02d5b56 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -277,15 +277,10 @@  enum rxe_mr_lookup_type {
 	RXE_LOOKUP_REMOTE,
 };
 
-#define RXE_BUF_PER_MAP		(PAGE_SIZE / sizeof(struct rxe_phys_buf))
-
-struct rxe_phys_buf {
-	u64      addr;
-	u64      size;
-};
+#define RXE_BUF_PER_MAP		(PAGE_SIZE / sizeof(u64))
 
 struct rxe_map {
-	struct rxe_phys_buf	buf[RXE_BUF_PER_MAP];
+	u64 addrs[RXE_BUF_PER_MAP];
 };
 
 static inline int rkey_is_mw(u32 rkey)
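
For completeness, the power-of-two fast path kept by the patched lookup_iova() reduces to three shift/mask steps. A standalone userspace sketch with a hardcoded 4 KiB page size (names and values here are illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12				/* 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define BUF_PER_MAP	(PAGE_SIZE / sizeof(uint64_t))	/* 512 addrs per map */
#define MAP_SHIFT	9				/* ilog2(BUF_PER_MAP) */

/* Mirrors lookup_iova()'s fast path: byte offset within the MR ->
 * page number -> (map slot m, addrs[] slot n, offset within page).
 */
static void lookup(uint64_t offset, int *m, int *n, size_t *off)
{
	*off = offset & (PAGE_SIZE - 1);
	offset >>= PAGE_SHIFT;
	*n = offset & (BUF_PER_MAP - 1);
	*m = offset >> MAP_SHIFT;
}

int main(void)
{
	int m, n;
	size_t off;

	/* page 3 * 512 + 5 of the MR, byte 42 within that page */
	lookup((3 * BUF_PER_MAP + 5) * PAGE_SIZE + 42, &m, &n, &off);
	printf("m=%d n=%d offset=%zu\n", m, n, off);	/* m=3 n=5 offset=42 */
	return 0;
}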