
[v2] RDMA: Add ib_virt_dma_to_page()

Message ID 0-v2-05ea785520ed+10-ib_virt_page_jgg@nvidia.com (mailing list archive)
State Accepted

Commit Message

Jason Gunthorpe April 14, 2023, 1:58 p.m. UTC
Make it clearer what is going on by adding a function to go back from the
"virtual" dma_addr to a kva and another to a struct page. This is used in the
ib_uses_virt_dma() style drivers (siw, rxe, hfi, qib).

Call them instead of a naked cast and virt_to_page() when working with dma_addr
values encoded by the various ib_map functions.

This also fixes the virt_to_page() casting problem Linus Walleij has been
chasing.

Cc: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/infiniband/sw/rxe/rxe_mr.c    | 16 ++++++++--------
 drivers/infiniband/sw/rxe/rxe_verbs.c |  2 +-
 drivers/infiniband/sw/siw/siw_qp_rx.c |  6 +++---
 drivers/infiniband/sw/siw/siw_qp_tx.c | 19 ++++++-------------
 drivers/infiniband/sw/siw/siw_verbs.c |  4 ++--
 include/rdma/ib_verbs.h               | 25 +++++++++++++++++++++++++
 6 files changed, 45 insertions(+), 27 deletions(-)

v2:
 - Add ib_virt_dma_to_ptr() as well to solve the compilation warning problem on 32-bit builds (illustrated below).
v1: https://lore.kernel.org/r/0-v1-789ba72d2950+2e-ib_virt_page_jgg@nvidia.com/
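
For reference, a minimal stand-alone sketch of the 32-bit warning the v2 note
refers to (plain userspace C, not kernel code; the function names are
illustrative only): where pointers are 32 bits wide, casting a u64 straight to
a pointer draws gcc's -Wint-to-pointer-cast, while the explicit uintptr_t step
that ib_virt_dma_to_ptr() takes compiles quietly at either pointer width.

#include <stdint.h>

typedef uint64_t u64;

static inline void *bad_cast(u64 dma_addr)
{
	/* on 32-bit: "cast to pointer from integer of different size" */
	return (void *)dma_addr;
}

static inline void *good_cast(u64 dma_addr)
{
	/* explicit narrowing through uintptr_t; silent at either width */
	return (void *)(uintptr_t)dma_addr;
}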


base-commit: a2e20b29cf9ce6d2070a6e36666e2239f7f9625b
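
To make the helpers' contract concrete, here is a hedged, self-contained
userspace model of the round-trip (PAGE_SIZE, the page-base stand-in, and
main() are assumptions for illustration, not kernel API): in
ib_uses_virt_dma() drivers the dma_addr handed out by the ib_map functions is
simply the kernel virtual address widened to u64, and the new helpers undo
that widening.

#include <assert.h>
#include <stdint.h>

typedef uint64_t u64;
#define PAGE_SIZE 4096UL

/* Same body as the patch adds to include/rdma/ib_verbs.h. */
static inline void *ib_virt_dma_to_ptr(u64 dma_addr)
{
	return (void *)(uintptr_t)dma_addr;
}

/*
 * Stand-in for ib_virt_dma_to_page(): userspace has no struct page, so
 * model virt_to_page() as masking down to the containing page boundary.
 */
static inline void *ib_virt_dma_to_page_base(u64 dma_addr)
{
	return (void *)((uintptr_t)ib_virt_dma_to_ptr(dma_addr) &
			~(PAGE_SIZE - 1));
}

int main(void)
{
	char buf[8] = "payload";
	u64 dma_addr = (uintptr_t)buf;	/* what the virt_dma map step yields */

	/* the "dma address" decodes straight back to the original kva */
	assert(ib_virt_dma_to_ptr(dma_addr) == (void *)buf);
	assert((uintptr_t)ib_virt_dma_to_page_base(dma_addr) % PAGE_SIZE == 0);
	return 0;
}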

Comments

Leon Romanovsky April 16, 2023, 10:29 a.m. UTC | #1
On Fri, 14 Apr 2023 10:58:29 -0300, Jason Gunthorpe wrote:
> Make it clearer what is going on by adding a function to go back from the
> "virtual" dma_addr to a kva and another to a struct page. This is used in the
> ib_uses_virt_dma() style drivers (siw, rxe, hfi, qib).
> 
> Call them instead of a naked cast and virt_to_page() when working with dma_addr
> values encoded by the various ib_map functions.
> 
> [...]

Applied, thanks!

[1/1] RDMA: Add ib_virt_dma_to_page()
      https://git.kernel.org/rdma/rdma/c/8d7c7c0eeb7428

Best regards,
Leon
Linus Walleij April 17, 2023, 7:27 a.m. UTC | #2
On Fri, Apr 14, 2023 at 3:58 PM Jason Gunthorpe <jgg@nvidia.com> wrote:

> Make it clearer what is going on by adding a function to go back from the
> "virtual" dma_addr to a kva and another to a struct page. This is used in the
> ib_uses_virt_dma() style drivers (siw, rxe, hfi, qib).
>
> Call them instead of a naked cast and virt_to_page() when working with dma_addr
> values encoded by the various ib_map functions.
>
> This also fixes the virt_to_page() casting problem Linus Walleij has been
> chasing.
>
> Cc: Linus Walleij <linus.walleij@linaro.org>
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

Too late but FWIW:
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>

Thanks for fixing this up so meticulously, Jason!

Yours,
Linus Walleij
Bernard Metzler April 17, 2023, 9:02 a.m. UTC | #3
> -----Original Message-----
> From: Jason Gunthorpe <jgg@nvidia.com>
> Sent: Friday, 14 April 2023 15:58
> To: Bernard Metzler <BMT@zurich.ibm.com>; Leon Romanovsky
> <leon@kernel.org>; linux-rdma@vger.kernel.org; Zhu Yanjun
> <zyjzyj2000@gmail.com>
> Cc: Linus Walleij <linus.walleij@linaro.org>
> Subject: [EXTERNAL] [PATCH v2] RDMA: Add ib_virt_dma_to_page()
> 
> Make it clearer what is going on by adding a function to go back from the
> "virtual" dma_addr to a kva and another to a struct page. This is used in the
> ib_uses_virt_dma() style drivers (siw, rxe, hfi, qib).
> 
> Call them instead of a naked cast and virt_to_page() when working with dma_addr
> values encoded by the various ib_map functions.
> 
> This also fixes the virt_to_page() casting problem Linus Walleij has been
> chasing.
> 
> Cc: Linus Walleij <linus.walleij@linaro.org>
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> 
> [...]


Thank you, Jason. Looks cleaner than all that explicit casting!

Patch

diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 1e17f8086d59a8..0e538fafcc20ff 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -210,10 +210,10 @@  int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
 	return err;
 }
 
-static int rxe_set_page(struct ib_mr *ibmr, u64 iova)
+static int rxe_set_page(struct ib_mr *ibmr, u64 dma_addr)
 {
 	struct rxe_mr *mr = to_rmr(ibmr);
-	struct page *page = virt_to_page(iova & mr->page_mask);
+	struct page *page = ib_virt_dma_to_page(dma_addr);
 	bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
 	int err;
 
@@ -279,16 +279,16 @@  static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
 	return 0;
 }
 
-static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 iova, void *addr,
+static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 dma_addr, void *addr,
 			    unsigned int length, enum rxe_mr_copy_dir dir)
 {
-	unsigned int page_offset = iova & (PAGE_SIZE - 1);
+	unsigned int page_offset = dma_addr & (PAGE_SIZE - 1);
 	unsigned int bytes;
 	struct page *page;
 	u8 *va;
 
 	while (length) {
-		page = virt_to_page(iova & mr->page_mask);
+		page = ib_virt_dma_to_page(dma_addr);
 		bytes = min_t(unsigned int, length,
 				PAGE_SIZE - page_offset);
 		va = kmap_local_page(page);
@@ -300,7 +300,7 @@  static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 iova, void *addr,
 
 		kunmap_local(va);
 		page_offset = 0;
-		iova += bytes;
+		dma_addr += bytes;
 		addr += bytes;
 		length -= bytes;
 	}
@@ -488,7 +488,7 @@  int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
 
 	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
 		page_offset = iova & (PAGE_SIZE - 1);
-		page = virt_to_page(iova & PAGE_MASK);
+		page = ib_virt_dma_to_page(iova);
 	} else {
 		unsigned long index;
 		int err;
@@ -545,7 +545,7 @@  int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
 
 	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
 		page_offset = iova & (PAGE_SIZE - 1);
-		page = virt_to_page(iova & PAGE_MASK);
+		page = ib_virt_dma_to_page(iova);
 	} else {
 		unsigned long index;
 		int err;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 4e2db7c2e4ed78..12819153bda769 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -760,7 +760,7 @@  static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
 	int i;
 
 	for (i = 0; i < ibwr->num_sge; i++, sge++) {
-		memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
+		memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length);
 		p += sge->length;
 	}
 }
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index fd721cc19682eb..58bbf738e4e599 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -139,7 +139,7 @@  static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
 			break;
 
 		bytes = min(bytes, len);
-		if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) ==
+		if (siw_rx_kva(srx, ib_virt_dma_to_ptr(buf_addr), bytes) ==
 		    bytes) {
 			copied += bytes;
 			offset += bytes;
@@ -487,7 +487,7 @@  int siw_proc_send(struct siw_qp *qp)
 		mem_p = *mem;
 		if (mem_p->mem_obj == NULL)
 			rv = siw_rx_kva(srx,
-				(void *)(uintptr_t)(sge->laddr + frx->sge_off),
+				ib_virt_dma_to_ptr(sge->laddr + frx->sge_off),
 				sge_bytes);
 		else if (!mem_p->is_pbl)
 			rv = siw_rx_umem(srx, mem_p->umem,
@@ -852,7 +852,7 @@  int siw_proc_rresp(struct siw_qp *qp)
 
 	if (mem_p->mem_obj == NULL)
 		rv = siw_rx_kva(srx,
-			(void *)(uintptr_t)(sge->laddr + wqe->processed),
+			ib_virt_dma_to_ptr(sge->laddr + wqe->processed),
 			bytes);
 	else if (!mem_p->is_pbl)
 		rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 6bb9e9e81ff4ca..4b292e0504f1a1 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -29,7 +29,7 @@  static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
 	dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
 
 	if (paddr)
-		return virt_to_page((void *)(uintptr_t)paddr);
+		return ib_virt_dma_to_page(paddr);
 
 	return NULL;
 }
@@ -56,8 +56,7 @@  static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
 
 		if (!mem->mem_obj) {
 			/* Kernel client using kva */
-			memcpy(paddr,
-			       (const void *)(uintptr_t)sge->laddr, bytes);
+			memcpy(paddr, ib_virt_dma_to_ptr(sge->laddr), bytes);
 		} else if (c_tx->in_syscall) {
 			if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),
 					   bytes))
@@ -477,7 +476,7 @@  static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 			 * or memory region with assigned kernel buffer
 			 */
 			iov[seg].iov_base =
-				(void *)(uintptr_t)(sge->laddr + sge_off);
+				ib_virt_dma_to_ptr(sge->laddr + sge_off);
 			iov[seg].iov_len = sge_len;
 
 			if (do_crc)
@@ -537,19 +536,13 @@  static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 				 * Cast to an uintptr_t to preserve all 64 bits
 				 * in sge->laddr.
 				 */
-				uintptr_t va = (uintptr_t)(sge->laddr + sge_off);
+				u64 va = sge->laddr + sge_off;
 
-				/*
-				 * virt_to_page() takes a (void *) pointer
-				 * so cast to a (void *) meaning it will be 64
-				 * bits on a 64 bit platform and 32 bits on a
-				 * 32 bit platform.
-				 */
-				page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
+				page_array[seg] = ib_virt_dma_to_page(va);
 				if (do_crc)
 					crypto_shash_update(
 						c_tx->mpa_crc_hd,
-						(void *)va,
+						ib_virt_dma_to_ptr(va),
 						plen);
 			}
 
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 906fde1a2a0de2..398ec13db62481 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -660,7 +660,7 @@  static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
 			bytes = -EINVAL;
 			break;
 		}
-		memcpy(kbuf, (void *)(uintptr_t)core_sge->addr,
+		memcpy(kbuf, ib_virt_dma_to_ptr(core_sge->addr),
 		       core_sge->length);
 
 		kbuf += core_sge->length;
@@ -1523,7 +1523,7 @@  int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
 		}
 		siw_dbg_mem(mem,
 			"sge[%d], size %u, addr 0x%p, total %lu\n",
-			i, pble->size, (void *)(uintptr_t)pble->addr,
+			i, pble->size, ib_virt_dma_to_ptr(pble->addr),
 			pbl_size);
 	}
 	rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 949cf4ffc536c5..1e7774ac808f08 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -4035,6 +4035,31 @@  static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
 	return dma_pci_p2pdma_supported(dev->dma_device);
 }
 
+/**
+ * ib_virt_dma_to_ptr - Convert a dma_addr to a kernel pointer
+ * @dma_addr: The DMA address
+ *
+ * Used by ib_uses_virt_dma() devices to get back to the kernel pointer after
+ * going through the dma_addr marshalling.
+ */
+static inline void *ib_virt_dma_to_ptr(u64 dma_addr)
+{
+	/* virt_dma mode maps kvas directly into the dma addr */
+	return (void *)(uintptr_t)dma_addr;
+}
+
+/**
+ * ib_virt_dma_to_page - Convert a dma_addr to a struct page
+ * @dma_addr: The DMA address
+ *
+ * Used by ib_uses_virt_dma() devices to get back to the struct page after going
+ * through the dma_addr marshalling.
+ */
+static inline struct page *ib_virt_dma_to_page(u64 dma_addr)
+{
+	return virt_to_page(ib_virt_dma_to_ptr(dma_addr));
+}
+
 /**
  * ib_dma_mapping_error - check a DMA addr for error
  * @dev: The device for which the dma_addr was created
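
For illustration, a userspace model of the consumer pattern the rxe hunks
convert to (the page-base stand-in and the identity "kmap" are assumptions
carried over from the sketch under the commit message, not kernel API):
resolve the page containing dma_addr, copy up to the end of that page, then
advance dma_addr and reset the intra-page offset, exactly as
rxe_mr_copy_dma() does above.

#include <stdint.h>
#include <string.h>

typedef uint64_t u64;
#define PAGE_SIZE 4096UL

/* Userspace stand-in: page base as a plain pointer, identity "kmap". */
static inline unsigned char *page_base(u64 dma_addr)
{
	return (unsigned char *)((uintptr_t)dma_addr & ~(PAGE_SIZE - 1));
}

/* Model of rxe_mr_copy_dma() copying out of a DMA-type MR. */
void copy_dma(u64 dma_addr, void *addr, unsigned int length)
{
	unsigned int page_offset = dma_addr & (PAGE_SIZE - 1);
	unsigned char *dst = addr;

	while (length) {
		unsigned char *va = page_base(dma_addr);
		unsigned int bytes = (unsigned int)(length < PAGE_SIZE - page_offset ?
						    length : PAGE_SIZE - page_offset);

		memcpy(dst, va + page_offset, bytes);
		page_offset = 0;	/* only the first chunk can be misaligned */
		dma_addr += bytes;
		dst += bytes;
		length -= bytes;
	}
}

In the kernel proper the va would come from
kmap_local_page(ib_virt_dma_to_page(dma_addr)) and be released with
kunmap_local(), as the rxe_mr_copy_dma() hunk shows.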