Message ID | 20241009015903.801987-2-matsuda-daisuke@fujitsu.com (mailing list archive)
---|---
State | New
Series | On-Demand Paging on SoftRoCE
On 2024/10/9 9:58, Daisuke Matsuda wrote:
> Some functions in rxe_mr.c are going to be used in rxe_odp.c, which is to
> be created in the subsequent patch. List the declarations of the functions
> in rxe_loc.h.
>
> Signed-off-by: Daisuke Matsuda <matsuda-daisuke@fujitsu.com>
> ---
>  drivers/infiniband/sw/rxe/rxe_loc.h | 8 ++++++++
>  drivers/infiniband/sw/rxe/rxe_mr.c  | 11 +++--------
>  2 files changed, 11 insertions(+), 8 deletions(-)
>
[...]
> +static inline unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
> +{
> +	return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
> +}

The return type of rxe_mr_iova_to_index() is "unsigned long". On some 32-bit
architectures, unsigned long is 32 bits, while the type of iova is u64. It
would be better to return u64 instead of "unsigned long".

Zhu Yanjun
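For illustration, a small standalone C program (a hypothetical userspace sketch, not part of the driver or of the thread) shows the narrowing Zhu Yanjun is pointing at: on an ILP32 target, storing a u64-sized value in an unsigned long silently drops the high 32 bits.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t index = 0x100000001ULL;	/* a value that needs more than 32 bits */
	unsigned long narrowed = index;		/* unsigned long is 32 bits on ILP32 targets */

	/* On a 32-bit (ILP32) build this prints 1: the high bits are
	 * silently discarded. On an LP64 build it prints 4294967297.
	 */
	printf("%lu\n", narrowed);
	return 0;
}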
On Wed, Oct 9, 2024 11:13 PM Zhu Yanjun wrote:
>
> On 2024/10/9 9:58, Daisuke Matsuda wrote:
> > Some functions in rxe_mr.c are going to be used in rxe_odp.c, which is to
> > be created in the subsequent patch. List the declarations of the functions
> > in rxe_loc.h.
> >
[...]
> > +static inline unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
> > +{
> > +	return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
> > +}
>
> The return type of rxe_mr_iova_to_index() is "unsigned long". On some
> 32-bit architectures, unsigned long is 32 bits, while the type of iova
> is u64. It would be better to return u64 instead of "unsigned long".
>
> Zhu Yanjun

Hi, thanks for the comment.

I think the current type declaration doesn't matter on a 32-bit OS.
The function returns the index of the page specified by 'iova'.
Assuming the typical page size of 4 KiB, a u32 index can accommodate
16 TiB in total, which is larger than the theoretical limit imposed
on 32-bit systems (i.e. 4 GiB, or 2^32 bytes).

Regards,
Daisuke Matsuda
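The arithmetic behind this reply can be spelled out as a compile-time check (a sketch only, assuming the 4 KiB page size mentioned above):

#include <stdint.h>

/* 2^32 page indexes * 4 KiB per page = 2^44 bytes = 16 TiB, which is far
 * beyond the 4 GiB (2^32 bytes) addressable on a 32-bit system, so a
 * 32-bit page index is wide enough for any MR such a system can register.
 */
_Static_assert((UINT64_C(1) << 32) * 4096 == (UINT64_C(1) << 44),
	       "2^32 pages of 4 KiB cover 16 TiB");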
On 2024/10/10 15:24, Daisuke Matsuda (Fujitsu) wrote:
> On Wed, Oct 9, 2024 11:13 PM Zhu Yanjun wrote:
>>
>> On 2024/10/9 9:58, Daisuke Matsuda wrote:
[...]
>>> +static inline unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
>>> +{
>>> +	return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
>>> +}
>>
>> The return type of rxe_mr_iova_to_index() is "unsigned long". On some
>> 32-bit architectures, unsigned long is 32 bits, while the type of iova
>> is u64. It would be better to return u64 instead of "unsigned long".
>>
>> Zhu Yanjun
>
> Hi, thanks for the comment.
>
> I think the current type declaration doesn't matter on a 32-bit OS.
> The function returns the index of the page specified by 'iova'.
> Assuming the typical page size of 4 KiB, a u32 index can accommodate
> 16 TiB in total, which is larger than the theoretical limit imposed
> on 32-bit systems (i.e. 4 GiB, or 2^32 bytes).

But on a 32-bit OS, this will likely trigger a "type does not match"
warning, because "unsigned long" is 32 bits there while u64 is always
64 bits. So it is better to use the u64 type; that will not produce any
warnings on either 32-bit or 64-bit systems.

Zhu Yanjun
On Thu, October 10, 2024 6:18 PM Zhu Yanjun wrote:
> On 2024/10/10 15:24, Daisuke Matsuda (Fujitsu) wrote:
[...]
> > I think the current type declaration doesn't matter on a 32-bit OS.
> > The function returns the index of the page specified by 'iova'.
> > Assuming the typical page size of 4 KiB, a u32 index can accommodate
> > 16 TiB in total, which is larger than the theoretical limit imposed
> > on 32-bit systems (i.e. 4 GiB, or 2^32 bytes).
>
> But on a 32-bit OS, this will likely trigger a "type does not match"
> warning, because "unsigned long" is 32 bits there while u64 is always
> 64 bits. So it is better to use the u64 type; that will not produce any
> warnings on either 32-bit or 64-bit systems.

That makes sense.

The function was created in commit 592627ccbdff.
Cf. https://github.com/torvalds/linux/commit/592627ccbdff0ec6fff00fc761142a76db750dd4

It seems to me that rxe_mr_iova_to_page_offset() also has the same problem.

=== rxe_mr.c ===
static unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
{
	return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
}

static unsigned long rxe_mr_iova_to_page_offset(struct rxe_mr *mr, u64 iova)
{
	return iova & (mr_page_size(mr) - 1);
}
=============

This patch in the ODP series just moves the function definition, so it is
not itself the cause of the problem. If my ODP v8 patches can go into the
for-next tree without objection, I can send a new patch to fix them
afterwards. Otherwise, we can fix them before I submit the ODP v9 patches.

Thanks,
Daisuke Matsuda
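If such a follow-up patch is sent, it might look roughly like the sketch below. This is only an assumption about a possible future fix, not the patch under review and not code that has been posted: the helper bodies stay the same and only the return types are widened to u64, as suggested in the discussion.

static inline u64 rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
{
	/* both shift operands are u64, so the page index is never truncated */
	return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
}

static u64 rxe_mr_iova_to_page_offset(struct rxe_mr *mr, u64 iova)
{
	/* the offset always fits within the page size; returning u64 just
	 * keeps the types consistent with iova
	 */
	return iova & (mr_page_size(mr) - 1);
}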
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index ded46119151b..866c36533b53 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -58,6 +58,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 
 /* rxe_mr.c */
 u8 rxe_get_next_key(u32 last_key);
+void rxe_mr_init(int access, struct rxe_mr *mr);
 void rxe_mr_init_dma(int access, struct rxe_mr *mr);
 int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
 		     int access, struct rxe_mr *mr);
@@ -69,6 +70,8 @@ int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
 	      void *addr, int length, enum rxe_mr_copy_dir dir);
 int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 		  int sg_nents, unsigned int *sg_offset);
+int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
+		       unsigned int length, enum rxe_mr_copy_dir dir);
 int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
 			u64 compare, u64 swap_add, u64 *orig_val);
 int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
@@ -80,6 +83,11 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 key);
 int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
 void rxe_mr_cleanup(struct rxe_pool_elem *elem);
 
+static inline unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
+{
+	return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
+}
+
 /* rxe_mw.c */
 int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
 int rxe_dealloc_mw(struct ib_mw *ibmw);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index da3dee520876..1f7b8cf93adc 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -45,7 +45,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
 	}
 }
 
-static void rxe_mr_init(int access, struct rxe_mr *mr)
+void rxe_mr_init(int access, struct rxe_mr *mr)
 {
 	u32 key = mr->elem.index << 8 | rxe_get_next_key(-1);
 
@@ -72,11 +72,6 @@ void rxe_mr_init_dma(int access, struct rxe_mr *mr)
 	mr->ibmr.type = IB_MR_TYPE_DMA;
 }
 
-static unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
-{
-	return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
-}
-
 static unsigned long rxe_mr_iova_to_page_offset(struct rxe_mr *mr, u64 iova)
 {
 	return iova & (mr_page_size(mr) - 1);
@@ -242,8 +237,8 @@ int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sgl,
 	return ib_sg_to_pages(ibmr, sgl, sg_nents, sg_offset, rxe_set_page);
 }
 
-static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
-			      unsigned int length, enum rxe_mr_copy_dir dir)
+int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
+		       unsigned int length, enum rxe_mr_copy_dir dir)
 {
 	unsigned int page_offset = rxe_mr_iova_to_page_offset(mr, iova);
 	unsigned long index = rxe_mr_iova_to_index(mr, iova);
Some functions in rxe_mr.c are going to be used in rxe_odp.c, which is to
be created in the subsequent patch. List the declarations of the functions
in rxe_loc.h.

Signed-off-by: Daisuke Matsuda <matsuda-daisuke@fujitsu.com>
---
 drivers/infiniband/sw/rxe/rxe_loc.h | 8 ++++++++
 drivers/infiniband/sw/rxe/rxe_mr.c  | 11 +++--------
 2 files changed, 11 insertions(+), 8 deletions(-)