@@ -72,6 +72,7 @@ static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
{
u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
u32 nents = min(sg_cnt, pages_per_mr);
+ struct ib_scatterlist ib_sg;
int count = 0, ret;
reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
@@ -87,7 +88,10 @@ static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
reg->inv_wr.next = NULL;
}
- ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
+ ib_sg.sg = sg;
+ ib_sg.dma_nents = nents;
+ ib_sg.offset = offset;
+ ret = ib_map_mr_sg(reg->mr, &ib_sg, PAGE_SIZE);
if (ret < 0 || ret < nents) {
ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
return -EINVAL;
@@ -2406,9 +2406,7 @@ EXPORT_SYMBOL(ib_set_vf_guid);
* ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
* and set it the memory region.
* @mr: memory region
- * @sg: dma mapped scatterlist
- * @sg_nents: number of entries in sg
- * @sg_offset: offset in bytes into sg
+ * @ib_sg: dma mapped ib scatterlist
* @page_size: page vector desired page size
*
* Constraints:
@@ -2427,15 +2425,15 @@ EXPORT_SYMBOL(ib_set_vf_guid);
* After this completes successfully, the memory region
* is ready for registration.
*/
-int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset, unsigned int page_size)
+int ib_map_mr_sg(struct ib_mr *mr, struct ib_scatterlist *ib_sg,
+ unsigned int page_size)
{
if (unlikely(!mr->device->ops.map_mr_sg))
return -EOPNOTSUPP;
mr->page_size = page_size;
- return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
+ return mr->device->ops.map_mr_sg(mr, ib_sg);
}
EXPORT_SYMBOL(ib_map_mr_sg);
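For reference, a caller of the reworked ib_map_mr_sg() now fills a struct ib_scatterlist on the stack instead of passing the sg/nents/offset triplet. The sketch below follows the pattern used by the converted ULPs later in this patch; the identifiers mr, sgl and nents are illustrative placeholders, not taken from the patch:

	struct ib_scatterlist ib_sg;
	int n;

	ib_sg.sg = sgl;           /* scatterlist already mapped for DMA */
	ib_sg.dma_nents = nents;  /* count returned by ib_dma_map_sg() */
	ib_sg.offset = 0;         /* start at the first byte of the first element */

	n = ib_map_mr_sg(mr, &ib_sg, PAGE_SIZE);
	if (n < 0 || n < ib_sg.dma_nents)
		return n < 0 ? n : -EINVAL;  /* MR could not cover the whole list */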
@@ -2443,12 +2441,7 @@ EXPORT_SYMBOL(ib_map_mr_sg);
* ib_sg_to_pages() - Convert the largest prefix of a sg list
* to a page vector
* @mr: memory region
- * @sgl: dma mapped scatterlist
- * @sg_nents: number of entries in sg
- * @sg_offset_p: IN: start offset in bytes into sg
- * OUT: offset in bytes for element n of the sg of the first
- * byte that has not been processed where n is the return
- * value of this function.
+ * @ib_sgl: dma mapped ib scatterlist
* @set_page: driver page assignment function pointer
*
* Core service helper for drivers to convert the largest
@@ -2456,26 +2449,32 @@ EXPORT_SYMBOL(ib_map_mr_sg);
* prefix converted is the prefix that meet the requirements
* of ib_map_mr_sg.
*
+ * IN ib_sgl->offset: start offset in bytes into ib_sgl->sg
+ * OUT ib_sgl->offset: offset in bytes, within element n of the sg (where
+ * n is the return value of this function), of the first byte that has
+ * not been processed.
+ *
* Returns the number of sg elements that were assigned to
* a page vector.
*/
-int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
- unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
+int ib_sg_to_pages(struct ib_mr *mr, struct ib_scatterlist *ib_sgl,
+ int (*set_page)(struct ib_mr *, u64))
{
struct scatterlist *sg;
+ struct scatterlist *sgl = ib_sgl->sg;
u64 last_end_dma_addr = 0;
- unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
+ unsigned int sg_offset = ib_sgl->offset;
unsigned int last_page_off = 0;
u64 page_mask = ~((u64)mr->page_size - 1);
int i, ret;
- if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
+ if (unlikely(ib_sgl->dma_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
return -EINVAL;
mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
mr->length = 0;
- for_each_sg(sgl, sg, sg_nents, i) {
+ for_each_sg(sgl, sg, ib_sgl->dma_nents, i) {
u64 dma_addr = sg_dma_address(sg) + sg_offset;
u64 prev_addr = dma_addr;
unsigned int dma_len = sg_dma_len(sg) - sg_offset;
@@ -2505,8 +2504,7 @@ int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
if (unlikely(ret < 0)) {
sg_offset = prev_addr - sg_dma_address(sg);
mr->length += prev_addr - dma_addr;
- if (sg_offset_p)
- *sg_offset_p = sg_offset;
+ ib_sgl->offset = sg_offset;
return i || sg_offset ? i : ret;
}
prev_addr = page_addr;
@@ -2521,8 +2519,7 @@ int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
sg_offset = 0;
}
- if (sg_offset_p)
- *sg_offset_p = 0;
+ ib_sgl->offset = 0;
return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
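The IN/OUT behaviour of ib_sgl->offset documented above is what lets a ULP register a long scatterlist as a series of prefixes, each backed by its own MR. A minimal sketch of that loop, mirroring the srp_map_sg_fr() conversion further down; next_mr() is an illustrative placeholder for obtaining a fresh fast-registration MR:

	struct ib_scatterlist ib_sg;

	ib_sg.offset = 0;
	while (count) {
		int i, n;

		ib_sg.sg = sgl;
		ib_sg.dma_nents = count;
		n = ib_map_mr_sg(next_mr(), &ib_sg, PAGE_SIZE);
		if (n < 0)
			return n;
		/*
		 * Elements sgl[0..n-1] are fully consumed; ib_sg.offset now
		 * holds the offset of the first unprocessed byte in sgl[n]
		 * and is carried into the next iteration.
		 */
		for (i = 0; i < n; i++)
			sgl = sg_next(sgl);
		count -= n;
	}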
@@ -3400,13 +3400,12 @@ static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
return 0;
}
-int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset)
+int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct ib_scatterlist *ib_sg)
{
struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
mr->npages = 0;
- return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
+ return ib_sg_to_pages(ib_mr, ib_sg, bnxt_re_set_page);
}
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
@@ -205,8 +205,7 @@ int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
-int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset);
+int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct ib_scatterlist *ib_sg);
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
u32 max_num_sg);
int bnxt_re_dereg_mr(struct ib_mr *mr);
@@ -746,14 +746,13 @@ static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
return 0;
}
-static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
- int sg_nents, unsigned int *sg_offset)
+static int iwch_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg)
{
struct iwch_mr *mhp = to_iwch_mr(ibmr);
mhp->npages = 0;
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page);
+ return ib_sg_to_pages(ibmr, ib_sg, iwch_set_page);
}
static int iwch_destroy_qp(struct ib_qp *ib_qp)
@@ -1051,8 +1051,7 @@ void c4iw_qp_rem_ref(struct ib_qp *qp);
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg);
-int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset);
+int c4iw_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg);
int c4iw_dealloc_mw(struct ib_mw *mw);
void c4iw_dealloc(struct uld_ctx *ctx);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
@@ -782,14 +782,13 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
return 0;
}
-int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset)
+int c4iw_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg)
{
struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
mhp->mpl_len = 0;
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
+ return ib_sg_to_pages(ibmr, ib_sg, c4iw_set_page);
}
int c4iw_dereg_mr(struct ib_mr *ib_mr)
@@ -1075,8 +1075,7 @@ int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
struct ib_udata *udata);
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
-int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset);
+int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg);
int hns_roce_dereg_mr(struct ib_mr *ibmr);
int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox,
@@ -1375,14 +1375,13 @@ static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
return 0;
}
-int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset)
+int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg)
{
struct hns_roce_mr *mr = to_hr_mr(ibmr);
mr->npages = 0;
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+ return ib_sg_to_pages(ibmr, ib_sg, hns_roce_set_page);
}
static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
@@ -1715,16 +1715,14 @@ static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)
/**
* i40iw_map_mr_sg - map of sg list for fmr
* @ibmr: ib mem to access iwarp mr pointer
- * @sg: scatter gather list for fmr
- * @sg_nents: number of sg pages
+ * @ib_sg: dma mapped ib scatter gather list for fmr
*/
-static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
- int sg_nents, unsigned int *sg_offset)
+static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg)
{
struct i40iw_mr *iwmr = to_iwmr(ibmr);
iwmr->npages = 0;
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, i40iw_set_page);
+ return ib_sg_to_pages(ibmr, ib_sg, i40iw_set_page);
}
/**
@@ -739,8 +739,7 @@ int mlx4_ib_dealloc_mw(struct ib_mw *mw);
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg);
-int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset);
+int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg);
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
@@ -806,8 +806,7 @@ static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
return 0;
}
-int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset)
+int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg)
{
struct mlx4_ib_mr *mr = to_mmr(ibmr);
int rc;
@@ -817,7 +816,7 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
mr->page_map_size, DMA_TO_DEVICE);
- rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
+ rc = ib_sg_to_pages(ibmr, ib_sg, mlx4_set_page);
ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
mr->page_map_size, DMA_TO_DEVICE);
@@ -1107,8 +1107,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg);
-int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset);
+int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad_hdr *in, size_t in_mad_size,
@@ -1934,20 +1934,18 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
- struct scatterlist *sgl,
- unsigned short sg_nents,
- unsigned int *sg_offset_p)
+ struct ib_scatterlist *ib_sgl)
{
- struct scatterlist *sg = sgl;
+ struct scatterlist *sg = ib_sgl->sg;
struct mlx5_klm *klms = mr->descs;
- unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
+ unsigned int sg_offset = ib_sgl->offset;
u32 lkey = mr->ibmr.pd->local_dma_lkey;
int i;
mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
mr->ibmr.length = 0;
- for_each_sg(sgl, sg, sg_nents, i) {
+ for_each_sg(ib_sgl->sg, sg, ib_sgl->dma_nents, i) {
if (unlikely(i >= mr->max_descs))
break;
klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
@@ -1959,8 +1957,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
}
mr->ndescs = i;
- if (sg_offset_p)
- *sg_offset_p = sg_offset;
+ ib_sgl->offset = sg_offset;
return i;
}
@@ -1979,8 +1976,7 @@ static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
return 0;
}
-int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset)
+int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg)
{
struct mlx5_ib_mr *mr = to_mmr(ibmr);
int n;
@@ -1992,10 +1988,9 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
DMA_TO_DEVICE);
if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
- n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
+ n = mlx5_ib_sg_to_klms(mr, ib_sg);
else
- n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
- mlx5_set_page);
+ n = ib_sg_to_pages(ibmr, ib_sg, mlx5_set_page);
ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
mr->desc_size * mr->max_descs,
@@ -402,14 +402,13 @@ static int nes_set_page(struct ib_mr *ibmr, u64 addr)
return 0;
}
-static int nes_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
- int sg_nents, unsigned int *sg_offset)
+static int nes_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg)
{
struct nes_mr *nesmr = to_nesmr(ibmr);
nesmr->npages = 0;
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, nes_set_page);
+ return ib_sg_to_pages(ibmr, ib_sg, nes_set_page);
}
/**
@@ -3030,12 +3030,11 @@ static int ocrdma_set_page(struct ib_mr *ibmr, u64 addr)
return 0;
}
-int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset)
+int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg)
{
struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);
mr->npages = 0;
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ocrdma_set_page);
+ return ib_sg_to_pages(ibmr, ib_sg, ocrdma_set_page);
}
@@ -110,7 +110,6 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,
struct ib_mr *ocrdma_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg);
-int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset);
+int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg);
#endif /* __OCRDMA_VERBS_H__ */
@@ -2938,15 +2938,14 @@ static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
}
}
-int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
- int sg_nents, unsigned int *sg_offset)
+int qedr_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg)
{
struct qedr_mr *mr = get_qedr_mr(ibmr);
mr->npages = 0;
handle_completed_mrs(mr->dev, &mr->info);
- return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
+ return ib_sg_to_pages(ibmr, ib_sg, qedr_set_page);
}
struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
@@ -85,8 +85,7 @@ struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc);
struct ib_mr *qedr_reg_user_mr(struct ib_pd *, u64 start, u64 length,
u64 virt, int acc, struct ib_udata *);
-int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
- int sg_nents, unsigned int *sg_offset);
+int qedr_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg);
struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
@@ -310,8 +310,7 @@ static int pvrdma_set_page(struct ib_mr *ibmr, u64 addr)
return 0;
}
-int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset)
+int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg)
{
struct pvrdma_user_mr *mr = to_vmr(ibmr);
struct pvrdma_dev *dev = to_vdev(ibmr->device);
@@ -319,7 +318,7 @@ int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
mr->npages = 0;
- ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, pvrdma_set_page);
+ ret = ib_sg_to_pages(ibmr, ib_sg, pvrdma_set_page);
if (ret < 0)
dev_warn(&dev->pdev->dev, "could not map sg to pages\n");
@@ -410,8 +410,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int pvrdma_dereg_mr(struct ib_mr *mr);
struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
-int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
- int sg_nents, unsigned int *sg_offset);
+int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg);
struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
@@ -629,21 +629,17 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
/**
* rvt_map_mr_sg - map sg list and set it the memory region
* @ibmr: memory region
- * @sg: dma mapped scatterlist
- * @sg_nents: number of entries in sg
- * @sg_offset: offset in bytes into sg
+ * @ib_sg: dma mapped ib scatterlist
*
* Return: number of sg elements mapped to the memory region
*/
-int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
- int sg_nents, unsigned int *sg_offset)
+int rvt_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg)
{
struct rvt_mr *mr = to_imr(ibmr);
mr->mr.length = 0;
mr->mr.page_shift = PAGE_SHIFT;
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
- rvt_set_page);
+ return ib_sg_to_pages(ibmr, ib_sg, rvt_set_page);
}
/**
@@ -82,8 +82,7 @@ int rvt_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg);
-int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
- int sg_nents, unsigned int *sg_offset);
+int rvt_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg);
struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
struct ib_fmr_attr *fmr_attr);
int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
@@ -1080,15 +1080,14 @@ static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
return 0;
}
-static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
- int sg_nents, unsigned int *sg_offset)
+static int rxe_map_mr_sg(struct ib_mr *ibmr, struct ib_scatterlist *ib_sg)
{
struct rxe_mem *mr = to_rmr(ibmr);
int n;
mr->nbuf = 0;
- n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
+ n = ib_sg_to_pages(ibmr, ib_sg, rxe_set_page);
mr->va = ibmr->iova;
mr->iova = ibmr->iova;
@@ -235,12 +235,15 @@ int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
struct iser_page_vec *page_vec = rsc->page_vec;
struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
struct ib_pool_fmr *fmr;
+ struct ib_scatterlist ib_sg;
int ret, plen;
page_vec->npages = 0;
page_vec->fake_mr.page_size = SIZE_4K;
- plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
- mem->dma_nents, NULL, iser_set_page);
+ ib_sg.sg = mem->sg;
+ ib_sg.dma_nents = mem->dma_nents;
+ ib_sg.offset = 0;
+ plen = ib_sg_to_pages(&page_vec->fake_mr, &ib_sg, iser_set_page);
if (unlikely(plen < mem->dma_nents)) {
iser_err("page vec too short to hold this SG\n");
iser_data_buf_dump(mem, device->ib_device);
@@ -441,6 +444,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
struct ib_mr *mr = rsc->mr;
struct ib_reg_wr *wr;
+ struct ib_scatterlist ib_sg;
int n;
if (rsc->mr_valid)
@@ -448,7 +452,10 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
- n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SIZE_4K);
+ ib_sg.sg = mem->sg;
+ ib_sg.dma_nents = mem->dma_nents;
+ ib_sg.offset = 0;
+ n = ib_map_mr_sg(mr, &ib_sg, SIZE_4K);
if (unlikely(n != mem->dma_nents)) {
iser_err("failed to map sg (%d/%d)\n",
n, mem->dma_nents);
@@ -1511,15 +1511,15 @@ static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
}
/*
- * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
- * where to start in the first element. If sg_offset_p != NULL then
- * *sg_offset_p is updated to the offset in state->sg[retval] of the first
+ * Map up to ib_sg->dma_nents elements of state->sg, where ib_sg->offset is
+ * the offset in bytes at which to start in the first element. On return,
+ * ib_sg->offset is updated to the offset in state->sg[retval] of the first
* byte that has not yet been mapped.
*/
static int srp_map_finish_fr(struct srp_map_state *state,
struct srp_request *req,
- struct srp_rdma_ch *ch, int sg_nents,
- unsigned int *sg_offset_p)
+ struct srp_rdma_ch *ch,
+ struct ib_scatterlist *ib_sg)
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
@@ -1537,14 +1537,11 @@ static int srp_map_finish_fr(struct srp_map_state *state,
WARN_ON_ONCE(!dev->use_fast_reg);
- if (sg_nents == 1 && target->global_rkey) {
- unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
-
- srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
- sg_dma_len(state->sg) - sg_offset,
+ if (ib_sg->dma_nents == 1 && target->global_rkey) {
+ srp_map_desc(state, sg_dma_address(state->sg) + ib_sg->offset,
+ sg_dma_len(state->sg) - ib_sg->offset,
target->global_rkey);
- if (sg_offset_p)
- *sg_offset_p = 0;
+ ib_sg->offset = 0;
return 1;
}
@@ -1555,13 +1552,12 @@ static int srp_map_finish_fr(struct srp_map_state *state,
rkey = ib_inc_rkey(desc->mr->rkey);
ib_update_fast_reg_key(desc->mr, rkey);
- n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
- dev->mr_page_size);
+ n = ib_map_mr_sg(desc->mr, ib_sg, dev->mr_page_size);
if (unlikely(n < 0)) {
srp_fr_pool_put(ch->fr_pool, &desc, 1);
pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
- dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
- sg_offset_p ? *sg_offset_p : -1, n);
+ dev_name(&req->scmnd->device->sdev_gendev),
+ ib_sg->dma_nents, ib_sg->offset, n);
return n;
}
@@ -1668,8 +1664,9 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
struct srp_request *req, struct scatterlist *scat,
int count)
{
- unsigned int sg_offset = 0;
+ struct ib_scatterlist ib_sg;
+ ib_sg.offset = 0;
state->fr.next = req->fr_list;
state->fr.end = req->fr_list + ch->target->mr_per_cmd;
state->sg = scat;
@@ -1680,7 +1677,9 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
while (count) {
int i, n;
- n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
+ ib_sg.sg = state->sg;
+ ib_sg.dma_nents = count;
+ n = srp_map_finish_fr(state, req, ch, &ib_sg);
if (unlikely(n < 0))
return n;
@@ -1727,6 +1726,7 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
struct srp_direct_buf idb_desc;
u64 idb_pages[1];
struct scatterlist idb_sg[1];
+ struct ib_scatterlist ib_sg;
int ret;
memset(&state, 0, sizeof(state));
@@ -1744,7 +1744,10 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
#ifdef CONFIG_NEED_SG_DMA_LENGTH
idb_sg->dma_length = idb_sg->length; /* hack^2 */
#endif
- ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
+ ib_sg.sg = state.sg;
+ ib_sg.dma_nents = 1;
+ ib_sg.offset = 0;
+ ret = srp_map_finish_fr(&state, req, ch, &ib_sg);
if (ret < 0)
return ret;
WARN_ON_ONCE(ret < 1);
@@ -1213,6 +1213,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
int count)
{
struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+ struct ib_scatterlist ib_sg;
int nr;
req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
@@ -1223,7 +1224,10 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
* Align the MR to a 4K page size to match the ctrl page size and
* the block virtual boundary.
*/
- nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
+ ib_sg.sg = req->sg_table.sgl;
+ ib_sg.dma_nents = count;
+ ib_sg.offset = 0;
+ nr = ib_map_mr_sg(req->mr, &ib_sg, SZ_4K);
if (unlikely(nr < count)) {
ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
req->mr = NULL;
@@ -2489,6 +2489,7 @@ struct smbd_mr *smbd_register_mr(
int rc, i;
enum dma_data_direction dir;
struct ib_reg_wr *reg_wr;
+ struct ib_scatterlist ib_sg;
if (num_pages > info->max_frmr_depth) {
log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
@@ -2534,8 +2535,10 @@ struct smbd_mr *smbd_register_mr(
goto dma_map_error;
}
- rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgl, num_pages,
- NULL, PAGE_SIZE);
+ ib_sg.sg = smbdirect_mr->sgl;
+ ib_sg.dma_nents = num_pages;
+ ib_sg.offset = 0;
+ rc = ib_map_mr_sg(smbdirect_mr->mr, &ib_sg, PAGE_SIZE);
if (rc != num_pages) {
log_rdma_mr(ERR,
"ib_map_mr_sg failed rc = %d num_pages = %x\n",
@@ -2157,6 +2157,18 @@ struct ib_counters_read_attr {
u32 flags; /* use enum ib_read_counters_flags */
};
+/**
+ * struct ib_scatterlist - dma mapped scatterlist for RDMA operations
+ * @sg: dma mapped sg list
+ * @dma_nents: number of mapped entries in @sg, as returned by dma_map_sg
+ * @offset: start offset in bytes into the first sg element
+ */
+struct ib_scatterlist {
+ struct scatterlist *sg;
+ int dma_nents;
+ unsigned int offset;
+};
+
struct uverbs_attr_bundle;
/**
@@ -2317,8 +2329,7 @@ struct ib_device_ops {
enum ib_uverbs_advise_mr_advice advice, u32 flags,
struct ib_sge *sg_list, u32 num_sge,
struct uverbs_attr_bundle *attrs);
- int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset);
+ int (*map_mr_sg)(struct ib_mr *mr, struct ib_scatterlist *ib_sg);
int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status);
struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
@@ -3841,23 +3852,23 @@ struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
wq_ind_table_init_attr);
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
-int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset, unsigned int page_size);
+int ib_map_mr_sg(struct ib_mr *mr, struct ib_scatterlist *ib_sg,
+ unsigned int page_size);
static inline int
-ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset, unsigned int page_size)
+ib_map_mr_sg_zbva(struct ib_mr *mr, struct ib_scatterlist *ib_sg,
+ unsigned int page_size)
{
int n;
- n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
+ n = ib_map_mr_sg(mr, ib_sg, page_size);
mr->iova = 0;
return n;
}
-int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
- unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
+int ib_sg_to_pages(struct ib_mr *mr, struct ib_scatterlist *ib_sgl,
+ int (*set_page)(struct ib_mr *, u64));
void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
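Putting the new header API together: dma_nents is meant to carry the return value of the DMA mapping helper directly. A sketch under that assumption, with dev, mr, sgl and nents as illustrative placeholders:

	struct ib_scatterlist ib_sg;
	int n;

	ib_sg.dma_nents = ib_dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!ib_sg.dma_nents)
		return -EIO;
	ib_sg.sg = sgl;
	ib_sg.offset = 0;

	n = ib_map_mr_sg_zbva(mr, &ib_sg, PAGE_SIZE); /* zero-based iova variant */
	if (n < ib_sg.dma_nents)
		return n < 0 ? n : -EINVAL;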
@@ -104,15 +104,18 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
{
struct rds_ib_frmr *frmr = &ibmr->u.frmr;
struct ib_reg_wr reg_wr;
- int ret, off = 0;
+ struct ib_scatterlist ib_sg;
+ int ret;
while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
atomic_inc(&ibmr->ic->i_fastreg_wrs);
cpu_relax();
}
- ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len,
- &off, PAGE_SIZE);
+ ib_sg.sg = ibmr->sg;
+ ib_sg.dma_nents = ibmr->sg_len;
+ ib_sg.offset = 0;
+ ret = ib_map_mr_sg_zbva(frmr->mr, &ib_sg, PAGE_SIZE);
if (unlikely(ret != ibmr->sg_len))
return ret < 0 ? ret : -EINVAL;
@@ -353,14 +353,15 @@ void smc_ib_put_memory_region(struct ib_mr *mr)
static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot)
{
- unsigned int offset = 0;
int sg_num;
+ struct ib_scatterlist ib_sg;
+ ib_sg.sg = buf_slot->sgt[SMC_SINGLE_LINK].sgl;
+ ib_sg.dma_nents = buf_slot->sgt[SMC_SINGLE_LINK].orig_nents;
+ ib_sg.offset = 0;
/* map the largest prefix of a dma mapped SG list */
sg_num = ib_map_mr_sg(buf_slot->mr_rx[SMC_SINGLE_LINK],
- buf_slot->sgt[SMC_SINGLE_LINK].sgl,
- buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
- &offset, PAGE_SIZE);
+ &ib_sg, PAGE_SIZE);
return sg_num;
}
@@ -400,6 +400,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_mr *mr;
struct ib_mr *ibmr;
struct ib_reg_wr *reg_wr;
+ struct ib_scatterlist ib_sg;
int i, n;
u8 key;
@@ -441,7 +442,10 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
goto out_dmamap_err;
ibmr = frwr->fr_mr;
- n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
+ ib_sg.sg = mr->mr_sg;
+ ib_sg.dma_nents = mr->mr_nents;
+ ib_sg.offset = 0;
+ n = ib_map_mr_sg(ibmr, &ib_sg, PAGE_SIZE);
if (unlikely(n != mr->mr_nents))
goto out_mapmr_err;