This patch allows the rxe driver to re-register an FMR with or without
remapping the mr. It adds a state variable that tracks whether the mr
has been remapped and swaps the map sets only when it has.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_mr.c    | 38 +++++++++++++++++++++++----
 drivers/infiniband/sw/rxe/rxe_verbs.c | 15 +++++++++++
 drivers/infiniband/sw/rxe/rxe_verbs.h | 13 ++++++++-
 3 files changed, 60 insertions(+), 6 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -241,6 +241,9 @@ int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
if (err)
return err;
+ mr->remap = RXE_REMAP_INIT;
+ spin_lock_init(&mr->remap_lock);
+
mr->max_buf = max_pages;
mr->state = RXE_MR_STATE_FREE;
mr->type = IB_MR_TYPE_MEM_REG;
@@ -587,10 +590,22 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey)
}
if (unlikely(mr->type != IB_MR_TYPE_MEM_REG)) {
- pr_warn("%s: mr->type (%d) is wrong type\n", __func__, mr->type);
+ pr_warn("%s: mr->type (%d) is wrong type\n",
+ __func__, mr->type);
+ ret = -EINVAL;
+ goto err_drop_ref;
+ }
+
+ spin_lock_bh(&mr->remap_lock);
+ if (mr->remap == RXE_REMAP_READY) {
+ mr->remap = RXE_REMAP_INIT;
+ } else {
+ spin_unlock_bh(&mr->remap_lock);
ret = -EINVAL;
goto err_drop_ref;
}
+ spin_unlock_bh(&mr->remap_lock);
+
mr->state = RXE_MR_STATE_FREE;
ret = 0;
@@ -634,10 +649,23 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
mr->rkey = (access & IB_ACCESS_REMOTE) ? mr->lkey : 0;
mr->state = RXE_MR_STATE_VALID;
- set = mr->cur_map_set;
- mr->cur_map_set = mr->next_map_set;
- mr->cur_map_set->iova = wqe->wr.wr.reg.mr->iova;
- mr->next_map_set = set;
+ spin_lock_bh(&mr->remap_lock);
+ switch (mr->remap) {
+ case RXE_REMAP_MAPPED:
+ set = mr->cur_map_set;
+ mr->cur_map_set = mr->next_map_set;
+ mr->cur_map_set->iova = wqe->wr.wr.reg.mr->iova;
+ mr->next_map_set = set;
+ mr->remap = RXE_REMAP_READY;
+ break;
+ case RXE_REMAP_READY:
+ mr->cur_map_set->iova = wqe->wr.wr.reg.mr->iova;
+ break;
+ default:
+ spin_unlock_bh(&mr->remap_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&mr->remap_lock);
return 0;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -975,6 +975,17 @@ static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
struct rxe_map_set *set = mr->next_map_set;
int n;
+ spin_lock_bh(&mr->remap_lock);
+ if (mr->remap == RXE_REMAP_INIT) {
+ mr->remap = RXE_REMAP_BUSY;
+ } else {
+ spin_unlock_bh(&mr->remap_lock);
+ pr_warn("%s: mr#%d not in REMAP_INIT state\n",
+ __func__, mr->elem.index);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&mr->remap_lock);
+
set->nbuf = 0;
n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_mr_set_page);
@@ -986,6 +997,10 @@ static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
set->page_mask = ibmr->page_size - 1;
set->offset = set->iova & set->page_mask;
+ spin_lock_bh(&mr->remap_lock);
+ mr->remap = RXE_REMAP_MAPPED;
+ spin_unlock_bh(&mr->remap_lock);
+
return n;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -265,6 +265,13 @@ enum rxe_mr_state {
RXE_MR_STATE_VALID,
};
+enum rxe_remap_state {
+ RXE_REMAP_INIT,
+ RXE_REMAP_BUSY,
+ RXE_REMAP_MAPPED,
+ RXE_REMAP_READY,
+};
+
enum rxe_mr_copy_dir {
RXE_TO_MR_OBJ,
RXE_FROM_MR_OBJ,
@@ -308,11 +315,15 @@ struct rxe_mr {
struct rxe_pool_elem elem;
struct ib_mr ibmr;
+ enum rxe_mr_state state;
+ enum rxe_remap_state remap;
+
+ spinlock_t remap_lock;
+
struct ib_umem *umem;
u32 lkey;
u32 rkey;
- enum rxe_mr_state state;
enum ib_mr_type type;
int access;
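As a reviewing aid, below is a minimal stand-alone user-space model of the
remap state machine the patch introduces. It mirrors the transitions in
rxe_map_mr_sg() (INIT -> BUSY -> MAPPED), rxe_reg_fast_mr() (MAPPED -> READY
on a swap, READY -> READY without one) and rxe_invalidate_mr() (READY ->
INIT). The model_* names and the pthread spinlock are illustrative stand-ins
only, not driver code:

/*
 * Stand-alone model of the remap state machine; build with:
 *     cc model.c -o model -lpthread
 * A pthread spinlock stands in for the kernel spinlock, -1 for -EINVAL.
 */
#include <pthread.h>
#include <stdio.h>

enum remap_state { REMAP_INIT, REMAP_BUSY, REMAP_MAPPED, REMAP_READY };

struct model_mr {
        pthread_spinlock_t remap_lock;
        enum remap_state remap;
};

/* rxe_map_mr_sg(): only an unmapped (INIT) mr may load a new map */
static int model_map_sg(struct model_mr *mr)
{
        pthread_spin_lock(&mr->remap_lock);
        if (mr->remap != REMAP_INIT) {
                pthread_spin_unlock(&mr->remap_lock);
                return -1;
        }
        mr->remap = REMAP_BUSY;
        pthread_spin_unlock(&mr->remap_lock);

        /* ib_sg_to_pages() would fill next_map_set here, unlocked */

        pthread_spin_lock(&mr->remap_lock);
        mr->remap = REMAP_MAPPED;
        pthread_spin_unlock(&mr->remap_lock);
        return 0;
}

/* rxe_reg_fast_mr(): swap the map sets only if a new map was loaded */
static int model_reg_fast(struct model_mr *mr)
{
        int ret = 0;

        pthread_spin_lock(&mr->remap_lock);
        switch (mr->remap) {
        case REMAP_MAPPED:
                /* cur_map_set and next_map_set are exchanged here */
                mr->remap = REMAP_READY;
                break;
        case REMAP_READY:
                /* re-register without remapping; keep the current set */
                break;
        default:
                ret = -1;
                break;
        }
        pthread_spin_unlock(&mr->remap_lock);
        return ret;
}

/* rxe_invalidate_mr(): a READY mr returns to INIT */
static int model_invalidate(struct model_mr *mr)
{
        int ret = 0;

        pthread_spin_lock(&mr->remap_lock);
        if (mr->remap == REMAP_READY)
                mr->remap = REMAP_INIT;
        else
                ret = -1;
        pthread_spin_unlock(&mr->remap_lock);
        return ret;
}

int main(void)
{
        struct model_mr mr = { .remap = REMAP_INIT };
        int map, reg, rereg, inval;

        pthread_spin_init(&mr.remap_lock, PTHREAD_PROCESS_PRIVATE);

        map   = model_map_sg(&mr);     /* INIT -> BUSY -> MAPPED  */
        reg   = model_reg_fast(&mr);   /* MAPPED -> READY, swap   */
        rereg = model_reg_fast(&mr);   /* READY -> READY, no swap */
        inval = model_invalidate(&mr); /* READY -> INIT           */

        printf("%d %d %d %d\n", map, reg, rereg, inval); /* 0 0 0 0 */
        return 0;
}

In this reading, REMAP_BUSY exists so that a repeated or racing
rxe_map_mr_sg() fails with -EINVAL instead of touching next_map_set while
a mapping is in flight, which matches the pr_warn() the patch emits for
that case.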