diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -72,6 +72,8 @@ int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
+int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val);
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
enum rxe_mr_lookup_type type);
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -32,13 +32,15 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
case IB_MR_TYPE_USER:
case IB_MR_TYPE_MEM_REG:
- if (iova < mr->ibmr.iova || length > mr->ibmr.length ||
- iova > mr->ibmr.iova + mr->ibmr.length - length)
- return -EFAULT;
+ if (iova < mr->ibmr.iova ||
+ iova + length > mr->ibmr.iova + mr->ibmr.length) {
+ rxe_dbg_mr(mr, "iova/length out of range");
+ return -ERANGE;
+ }
return 0;
default:
- rxe_dbg_mr(mr, "type (%d) not supported\n", mr->ibmr.type);
+ rxe_dbg_mr(mr, "mr type not supported\n");
return -EINVAL;
}
}
@@ -299,37 +301,22 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
{
size_t offset;
int m, n;
- void *addr;
- if (mr->state != RXE_MR_STATE_VALID) {
- rxe_dbg_mr(mr, "Not in valid state\n");
- addr = NULL;
- goto out;
- }
+ if (mr->state != RXE_MR_STATE_VALID)
+ return NULL;
- if (!mr->map) {
- addr = (void *)(uintptr_t)iova;
- goto out;
- }
+ if (mr->ibmr.type == IB_MR_TYPE_DMA)
+ return (void *)(uintptr_t)iova;
- if (mr_check_range(mr, iova, length)) {
- rxe_dbg_mr(mr, "Range violation\n");
- addr = NULL;
- goto out;
- }
+ if (mr_check_range(mr, iova, length))
+ return NULL;
lookup_iova(mr, iova, &m, &n, &offset);
- if (offset + length > mr->map[m]->buf[n].size) {
- rxe_dbg_mr(mr, "Crosses page boundary\n");
- addr = NULL;
- goto out;
- }
-
- addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
+ if (offset + length > mr->map[m]->buf[n].size)
+ return NULL;
-out:
- return addr;
+ return (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
}
int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, int length)
@@ -538,6 +525,46 @@ int copy_data(
return err;
}
+/* Guarantee atomicity of atomic operations at the machine level. */
+static DEFINE_SPINLOCK(atomic_ops_lock);
+
+int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
+{
+ u64 *va;
+ u64 value;
+
+ if (mr->state != RXE_MR_STATE_VALID) {
+ rxe_dbg_mr(mr, "mr not in valid state");
+ return -EINVAL;
+ }
+
+ va = iova_to_vaddr(mr, iova, sizeof(u64));
+ if (!va) {
+ rxe_dbg_mr(mr, "iova out of range");
+ return -ERANGE;
+ }
+
+ if ((uintptr_t)va & 0x7) {
+ rxe_dbg_mr(mr, "iova not aligned");
+ return RXE_ERR_NOT_ALIGNED;
+ }
+
+ spin_lock_bh(&atomic_ops_lock);
+ value = *orig_val = *va;
+
+ if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
+ if (value == compare)
+ *va = swap_add;
+ } else {
+ value += swap_add;
+ *va = value;
+ }
+ spin_unlock_bh(&atomic_ops_lock);
+
+ return 0;
+}
+
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
{
struct rxe_sge *sge = &dma->sge[dma->cur_sge];
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -725,17 +725,12 @@ static enum resp_states process_flush(struct rxe_qp *qp,
return RESPST_ACKNOWLEDGE;
}
-/* Guarantee atomicity of atomic operations at the machine level. */
-static DEFINE_SPINLOCK(atomic_ops_lock);
-
static enum resp_states atomic_reply(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt)
+ struct rxe_pkt_info *pkt)
{
- u64 *vaddr;
- enum resp_states ret;
struct rxe_mr *mr = qp->resp.mr;
struct resp_res *res = qp->resp.res;
- u64 value;
+ int err;
if (!res) {
res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK);
@@ -743,33 +738,19 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
}
if (!res->replay) {
- if (mr->state != RXE_MR_STATE_VALID) {
- ret = RESPST_ERR_RKEY_VIOLATION;
- goto out;
- }
-
- vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset,
- sizeof(u64));
-
- /* check vaddr is 8 bytes aligned. */
- if (!vaddr || (uintptr_t)vaddr & 7) {
- ret = RESPST_ERR_MISALIGNED_ATOMIC;
- goto out;
- }
-
- spin_lock_bh(&atomic_ops_lock);
- res->atomic.orig_val = value = *vaddr;
-
- if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
- if (value == atmeth_comp(pkt))
- value = atmeth_swap_add(pkt);
- } else {
- value += atmeth_swap_add(pkt);
+ u64 iova = qp->resp.va + qp->resp.offset;
+
+ err = rxe_mr_do_atomic_op(mr, iova, pkt->opcode,
+ atmeth_comp(pkt),
+ atmeth_swap_add(pkt),
+ &res->atomic.orig_val);
+ if (unlikely(err)) {
+ if (err == -RXE_ERR_NOT_ALIGNED)
+ return RESPST_ERR_MISALIGNED_ATOMIC;
+ else
+ return RESPST_ERR_RKEY_VIOLATION;
}
- *vaddr = value;
- spin_unlock_bh(&atomic_ops_lock);
-
qp->resp.msn++;
/* next expected psn, read handles this separately */
@@ -780,9 +761,7 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
qp->resp.status = IB_WC_SUCCESS;
}
- ret = RESPST_ACKNOWLEDGE;
-out:
- return ret;
+ return RESPST_ACKNOWLEDGE;
}
#ifdef CONFIG_64BIT
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -273,6 +273,11 @@ enum rxe_mr_state {
RXE_MR_STATE_VALID,
};
+/* some rxe specific error conditions */
+enum rxe_mr_error {
+ RXE_ERR_NOT_ALIGNED = 0x1001, /* misaligned address */
+};
+
enum rxe_mr_copy_dir {
RXE_TO_MR_OBJ,
RXE_FROM_MR_OBJ,
Isolate mr specific code from atomic_reply() in rxe_resp.c into a
subroutine rxe_mr_do_atomic_op() in rxe_mr.c.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_loc.h   |  2 +
 drivers/infiniband/sw/rxe/rxe_mr.c    | 83 ++++++++++++++++++---------
 drivers/infiniband/sw/rxe/rxe_resp.c  | 49 +++++-----------
 drivers/infiniband/sw/rxe/rxe_verbs.h |  5 ++
 4 files changed, 76 insertions(+), 63 deletions(-)
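
For readers who don't know the IBA atomic verbs: rxe_mr_do_atomic_op()
applies one of two update rules while holding atomic_ops_lock and always
reports the original memory value back through *orig_val (that value is
what the responder returns to the requester in the atomic ack). The
userspace sketch below models just those update rules; do_atomic() and
is_cmp_swap are invented names for illustration, not the kernel API.

#include <stdint.h>
#include <stdio.h>

/* Model of the two update rules in rxe_mr_do_atomic_op() */
static uint64_t do_atomic(uint64_t *va, int is_cmp_swap,
			  uint64_t compare, uint64_t swap_add)
{
	uint64_t orig = *va;	/* always handed back to the caller */

	if (is_cmp_swap) {
		if (orig == compare)	/* swap only if memory matches */
			*va = swap_add;
	} else {
		*va = orig + swap_add;	/* unconditional fetch & add */
	}
	return orig;
}

int main(void)
{
	uint64_t mem = 5;
	uint64_t orig;

	/* fetch & add 3: orig is 5, mem becomes 8 */
	orig = do_atomic(&mem, 0, 0, 3);
	printf("orig=%llu mem=%llu\n",
	       (unsigned long long)orig, (unsigned long long)mem);

	/* compare & swap with a stale compare value (5): mem stays 8 */
	orig = do_atomic(&mem, 1, 5, 100);
	printf("orig=%llu mem=%llu\n",
	       (unsigned long long)orig, (unsigned long long)mem);
	return 0;
}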