@@ -1141,7 +1141,6 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
 			     IB_ACCESS_REMOTE_WRITE;
 	req->mr->need_inval = true;
-	atomic_inc(&req->ref);
 	sg->addr = cpu_to_le64(req->mr->iova);
 	put_unaligned_le24(req->mr->length, sg->length);
@@ -1328,10 +1327,9 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 	req->cqe.status = cqe->status;
 	req->cqe.result = cqe->result;
-	if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
-	    wc->ex.invalidate_rkey == req->mr->rkey) {
-		atomic_dec(&req->ref);
-	} else if (req->mr->need_inval) {
+	if (req->mr->need_inval &&
+	    (!(wc->wc_flags & IB_WC_WITH_INVALIDATE) ||
+	     wc->ex.invalidate_rkey != req->mr->rkey)) {
 		ret = nvme_rdma_inv_rkey(queue, req);
 		if (unlikely(ret < 0)) {
 			dev_err(queue->ctrl->ctrl.device,
@@ -1339,12 +1337,12 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 				req->mr->rkey, ret);
 			nvme_rdma_error_recovery(queue->ctrl);
 		}
-	}
-
-	if (atomic_dec_and_test(&req->ref)) {
-		if (rq->tag == tag)
-			ret = 1;
-		nvme_end_request(rq, req->cqe.status, req->cqe.result);
+	} else {
+		if (atomic_dec_and_test(&req->ref)) {
+			if (rq->tag == tag)
+				ret = 1;
+			nvme_end_request(rq, req->cqe.status, req->cqe.result);
+		}
 	}
 	return ret;
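
Not part of the patch: below is a minimal user-space sketch of how I read the restructured response path in the hunks above. It models why the extra atomic_inc() in nvme_rdma_map_sg_fr() can go away: the response handler either queues a local invalidation and leaves the final reference drop to that completion (assumed to happen in the invalidation done handler, which these hunks do not show), or drops its own reference and completes the request. All names here are hypothetical stand-ins; the driver itself uses atomic_t, struct ib_wc and blk-mq requests.

/*
 * Simplified model of the post-patch flow in nvme_rdma_process_nvme_rsp().
 * Build with any C11 compiler; this is illustration only, not driver code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct model_req {
	atomic_int ref;		/* completions still outstanding */
	bool need_inval;	/* MR registered, must be invalidated */
	unsigned int rkey;
};

/* Stand-in for nvme_end_request(): runs once, when the last ref drops. */
static void model_end_request(struct model_req *req)
{
	printf("request for rkey %#x completed\n", req->rkey);
}

/* Stand-in for nvme_rdma_inv_rkey(): queue a local invalidation WR. */
static int model_queue_local_inval(struct model_req *req)
{
	printf("local INV WR queued for rkey %#x\n", req->rkey);
	return 0;
}

/*
 * Mirrors the restructured branch: if the MR still needs invalidation and
 * the completion did not remotely invalidate the matching rkey, queue a
 * local invalidation and let that completion drop the final reference.
 * Otherwise this path drops its own reference and may finish the request.
 */
static void model_process_rsp(struct model_req *req,
			      bool remote_inval, unsigned int inval_rkey)
{
	if (req->need_inval &&
	    (!remote_inval || inval_rkey != req->rkey)) {
		if (model_queue_local_inval(req) < 0)
			printf("error recovery\n");
		return;	/* invalidation completion ends the request */
	}

	if (atomic_fetch_sub(&req->ref, 1) == 1)
		model_end_request(req);
}

int main(void)
{
	struct model_req req = { .need_inval = true, .rkey = 0x1234 };

	atomic_init(&req.ref, 1);	/* only the response completion left */
	/* Remote invalidation already covered the MR: complete right away. */
	model_process_rsp(&req, true, 0x1234);
	return 0;
}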