@@ -460,8 +460,8 @@ static const struct ib_device_ops erdma_device_ops = {
.mmap = erdma_mmap,
.mmap_free = erdma_mmap_free,
.modify_qp = erdma_modify_qp,
- .post_recv = erdma_post_recv,
- .post_send = erdma_post_send,
+ .post_recv = erdma_post_recv_nodrain,
+ .post_send = erdma_post_send_nodrain,
.poll_cq = erdma_poll_cq,
.query_device = erdma_query_device,
.query_gid = erdma_query_gid,
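Only the callbacks registered in erdma_device_ops change in this hunk; upper layers keep calling ib_post_send()/ib_post_recv(), which dispatch through this ops table, so the switch to the _nodrain entry points is invisible to consumers. For reference, the core dispatch looks roughly like this (paraphrased from include/rdma/ib_verbs.h, not part of this patch):

static inline int ib_post_send(struct ib_qp *qp,
			       const struct ib_send_wr *send_wr,
			       const struct ib_send_wr **bad_send_wr)
{
	const struct ib_send_wr *dummy;

	/* Dispatches to whatever the driver registered, i.e. the
	 * _nodrain entry points after this patch. */
	return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
}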
@@ -475,8 +475,8 @@ static void kick_sq_db(struct erdma_qp *qp, u16 pi)
writeq(db_data, qp->kern_qp.hw_sq_db);
}
-int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
- const struct ib_send_wr **bad_send_wr)
+static int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
+ const struct ib_send_wr **bad_send_wr, bool drain)
{
struct erdma_qp *qp = to_eqp(ibqp);
int ret = 0;
@@ -488,6 +488,16 @@ int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
return -EINVAL;
spin_lock_irqsave(&qp->lock, flags);
+
+ if (unlikely(qp->flags & ERDMA_QP_FLAGS_TX_STOPPED)) {
+ *bad_send_wr = send_wr;
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely(drain))
+ qp->flags |= ERDMA_QP_FLAGS_TX_STOPPED;
+
sq_pi = qp->kern_qp.sq_pi;
while (wr) {
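Because the ERDMA_QP_FLAGS_TX_STOPPED test runs before the flag is set, the work requests posted by the drain call itself are still queued; only posts that arrive afterwards are rejected. A hypothetical caller-side sequence (illustration only, not part of the patch; erdma_post_send here is the new static four-argument helper):

static void tx_stop_example(struct ib_qp *ibqp,
			    const struct ib_send_wr *drain_wr,
			    const struct ib_send_wr *later_wr)
{
	const struct ib_send_wr *bad_wr;
	int ret;

	/* Accepted: the drain WR is queued and the SQ is marked stopped. */
	ret = erdma_post_send(ibqp, drain_wr, &bad_wr, true);

	/* Rejected before touching the queue: ret == -EINVAL and
	 * bad_wr == later_wr. */
	ret = erdma_post_send(ibqp, later_wr, &bad_wr, false);
}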
@@ -507,11 +517,19 @@ int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
wr = wr->next;
}
- spin_unlock_irqrestore(&qp->lock, flags);
+out:
+ spin_unlock_irqrestore(&qp->lock, flags);
return ret;
}
+int erdma_post_send_nodrain(struct ib_qp *ibqp,
+ const struct ib_send_wr *send_wr,
+ const struct ib_send_wr **bad_send_wr)
+{
+ return erdma_post_send(ibqp, send_wr, bad_send_wr, false);
+}
+
static int erdma_post_recv_one(struct erdma_qp *qp,
const struct ib_recv_wr *recv_wr)
{
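A drain-enabled counterpart to erdma_post_send_nodrain() is not part of this section; if the rest of the series adds one, it would presumably mirror the wrapper above and simply pass drain == true (function name assumed for illustration):

int erdma_post_send_drain(struct ib_qp *ibqp,
			  const struct ib_send_wr *send_wr,
			  const struct ib_send_wr **bad_send_wr)
{
	/* Queue the final WR(s), then leave the SQ in the stopped state. */
	return erdma_post_send(ibqp, send_wr, bad_send_wr, true);
}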
@@ -542,8 +560,8 @@ static int erdma_post_recv_one(struct erdma_qp *qp,
return 0;
}
-int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
- const struct ib_recv_wr **bad_recv_wr)
+static int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr, bool drain)
{
const struct ib_recv_wr *wr = recv_wr;
struct erdma_qp *qp = to_eqp(ibqp);
@@ -552,6 +570,15 @@ int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
spin_lock_irqsave(&qp->lock, flags);
+ if (unlikely(qp->flags & ERDMA_QP_FLAGS_RX_STOPPED)) {
+ ret = -EINVAL;
+ *bad_recv_wr = recv_wr;
+ goto out;
+ }
+
+ if (unlikely(drain))
+ qp->flags |= ERDMA_QP_FLAGS_RX_STOPPED;
+
while (wr) {
ret = erdma_post_recv_one(qp, wr);
if (ret) {
@@ -561,6 +588,14 @@ int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
wr = wr->next;
}
+out:
spin_unlock_irqrestore(&qp->lock, flags);
return ret;
}
+
+int erdma_post_recv_nodrain(struct ib_qp *ibqp,
+ const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr)
+{
+ return erdma_post_recv(ibqp, recv_wr, bad_recv_wr, false);
+}
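As on the send side, the receive-queue drain path is not shown here. One plausible shape, similar in spirit to the marker WR used by the core's __ib_drain_rq(), would be the following sketch (names and structure assumed):

static void erdma_drain_rq_example(struct ib_qp *ibqp)
{
	struct ib_recv_wr rwr = {};
	const struct ib_recv_wr *bad_rwr;

	/* Post one empty marker WR; the RQ is marked stopped right after. */
	erdma_post_recv(ibqp, &rwr, &bad_rwr, true);

	/* A complete implementation would also move the QP to the error
	 * state and wait for the marker's completion before returning. */
}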
@@ -195,6 +195,12 @@ struct erdma_qp_attrs {
u8 pd_len;
};
+enum erdma_qp_flags {
+ ERDMA_QP_FLAGS_DRAIN_ISSUED = (1 << 0),
+ ERDMA_QP_FLAGS_TX_STOPPED = (1 << 1),
+ ERDMA_QP_FLAGS_RX_STOPPED = (1 << 2),
+};
+
struct erdma_qp {
struct ib_qp ibqp;
struct kref ref;
@@ -202,6 +208,7 @@ struct erdma_qp {
struct erdma_dev *dev;
struct erdma_cep *cep;
struct rw_semaphore state_lock;
+ unsigned long flags;
union {
struct erdma_kqp kern_qp;
@@ -328,10 +335,12 @@ void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
void erdma_qp_get_ref(struct ib_qp *ibqp);
void erdma_qp_put_ref(struct ib_qp *ibqp);
struct ib_qp *erdma_get_ibqp(struct ib_device *dev, int id);
-int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
- const struct ib_send_wr **bad_send_wr);
-int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
- const struct ib_recv_wr **bad_recv_wr);
+int erdma_post_send_nodrain(struct ib_qp *ibqp,
+ const struct ib_send_wr *send_wr,
+ const struct ib_send_wr **bad_send_wr);
+int erdma_post_recv_nodrain(struct ib_qp *ibqp,
+ const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr);
int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
u32 max_num_sg);
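For context, consumers normally reach driver drain behaviour through the core's ib_drain_qp() helper before tearing a QP down; a minimal usage sketch (core API only, not part of this patch):

static void example_qp_teardown(struct ib_qp *qp)
{
	/* Flush outstanding SQ and RQ work requests. */
	ib_drain_qp(qp);

	/* The QP can now be destroyed safely. */
	ib_destroy_qp(qp);
}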