
[1/6] IB/core: add support for draining Shared receive queues

Message ID 20240618001034.22681-2-mgurtovoy@nvidia.com (mailing list archive)
State Superseded
Series: Last WQE Reached event treatment

Commit Message

Max Gurtovoy June 18, 2024, 12:10 a.m. UTC
To avoid leakage for QPs associated with an SRQ, according to the IB spec
(section 10.3.1):

"Note, for QPs that are associated with an SRQ, the Consumer should take
the QP through the Error State before invoking a Destroy QP or a Modify
QP to the Reset State. The Consumer may invoke the Destroy QP without
first performing a Modify QP to the Error State and waiting for the Affiliated
Asynchronous Last WQE Reached Event. However, if the Consumer
does not wait for the Affiliated Asynchronous Last WQE Reached Event,
then WQE and Data Segment leakage may occur. Therefore, it is good
programming practice to teardown a QP that is associated with an SRQ
by using the following process:
 - Put the QP in the Error State;
 - wait for the Affiliated Asynchronous Last WQE Reached Event;
 - either:
   - drain the CQ by invoking the Poll CQ verb and either wait for CQ
     to be empty or the number of Poll CQ operations has exceeded
     CQ capacity size; or
   - post another WR that completes on the same CQ and wait for this
     WR to return as a WC;
 - and then invoke a Destroy QP or Reset QP."

Catch the Last WQE Reached event in the core layer so that ULP drivers do not
need extra logic during the drain and destroy QP flows.

The "Last WQE Reached" event will only be send on the errant QP, for
signaling that the SRQ flushed all the WQEs that have been dequeued from
the SRQ for processing by the associated QP.

Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
---
 drivers/infiniband/core/verbs.c | 83 ++++++++++++++++++++++++++++++++-
 include/rdma/ib_verbs.h         |  2 +
 2 files changed, 84 insertions(+), 1 deletion(-)
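
An illustrative sketch (not part of the patch) of the per-ULP teardown
sequence the spec text above describes, and which this series moves into the
core. The ulp_* names are hypothetical, the event handler is assumed to be
registered via ib_qp_init_attr.event_handler at QP creation, and error
handling is omitted; only the ib_* calls are the existing kernel verbs API.

#include <linux/completion.h>
#include <rdma/ib_verbs.h>

struct ulp_queue {
	struct ib_qp *qp;
	struct ib_cq *cq;		/* CQ shared with the SRQ */
	struct completion last_wqe;	/* init_completion() at QP creation */
};

static void ulp_qp_event_handler(struct ib_event *event, void *context)
{
	struct ulp_queue *q = context;

	/* the ULP signals its own completion on Last WQE Reached */
	if (event->event == IB_EVENT_QP_LAST_WQE_REACHED)
		complete(&q->last_wqe);
}

static void ulp_teardown_srq_qp(struct ulp_queue *q)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	int n, polled = 0;

	/* 1. put the QP in the Error state */
	ib_modify_qp(q->qp, &attr, IB_QP_STATE);

	/* 2. wait for the affiliated Last WQE Reached event */
	wait_for_completion(&q->last_wqe);

	/* 3. drain: poll until the CQ is empty or CQ-capacity CQEs consumed */
	while (polled < q->cq->cqe) {
		n = ib_process_cq_direct(q->cq, q->cq->cqe - polled);
		if (!n)
			break;
		polled += n;
	}

	/* 4. only now is it safe to destroy the QP */
	ib_destroy_qp(q->qp);
}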

Comments

Bart Van Assche June 18, 2024, 4:07 p.m. UTC | #1
On 6/17/24 5:10 PM, Max Gurtovoy wrote:
> +	if (wait_for_completion_timeout(&qp->srq_completion, 10 * HZ) > 0) {
> +		while (polled != cq->cqe) {
> +			n = ib_process_cq_direct(cq, cq->cqe - polled);
> +			if (!n)
> +				return;
> +			polled += n;
> +		}
> +	}

Why a hardcoded timeout (10 * HZ) instead of waiting forever?

Thanks,

Bart.
Sagi Grimberg June 19, 2024, 9:09 a.m. UTC | #2
On 18/06/2024 3:10, Max Gurtovoy wrote:
> To avoid leakage for QPs associated with an SRQ, according to the IB spec
> (section 10.3.1):
>
> "Note, for QPs that are associated with an SRQ, the Consumer should take
> the QP through the Error State before invoking a Destroy QP or a Modify
> QP to the Reset State. The Consumer may invoke the Destroy QP without
> first performing a Modify QP to the Error State and waiting for the Affiliated
> Asynchronous Last WQE Reached Event. However, if the Consumer
> does not wait for the Affiliated Asynchronous Last WQE Reached Event,
> then WQE and Data Segment leakage may occur. Therefore, it is good
> programming practice to teardown a QP that is associated with an SRQ
> by using the following process:
>   - Put the QP in the Error State;
>   - wait for the Affiliated Asynchronous Last WQE Reached Event;
>   - either:
>     - drain the CQ by invoking the Poll CQ verb and either wait for CQ
>       to be empty or the number of Poll CQ operations has exceeded
>       CQ capacity size; or
>     - post another WR that completes on the same CQ and wait for this
>       WR to return as a WC;
>   - and then invoke a Destroy QP or Reset QP."
>
> Catch the Last WQE Reached Event in the core layer without involving the
> ULP drivers with extra logic during drain and destroy QP flows.
>
> The "Last WQE Reached" event will only be send on the errant QP, for
> signaling that the SRQ flushed all the WQEs that have been dequeued from
> the SRQ for processing by the associated QP.
>
> Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
> ---
>   drivers/infiniband/core/verbs.c | 83 ++++++++++++++++++++++++++++++++-
>   include/rdma/ib_verbs.h         |  2 +
>   2 files changed, 84 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
> index 94a7f3b0c71c..9e4df7d40e0c 100644
> --- a/drivers/infiniband/core/verbs.c
> +++ b/drivers/infiniband/core/verbs.c
> @@ -1101,6 +1101,16 @@ EXPORT_SYMBOL(ib_destroy_srq_user);
>   
>   /* Queue pairs */
>   
> +static void __ib_qp_event_handler(struct ib_event *event, void *context)
> +{
> +	struct ib_qp *qp = event->element.qp;
> +
> +	if (event->event == IB_EVENT_QP_LAST_WQE_REACHED)
> +		complete(&qp->srq_completion);
> +	else if (qp->registered_event_handler)
> +		qp->registered_event_handler(event, qp->qp_context);

There is no reason whatsoever to withhold the LAST_WQE_REACHED event from
the ULP.
The ULP may be interested in consuming this event.

This should become:

+static void __ib_qp_event_handler(struct ib_event *event, void *context)
+{
+	struct ib_qp *qp = event->element.qp;
+
+	if (event->event == IB_EVENT_QP_LAST_WQE_REACHED)
+		complete(&qp->srq_completion);
+	if (qp->registered_event_handler)
+		qp->registered_event_handler(event, qp->qp_context);




> +}
> +
>   static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
>   {
>   	struct ib_qp *qp = context;
> @@ -1221,7 +1231,10 @@ static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd,
>   	qp->qp_type = attr->qp_type;
>   	qp->rwq_ind_tbl = attr->rwq_ind_tbl;
>   	qp->srq = attr->srq;
> -	qp->event_handler = attr->event_handler;
> +	if (qp->srq)
> +		init_completion(&qp->srq_completion);

I think that if you unconditionally complete, you should also 
unconditionally initialize.
Sagi Grimberg June 19, 2024, 9:14 a.m. UTC | #3
On 18/06/2024 19:07, Bart Van Assche wrote:
> On 6/17/24 5:10 PM, Max Gurtovoy wrote:
>> +    if (wait_for_completion_timeout(&qp->srq_completion, 10 * HZ) > 
>> 0) {

I think this warrants a comment on why you stop after consuming cq->cqe
completions (i.e. shared completions).

>> +        while (polled != cq->cqe) {
>> +            n = ib_process_cq_direct(cq, cq->cqe - polled);
>> +            if (!n)
>> +                return;
>> +            polled += n;
>> +        }
>> +    }
>
> Why a hardcoded timeout (10 * HZ) instead of waiting forever?

Agreed. Is there a scenario where the IB event is missed or something?
Max Gurtovoy June 19, 2024, 11:12 a.m. UTC | #4
On 19/06/2024 12:14, Sagi Grimberg wrote:
>
>
> On 18/06/2024 19:07, Bart Van Assche wrote:
>> On 6/17/24 5:10 PM, Max Gurtovoy wrote:
>>> +    if (wait_for_completion_timeout(&qp->srq_completion, 10 * HZ) > 
>>> 0) {
>
> I think this warrants a comment on why you stop after consuming 
> cq->cqe completions
> (i.e. shared completions).
>
There is a full explanation in the function documentation.


>>> +        while (polled != cq->cqe) {
>>> +            n = ib_process_cq_direct(cq, cq->cqe - polled);
>>> +            if (!n)
>>> +                return;
>>> +            polled += n;
>>> +        }
>>> +    }
>>
>> Why a hardcoded timeout (10 * HZ) instead of waiting forever?
>
> Agreed. Is there a scenario where the IB event is missed or something?

I can change it to (60 * HZ) instead.

I prefer not to wait forever and get a stuck kernel if the
underlying device is defective.
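
A minimal sketch of the resulting ULP-side teardown, assuming a hypothetical
ulp_free_queue() helper: with a bounded wait inside ib_drain_qp(), this path
returns even if a defective device never delivers the event, at the cost of
possibly skipping the final CQ drain.

static void ulp_free_queue(struct ib_qp *qp)
{
	/* drains the SQ and, for SRQ-associated QPs, waits (bounded) for
	 * the Last WQE Reached event and then drains the shared CQ
	 */
	ib_drain_qp(qp);
	ib_destroy_qp(qp);
}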
Max Gurtovoy June 19, 2024, 11:16 a.m. UTC | #5
On 19/06/2024 12:09, Sagi Grimberg wrote:
>
>
> On 18/06/2024 3:10, Max Gurtovoy wrote:
>> To avoid leakage for QPs associated with an SRQ, according to the IB spec
>> (section 10.3.1):
>>
>> "Note, for QPs that are associated with an SRQ, the Consumer should take
>> the QP through the Error State before invoking a Destroy QP or a Modify
>> QP to the Reset State. The Consumer may invoke the Destroy QP without
>> first performing a Modify QP to the Error State and waiting for the 
>> Affiliated
>> Asynchronous Last WQE Reached Event. However, if the Consumer
>> does not wait for the Affiliated Asynchronous Last WQE Reached Event,
>> then WQE and Data Segment leakage may occur. Therefore, it is good
>> programming practice to teardown a QP that is associated with an SRQ
>> by using the following process:
>>   - Put the QP in the Error State;
>>   - wait for the Affiliated Asynchronous Last WQE Reached Event;
>>   - either:
>>     - drain the CQ by invoking the Poll CQ verb and either wait for CQ
>>       to be empty or the number of Poll CQ operations has exceeded
>>       CQ capacity size; or
>>     - post another WR that completes on the same CQ and wait for this
>>       WR to return as a WC;
>>   - and then invoke a Destroy QP or Reset QP."
>>
>> Catch the Last WQE Reached Event in the core layer without involving the
>> ULP drivers with extra logic during drain and destroy QP flows.
>>
>> The "Last WQE Reached" event will only be send on the errant QP, for
>> signaling that the SRQ flushed all the WQEs that have been dequeued from
>> the SRQ for processing by the associated QP.
>>
>> Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
>> ---
>>   drivers/infiniband/core/verbs.c | 83 ++++++++++++++++++++++++++++++++-
>>   include/rdma/ib_verbs.h         |  2 +
>>   2 files changed, 84 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/infiniband/core/verbs.c 
>> b/drivers/infiniband/core/verbs.c
>> index 94a7f3b0c71c..9e4df7d40e0c 100644
>> --- a/drivers/infiniband/core/verbs.c
>> +++ b/drivers/infiniband/core/verbs.c
>> @@ -1101,6 +1101,16 @@ EXPORT_SYMBOL(ib_destroy_srq_user);
>>     /* Queue pairs */
>>   +static void __ib_qp_event_handler(struct ib_event *event, void 
>> *context)
>> +{
>> +    struct ib_qp *qp = event->element.qp;
>> +
>> +    if (event->event == IB_EVENT_QP_LAST_WQE_REACHED)
>> +        complete(&qp->srq_completion);
>> +    else if (qp->registered_event_handler)
>> +        qp->registered_event_handler(event, qp->qp_context);
>
> There is no reason whatsoever to withhold the LAST_WQE_REACHED event from
> the ULP.
> The ULP may be interested in consuming this event.
>
> This should become:
>
> +static void __ib_qp_event_handler(struct ib_event *event, void *context)
> +{
> +    struct ib_qp *qp = event->element.qp;
> +
> +    if (event->event == IB_EVENT_QP_LAST_WQE_REACHED)
> +        complete(&qp->srq_completion);
> +    if (qp->registered_event_handler)
> +        qp->registered_event_handler(event, qp->qp_context);
>
>
Good idea.

Thanks.

>
>
>> +}
>> +
>>   static void __ib_shared_qp_event_handler(struct ib_event *event, 
>> void *context)
>>   {
>>       struct ib_qp *qp = context;
>> @@ -1221,7 +1231,10 @@ static struct ib_qp *create_qp(struct 
>> ib_device *dev, struct ib_pd *pd,
>>       qp->qp_type = attr->qp_type;
>>       qp->rwq_ind_tbl = attr->rwq_ind_tbl;
>>       qp->srq = attr->srq;
>> -    qp->event_handler = attr->event_handler;
>> +    if (qp->srq)
>> +        init_completion(&qp->srq_completion);
>
> I think that if you unconditionally complete, you should also 
> unconditionally initialize.

Non "SRQ" QP will not get "Last WQE reached" event. Unless device is 
defected.

Anyway, I'm ok with your suggestion. There is no hard in initializing it 
for Non "SRQ" QPs as well.

Patch

diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 94a7f3b0c71c..9e4df7d40e0c 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1101,6 +1101,16 @@  EXPORT_SYMBOL(ib_destroy_srq_user);
 
 /* Queue pairs */
 
+static void __ib_qp_event_handler(struct ib_event *event, void *context)
+{
+	struct ib_qp *qp = event->element.qp;
+
+	if (event->event == IB_EVENT_QP_LAST_WQE_REACHED)
+		complete(&qp->srq_completion);
+	else if (qp->registered_event_handler)
+		qp->registered_event_handler(event, qp->qp_context);
+}
+
 static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
 {
 	struct ib_qp *qp = context;
@@ -1221,7 +1231,10 @@  static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd,
 	qp->qp_type = attr->qp_type;
 	qp->rwq_ind_tbl = attr->rwq_ind_tbl;
 	qp->srq = attr->srq;
-	qp->event_handler = attr->event_handler;
+	if (qp->srq)
+		init_completion(&qp->srq_completion);
+	qp->event_handler = __ib_qp_event_handler;
+	qp->registered_event_handler = attr->event_handler;
 	qp->port = attr->port_num;
 	qp->qp_context = attr->qp_context;
 
@@ -2884,6 +2897,72 @@  static void __ib_drain_rq(struct ib_qp *qp)
 		wait_for_completion(&rdrain.done);
 }
 
+/*
+ * __ib_drain_srq() - Block until Last WQE Reached event arrives, or timeout
+ *                    expires.
+ * @qp:               queue pair associated with SRQ to drain
+ *
+ * Quoting 10.3.1 Queue Pair and EE Context States:
+ *
+ * Note, for QPs that are associated with an SRQ, the Consumer should take the
+ * QP through the Error State before invoking a Destroy QP or a Modify QP to the
+ * Reset State.  The Consumer may invoke the Destroy QP without first performing
+ * a Modify QP to the Error State and waiting for the Affiliated Asynchronous
+ * Last WQE Reached Event. However, if the Consumer does not wait for the
+ * Affiliated Asynchronous Last WQE Reached Event, then WQE and Data Segment
+ * leakage may occur. Therefore, it is good programming practice to tear down a
+ * QP that is associated with an SRQ by using the following process:
+ *
+ * - Put the QP in the Error State
+ * - Wait for the Affiliated Asynchronous Last WQE Reached Event;
+ * - either:
+ *       drain the CQ by invoking the Poll CQ verb and either wait for CQ
+ *       to be empty or the number of Poll CQ operations has exceeded
+ *       CQ capacity size;
+ * - or
+ *       post another WR that completes on the same CQ and wait for this
+ *       WR to return as a WC;
+ * - and then invoke a Destroy QP or Reset QP.
+ *
+ * We use the first option.
+ */
+static void __ib_drain_srq(struct ib_qp *qp)
+{
+	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+	struct ib_cq *cq;
+	int n, polled = 0;
+	int ret;
+
+	if (!qp->srq) {
+		WARN_ONCE(1, "QP 0x%p is not associated with SRQ\n", qp);
+		return;
+	}
+
+	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain shared recv queue: %d\n", ret);
+		return;
+	}
+
+	if (ib_srq_has_cq(qp->srq->srq_type)) {
+		cq = qp->srq->ext.cq;
+	} else if (qp->recv_cq) {
+		cq = qp->recv_cq;
+	} else {
+		WARN_ONCE(1, "QP 0x%p has no CQ associated with SRQ\n", qp);
+		return;
+	}
+
+	if (wait_for_completion_timeout(&qp->srq_completion, 10 * HZ) > 0) {
+		while (polled != cq->cqe) {
+			n = ib_process_cq_direct(cq, cq->cqe - polled);
+			if (!n)
+				return;
+			polled += n;
+		}
+	}
+}
+
 /**
  * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
  *		   application.
@@ -2962,6 +3041,8 @@  void ib_drain_qp(struct ib_qp *qp)
 	ib_drain_sq(qp);
 	if (!qp->srq)
 		ib_drain_rq(qp);
+	else
+		__ib_drain_srq(qp);
 }
 EXPORT_SYMBOL(ib_drain_qp);
 
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 477bf9dd5e71..5a193008f99c 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1788,6 +1788,7 @@  struct ib_qp {
 	struct list_head	rdma_mrs;
 	struct list_head	sig_mrs;
 	struct ib_srq	       *srq;
+	struct completion	srq_completion;
 	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
 	struct list_head	xrcd_list;
 
@@ -1797,6 +1798,7 @@  struct ib_qp {
 	struct ib_qp           *real_qp;
 	struct ib_uqp_object   *uobject;
 	void                  (*event_handler)(struct ib_event *, void *);
+	void                  (*registered_event_handler)(struct ib_event *, void *);
 	void		       *qp_context;
 	/* sgid_attrs associated with the AV's */
 	const struct ib_gid_attr *av_sgid_attr;