[v2,net-next,06/12] qed: Add LL2 slowpath handling

Message ID 1507020902-4952-7-git-send-email-Michal.Kalderon@cavium.com (mailing list archive)
State Not Applicable

Commit Message

Kalderon, Michal Oct. 3, 2017, 8:54 a.m. UTC
For the iWARP unaligned MPA flow, a slowpath event is required to flush an
MPA connection that has entered an unaligned state.
The flush ramrod is received on the LL2 queue, and a pre-registered
callback function is called to handle the flush event.

Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
---
 drivers/net/ethernet/qlogic/qed/qed_ll2.c | 40 +++++++++++++++++++++++++++++--
 include/linux/qed/qed_ll2_if.h            |  5 ++++
 2 files changed, 43 insertions(+), 2 deletions(-)
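
As a usage sketch, here is one way an LL2 client such as the iWARP code might
wire up the new callback. The qed_ll2_slowpath_cb typedef and the slowpath_cb
field come from the patch below; the handler body, the my_* names and the
cookie plumbing are hypothetical:

#include <linux/qed/qed_ll2_if.h>

/* Hypothetical flush handler matching the new qed_ll2_slowpath_cb typedef. */
static void my_slowpath_cb(void *cxt, u8 connection_handle,
			   u32 opaque_data_0, u32 opaque_data_1)
{
	/* Tear down the unaligned-MPA state for this connection; the two
	 * opaque words are echoed back by the firmware in the flush CQE. */
}

/* Hypothetical helper filling the callback table before the connection
 * is acquired; rx/tx completion and release callbacks are elided. */
static void my_fill_ll2_cbs(struct qed_ll2_cbs *cbs, void *cookie)
{
	cbs->slowpath_cb = my_slowpath_cb;	/* field added by this patch */
	cbs->cookie = cookie;
}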

Comments

Leon Romanovsky Oct. 3, 2017, 1:26 p.m. UTC | #1
On Tue, Oct 03, 2017 at 11:54:56AM +0300, Michal Kalderon wrote:
> For the iWARP unaligned MPA flow, a slowpath event is required to flush an
> MPA connection that has entered an unaligned state.
> The flush ramrod is received on the LL2 queue, and a pre-registered
> callback function is called to handle the flush event.
>
> Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
> Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
> ---
>  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 40 +++++++++++++++++++++++++++++--
>  include/linux/qed/qed_ll2_if.h            |  5 ++++
>  2 files changed, 43 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
> index 8eb9645..047f556 100644
> --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
> +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
> @@ -423,6 +423,41 @@ static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
>  }
>
>  static int
> +qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
> +			struct qed_ll2_info *p_ll2_conn,
> +			union core_rx_cqe_union *p_cqe,
> +			unsigned long *p_lock_flags)
> +{
> +	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
> +	struct core_rx_slow_path_cqe *sp_cqe;
> +
> +	sp_cqe = &p_cqe->rx_cqe_sp;
> +	if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
> +		DP_NOTICE(p_hwfn,
> +			  "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
> +			  sp_cqe->ramrod_cmd_id);
> +		return -EINVAL;
> +	}
> +
> +	if (!p_ll2_conn->cbs.slowpath_cb) {
> +		DP_NOTICE(p_hwfn,
> +			  "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
> +		return -EINVAL;
> +	}
> +
> +	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);

Interesting, you are unlocking a lock that was taken in the upper layer.
It is not an actual error, but the chances of introducing such an error
are pretty high (for example, after refactoring).

> +
> +	p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
> +				    p_ll2_conn->my_id,
> +				    le32_to_cpu(sp_cqe->opaque_data.data[0]),
> +				    le32_to_cpu(sp_cqe->opaque_data.data[1]));
> +
> +	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
> +
> +	return 0;
> +}
> +
> +static int
>  qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
>  			      struct qed_ll2_info *p_ll2_conn,
>  			      union core_rx_cqe_union *p_cqe,
> @@ -495,8 +530,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
>
>  		switch (cqe->rx_cqe_sp.type) {
>  		case CORE_RX_CQE_TYPE_SLOW_PATH:
> -			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
> -			rc = -EINVAL;
> +			rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
> +						     cqe, &flags);
>  			break;
>  		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
>  		case CORE_RX_CQE_TYPE_REGULAR:
> @@ -1214,6 +1249,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
>  	p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
>  	p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
>  	p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
> +	p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
>  	p_ll2_info->cbs.cookie = cbs->cookie;
>
>  	return 0;
> diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
> index 95fdf02..e755954 100644
> --- a/include/linux/qed/qed_ll2_if.h
> +++ b/include/linux/qed/qed_ll2_if.h
> @@ -151,11 +151,16 @@ struct qed_ll2_comp_rx_data {
>  				     dma_addr_t first_frag_addr,
>  				     bool b_last_fragment, bool b_last_packet);
>
> +typedef
> +void (*qed_ll2_slowpath_cb)(void *cxt, u8 connection_handle,
> +			    u32 opaque_data_0, u32 opaque_data_1);
> +
>  struct qed_ll2_cbs {
>  	qed_ll2_complete_rx_packet_cb rx_comp_cb;
>  	qed_ll2_release_rx_packet_cb rx_release_cb;
>  	qed_ll2_complete_tx_packet_cb tx_comp_cb;
>  	qed_ll2_release_tx_packet_cb tx_release_cb;
> +	qed_ll2_slowpath_cb slowpath_cb;
>  	void *cookie;
>  };
>
> --
> 1.8.3.1
>
David Miller Oct. 3, 2017, 5:17 p.m. UTC | #2
From: Michal Kalderon <Michal.Kalderon@cavium.com>
Date: Tue, 3 Oct 2017 11:54:56 +0300

> @@ -423,6 +423,41 @@ static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
>  }
>  
>  static int
> +qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
> +			struct qed_ll2_info *p_ll2_conn,
> +			union core_rx_cqe_union *p_cqe,
> +			unsigned long *p_lock_flags)
> +{
 ...
> +	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
> +

You can't drop this lock.

Another thread can enter the loop of our caller and process RX queue
entries, then we would return from here and try to process the same
entries again.
Kalderon, Michal Oct. 3, 2017, 6:05 p.m. UTC | #3
From: David Miller <davem@davemloft.net>
Sent: Tuesday, October 3, 2017 8:17 PM
>> @@ -423,6 +423,41 @@ static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
>>  }
>>
>>  static int
>> +qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
>> +                     struct qed_ll2_info *p_ll2_conn,
>> +                     union core_rx_cqe_union *p_cqe,
>> +                     unsigned long *p_lock_flags)
>> +{
>...
>> +     spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
>> +
>
>You can't drop this lock.
>
>Another thread can enter the loop of our caller and process RX queue
>entries, then we would return from here and try to process the same
>entries again.

The lock is there to synchronize access to the chains between qed_ll2_rxq_completion
and qed_ll2_post_rx_buffer. qed_ll2_rxq_completion can't be called from
different threads; the light L2 uses the single slowpath status block we have.
The reason we release the lock is to avoid a deadlock: calling into the
upper-layer driver may cause it to post additional rx buffers.
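
To make the deadlock concrete, a hedged sketch of a client callback that
reposts a buffer; the handler and its context structure are hypothetical, and
the qed_ll2_post_rx_buffer() signature is the one from this era's
qed_ll2_if.h. Since qed_ll2_post_rx_buffer() takes the same rx_queue lock,
calling it with p_rx->lock still held would self-deadlock:

struct my_iwarp_ctx {			/* hypothetical client context */
	struct qed_hwfn *p_hwfn;
	dma_addr_t buf_phys_addr;
	void *buf_cookie;
};

/* Hypothetical upper-layer flush handler; qed_ll2_post_rx_buffer()
 * acquires p_rx->lock internally, which is why qed_ll2_handle_slowpath()
 * must drop that lock before invoking us. */
static void my_iwarp_slowpath_cb(void *cxt, u8 connection_handle,
				 u32 opaque_data_0, u32 opaque_data_1)
{
	struct my_iwarp_ctx *ctx = cxt;

	/* ... flush the unaligned MPA connection ... */

	qed_ll2_post_rx_buffer(ctx->p_hwfn, connection_handle,
			       ctx->buf_phys_addr, 0 /* buf_len */,
			       ctx->buf_cookie, 1 /* notify_fw */);
}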



Kalderon, Michal Oct. 3, 2017, 7:48 p.m. UTC | #4
From: Leon Romanovsky <leon@kernel.org>
Sent: Tuesday, October 3, 2017 4:26 PM

>On Tue, Oct 03, 2017 at 11:54:56AM +0300, Michal Kalderon wrote:
>> For the iWARP unaligned MPA flow, a slowpath event is required to flush an
>> MPA connection that has entered an unaligned state.
>> The flush ramrod is received on the LL2 queue, and a pre-registered
>> callback function is called to handle the flush event.
>>
>> Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
>> Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
>> ---
>>  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 40 +++++++++++++++++++++++++++++--
>>  include/linux/qed/qed_ll2_if.h            |  5 ++++
>>  2 files changed, 43 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
>> index 8eb9645..047f556 100644
>> --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
>> +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
>> @@ -423,6 +423,41 @@ static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
>>  }
>>
>>  static int
>> +qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
>> +                     struct qed_ll2_info *p_ll2_conn,
>> +                     union core_rx_cqe_union *p_cqe,
>> +                     unsigned long *p_lock_flags)
>> +{
>> +     struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
>> +     struct core_rx_slow_path_cqe *sp_cqe;
>> +
>> +     sp_cqe = &p_cqe->rx_cqe_sp;
>> +     if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
>> +             DP_NOTICE(p_hwfn,
>> +                       "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
>> +                       sp_cqe->ramrod_cmd_id);
>> +             return -EINVAL;
>> +     }
>> +
>> +     if (!p_ll2_conn->cbs.slowpath_cb) {
>> +             DP_NOTICE(p_hwfn,
>> +                       "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
>> +             return -EINVAL;
>> +     }
>> +
>> +     spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
>
>Interesting, you are unlocking a lock that was taken in the upper layer.
>It is not an actual error, but the chances of introducing such an error
>are pretty high (for example, after refactoring).

Thanks. Ensuring that the lock is only unlocked inside the calling function would make
the calling function long and less readable.
The risk exists, but I think the fact that p_lock_flags is passed as a parameter should
give a strong indication in the future that the lock must be handled delicately.
Kalderon, Michal Oct. 5, 2017, 6:59 p.m. UTC | #5
From: Kalderon, Michal
Sent: Tuesday, October 3, 2017 9:05 PM
To: David Miller
>From: David Miller <davem@davemloft.net>
>Sent: Tuesday, October 3, 2017 8:17 PM
>>> @@ -423,6 +423,41 @@ static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
>>>  }
>>>
>>>  static int
>>> +qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
>>> +                     struct qed_ll2_info *p_ll2_conn,
>>> +                     union core_rx_cqe_union *p_cqe,
>>> +                     unsigned long *p_lock_flags)
>>> +{
>>...
>>> +     spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
>>> +
>>
>>You can't drop this lock.
>>
>>Another thread can enter the loop of our caller and process RX queue
>>entries, then we would return from here and try to process the same
>>entries again.
>
>The lock is there to synchronize access to the chains between qed_ll2_rxq_completion
>and qed_ll2_post_rx_buffer. qed_ll2_rxq_completion can't be called from
>different threads; the light L2 uses the single slowpath status block we have.
>The reason we release the lock is to avoid a deadlock: calling into the
>upper-layer driver may cause it to post additional rx buffers.

Dave, is there anything else needed from me on this?
I noticed the series is still in "Changes Requested".

thanks,
Michal


David Miller Oct. 5, 2017, 7:06 p.m. UTC | #6
From: "Kalderon, Michal" <Michal.Kalderon@cavium.com>
Date: Thu, 5 Oct 2017 18:59:04 +0000

> From: Kalderon, Michal
> Sent: Tuesday, October 3, 2017 9:05 PM
> To: David Miller
>>From: David Miller <davem@davemloft.net>
>>Sent: Tuesday, October 3, 2017 8:17 PM
>>>> @@ -423,6 +423,41 @@ static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
>>>>  }
>>>>
>>>>  static int
>>>> +qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
>>>> +                     struct qed_ll2_info *p_ll2_conn,
>>>> +                     union core_rx_cqe_union *p_cqe,
>>>> +                     unsigned long *p_lock_flags)
>>>> +{
>>>...
>>>> +     spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
>>>> +
>>>
>>>You can't drop this lock.
>>>
>>>Another thread can enter the loop of our caller and process RX queue
>>>entries, then we would return from here and try to process the same
>>>entries again.
>>
>>The lock is there to synchronize access to the chains between qed_ll2_rxq_completion
>>and qed_ll2_post_rx_buffer. qed_ll2_rxq_completion can't be called from
>>different threads; the light L2 uses the single slowpath status block we have.
>>The reason we release the lock is to avoid a deadlock: calling into the
>>upper-layer driver may cause it to post additional rx buffers.
> 
> Dave, is there anything else needed from me on this? 
> Noticed the series is still in "Changes Requested". 

I'm still not convinced that the lock dropping is legitimate.  What if a
spurious interrupt arrives?

If the execution path in the caller is serialized for some reason, why
are you using a spinlock instead of relying on that serialization for the
mutual exclusion necessary for these queue indexes?
Kalderon, Michal Oct. 5, 2017, 8:27 p.m. UTC | #7
From: David Miller <davem@davemloft.net>
Sent: Thursday, October 5, 2017 10:06 PM
>> From: Kalderon, Michal
>> Sent: Tuesday, October 3, 2017 9:05 PM
>> To: David Miller
>>>From: David Miller <davem@davemloft.net>
>>>Sent: Tuesday, October 3, 2017 8:17 PM
>>>>> @@ -423,6 +423,41 @@ static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
>>>>>  }
>>>>>
>>>>>  static int
>>>>> +qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
>>>>> +                     struct qed_ll2_info *p_ll2_conn,
>>>>> +                     union core_rx_cqe_union *p_cqe,
>>>>> +                     unsigned long *p_lock_flags)
>>>>> +{
>>>>...
>>>>> +     spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
>>>>> +
>>>>
>>>>You can't drop this lock.
>>>>
>>>>Another thread can enter the loop of our caller and process RX queue
>>>>entries, then we would return from here and try to process the same
>>>>entries again.
>>>
>>>The lock is there to synchronize access to the chains between qed_ll2_rxq_completion
>>>and qed_ll2_post_rx_buffer. qed_ll2_rxq_completion can't be called from
>>>different threads; the light L2 uses the single slowpath status block we have.
>>>The reason we release the lock is to avoid a deadlock: calling into the
>>>upper-layer driver may cause it to post additional rx buffers.
>>
>> Dave, is there anything else needed from me on this?
>> Noticed the series is still in "Changes Requested".
>
>I'm still not convinced that the lock dropping is legitimate.  What if a
>spurious interrupt arrives?
We're in the context of a dedicated tasklet here. So even if there is a spurious
interrupt, we're covered.
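
A minimal sketch of the guarantee being relied on here, with hypothetical
names (qed's actual hookup lives elsewhere in the driver): the kernel never
runs a given tasklet on two CPUs at once, so completion handling driven from
the single slowpath status-block tasklet is serialized by construction.

#include <linux/interrupt.h>

/* Hypothetical slowpath status-block tasklet; a tasklet never runs
 * concurrently with itself, so the completion handlers it drives,
 * including qed_ll2_rxq_completion(), are single-threaded. */
static void my_sp_sb_tasklet_fn(unsigned long cookie)
{
	/* ... read the status block, ack it, run the completion handlers ... */
}

static struct tasklet_struct my_sp_sb_tasklet;

static void my_sp_sb_init(void *p_hwfn)
{
	tasklet_init(&my_sp_sb_tasklet, my_sp_sb_tasklet_fn,
		     (unsigned long)p_hwfn);
}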

>
>If the execution path in the caller is serialized for some reason, why
>are you using a spinlock instead of relying on that serialization for the
>mutual exclusion necessary for these queue indexes?
Posting of rx buffers back to the light L2 is not always serialized and can
happen from different threads, depending on the light-L2 client.
Unlocking before calling the callback enables the callback function to post rx
buffers; in that case, serialization protects us. The spinlock is required for
the case where rx buffers are posted from a different thread, which could run
simultaneously with rxq_completion.
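
A hedged sketch of that second lock user, condensed from the shape of
qed_ll2_post_rx_buffer() (the helper name and reduced signature are
hypothetical): because it may run from an arbitrary client thread, it takes
the same rx_queue lock the completion loop holds while walking the chains.

/* Hypothetical condensation of the posting path's locking. */
static int my_post_rx_buffer(struct qed_ll2_rx_queue *p_rx,
			     dma_addr_t addr, void *cookie)
{
	unsigned long flags;

	spin_lock_irqsave(&p_rx->lock, flags);
	/* take a descriptor off free_descq, record addr/cookie in it,
	 * and move it onto posting_descq / active_descq */
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return 0;
}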

David Miller Oct. 6, 2017, 12:20 a.m. UTC | #8
From: "Kalderon, Michal" <Michal.Kalderon@cavium.com>
Date: Thu, 5 Oct 2017 20:27:22 +0000

> The spinlock is required for the case where rx buffers are posted
> from a different thread, which could run simultaneously with
> rxq_completion.

This only brings us back to my original argument: if the lock is
necessary in order to synchronize with those paths, how can you
possibly drop the lock safely here?

Is it because you re-read the head and tail pointers of the queue each
time around the loop?
Kalderon, Michal Oct. 6, 2017, 5:09 a.m. UTC | #9
From: linux-rdma-owner@vger.kernel.org <linux-rdma-owner@vger.kernel.org> on behalf of David Miller <davem@davemloft.net>

>From: "Kalderon, Michal" <Michal.Kalderon@cavium.com>
>Date: Thu, 5 Oct 2017 20:27:22 +0000
>
>> The spinlock is required for the case where rx buffers are posted
>> from a different thread, which could run simultaneously with
>> rxq_completion.
>
>This only brings us back to my original argument: if the lock is
>necessary in order to synchronize with those paths, how can you
>possibly drop the lock safely here?
>
>Is it because you re-read the head and tail pointers of the queue each
>time around the loop?

It's safe to drop the lock here because the implementation of the queue (qed_chain)
maintains both a consumer (tail) index and a producer (head) index.
The main loop reads the FW's value, stores it as the "new cons",
and traverses the queue only until it reaches that FW consumer value.
The post function adds more buffers to the end of the queue and updates the
producer; it never touches the consumer index, so posting buffers
doesn't affect the main loop.

The resources that are protected by the lock and accessed both inside the loop
and from the post-buffers path are three linked lists (free_descq, posting_descq and
active_descq); their heads and tails are read on every access
(elements are removed and moved between the lists).
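
A hedged sketch of the completion loop's shape, condensed from
qed_ll2_rxq_completion() as it looks in this era of the driver; the chain
helpers are from qed_chain.h, and everything around the dispatch is elided:

/* Simplified sketch, not the exact driver code. */
static int my_rxq_completion(struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 cq_new_idx, cq_old_idx;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);	/* FW consumer snapshot */
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	/* Only CQEs up to the snapshot are consumed; a producer appending
	 * buffers and advancing the producer index never disturbs this
	 * walk, which is why the slowpath handler may drop and re-take
	 * the lock around the upper-layer callback. */
	while (cq_new_idx != cq_old_idx) {
		union core_rx_cqe_union *cqe =
			qed_chain_consume(&p_rx->rcq_chain);

		/* ... dispatch cqe (regular, GSI offload or slowpath) ... */
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	}
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}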

Following this discussion, it looks like there was no need to take the lock in the
outer function, but only around the places that access these lists. However, this is a
delicate change which affects all the LL2 clients (iSCSI, FCoE, RoCE, iWARP). As this
series is rather large as it is and is intended for iWARP, please consider taking this
one as it is, since it doesn't change existing functionality and doesn't introduce
risk to other components.

thanks,
Michal


David Miller Oct. 9, 2017, 4:40 a.m. UTC | #10
From: "Kalderon, Michal" <Michal.Kalderon@cavium.com>
Date: Tue, 3 Oct 2017 18:05:32 +0000

> From: David Miller <davem@davemloft.net>
> Sent: Tuesday, October 3, 2017 8:17 PM
>>> @@ -423,6 +423,41 @@ static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
>>>  }
>>>
>>>  static int
>>> +qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
>>> +                     struct qed_ll2_info *p_ll2_conn,
>>> +                     union core_rx_cqe_union *p_cqe,
>>> +                     unsigned long *p_lock_flags)
>>> +{
>>...
>>> +     spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
>>> +
>>
>>You can't drop this lock.
>>
>>Another thread can enter the loop of our caller and process RX queue
>>entries, then we would return from here and try to process the same
>>entries again.
> 
> The lock is there to synchronize access to the chains between qed_ll2_rxq_completion
> and qed_ll2_post_rx_buffer. qed_ll2_rxq_completion can't be called from
> different threads; the light L2 uses the single slowpath status block we have.
> The reason we release the lock is to avoid a deadlock: calling into the
> upper-layer driver may cause it to post additional rx buffers.

Ok, please repost this patch series.

Thanks.
Patch

diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 8eb9645..047f556 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -423,6 +423,41 @@  static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
 }
 
 static int
+qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
+			struct qed_ll2_info *p_ll2_conn,
+			union core_rx_cqe_union *p_cqe,
+			unsigned long *p_lock_flags)
+{
+	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+	struct core_rx_slow_path_cqe *sp_cqe;
+
+	sp_cqe = &p_cqe->rx_cqe_sp;
+	if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
+		DP_NOTICE(p_hwfn,
+			  "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
+			  sp_cqe->ramrod_cmd_id);
+		return -EINVAL;
+	}
+
+	if (!p_ll2_conn->cbs.slowpath_cb) {
+		DP_NOTICE(p_hwfn,
+			  "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
+		return -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
+
+	p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
+				    p_ll2_conn->my_id,
+				    le32_to_cpu(sp_cqe->opaque_data.data[0]),
+				    le32_to_cpu(sp_cqe->opaque_data.data[1]));
+
+	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
+
+	return 0;
+}
+
+static int
 qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
 			      struct qed_ll2_info *p_ll2_conn,
 			      union core_rx_cqe_union *p_cqe,
@@ -495,8 +530,8 @@  static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 
 		switch (cqe->rx_cqe_sp.type) {
 		case CORE_RX_CQE_TYPE_SLOW_PATH:
-			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
-			rc = -EINVAL;
+			rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
+						     cqe, &flags);
 			break;
 		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
 		case CORE_RX_CQE_TYPE_REGULAR:
@@ -1214,6 +1249,7 @@  static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
 	p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
 	p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
 	p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
+	p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
 	p_ll2_info->cbs.cookie = cbs->cookie;
 
 	return 0;
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index 95fdf02..e755954 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -151,11 +151,16 @@  struct qed_ll2_comp_rx_data {
 				     dma_addr_t first_frag_addr,
 				     bool b_last_fragment, bool b_last_packet);
 
+typedef
+void (*qed_ll2_slowpath_cb)(void *cxt, u8 connection_handle,
+			    u32 opaque_data_0, u32 opaque_data_1);
+
 struct qed_ll2_cbs {
 	qed_ll2_complete_rx_packet_cb rx_comp_cb;
 	qed_ll2_release_rx_packet_cb rx_release_cb;
 	qed_ll2_complete_tx_packet_cb tx_comp_cb;
 	qed_ll2_release_tx_packet_cb tx_release_cb;
+	qed_ll2_slowpath_cb slowpath_cb;
 	void *cookie;
 };