
[v5,7/7] ufs: core: Add error handling for MCQ mode

Message ID 7b884263c9db9a9666086a345ede85bb56d9dfc7.1683872601.git.quic_nguyenb@quicinc.com (mailing list archive)
State Superseded
Series ufs: core: mcq: Add ufshcd_abort() and error handler support in MCQ mode

Commit Message

Bao D. Nguyen May 12, 2023, 6:28 a.m. UTC
Add support for error handling for MCQ mode.

Signed-off-by: Bao D. Nguyen <quic_nguyenb@quicinc.com>
---
 drivers/ufs/core/ufshcd.c | 85 +++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 74 insertions(+), 11 deletions(-)

Comments

Bart Van Assche May 19, 2023, 11:03 p.m. UTC | #1
On 5/11/23 23:28, Bao D. Nguyen wrote:
> @@ -6378,18 +6407,36 @@ static bool ufshcd_abort_all(struct ufs_hba *hba)
>   	bool needs_reset = false;
>   	int tag, ret;
>   
> -	/* Clear pending transfer requests */
> -	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
> -		ret = ufshcd_try_to_abort_task(hba, tag);
> -		dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
> -			hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
> -			ret ? "failed" : "succeeded");
> -		if (ret) {
> -			needs_reset = true;
> -			goto out;
> +	if (is_mcq_enabled(hba)) {
> +		struct ufshcd_lrb *lrbp;
> +		int tag;
> +
> +		for (tag = 0; tag < hba->nutrs; tag++) {
> +			lrbp = &hba->lrb[tag];
> +			if (!ufshcd_cmd_inflight(lrbp->cmd))
> +				continue;
> +			ret = ufshcd_try_to_abort_task(hba, tag);
> +			dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
> +				hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
> +				ret ? "failed" : "succeeded");
> +			if (ret) {
> +				needs_reset = true;
> +				goto out;
> +			}
> +		}
> +	} else {
> +		/* Clear pending transfer requests */
> +		for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
> +			ret = ufshcd_try_to_abort_task(hba, tag);
> +			dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
> +				hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
> +				ret ? "failed" : "succeeded");
> +			if (ret) {
> +				needs_reset = true;
> +				goto out;
> +			}
>   		}
>   	}

Please move the ufshcd_cmd_inflight() check into ufshcd_try_to_abort_task()
such that the same code path can be used for MCQ and legacy mode.

Thanks,

Bart.
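
A minimal sketch of this suggestion (hypothetical; the real
ufshcd_try_to_abort_task() carries more logic than shown): an early
return for commands that are no longer in flight would let
ufshcd_abort_all() keep a single loop for both modes.

static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
{
        struct ufshcd_lrb *lrbp = &hba->lrb[tag];

        /* Nothing to abort if the command already completed. */
        if (!ufshcd_cmd_inflight(lrbp->cmd))
                return 0;

        /* ... existing abort sequence ... */
}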
Bao D. Nguyen May 20, 2023, 12:11 a.m. UTC | #2
On 5/19/2023 4:03 PM, Bart Van Assche wrote:
> On 5/11/23 23:28, Bao D. Nguyen wrote:
>> @@ -6378,18 +6407,36 @@ static bool ufshcd_abort_all(struct ufs_hba *hba)
>>       bool needs_reset = false;
>>       int tag, ret;
>> -    /* Clear pending transfer requests */
>> -    for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
>> -        ret = ufshcd_try_to_abort_task(hba, tag);
>> -        dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
>> -            hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
>> -            ret ? "failed" : "succeeded");
>> -        if (ret) {
>> -            needs_reset = true;
>> -            goto out;
>> +    if (is_mcq_enabled(hba)) {
>> +        struct ufshcd_lrb *lrbp;
>> +        int tag;
>> +
>> +        for (tag = 0; tag < hba->nutrs; tag++) {
>> +            lrbp = &hba->lrb[tag];
>> +            if (!ufshcd_cmd_inflight(lrbp->cmd))
>> +                continue;
>> +            ret = ufshcd_try_to_abort_task(hba, tag);
>> +            dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
>> +                hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
>> +                ret ? "failed" : "succeeded");
>> +            if (ret) {
>> +                needs_reset = true;
>> +                goto out;
>> +            }
>> +        }
>> +    } else {
>> +        /* Clear pending transfer requests */
>> +        for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
>> +            ret = ufshcd_try_to_abort_task(hba, tag);
>> +            dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
>> +                hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
>> +                ret ? "failed" : "succeeded");
>> +            if (ret) {
>> +                needs_reset = true;
>> +                goto out;
>> +            }
>>           }
>>       }
> 
> Please move the ufshcd_cmd_inflight() check into ufshcd_try_to_abort_task()
> such that the same code path can be used for MCQ and legacy mode.
Thank you, Bart.
I will make the change.

Thanks
Bao

> 
> Thanks,
> 
> Bart.
Stanley Jhu May 22, 2023, 6:48 a.m. UTC | #3
Hi Bao,

Bao D. Nguyen <quic_nguyenb@quicinc.com> wrote on Friday, May 12, 2023 at 2:34 PM:
>
> Add support for error handling for MCQ mode.
>
> Signed-off-by: Bao D. Nguyen <quic_nguyenb@quicinc.com>
> ---
>  drivers/ufs/core/ufshcd.c | 85 +++++++++++++++++++++++++++++++++++++++++------
>  1 file changed, 74 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
> index ec07e49..9345118 100644
> --- a/drivers/ufs/core/ufshcd.c
> +++ b/drivers/ufs/core/ufshcd.c
> @@ -3148,6 +3148,16 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
>                 err = -ETIMEDOUT;
>                 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
>                         __func__, lrbp->task_tag);
> +
> +               /* MCQ mode */
> +               if (is_mcq_enabled(hba)) {
> +                       err = ufshcd_clear_cmd(hba, lrbp->task_tag);
> +                       if (!err)
> +                               hba->dev_cmd.complete = NULL;

How about always clearing hba->dev_cmd.complete? If ufshcd_clear_cmd()
fails (for example, times out), "complete" should be cleared, similar
to the "pending" case in the SDB path.

> +                       return err;
> +               }
> +
> +               /* SDB mode */
>                 if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
>                         /* successfully cleared the command, retry if needed */
>                         err = -EAGAIN;
> @@ -5581,6 +5591,10 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
>   */
>  static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
>  {
> +       struct ufshcd_lrb *lrbp;
> +       u32 hwq_num, utag;
> +       int tag;
> +
>         /* Resetting interrupt aggregation counters first and reading the
>          * DOOR_BELL afterward allows us to handle all the completed requests.
>          * In order to prevent other interrupts starvation the DB is read once
> @@ -5599,7 +5613,22 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
>          * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
>          * do not want polling to trigger spurious interrupt complaints.
>          */
> -       ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
> +       if (!is_mcq_enabled(hba)) {
> +               ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
> +               goto out;
> +       }
> +
> +       /* MCQ mode */
> +       for (tag = 0; tag < hba->nutrs; tag++) {
> +               lrbp = &hba->lrb[tag];
> +               if (ufshcd_cmd_inflight(lrbp->cmd)) {
> +                       utag = blk_mq_unique_tag(scsi_cmd_to_rq(lrbp->cmd));
> +                       hwq_num = blk_mq_unique_tag_to_hwq(utag);
> +                       ufshcd_poll(hba->host, hwq_num);
> +               }
> +       }

In SDB mode, the DOOR_BELL is reset by ufshcd_hba_stop(). All bits
that were previously set in DOOR_BELL are also set in "completed_reqs"
in ufshcd_poll(). This allows ufshcd_poll() to handle all outstanding
requests properly.

However, in MCQ mode, the CQ tail registers cannot provide the same
information after they are reset. Hence, they cannot be properly
referenced by ufshcd_poll().
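
For reference, the SDB-mode behavior above comes from this derivation
in ufshcd_poll() (paraphrased; the exact context may differ by kernel
version):

        /* Legacy (SDB) path: after ufshcd_hba_stop() clears DOOR_BELL,
         * every still-outstanding request reads back as completed. */
        tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
        completed_reqs = ~tr_doorbell & hba->outstanding_reqs;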

Thanks,
Stanley Chu
Stanley Jhu May 22, 2023, 6:56 a.m. UTC | #4
On Mon, May 22, 2023 at 2:48 PM Stanley Chu <chu.stanley@gmail.com> wrote:
>
> Hi Bao,
>
> Bao D. Nguyen <quic_nguyenb@quicinc.com> wrote on Friday, May 12, 2023 at 2:34 PM:
> >
> > Add support for error handling for MCQ mode.
> >
> > Signed-off-by: Bao D. Nguyen <quic_nguyenb@quicinc.com>
> > ---
> >  drivers/ufs/core/ufshcd.c | 85 +++++++++++++++++++++++++++++++++++++++++------
> >  1 file changed, 74 insertions(+), 11 deletions(-)
> >
> > diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
> > index ec07e49..9345118 100644
> > --- a/drivers/ufs/core/ufshcd.c
> > +++ b/drivers/ufs/core/ufshcd.c
> > @@ -3148,6 +3148,16 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
> >                 err = -ETIMEDOUT;
> >                 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
> >                         __func__, lrbp->task_tag);
> > +
> > +               /* MCQ mode */
> > +               if (is_mcq_enabled(hba)) {
> > +                       err = ufshcd_clear_cmd(hba, lrbp->task_tag);
> > +                       if (!err)
> > +                               hba->dev_cmd.complete = NULL;
>
> How about always clearing hba->dev_cmd.complete? If ufshcd_clear_cmd()
> fails (for example, times out), "complete" should be cleared, similar
> to the "pending" case in the SDB path.
>
> > +                       return err;
> > +               }
> > +
> > +               /* SDB mode */
> >                 if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
> >                         /* successfully cleared the command, retry if needed */
> >                         err = -EAGAIN;
> > @@ -5581,6 +5591,10 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
> >   */
> >  static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
> >  {
> > +       struct ufshcd_lrb *lrbp;
> > +       u32 hwq_num, utag;
> > +       int tag;
> > +
> >         /* Resetting interrupt aggregation counters first and reading the
> >          * DOOR_BELL afterward allows us to handle all the completed requests.
> >          * In order to prevent other interrupts starvation the DB is read once
> > @@ -5599,7 +5613,22 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
> >          * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
> >          * do not want polling to trigger spurious interrupt complaints.
> >          */
> > -       ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
> > +       if (!is_mcq_enabled(hba)) {
> > +               ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
> > +               goto out;
> > +       }
> > +
> > +       /* MCQ mode */
> > +       for (tag = 0; tag < hba->nutrs; tag++) {
> > +               lrbp = &hba->lrb[tag];
> > +               if (ufshcd_cmd_inflight(lrbp->cmd)) {
> > +                       utag = blk_mq_unique_tag(scsi_cmd_to_rq(lrbp->cmd));
> > +                       hwq_num = blk_mq_unique_tag_to_hwq(utag);
> > +                       ufshcd_poll(hba->host, hwq_num);
> > +               }
> > +       }
>
> In SDB mode, the DOOR_BELL is reset by ufshcd_hba_stop(). All bits
> that were previously set in DOOR_BELL are also set in "completed_reqs"
> in ufshcd_poll(). This allows ufshcd_poll() to handle all outstanding
> requests properly.
>
> However, in MCQ mode, the CQ tail registers cannot provide the same
> information after they are reset. Hence, they cannot be properly
> referenced by ufshcd_poll().

A fixed version of the sample is as follows; it has been tested on our end.

struct scsi_cmnd *cmd;

for (tag = 0; tag < hba->nutrs; tag++) {
        lrbp = &hba->lrb[tag];
        cmd = lrbp->cmd;
        if (ufshcd_cmd_inflight(cmd)) {
                set_host_byte(cmd, DID_ERROR);
                ufshcd_release_scsi_cmd(hba, lrbp);
                scsi_done(cmd);
        }
}
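
This drops the ufshcd_poll() call for the leftover commands entirely:
after the reset, the CQ tail registers no longer describe them, so the
loop completes each one back to the SCSI midlayer with DID_ERROR
instead.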

Thanks,
Stanley Chu
Bao D. Nguyen May 23, 2023, 6:58 a.m. UTC | #5
On 5/19/2023 4:03 PM, Bart Van Assche wrote:
> On 5/11/23 23:28, Bao D. Nguyen wrote:
>> @@ -6378,18 +6407,36 @@ static bool ufshcd_abort_all(struct ufs_hba *hba)
>>       bool needs_reset = false;
>>       int tag, ret;
>> -    /* Clear pending transfer requests */
>> -    for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
>> -        ret = ufshcd_try_to_abort_task(hba, tag);
>> -        dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
>> -            hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
>> -            ret ? "failed" : "succeeded");
>> -        if (ret) {
>> -            needs_reset = true;
>> -            goto out;
>> +    if (is_mcq_enabled(hba)) {
>> +        struct ufshcd_lrb *lrbp;
>> +        int tag;
>> +
>> +        for (tag = 0; tag < hba->nutrs; tag++) {
>> +            lrbp = &hba->lrb[tag];
>> +            if (!ufshcd_cmd_inflight(lrbp->cmd))
>> +                continue;
>> +            ret = ufshcd_try_to_abort_task(hba, tag);
>> +            dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
>> +                hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
>> +                ret ? "failed" : "succeeded");
>> +            if (ret) {
>> +                needs_reset = true;
>> +                goto out;
>> +            }
>> +        }
>> +    } else {
>> +        /* Clear pending transfer requests */
>> +        for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
>> +            ret = ufshcd_try_to_abort_task(hba, tag);
>> +            dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
>> +                hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
>> +                ret ? "failed" : "succeeded");
>> +            if (ret) {
>> +                needs_reset = true;
>> +                goto out;
>> +            }
>>           }
>>       }
> 
> Please move the ufshcd_cmd_inflight() check into ufshcd_try_to_abort_task()
> such that the same code path can be used for MCQ and legacy mode.
Hi Bart,
Because ufshcd_try_to_abort_task() is shared by the SDB and MCQ modes, I
feel a bit uncomfortable using the new function ufshcd_cmd_inflight() in
ufshcd_try_to_abort_task(). In this patch series, I am trying to avoid
changing the SDB error handling logic as much as possible and to only
add error handling support for MCQ mode. If you feel there is a strong
benefit to making the change, I will give it a try. Otherwise, I would
prefer not to touch the SDB error handling code that has been working well.
Please let me know.

Thanks,
Bao

> 
> Thanks,
> 
> Bart.
Bao D. Nguyen May 23, 2023, 7:01 a.m. UTC | #6
On 5/21/2023 11:56 PM, Stanley Chu wrote:
> On Mon, May 22, 2023 at 2:48 PM Stanley Chu <chu.stanley@gmail.com> wrote:
>>
>> Hi Bao,
>>
>> Bao D. Nguyen <quic_nguyenb@quicinc.com> wrote on Friday, May 12, 2023 at 2:34 PM:
>>>
>>> Add support for error handling for MCQ mode.
>>>
>>> Signed-off-by: Bao D. Nguyen <quic_nguyenb@quicinc.com>
>>> ---
>>>   drivers/ufs/core/ufshcd.c | 85 +++++++++++++++++++++++++++++++++++++++++------
>>>   1 file changed, 74 insertions(+), 11 deletions(-)
>>>
>>> diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
>>> index ec07e49..9345118 100644
>>> --- a/drivers/ufs/core/ufshcd.c
>>> +++ b/drivers/ufs/core/ufshcd.c
>>> @@ -3148,6 +3148,16 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
>>>                  err = -ETIMEDOUT;
>>>                  dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
>>>                          __func__, lrbp->task_tag);
>>> +
>>> +               /* MCQ mode */
>>> +               if (is_mcq_enabled(hba)) {
>>> +                       err = ufshcd_clear_cmd(hba, lrbp->task_tag);
>>> +                       if (!err)
>>> +                               hba->dev_cmd.complete = NULL;
>>
>> How about always clearing hba->dev_cmd.complete? If ufshcd_clear_cmd()
>> fails (for example, times out), "complete" should be cleared, similar
>> to the "pending" case in the SDB path.
>>
>>> +                       return err;
>>> +               }
>>> +
>>> +               /* SDB mode */
>>>                  if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
>>>                          /* successfully cleared the command, retry if needed */
>>>                          err = -EAGAIN;
>>> @@ -5581,6 +5591,10 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
>>>    */
>>>   static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
>>>   {
>>> +       struct ufshcd_lrb *lrbp;
>>> +       u32 hwq_num, utag;
>>> +       int tag;
>>> +
>>>          /* Resetting interrupt aggregation counters first and reading the
>>>           * DOOR_BELL afterward allows us to handle all the completed requests.
>>>           * In order to prevent other interrupts starvation the DB is read once
>>> @@ -5599,7 +5613,22 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
>>>           * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
>>>           * do not want polling to trigger spurious interrupt complaints.
>>>           */
>>> -       ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
>>> +       if (!is_mcq_enabled(hba)) {
>>> +               ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
>>> +               goto out;
>>> +       }
>>> +
>>> +       /* MCQ mode */
>>> +       for (tag = 0; tag < hba->nutrs; tag++) {
>>> +               lrbp = &hba->lrb[tag];
>>> +               if (ufshcd_cmd_inflight(lrbp->cmd)) {
>>> +                       utag = blk_mq_unique_tag(scsi_cmd_to_rq(lrbp->cmd));
>>> +                       hwq_num = blk_mq_unique_tag_to_hwq(utag);
>>> +                       ufshcd_poll(hba->host, hwq_num);
>>> +               }
>>> +       }
>>
>> In SDB mode, the DOOR_BELL is reset by ufshcd_hba_stop(). All bits
>> that were previously set in DOOR_BELL are also set in "completed_reqs"
>> in ufshcd_poll(). This allows ufshcd_poll() to handle all outstanding
>> requests properly.
>>
>> However, in MCQ mode, the CQ tail registers cannot provide the same
>> information after they are reset. Hence, they cannot be properly
>> referenced by ufshcd_poll().
> 
> A fixed version sample is as follows and has been tested on our end.
Thank you, Stanley. I will make the change.

> 
> struct scsi_cmnd *cmd;
> 
> for (tag = 0; tag < hba->nutrs; tag++) {
>          lrbp = &hba->lrb[tag];
>          cmd = lrbp->cmd;
>          if (ufshcd_cmd_inflight(cmd)) {
>                  set_host_byte(cmd, DID_ERROR);
>                  ufshcd_release_scsi_cmd(hba, lrbp);
>                  scsi_done(cmd);
>          }
> }
> 
> Thanks,
> Stanley Chu

Patch

diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index ec07e49..9345118 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -3148,6 +3148,16 @@  static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
 		err = -ETIMEDOUT;
 		dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
 			__func__, lrbp->task_tag);
+
+		/* MCQ mode */
+		if (is_mcq_enabled(hba)) {
+			err = ufshcd_clear_cmd(hba, lrbp->task_tag);
+			if (!err)
+				hba->dev_cmd.complete = NULL;
+			return err;
+		}
+
+		/* SDB mode */
 		if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
 			/* successfully cleared the command, retry if needed */
 			err = -EAGAIN;
@@ -5581,6 +5591,10 @@  static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
  */
 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
 {
+	struct ufshcd_lrb *lrbp;
+	u32 hwq_num, utag;
+	int tag;
+
 	/* Resetting interrupt aggregation counters first and reading the
 	 * DOOR_BELL afterward allows us to handle all the completed requests.
 	 * In order to prevent other interrupts starvation the DB is read once
@@ -5599,7 +5613,22 @@  static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
 	 * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
 	 * do not want polling to trigger spurious interrupt complaints.
 	 */
-	ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
+	if (!is_mcq_enabled(hba)) {
+		ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
+		goto out;
+	}
+
+	/* MCQ mode */
+	for (tag = 0; tag < hba->nutrs; tag++) {
+		lrbp = &hba->lrb[tag];
+		if (ufshcd_cmd_inflight(lrbp->cmd)) {
+			utag = blk_mq_unique_tag(scsi_cmd_to_rq(lrbp->cmd));
+			hwq_num = blk_mq_unique_tag_to_hwq(utag);
+			ufshcd_poll(hba->host, hwq_num);
+		}
+	}
+
+out:
 
 	return IRQ_HANDLED;
 }
@@ -6378,18 +6407,36 @@  static bool ufshcd_abort_all(struct ufs_hba *hba)
 	bool needs_reset = false;
 	int tag, ret;
 
-	/* Clear pending transfer requests */
-	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
-		ret = ufshcd_try_to_abort_task(hba, tag);
-		dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
-			hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
-			ret ? "failed" : "succeeded");
-		if (ret) {
-			needs_reset = true;
-			goto out;
+	if (is_mcq_enabled(hba)) {
+		struct ufshcd_lrb *lrbp;
+		int tag;
+
+		for (tag = 0; tag < hba->nutrs; tag++) {
+			lrbp = &hba->lrb[tag];
+			if (!ufshcd_cmd_inflight(lrbp->cmd))
+				continue;
+			ret = ufshcd_try_to_abort_task(hba, tag);
+			dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
+				hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+				ret ? "failed" : "succeeded");
+			if (ret) {
+				needs_reset = true;
+				goto out;
+			}
+		}
+	} else {
+		/* Clear pending transfer requests */
+		for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
+			ret = ufshcd_try_to_abort_task(hba, tag);
+			dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
+				hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+				ret ? "failed" : "succeeded");
+			if (ret) {
+				needs_reset = true;
+				goto out;
+			}
 		}
 	}
-
 	/* Clear pending task management requests */
 	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
 		if (ufshcd_clear_tm_cmd(hba, tag)) {
@@ -7321,6 +7368,8 @@  static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	unsigned long flags, pending_reqs = 0, not_cleared = 0;
 	struct Scsi_Host *host;
 	struct ufs_hba *hba;
+	struct ufs_hw_queue *hwq;
+	struct ufshcd_lrb *lrbp;
 	u32 pos, not_cleared_mask = 0;
 	int err;
 	u8 resp = 0xF, lun;
@@ -7336,6 +7385,20 @@  static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 		goto out;
 	}
 
+	if (is_mcq_enabled(hba)) {
+		for (pos = 0; pos < hba->nutrs; pos++) {
+			lrbp = &hba->lrb[pos];
+			if (ufshcd_cmd_inflight(lrbp->cmd) &&
+			    lrbp->lun == lun) {
+				ufshcd_clear_cmd(hba, pos);
+				hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
+				ufshcd_mcq_poll_cqe_lock(hba, hwq);
+			}
+		}
+		err = 0;
+		goto out;
+	}
+
 	/* clear the commands that were pending for corresponding LUN */
 	spin_lock_irqsave(&hba->outstanding_lock, flags);
 	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)