diff mbox series

[06/15] hpsa: use scsi_host_busy_iter() to traverse outstanding commands

Message ID 20211125151048.103910-7-hare@suse.de (mailing list archive)
State Changes Requested
Headers show
Series scsi: enable reserved commands for LLDDs | expand

Commit Message

Hannes Reinecke Nov. 25, 2021, 3:10 p.m. UTC
Replace all hand-crafted command iterations with
scsi_host_busy_iter() calls.

Signed-off-by: Hannes Reinecke <hare@suse.de>
---
 drivers/scsi/hpsa.c | 117 +++++++++++++++++++++++++-------------------
 1 file changed, 67 insertions(+), 50 deletions(-)

Comments

John Garry Nov. 26, 2021, 9:33 a.m. UTC | #1
On 25/11/2021 15:10, Hannes Reinecke wrote:
>   static void hpsa_drain_accel_commands(struct ctlr_info *h)
>   {
> -	struct CommandList *c = NULL;
> -	int i, accel_cmds_out;
> -	int refcount;
> +	struct hpsa_command_iter_data iter_data = {
> +		.h = h,
> +	};
>   
>   	do { /* wait for all outstanding ioaccel commands to drain out */
> -		accel_cmds_out = 0;
> -		for (i = 0; i < h->nr_cmds; i++) {
> -			c = h->cmd_pool + i;
> -			refcount = atomic_inc_return(&c->refcount);
> -			if (refcount > 1) /* Command is allocated */
> -				accel_cmds_out += is_accelerated_cmd(c);
> -			cmd_free(h, c);
> -		}
> -		if (accel_cmds_out <= 0)
> +		iter_data.count = 0;
> +		scsi_host_busy_iter(h->scsi_host,
> +				    hpsa_drain_accel_commands_iter,
> +				    &iter_data);

I haven't followed this code exactly, but I assume that you want to
iterate over the reserved requests as well (or in other places in other drivers
in this series). For that to work we need to call
blk_mq_start_request(), right? I could not see it called.

> +		if (iter_data.count <= 0)
>   			break;
>   		msleep(100);
>   	} while (1);
> -- 2.29.2
Hannes Reinecke Nov. 27, 2021, 5 p.m. UTC | #2
On 11/26/21 10:33 AM, John Garry wrote:
> On 25/11/2021 15:10, Hannes Reinecke wrote:
>>   static void hpsa_drain_accel_commands(struct ctlr_info *h)
>>   {
>> -    struct CommandList *c = NULL;
>> -    int i, accel_cmds_out;
>> -    int refcount;
>> +    struct hpsa_command_iter_data iter_data = {
>> +        .h = h,
>> +    };
>>       do { /* wait for all outstanding ioaccel commands to drain out */
>> -        accel_cmds_out = 0;
>> -        for (i = 0; i < h->nr_cmds; i++) {
>> -            c = h->cmd_pool + i;
>> -            refcount = atomic_inc_return(&c->refcount);
>> -            if (refcount > 1) /* Command is allocated */
>> -                accel_cmds_out += is_accelerated_cmd(c);
>> -            cmd_free(h, c);
>> -        }
>> -        if (accel_cmds_out <= 0)
>> +        iter_data.count = 0;
>> +        scsi_host_busy_iter(h->scsi_host,
>> +                    hpsa_drain_accel_commands_iter,
>> +                    &iter_data);
> 
> I haven't followed this code exactly, but I assume that you want to 
> iterate over the reserved requests as well (or in other places in other drivers 
> in this series). For that to work we need to call 
> blk_mq_start_request(), right? I could not see it called.
> 
Actually, no; this is iterating over 'accel' commands, i.e. fastpath
commands for RAID I/O. Which none of the reserved commands are.

But that doesn't mean that your comment about reserved commands not 
being started is invalid. Hmm.

Cheers,

Hannes
diff mbox series

Patch

diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c5f55b56fd2f..23babe177f5d 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1824,30 +1824,26 @@  static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
 	return rc;
 }
 
-static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
-						struct hpsa_scsi_dev_t *dev)
-{
-	int i;
-	int count = 0;
-
-	for (i = 0; i < h->nr_cmds; i++) {
-		struct CommandList *c = h->cmd_pool + i;
-		int refcount = atomic_inc_return(&c->refcount);
-
-		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
-				dev->scsi3addr)) {
-			unsigned long flags;
+struct hpsa_command_iter_data {
+	struct ctlr_info *h;
+	struct hpsa_scsi_dev_t *dev;
+	unsigned char *scsi3addr;
+	int count;
+};
 
-			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
-			if (!hpsa_is_cmd_idle(c))
-				++count;
-			spin_unlock_irqrestore(&h->lock, flags);
-		}
+static bool hpsa_find_outstanding_commands_iter(struct scsi_cmnd *sc,
+						void *data, bool reserved)
+{
+	struct hpsa_command_iter_data *iter_data = data;
+	struct ctlr_info *h = iter_data->h;
+	struct hpsa_scsi_dev_t *dev = iter_data->dev;
+	struct CommandList *c = h->cmd_pool + scsi_cmd_to_rq(sc)->tag;
 
-		cmd_free(h, c);
+	if (hpsa_cmd_dev_match(h, c, dev, dev->scsi3addr)) {
+		iter_data->count++;
+		return false;
 	}
-
-	return count;
+	return true;
 }
 
 #define NUM_WAIT 20
@@ -1857,13 +1853,20 @@  static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
 	int cmds = 0;
 	int waits = 0;
 	int num_wait = NUM_WAIT;
+	struct hpsa_command_iter_data iter_data = {
+		.h = h,
+		.dev = device,
+	};
 
 	if (device->external)
 		num_wait = HPSA_EH_PTRAID_TIMEOUT;
 
 	while (1) {
-		cmds = hpsa_find_outstanding_commands_for_dev(h, device);
-		if (cmds == 0)
+		iter_data.count = 0;
+		scsi_host_busy_iter(h->scsi_host,
+				    hpsa_find_outstanding_commands_iter,
+				    &iter_data);
+		if (iter_data.count == 0)
 			break;
 		if (++waits > num_wait)
 			break;
@@ -8180,27 +8183,34 @@  static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
 	kfree(h);				/* init_one 1 */
 }
 
+static bool fail_all_outstanding_cmds_iter(struct scsi_cmnd *sc, void *data,
+					   bool reserved)
+{
+	struct hpsa_command_iter_data *iter_data = data;
+	struct ctlr_info *h = iter_data->h;
+	struct CommandList *c = h->cmd_pool + scsi_cmd_to_rq(sc)->tag;
+
+	c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
+	finish_cmd(c);
+	atomic_dec(&h->commands_outstanding);
+	iter_data->count++;
+
+	return true;
+}
+
 /* Called when controller lockup detected. */
 static void fail_all_outstanding_cmds(struct ctlr_info *h)
 {
-	int i, refcount;
-	struct CommandList *c;
-	int failcount = 0;
+	struct hpsa_command_iter_data iter_data = {
+		.h = h,
+		.count = 0,
+	};
 
 	flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
-	for (i = 0; i < h->nr_cmds; i++) {
-		c = h->cmd_pool + i;
-		refcount = atomic_inc_return(&c->refcount);
-		if (refcount > 1) {
-			c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
-			finish_cmd(c);
-			atomic_dec(&h->commands_outstanding);
-			failcount++;
-		}
-		cmd_free(h, c);
-	}
+	scsi_host_busy_iter(h->scsi_host,
+			    fail_all_outstanding_cmds_iter, &iter_data);
 	dev_warn(&h->pdev->dev,
-		"failed %d commands in fail_all\n", failcount);
+		"failed %d commands in fail_all\n", iter_data.count);
 }
 
 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
@@ -9499,22 +9509,29 @@  static int is_accelerated_cmd(struct CommandList *c)
 	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
 }
 
+static bool hpsa_drain_accel_commands_iter(struct scsi_cmnd *sc, void *data,
+					   bool reserved)
+{
+	struct hpsa_command_iter_data *iter_data = data;
+	struct ctlr_info *h = iter_data->h;
+	struct CommandList *c = h->cmd_pool + scsi_cmd_to_rq(sc)->tag;
+
+	iter_data->count += is_accelerated_cmd(c);
+	return true;
+}
+
 static void hpsa_drain_accel_commands(struct ctlr_info *h)
 {
-	struct CommandList *c = NULL;
-	int i, accel_cmds_out;
-	int refcount;
+	struct hpsa_command_iter_data iter_data = {
+		.h = h,
+	};
 
 	do { /* wait for all outstanding ioaccel commands to drain out */
-		accel_cmds_out = 0;
-		for (i = 0; i < h->nr_cmds; i++) {
-			c = h->cmd_pool + i;
-			refcount = atomic_inc_return(&c->refcount);
-			if (refcount > 1) /* Command is allocated */
-				accel_cmds_out += is_accelerated_cmd(c);
-			cmd_free(h, c);
-		}
-		if (accel_cmds_out <= 0)
+		iter_data.count = 0;
+		scsi_host_busy_iter(h->scsi_host,
+				    hpsa_drain_accel_commands_iter,
+				    &iter_data);
+		if (iter_data.count <= 0)
 			break;
 		msleep(100);
 	} while (1);