@@ -2010,6 +2010,19 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
return retry; /* retry on raid path? */
}
 
+static void hpsa_cmd_free_and_done(struct ctlr_info *h,
+ struct CommandList *c, struct scsi_cmnd *cmd)
+{
+ cmd_free(h, c);
+ cmd->scsi_done(cmd);
+}
+
+static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
+{
+ INIT_WORK(&c->work, hpsa_command_resubmit_worker);
+ queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
+}
+
static void process_ioaccel2_completion(struct ctlr_info *h,
struct CommandList *c, struct scsi_cmnd *cmd,
struct hpsa_scsi_dev_t *dev)
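The two helpers above fold the repeated free-and-complete and resubmit sequences into single calls. hpsa_retry_cmd() hands the command to the driver's resubmit_wq and queues it on the current CPU, so the retry is processed on the CPU that saw the completion. A minimal module sketch of that deferred-resubmit pattern follows; the names, the tag field, and the WQ_MEM_RECLAIM flag are illustrative assumptions, not taken from hpsa.

/*
 * Sketch only: INIT_WORK() plus queue_work_on() on the current CPU,
 * mirroring hpsa_retry_cmd(). All names here are hypothetical.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

static struct workqueue_struct *retry_wq;

struct retry_ctx {
	struct work_struct work;
	int tag;		/* stand-in for the command being retried */
};

static struct retry_ctx ctx;

static void retry_worker(struct work_struct *work)
{
	struct retry_ctx *r = container_of(work, struct retry_ctx, work);

	pr_info("resubmitting tag %d on CPU %d\n", r->tag,
		raw_smp_processor_id());
}

static int __init retry_init(void)
{
	retry_wq = alloc_workqueue("retry_wq", WQ_MEM_RECLAIM, 0);
	if (!retry_wq)
		return -ENOMEM;

	ctx.tag = 42;
	INIT_WORK(&ctx.work, retry_worker);
	/* Queue on the submitting CPU, as hpsa_retry_cmd() does. */
	queue_work_on(raw_smp_processor_id(), retry_wq, &ctx.work);
	return 0;
}

static void __exit retry_exit(void)
{
	destroy_workqueue(retry_wq);	/* flushes pending work first */
}

module_init(retry_init);
module_exit(retry_exit);
MODULE_LICENSE("GPL");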
@@ -2018,13 +2031,11 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
 
/* check for good status */
if (likely(c2->error_data.serv_response == 0 &&
- c2->error_data.status == 0)) {
- cmd_free(h, c);
- cmd->scsi_done(cmd);
- return;
- }
+ c2->error_data.status == 0))
+ return hpsa_cmd_free_and_done(h, c, cmd);
 
- /* Any RAID offload error results in retry which will use
+ /*
+ * Any RAID offload error results in retry which will use
* the normal I/O path so the controller can handle whatever's
* wrong.
*/
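Two details are worth calling out in this hunk. First, the block comment is reflowed to the kernel's preferred multi-line style, with the opening /* on a line of its own (Documentation/process/coding-style.rst, chapter 8). Second, the new "return hpsa_cmd_free_and_done(h, c, cmd);" form returns a void expression from a void function: ISO C forbids that, but the GNU C dialect the kernel is built with accepts it when the returned expression itself has void type, which keeps the call and the early exit on one line. A standalone illustration with hypothetical names, built with gcc's default GNU dialect:

/* Sketch only: the "return a void call" idiom used throughout the patch. */
#include <stdio.h>

static void finish(int status)
{
	printf("completed, status %d\n", status);
}

static void handle(int status)
{
	if (status == 0)
		return finish(status);	/* call and early exit in one line */

	printf("taking the error path\n");
	finish(status);
}

int main(void)
{
	handle(0);
	handle(-1);
	return 0;
}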
@@ -2034,19 +2045,14 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
if (c2->error_data.status ==
IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
dev->offload_enabled = 0;
- goto retry_cmd;
+
+ return hpsa_retry_cmd(h, c);
}
 
if (handle_ioaccel_mode2_error(h, c, cmd, c2))
- goto retry_cmd;
-
- cmd_free(h, c);
- cmd->scsi_done(cmd);
- return;
+ return hpsa_retry_cmd(h, c);
 
-retry_cmd:
- INIT_WORK(&c->work, hpsa_command_resubmit_worker);
- queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
+ return hpsa_cmd_free_and_done(h, c, cmd);
}
 
/* Returns 0 on success, < 0 otherwise. */
@@ -2119,22 +2125,15 @@ static void complete_scsi_command(struct CommandList *cp)
if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
/* DID_NO_CONNECT will prevent a retry */
cmd->result = DID_NO_CONNECT << 16;
- cmd_free(h, cp);
- cmd->scsi_done(cmd);
- return;
+ return hpsa_cmd_free_and_done(h, cp, cmd);
}
 
if (cp->cmd_type == CMD_IOACCEL2)
return process_ioaccel2_completion(h, cp, cmd, dev);
 
scsi_set_resid(cmd, ei->ResidualCnt);
- if (ei->CommandStatus == 0) {
- if (cp->cmd_type == CMD_IOACCEL1)
- atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
- cmd_free(h, cp);
- cmd->scsi_done(cmd);
- return;
- }
+ if (ei->CommandStatus == 0)
+ return hpsa_cmd_free_and_done(h, cp, cmd);
 
/* For I/O accelerator commands, copy over some fields to the normal
* CISS header used below for error handling.
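The cmd->result = DID_NO_CONNECT << 16 assignment above sets the host byte of the SCSI result word, and DID_NO_CONNECT is one of the host codes the midlayer treats as final, which is what the in-line comment means by preventing a retry (the resubmit worker below uses DID_IMM_RETRY << 16 the same way). A userspace sketch of the layout; the constants are copied from include/scsi/scsi.h and host_byte() mirrors the kernel macro:

/* Sketch only: how the host byte is packed into scmd->result. */
#include <stdio.h>

#define DID_OK		0x00	/* no host-side error */
#define DID_NO_CONNECT	0x01	/* could not connect; treated as final */
#define DID_IMM_RETRY	0x09	/* retry without decrementing retry count */

static unsigned int host_byte(unsigned int result)
{
	return (result >> 16) & 0xff;	/* host byte is bits 16-23 */
}

int main(void)
{
	unsigned int result = DID_NO_CONNECT << 16;

	printf("result=0x%08x host_byte=0x%02x\n", result, host_byte(result));
	return 0;
}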
@@ -2156,10 +2155,7 @@ static void complete_scsi_command(struct CommandList *cp)
if (is_logical_dev_addr_mode(dev->scsi3addr)) {
if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
dev->offload_enabled = 0;
- INIT_WORK(&cp->work, hpsa_command_resubmit_worker);
- queue_work_on(raw_smp_processor_id(),
- h->resubmit_wq, &cp->work);
- return;
+ return hpsa_retry_cmd(h, cp);
}
}
 
@@ -2290,8 +2286,8 @@ static void complete_scsi_command(struct CommandList *cp)
dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
cp, ei->CommandStatus);
}
- cmd_free(h, cp);
- cmd->scsi_done(cmd);
+
+ return hpsa_cmd_free_and_done(h, cp, cmd);
}
 
static void hpsa_pci_unmap(struct pci_dev *pdev,
@@ -4544,16 +4540,13 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)
{
struct scsi_cmnd *cmd;
struct hpsa_scsi_dev_t *dev;
- struct CommandList *c =
- container_of(work, struct CommandList, work);
+ struct CommandList *c = container_of(work, struct CommandList, work);
 
cmd = c->scsi_cmd;
dev = cmd->device->hostdata;
if (!dev) {
cmd->result = DID_NO_CONNECT << 16;
- cmd_free(c->h, c);
- cmd->scsi_done(cmd);
- return;
+ return hpsa_cmd_free_and_done(c->h, c, cmd);
}
if (c->cmd_type == CMD_IOACCEL2) {
struct ctlr_info *h = c->h;
@@ -4572,12 +4565,7 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)
* then get SCSI_MLQUEUE_HOST_BUSY.
*/
cmd->result = DID_IMM_RETRY << 16;
- cmd->scsi_done(cmd);
- cmd_free(h, c); /* FIX-ME: on merge, change
- * to cmd_tagged_free() and
- * ultimately to
- * hpsa_cmd_free_and_done(). */
- return;
+ return hpsa_cmd_free_and_done(h, c, cmd);
}
/* else, fall thru and resubmit down CISS path */
}
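The worker recovers its CommandList from the embedded work_struct with container_of(), which is why hpsa_retry_cmd() only needs to queue c->work and the worker still gets the whole command back. A userspace sketch of the idiom, using a simplified form of the kernel macro and hypothetical struct names:

/* Sketch only: recovering the enclosing struct from an embedded member. */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct command {
	int tag;
	struct work_struct work;	/* embedded, as in struct CommandList */
};

static void worker(struct work_struct *w)
{
	struct command *c = container_of(w, struct command, work);

	printf("worker sees command tag %d\n", c->tag);
}

int main(void)
{
	struct command c = { .tag = 7 };

	worker(&c.work);
	return 0;
}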
@@ -4641,9 +4629,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
if (rc == 0)
return 0;
if (rc == SCSI_MLQUEUE_HOST_BUSY) {
- cmd_free(h, c); /* FIX-ME: on merge, change to
- * cmd_tagged_free(), and ultimately
- * to hpsa_cmd_resolve_and_free(). */
+ cmd_free(h, c);
return SCSI_MLQUEUE_HOST_BUSY;
}
}
@@ -7761,8 +7747,6 @@ static void hpsa_flush_cache(struct ctlr_info *h)
struct CommandList *c;
int rc;
 
- /* Don't bother trying to flush the cache if locked up */
- /* FIXME not necessary if do_simple_cmd does the check */
if (unlikely(lockup_detected(h)))
return;
flush_buf = kzalloc(4, GFP_KERNEL);