@@ -5620,6 +5620,8 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
return 0;
}
c = cmd_tagged_alloc(h, cmd);
+ if (c == NULL)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
/*
* Call alternate submit routine for I/O accelerated commands.
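Reviewer note: a minimal userspace sketch (not driver code) of the caller-side contract this hunk introduces: cmd_tagged_alloc() may now return NULL, and hpsa_scsi_queue_command() converts that into SCSI_MLQUEUE_DEVICE_BUSY so the SCSI midlayer requeues the command instead of the driver touching a slot that is still in flight. All names below (cmd_slot, pool_tagged_alloc, queue_command, QUEUE_*) are invented for illustration.

/* Illustrative userspace model only; none of these names are hpsa code. */
#include <stdio.h>

#define POOL_SIZE 8

enum { QUEUE_OK = 0, QUEUE_BUSY = 1 };  /* QUEUE_BUSY stands in for SCSI_MLQUEUE_DEVICE_BUSY */

struct cmd_slot {
        int tag;
        int busy;       /* nonzero while the command is in flight */
};

static struct cmd_slot pool[POOL_SIZE];

/* Hand out the slot addressed by the tag, or NULL if it is still busy. */
static struct cmd_slot *pool_tagged_alloc(int tag)
{
        struct cmd_slot *c = &pool[tag % POOL_SIZE];

        if (c->busy)
                return NULL;            /* collision: let the caller back off */
        c->busy = 1;
        c->tag = tag;
        return c;
}

static int queue_command(int tag)
{
        struct cmd_slot *c = pool_tagged_alloc(tag);

        if (c == NULL)
                return QUEUE_BUSY;      /* upper layer requeues and retries */
        /* ...build and submit the command here... */
        return QUEUE_OK;
}

int main(void)
{
        printf("first submit of tag 3:  %d\n", queue_command(3));      /* 0: accepted */
        printf("second submit of tag 3: %d\n", queue_command(3));      /* 1: busy */
        return 0;
}

Returning SCSI_MLQUEUE_DEVICE_BUSY asks the midlayer to requeue the command and retry it later rather than completing it with an error, which is the appropriate reaction to a transient allocation failure.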
@@ -6026,7 +6028,6 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
BUG();
}
- atomic_inc(&c->refcount);
if (unlikely(!hpsa_is_cmd_idle(c))) {
/*
* We expect that the SCSI layer will hand us a unique tag
@@ -6034,14 +6035,20 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
* two requests...because if the selected command isn't idle
* then someone is going to be very disappointed.
*/
- dev_err(&h->pdev->dev,
- "tag collision (tag=%d) in cmd_tagged_alloc().\n",
- idx);
- if (c->scsi_cmd != NULL)
- scsi_print_command(c->scsi_cmd);
- scsi_print_command(scmd);
+ if (idx != h->last_collision_tag) { /* Print once per tag */
+ dev_warn(&h->pdev->dev,
+ "%s: tag collision (tag=%d)\n", __func__, idx);
+ if (c->scsi_cmd != NULL)
+ scsi_print_command(c->scsi_cmd);
+ if (scmd)
+ scsi_print_command(scmd);
+ h->last_collision_tag = idx;
+ }
+ return NULL;
}
+ atomic_inc(&c->refcount);
+
hpsa_cmd_partial_init(h, idx, c);
return c;
}
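Reviewer note: a self-contained sketch of the allocation logic after this hunk, under the reading that a non-idle slot now fails the allocation and that last_collision_tag only deduplicates the warning. slot, tagged_alloc and report_collision are invented names; only the shape of the logic mirrors the patch.

/* Illustrative model of "fail the allocation, warn once per colliding tag". */
#include <stdio.h>

#define NSLOTS 8

struct slot {
        int in_use;
};

static struct slot slots[NSLOTS];
static int last_collision_tag = -1;     /* -1: nothing reported yet */

static void report_collision(int tag)
{
        /* Print once per tag so a burst of retries does not flood the log. */
        if (tag != last_collision_tag) {
                fprintf(stderr, "tag collision (tag=%d)\n", tag);
                last_collision_tag = tag;
        }
}

static struct slot *tagged_alloc(int tag)
{
        struct slot *s = &slots[tag % NSLOTS];

        if (s->in_use) {                /* mirrors !hpsa_is_cmd_idle(c) */
                report_collision(tag);
                return NULL;            /* caller maps this to a busy status */
        }
        s->in_use = 1;                  /* claim the slot only once it is known idle */
        return s;
}

int main(void)
{
        tagged_alloc(5);                /* succeeds */
        tagged_alloc(5);                /* collides: warns */
        tagged_alloc(5);                /* collides again: silent */
        return 0;
}

Deferring atomic_inc(&c->refcount) until after the idle check means the failure path no longer takes a reference on a command it does not hand out; the sketch mirrors that by claiming the slot only on the success path. Note the suppression is only against the most recently reported tag, so alternating collisions on two different tags would still log each switch. The hpsa.h hunk below adds the per-controller field that backs this suppression.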
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -174,6 +174,7 @@ struct ctlr_info {
struct CfgTable __iomem *cfgtable;
int interrupts_enabled;
int max_commands;
+ int last_collision_tag; /* tags are global */
atomic_t commands_outstanding;
# define PERF_MODE_INT 0
# define DOORBELL_INT 1
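Reviewer note: ctlr_info is presumably zero-initialized when allocated, so last_collision_tag starts at 0 and a first collision on tag 0 would be silently skipped. That is likely harmless here if, as in hpsa, the low tag values are reserved and rejected earlier in cmd_tagged_alloc(), but a generic reuse of this warn-once-per-value pattern would pick a sentinel no valid value can take. A hypothetical standalone helper (warn_once_per_value() is not a kernel API):

/* Hypothetical generic helper; not hpsa or kernel code. */
#include <stdio.h>
#include <limits.h>

static int last_reported = INT_MIN;     /* sentinel no valid tag can equal */

static void warn_once_per_value(int value)
{
        if (value == last_reported)
                return;                 /* already complained about this one */
        fprintf(stderr, "unexpected value: %d\n", value);
        last_reported = value;
}

int main(void)
{
        warn_once_per_value(0);         /* prints even for 0, thanks to the sentinel */
        warn_once_per_value(0);         /* suppressed */
        warn_once_per_value(7);         /* prints: value changed */
        return 0;
}

An alternative would be dev_warn_ratelimited(), which bounds the output by time rather than by value; the patch's choice keeps the log quiet for as long as the same tag keeps colliding.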