@@ -3406,9 +3406,9 @@ static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 t
return ireq;
}
-static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
- struct sas_task *task,
- u16 tag)
+struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
+ struct sas_task *task,
+ u16 tag)
{
struct isci_request *ireq;
@@ -3434,16 +3434,12 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
}
int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
- struct sas_task *task, u16 tag)
+ struct sas_task *task, struct isci_request *ireq)
{
enum sci_status status;
- struct isci_request *ireq;
unsigned long flags;
int ret = 0;
- /* do common allocation and init of request object. */
- ireq = isci_io_request_from_tag(ihost, task, tag);
-
status = isci_io_request_build(ihost, ireq, idev);
if (status != SCI_SUCCESS) {
dev_dbg(&ihost->pdev->dev,
@@ -291,7 +291,10 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
struct isci_tmf *isci_tmf,
u16 tag);
int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
- struct sas_task *task, u16 tag);
+ struct sas_task *task, struct isci_request *ireq);
+struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
+ struct sas_task *task,
+ u16 tag);
enum sci_status
sci_task_request_construct(struct isci_host *ihost,
struct isci_remote_device *idev,
@@ -162,18 +162,17 @@ int isci_task_execute_task(struct sas_task *task, gfp_t gfp_flags)
SAS_TASK_UNDELIVERED,
SAS_SAM_STAT_TASK_ABORTED);
} else {
+ struct isci_request *ireq;
+
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
+ /* do common allocation and init of request object. */
+ ireq = isci_io_request_from_tag(ihost, task, tag);
spin_unlock_irqrestore(&task->task_state_lock, flags);
/* build and send the request. */
- status = isci_request_execute(ihost, idev, task, tag);
+ status = isci_request_execute(ihost, idev, task, ireq);
if (status != SCI_SUCCESS) {
- spin_lock_irqsave(&task->task_state_lock, flags);
- /* Did not really start this command. */
- task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
if (test_bit(IDEV_GONE, &idev->flags)) {
/* Indicate that the device
* is gone.
@@ -498,7 +498,6 @@ int isci_task_abort_task(struct sas_task *task)
/* If task is already done, the request isn't valid */
if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
- (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
old_request) {
idev = isci_get_device(task->dev->lldd_dev);
target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
In the queue path, move the assignment of sas_task->lldd_task so that this
pointer and the SAS_TASK_AT_INITIATOR flag are set atomically. It is also not
required to clear SAS_TASK_AT_INITIATOR in the isci_task_execute_task() error
path, as it is cleared immediately afterwards in the isci_task_refuse() call.

Now the following items may be considered:
- SAS_TASK_STATE_DONE and SAS_TASK_AT_INITIATOR are mutually exclusive, apart
  from possibly when SAS_TASK_STATE_DONE is set in sas_scsi_find_task(), but
  that happens after .lldd_abort_task, i.e. the callback under consideration,
  has been called.
- If isci_task_refuse() is called in the queue path, then sas_task->lldd_task
  and SAS_TASK_AT_INITIATOR are cleared atomically in isci_task_refuse().
- In the completion path, SAS_TASK_STATE_DONE is set and SAS_TASK_AT_INITIATOR
  is cleared atomically, before sas_task->lldd_task is cleared later.

So in isci_task_abort_task(), if SAS_TASK_STATE_DONE is not set and
sas_task->lldd_task is still set, then SAS_TASK_AT_INITIATOR must be set, so
we can drop the check on SAS_TASK_AT_INITIATOR.

Signed-off-by: John Garry <john.garry@huawei.com>
---
 drivers/scsi/isci/request.c | 12 ++++--------
 drivers/scsi/isci/request.h |  5 ++++-
 drivers/scsi/isci/task.c    | 12 +++++-------
 3 files changed, 13 insertions(+), 16 deletions(-)
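
For anyone who wants to poke at the ordering argument above, below is a small,
self-contained userspace sketch of the same state machine. This is not driver
code: model_task, state_lock, queue_path, completion_path and
abort_may_proceed are invented names that stand in for sas_task,
task_state_lock and the queue/completion/abort paths. It only illustrates why
"SAS_TASK_STATE_DONE clear and lldd_task still set" implies
SAS_TASK_AT_INITIATOR once the pointer and the flag are published under one
lock:

/*
 * Hypothetical userspace model of the ordering argument in the commit
 * message.  Names are invented for this sketch only.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define TASK_STATE_DONE		(1U << 0)	/* models SAS_TASK_STATE_DONE */
#define TASK_AT_INITIATOR	(1U << 1)	/* models SAS_TASK_AT_INITIATOR */

struct model_task {
	pthread_mutex_t state_lock;	/* models sas_task->task_state_lock */
	unsigned int state_flags;	/* models sas_task->task_state_flags */
	void *lldd_task;		/* models sas_task->lldd_task */
};

/* Queue path after the patch: flag and pointer are published together. */
static void queue_path(struct model_task *t, void *ireq)
{
	pthread_mutex_lock(&t->state_lock);
	t->state_flags |= TASK_AT_INITIATOR;
	t->lldd_task = ireq;
	pthread_mutex_unlock(&t->state_lock);
}

/*
 * Completion path: DONE is set and AT_INITIATOR cleared atomically;
 * lldd_task is only cleared afterwards.
 */
static void completion_path(struct model_task *t)
{
	pthread_mutex_lock(&t->state_lock);
	t->state_flags |= TASK_STATE_DONE;
	t->state_flags &= ~TASK_AT_INITIATOR;
	pthread_mutex_unlock(&t->state_lock);

	t->lldd_task = NULL;
}

/*
 * Abort path check: with the above ordering, "DONE clear and lldd_task
 * still set" already implies AT_INITIATOR, so no separate test is needed.
 */
static bool abort_may_proceed(struct model_task *t)
{
	bool ok;

	pthread_mutex_lock(&t->state_lock);
	ok = !(t->state_flags & TASK_STATE_DONE) && t->lldd_task;
	if (ok && !(t->state_flags & TASK_AT_INITIATOR))
		printf("invariant violated\n");	/* never reached */
	pthread_mutex_unlock(&t->state_lock);
	return ok;
}

int main(void)
{
	struct model_task t = { .state_lock = PTHREAD_MUTEX_INITIALIZER };
	int dummy_ireq;

	queue_path(&t, &dummy_ireq);
	printf("abort after queueing:   %d\n", abort_may_proceed(&t));
	completion_path(&t);
	printf("abort after completion: %d\n", abort_may_proceed(&t));
	return 0;
}

Built with "gcc -pthread", abort_may_proceed() returns 1 after queue_path()
and 0 after completion_path(), matching the reasoning used to drop the
explicit SAS_TASK_AT_INITIATOR test in isci_task_abort_task().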