@@ -1182,6 +1182,47 @@ static void context_tasklet(unsigned long data)
	}
}

+static void ohci_isoc_context_work(struct work_struct *work)
+{
+	struct fw_iso_context *base = container_of(work, struct fw_iso_context, work);
+	struct iso_context *isoc_ctx = container_of(base, struct iso_context, base);
+	struct context *ctx = &isoc_ctx->context;
+	struct descriptor *d, *last;
+	u32 address;
+	int z;
+	struct descriptor_buffer *desc;
+
+	desc = list_entry(ctx->buffer_list.next, struct descriptor_buffer, list);
+	last = ctx->last;
+	while (last->branch_address != 0) {
+		struct descriptor_buffer *old_desc = desc;
+
+		address = le32_to_cpu(last->branch_address);
+		z = address & 0xf;
+		address &= ~0xf;
+		ctx->current_bus = address;
+
+		// If the branch address points to a buffer outside of the current buffer, advance
+		// to the next buffer.
+		if (address < desc->buffer_bus || address >= desc->buffer_bus + desc->used)
+			desc = list_entry(desc->list.next, struct descriptor_buffer, list);
+		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
+		last = find_branch_descriptor(d, z);
+
+		if (!ctx->callback(ctx, d, last))
+			break;
+
+		if (old_desc != desc) {
+			// If we've advanced to the next buffer, move the previous buffer to the
+			// free list.
+			old_desc->used = 0;
+			guard(spinlock_irqsave)(&ctx->ohci->lock);
+			list_move_tail(&old_desc->list, &ctx->buffer_list);
+		}
+		ctx->last = last;
+	}
+}
+
/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context. Must be called with ohci->lock held.
@@ -2242,8 +2283,7 @@ static irqreturn_t irq_handler(int irq, void *data)

		while (iso_event) {
			i = ffs(iso_event) - 1;
-			tasklet_schedule(
-				&ohci->ir_context_list[i].context.tasklet);
+			fw_iso_context_queue_work(&ohci->ir_context_list[i].base);
			iso_event &= ~(1 << i);
		}
	}
@@ -2254,8 +2294,7 @@ static irqreturn_t irq_handler(int irq, void *data)

		while (iso_event) {
			i = ffs(iso_event) - 1;
-			tasklet_schedule(
-				&ohci->it_context_list[i].context.tasklet);
+			fw_iso_context_queue_work(&ohci->it_context_list[i].base);
			iso_event &= ~(1 << i);
		}
	}
@@ -3130,6 +3169,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
	ret = context_init(&ctx->context, ohci, regs, callback);
	if (ret < 0)
		goto out_with_header;
+	fw_iso_context_init_work(&ctx->base, ohci_isoc_context_work);

	if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
		set_multichannel_mask(ohci, 0);
@@ -3227,7 +3267,6 @@ static int ohci_stop_iso(struct fw_iso_context *base)
	}
	flush_writes(ohci);
	context_stop(&ctx->context);
-	tasklet_kill(&ctx->context.tasklet);

	return 0;
}
@@ -3584,10 +3623,8 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int ret = 0;

-	tasklet_disable_in_atomic(&ctx->context.tasklet);
-
	if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
-		context_tasklet((unsigned long)&ctx->context);
+		ohci_isoc_context_work(&base->work);

		switch (base->type) {
		case FW_ISO_CONTEXT_TRANSMIT:
@@ -3607,8 +3644,6 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
		smp_mb__after_atomic();
	}

-	tasklet_enable(&ctx->context.tasklet);
-
	return ret;
}

This commit queues a work item for IT/IR events in the hardIRQ handler
to operate the corresponding isochronous context. The work item is
queued to any of the worker pools, so the callbacks for both in-kernel
unit protocol implementations and user space clients are executed in
sleepable work process context. The change can introduce errors due to
concurrent processing as well as sleeps in atomic context; these errors
are fixed by the following commits.

Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
---
 drivers/firewire/ohci.c | 55 +++++++++++++++++++++++++++++++++--------
 1 file changed, 45 insertions(+), 10 deletions(-)
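The helpers used above, fw_iso_context_init_work() and
fw_iso_context_queue_work(), come from the firewire-core side of this
series. For readers reviewing this patch in isolation, a minimal sketch
of their assumed shape follows; the 'work' member of struct
fw_iso_context and the card-wide 'isoc_wq' workqueue are taken from the
companion core change, so treat the exact names and bodies below as
assumptions rather than the final definitions:

#include <linux/firewire.h>
#include <linux/workqueue.h>

// Assumption: the core patch adds 'struct work_struct work' to
// struct fw_iso_context and a dedicated workqueue (here called
// 'isoc_wq') to struct fw_card.

static inline void fw_iso_context_init_work(struct fw_iso_context *ctx, work_func_t func)
{
	INIT_WORK(&ctx->work, func);
}

static inline void fw_iso_context_queue_work(struct fw_iso_context *ctx)
{
	// Dispatch the IT/IR event handler to sleepable process context
	// instead of running it in tasklet softIRQ context.
	queue_work(ctx->card->isoc_wq, &ctx->work);
}

With this shape, ohci_flush_iso_completions() can also call
ohci_isoc_context_work(&base->work) synchronously: the handler only
needs the work_struct to recover its context via container_of(), so it
behaves the same whether invoked directly or dispatched by the
workqueue.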