diff mbox series

[05/10] btrfs: remove irq disabling for btrfs_workqueue.list_lock

Message ID 20230314165910.373347-6-hch@lst.de (mailing list archive)
State New, archived
Headers show
Series [01/10] btrfs: use a plain workqueue for ordered_extent processing | expand

Commit Message

Christoph Hellwig March 14, 2023, 4:59 p.m. UTC
btrfs_queue_work is never called from irq context for works that have an
ordered_func, so the irq-disabling spin_lock_irqsave/irqrestore variants
are unnecessary for btrfs_workqueue.list_lock; use plain spin_lock and
spin_unlock instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/btrfs/async-thread.c | 17 +++++++----------
 1 file changed, 7 insertions(+), 10 deletions(-)

Comments

Johannes Thumshirn March 17, 2023, 10:34 a.m. UTC | #1
Looks good,
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
diff mbox series

Patch

diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index aac240430efe13..91ec1e2fea0c69 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -179,11 +179,10 @@  static void run_ordered_work(struct btrfs_workqueue *wq,
 	struct list_head *list = &wq->ordered_list;
 	struct btrfs_work *work;
 	spinlock_t *lock = &wq->list_lock;
-	unsigned long flags;
 	bool free_self = false;
 
 	while (1) {
-		spin_lock_irqsave(lock, flags);
+		spin_lock(lock);
 		if (list_empty(list))
 			break;
 		work = list_entry(list->next, struct btrfs_work,
@@ -207,13 +206,13 @@  static void run_ordered_work(struct btrfs_workqueue *wq,
 		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
 			break;
 		trace_btrfs_ordered_sched(work);
-		spin_unlock_irqrestore(lock, flags);
+		spin_unlock(lock);
 		work->ordered_func(work);
 
 		/* now take the lock again and drop our item from the list */
-		spin_lock_irqsave(lock, flags);
+		spin_lock(lock);
 		list_del(&work->ordered_list);
-		spin_unlock_irqrestore(lock, flags);
+		spin_unlock(lock);
 
 		if (work == self) {
 			/*
@@ -248,7 +247,7 @@  static void run_ordered_work(struct btrfs_workqueue *wq,
 			trace_btrfs_all_work_done(wq->fs_info, work);
 		}
 	}
-	spin_unlock_irqrestore(lock, flags);
+	spin_unlock(lock);
 
 	if (free_self) {
 		self->ordered_free(self);
@@ -307,14 +306,12 @@  void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
 
 void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work)
 {
-	unsigned long flags;
-
 	work->wq = wq;
 	thresh_queue_hook(wq);
 	if (work->ordered_func) {
-		spin_lock_irqsave(&wq->list_lock, flags);
+		spin_lock(&wq->list_lock);
 		list_add_tail(&work->ordered_list, &wq->ordered_list);
-		spin_unlock_irqrestore(&wq->list_lock, flags);
+		spin_unlock(&wq->list_lock);
 	}
 	trace_btrfs_work_queued(work);
 	queue_work(wq->normal_wq, &work->normal_work);