@@ -861,12 +861,12 @@ static int __buffer_migrate_folio(struct address_space *mapping,
}
bh = bh->b_this_page;
} while (bh != head);
+ spin_unlock(&mapping->i_private_lock);
if (busy) {
if (invalidated) {
rc = -EAGAIN;
goto unlock_buffers;
}
- spin_unlock(&mapping->i_private_lock);
invalidate_bh_lrus();
invalidated = true;
goto recheck_buffers;
@@ -884,8 +884,6 @@ static int __buffer_migrate_folio(struct address_space *mapping,
} while (bh != head);
unlock_buffers:
- if (check_refs)
- spin_unlock(&mapping->i_private_lock);
bh = head;
do {
unlock_buffer(bh);
buffer_migrate_folio_norefs() should avoid holding mapping->i_private_lock
across the rest of the migration so that large folios can be supported. The
prior commit "fs/buffer: avoid races with folio migrations on
__find_get_block_slow()" removed the only rationale for staying in atomic
context there, so the lock can now be dropped as soon as the buffer
reference recheck is done.

Reported-by: kernel test robot <oliver.sang@intel.com>
Reported-by: syzbot+f3c6fda1297c748a7076@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/oe-lkp/202503101536.27099c77-lkp@intel.com
Fixes: 3c20917120ce ("block/bdev: enable large folio support for large logical block sizes")
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
---
 mm/migrate.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
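
For reference, this is roughly how the check_refs path of
__buffer_migrate_folio() reads with the hunks above applied. It is a sketch
reconstructed from the diff context, not verbatim mm/migrate.c; the
declarations of busy/invalidated and the spin_lock/atomic_read lines sit in
the unchanged part of the function and are assumed here:

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		/* i_private_lock is now held only for the b_count recheck. */
		spin_lock(&mapping->i_private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		/*
		 * Drop the lock before handling busy buffers or migrating the
		 * folio; nothing below needs to run in atomic context anymore.
		 */
		spin_unlock(&mapping->i_private_lock);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

and the unlock_buffers label no longer needs the conditional unlock, since
the lock was already released above:

unlock_buffers:
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);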