@@ -2204,13 +2204,24 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
{
struct scrub_block *sblock;
int index;
+ unsigned int nofs_flag;
+ int ret = 0;
+
+ /*
+ * In order to avoid deadlock with reclaim when there is a transaction trying
+ * to pause scrub, use GFP_NOFS (via memalloc_nofs_save()). The pausing request
+ * is done when the transaction commit starts, and it blocks the transaction
+ * until scrub is paused (done at specific points in scrub_stripe()).
+ */
+ nofs_flag = memalloc_nofs_save();
sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
if (!sblock) {
spin_lock(&sctx->stat_lock);
sctx->stat.malloc_errors++;
spin_unlock(&sctx->stat_lock);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
/* one ref inside this function, plus one for each page added to
@@ -2230,7 +2241,8 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
sctx->stat.malloc_errors++;
spin_unlock(&sctx->stat_lock);
scrub_block_put(sblock);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
scrub_page_get(spage);
@@ -2269,12 +2281,11 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
} else {
for (index = 0; index < sblock->page_count; index++) {
struct scrub_page *spage = sblock->pagev[index];
- int ret;
ret = scrub_add_page_to_rd_bio(sctx, spage);
if (ret) {
scrub_block_put(sblock);
- return ret;
+ goto out;
}
}
@@ -2284,7 +2295,9 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
/* last one frees, either here or in bio completion for last page */
scrub_block_put(sblock);
- return 0;
+out:
+ memalloc_nofs_restore(nofs_flag);
+ return ret;
}
static void scrub_bio_end_io(struct bio *bio)
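
Not part of the patch: a minimal sketch of the memalloc_nofs_save()/memalloc_nofs_restore() scoping the hunks above rely on, using a hypothetical helper alloc_in_nofs_scope() purely for illustration. Inside the saved scope, GFP_KERNEL allocations are treated as GFP_NOFS, which is what keeps direct reclaim from re-entering the filesystem while a committing transaction is waiting for scrub to pause.

/*
 * Illustration only: the NOFS allocation scope pattern used in the patch.
 */
#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *alloc_in_nofs_scope(size_t size)
{
	unsigned int nofs_flag;
	void *p;

	nofs_flag = memalloc_nofs_save();
	/* Behaves as GFP_NOFS while the NOFS scope is active. */
	p = kzalloc(size, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	return p;
}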