@@ -2275,6 +2275,53 @@ static int blk_mq_get_hw_queue_node(struct blk_mq_tag_set *set,
return node;
}
+static size_t order_to_size(unsigned int order)
+{
+ return (size_t)PAGE_SIZE << order;
+}
+
+static struct page *blk_mq_alloc_rqs_page(int node, unsigned int order,
+ unsigned int min_size)
+{
+ struct page *page;
+ unsigned int this_order = order;
+
+ do {
+ page = alloc_pages_node(node,
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
+ this_order);
+ if (page)
+ break;
+ if (!this_order--)
+ break;
+ if (order_to_size(this_order) < min_size)
+ break;
+ } while (1);
+
+ if (!page)
+ return NULL;
+
+ page->private = this_order;
+
+ /*
+ * Allow kmemleak to scan these pages as they contain pointers
+ * to additional allocations, such as those made via ops->init_request().
+ */
+ kmemleak_alloc(page_address(page), order_to_size(this_order), 1, GFP_NOIO);
+
+ return page;
+}
+
+static void blk_mq_free_rqs_page(struct page *page)
+{
+ /*
+ * Remove the kmemleak object previously registered in
+ * blk_mq_alloc_rqs_page().
+ */
+ kmemleak_free(page_address(page));
+ __free_pages(page, page->private);
+}
+
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
unsigned int hctx_idx)
{
@@ -2296,12 +2343,7 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
while (!list_empty(&tags->page_list)) {
page = list_first_entry(&tags->page_list, struct page, lru);
list_del_init(&page->lru);
- /*
- * Remove kmemleak object previously allocated in
- * blk_mq_alloc_rqs().
- */
- kmemleak_free(page_address(page));
- __free_pages(page, page->private);
+ blk_mq_free_rqs_page(page);
}
}
@@ -2348,11 +2390,6 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
return tags;
}
-static size_t order_to_size(unsigned int order)
-{
- return (size_t)PAGE_SIZE << order;
-}
-
static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, int node)
{
@@ -2396,30 +2433,14 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
while (this_order && left < order_to_size(this_order - 1))
this_order--;
- do {
- page = alloc_pages_node(node,
- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
- this_order);
- if (page)
- break;
- if (!this_order--)
- break;
- if (order_to_size(this_order) < rq_size)
- break;
- } while (1);
-
+ page = blk_mq_alloc_rqs_page(node, this_order, rq_size);
if (!page)
goto fail;
- page->private = this_order;
+ this_order = (int)page->private;
list_add_tail(&page->lru, &tags->page_list);
-
p = page_address(page);
- /*
- * Allow kmemleak to scan these pages as they contain pointers
- * to additional allocations like via ops->init_request().
- */
- kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
+
entries_per_page = order_to_size(this_order) / rq_size;
to_do = min(entries_per_page, depth - i);
left -= to_do * rq_size;
Add two helpers for allocating and freeing pages of request pool. No function change. Signed-off-by: Ming Lei <ming.lei@redhat.com> Cc: Hannes Reinecke <hare@suse.de> Cc: Bart Van Assche <bvanassche@acm.org> Cc: John Garry <john.garry@huawei.com> Cc: Christoph Hellwig <hch@lst.de> --- block/blk-mq.c | 81 +++++++++++++++++++++++++++++++------------------- 1 file changed, 51 insertions(+), 30 deletions(-)