[v1,2/2] mm: hugetlb: don't drop hugetlb_lock around cma_release() call

Message ID 20201022225308.2927890-3-guro@fb.com (mailing list archive)
State New, archived
Series mm: cma: introduce a non-blocking version of cma_release()

Commit Message

Roman Gushchin Oct. 22, 2020, 10:53 p.m. UTC
Replace the blocking cma_release() call with the non-blocking
cma_release_nowait(), so there is no longer any need to temporarily
drop hugetlb_lock.

Signed-off-by: Roman Gushchin <guro@fb.com>
---
 mm/hugetlb.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)
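
For context: in kernels of this vintage cma_release() may sleep, because
clearing the CMA bitmap is serialized on a mutex, which is why
free_gigantic_page() previously had to drop the hugetlb_lock spinlock
around it. The sketch below only illustrates the non-blocking contract
this patch relies on, assuming cma_release_nowait() (added in patch 1/2
of this series) defers the blocking work to process context; it is
hypothetical and not the series' actual code.

#include <linux/cma.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include "cma.h"

/*
 * Hypothetical sketch, as if added to mm/cma.c: defer the sleeping part
 * of cma_release() to a workqueue so callers may run in atomic context.
 * The pages being released are unused, so their memory (assumed to be
 * in the direct map) can temporarily hold the work item, avoiding an
 * allocation that could fail.
 */
struct cma_release_work {
	struct work_struct work;
	struct cma *cma;
	unsigned long pfn;
	unsigned int count;
};

static void cma_release_workfn(struct work_struct *work)
{
	struct cma_release_work *w =
		container_of(work, struct cma_release_work, work);

	/* Process context: the blocking cma_release() is allowed here. */
	cma_release(w->cma, pfn_to_page(w->pfn), w->count);
}

bool cma_release_nowait(struct cma *cma, const struct page *pages,
			unsigned int count)
{
	struct cma_release_work *w;
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	/* Same ownership check as cma_release(). */
	pfn = page_to_pfn(pages);
	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	/* Reuse the (free) memory of the released range for the work item. */
	w = page_to_virt(pages);
	INIT_WORK(&w->work, cma_release_workfn);
	w->cma = cma;
	w->pfn = pfn;
	w->count = count;
	schedule_work(&w->work);

	return true;
}

With a contract like this, the cma_release_nowait() call in
free_gigantic_page() is safe under hugetlb_lock, which is what lets the
second hunk below drop the spin_unlock()/spin_lock() pair.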

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index fe76f8fd5a73..230e9b6c9a2b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1224,10 +1224,11 @@  static void free_gigantic_page(struct page *page, unsigned int order)
 {
 	/*
 	 * If the page isn't allocated using the cma allocator,
-	 * cma_release() returns false.
+	 * cma_release_nowait() returns false.
 	 */
 #ifdef CONFIG_CMA
-	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
+	if (cma_release_nowait(hugetlb_cma[page_to_nid(page)], page,
+			       1 << order))
 		return;
 #endif
 
@@ -1312,14 +1313,8 @@  static void update_and_free_page(struct hstate *h, struct page *page)
 	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
 	set_page_refcounted(page);
 	if (hstate_is_gigantic(h)) {
-		/*
-		 * Temporarily drop the hugetlb_lock, because
-		 * we might block in free_gigantic_page().
-		 */
-		spin_unlock(&hugetlb_lock);
 		destroy_compound_gigantic_page(page, huge_page_order(h));
 		free_gigantic_page(page, huge_page_order(h));
-		spin_lock(&hugetlb_lock);
 	} else {
 		__free_pages(page, huge_page_order(h));
 	}