[4/6] cma: Add support for GFP_ZERO

Message ID 20190218210715.1066-5-krisman@collabora.com
State New, archived
Series Improve handling of GFP flags in the CMA allocator

Commit Message

Gabriel Krisman Bertazi Feb. 18, 2019, 9:07 p.m. UTC
Since cma_alloc() now takes a gfp_mask, make it honor __GFP_ZERO so as
not to surprise potential users.

Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.com>
---
 mm/cma.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
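
As an illustration only (not part of this patch), a caller that needs zeroed
CMA memory could now request it directly instead of clearing the pages itself.
The cma area pointer and page count below ("my_cma", "nr_pages") are
placeholders, and the cma_alloc() signature assumed is the one introduced
earlier in this series, with gfp_t as the last argument:

#include <linux/cma.h>
#include <linux/gfp.h>

static struct page *alloc_zeroed_cma(struct cma *my_cma, size_t nr_pages)
{
	/*
	 * GFP_KERNEL keeps __GFP_DIRECT_RECLAIM set, so the WARN_ON at the
	 * top of cma_alloc() stays quiet; __GFP_ZERO asks for the pages to
	 * be cleared before they are returned.
	 */
	return cma_alloc(my_cma, nr_pages, 0, GFP_KERNEL | __GFP_ZERO);
}

Pages allocated this way are still freed with cma_release(), as before.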

Patch

diff --git a/mm/cma.c b/mm/cma.c
index 5789e3545faf..1dff74b1a8c5 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -408,12 +408,13 @@  struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	unsigned long start = 0;
 	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
 	size_t i;
+	void *kaddr;
 	struct page *page = NULL;
 	int ret = -ENOMEM;
 
 	/* Be noisy about caller asking for unsupported flags. */
 	WARN_ON(unlikely(!(gfp_mask & __GFP_DIRECT_RECLAIM) ||
-			 (gfp_mask & (__GFP_ZERO|__GFP_NOFAIL))));
+			 (gfp_mask & __GFP_NOFAIL)));
 
 	if (!cma || !cma->count)
 		return NULL;
@@ -477,8 +478,15 @@  struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	 * those page blocks.
 	 */
 	if (page) {
-		for (i = 0; i < count; i++)
+		for (i = 0; i < count; i++) {
 			page_kasan_tag_reset(page + i);
+
+			if (gfp_mask & __GFP_ZERO) {
+				kaddr = kmap_atomic(page + i);
+				clear_page(kaddr);
+				kunmap_atomic(kaddr);
+			}
+		}
 	}
 
 	if (ret && !(gfp_mask & __GFP_NOWARN)) {
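
For what it's worth, the open-coded kmap_atomic()/clear_page()/kunmap_atomic()
sequence in the loop above is essentially what the existing clear_highpage()
helper from <linux/highmem.h> already does, so the new branch could
equivalently be written as (a sketch, not what the patch does):

	if (gfp_mask & __GFP_ZERO)
		clear_highpage(page + i);

which would also drop the need for the local kaddr variable.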