
[v2,1/3] mm/memory: move page_count() check into validate_page_before_insert()

Message ID 20240522125713.775114-2-david@redhat.com (mailing list archive)
State New
Series mm/memory: cleanly support zeropage in vm_insert_page*(), vm_map_pages*() and vmf_insert_mixed()

Commit Message

David Hildenbrand May 22, 2024, 12:57 p.m. UTC
Let's move the page_count() check into validate_page_before_insert(). We'll
now also cover the case where insert_page() is called from
__vm_insert_mixed(), which sounds like the right thing to do.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 mm/memory.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

Patch

diff --git a/mm/memory.c b/mm/memory.c
index b5453b86ec4b..a3aad7e58914 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1987,6 +1987,8 @@  static int validate_page_before_insert(struct page *page)
 {
 	struct folio *folio = page_folio(page);
 
+	if (!folio_ref_count(folio))
+		return -EINVAL;
 	if (folio_test_anon(folio) || folio_test_slab(folio) ||
 	    page_has_type(page))
 		return -EINVAL;
@@ -2041,8 +2043,6 @@  static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
 {
 	int err;
 
-	if (!page_count(page))
-		return -EINVAL;
 	err = validate_page_before_insert(page);
 	if (err)
 		return err;
@@ -2176,8 +2176,6 @@  int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 {
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
-	if (!page_count(page))
-		return -EINVAL;
 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
 		BUG_ON(mmap_read_trylock(vma->vm_mm));
 		BUG_ON(vma->vm_flags & VM_PFNMAP);
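
For reference, a minimal sketch of the kind of caller the consolidated check
covers (not part of this series; demo_mmap and demo_release are hypothetical
names): a driver ->mmap handler inserting a single alloc_page() page with
vm_insert_page(). The page carries a reference from the allocation, so the
folio_ref_count() test in validate_page_before_insert() passes; a page with a
zero refcount is now rejected on both insertion helpers, including when
insert_page() is reached from __vm_insert_mixed().

#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical driver ->mmap: map one kernel-allocated page to userspace. */
static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct page *page;
	int ret;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	/*
	 * Called from ->mmap, so the mmap lock is held for write and
	 * vm_insert_page() may set VM_MIXEDMAP on the VMA.  The page is a
	 * plain kernel allocation with a non-zero refcount, so it passes
	 * validate_page_before_insert().
	 */
	ret = vm_insert_page(vma, vma->vm_start, page);
	if (ret) {
		__free_page(page);
		return ret;
	}

	/* The PTE holds its own reference; keep ours until release time. */
	file->private_data = page;
	return 0;
}

static int demo_release(struct inode *inode, struct file *file)
{
	if (file->private_data)
		__free_page(file->private_data);
	return 0;
}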