
[18/35] mm: follow_pmd_mask() for dmem huge pmd

Message ID 25a50b534bb73164dcad1be1f7b9c48756445c3a.1602093760.git.yuleixzhang@tencent.com
State New, archived
Series Enhance memory utilization with DMEMFS

Commit Message

Yulei Zhang Oct. 8, 2020, 7:54 a.m. UTC
From: Yulei Zhang <yuleixzhang@tencent.com>

In follow_pmd_mask(), a dmem huge pmd should be recognized and an error pointer
of '-EEXIST' returned, indicating that a proper page table entry exists for the
special pmd but there is no corresponding struct page, since dmem pages have no
struct page backing. The pmd is also updated when foll_flags contains FOLL_TOUCH.
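
For reference, this mirrors the ERR_PTR(-EEXIST) convention already used by
follow_pfn_pte(): the caller can tell "a valid entry exists but there is no
struct page to return" apart from "nothing is mapped" (NULL). Below is a
minimal user-space sketch of that convention, not part of the patch; lookup()
and its arguments are made up for illustration, and ERR_PTR()/PTR_ERR()/IS_ERR()
are simplified copies of the kernel helpers:

	/* Illustrative only: not part of the patch. */
	#include <errno.h>
	#include <stdio.h>

	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	/* Hypothetical lookup standing in for follow_special_pmd(). */
	static void *lookup(int mapping_exists, int has_struct_page)
	{
		static int fake_page;

		if (!mapping_exists)
			return NULL;			/* let the caller try other paths */
		if (!has_struct_page)
			return ERR_PTR(-EEXIST);	/* valid entry, no struct page */
		return &fake_page;			/* a normal page */
	}

	int main(void)
	{
		void *p = lookup(1, 0);

		if (!p)
			printf("no entry\n");
		else if (IS_ERR(p) && PTR_ERR(p) == -EEXIST)
			printf("entry exists, but no struct page to return\n");
		else
			printf("got a page\n");
		return 0;
	}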

Signed-off-by: Chen Zhuo <sagazchen@tencent.com>
Signed-off-by: Yulei Zhang <yuleixzhang@tencent.com>
---
 mm/gup.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

Patch

diff --git a/mm/gup.c b/mm/gup.c
index e5739a1974d5..726ffc5b0ea9 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -380,6 +380,42 @@  static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
 	return -EEXIST;
 }
 
+static struct page *
+follow_special_pmd(struct vm_area_struct *vma, unsigned long address,
+		   pmd_t *pmd, unsigned int flags)
+{
+	spinlock_t *ptl;
+
+	if (flags & FOLL_DUMP)
+		/* Avoid special (like zero) pages in core dumps */
+		return ERR_PTR(-EFAULT);
+
+	/* No page to get reference */
+	if (flags & FOLL_GET)
+		return ERR_PTR(-EFAULT);
+
+	if (flags & FOLL_TOUCH) {
+		pmd_t _pmd;
+
+		ptl = pmd_lock(vma->vm_mm, pmd);
+		if (!pmd_special(*pmd)) {
+			spin_unlock(ptl);
+			return NULL;
+		}
+		_pmd = pmd_mkyoung(*pmd);
+		if (flags & FOLL_WRITE)
+			_pmd = pmd_mkdirty(_pmd);
+		if (pmdp_set_access_flags(vma, address & HPAGE_PMD_MASK,
+					  pmd, _pmd,
+					  flags & FOLL_WRITE))
+			update_mmu_cache_pmd(vma, address, pmd);
+		spin_unlock(ptl);
+	}
+
+	/* Proper page table entry exists, but no corresponding struct page */
+	return ERR_PTR(-EEXIST);
+}
+
 /*
  * FOLL_FORCE can write to even unwritable pte's, but only
  * after we've gone through a COW cycle and they are dirty.
@@ -564,6 +600,12 @@  static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 			return page;
 		return no_page_table(vma, flags);
 	}
+	if (pmd_special(*pmd)) {
+		page = follow_special_pmd(vma, address, pmd, flags);
+		if (page)
+			return page;
+		return no_page_table(vma, flags);
+	}
 	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
 		page = follow_huge_pd(vma, address,
 				      __hugepd(pmd_val(pmdval)), flags,