Some of the rmap-walk users (unmap, migrate, device exclusive, mkclean,
etc.) may modify the PTE entry via rmap. Add a new page vma mapped walk
flag, PVMW_BREAK_COW_PTE, to tell the rmap walk to break the COW-ed PTE
table before writing to it.

Signed-off-by: Chih-En Lin <shiyn.lin@gmail.com>
---
 include/linux/rmap.h | 2 ++
 mm/migrate.c         | 3 ++-
 mm/page_vma_mapped.c | 4 ++++
 mm/rmap.c            | 9 +++++----
 mm/vmscan.c          | 3 ++-
 5 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -368,6 +368,8 @@ int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
 #define PVMW_SYNC		(1 << 0)
 /* Look for migration entries rather than present PTEs */
 #define PVMW_MIGRATION		(1 << 1)
+/* Break COW-ed PTE during walking */
+#define PVMW_BREAK_COW_PTE	(1 << 2)
 
 struct page_vma_mapped_walk {
 	unsigned long pfn;
diff --git a/mm/migrate.c b/mm/migrate.c
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -183,7 +183,8 @@ void putback_movable_pages(struct list_head *l)
 static bool remove_migration_pte(struct folio *folio,
 		struct vm_area_struct *vma, unsigned long addr, void *old)
 {
-	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
+	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr,
+			      PVMW_SYNC | PVMW_MIGRATION | PVMW_BREAK_COW_PTE);
 
 	while (page_vma_mapped_walk(&pvmw)) {
 		rmap_t rmap_flags = RMAP_NONE;
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -251,6 +251,10 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 			step_forward(pvmw, PMD_SIZE);
 			continue;
 		}
+		if (pvmw->flags & PVMW_BREAK_COW_PTE) {
+			if (break_cow_pte(vma, pvmw->pmd, pvmw->address))
+				return not_found(pvmw);
+		}
 		if (!map_pte(pvmw))
 			goto next_pte;
 this_pte:
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1012,7 +1012,8 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
 			     unsigned long address, void *arg)
 {
-	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address,
+			      PVMW_SYNC | PVMW_BREAK_COW_PTE);
 	int *cleaned = arg;
 
 	*cleaned += page_vma_mkclean_one(&pvmw);
@@ -1463,7 +1464,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			     unsigned long address, void *arg)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_BREAK_COW_PTE);
 	pte_t pteval;
 	struct page *subpage;
 	bool anon_exclusive, ret = true;
@@ -1834,7 +1835,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			     unsigned long address, void *arg)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_BREAK_COW_PTE);
 	pte_t pteval;
 	struct page *subpage;
 	bool anon_exclusive, ret = true;
@@ -2187,7 +2188,7 @@ static bool page_make_device_exclusive_one(struct folio *folio,
 		struct vm_area_struct *vma, unsigned long address, void *priv)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_BREAK_COW_PTE);
 	struct make_exclusive_args *args = priv;
 	pte_t pteval;
 	struct page *subpage;
diff --git a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1882,7 +1882,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 
 		/*
 		 * The folio is mapped into the page tables of one or more
-		 * processes. Try to unmap it here.
+		 * processes. Try to unmap it here. Also break the COW-ed
+		 * PTE table first, since unmapping writes to the page tables.
 		 */
 		if (folio_mapped(folio)) {
 			enum ttu_flags flags = TTU_BATCH_FLUSH;
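A minimal usage sketch of the calling pattern this flag enables. It
assumes the break_cow_pte() helper introduced earlier in this series;
example_rmap_one() is a hypothetical walker named only for illustration,
not a function touched by this patch.

#include <linux/mm.h>
#include <linux/rmap.h>

/*
 * Hypothetical rmap walker (illustration only): a walker that intends
 * to write PTEs opts in with PVMW_BREAK_COW_PTE, so that
 * page_vma_mapped_walk() breaks a COW-ed (shared) PTE table via
 * break_cow_pte() before map_pte() maps and locks the PTE.
 */
static bool example_rmap_one(struct folio *folio, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_BREAK_COW_PTE);

	while (page_vma_mapped_walk(&pvmw)) {
		/*
		 * pvmw.pte now points into a PTE table owned exclusively
		 * by this mm, so it is safe to modify the entry here.
		 */
	}
	return true;
}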