[v3,4/7] mm/thp: Carry over dirty bit when thp splits on pmd

Message ID 20220809220100.20033-5-peterx@redhat.com (mailing list archive)
State New
Series mm: Remember a/d bits for migration entries

Commit Message

Peter Xu Aug. 9, 2022, 10 p.m. UTC
Carry over the dirty bit from pmd to pte when a huge pmd splits.  This
shouldn't be a correctness issue, since with pmd_dirty() set the page will
already be marked dirty anyway; however, carrying the dirty bit over helps
the next initial writes to the split ptes on some archs, like x86.

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 mm/huge_memory.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
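
For context (an illustration, not part of the patch): on x86 the page
walker sets the dirty bit itself on the first write through a clean,
writable pte, which costs a locked update of the pte entry.  Carrying the
bit over at split time pre-sets it, so those first writes are cheaper.
Condensed, the change amounts to:

	/* Sketch condensed from the diff below: snapshot the bit ... */
	dirty = pmd_dirty(old_pmd);

	/* ... and replay it into each of the HPAGE_PMD_NR child ptes. */
	if (dirty)
		entry = pte_mkdirty(entry);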

Comments

Huang, Ying Aug. 10, 2022, 6:24 a.m. UTC | #1
Peter Xu <peterx@redhat.com> writes:

> Carry over the dirty bit from pmd to pte when a huge pmd splits.  This
> shouldn't be a correctness issue, since with pmd_dirty() set the page will
> already be marked dirty anyway; however, carrying the dirty bit over helps
> the next initial writes to the split ptes on some archs, like x86.
>
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
>  mm/huge_memory.c | 6 +++++-
>  1 file changed, 5 insertions(+), 1 deletion(-)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 0611b2fd145a..e8e78d1bac5f 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2005,7 +2005,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
>  	pgtable_t pgtable;
>  	pmd_t old_pmd, _pmd;
>  	bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
> -	bool anon_exclusive = false;
> +	bool anon_exclusive = false, dirty = false;
>  	unsigned long addr;
>  	int i;
>  
> @@ -2098,6 +2098,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
>  			SetPageDirty(page);
>  		write = pmd_write(old_pmd);
>  		young = pmd_young(old_pmd);
> +		dirty = pmd_dirty(old_pmd);

Nitpick: This can be put under

		if (pmd_dirty(old_pmd))
			SetPageDirty(page);

Not a big deal.

Reviewed-by: "Huang, Ying" <ying.huang@intel.com>

>  		soft_dirty = pmd_soft_dirty(old_pmd);
>  		uffd_wp = pmd_uffd_wp(old_pmd);
>  
> @@ -2161,6 +2162,9 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
>  				entry = pte_wrprotect(entry);
>  			if (!young)
>  				entry = pte_mkold(entry);
> +			/* NOTE: this may set soft-dirty too on some archs */
> +			if (dirty)
> +				entry = pte_mkdirty(entry);
>  			if (soft_dirty)
>  				entry = pte_mksoft_dirty(entry);
>  			if (uffd_wp)
Peter Xu Aug. 10, 2022, 3:13 p.m. UTC | #2
On Wed, Aug 10, 2022 at 02:24:33PM +0800, Huang, Ying wrote:
> Peter Xu <peterx@redhat.com> writes:
> 
> > Carry over the dirty bit from pmd to pte when a huge pmd splits.  This
> > shouldn't be a correctness issue, since with pmd_dirty() set the page will
> > already be marked dirty anyway; however, carrying the dirty bit over helps
> > the next initial writes to the split ptes on some archs, like x86.
> >
> > Signed-off-by: Peter Xu <peterx@redhat.com>
> > ---
> >  mm/huge_memory.c | 6 +++++-
> >  1 file changed, 5 insertions(+), 1 deletion(-)
> >
> > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> > index 0611b2fd145a..e8e78d1bac5f 100644
> > --- a/mm/huge_memory.c
> > +++ b/mm/huge_memory.c
> > @@ -2005,7 +2005,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
> >  	pgtable_t pgtable;
> >  	pmd_t old_pmd, _pmd;
> >  	bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
> > -	bool anon_exclusive = false;
> > +	bool anon_exclusive = false, dirty = false;
> >  	unsigned long addr;
> >  	int i;
> >  
> > @@ -2098,6 +2098,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
> >  			SetPageDirty(page);
> >  		write = pmd_write(old_pmd);
> >  		young = pmd_young(old_pmd);
> > +		dirty = pmd_dirty(old_pmd);
> 
> Nitpick: This can be put under
> 
> 		if (pmd_dirty(old_pmd))
> 			SetPageDirty(page);
> 
> Not a big deal.
> 
> Reviewed-by: "Huang, Ying" <ying.huang@intel.com>

Yeah will do, thanks.
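
(For reference, the fold suggested above would look roughly like this; a
sketch of a possible follow-up, not the posted code:)

	if (pmd_dirty(old_pmd)) {
		SetPageDirty(page);
		dirty = true;
	}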

Patch

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0611b2fd145a..e8e78d1bac5f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2005,7 +2005,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	pgtable_t pgtable;
 	pmd_t old_pmd, _pmd;
 	bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
-	bool anon_exclusive = false;
+	bool anon_exclusive = false, dirty = false;
 	unsigned long addr;
 	int i;
 
@@ -2098,6 +2098,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 			SetPageDirty(page);
 		write = pmd_write(old_pmd);
 		young = pmd_young(old_pmd);
+		dirty = pmd_dirty(old_pmd);
 		soft_dirty = pmd_soft_dirty(old_pmd);
 		uffd_wp = pmd_uffd_wp(old_pmd);
 
@@ -2161,6 +2162,9 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 				entry = pte_wrprotect(entry);
 			if (!young)
 				entry = pte_mkold(entry);
+			/* NOTE: this may set soft-dirty too on some archs */
+			if (dirty)
+				entry = pte_mkdirty(entry);
 			if (soft_dirty)
 				entry = pte_mksoft_dirty(entry);
 			if (uffd_wp)
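
As an aside on the NOTE comment above: pte_mkdirty() can set the
soft-dirty bit as well on some architectures.  On x86, for instance, the
helper at the time of this patch was (from arch/x86/include/asm/pgtable.h):

	static inline pte_t pte_mkdirty(pte_t pte)
	{
		/* Setting hardware-dirty also marks the pte soft-dirty. */
		return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
	}

This is harmless here, since the explicit soft-dirty handling follows
immediately via pte_mksoft_dirty().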