
[v1,10/11] mm/memory: ignore dirty/accessed/soft-dirty bits in folio_pte_batch()

Message ID 20240122194200.381241-11-david@redhat.com (mailing list archive)
State Superseded
Series mm/memory: optimize fork() with PTE-mapped THP

Checks

Context Check Description
conchuod/vmtest-for-next-PR success PR summary
conchuod/patch-10-test-1 success .github/scripts/patches/tests/build_rv32_defconfig.sh
conchuod/patch-10-test-2 success .github/scripts/patches/tests/build_rv64_clang_allmodconfig.sh
conchuod/patch-10-test-3 success .github/scripts/patches/tests/build_rv64_gcc_allmodconfig.sh
conchuod/patch-10-test-4 success .github/scripts/patches/tests/build_rv64_nommu_k210_defconfig.sh
conchuod/patch-10-test-5 success .github/scripts/patches/tests/build_rv64_nommu_virt_defconfig.sh
conchuod/patch-10-test-6 success .github/scripts/patches/tests/checkpatch.sh
conchuod/patch-10-test-7 success .github/scripts/patches/tests/dtb_warn_rv64.sh
conchuod/patch-10-test-8 success .github/scripts/patches/tests/header_inline.sh
conchuod/patch-10-test-9 success .github/scripts/patches/tests/kdoc.sh
conchuod/patch-10-test-10 success .github/scripts/patches/tests/module_param.sh
conchuod/patch-10-test-11 success .github/scripts/patches/tests/verify_fixes.sh
conchuod/patch-10-test-12 success .github/scripts/patches/tests/verify_signedoff.sh

Commit Message

David Hildenbrand Jan. 22, 2024, 7:41 p.m. UTC
Let's ignore these bits: they are irrelevant for fork, and will likely
be irrelevant for upcoming users such as page unmapping.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 mm/memory.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

Comments

Ryan Roberts Jan. 23, 2024, 12:25 p.m. UTC | #1
On 22/01/2024 19:41, David Hildenbrand wrote:
> Let's ignore these bits: they are irrelevant for fork, and will likely
> be irrelevant for upcoming users such as page unmapping.
> 
> Signed-off-by: David Hildenbrand <david@redhat.com>
> ---
>  mm/memory.c | 10 ++++++++--
>  1 file changed, 8 insertions(+), 2 deletions(-)
> 
> diff --git a/mm/memory.c b/mm/memory.c
> index f563aec85b2a8..341b2be845b6e 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -953,24 +953,30 @@ static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
>  	set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
>  }
>  
> +static inline pte_t __pte_batch_clear_ignored(pte_t pte)
> +{
> +	return pte_clear_soft_dirty(pte_mkclean(pte_mkold(pte)));
> +}
> +
>  /*
>   * Detect a PTE batch: consecutive (present) PTEs that map consecutive
>   * pages of the same folio.
>   *
>   * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN.

nit: last char should be a comma (,) not a full stop (.)

> + * the accessed bit, dirty bit and soft-dirty bit.
>   */
>  static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
>  		pte_t *start_ptep, pte_t pte, int max_nr)
>  {
>  	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
>  	const pte_t *end_ptep = start_ptep + max_nr;
> -	pte_t expected_pte = pte_next_pfn(pte);
> +	pte_t expected_pte = __pte_batch_clear_ignored(pte_next_pfn(pte));
>  	pte_t *ptep = start_ptep + 1;
>  
>  	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
>  
>  	while (ptep != end_ptep) {
> -		pte = ptep_get(ptep);
> +		pte = __pte_batch_clear_ignored(ptep_get(ptep));
>  
>  		if (!pte_same(pte, expected_pte))
>  			break;

I think you'll lose dirty information in the child for private mappings? If the
first pte in a batch is clean, but a subsequent page is dirty, you will end up
setting all the pages in the batch as clean in the child. Previous behavior
would preserve dirty bit for private mappings.

In my version (v3) that did arbitrary batching, I had some fun and games
tracking dirty, write and uffd_wp:
https://lore.kernel.org/linux-arm-kernel/20231204105440.61448-2-ryan.roberts@arm.com/

Also, I think you will currently either set soft dirty on all or none of the
pages in the batch, depending on the value of the first. I previously convinced
myself that the state was unimportant so always cleared it in the child to
provide consistency.

David Hildenbrand Jan. 23, 2024, 1:06 p.m. UTC | #2
On 23.01.24 13:25, Ryan Roberts wrote:
> On 22/01/2024 19:41, David Hildenbrand wrote:
>> Let's ignore these bits: they are irrelevant for fork, and will likely
>> be irrelevant for upcoming users such as page unmapping.
>>
>> Signed-off-by: David Hildenbrand <david@redhat.com>
>> ---
>>   mm/memory.c | 10 ++++++++--
>>   1 file changed, 8 insertions(+), 2 deletions(-)
>>
>> diff --git a/mm/memory.c b/mm/memory.c
>> index f563aec85b2a8..341b2be845b6e 100644
>> --- a/mm/memory.c
>> +++ b/mm/memory.c
>> @@ -953,24 +953,30 @@ static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
>>   	set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
>>   }
>>   
>> +static inline pte_t __pte_batch_clear_ignored(pte_t pte)
>> +{
>> +	return pte_clear_soft_dirty(pte_mkclean(pte_mkold(pte)));
>> +}
>> +
>>   /*
>>    * Detect a PTE batch: consecutive (present) PTEs that map consecutive
>>    * pages of the same folio.
>>    *
>>    * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN.
> 
> nit: last char should be a comma (,) not a full stop (.)
> 
>> + * the accessed bit, dirty bit and soft-dirty bit.
>>    */
>>   static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
>>   		pte_t *start_ptep, pte_t pte, int max_nr)
>>   {
>>   	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
>>   	const pte_t *end_ptep = start_ptep + max_nr;
>> -	pte_t expected_pte = pte_next_pfn(pte);
>> +	pte_t expected_pte = __pte_batch_clear_ignored(pte_next_pfn(pte));
>>   	pte_t *ptep = start_ptep + 1;
>>   
>>   	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
>>   
>>   	while (ptep != end_ptep) {
>> -		pte = ptep_get(ptep);
>> +		pte = __pte_batch_clear_ignored(ptep_get(ptep));
>>   
>>   		if (!pte_same(pte, expected_pte))
>>   			break;
> 
> I think you'll lose dirty information in the child for private mappings? If the
> first pte in a batch is clean, but a subsequent page is dirty, you will end up
> setting all the pages in the batch as clean in the child. Previous behavior
> would preserve dirty bit for private mappings.
> 
> In my version (v3) that did arbitrary batching, I had some fun and games
> tracking dirty, write and uffd_wp:
> https://lore.kernel.org/linux-arm-kernel/20231204105440.61448-2-ryan.roberts@arm.com/
> 
> Also, I think you will currently either set soft dirty on all or none of the
> pages in the batch, depending on the value of the first. I previously convinced
> myself that the state was unimportant so always cleared it in the child to
> provide consistency.

Good points regarding dirty and soft-dirty. I wanted to avoid passing 
flags to folio_pte_batch(), but maybe that's just what we need to not 
change behavior.

Ryan Roberts Jan. 23, 2024, 1:42 p.m. UTC | #3
On 23/01/2024 13:06, David Hildenbrand wrote:
> On 23.01.24 13:25, Ryan Roberts wrote:
>> On 22/01/2024 19:41, David Hildenbrand wrote:
>>> Let's ignore these bits: they are irrelevant for fork, and will likely
>>> be irrelevant for upcoming users such as page unmapping.
>>>
>>> Signed-off-by: David Hildenbrand <david@redhat.com>
>>> ---
>>>   mm/memory.c | 10 ++++++++--
>>>   1 file changed, 8 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/mm/memory.c b/mm/memory.c
>>> index f563aec85b2a8..341b2be845b6e 100644
>>> --- a/mm/memory.c
>>> +++ b/mm/memory.c
>>> @@ -953,24 +953,30 @@ static __always_inline void __copy_present_ptes(struct
>>> vm_area_struct *dst_vma,
>>>       set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
>>>   }
>>>   +static inline pte_t __pte_batch_clear_ignored(pte_t pte)
>>> +{
>>> +    return pte_clear_soft_dirty(pte_mkclean(pte_mkold(pte)));
>>> +}
>>> +
>>>   /*
>>>    * Detect a PTE batch: consecutive (present) PTEs that map consecutive
>>>    * pages of the same folio.
>>>    *
>>>    * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN.
>>
>> nit: last char should be a comma (,) not a full stop (.)
>>
>>> + * the accessed bit, dirty bit and soft-dirty bit.
>>>    */
>>>   static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
>>>           pte_t *start_ptep, pte_t pte, int max_nr)
>>>   {
>>>       unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
>>>       const pte_t *end_ptep = start_ptep + max_nr;
>>> -    pte_t expected_pte = pte_next_pfn(pte);
>>> +    pte_t expected_pte = __pte_batch_clear_ignored(pte_next_pfn(pte));
>>>       pte_t *ptep = start_ptep + 1;
>>>         VM_WARN_ON_FOLIO(!pte_present(pte), folio);
>>>         while (ptep != end_ptep) {
>>> -        pte = ptep_get(ptep);
>>> +        pte = __pte_batch_clear_ignored(ptep_get(ptep));
>>>             if (!pte_same(pte, expected_pte))
>>>               break;
>>
>> I think you'll lose dirty information in the child for private mappings? If the
>> first pte in a batch is clean, but a subsequent page is dirty, you will end up
>> setting all the pages in the batch as clean in the child. Previous behavior
>> would preserve dirty bit for private mappings.
>>
>> In my version (v3) that did arbitrary batching, I had some fun and games
>> tracking dirty, write and uffd_wp:
>> https://lore.kernel.org/linux-arm-kernel/20231204105440.61448-2-ryan.roberts@arm.com/
>>
>> Also, I think you will currently either set soft dirty on all or none of the
>> pages in the batch, depending on the value of the first. I previously convinced
>> myself that the state was unimportant so always cleared it in the child to
>> provide consistency.
> 
> Good points regarding dirty and soft-dirty. I wanted to avoid passing flags to
> folio_pte_batch(), but maybe that's just what we need to not change behavior.

I think you could not bother with the enforce_uffd_wp - just always enforce
uffd-wp. So that's one simplification vs mine. Then you just need an any_dirty
flag following the same pattern as your any_writable. Then just set dirty on the
whole batch in the child if any were dirty in the parent.
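
Something like the below is what I have in mind -- completely untested,
the any_dirty naming/plumbing is made up here and the parts of
folio_pte_batch() not visible in the hunk above are reconstructed from
memory, so treat it as a sketch only:

static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
		pte_t *start_ptep, pte_t pte, int max_nr, bool *any_dirty)
{
	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
	const pte_t *end_ptep = start_ptep + max_nr;
	pte_t expected_pte = __pte_batch_clear_ignored(pte_next_pfn(pte));
	pte_t *ptep = start_ptep + 1;
	bool dirty;

	/* the caller already holds the first PTE, so only track the others */
	if (any_dirty)
		*any_dirty = false;

	VM_WARN_ON_FOLIO(!pte_present(pte), folio);

	while (ptep != end_ptep) {
		pte = ptep_get(ptep);
		/* remember the raw dirty bit before it gets masked off */
		if (any_dirty)
			dirty = pte_dirty(pte);
		pte = __pte_batch_clear_ignored(pte);

		if (!pte_same(pte, expected_pte))
			break;

		/* don't run past the folio; the next PFN might belong to another one */
		if (pte_pfn(pte) >= folio_end_pfn)
			break;

		/* only fold it in once the PTE is known to be part of the batch */
		if (any_dirty)
			*any_dirty |= dirty;

		expected_pte = pte_next_pfn(expected_pte);
		ptep++;
	}

	return ptep - start_ptep;
}

copy_present_ptes() could then pass an any_dirty local and pte_mkdirty()
the value it stamps on the whole batch in the child if any PTE of the
parent's batch was dirty.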

Although now I'm wondering if there is a race here... What happens if a page in
the parent becomes dirty after you have checked it but before you write protect
it? Isn't that already a problem with the current non-batched version? Why do we
even need to preserve dirty in the child for private mappings?

David Hildenbrand Jan. 23, 2024, 1:55 p.m. UTC | #4
On 23.01.24 14:42, Ryan Roberts wrote:
> On 23/01/2024 13:06, David Hildenbrand wrote:
>> On 23.01.24 13:25, Ryan Roberts wrote:
>>> On 22/01/2024 19:41, David Hildenbrand wrote:
>>>> Let's ignore these bits: they are irrelevant for fork, and will likely
>>>> be irrelevant for upcoming users such as page unmapping.
>>>>
>>>> Signed-off-by: David Hildenbrand <david@redhat.com>
>>>> ---
>>>>    mm/memory.c | 10 ++++++++--
>>>>    1 file changed, 8 insertions(+), 2 deletions(-)
>>>>
>>>> diff --git a/mm/memory.c b/mm/memory.c
>>>> index f563aec85b2a8..341b2be845b6e 100644
>>>> --- a/mm/memory.c
>>>> +++ b/mm/memory.c
>>>> @@ -953,24 +953,30 @@ static __always_inline void __copy_present_ptes(struct
>>>> vm_area_struct *dst_vma,
>>>>        set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
>>>>    }
>>>>    +static inline pte_t __pte_batch_clear_ignored(pte_t pte)
>>>> +{
>>>> +    return pte_clear_soft_dirty(pte_mkclean(pte_mkold(pte)));
>>>> +}
>>>> +
>>>>    /*
>>>>     * Detect a PTE batch: consecutive (present) PTEs that map consecutive
>>>>     * pages of the same folio.
>>>>     *
>>>>     * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN.
>>>
>>> nit: last char should be a comma (,) not a full stop (.)
>>>
>>>> + * the accessed bit, dirty bit and soft-dirty bit.
>>>>     */
>>>>    static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
>>>>            pte_t *start_ptep, pte_t pte, int max_nr)
>>>>    {
>>>>        unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
>>>>        const pte_t *end_ptep = start_ptep + max_nr;
>>>> -    pte_t expected_pte = pte_next_pfn(pte);
>>>> +    pte_t expected_pte = __pte_batch_clear_ignored(pte_next_pfn(pte));
>>>>        pte_t *ptep = start_ptep + 1;
>>>>          VM_WARN_ON_FOLIO(!pte_present(pte), folio);
>>>>          while (ptep != end_ptep) {
>>>> -        pte = ptep_get(ptep);
>>>> +        pte = __pte_batch_clear_ignored(ptep_get(ptep));
>>>>              if (!pte_same(pte, expected_pte))
>>>>                break;
>>>
>>> I think you'll lose dirty information in the child for private mappings? If the
>>> first pte in a batch is clean, but a subsequent page is dirty, you will end up
>>> setting all the pages in the batch as clean in the child. Previous behavior
>>> would preserve dirty bit for private mappings.
>>>
>>> In my version (v3) that did arbitrary batching, I had some fun and games
>>> tracking dirty, write and uffd_wp:
>>> https://lore.kernel.org/linux-arm-kernel/20231204105440.61448-2-ryan.roberts@arm.com/
>>>
>>> Also, I think you will currently either set soft dirty on all or none of the
>>> pages in the batch, depending on the value of the first. I previously convinced
>>> myself that the state was unimportant so always cleared it in the child to
>>> provide consistency.
>>
>> Good points regarding dirty and soft-dirty. I wanted to avoid passing flags to
>> folio_pte_batch(), but maybe that's just what we need to not change behavior.
> 
> I think you could not bother with the enforce_uffd_wp - just always enforce
> uffd-wp. So that's one simplification vs mine. Then you just need an any_dirty

I think I'll just leave uffd-wp alone for now; it's a corner case with 
fork/munmap that can be optimized later on top if really needed.

Regarding soft-dirty (which is set automatically much more often), I can 
certainly ignore the bit if !vma_soft_dirty_enabled(vma) [which is true 
in most cases]. So that's easy to handle. But likely, soft-dirty for the 
child carries no useful information and should always be cleared. I'll 
have to double-check what the VMA flags will be for the child process.

> flag following the same pattern as your any_writable. Then just set dirty on the
> whole batch in the child if any were dirty in the parent.

Regarding dirtying, I'm not 100% sure yet if we should just always dirty 
all ptes if any is dirty, or if we should preserve the state for private 
VMAs for now.

> 
> Although now I'm wondering if there is a race here... What happens if a page in
> the parent becomes dirty after you have checked it but before you write protect
> it? Isn't that already a problem with the current non-batched version? Why do we
> even need to preserve dirty in the child for private mappings?

I suspect, because the parent could zap the anon folio. If the folio is 
clean, but the PTE dirty, I suspect that we could lose data of the child 
if we were to evict that clean folio (swapout).

So I assume we simply copy the dirty PTE bit, so the system knows that 
that folio is actually dirty, because one PTE is dirty.

Touching only PTEs avoids having to mess with folio flags.

But that's just pure speculation. E.g., fs/proc/task_mmu.c does some 
slightly different accounting if a PTE is dirty. But usually, it checks 
if either the PTE or the folio is dirty.
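
(The shape of the check I mean is roughly the following -- from memory,
not the literal task_mmu.c code, and the counters are just illustrative:)

	/* account the page as dirty if either the PTE or the folio says so */
	if (pte_dirty(pte) || folio_test_dirty(folio))
		dirty_pages += nr_pages;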

I'll have to do some more digging.

David Hildenbrand Jan. 23, 2024, 2:13 p.m. UTC | #5
>> Although now I'm wondering if there is a race here... What happens if a page in
>> the parent becomes dirty after you have checked it but before you write protect
>> it? Isn't that already a problem with the current non-batched version? Why do we
>> even need to preserve dirty in the child for private mappings?
> 
> I suspect, because the parent could zap the anon folio. If the folio is
> clean, but the PTE dirty, I suspect that we could lose data of the child
> if we were to evict that clean folio (swapout).
> 
> So I assume we simply copy the dirty PTE bit, so the system knows that
> that folio is actually dirty, because one PTE is dirty.

Oh, and regarding your race concern: it's undefined which page state
one would see if some write is racing with fork, so it also doesn't matter
if we would copy the PTE dirty bit or not, if it gets set in a racy fashion.

I'll now experiment with:

 From 14e83ff2a422a96ce5701f9c8454a49f9ed947e3 Mon Sep 17 00:00:00 2001
From: David Hildenbrand <david@redhat.com>
Date: Sat, 30 Dec 2023 12:54:35 +0100
Subject: [PATCH] mm/memory: ignore dirty/accessed/soft-dirty bits in
  folio_pte_batch()

Let's always ignore the accessed/young bit: we'll always mark the PTE
as old in our child process during fork, and upcoming users will
similarly not care.

Ignore the dirty bit only if we don't want to duplicate the dirty bit
into the child process during fork. Maybe, we could just set all PTEs
in the child dirty if any PTE is dirty. For now, let's keep the behavior
unchanged.

Ignore the soft-dirty bit only if the bit doesn't have any meaning in
the src vma.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
  mm/memory.c | 34 ++++++++++++++++++++++++++++++----
  1 file changed, 30 insertions(+), 4 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 7690994929d26..9aba1b0e871ca 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -953,24 +953,44 @@ static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
  	set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
  }
  
+/* Flags for folio_pte_batch(). */
+typedef int __bitwise fpb_t;
+
+/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
+#define FPB_IGNORE_DIRTY		((__force fpb_t)BIT(0))
+
+/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
+#define FPB_IGNORE_SOFT_DIRTY		((__force fpb_t)BIT(1))
+
+static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
+{
+	if (flags & FPB_IGNORE_DIRTY)
+		pte = pte_mkclean(pte);
+	if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
+		pte = pte_clear_soft_dirty(pte);
+	return pte_mkold(pte);
+}
+
  /*
   * Detect a PTE batch: consecutive (present) PTEs that map consecutive
   * pages of the same folio.
   *
   * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN.
+ * the accessed bit, dirty bit (with FPB_IGNORE_DIRTY) and soft-dirty bit
+ * (with FPB_IGNORE_SOFT_DIRTY).
   */
  static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
-		pte_t *start_ptep, pte_t pte, int max_nr)
+		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags)
  {
  	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
  	const pte_t *end_ptep = start_ptep + max_nr;
-	pte_t expected_pte = pte_next_pfn(pte);
+	pte_t expected_pte = __pte_batch_clear_ignored(pte_next_pfn(pte), flags);
  	pte_t *ptep = start_ptep + 1;
  
  	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
  
  	while (ptep != end_ptep) {
-		pte = ptep_get(ptep);
+		pte = __pte_batch_clear_ignored(ptep_get(ptep), flags);
  
  		if (!pte_same(pte, expected_pte))
  			break;
@@ -1004,6 +1024,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
  {
  	struct page *page;
  	struct folio *folio;
+	fpb_t flags = 0;
  	int err, nr;
  
  	page = vm_normal_page(src_vma, addr, pte);
@@ -1018,7 +1039,12 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
  	 * by keeping the batching logic separate.
  	 */
  	if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
-		nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr);
+		if (src_vma->vm_flags & VM_SHARED)
+			flags |= FPB_IGNORE_DIRTY;
+		if (!vma_soft_dirty_enabled(src_vma))
+			flags |= FPB_IGNORE_SOFT_DIRTY;
+
+		nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags);
  		folio_ref_add(folio, nr);
  		if (folio_test_anon(folio)) {
  			if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,

Ryan Roberts Jan. 23, 2024, 2:27 p.m. UTC | #6
On 23/01/2024 14:13, David Hildenbrand wrote:
>>> Although now I'm wondering if there is a race here... What happens if a page in
>>> the parent becomes dirty after you have checked it but before you write protect
>>> it? Isn't that already a problem with the current non-batched version? Why do we
>>> even need to preserve dirty in the child for private mappings?
>>
>> I suspect, because the parent could zap the anon folio. If the folio is
>> clean, but the PTE dirty, I suspect that we could lose data of the child
>> if we were to evict that clean folio (swapout).
>>
>> So I assume we simply copy the dirty PTE bit, so the system knows that
>> that folio is actually dirty, because one PTE is dirty.
> 
> Oh, and regarding your race concern: it's undefined which page state
> one would see if some write is racing with fork, so it also doesn't matter
> if we would copy the PTE dirty bit or not, if it gets set in a racy fashion.

Ahh that makes sense. Thanks.

> 
> I'll now experiment with:

Looks good as long as it's still performant.

> 
> From 14e83ff2a422a96ce5701f9c8454a49f9ed947e3 Mon Sep 17 00:00:00 2001
> From: David Hildenbrand <david@redhat.com>
> Date: Sat, 30 Dec 2023 12:54:35 +0100
> Subject: [PATCH] mm/memory: ignore dirty/accessed/soft-dirty bits in
>  folio_pte_batch()
> 
> Let's always ignore the accessed/young bit: we'll always mark the PTE
> as old in our child process during fork, and upcoming users will
> similarly not care.
> 
> Ignore the dirty bit only if we don't want to duplicate the dirty bit
> into the child process during fork. Maybe, we could just set all PTEs
> in the child dirty if any PTE is dirty. For now, let's keep the behavior
> unchanged.
> 
> Ignore the soft-dirty bit only if the bit doesn't have any meaning in
> the src vma.
> 
> Signed-off-by: David Hildenbrand <david@redhat.com>
> ---
>  mm/memory.c | 34 ++++++++++++++++++++++++++++++----
>  1 file changed, 30 insertions(+), 4 deletions(-)
> 
> diff --git a/mm/memory.c b/mm/memory.c
> index 7690994929d26..9aba1b0e871ca 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -953,24 +953,44 @@ static __always_inline void __copy_present_ptes(struct
> vm_area_struct *dst_vma,
>      set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
>  }
>  
> +/* Flags for folio_pte_batch(). */
> +typedef int __bitwise fpb_t;
> +
> +/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
> +#define FPB_IGNORE_DIRTY        ((__force fpb_t)BIT(0))
> +
> +/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
> +#define FPB_IGNORE_SOFT_DIRTY        ((__force fpb_t)BIT(1))
> +
> +static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
> +{
> +    if (flags & FPB_IGNORE_DIRTY)
> +        pte = pte_mkclean(pte);
> +    if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
> +        pte = pte_clear_soft_dirty(pte);
> +    return pte_mkold(pte);
> +}
> +
>  /*
>   * Detect a PTE batch: consecutive (present) PTEs that map consecutive
>   * pages of the same folio.
>   *
>   * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN.
> + * the accessed bit, dirty bit (with FPB_IGNORE_DIRTY) and soft-dirty bit
> + * (with FPB_IGNORE_SOFT_DIRTY).
>   */
>  static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
> -        pte_t *start_ptep, pte_t pte, int max_nr)
> +        pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags)
>  {
>      unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
>      const pte_t *end_ptep = start_ptep + max_nr;
> -    pte_t expected_pte = pte_next_pfn(pte);
> +    pte_t expected_pte = __pte_batch_clear_ignored(pte_next_pfn(pte), flags);
>      pte_t *ptep = start_ptep + 1;
>  
>      VM_WARN_ON_FOLIO(!pte_present(pte), folio);
>  
>      while (ptep != end_ptep) {
> -        pte = ptep_get(ptep);
> +        pte = __pte_batch_clear_ignored(ptep_get(ptep), flags);
>  
>          if (!pte_same(pte, expected_pte))
>              break;
> @@ -1004,6 +1024,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct
> vm_area_struct *src_vma
>  {
>      struct page *page;
>      struct folio *folio;
> +    fpb_t flags = 0;
>      int err, nr;
>  
>      page = vm_normal_page(src_vma, addr, pte);
> @@ -1018,7 +1039,12 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct
> vm_area_struct *src_vma
>       * by keeping the batching logic separate.
>       */
>      if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
> -        nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr);
> +        if (src_vma->vm_flags & VM_SHARED)
> +            flags |= FPB_IGNORE_DIRTY;
> +        if (!vma_soft_dirty_enabled(src_vma))
> +            flags |= FPB_IGNORE_SOFT_DIRTY;
> +
> +        nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags);
>          folio_ref_add(folio, nr);
>          if (folio_test_anon(folio)) {
>              if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,

Patch

diff --git a/mm/memory.c b/mm/memory.c
index f563aec85b2a8..341b2be845b6e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -953,24 +953,30 @@  static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
 	set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
 }
 
+static inline pte_t __pte_batch_clear_ignored(pte_t pte)
+{
+	return pte_clear_soft_dirty(pte_mkclean(pte_mkold(pte)));
+}
+
 /*
  * Detect a PTE batch: consecutive (present) PTEs that map consecutive
  * pages of the same folio.
  *
  * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN.
+ * the accessed bit, dirty bit and soft-dirty bit.
  */
 static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
 		pte_t *start_ptep, pte_t pte, int max_nr)
 {
 	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
 	const pte_t *end_ptep = start_ptep + max_nr;
-	pte_t expected_pte = pte_next_pfn(pte);
+	pte_t expected_pte = __pte_batch_clear_ignored(pte_next_pfn(pte));
 	pte_t *ptep = start_ptep + 1;
 
 	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
 
 	while (ptep != end_ptep) {
-		pte = ptep_get(ptep);
+		pte = __pte_batch_clear_ignored(ptep_get(ptep));
 
 		if (!pte_same(pte, expected_pte))
 			break;