[v1,11/11] mm/memory: ignore writable bit in folio_pte_batch()

Message ID 20240122194200.381241-12-david@redhat.com (mailing list archive)
State New, archived
Series mm/memory: optimize fork() with PTE-mapped THP

Commit Message

David Hildenbrand Jan. 22, 2024, 7:42 p.m. UTC
... and conditionally return to the caller whether any PTE except the first one
is writable. fork() has to make sure to properly write-protect in case any
PTE is writable. Other users (e.g., page unmapping) won't care.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 mm/memory.c | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)
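
To see the batching scheme outside of kernel context, here is a standalone
userspace model of the normalize-and-compare logic (a minimal sketch: the bit
layout, names and page-size shift are invented for illustration, and the
folio-boundary check of the real folio_pte_batch() is omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented PTE layout: PFN in the high bits, flag bits below. */
#define PTE_PFN_SHIFT	12
#define PTE_WRITE	(1ull << 0)
#define PTE_ACCESSED	(1ull << 1)
#define PTE_DIRTY	(1ull << 2)
#define PTE_SOFT_DIRTY	(1ull << 3)

/* Model of __pte_batch_clear_ignored(): mask out every bit the batch
 * comparison must not care about -- now including the writable bit. */
static uint64_t clear_ignored(uint64_t pte)
{
	return pte & ~(PTE_WRITE | PTE_ACCESSED | PTE_DIRTY | PTE_SOFT_DIRTY);
}

/* Model of pte_next_pfn(): advance the PFN by one page. */
static uint64_t next_pfn(uint64_t pte)
{
	return pte + (1ull << PTE_PFN_SHIFT);
}

/* Model of folio_pte_batch(): count consecutive PTEs mapping consecutive
 * PFNs and report whether any PTE after the first one was writable. */
static int pte_batch(const uint64_t *ptes, int max_nr, bool *any_writable)
{
	uint64_t expected = clear_ignored(next_pfn(ptes[0]));
	int nr;

	*any_writable = false;
	for (nr = 1; nr < max_nr; nr++) {
		/* Sample the raw writable bit before normalizing ... */
		bool writable = ptes[nr] & PTE_WRITE;

		if (clear_ignored(ptes[nr]) != expected)
			break;
		/* ... and fold it in only once the PTE joined the batch. */
		*any_writable |= writable;
		expected = next_pfn(expected);
	}
	return nr;
}

int main(void)
{
	/* Three consecutive PFNs; only the middle PTE is writable+dirty. */
	const uint64_t ptes[] = {
		(1ull << PTE_PFN_SHIFT),
		(2ull << PTE_PFN_SHIFT) | PTE_WRITE | PTE_DIRTY,
		(3ull << PTE_PFN_SHIFT) | PTE_ACCESSED,
	};
	bool any_writable;
	int nr = pte_batch(ptes, 3, &any_writable);

	printf("batched %d PTEs, any_writable=%d\n", nr, any_writable);
	return 0;
}

Clearing the writable bit before the pte_same()-style comparison is what lets
a mixed read-only/writable run batch as a single unit; sampling the raw bit
beforehand is what still tells the caller whether any member was writable.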

Comments

Ryan Roberts Jan. 23, 2024, 12:35 p.m. UTC | #1
On 22/01/2024 19:42, David Hildenbrand wrote:
> ... and conditionally return to the caller whether any PTE except the first one
> is writable. fork() has to make sure to properly write-protect in case any
> PTE is writable. Other users (e.g., page unmapping) won't care.
> 
> Signed-off-by: David Hildenbrand <david@redhat.com>

Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>

> ---
>  mm/memory.c | 26 +++++++++++++++++++++-----
>  1 file changed, 21 insertions(+), 5 deletions(-)
> 
> diff --git a/mm/memory.c b/mm/memory.c
> index 341b2be845b6e..a26fd0669016b 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -955,7 +955,7 @@ static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
>  
>  static inline pte_t __pte_batch_clear_ignored(pte_t pte)
>  {
> -	return pte_clear_soft_dirty(pte_mkclean(pte_mkold(pte)));
> +	return pte_wrprotect(pte_clear_soft_dirty(pte_mkclean(pte_mkold(pte))));
>  }
>  
>  /*
> @@ -963,20 +963,29 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte)
>   * pages of the same folio.
>   *
>  * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
> - * the accessed bit, dirty bit and soft-dirty bit.
> + * the accessed bit, dirty bit, soft-dirty bit and writable bit.
> + * If "any_writable" is set, it will indicate if any other PTE besides the
> + * first (given) PTE is writable.
>   */
>  static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
> -		pte_t *start_ptep, pte_t pte, int max_nr)
> +		pte_t *start_ptep, pte_t pte, int max_nr, bool *any_writable)
>  {
>  	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
>  	const pte_t *end_ptep = start_ptep + max_nr;
>  	pte_t expected_pte = __pte_batch_clear_ignored(pte_next_pfn(pte));
>  	pte_t *ptep = start_ptep + 1;
> +	bool writable;
> +
> +	if (any_writable)
> +		*any_writable = false;
>  
>  	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
>  
>  	while (ptep != end_ptep) {
> -		pte = __pte_batch_clear_ignored(ptep_get(ptep));
> +		pte = ptep_get(ptep);
> +		if (any_writable)
> +			writable = !!pte_write(pte);
> +		pte = __pte_batch_clear_ignored(pte);
>  
>  		if (!pte_same(pte, expected_pte))
>  			break;
> @@ -989,6 +998,9 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
>  		if (pte_pfn(pte) == folio_end_pfn)
>  			break;
>  
> +		if (any_writable)
> +			*any_writable |= writable;
> +
>  		expected_pte = pte_next_pfn(expected_pte);
>  		ptep++;
>  	}
> @@ -1010,6 +1022,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
>  {
>  	struct page *page;
>  	struct folio *folio;
> +	bool any_writable;
>  	int err, nr;
>  
>  	page = vm_normal_page(src_vma, addr, pte);
> @@ -1024,7 +1037,8 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
>  	 * by keeping the batching logic separate.
>  	 */
>  	if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
> -		nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr);
> +		nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr,
> +				     &any_writable);
>  		if (folio_test_anon(folio)) {
>  			folio_ref_add(folio, nr);
>  			if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
> @@ -1039,6 +1053,8 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
>  			folio_dup_file_rmap_ptes(folio, page, nr);
>  			rss[mm_counter_file(page)] += nr;
>  		}
> +		if (any_writable)
> +			pte = pte_mkwrite(pte, src_vma);
>  		__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
>  				    addr, nr);
>  		return nr;
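
The last hunk above is the fork() side of the contract: because the comparison
ignored the writable bit, the first PTE of the batch may be read-only even
though a later PTE was writable, so the writable bit is restored via
pte_mkwrite() before __copy_present_ptes() applies the usual CoW
write-protection. A standalone sketch of that decision follows (a model with
invented names and bit layout; the CoW write-protect step is assumed from the
earlier patches in this series and is not part of this diff):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_WRITE (1ull << 0)	/* invented bit layout, illustration only */

/*
 * Model of the fork() copy path after the batch scan: restore the
 * writable bit if any batch member was writable (model of pte_mkwrite()),
 * then write-protect iff the mapping is CoW (model of the wrprotect
 * performed for CoW mappings in __copy_present_ptes()).
 */
static uint64_t copy_batch_pte(uint64_t pte, bool any_writable, bool is_cow)
{
	if (any_writable)
		pte |= PTE_WRITE;
	if (is_cow)
		pte &= ~PTE_WRITE;
	return pte;
}

int main(void)
{
	/* Shared mapping: the batch stays writable in the child. */
	printf("shared: %#llx\n",
	       (unsigned long long)copy_batch_pte(0x1000, true, false));
	/* CoW mapping: write-protected, the first write will fault+copy. */
	printf("cow:    %#llx\n",
	       (unsigned long long)copy_batch_pte(0x1000, true, true));
	return 0;
}

For shared mappings the restored bit simply survives into the child; for CoW
mappings it guarantees the write-protect path sees a writable PTE and actually
triggers for the whole batch. Users like page unmapping can pass NULL for
any_writable and skip the bookkeeping entirely.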

Patch

diff --git a/mm/memory.c b/mm/memory.c
index 341b2be845b6e..a26fd0669016b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -955,7 +955,7 @@  static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
 
 static inline pte_t __pte_batch_clear_ignored(pte_t pte)
 {
-	return pte_clear_soft_dirty(pte_mkclean(pte_mkold(pte)));
+	return pte_wrprotect(pte_clear_soft_dirty(pte_mkclean(pte_mkold(pte))));
 }
 
 /*
@@ -963,20 +963,29 @@  static inline pte_t __pte_batch_clear_ignored(pte_t pte)
  * pages of the same folio.
  *
 * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
- * the accessed bit, dirty bit and soft-dirty bit.
+ * the accessed bit, dirty bit, soft-dirty bit and writable bit.
+ * If "any_writable" is set, it will indicate if any other PTE besides the
+ * first (given) PTE is writable.
  */
 static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
-		pte_t *start_ptep, pte_t pte, int max_nr)
+		pte_t *start_ptep, pte_t pte, int max_nr, bool *any_writable)
 {
 	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
 	const pte_t *end_ptep = start_ptep + max_nr;
 	pte_t expected_pte = __pte_batch_clear_ignored(pte_next_pfn(pte));
 	pte_t *ptep = start_ptep + 1;
+	bool writable;
+
+	if (any_writable)
+		*any_writable = false;
 
 	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
 
 	while (ptep != end_ptep) {
-		pte = __pte_batch_clear_ignored(ptep_get(ptep));
+		pte = ptep_get(ptep);
+		if (any_writable)
+			writable = !!pte_write(pte);
+		pte = __pte_batch_clear_ignored(pte);
 
 		if (!pte_same(pte, expected_pte))
 			break;
@@ -989,6 +998,9 @@  static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
 		if (pte_pfn(pte) == folio_end_pfn)
 			break;
 
+		if (any_writable)
+			*any_writable |= writable;
+
 		expected_pte = pte_next_pfn(expected_pte);
 		ptep++;
 	}
@@ -1010,6 +1022,7 @@  copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 {
 	struct page *page;
 	struct folio *folio;
+	bool any_writable;
 	int err, nr;
 
 	page = vm_normal_page(src_vma, addr, pte);
@@ -1024,7 +1037,8 @@  copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 	 * by keeping the batching logic separate.
 	 */
 	if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
-		nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr);
+		nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr,
+				     &any_writable);
 		if (folio_test_anon(folio)) {
 			folio_ref_add(folio, nr);
 			if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
@@ -1039,6 +1053,8 @@  copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 			folio_dup_file_rmap_ptes(folio, page, nr);
 			rss[mm_counter_file(page)] += nr;
 		}
+		if (any_writable)
+			pte = pte_mkwrite(pte, src_vma);
 		__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
 				    addr, nr);
 		return nr;