
[v7,01/11] mm/mremap: Fix race between MOVE_PMD mremap and pageout

Message ID 20210607055131.156184-2-aneesh.kumar@linux.ibm.com (mailing list archive)
State New, archived
Series: Speedup mremap on ppc64

Commit Message

Aneesh Kumar K.V June 7, 2021, 5:51 a.m. UTC
CPU 1				CPU 2					CPU 3

mremap(old_addr, new_addr)      page_shrinker/try_to_unmap_one

mmap_write_lock_killable()

				addr = old_addr
				lock(pte_ptl)
lock(pmd_ptl)
pmd = *old_pmd
pmd_clear(old_pmd)
flush_tlb_range(old_addr)

*new_pmd = pmd
									*new_addr = 10; and fills
									TLB with new addr
									and old pfn

unlock(pmd_ptl)
				ptep_clear_flush()
				old pfn is free.
									Stale TLB entry

Fix this race by holding the pmd lock in the pageout path. This still doesn't
handle the race between MOVE_PUD and pageout.

Fixes: 2c91bd4a4e2e ("mm: speed up mremap by 20x on large regions")
Link: https://lore.kernel.org/linux-mm/CAHk-=wgXVR04eBNtxQfevontWnP6FDm+oj5vauQXP3S-huwbPw@mail.gmail.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 include/linux/rmap.h |  9 ++++++---
 mm/page_vma_mapped.c | 36 ++++++++++++++++++------------------
 2 files changed, 24 insertions(+), 21 deletions(-)
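
For context, the mremap side of this race is the optimized PMD move. The snippet
below is an abridged, illustrative sketch of move_normal_pmd() in mm/mremap.c
(the ordering follows the race diagram above rather than any particular kernel
version); the point is that only PMD-level page table locks are taken, so a
concurrent rmap walker holding only the PTE lock is not serialized against the move:

/* Abridged sketch of the optimized PMD move (not the exact upstream code). */
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			    unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *old_ptl, *new_ptl;
	pmd_t pmd;

	/* Only the PMD-level locks are taken; no PTE lock is held here. */
	old_ptl = pmd_lock(mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Move the whole page table page from old_pmd to new_pmd. */
	pmd = *old_pmd;
	pmd_clear(old_pmd);
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	set_pmd_at(mm, new_addr, new_pmd, pmd);

	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	/*
	 * A try_to_unmap_one() walker that already mapped the PTE under
	 * old_addr (holding only pte_ptl) can still clear that PTE and free
	 * the page, while another CPU refills the TLB through new_addr with
	 * the old pfn, leaving the stale TLB entry shown in the diagram above.
	 */
	return true;
}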

Comments

Hugh Dickins June 8, 2021, 12:06 a.m. UTC | #1
On Mon, 7 Jun 2021, Aneesh Kumar K.V wrote:

> CPU 1				CPU 2					CPU 3
> 
> mremap(old_addr, new_addr)      page_shrinker/try_to_unmap_one
> 
> mmap_write_lock_killable()
> 
> 				addr = old_addr
> 				lock(pte_ptl)
> lock(pmd_ptl)
> pmd = *old_pmd
> pmd_clear(old_pmd)
> flush_tlb_range(old_addr)
> 
> *new_pmd = pmd
> 									*new_addr = 10; and fills
> 									TLB with new addr
> 									and old pfn
> 
> unlock(pmd_ptl)
> 				ptep_clear_flush()
> 				old pfn is free.
> 									Stale TLB entry
> 
> Fix this race by holding pmd lock in pageout. This still doesn't handle the race
> between MOVE_PUD and pageout.
> 
> Fixes: 2c91bd4a4e2e ("mm: speed up mremap by 20x on large regions")
> Link: https://lore.kernel.org/linux-mm/CAHk-=wgXVR04eBNtxQfevontWnP6FDm+oj5vauQXP3S-huwbPw@mail.gmail.com
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>

This seems very wrong to me, to require another level of locking in the
rmap lookup, just to fix some new pagetable games in mremap.

But Linus asked "Am I missing something?": neither of you have mentioned
mremap's take_rmap_locks(), so I hope that already meets your need.  And
if it needs to be called more often than before (see "need_rmap_locks"),
that's probably okay.

Hugh
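
For readers following along, take_rmap_locks()/drop_rmap_locks() in mm/mremap.c
are roughly the following (a sketch; check the tree for the exact code). Because
try_to_unmap_one() walks the rmap under these same anon_vma/i_mmap locks taken
for reading, holding them for writing across the move is what excludes a racing
rmap walker:

/* Rough sketch of the mm/mremap.c helpers referenced above. */
static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}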

> ---
>  include/linux/rmap.h |  9 ++++++---
>  mm/page_vma_mapped.c | 36 ++++++++++++++++++------------------
>  2 files changed, 24 insertions(+), 21 deletions(-)
> 
> diff --git a/include/linux/rmap.h b/include/linux/rmap.h
> index def5c62c93b3..272ab0c2b60b 100644
> --- a/include/linux/rmap.h
> +++ b/include/linux/rmap.h
> @@ -207,7 +207,8 @@ struct page_vma_mapped_walk {
>  	unsigned long address;
>  	pmd_t *pmd;
>  	pte_t *pte;
> -	spinlock_t *ptl;
> +	spinlock_t *pte_ptl;
> +	spinlock_t *pmd_ptl;
>  	unsigned int flags;
>  };
>  
> @@ -216,8 +217,10 @@ static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
>  	/* HugeTLB pte is set to the relevant page table entry without pte_mapped. */
>  	if (pvmw->pte && !PageHuge(pvmw->page))
>  		pte_unmap(pvmw->pte);
> -	if (pvmw->ptl)
> -		spin_unlock(pvmw->ptl);
> +	if (pvmw->pte_ptl)
> +		spin_unlock(pvmw->pte_ptl);
> +	if (pvmw->pmd_ptl)
> +		spin_unlock(pvmw->pmd_ptl);
>  }
>  
>  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
> diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
> index 2cf01d933f13..87a2c94c7e27 100644
> --- a/mm/page_vma_mapped.c
> +++ b/mm/page_vma_mapped.c
> @@ -47,8 +47,10 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
>  				return false;
>  		}
>  	}
> -	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
> -	spin_lock(pvmw->ptl);
> +	if (USE_SPLIT_PTE_PTLOCKS) {
> +		pvmw->pte_ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
> +		spin_lock(pvmw->pte_ptl);
> +	}
>  	return true;
>  }
>  
> @@ -162,8 +164,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
>  		if (!pvmw->pte)
>  			return false;
>  
> -		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
> -		spin_lock(pvmw->ptl);
> +		pvmw->pte_ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
> +		spin_lock(pvmw->pte_ptl);
>  		if (!check_pte(pvmw))
>  			return not_found(pvmw);
>  		return true;
> @@ -179,6 +181,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
>  	if (!pud_present(*pud))
>  		return false;
>  	pvmw->pmd = pmd_offset(pud, pvmw->address);
> +	pvmw->pmd_ptl = pmd_lock(mm, pvmw->pmd);
>  	/*
>  	 * Make sure the pmd value isn't cached in a register by the
>  	 * compiler and used as a stale value after we've observed a
> @@ -186,7 +189,6 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
>  	 */
>  	pmde = READ_ONCE(*pvmw->pmd);
>  	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
> -		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
>  		if (likely(pmd_trans_huge(*pvmw->pmd))) {
>  			if (pvmw->flags & PVMW_MIGRATION)
>  				return not_found(pvmw);
> @@ -206,14 +208,10 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
>  				}
>  			}
>  			return not_found(pvmw);
> -		} else {
> -			/* THP pmd was split under us: handle on pte level */
> -			spin_unlock(pvmw->ptl);
> -			pvmw->ptl = NULL;
>  		}
> -	} else if (!pmd_present(pmde)) {
> -		return false;
> -	}
> +	} else if (!pmd_present(pmde))
> +		return not_found(pvmw);
> +
>  	if (!map_pte(pvmw))
>  		goto next_pte;
>  	while (1) {
> @@ -233,19 +231,21 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
>  			/* Did we cross page table boundary? */
>  			if (pvmw->address % PMD_SIZE == 0) {
>  				pte_unmap(pvmw->pte);
> -				if (pvmw->ptl) {
> -					spin_unlock(pvmw->ptl);
> -					pvmw->ptl = NULL;
> +				if (pvmw->pte_ptl) {
> +					spin_unlock(pvmw->pte_ptl);
> +					pvmw->pte_ptl = NULL;
>  				}
> +				spin_unlock(pvmw->pmd_ptl);
> +				pvmw->pmd_ptl = NULL;
>  				goto restart;
>  			} else {
>  				pvmw->pte++;
>  			}
>  		} while (pte_none(*pvmw->pte));
>  
> -		if (!pvmw->ptl) {
> -			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
> -			spin_lock(pvmw->ptl);
> +		if (USE_SPLIT_PTE_PTLOCKS && !pvmw->pte_ptl) {
> +			pvmw->pte_ptl = pte_lockptr(mm, pvmw->pmd);
> +			spin_lock(pvmw->pte_ptl);
>  		}
>  	}
>  }
> -- 
> 2.31.1
Aneesh Kumar K.V June 8, 2021, 7:52 a.m. UTC | #2
Hi Hugh,

Hugh Dickins <hughd@google.com> writes:

> On Mon, 7 Jun 2021, Aneesh Kumar K.V wrote:
>
>> CPU 1				CPU 2					CPU 3
>> 
>> mremap(old_addr, new_addr)      page_shrinker/try_to_unmap_one
>> 
>> mmap_write_lock_killable()
>> 
>> 				addr = old_addr
>> 				lock(pte_ptl)
>> lock(pmd_ptl)
>> pmd = *old_pmd
>> pmd_clear(old_pmd)
>> flush_tlb_range(old_addr)
>> 
>> *new_pmd = pmd
>> 									*new_addr = 10; and fills
>> 									TLB with new addr
>> 									and old pfn
>> 
>> unlock(pmd_ptl)
>> 				ptep_clear_flush()
>> 				old pfn is free.
>> 									Stale TLB entry
>> 
>> Fix this race by holding pmd lock in pageout. This still doesn't handle the race
>> between MOVE_PUD and pageout.
>> 
>> Fixes: 2c91bd4a4e2e ("mm: speed up mremap by 20x on large regions")
>> Link: https://lore.kernel.org/linux-mm/CAHk-=wgXVR04eBNtxQfevontWnP6FDm+oj5vauQXP3S-huwbPw@mail.gmail.com
>> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
>
> This seems very wrong to me, to require another level of locking in the
> rmap lookup, just to fix some new pagetable games in mremap.
>
> But Linus asked "Am I missing something?": neither of you have mentioned
> mremap's take_rmap_locks(), so I hope that already meets your need.  And
> if it needs to be called more often than before (see "need_rmap_locks"),
> that's probably okay.
>
> Hugh
>

Thanks for reviewing the change. I missed the rmap lock in the code
path. How about the below change?

    mm/mremap: hold the rmap lock in write mode when moving page table entries.
    
    To avoid a race between the rmap walk and mremap, mremap does take_rmap_locks().
    The locks are taken to ensure that the rmap walk doesn't miss a page table entry
    due to PTE moves via move_page_tables(). The kernel further optimizes this
    locking: if the rmap walk will find the newly added vma after the old vma, the
    rmap locks are not taken. This is because the rmap walk visits the vmas in the
    same order, so a page table entry not found via the old vma will still be found
    via the new vma, which is iterated later. The actual lifetime of the page is
    still controlled by the PTE lock.
    
    This patch updates the locking requirement to handle another race condition,
    explained below, that is seen with optimized mremap::
    
    Optimized PMD move
    
        CPU 1                           CPU 2                                   CPU 3
    
        mremap(old_addr, new_addr)      page_shrinker/try_to_unmap_one
    
        mmap_write_lock_killable()
    
                                        addr = old_addr
                                        lock(pte_ptl)
        lock(pmd_ptl)
        pmd = *old_pmd
        pmd_clear(old_pmd)
        flush_tlb_range(old_addr)
    
        *new_pmd = pmd
                                                                                *new_addr = 10; and fills
                                                                                TLB with new addr
                                                                                and old pfn
    
        unlock(pmd_ptl)
                                        ptep_clear_flush()
                                        old pfn is free.
                                                                                Stale TLB entry
    
    Optimized PUD move:
    
        CPU 1                           CPU 2                                   CPU 3
    
        mremap(old_addr, new_addr)      page_shrinker/try_to_unmap_one
    
        mmap_write_lock_killable()
    
                                        addr = old_addr
                                        lock(pte_ptl)
        lock(pud_ptl)
        pud = *old_pud
        pud_clear(old_pud)
        flush_tlb_range(old_addr)
    
        *new_pud = pud
                                                                                *new_addr = 10; and fills
                                                                                TLB with new addr
                                                                                and old pfn
    
        unlock(pud_ptl)
                                        ptep_clear_flush()
                                        old pfn is free.
                                                                                Stale TLB entry
    
    Both of the above race conditions can be fixed if we force the mremap path to
    take the rmap locks.
    
    Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>

diff --git a/mm/mremap.c b/mm/mremap.c
index 9cd352fb9cf8..f12df630fb37 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -517,7 +517,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
 
 			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
-					   old_pud, new_pud, need_rmap_locks))
+					   old_pud, new_pud, true))
 				continue;
 		}
 
@@ -544,7 +544,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 			 * moving at the PMD level if possible.
 			 */
 			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
-					   old_pmd, new_pmd, need_rmap_locks))
+					   old_pmd, new_pmd, true))
 				continue;
 		}
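
For reference, move_pgt_entry() is the helper whose last argument changes above;
with true passed unconditionally it always brackets the PMD/PUD move with the
rmap locks. An abridged sketch (the real function also handles the huge PMD/PUD
cases):

/* Abridged sketch of move_pgt_entry() in mm/mremap.c. */
static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
			   unsigned long old_addr, unsigned long new_addr,
			   void *old_entry, void *new_entry, bool need_rmap_locks)
{
	bool moved;

	/* With the change above, this is now always true for these moves. */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(vma, old_addr, new_addr,
					old_entry, new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(vma, old_addr, new_addr,
					old_entry, new_entry);
		break;
	default:
		moved = false;
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(vma);

	return moved;
}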
Kirill A. Shutemov June 8, 2021, 9:42 a.m. UTC | #3
On Tue, Jun 08, 2021 at 01:22:23PM +0530, Aneesh Kumar K.V wrote:
> 
> Hi Hugh,
> 
> Hugh Dickins <hughd@google.com> writes:
> 
> > On Mon, 7 Jun 2021, Aneesh Kumar K.V wrote:
> >
> >> CPU 1				CPU 2					CPU 3
> >> 
> >> mremap(old_addr, new_addr)      page_shrinker/try_to_unmap_one
> >> 
> >> mmap_write_lock_killable()
> >> 
> >> 				addr = old_addr
> >> 				lock(pte_ptl)
> >> lock(pmd_ptl)
> >> pmd = *old_pmd
> >> pmd_clear(old_pmd)
> >> flush_tlb_range(old_addr)
> >> 
> >> *new_pmd = pmd
> >> 									*new_addr = 10; and fills
> >> 									TLB with new addr
> >> 									and old pfn
> >> 
> >> unlock(pmd_ptl)
> >> 				ptep_clear_flush()
> >> 				old pfn is free.
> >> 									Stale TLB entry
> >> 
> >> Fix this race by holding pmd lock in pageout. This still doesn't handle the race
> >> between MOVE_PUD and pageout.
> >> 
> >> Fixes: 2c91bd4a4e2e ("mm: speed up mremap by 20x on large regions")
> >> Link: https://lore.kernel.org/linux-mm/CAHk-=wgXVR04eBNtxQfevontWnP6FDm+oj5vauQXP3S-huwbPw@mail.gmail.com
> >> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> >
> > This seems very wrong to me, to require another level of locking in the
> > rmap lookup, just to fix some new pagetable games in mremap.
> >
> > But Linus asked "Am I missing something?": neither of you have mentioned
> > mremap's take_rmap_locks(), so I hope that already meets your need.  And
> > if it needs to be called more often than before (see "need_rmap_locks"),
> > that's probably okay.
> >
> > Hugh
> >
> 
> Thanks for reviewing the change. I missed the rmap lock in the code
> path. How about the below change?
> 
>     mm/mremap: hold the rmap lock in write mode when moving page table entries.
>     
>     To avoid a race between rmap walk and mremap, mremap does take_rmap_locks().
>     The lock was taken to ensure that rmap walk don't miss a page table entry due to
>     PTE moves via move_pagetables(). The kernel does further optimization of
>     this lock such that if we are going to find the newly added vma after the
>     old vma, the rmap lock is not taken. This is because rmap walk would find the
>     vmas in the same order and if we don't find the page table attached to
>     older vma we would find it with the new vma which we would iterate later.
>     The actual lifetime of the page is still controlled by the PTE lock.
>     
>     This patch updates the locking requirement to handle another race condition
>     explained below with optimized mremap::
>     
>     Optmized PMD move
>     
>         CPU 1                           CPU 2                                   CPU 3
>     
>         mremap(old_addr, new_addr)      page_shrinker/try_to_unmap_one
>     
>         mmap_write_lock_killable()
>     
>                                         addr = old_addr
>                                         lock(pte_ptl)
>         lock(pmd_ptl)
>         pmd = *old_pmd
>         pmd_clear(old_pmd)
>         flush_tlb_range(old_addr)
>     
>         *new_pmd = pmd
>                                                                                 *new_addr = 10; and fills
>                                                                                 TLB with new addr
>                                                                                 and old pfn
>     
>         unlock(pmd_ptl)
>                                         ptep_clear_flush()
>                                         old pfn is free.
>                                                                                 Stale TLB entry
>     
>     Optmized PUD move:
>     
>         CPU 1                           CPU 2                                   CPU 3
>     
>         mremap(old_addr, new_addr)      page_shrinker/try_to_unmap_one
>     
>         mmap_write_lock_killable()
>     
>                                         addr = old_addr
>                                         lock(pte_ptl)
>         lock(pud_ptl)
>         pud = *old_pud
>         pud_clear(old_pud)
>         flush_tlb_range(old_addr)
>     
>         *new_pud = pud
>                                                                                 *new_addr = 10; and fills
>                                                                                 TLB with new addr
>                                                                                 and old pfn
>     
>         unlock(pud_ptl)
>                                         ptep_clear_flush()
>                                         old pfn is free.
>                                                                                 Stale TLB entry
>     
>     Both the above race condition can be fixed if we force mremap path to take rmap lock.
>     
>     Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>

Looks like it should be enough to address the race.

It would be nice to understand what the performance overhead of the
additional locking is. Is it still faster to move a single PMD page table under
these locks compared to moving the PTE page table entries without the locks?
Aneesh Kumar K.V June 8, 2021, 11:17 a.m. UTC | #4
On 6/8/21 3:12 PM, Kirill A. Shutemov wrote:
> On Tue, Jun 08, 2021 at 01:22:23PM +0530, Aneesh Kumar K.V wrote:
>>
>> Hi Hugh,
>>
>> Hugh Dickins <hughd@google.com> writes:
>>
>>> On Mon, 7 Jun 2021, Aneesh Kumar K.V wrote:
>>>
>>>> CPU 1				CPU 2					CPU 3
>>>>
>>>> mremap(old_addr, new_addr)      page_shrinker/try_to_unmap_one
>>>>
>>>> mmap_write_lock_killable()
>>>>
>>>> 				addr = old_addr
>>>> 				lock(pte_ptl)
>>>> lock(pmd_ptl)
>>>> pmd = *old_pmd
>>>> pmd_clear(old_pmd)
>>>> flush_tlb_range(old_addr)
>>>>
>>>> *new_pmd = pmd
>>>> 									*new_addr = 10; and fills
>>>> 									TLB with new addr
>>>> 									and old pfn
>>>>
>>>> unlock(pmd_ptl)
>>>> 				ptep_clear_flush()
>>>> 				old pfn is free.
>>>> 									Stale TLB entry
>>>>
>>>> Fix this race by holding pmd lock in pageout. This still doesn't handle the race
>>>> between MOVE_PUD and pageout.
>>>>
>>>> Fixes: 2c91bd4a4e2e ("mm: speed up mremap by 20x on large regions")
>>>> Link: https://lore.kernel.org/linux-mm/CAHk-=wgXVR04eBNtxQfevontWnP6FDm+oj5vauQXP3S-huwbPw@mail.gmail.com
>>>> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
>>>
>>> This seems very wrong to me, to require another level of locking in the
>>> rmap lookup, just to fix some new pagetable games in mremap.
>>>
>>> But Linus asked "Am I missing something?": neither of you have mentioned
>>> mremap's take_rmap_locks(), so I hope that already meets your need.  And
>>> if it needs to be called more often than before (see "need_rmap_locks"),
>>> that's probably okay.
>>>
>>> Hugh
>>>
>>
>> Thanks for reviewing the change. I missed the rmap lock in the code
>> path. How about the below change?
>>
>>      mm/mremap: hold the rmap lock in write mode when moving page table entries.
>>      
>>      To avoid a race between rmap walk and mremap, mremap does take_rmap_locks().
>>      The lock was taken to ensure that rmap walk don't miss a page table entry due to
>>      PTE moves via move_pagetables(). The kernel does further optimization of
>>      this lock such that if we are going to find the newly added vma after the
>>      old vma, the rmap lock is not taken. This is because rmap walk would find the
>>      vmas in the same order and if we don't find the page table attached to
>>      older vma we would find it with the new vma which we would iterate later.
>>      The actual lifetime of the page is still controlled by the PTE lock.
>>      
>>      This patch updates the locking requirement to handle another race condition
>>      explained below with optimized mremap::
>>      
>>      Optmized PMD move
>>      
>>          CPU 1                           CPU 2                                   CPU 3
>>      
>>          mremap(old_addr, new_addr)      page_shrinker/try_to_unmap_one
>>      
>>          mmap_write_lock_killable()
>>      
>>                                          addr = old_addr
>>                                          lock(pte_ptl)
>>          lock(pmd_ptl)
>>          pmd = *old_pmd
>>          pmd_clear(old_pmd)
>>          flush_tlb_range(old_addr)
>>      
>>          *new_pmd = pmd
>>                                                                                  *new_addr = 10; and fills
>>                                                                                  TLB with new addr
>>                                                                                  and old pfn
>>      
>>          unlock(pmd_ptl)
>>                                          ptep_clear_flush()
>>                                          old pfn is free.
>>                                                                                  Stale TLB entry
>>      
>>      Optmized PUD move:
>>      
>>          CPU 1                           CPU 2                                   CPU 3
>>      
>>          mremap(old_addr, new_addr)      page_shrinker/try_to_unmap_one
>>      
>>          mmap_write_lock_killable()
>>      
>>                                          addr = old_addr
>>                                          lock(pte_ptl)
>>          lock(pud_ptl)
>>          pud = *old_pud
>>          pud_clear(old_pud)
>>          flush_tlb_range(old_addr)
>>      
>>          *new_pud = pud
>>                                                                                  *new_addr = 10; and fills
>>                                                                                  TLB with new addr
>>                                                                                  and old pfn
>>      
>>          unlock(pud_ptl)
>>                                          ptep_clear_flush()
>>                                          old pfn is free.
>>                                                                                  Stale TLB entry
>>      
>>      Both the above race condition can be fixed if we force mremap path to take rmap lock.
>>      
>>      Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> 
> Looks like it should be enough to address the race.
> 
> It would be nice to understand what is performance overhead of the
> additional locking. Is it still faster to move single PMD page table under
> these locks comparing to moving PTE page table entries without the locks?
> 

The improvements provided by optimized mremap, as captured in patch 11, are
large.

mremap HAVE_MOVE_PMD/PUD optimization time comparison for 1GB region:
1GB mremap - Source PTE-aligned, Destination PTE-aligned
   mremap time:      2292772ns
1GB mremap - Source PMD-aligned, Destination PMD-aligned
   mremap time:      1158928ns
1GB mremap - Source PUD-aligned, Destination PUD-aligned
   mremap time:        63886ns

With additional locking, I haven't observed much change in those 
numbers. But that could also be because there is no contention on these 
locks when this test is run?
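
For reference, a measurement of this kind boils down to something like the below
(a hypothetical, minimal sketch, not the actual selftest from patch 11, which
additionally controls source/destination PMD/PUD alignment when picking the
MREMAP_FIXED destination):

/* Hypothetical minimal timing sketch; not the selftest from patch 11. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/mman.h>

#define SIZE (1UL << 30)	/* 1GB */

int main(void)
{
	struct timespec t0, t1;
	char *src, *dst;

	src = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (src == MAP_FAILED)
		return 1;
	memset(src, 1, SIZE);	/* populate the page tables */

	/* Reserve a destination so MREMAP_FIXED forces an actual move. */
	dst = mmap(NULL, SIZE, PROT_NONE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (dst == MAP_FAILED)
		return 1;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	dst = mremap(src, SIZE, SIZE, MREMAP_MAYMOVE | MREMAP_FIXED, dst);
	clock_gettime(CLOCK_MONOTONIC, &t1);
	if (dst == MAP_FAILED)
		return 1;

	printf("mremap time: %ldns\n",
	       (t1.tv_sec - t0.tv_sec) * 1000000000L +
	       (t1.tv_nsec - t0.tv_nsec));
	return 0;
}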

-aneesh
Kirill A. Shutemov June 8, 2021, 12:05 p.m. UTC | #5
On Tue, Jun 08, 2021 at 04:47:19PM +0530, Aneesh Kumar K.V wrote:
> On 6/8/21 3:12 PM, Kirill A. Shutemov wrote:
> > On Tue, Jun 08, 2021 at 01:22:23PM +0530, Aneesh Kumar K.V wrote:
> > > 
> > > Hi Hugh,
> > > 
> > > Hugh Dickins <hughd@google.com> writes:
> > > 
> > > > On Mon, 7 Jun 2021, Aneesh Kumar K.V wrote:
> > > > 
> > > > > CPU 1				CPU 2					CPU 3
> > > > > 
> > > > > mremap(old_addr, new_addr)      page_shrinker/try_to_unmap_one
> > > > > 
> > > > > mmap_write_lock_killable()
> > > > > 
> > > > > 				addr = old_addr
> > > > > 				lock(pte_ptl)
> > > > > lock(pmd_ptl)
> > > > > pmd = *old_pmd
> > > > > pmd_clear(old_pmd)
> > > > > flush_tlb_range(old_addr)
> > > > > 
> > > > > *new_pmd = pmd
> > > > > 									*new_addr = 10; and fills
> > > > > 									TLB with new addr
> > > > > 									and old pfn
> > > > > 
> > > > > unlock(pmd_ptl)
> > > > > 				ptep_clear_flush()
> > > > > 				old pfn is free.
> > > > > 									Stale TLB entry
> > > > > 
> > > > > Fix this race by holding pmd lock in pageout. This still doesn't handle the race
> > > > > between MOVE_PUD and pageout.
> > > > > 
> > > > > Fixes: 2c91bd4a4e2e ("mm: speed up mremap by 20x on large regions")
> > > > > Link: https://lore.kernel.org/linux-mm/CAHk-=wgXVR04eBNtxQfevontWnP6FDm+oj5vauQXP3S-huwbPw@mail.gmail.com
> > > > > Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> > > > 
> > > > This seems very wrong to me, to require another level of locking in the
> > > > rmap lookup, just to fix some new pagetable games in mremap.
> > > > 
> > > > But Linus asked "Am I missing something?": neither of you have mentioned
> > > > mremap's take_rmap_locks(), so I hope that already meets your need.  And
> > > > if it needs to be called more often than before (see "need_rmap_locks"),
> > > > that's probably okay.
> > > > 
> > > > Hugh
> > > > 
> > > 
> > > Thanks for reviewing the change. I missed the rmap lock in the code
> > > path. How about the below change?
> > > 
> > >      mm/mremap: hold the rmap lock in write mode when moving page table entries.
> > >      To avoid a race between rmap walk and mremap, mremap does take_rmap_locks().
> > >      The lock was taken to ensure that rmap walk don't miss a page table entry due to
> > >      PTE moves via move_pagetables(). The kernel does further optimization of
> > >      this lock such that if we are going to find the newly added vma after the
> > >      old vma, the rmap lock is not taken. This is because rmap walk would find the
> > >      vmas in the same order and if we don't find the page table attached to
> > >      older vma we would find it with the new vma which we would iterate later.
> > >      The actual lifetime of the page is still controlled by the PTE lock.
> > >      This patch updates the locking requirement to handle another race condition
> > >      explained below with optimized mremap::
> > >      Optmized PMD move
> > >          CPU 1                           CPU 2                                   CPU 3
> > >          mremap(old_addr, new_addr)      page_shrinker/try_to_unmap_one
> > >          mmap_write_lock_killable()
> > >                                          addr = old_addr
> > >                                          lock(pte_ptl)
> > >          lock(pmd_ptl)
> > >          pmd = *old_pmd
> > >          pmd_clear(old_pmd)
> > >          flush_tlb_range(old_addr)
> > >          *new_pmd = pmd
> > >                                                                                  *new_addr = 10; and fills
> > >                                                                                  TLB with new addr
> > >                                                                                  and old pfn
> > >          unlock(pmd_ptl)
> > >                                          ptep_clear_flush()
> > >                                          old pfn is free.
> > >                                                                                  Stale TLB entry
> > >      Optmized PUD move:
> > >          CPU 1                           CPU 2                                   CPU 3
> > >          mremap(old_addr, new_addr)      page_shrinker/try_to_unmap_one
> > >          mmap_write_lock_killable()
> > >                                          addr = old_addr
> > >                                          lock(pte_ptl)
> > >          lock(pud_ptl)
> > >          pud = *old_pud
> > >          pud_clear(old_pud)
> > >          flush_tlb_range(old_addr)
> > >          *new_pud = pud
> > >                                                                                  *new_addr = 10; and fills
> > >                                                                                  TLB with new addr
> > >                                                                                  and old pfn
> > >          unlock(pud_ptl)
> > >                                          ptep_clear_flush()
> > >                                          old pfn is free.
> > >                                                                                  Stale TLB entry
> > >      Both the above race condition can be fixed if we force mremap path to take rmap lock.
> > >      Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> > 
> > Looks like it should be enough to address the race.
> > 
> > It would be nice to understand what is performance overhead of the
> > additional locking. Is it still faster to move single PMD page table under
> > these locks comparing to moving PTE page table entries without the locks?
> > 
> 
> The improvements provided by optimized mremap as captured in patch 11 is
> large.
> 
> mremap HAVE_MOVE_PMD/PUD optimization time comparison for 1GB region:
> 1GB mremap - Source PTE-aligned, Destination PTE-aligned
>   mremap time:      2292772ns
> 1GB mremap - Source PMD-aligned, Destination PMD-aligned
>   mremap time:      1158928ns
> 1GB mremap - Source PUD-aligned, Destination PUD-aligned
>   mremap time:        63886ns
> 
> With additional locking, I haven't observed much change in those numbers.
> But that could also be because there is no contention on these locks when
> this test is run?

Okay, it's good enough: contention should not be common and it's okay to
pay a price for correctness.

Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Hugh Dickins June 8, 2021, 8:39 p.m. UTC | #6
On Tue, 8 Jun 2021, Aneesh Kumar K.V wrote:
> 
>     mm/mremap: hold the rmap lock in write mode when moving page table entries.
>     
>     To avoid a race between rmap walk and mremap, mremap does take_rmap_locks().
>     The lock was taken to ensure that rmap walk don't miss a page table entry due to
>     PTE moves via move_pagetables(). The kernel does further optimization of
>     this lock such that if we are going to find the newly added vma after the
>     old vma, the rmap lock is not taken. This is because rmap walk would find the
>     vmas in the same order and if we don't find the page table attached to
>     older vma we would find it with the new vma which we would iterate later.
>     The actual lifetime of the page is still controlled by the PTE lock.
>     
>     This patch updates the locking requirement to handle another race condition
>     explained below with optimized mremap::
>     
>     Optmized PMD move
>     
>         CPU 1                           CPU 2                                   CPU 3
>     
>         mremap(old_addr, new_addr)      page_shrinker/try_to_unmap_one
>     
>         mmap_write_lock_killable()
>     
>                                         addr = old_addr
>                                         lock(pte_ptl)
>         lock(pmd_ptl)
>         pmd = *old_pmd
>         pmd_clear(old_pmd)
>         flush_tlb_range(old_addr)
>     
>         *new_pmd = pmd
>                                                                                 *new_addr = 10; and fills
>                                                                                 TLB with new addr
>                                                                                 and old pfn
>     
>         unlock(pmd_ptl)
>                                         ptep_clear_flush()
>                                         old pfn is free.
>                                                                                 Stale TLB entry
>     

The PUD example below is mainly a waste of space and time:
"Optimized PUD move suffers from a similar race." would be better.

>     Optmized PUD move:
>     
>         CPU 1                           CPU 2                                   CPU 3
>     
>         mremap(old_addr, new_addr)      page_shrinker/try_to_unmap_one
>     
>         mmap_write_lock_killable()
>     
>                                         addr = old_addr
>                                         lock(pte_ptl)
>         lock(pud_ptl)
>         pud = *old_pud
>         pud_clear(old_pud)
>         flush_tlb_range(old_addr)
>     
>         *new_pud = pud
>                                                                                 *new_addr = 10; and fills
>                                                                                 TLB with new addr
>                                                                                 and old pfn
>     
>         unlock(pud_ptl)
>                                         ptep_clear_flush()
>                                         old pfn is free.
>                                                                                 Stale TLB entry
>     
>     Both the above race condition can be fixed if we force mremap path to take rmap lock.
>     

Don't forget the Fixes and Link you had in the previous version:
Fixes: 2c91bd4a4e2e ("mm: speed up mremap by 20x on large regions")
Link: https://lore.kernel.org/linux-mm/CAHk-=wgXVR04eBNtxQfevontWnP6FDm+oj5vauQXP3S-huwbPw@mail.gmail.com

>     Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>

Thanks, this is orders of magnitude better!
Acked-by: Hugh Dickins <hughd@google.com>

> 
> diff --git a/mm/mremap.c b/mm/mremap.c
> index 9cd352fb9cf8..f12df630fb37 100644
> --- a/mm/mremap.c
> +++ b/mm/mremap.c
> @@ -517,7 +517,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
>  		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
>  
>  			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
> -					   old_pud, new_pud, need_rmap_locks))
> +					   old_pud, new_pud, true))
>  				continue;
>  		}
>  
> @@ -544,7 +544,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
>  			 * moving at the PMD level if possible.
>  			 */
>  			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
> -					   old_pmd, new_pmd, need_rmap_locks))
> +					   old_pmd, new_pmd, true))
>  				continue;
>  		}
>  
>

Patch

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index def5c62c93b3..272ab0c2b60b 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -207,7 +207,8 @@  struct page_vma_mapped_walk {
 	unsigned long address;
 	pmd_t *pmd;
 	pte_t *pte;
-	spinlock_t *ptl;
+	spinlock_t *pte_ptl;
+	spinlock_t *pmd_ptl;
 	unsigned int flags;
 };
 
@@ -216,8 +217,10 @@  static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
 	/* HugeTLB pte is set to the relevant page table entry without pte_mapped. */
 	if (pvmw->pte && !PageHuge(pvmw->page))
 		pte_unmap(pvmw->pte);
-	if (pvmw->ptl)
-		spin_unlock(pvmw->ptl);
+	if (pvmw->pte_ptl)
+		spin_unlock(pvmw->pte_ptl);
+	if (pvmw->pmd_ptl)
+		spin_unlock(pvmw->pmd_ptl);
 }
 
 bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 2cf01d933f13..87a2c94c7e27 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -47,8 +47,10 @@  static bool map_pte(struct page_vma_mapped_walk *pvmw)
 				return false;
 		}
 	}
-	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
-	spin_lock(pvmw->ptl);
+	if (USE_SPLIT_PTE_PTLOCKS) {
+		pvmw->pte_ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
+		spin_lock(pvmw->pte_ptl);
+	}
 	return true;
 }
 
@@ -162,8 +164,8 @@  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 		if (!pvmw->pte)
 			return false;
 
-		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
-		spin_lock(pvmw->ptl);
+		pvmw->pte_ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
+		spin_lock(pvmw->pte_ptl);
 		if (!check_pte(pvmw))
 			return not_found(pvmw);
 		return true;
@@ -179,6 +181,7 @@  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	if (!pud_present(*pud))
 		return false;
 	pvmw->pmd = pmd_offset(pud, pvmw->address);
+	pvmw->pmd_ptl = pmd_lock(mm, pvmw->pmd);
 	/*
 	 * Make sure the pmd value isn't cached in a register by the
 	 * compiler and used as a stale value after we've observed a
@@ -186,7 +189,6 @@  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	 */
 	pmde = READ_ONCE(*pvmw->pmd);
 	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
-		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
 		if (likely(pmd_trans_huge(*pvmw->pmd))) {
 			if (pvmw->flags & PVMW_MIGRATION)
 				return not_found(pvmw);
@@ -206,14 +208,10 @@  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 				}
 			}
 			return not_found(pvmw);
-		} else {
-			/* THP pmd was split under us: handle on pte level */
-			spin_unlock(pvmw->ptl);
-			pvmw->ptl = NULL;
 		}
-	} else if (!pmd_present(pmde)) {
-		return false;
-	}
+	} else if (!pmd_present(pmde))
+		return not_found(pvmw);
+
 	if (!map_pte(pvmw))
 		goto next_pte;
 	while (1) {
@@ -233,19 +231,21 @@  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 			/* Did we cross page table boundary? */
 			if (pvmw->address % PMD_SIZE == 0) {
 				pte_unmap(pvmw->pte);
-				if (pvmw->ptl) {
-					spin_unlock(pvmw->ptl);
-					pvmw->ptl = NULL;
+				if (pvmw->pte_ptl) {
+					spin_unlock(pvmw->pte_ptl);
+					pvmw->pte_ptl = NULL;
 				}
+				spin_unlock(pvmw->pmd_ptl);
+				pvmw->pmd_ptl = NULL;
 				goto restart;
 			} else {
 				pvmw->pte++;
 			}
 		} while (pte_none(*pvmw->pte));
 
-		if (!pvmw->ptl) {
-			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
-			spin_lock(pvmw->ptl);
+		if (USE_SPLIT_PTE_PTLOCKS && !pvmw->pte_ptl) {
+			pvmw->pte_ptl = pte_lockptr(mm, pvmw->pmd);
+			spin_lock(pvmw->pte_ptl);
 		}
 	}
 }