diff mbox series

mm/smaps: Don't access young/dirty bit if pte unpresent

Message ID 20220803220329.46299-1-peterx@redhat.com (mailing list archive)
State New
Headers show
Series mm/smaps: Don't access young/dirty bit if pte unpresent | expand

Commit Message

Peter Xu Aug. 3, 2022, 10:03 p.m. UTC
These bits are only valid when the ptes are present.  Introduce two
booleans for them and set them to false when !pte_present().

Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
Fixes: b1d4d9e0cbd0 ("proc/smaps: carefully handle migration entries", 2012-05-31)
Signed-off-by: Peter Xu <peterx@redhat.com>
---
 fs/proc/task_mmu.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

Comments

Vlastimil Babka (SUSE) Aug. 4, 2022, 9:02 a.m. UTC | #1
On 8/4/22 00:03, Peter Xu wrote:
> These bits should only be valid when the ptes are present.  Introducing two
> booleans for it and set it to false when !pte_present().
> 
> Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
> Fixes: b1d4d9e0cbd0 ("proc/smaps: carefully handle migration entries", 2012-05-31)
> Signed-off-by: Peter Xu <peterx@redhat.com>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

> ---
>  fs/proc/task_mmu.c | 7 ++++---
>  1 file changed, 4 insertions(+), 3 deletions(-)
> 
> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> index 9913f3be9fd2..482f91577f8c 100644
> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -527,10 +527,12 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
>  	struct vm_area_struct *vma = walk->vma;
>  	bool locked = !!(vma->vm_flags & VM_LOCKED);
>  	struct page *page = NULL;
> -	bool migration = false;
> +	bool migration = false, young = false, dirty = false;
>  
>  	if (pte_present(*pte)) {
>  		page = vm_normal_page(vma, addr, *pte);
> +		young = pte_young(*pte);
> +		dirty = pte_dirty(*pte);
>  	} else if (is_swap_pte(*pte)) {
>  		swp_entry_t swpent = pte_to_swp_entry(*pte);
>  
> @@ -560,8 +562,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
>  	if (!page)
>  		return;
>  
> -	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
> -		      locked, migration);
> +	smaps_account(mss, page, false, young, dirty, locked, migration);
>  }
>  
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
David Hildenbrand Aug. 4, 2022, 11:01 a.m. UTC | #2
On 04.08.22 00:03, Peter Xu wrote:
> These bits should only be valid when the ptes are present.  Introducing two
> booleans for it and set it to false when !pte_present().
> 
> Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
> Fixes: b1d4d9e0cbd0 ("proc/smaps: carefully handle migration entries", 2012-05-31)
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
>  fs/proc/task_mmu.c | 7 ++++---
>  1 file changed, 4 insertions(+), 3 deletions(-)
> 
> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> index 9913f3be9fd2..482f91577f8c 100644
> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -527,10 +527,12 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
>  	struct vm_area_struct *vma = walk->vma;
>  	bool locked = !!(vma->vm_flags & VM_LOCKED);
>  	struct page *page = NULL;
> -	bool migration = false;
> +	bool migration = false, young = false, dirty = false;
>  
>  	if (pte_present(*pte)) {
>  		page = vm_normal_page(vma, addr, *pte);
> +		young = pte_young(*pte);
> +		dirty = pte_dirty(*pte);
>  	} else if (is_swap_pte(*pte)) {
>  		swp_entry_t swpent = pte_to_swp_entry(*pte);
>  
> @@ -560,8 +562,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
>  	if (!page)
>  		return;
>  
> -	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
> -		      locked, migration);
> +	smaps_account(mss, page, false, young, dirty, locked, migration);
>  }
>  
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE

Reviewed-by: David Hildenbrand <david@redhat.com>
Yang Shi Aug. 4, 2022, 5:43 p.m. UTC | #3
On Wed, Aug 3, 2022 at 3:03 PM Peter Xu <peterx@redhat.com> wrote:
>
> These bits should only be valid when the ptes are present.  Introducing two
> booleans for it and set it to false when !pte_present().
>
> Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
> Fixes: b1d4d9e0cbd0 ("proc/smaps: carefully handle migration entries", 2012-05-31)
> Signed-off-by: Peter Xu <peterx@redhat.com>

Reviewed-by: Yang Shi <shy828301@gmail.com>

> ---
>  fs/proc/task_mmu.c | 7 ++++---
>  1 file changed, 4 insertions(+), 3 deletions(-)
>
> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> index 9913f3be9fd2..482f91577f8c 100644
> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -527,10 +527,12 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
>         struct vm_area_struct *vma = walk->vma;
>         bool locked = !!(vma->vm_flags & VM_LOCKED);
>         struct page *page = NULL;
> -       bool migration = false;
> +       bool migration = false, young = false, dirty = false;
>
>         if (pte_present(*pte)) {
>                 page = vm_normal_page(vma, addr, *pte);
> +               young = pte_young(*pte);
> +               dirty = pte_dirty(*pte);
>         } else if (is_swap_pte(*pte)) {
>                 swp_entry_t swpent = pte_to_swp_entry(*pte);
>
> @@ -560,8 +562,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
>         if (!page)
>                 return;
>
> -       smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
> -                     locked, migration);
> +       smaps_account(mss, page, false, young, dirty, locked, migration);
>  }
>
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> --
> 2.32.0
>
Andrew Morton Aug. 4, 2022, 10:40 p.m. UTC | #4
On Wed,  3 Aug 2022 18:03:29 -0400 Peter Xu <peterx@redhat.com> wrote:

> These bits should only be valid when the ptes are present.  Introducing two
> booleans for it and set it to false when !pte_present().

Please (always) describe the user visible runtime effects of the bug
which is being fixed?


> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -527,10 +527,12 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
>  	struct vm_area_struct *vma = walk->vma;
>  	bool locked = !!(vma->vm_flags & VM_LOCKED);
>  	struct page *page = NULL;
> -	bool migration = false;
> +	bool migration = false, young = false, dirty = false;
>  
>  	if (pte_present(*pte)) {
>  		page = vm_normal_page(vma, addr, *pte);
> +		young = pte_young(*pte);
> +		dirty = pte_dirty(*pte);
>  	} else if (is_swap_pte(*pte)) {
>  		swp_entry_t swpent = pte_to_swp_entry(*pte);
>  
> @@ -560,8 +562,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
>  	if (!page)
>  		return;
>  
> -	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
> -		      locked, migration);
> +	smaps_account(mss, page, false, young, dirty, locked, migration);
>  }
>  
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> -- 
> 2.32.0
Peter Xu Aug. 5, 2022, 3:52 p.m. UTC | #5
On Thu, Aug 04, 2022 at 03:40:50PM -0700, Andrew Morton wrote:
> On Wed,  3 Aug 2022 18:03:29 -0400 Peter Xu <peterx@redhat.com> wrote:
> 
> > These bits should only be valid when the ptes are present.  Introducing two
> > booleans for it and set it to false when !pte_present().
> 
> Please (always) describe the user visible runtime effects of the bug
> which is being fixed?

Will do in the future.

For this specific one I'll add some more details:

  The bug is found during code reading and no real world issue reported,
  but logically such an error can cause incorrect readings for either smaps
  or smaps_rollup output on quite a few fields.

  For example, it could cause over-estimate on values like Shared_Dirty,
  Private_Dirty, Referenced; or it could also cause under-estimate on
  values like LazyFree, Shared_Clean, Private_Clean.

Meanwhile I think I overlooked the pmd handling which seems to have the
same issue but done in another commit..  Will repost soon.

Thanks,

> 
> 
> > --- a/fs/proc/task_mmu.c
> > +++ b/fs/proc/task_mmu.c
> > @@ -527,10 +527,12 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
> >  	struct vm_area_struct *vma = walk->vma;
> >  	bool locked = !!(vma->vm_flags & VM_LOCKED);
> >  	struct page *page = NULL;
> > -	bool migration = false;
> > +	bool migration = false, young = false, dirty = false;
> >  
> >  	if (pte_present(*pte)) {
> >  		page = vm_normal_page(vma, addr, *pte);
> > +		young = pte_young(*pte);
> > +		dirty = pte_dirty(*pte);
> >  	} else if (is_swap_pte(*pte)) {
> >  		swp_entry_t swpent = pte_to_swp_entry(*pte);
> >  
> > @@ -560,8 +562,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
> >  	if (!page)
> >  		return;
> >  
> > -	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
> > -		      locked, migration);
> > +	smaps_account(mss, page, false, young, dirty, locked, migration);
> >  }
> >  
> >  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> > -- 
> > 2.32.0
>
diff mbox series

Patch

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9913f3be9fd2..482f91577f8c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -527,10 +527,12 @@  static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 	struct vm_area_struct *vma = walk->vma;
 	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page = NULL;
-	bool migration = false;
+	bool migration = false, young = false, dirty = false;
 
 	if (pte_present(*pte)) {
 		page = vm_normal_page(vma, addr, *pte);
+		young = pte_young(*pte);
+		dirty = pte_dirty(*pte);
 	} else if (is_swap_pte(*pte)) {
 		swp_entry_t swpent = pte_to_swp_entry(*pte);
 
@@ -560,8 +562,7 @@  static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 	if (!page)
 		return;
 
-	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
-		      locked, migration);
+	smaps_account(mss, page, false, young, dirty, locked, migration);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE