[v4,54/66] mm/memcontrol: Stop using mm->highest_vm_end

Message ID 20211201142918.921493-55-Liam.Howlett@oracle.com
State New
Series Introducing the Maple Tree

Commit Message

Liam R. Howlett Dec. 1, 2021, 2:30 p.m. UTC
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
 mm/memcontrol.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

Comments

Vlastimil Babka Jan. 20, 2022, 11:21 a.m. UTC | #1
On 12/1/21 15:30, Liam Howlett wrote:
> From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
> 
> Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
> ---
>  mm/memcontrol.c | 6 ++----
>  1 file changed, 2 insertions(+), 4 deletions(-)
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 781605e92015..ac95b3eca557 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -5808,7 +5808,7 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
>  	unsigned long precharge;
>  
>  	mmap_read_lock(mm);
> -	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
> +	walk_page_range(mm, 0, -1, &precharge_walk_ops, NULL);

Wouldn't ULONG_MAX look better?

>  	mmap_read_unlock(mm);
>  
>  	precharge = mc.precharge;
> @@ -6106,9 +6106,7 @@ static void mem_cgroup_move_charge(void)
>  	 * When we have consumed all precharges and failed in doing
>  	 * additional charge, the page walk just aborts.
>  	 */
> -	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
> -			NULL);
> -
> +	walk_page_range(mc.mm, 0, -1, &charge_walk_ops, NULL);
>  	mmap_read_unlock(mc.mm);
>  	atomic_dec(&mc.from->moving_account);
>  }
Liam R. Howlett Jan. 26, 2022, 2:34 a.m. UTC | #2
* Vlastimil Babka <vbabka@suse.cz> [220120 06:21]:
> On 12/1/21 15:30, Liam Howlett wrote:
> > From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
> > 
> > Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
> > ---
> >  mm/memcontrol.c | 6 ++----
> >  1 file changed, 2 insertions(+), 4 deletions(-)
> > 
> > diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> > index 781605e92015..ac95b3eca557 100644
> > --- a/mm/memcontrol.c
> > +++ b/mm/memcontrol.c
> > @@ -5808,7 +5808,7 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
> >  	unsigned long precharge;
> >  
> >  	mmap_read_lock(mm);
> > -	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
> > +	walk_page_range(mm, 0, -1, &precharge_walk_ops, NULL);
> 
> Wouldn't ULONG_MAX look better?

Sure, I'm happy enough to use ULONG_MAX.

> 
> >  	mmap_read_unlock(mm);
> >  
> >  	precharge = mc.precharge;
> > @@ -6106,9 +6106,7 @@ static void mem_cgroup_move_charge(void)
> >  	 * When we have consumed all precharges and failed in doing
> >  	 * additional charge, the page walk just aborts.
> >  	 */
> > -	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
> > -			NULL);
> > -
> > +	walk_page_range(mc.mm, 0, -1, &charge_walk_ops, NULL);
> >  	mmap_read_unlock(mc.mm);
> >  	atomic_dec(&mc.from->moving_account);
> >  }
>
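
For context on the -1 versus ULONG_MAX exchange above: walk_page_range() takes its start and end bounds as unsigned long, so the literal -1 is implicitly converted to ULONG_MAX at the call site and the two spellings are equivalent; ULONG_MAX merely states the intent (walk everything) explicitly. A minimal userspace sketch of that conversion (take_bounds() is a hypothetical stand-in for the kernel prototype, used for illustration only):

#include <limits.h>
#include <stdio.h>

/*
 * Stand-in for walk_page_range()'s bound parameters, which the kernel
 * declares as unsigned long.  take_bounds() is hypothetical.
 */
static void take_bounds(unsigned long start, unsigned long end)
{
	/* A caller passing -1 for end arrives here as ULONG_MAX. */
	printf("end == ULONG_MAX: %d\n", end == ULONG_MAX);
}

int main(void)
{
	take_bounds(0, -1);	/* prints "end == ULONG_MAX: 1" */
	return 0;
}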

Patch

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 781605e92015..ac95b3eca557 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5808,7 +5808,7 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
 	unsigned long precharge;
 
 	mmap_read_lock(mm);
-	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
+	walk_page_range(mm, 0, -1, &precharge_walk_ops, NULL);
 	mmap_read_unlock(mm);
 
 	precharge = mc.precharge;
@@ -6106,9 +6106,7 @@ static void mem_cgroup_move_charge(void)
 	 * When we have consumed all precharges and failed in doing
 	 * additional charge, the page walk just aborts.
 	 */
-	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
-			NULL);
-
+	walk_page_range(mc.mm, 0, -1, &charge_walk_ops, NULL);
 	mmap_read_unlock(mc.mm);
 	atomic_dec(&mc.from->moving_account);
 }
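
Walking to ULONG_MAX is safe here because the page walker only descends into ranges backed by VMAs; gaps are either skipped or reported through the optional pte_hole callback, so the walk effectively ends at the last mapping and mm->highest_vm_end adds no information. A simplified paraphrase of the walker's VMA loop as it looked in mm/pagewalk.c around this series (a sketch for illustration, not verbatim kernel source; callbacks, locking checks, and error handling omitted):

/*
 * Paraphrase of the walk_page_range() VMA loop circa v5.16.
 */
vma = find_vma(walk.mm, start);
do {
	if (!vma) {				/* past the last VMA */
		walk.vma = NULL;
		next = end;			/* one final hole, then done */
	} else if (start < vma->vm_start) {	/* in a gap between VMAs */
		walk.vma = NULL;
		next = min(end, vma->vm_start);
	} else {				/* inside a VMA */
		walk.vma = vma;
		next = min(end, vma->vm_end);
		vma = vma->vm_next;	/* still the linked list, pre-maple-tree */
	}
	/* visit [start, next) via walk.ops, or report it as a hole */
} while (start = next, start < end);

The memcg walk ops in this file only implement pmd_entry, with no pte_hole, so with ULONG_MAX as the end the final iteration covering the gap after the last VMA is simply skipped.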