[v3,4/4] mm: Remove managed_page_count spinlock

Message ID 1541665398-29925-5-git-send-email-arunks@codeaurora.org (mailing list archive)
State New, archived
Series mm: convert totalram_pages, totalhigh_pages and managed pages to atomic

Commit Message

Arun KS Nov. 8, 2018, 8:23 a.m. UTC
Now that totalram_pages and managed_pages are atomic variables, there is
no need for the managed_page_count spinlock.

Signed-off-by: Arun KS <arunks@codeaurora.org>
Reviewed-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
---
 include/linux/mmzone.h | 6 ------
 mm/page_alloc.c        | 5 -----
 2 files changed, 11 deletions(-)
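
For context, the earlier patches in this series converted these counters
to atomic_long_t, so every update is a single atomic read-modify-write.
A paraphrased sketch of the accessors (not verbatim; the real definitions
live in include/linux/mm.h and include/linux/mmzone.h):

extern atomic_long_t _totalram_pages;

static inline unsigned long totalram_pages(void)
{
	/* Lockless read: a single atomic load is always self-consistent. */
	return (unsigned long)atomic_long_read(&_totalram_pages);
}

static inline void totalram_pages_add(long count)
{
	atomic_long_add(count, &_totalram_pages);
}

static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}

With each counter updated atomically on its own, the spinlock in
adjust_managed_page_count() no longer provides anything useful, which is
what this patch removes.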

Comments

Michal Hocko Nov. 8, 2018, 8:34 a.m. UTC | #1
On Thu 08-11-18 13:53:18, Arun KS wrote:
> Now that totalram_pages and managed_pages are atomic variables, there is
> no need for the managed_page_count spinlock.

As explained earlier. Please add a motivation here. Feel free to reuse
wording from http://lkml.kernel.org/r/20181107103630.GF2453@dhcp22.suse.cz

> 
> Signed-off-by: Arun KS <arunks@codeaurora.org>
> Reviewed-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
> Acked-by: Michal Hocko <mhocko@suse.com>
> Acked-by: Vlastimil Babka <vbabka@suse.cz>
> ---
>  include/linux/mmzone.h | 6 ------
>  mm/page_alloc.c        | 5 -----
>  2 files changed, 11 deletions(-)
> 
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index e73dc31..c71b4d9 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -428,12 +428,6 @@ struct zone {
>  	 * Write access to present_pages at runtime should be protected by
>  	 * mem_hotplug_begin/end(). Any reader who can't tolerant drift of
>  	 * present_pages should get_online_mems() to get a stable value.
> -	 *
> -	 * Read access to managed_pages should be safe because it's unsigned
> -	 * long. Write access to zone->managed_pages and totalram_pages are
> -	 * protected by managed_page_count_lock at runtime. Idealy only
> -	 * adjust_managed_page_count() should be used instead of directly
> -	 * touching zone->managed_pages and totalram_pages.
>  	 */
>  	atomic_long_t		managed_pages;
>  	unsigned long		spanned_pages;
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index f8b64cc..26c5e14 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -122,9 +122,6 @@
>  };
>  EXPORT_SYMBOL(node_states);
>  
> -/* Protect totalram_pages and zone->managed_pages */
> -static DEFINE_SPINLOCK(managed_page_count_lock);
> -
>  atomic_long_t _totalram_pages __read_mostly;
>  EXPORT_SYMBOL(_totalram_pages);
>  unsigned long totalreserve_pages __read_mostly;
> @@ -7065,14 +7062,12 @@ static int __init cmdline_parse_movablecore(char *p)
>  
>  void adjust_managed_page_count(struct page *page, long count)
>  {
> -	spin_lock(&managed_page_count_lock);
>  	atomic_long_add(count, &page_zone(page)->managed_pages);
>  	totalram_pages_add(count);
>  #ifdef CONFIG_HIGHMEM
>  	if (PageHighMem(page))
>  		totalhigh_pages_add(count);
>  #endif
> -	spin_unlock(&managed_page_count_lock);
>  }
>  EXPORT_SYMBOL(adjust_managed_page_count);
>  
> -- 
> 1.9.1
Arun KS Nov. 8, 2018, 10:03 a.m. UTC | #2
On 2018-11-08 14:04, Michal Hocko wrote:
> On Thu 08-11-18 13:53:18, Arun KS wrote:
>> Now that totalram_pages and managed_pages are atomic variables, there is
>> no need for the managed_page_count spinlock.
> 
> As explained earlier. Please add a motivation here. Feel free to reuse
> wording from 
> http://lkml.kernel.org/r/20181107103630.GF2453@dhcp22.suse.cz

Sure. Will add in next spin.

Regards,
Arun
> 
>> 
>> Signed-off-by: Arun KS <arunks@codeaurora.org>
>> Reviewed-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
>> Acked-by: Michal Hocko <mhocko@suse.com>
>> Acked-by: Vlastimil Babka <vbabka@suse.cz>
>> ---
>>  include/linux/mmzone.h | 6 ------
>>  mm/page_alloc.c        | 5 -----
>>  2 files changed, 11 deletions(-)
>> 
>> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
>> index e73dc31..c71b4d9 100644
>> --- a/include/linux/mmzone.h
>> +++ b/include/linux/mmzone.h
>> @@ -428,12 +428,6 @@ struct zone {
>>  	 * Write access to present_pages at runtime should be protected by
>>  	 * mem_hotplug_begin/end(). Any reader who can't tolerant drift of
>>  	 * present_pages should get_online_mems() to get a stable value.
>> -	 *
>> -	 * Read access to managed_pages should be safe because it's unsigned
>> -	 * long. Write access to zone->managed_pages and totalram_pages are
>> -	 * protected by managed_page_count_lock at runtime. Idealy only
>> -	 * adjust_managed_page_count() should be used instead of directly
>> -	 * touching zone->managed_pages and totalram_pages.
>>  	 */
>>  	atomic_long_t		managed_pages;
>>  	unsigned long		spanned_pages;
>> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
>> index f8b64cc..26c5e14 100644
>> --- a/mm/page_alloc.c
>> +++ b/mm/page_alloc.c
>> @@ -122,9 +122,6 @@
>>  };
>>  EXPORT_SYMBOL(node_states);
>> 
>> -/* Protect totalram_pages and zone->managed_pages */
>> -static DEFINE_SPINLOCK(managed_page_count_lock);
>> -
>>  atomic_long_t _totalram_pages __read_mostly;
>>  EXPORT_SYMBOL(_totalram_pages);
>>  unsigned long totalreserve_pages __read_mostly;
>> @@ -7065,14 +7062,12 @@ static int __init cmdline_parse_movablecore(char *p)
>> 
>>  void adjust_managed_page_count(struct page *page, long count)
>>  {
>> -	spin_lock(&managed_page_count_lock);
>>  	atomic_long_add(count, &page_zone(page)->managed_pages);
>>  	totalram_pages_add(count);
>>  #ifdef CONFIG_HIGHMEM
>>  	if (PageHighMem(page))
>>  		totalhigh_pages_add(count);
>>  #endif
>> -	spin_unlock(&managed_page_count_lock);
>>  }
>>  EXPORT_SYMBOL(adjust_managed_page_count);
>> 
>> --
>> 1.9.1
Michal Hocko Nov. 8, 2018, 10:14 a.m. UTC | #3
On Thu 08-11-18 15:33:06, Arun KS wrote:
> On 2018-11-08 14:04, Michal Hocko wrote:
> > On Thu 08-11-18 13:53:18, Arun KS wrote:
> > > Now that totalram_pages and managed_pages are atomic variables, there is
> > > no need for the managed_page_count spinlock.
> > 
> > As explained earlier. Please add a motivation here. Feel free to reuse
> > wording from
> > http://lkml.kernel.org/r/20181107103630.GF2453@dhcp22.suse.cz
> 
> Sure. Will add in next spin.

Andrew usually updates changelogs if you give him the full wording.
I would wait a few days before resubmitting, if that is needed at all.
0day will throw a lot of random configs, which can reveal some leftovers.
Arun KS Nov. 8, 2018, 10:31 a.m. UTC | #4
On 2018-11-08 15:44, Michal Hocko wrote:
> On Thu 08-11-18 15:33:06, Arun KS wrote:
>> On 2018-11-08 14:04, Michal Hocko wrote:
>> > On Thu 08-11-18 13:53:18, Arun KS wrote:
>> > > Now that totalram_pages and managed_pages are atomic variables, there is
>> > > no need for the managed_page_count spinlock.
>> >
>> > As explained earlier. Please add a motivation here. Feel free to reuse
>> > wording from
>> > http://lkml.kernel.org/r/20181107103630.GF2453@dhcp22.suse.cz
>> 
>> Sure. Will add in next spin.
> 
> Andrew usually updates changelogs if you give him the full wording.
> I would wait a few days before resubmitting, if that is needed at all.

mm: Remove managed_page_count spinlock

Now that totalram_pages and managed_pages are atomic variables, there is
no need for the managed_page_count spinlock. The lock had only a weak
consistency guarantee anyway: it was not used for anything but the
updates, and no reader actually cares about all the values being updated
in sync.

Signed-off-by: Arun KS <arunks@codeaurora.org>
Reviewed-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>


> 0day will throw a lot of random configs, which can reveal some
> leftovers.

Yeah. Fixed a few of them during v3.

Regards,
Arun
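
To illustrate the "no reader actually cares" point in the changelog
above: after this series, a reader such as si_meminfo() performs a
handful of independent lockless reads. An abridged sketch (paraphrased
from mm/page_alloc.c, not verbatim):

void si_meminfo(struct sysinfo *val)
{
	/*
	 * Each counter is read independently. No lock ever made these
	 * reads mutually consistent, and no caller needs them to be.
	 */
	val->totalram = totalram_pages();
	val->freeram = global_zone_page_state(NR_FREE_PAGES);
	val->totalhigh = totalhigh_pages();
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}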
Arun KS Nov. 9, 2018, 3:42 p.m. UTC | #5
On 2018-11-08 15:44, Michal Hocko wrote:
> On Thu 08-11-18 15:33:06, Arun KS wrote:
>> On 2018-11-08 14:04, Michal Hocko wrote:
>> > On Thu 08-11-18 13:53:18, Arun KS wrote:
>> > > Now that totalram_pages and managed_pages are atomic variables, there is
>> > > no need for the managed_page_count spinlock.
>> >
>> > As explained earlier. Please add a motivation here. Feel free to reuse
>> > wording from
>> > http://lkml.kernel.org/r/20181107103630.GF2453@dhcp22.suse.cz
>> 
>> Sure. Will add in next spin.
> 
> Andrew usually updates changelogs if you give him the full wording.
> I would wait a few days before resubmitting, if that is needed at all.
> 0day will throw a lot of random configs, which can reveal some
> leftovers.

0day reported one more failure. Will fix it and resend one more version.

Regards,
Arun

Patch

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e73dc31..c71b4d9 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -428,12 +428,6 @@ struct zone {
 	 * Write access to present_pages at runtime should be protected by
 	 * mem_hotplug_begin/end(). Any reader who can't tolerant drift of
 	 * present_pages should get_online_mems() to get a stable value.
-	 *
-	 * Read access to managed_pages should be safe because it's unsigned
-	 * long. Write access to zone->managed_pages and totalram_pages are
-	 * protected by managed_page_count_lock at runtime. Idealy only
-	 * adjust_managed_page_count() should be used instead of directly
-	 * touching zone->managed_pages and totalram_pages.
 	 */
 	atomic_long_t		managed_pages;
 	unsigned long		spanned_pages;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f8b64cc..26c5e14 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -122,9 +122,6 @@ 
 };
 EXPORT_SYMBOL(node_states);
 
-/* Protect totalram_pages and zone->managed_pages */
-static DEFINE_SPINLOCK(managed_page_count_lock);
-
 atomic_long_t _totalram_pages __read_mostly;
 EXPORT_SYMBOL(_totalram_pages);
 unsigned long totalreserve_pages __read_mostly;
@@ -7065,14 +7062,12 @@ static int __init cmdline_parse_movablecore(char *p)
 
 void adjust_managed_page_count(struct page *page, long count)
 {
-	spin_lock(&managed_page_count_lock);
 	atomic_long_add(count, &page_zone(page)->managed_pages);
 	totalram_pages_add(count);
 #ifdef CONFIG_HIGHMEM
 	if (PageHighMem(page))
 		totalhigh_pages_add(count);
 #endif
-	spin_unlock(&managed_page_count_lock);
 }
 EXPORT_SYMBOL(adjust_managed_page_count);
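
For context on the callers: paths that hand pages to or take them from
the buddy allocator, such as memory hotplug and freeing of reserved
pages, reach adjust_managed_page_count() through helpers like
free_reserved_page(). A paraphrased sketch from include/linux/mm.h (not
part of this patch):

static inline void __free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

static inline void free_reserved_page(struct page *page)
{
	__free_reserved_page(page);
	/*
	 * Both the zone and global counters are bumped with atomic adds,
	 * so concurrent callers no longer serialize on a spinlock.
	 */
	adjust_managed_page_count(page, 1);
}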