
[v2,1/4] mm: Fix multiple evaluvations of totalram_pages and managed_pages

Message ID 1541521310-28739-2-git-send-email-arunks@codeaurora.org (mailing list archive)
State New, archived
Series mm: convert totalram_pages, totalhigh_pages and managed pages to atomic

Commit Message

Arun KS Nov. 6, 2018, 4:21 p.m. UTC
This patch is in preparation for a later patch which converts totalram_pages
and zone->managed_pages to atomic variables. This patch does not introduce
any functional changes.
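
For context, the direction of the series is to hide the plain global behind an
atomic counter. A rough sketch of what such a conversion could look like (not
taken from this series; the names are illustrative only):

	/* Illustrative only: one possible shape of the later conversion. */
	atomic_long_t _totalram_pages;

	static inline unsigned long totalram_pages(void)
	{
		return (unsigned long)atomic_long_read(&_totalram_pages);
	}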

Signed-off-by: Arun KS <arunks@codeaurora.org>
Reviewed-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
---
 arch/um/kernel/mem.c                 |  3 +--
 arch/x86/kernel/cpu/microcode/core.c |  5 +++--
 drivers/hv/hv_balloon.c              | 19 ++++++++++---------
 fs/file_table.c                      |  7 ++++---
 kernel/fork.c                        |  5 +++--
 kernel/kexec_core.c                  |  5 +++--
 mm/page_alloc.c                      |  5 +++--
 mm/shmem.c                           |  3 ++-
 net/dccp/proto.c                     |  7 ++++---
 net/netfilter/nf_conntrack_core.c    |  7 ++++---
 net/netfilter/xt_hashlimit.c         |  5 +++--
 net/sctp/protocol.c                  |  7 ++++---
 12 files changed, 44 insertions(+), 34 deletions(-)

Comments

Michal Hocko Nov. 7, 2018, 8:20 a.m. UTC | #1
On Tue 06-11-18 21:51:47, Arun KS wrote:
> This patch is in preparation for a later patch which converts totalram_pages
> and zone->managed_pages to atomic variables. This patch does not introduce
> any functional changes.

I forgot to comment on this one. The patch makes a lot of sense. But I
would be a little bit more conservative and wouldn't claim "no functional
changes". As things stand now, multiple reads in the same function are
racy (without holding the lock). I do not see any example of an
obviously harmful case, but claiming the above is too strong a
statement. I would simply go with something like "Please note that
re-reading the value might lead to a different value and as such it
could lead to unexpected behavior. There are no known bugs as a result
of the current code but it is better to prevent them in principle."
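
To make the race concrete (a minimal sketch, not part of the patch; the local
variable names are made up), compare re-reading the global with taking one
snapshot:

	/* Racy: two independent reads of the global.  If memory is
	 * hot-added or removed between them, "reserve" is computed
	 * against a different total than "limit", so the two values
	 * may be mutually inconsistent. */
	unsigned long limit   = totalram_pages / 2;
	unsigned long reserve = totalram_pages - nr_free_pages();

	/* Snapshot once, as the patch does: both derived values are
	 * based on the same observation of the total. */
	unsigned long totalram_pgs = totalram_pages;
	unsigned long limit2   = totalram_pgs / 2;
	unsigned long reserve2 = totalram_pgs - nr_free_pages();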

> Signed-off-by: Arun KS <arunks@codeaurora.org>
> Reviewed-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>

Other than that
Acked-by: Michal Hocko <mhocko@suse.com>

> ---
>  arch/um/kernel/mem.c                 |  3 +--
>  arch/x86/kernel/cpu/microcode/core.c |  5 +++--
>  drivers/hv/hv_balloon.c              | 19 ++++++++++---------
>  fs/file_table.c                      |  7 ++++---
>  kernel/fork.c                        |  5 +++--
>  kernel/kexec_core.c                  |  5 +++--
>  mm/page_alloc.c                      |  5 +++--
>  mm/shmem.c                           |  3 ++-
>  net/dccp/proto.c                     |  7 ++++---
>  net/netfilter/nf_conntrack_core.c    |  7 ++++---
>  net/netfilter/xt_hashlimit.c         |  5 +++--
>  net/sctp/protocol.c                  |  7 ++++---
>  12 files changed, 44 insertions(+), 34 deletions(-)
> 
> diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
> index 1067469..134d3fd 100644
> --- a/arch/um/kernel/mem.c
> +++ b/arch/um/kernel/mem.c
> @@ -51,8 +51,7 @@ void __init mem_init(void)
>  
>  	/* this will put all low memory onto the freelists */
>  	memblock_free_all();
> -	max_low_pfn = totalram_pages;
> -	max_pfn = totalram_pages;
> +	max_pfn = max_low_pfn = totalram_pages;
>  	mem_init_print_info(NULL);
>  	kmalloc_ok = 1;
>  }
> diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
> index 2637ff0..99c67ca 100644
> --- a/arch/x86/kernel/cpu/microcode/core.c
> +++ b/arch/x86/kernel/cpu/microcode/core.c
> @@ -434,9 +434,10 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
>  			       size_t len, loff_t *ppos)
>  {
>  	ssize_t ret = -EINVAL;
> +	unsigned long totalram_pgs = totalram_pages;
>  
> -	if ((len >> PAGE_SHIFT) > totalram_pages) {
> -		pr_err("too much data (max %ld pages)\n", totalram_pages);
> +	if ((len >> PAGE_SHIFT) > totalram_pgs) {
> +		pr_err("too much data (max %ld pages)\n", totalram_pgs);
>  		return ret;
>  	}
>  
> diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
> index 4163151..cac4945 100644
> --- a/drivers/hv/hv_balloon.c
> +++ b/drivers/hv/hv_balloon.c
> @@ -1090,6 +1090,7 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
>  static unsigned long compute_balloon_floor(void)
>  {
>  	unsigned long min_pages;
> +	unsigned long totalram_pgs = totalram_pages;
>  #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
>  	/* Simple continuous piecewiese linear function:
>  	 *  max MiB -> min MiB  gradient
> @@ -1102,16 +1103,16 @@ static unsigned long compute_balloon_floor(void)
>  	 *    8192       744    (1/16)
>  	 *   32768      1512	(1/32)
>  	 */
> -	if (totalram_pages < MB2PAGES(128))
> -		min_pages = MB2PAGES(8) + (totalram_pages >> 1);
> -	else if (totalram_pages < MB2PAGES(512))
> -		min_pages = MB2PAGES(40) + (totalram_pages >> 2);
> -	else if (totalram_pages < MB2PAGES(2048))
> -		min_pages = MB2PAGES(104) + (totalram_pages >> 3);
> -	else if (totalram_pages < MB2PAGES(8192))
> -		min_pages = MB2PAGES(232) + (totalram_pages >> 4);
> +	if (totalram_pgs < MB2PAGES(128))
> +		min_pages = MB2PAGES(8) + (totalram_pgs >> 1);
> +	else if (totalram_pgs < MB2PAGES(512))
> +		min_pages = MB2PAGES(40) + (totalram_pgs >> 2);
> +	else if (totalram_pgs < MB2PAGES(2048))
> +		min_pages = MB2PAGES(104) + (totalram_pgs >> 3);
> +	else if (totalram_pgs < MB2PAGES(8192))
> +		min_pages = MB2PAGES(232) + (totalram_pgs >> 4);
>  	else
> -		min_pages = MB2PAGES(488) + (totalram_pages >> 5);
> +		min_pages = MB2PAGES(488) + (totalram_pgs >> 5);
>  #undef MB2PAGES
>  	return min_pages;
>  }
> diff --git a/fs/file_table.c b/fs/file_table.c
> index e49af4c..6e3c088 100644
> --- a/fs/file_table.c
> +++ b/fs/file_table.c
> @@ -380,10 +380,11 @@ void __init files_init(void)
>  void __init files_maxfiles_init(void)
>  {
>  	unsigned long n;
> -	unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2;
> +	unsigned long totalram_pgs = totalram_pages;
> +	unsigned long memreserve = (totalram_pgs - nr_free_pages()) * 3/2;
>  
> -	memreserve = min(memreserve, totalram_pages - 1);
> -	n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
> +	memreserve = min(memreserve, totalram_pgs - 1);
> +	n = ((totalram_pgs - memreserve) * (PAGE_SIZE / 1024)) / 10;
>  
>  	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
>  }
> diff --git a/kernel/fork.c b/kernel/fork.c
> index 07cddff..7823f31 100644
> --- a/kernel/fork.c
> +++ b/kernel/fork.c
> @@ -739,15 +739,16 @@ void __init __weak arch_task_cache_init(void) { }
>  static void set_max_threads(unsigned int max_threads_suggested)
>  {
>  	u64 threads;
> +	unsigned long totalram_pgs = totalram_pages;
>  
>  	/*
>  	 * The number of threads shall be limited such that the thread
>  	 * structures may only consume a small part of the available memory.
>  	 */
> -	if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64)
> +	if (fls64(totalram_pgs) + fls64(PAGE_SIZE) > 64)
>  		threads = MAX_THREADS;
>  	else
> -		threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
> +		threads = div64_u64((u64) totalram_pgs * (u64) PAGE_SIZE,
>  				    (u64) THREAD_SIZE * 8UL);
>  
>  	if (threads > max_threads_suggested)
> diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
> index 86ef06d..dff217c 100644
> --- a/kernel/kexec_core.c
> +++ b/kernel/kexec_core.c
> @@ -152,6 +152,7 @@ int sanity_check_segment_list(struct kimage *image)
>  	int i;
>  	unsigned long nr_segments = image->nr_segments;
>  	unsigned long total_pages = 0;
> +	unsigned long totalram_pgs = totalram_pages;
>  
>  	/*
>  	 * Verify we have good destination addresses.  The caller is
> @@ -217,13 +218,13 @@ int sanity_check_segment_list(struct kimage *image)
>  	 * wasted allocating pages, which can cause a soft lockup.
>  	 */
>  	for (i = 0; i < nr_segments; i++) {
> -		if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2)
> +		if (PAGE_COUNT(image->segment[i].memsz) > totalram_pgs / 2)
>  			return -EINVAL;
>  
>  		total_pages += PAGE_COUNT(image->segment[i].memsz);
>  	}
>  
> -	if (total_pages > totalram_pages / 2)
> +	if (total_pages > totalram_pgs / 2)
>  		return -EINVAL;
>  
>  	/*
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index a919ba5..173312b 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -7245,6 +7245,7 @@ static void calculate_totalreserve_pages(void)
>  		for (i = 0; i < MAX_NR_ZONES; i++) {
>  			struct zone *zone = pgdat->node_zones + i;
>  			long max = 0;
> +			unsigned long managed_pages = zone->managed_pages;
>  
>  			/* Find valid and maximum lowmem_reserve in the zone */
>  			for (j = i; j < MAX_NR_ZONES; j++) {
> @@ -7255,8 +7256,8 @@ static void calculate_totalreserve_pages(void)
>  			/* we treat the high watermark as reserved pages. */
>  			max += high_wmark_pages(zone);
>  
> -			if (max > zone->managed_pages)
> -				max = zone->managed_pages;
> +			if (max > managed_pages)
> +				max = managed_pages;
>  
>  			pgdat->totalreserve_pages += max;
>  
> diff --git a/mm/shmem.c b/mm/shmem.c
> index ea26d7a..6b91eab 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -114,7 +114,8 @@ static unsigned long shmem_default_max_blocks(void)
>  
>  static unsigned long shmem_default_max_inodes(void)
>  {
> -	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
> +	unsigned long totalram_pgs = totalram_pages;
> +	return min(totalram_pgs - totalhigh_pages, totalram_pgs / 2);
>  }
>  #endif
>  
> diff --git a/net/dccp/proto.c b/net/dccp/proto.c
> index 43733ac..f27daa1 100644
> --- a/net/dccp/proto.c
> +++ b/net/dccp/proto.c
> @@ -1131,6 +1131,7 @@ static inline void dccp_mib_exit(void)
>  static int __init dccp_init(void)
>  {
>  	unsigned long goal;
> +	unsigned long totalram_pgs = totalram_pages;
>  	int ehash_order, bhash_order, i;
>  	int rc;
>  
> @@ -1154,10 +1155,10 @@ static int __init dccp_init(void)
>  	 *
>  	 * The methodology is similar to that of the buffer cache.
>  	 */
> -	if (totalram_pages >= (128 * 1024))
> -		goal = totalram_pages >> (21 - PAGE_SHIFT);
> +	if (totalram_pgs >= (128 * 1024))
> +		goal = totalram_pgs >> (21 - PAGE_SHIFT);
>  	else
> -		goal = totalram_pages >> (23 - PAGE_SHIFT);
> +		goal = totalram_pgs >> (23 - PAGE_SHIFT);
>  
>  	if (thash_entries)
>  		goal = (thash_entries *
> diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
> index ca1168d..0b1801e 100644
> --- a/net/netfilter/nf_conntrack_core.c
> +++ b/net/netfilter/nf_conntrack_core.c
> @@ -2248,6 +2248,7 @@ static __always_inline unsigned int total_extension_size(void)
>  
>  int nf_conntrack_init_start(void)
>  {
> +	unsigned long totalram_pgs = totalram_pages;
>  	int max_factor = 8;
>  	int ret = -ENOMEM;
>  	int i;
> @@ -2267,11 +2268,11 @@ int nf_conntrack_init_start(void)
>  		 * >= 4GB machines have 65536 buckets.
>  		 */
>  		nf_conntrack_htable_size
> -			= (((totalram_pages << PAGE_SHIFT) / 16384)
> +			= (((totalram_pgs << PAGE_SHIFT) / 16384)
>  			   / sizeof(struct hlist_head));
> -		if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
> +		if (totalram_pgs > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
>  			nf_conntrack_htable_size = 65536;
> -		else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
> +		else if (totalram_pgs > (1024 * 1024 * 1024 / PAGE_SIZE))
>  			nf_conntrack_htable_size = 16384;
>  		if (nf_conntrack_htable_size < 32)
>  			nf_conntrack_htable_size = 32;
> diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
> index 3e7d259..6cb9a74 100644
> --- a/net/netfilter/xt_hashlimit.c
> +++ b/net/netfilter/xt_hashlimit.c
> @@ -274,14 +274,15 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
>  	struct xt_hashlimit_htable *hinfo;
>  	const struct seq_operations *ops;
>  	unsigned int size, i;
> +	unsigned long totalram_pgs = totalram_pages;
>  	int ret;
>  
>  	if (cfg->size) {
>  		size = cfg->size;
>  	} else {
> -		size = (totalram_pages << PAGE_SHIFT) / 16384 /
> +		size = (totalram_pgs << PAGE_SHIFT) / 16384 /
>  		       sizeof(struct hlist_head);
> -		if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
> +		if (totalram_pgs > 1024 * 1024 * 1024 / PAGE_SIZE)
>  			size = 8192;
>  		if (size < 16)
>  			size = 16;
> diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
> index 9b277bd..7128f85 100644
> --- a/net/sctp/protocol.c
> +++ b/net/sctp/protocol.c
> @@ -1368,6 +1368,7 @@ static __init int sctp_init(void)
>  	int status = -EINVAL;
>  	unsigned long goal;
>  	unsigned long limit;
> +	unsigned long totalram_pages;
>  	int max_share;
>  	int order;
>  	int num_entries;
> @@ -1426,10 +1427,10 @@ static __init int sctp_init(void)
>  	 * The methodology is similar to that of the tcp hash tables.
>  	 * Though not identical.  Start by getting a goal size
>  	 */
> -	if (totalram_pages >= (128 * 1024))
> -		goal = totalram_pages >> (22 - PAGE_SHIFT);
> +	if (totalram_pgs >= (128 * 1024))
> +		goal = totalram_pgs >> (22 - PAGE_SHIFT);
>  	else
> -		goal = totalram_pages >> (24 - PAGE_SHIFT);
> +		goal = totalram_pgs >> (24 - PAGE_SHIFT);
>  
>  	/* Then compute the page order for said goal */
>  	order = get_order(goal);
> -- 
> 1.9.1
Vlastimil Babka Nov. 7, 2018, 8:44 a.m. UTC | #2
On 11/7/18 9:20 AM, Michal Hocko wrote:
> On Tue 06-11-18 21:51:47, Arun KS wrote:

Hi,

there's a typo in the subject: evaluvations -> evaluations.

However, "fix" is also misleading (more below), so I'd suggest something
like:

mm: reference totalram_pages and managed_pages once per function

>> This patch is in preparation for a later patch which converts totalram_pages
>> and zone->managed_pages to atomic variables. This patch does not introduce
>> any functional changes.
> 
> I forgot to comment on this one. The patch makes a lot of sense. But I
> would be a little bit more conservative and wouldn't claim "no functional
> changes". As things stand now, multiple reads in the same function are
> racy (without holding the lock). I do not see any example of an
> obviously harmful case, but claiming the above is too strong a
> statement. I would simply go with something like "Please note that
> re-reading the value might lead to a different value and as such it
> could lead to unexpected behavior. There are no known bugs as a result
> of the current code but it is better to prevent them in principle."

However, the new code doesn't use READ_ONCE(), so the compiler is free
to read the value multiple times, and before the patch it was free to
read it just once, as the variables are not volatile. So strictly
speaking this is indeed not a functional change (if the compiler decides
differently based on the patch, it's an implementation detail).
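
For comparison, actually pinning the number of loads down would take
READ_ONCE() (a sketch of that pattern, not something this patch introduces):

	/* Exactly one load of the global; without READ_ONCE() the compiler
	 * may legally reload totalram_pages behind the local variable. */
	unsigned long totalram_pgs = READ_ONCE(totalram_pages);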

So even in my suggested subject above, 'reference' is meant as a source
code reference, not really a memory read reference. Couldn't think of a
better word though.
Michal Hocko Nov. 7, 2018, 9:23 a.m. UTC | #3
On Wed 07-11-18 09:44:00, Vlastimil Babka wrote:
> On 11/7/18 9:20 AM, Michal Hocko wrote:
> > On Tue 06-11-18 21:51:47, Arun KS wrote:
> 
> Hi,
> 
> there's a typo in the subject: evaluvations -> evaluations.
> 
> However, "fix" is also misleading (more below), so I'd suggest something
> like:
> 
> mm: reference totalram_pages and managed_pages once per function
> 
> >> This patch is in preparation for a later patch which converts totalram_pages
> >> and zone->managed_pages to atomic variables. This patch does not introduce
> >> any functional changes.
> > 
> > I forgot to comment on this one. The patch makes a lot of sense. But I
> > would be a little bit more conservative and wouldn't claim "no functional
> > changes". As things stand now, multiple reads in the same function are
> > racy (without holding the lock). I do not see any example of an
> > obviously harmful case, but claiming the above is too strong a
> > statement. I would simply go with something like "Please note that
> > re-reading the value might lead to a different value and as such it
> > could lead to unexpected behavior. There are no known bugs as a result
> > of the current code but it is better to prevent them in principle."
> 
> However, the new code doesn't use READ_ONCE(), so the compiler is free
> to read the value multiple times, and before the patch it was free to
> read it just once, as the variables are not volatile. So strictly
> speaking this is indeed not a functional change (if the compiler decides
> differently based on the patch, it's an implementation detail).

Yes, the compiler is allowed to optimize this either way without READ_ONCE,
but it is allowed to do two reads, so claiming no functional change is a
bit problematic. Not that this would be a reason to discuss this at
length...
kernel test robot Nov. 7, 2018, 7:54 p.m. UTC | #4
Hi Arun,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on v4.20-rc1 next-20181107]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Arun-KS/mm-Fix-multiple-evaluvations-of-totalram_pages-and-managed_pages/20181108-025657
config: i386-randconfig-x014-201844 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-1) 7.3.0
reproduce:
        # save the attached .config to linux build tree
        make ARCH=i386 

All warnings (new ones prefixed by >>):

   In file included from include/linux/export.h:45:0,
                    from include/linux/linkage.h:7,
                    from include/linux/kernel.h:7,
                    from include/linux/list.h:9,
                    from include/linux/module.h:9,
                    from net/sctp/protocol.c:44:
   net/sctp/protocol.c: In function 'sctp_init':
   net/sctp/protocol.c:1430:6: error: 'totalram_pgs' undeclared (first use in this function); did you mean 'totalram_pages'?
     if (totalram_pgs >= (128 * 1024))
         ^
   include/linux/compiler.h:58:30: note: in definition of macro '__trace_if'
     if (__builtin_constant_p(!!(cond)) ? !!(cond) :   \
                                 ^~~~
>> net/sctp/protocol.c:1430:2: note: in expansion of macro 'if'
     if (totalram_pgs >= (128 * 1024))
     ^~
   net/sctp/protocol.c:1430:6: note: each undeclared identifier is reported only once for each function it appears in
     if (totalram_pgs >= (128 * 1024))
         ^
   include/linux/compiler.h:58:30: note: in definition of macro '__trace_if'
     if (__builtin_constant_p(!!(cond)) ? !!(cond) :   \
                                 ^~~~
>> net/sctp/protocol.c:1430:2: note: in expansion of macro 'if'
     if (totalram_pgs >= (128 * 1024))
     ^~
   net/sctp/protocol.c:1371:16: warning: unused variable 'totalram_pages' [-Wunused-variable]
     unsigned long totalram_pages;
                   ^~~~~~~~~~~~~~
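
The error comes from the sctp_init() hunk in the patch, which declares a local
"unsigned long totalram_pages;" (shadowing the global) but then uses
"totalram_pgs"; presumably the intended change, matching the pattern used in
the other files, is:

-	unsigned long totalram_pages;
+	unsigned long totalram_pgs = totalram_pages;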

vim +/if +1430 net/sctp/protocol.c

  1363	
  1364	/* Initialize the universe into something sensible.  */
  1365	static __init int sctp_init(void)
  1366	{
  1367		int i;
  1368		int status = -EINVAL;
  1369		unsigned long goal;
  1370		unsigned long limit;
  1371		unsigned long totalram_pages;
  1372		int max_share;
  1373		int order;
  1374		int num_entries;
  1375		int max_entry_order;
  1376	
  1377		sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
  1378	
  1379		/* Allocate bind_bucket and chunk caches. */
  1380		status = -ENOBUFS;
  1381		sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket",
  1382						       sizeof(struct sctp_bind_bucket),
  1383						       0, SLAB_HWCACHE_ALIGN,
  1384						       NULL);
  1385		if (!sctp_bucket_cachep)
  1386			goto out;
  1387	
  1388		sctp_chunk_cachep = kmem_cache_create("sctp_chunk",
  1389						       sizeof(struct sctp_chunk),
  1390						       0, SLAB_HWCACHE_ALIGN,
  1391						       NULL);
  1392		if (!sctp_chunk_cachep)
  1393			goto err_chunk_cachep;
  1394	
  1395		status = percpu_counter_init(&sctp_sockets_allocated, 0, GFP_KERNEL);
  1396		if (status)
  1397			goto err_percpu_counter_init;
  1398	
  1399		/* Implementation specific variables. */
  1400	
  1401		/* Initialize default stream count setup information. */
  1402		sctp_max_instreams    		= SCTP_DEFAULT_INSTREAMS;
  1403		sctp_max_outstreams   		= SCTP_DEFAULT_OUTSTREAMS;
  1404	
  1405		/* Initialize handle used for association ids. */
  1406		idr_init(&sctp_assocs_id);
  1407	
  1408		limit = nr_free_buffer_pages() / 8;
  1409		limit = max(limit, 128UL);
  1410		sysctl_sctp_mem[0] = limit / 4 * 3;
  1411		sysctl_sctp_mem[1] = limit;
  1412		sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2;
  1413	
  1414		/* Set per-socket limits to no more than 1/128 the pressure threshold*/
  1415		limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7);
  1416		max_share = min(4UL*1024*1024, limit);
  1417	
  1418		sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */
  1419		sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1);
  1420		sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share);
  1421	
  1422		sysctl_sctp_wmem[0] = SK_MEM_QUANTUM;
  1423		sysctl_sctp_wmem[1] = 16*1024;
  1424		sysctl_sctp_wmem[2] = max(64*1024, max_share);
  1425	
  1426		/* Size and allocate the association hash table.
  1427		 * The methodology is similar to that of the tcp hash tables.
  1428		 * Though not identical.  Start by getting a goal size
  1429		 */
> 1430		if (totalram_pgs >= (128 * 1024))
  1431			goal = totalram_pgs >> (22 - PAGE_SHIFT);
  1432		else
  1433			goal = totalram_pgs >> (24 - PAGE_SHIFT);
  1434	
  1435		/* Then compute the page order for said goal */
  1436		order = get_order(goal);
  1437	
  1438		/* Now compute the required page order for the maximum sized table we
  1439		 * want to create
  1440		 */
  1441		max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
  1442					    sizeof(struct sctp_bind_hashbucket));
  1443	
  1444		/* Limit the page order by that maximum hash table size */
  1445		order = min(order, max_entry_order);
  1446	
  1447		/* Allocate and initialize the endpoint hash table.  */
  1448		sctp_ep_hashsize = 64;
  1449		sctp_ep_hashtable =
  1450			kmalloc_array(64, sizeof(struct sctp_hashbucket), GFP_KERNEL);
  1451		if (!sctp_ep_hashtable) {
  1452			pr_err("Failed endpoint_hash alloc\n");
  1453			status = -ENOMEM;
  1454			goto err_ehash_alloc;
  1455		}
  1456		for (i = 0; i < sctp_ep_hashsize; i++) {
  1457			rwlock_init(&sctp_ep_hashtable[i].lock);
  1458			INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
  1459		}
  1460	
  1461		/* Allocate and initialize the SCTP port hash table.
  1462		 * Note that order is initalized to start at the max sized
  1463		 * table we want to support.  If we can't get that many pages
  1464		 * reduce the order and try again
  1465		 */
  1466		do {
  1467			sctp_port_hashtable = (struct sctp_bind_hashbucket *)
  1468				__get_free_pages(GFP_KERNEL | __GFP_NOWARN, order);
  1469		} while (!sctp_port_hashtable && --order > 0);
  1470	
  1471		if (!sctp_port_hashtable) {
  1472			pr_err("Failed bind hash alloc\n");
  1473			status = -ENOMEM;
  1474			goto err_bhash_alloc;
  1475		}
  1476	
  1477		/* Now compute the number of entries that will fit in the
  1478		 * port hash space we allocated
  1479		 */
  1480		num_entries = (1UL << order) * PAGE_SIZE /
  1481			      sizeof(struct sctp_bind_hashbucket);
  1482	
  1483		/* And finish by rounding it down to the nearest power of two
  1484		 * this wastes some memory of course, but its needed because
  1485		 * the hash function operates based on the assumption that
  1486		 * that the number of entries is a power of two
  1487		 */
  1488		sctp_port_hashsize = rounddown_pow_of_two(num_entries);
  1489	
  1490		for (i = 0; i < sctp_port_hashsize; i++) {
  1491			spin_lock_init(&sctp_port_hashtable[i].lock);
  1492			INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
  1493		}
  1494	
  1495		status = sctp_transport_hashtable_init();
  1496		if (status)
  1497			goto err_thash_alloc;
  1498	
  1499		pr_info("Hash tables configured (bind %d/%d)\n", sctp_port_hashsize,
  1500			num_entries);
  1501	
  1502		sctp_sysctl_register();
  1503	
  1504		INIT_LIST_HEAD(&sctp_address_families);
  1505		sctp_v4_pf_init();
  1506		sctp_v6_pf_init();
  1507		sctp_sched_ops_init();
  1508	
  1509		status = register_pernet_subsys(&sctp_defaults_ops);
  1510		if (status)
  1511			goto err_register_defaults;
  1512	
  1513		status = sctp_v4_protosw_init();
  1514		if (status)
  1515			goto err_protosw_init;
  1516	
  1517		status = sctp_v6_protosw_init();
  1518		if (status)
  1519			goto err_v6_protosw_init;
  1520	
  1521		status = register_pernet_subsys(&sctp_ctrlsock_ops);
  1522		if (status)
  1523			goto err_register_ctrlsock;
  1524	
  1525		status = sctp_v4_add_protocol();
  1526		if (status)
  1527			goto err_add_protocol;
  1528	
  1529		/* Register SCTP with inet6 layer.  */
  1530		status = sctp_v6_add_protocol();
  1531		if (status)
  1532			goto err_v6_add_protocol;
  1533	
  1534		if (sctp_offload_init() < 0)
  1535			pr_crit("%s: Cannot add SCTP protocol offload\n", __func__);
  1536	
  1537	out:
  1538		return status;
  1539	err_v6_add_protocol:
  1540		sctp_v4_del_protocol();
  1541	err_add_protocol:
  1542		unregister_pernet_subsys(&sctp_ctrlsock_ops);
  1543	err_register_ctrlsock:
  1544		sctp_v6_protosw_exit();
  1545	err_v6_protosw_init:
  1546		sctp_v4_protosw_exit();
  1547	err_protosw_init:
  1548		unregister_pernet_subsys(&sctp_defaults_ops);
  1549	err_register_defaults:
  1550		sctp_v4_pf_exit();
  1551		sctp_v6_pf_exit();
  1552		sctp_sysctl_unregister();
  1553		free_pages((unsigned long)sctp_port_hashtable,
  1554			   get_order(sctp_port_hashsize *
  1555				     sizeof(struct sctp_bind_hashbucket)));
  1556	err_bhash_alloc:
  1557		sctp_transport_hashtable_destroy();
  1558	err_thash_alloc:
  1559		kfree(sctp_ep_hashtable);
  1560	err_ehash_alloc:
  1561		percpu_counter_destroy(&sctp_sockets_allocated);
  1562	err_percpu_counter_init:
  1563		kmem_cache_destroy(sctp_chunk_cachep);
  1564	err_chunk_cachep:
  1565		kmem_cache_destroy(sctp_bucket_cachep);
  1566		goto out;
  1567	}
  1568	

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
kernel test robot Nov. 7, 2018, 8:07 p.m. UTC | #5
Hi Arun,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on linus/master]
[also build test ERROR on v4.20-rc1 next-20181107]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Arun-KS/mm-Fix-multiple-evaluvations-of-totalram_pages-and-managed_pages/20181108-025657
config: i386-randconfig-x004-201844 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-1) 7.3.0
reproduce:
        # save the attached .config to linux build tree
        make ARCH=i386 

All errors (new ones prefixed by >>):

   net//sctp/protocol.c: In function 'sctp_init':
>> net//sctp/protocol.c:1430:6: error: 'totalram_pgs' undeclared (first use in this function); did you mean 'totalram_pages'?
     if (totalram_pgs >= (128 * 1024))
         ^~~~~~~~~~~~
         totalram_pages
   net//sctp/protocol.c:1430:6: note: each undeclared identifier is reported only once for each function it appears in
   net//sctp/protocol.c:1371:16: warning: unused variable 'totalram_pages' [-Wunused-variable]
     unsigned long totalram_pages;
                   ^~~~~~~~~~~~~~

vim +1430 net//sctp/protocol.c

  1363	
  1364	/* Initialize the universe into something sensible.  */
  1365	static __init int sctp_init(void)
  1366	{
  1367		int i;
  1368		int status = -EINVAL;
  1369		unsigned long goal;
  1370		unsigned long limit;
  1371		unsigned long totalram_pages;
  1372		int max_share;
  1373		int order;
  1374		int num_entries;
  1375		int max_entry_order;
  1376	
  1377		sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
  1378	
  1379		/* Allocate bind_bucket and chunk caches. */
  1380		status = -ENOBUFS;
  1381		sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket",
  1382						       sizeof(struct sctp_bind_bucket),
  1383						       0, SLAB_HWCACHE_ALIGN,
  1384						       NULL);
  1385		if (!sctp_bucket_cachep)
  1386			goto out;
  1387	
  1388		sctp_chunk_cachep = kmem_cache_create("sctp_chunk",
  1389						       sizeof(struct sctp_chunk),
  1390						       0, SLAB_HWCACHE_ALIGN,
  1391						       NULL);
  1392		if (!sctp_chunk_cachep)
  1393			goto err_chunk_cachep;
  1394	
  1395		status = percpu_counter_init(&sctp_sockets_allocated, 0, GFP_KERNEL);
  1396		if (status)
  1397			goto err_percpu_counter_init;
  1398	
  1399		/* Implementation specific variables. */
  1400	
  1401		/* Initialize default stream count setup information. */
  1402		sctp_max_instreams    		= SCTP_DEFAULT_INSTREAMS;
  1403		sctp_max_outstreams   		= SCTP_DEFAULT_OUTSTREAMS;
  1404	
  1405		/* Initialize handle used for association ids. */
  1406		idr_init(&sctp_assocs_id);
  1407	
  1408		limit = nr_free_buffer_pages() / 8;
  1409		limit = max(limit, 128UL);
  1410		sysctl_sctp_mem[0] = limit / 4 * 3;
  1411		sysctl_sctp_mem[1] = limit;
  1412		sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2;
  1413	
  1414		/* Set per-socket limits to no more than 1/128 the pressure threshold*/
  1415		limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7);
  1416		max_share = min(4UL*1024*1024, limit);
  1417	
  1418		sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */
  1419		sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1);
  1420		sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share);
  1421	
  1422		sysctl_sctp_wmem[0] = SK_MEM_QUANTUM;
  1423		sysctl_sctp_wmem[1] = 16*1024;
  1424		sysctl_sctp_wmem[2] = max(64*1024, max_share);
  1425	
  1426		/* Size and allocate the association hash table.
  1427		 * The methodology is similar to that of the tcp hash tables.
  1428		 * Though not identical.  Start by getting a goal size
  1429		 */
> 1430		if (totalram_pgs >= (128 * 1024))
  1431			goal = totalram_pgs >> (22 - PAGE_SHIFT);
  1432		else
  1433			goal = totalram_pgs >> (24 - PAGE_SHIFT);
  1434	
  1435		/* Then compute the page order for said goal */
  1436		order = get_order(goal);
  1437	
  1438		/* Now compute the required page order for the maximum sized table we
  1439		 * want to create
  1440		 */
  1441		max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
  1442					    sizeof(struct sctp_bind_hashbucket));
  1443	
  1444		/* Limit the page order by that maximum hash table size */
  1445		order = min(order, max_entry_order);
  1446	
  1447		/* Allocate and initialize the endpoint hash table.  */
  1448		sctp_ep_hashsize = 64;
  1449		sctp_ep_hashtable =
  1450			kmalloc_array(64, sizeof(struct sctp_hashbucket), GFP_KERNEL);
  1451		if (!sctp_ep_hashtable) {
  1452			pr_err("Failed endpoint_hash alloc\n");
  1453			status = -ENOMEM;
  1454			goto err_ehash_alloc;
  1455		}
  1456		for (i = 0; i < sctp_ep_hashsize; i++) {
  1457			rwlock_init(&sctp_ep_hashtable[i].lock);
  1458			INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
  1459		}
  1460	
  1461		/* Allocate and initialize the SCTP port hash table.
  1462		 * Note that order is initalized to start at the max sized
  1463		 * table we want to support.  If we can't get that many pages
  1464		 * reduce the order and try again
  1465		 */
  1466		do {
  1467			sctp_port_hashtable = (struct sctp_bind_hashbucket *)
  1468				__get_free_pages(GFP_KERNEL | __GFP_NOWARN, order);
  1469		} while (!sctp_port_hashtable && --order > 0);
  1470	
  1471		if (!sctp_port_hashtable) {
  1472			pr_err("Failed bind hash alloc\n");
  1473			status = -ENOMEM;
  1474			goto err_bhash_alloc;
  1475		}
  1476	
  1477		/* Now compute the number of entries that will fit in the
  1478		 * port hash space we allocated
  1479		 */
  1480		num_entries = (1UL << order) * PAGE_SIZE /
  1481			      sizeof(struct sctp_bind_hashbucket);
  1482	
  1483		/* And finish by rounding it down to the nearest power of two
  1484		 * this wastes some memory of course, but its needed because
  1485		 * the hash function operates based on the assumption that
  1486		 * that the number of entries is a power of two
  1487		 */
  1488		sctp_port_hashsize = rounddown_pow_of_two(num_entries);
  1489	
  1490		for (i = 0; i < sctp_port_hashsize; i++) {
  1491			spin_lock_init(&sctp_port_hashtable[i].lock);
  1492			INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
  1493		}
  1494	
  1495		status = sctp_transport_hashtable_init();
  1496		if (status)
  1497			goto err_thash_alloc;
  1498	
  1499		pr_info("Hash tables configured (bind %d/%d)\n", sctp_port_hashsize,
  1500			num_entries);
  1501	
  1502		sctp_sysctl_register();
  1503	
  1504		INIT_LIST_HEAD(&sctp_address_families);
  1505		sctp_v4_pf_init();
  1506		sctp_v6_pf_init();
  1507		sctp_sched_ops_init();
  1508	
  1509		status = register_pernet_subsys(&sctp_defaults_ops);
  1510		if (status)
  1511			goto err_register_defaults;
  1512	
  1513		status = sctp_v4_protosw_init();
  1514		if (status)
  1515			goto err_protosw_init;
  1516	
  1517		status = sctp_v6_protosw_init();
  1518		if (status)
  1519			goto err_v6_protosw_init;
  1520	
  1521		status = register_pernet_subsys(&sctp_ctrlsock_ops);
  1522		if (status)
  1523			goto err_register_ctrlsock;
  1524	
  1525		status = sctp_v4_add_protocol();
  1526		if (status)
  1527			goto err_add_protocol;
  1528	
  1529		/* Register SCTP with inet6 layer.  */
  1530		status = sctp_v6_add_protocol();
  1531		if (status)
  1532			goto err_v6_add_protocol;
  1533	
  1534		if (sctp_offload_init() < 0)
  1535			pr_crit("%s: Cannot add SCTP protocol offload\n", __func__);
  1536	
  1537	out:
  1538		return status;
  1539	err_v6_add_protocol:
  1540		sctp_v4_del_protocol();
  1541	err_add_protocol:
  1542		unregister_pernet_subsys(&sctp_ctrlsock_ops);
  1543	err_register_ctrlsock:
  1544		sctp_v6_protosw_exit();
  1545	err_v6_protosw_init:
  1546		sctp_v4_protosw_exit();
  1547	err_protosw_init:
  1548		unregister_pernet_subsys(&sctp_defaults_ops);
  1549	err_register_defaults:
  1550		sctp_v4_pf_exit();
  1551		sctp_v6_pf_exit();
  1552		sctp_sysctl_unregister();
  1553		free_pages((unsigned long)sctp_port_hashtable,
  1554			   get_order(sctp_port_hashsize *
  1555				     sizeof(struct sctp_bind_hashbucket)));
  1556	err_bhash_alloc:
  1557		sctp_transport_hashtable_destroy();
  1558	err_thash_alloc:
  1559		kfree(sctp_ep_hashtable);
  1560	err_ehash_alloc:
  1561		percpu_counter_destroy(&sctp_sockets_allocated);
  1562	err_percpu_counter_init:
  1563		kmem_cache_destroy(sctp_chunk_cachep);
  1564	err_chunk_cachep:
  1565		kmem_cache_destroy(sctp_bucket_cachep);
  1566		goto out;
  1567	}
  1568	

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Patch

diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 1067469..134d3fd 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -51,8 +51,7 @@  void __init mem_init(void)
 
 	/* this will put all low memory onto the freelists */
 	memblock_free_all();
-	max_low_pfn = totalram_pages;
-	max_pfn = totalram_pages;
+	max_pfn = max_low_pfn = totalram_pages;
 	mem_init_print_info(NULL);
 	kmalloc_ok = 1;
 }
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 2637ff0..99c67ca 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -434,9 +434,10 @@  static ssize_t microcode_write(struct file *file, const char __user *buf,
 			       size_t len, loff_t *ppos)
 {
 	ssize_t ret = -EINVAL;
+	unsigned long totalram_pgs = totalram_pages;
 
-	if ((len >> PAGE_SHIFT) > totalram_pages) {
-		pr_err("too much data (max %ld pages)\n", totalram_pages);
+	if ((len >> PAGE_SHIFT) > totalram_pgs) {
+		pr_err("too much data (max %ld pages)\n", totalram_pgs);
 		return ret;
 	}
 
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 4163151..cac4945 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1090,6 +1090,7 @@  static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
 static unsigned long compute_balloon_floor(void)
 {
 	unsigned long min_pages;
+	unsigned long totalram_pgs = totalram_pages;
 #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
 	/* Simple continuous piecewiese linear function:
 	 *  max MiB -> min MiB  gradient
@@ -1102,16 +1103,16 @@  static unsigned long compute_balloon_floor(void)
 	 *    8192       744    (1/16)
 	 *   32768      1512	(1/32)
 	 */
-	if (totalram_pages < MB2PAGES(128))
-		min_pages = MB2PAGES(8) + (totalram_pages >> 1);
-	else if (totalram_pages < MB2PAGES(512))
-		min_pages = MB2PAGES(40) + (totalram_pages >> 2);
-	else if (totalram_pages < MB2PAGES(2048))
-		min_pages = MB2PAGES(104) + (totalram_pages >> 3);
-	else if (totalram_pages < MB2PAGES(8192))
-		min_pages = MB2PAGES(232) + (totalram_pages >> 4);
+	if (totalram_pgs < MB2PAGES(128))
+		min_pages = MB2PAGES(8) + (totalram_pgs >> 1);
+	else if (totalram_pgs < MB2PAGES(512))
+		min_pages = MB2PAGES(40) + (totalram_pgs >> 2);
+	else if (totalram_pgs < MB2PAGES(2048))
+		min_pages = MB2PAGES(104) + (totalram_pgs >> 3);
+	else if (totalram_pgs < MB2PAGES(8192))
+		min_pages = MB2PAGES(232) + (totalram_pgs >> 4);
 	else
-		min_pages = MB2PAGES(488) + (totalram_pages >> 5);
+		min_pages = MB2PAGES(488) + (totalram_pgs >> 5);
 #undef MB2PAGES
 	return min_pages;
 }
diff --git a/fs/file_table.c b/fs/file_table.c
index e49af4c..6e3c088 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -380,10 +380,11 @@  void __init files_init(void)
 void __init files_maxfiles_init(void)
 {
 	unsigned long n;
-	unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2;
+	unsigned long totalram_pgs = totalram_pages;
+	unsigned long memreserve = (totalram_pgs - nr_free_pages()) * 3/2;
 
-	memreserve = min(memreserve, totalram_pages - 1);
-	n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
+	memreserve = min(memreserve, totalram_pgs - 1);
+	n = ((totalram_pgs - memreserve) * (PAGE_SIZE / 1024)) / 10;
 
 	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index 07cddff..7823f31 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -739,15 +739,16 @@  void __init __weak arch_task_cache_init(void) { }
 static void set_max_threads(unsigned int max_threads_suggested)
 {
 	u64 threads;
+	unsigned long totalram_pgs = totalram_pages;
 
 	/*
 	 * The number of threads shall be limited such that the thread
 	 * structures may only consume a small part of the available memory.
 	 */
-	if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64)
+	if (fls64(totalram_pgs) + fls64(PAGE_SIZE) > 64)
 		threads = MAX_THREADS;
 	else
-		threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
+		threads = div64_u64((u64) totalram_pgs * (u64) PAGE_SIZE,
 				    (u64) THREAD_SIZE * 8UL);
 
 	if (threads > max_threads_suggested)
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 86ef06d..dff217c 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -152,6 +152,7 @@  int sanity_check_segment_list(struct kimage *image)
 	int i;
 	unsigned long nr_segments = image->nr_segments;
 	unsigned long total_pages = 0;
+	unsigned long totalram_pgs = totalram_pages;
 
 	/*
 	 * Verify we have good destination addresses.  The caller is
@@ -217,13 +218,13 @@  int sanity_check_segment_list(struct kimage *image)
 	 * wasted allocating pages, which can cause a soft lockup.
 	 */
 	for (i = 0; i < nr_segments; i++) {
-		if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2)
+		if (PAGE_COUNT(image->segment[i].memsz) > totalram_pgs / 2)
 			return -EINVAL;
 
 		total_pages += PAGE_COUNT(image->segment[i].memsz);
 	}
 
-	if (total_pages > totalram_pages / 2)
+	if (total_pages > totalram_pgs / 2)
 		return -EINVAL;
 
 	/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a919ba5..173312b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7245,6 +7245,7 @@  static void calculate_totalreserve_pages(void)
 		for (i = 0; i < MAX_NR_ZONES; i++) {
 			struct zone *zone = pgdat->node_zones + i;
 			long max = 0;
+			unsigned long managed_pages = zone->managed_pages;
 
 			/* Find valid and maximum lowmem_reserve in the zone */
 			for (j = i; j < MAX_NR_ZONES; j++) {
@@ -7255,8 +7256,8 @@  static void calculate_totalreserve_pages(void)
 			/* we treat the high watermark as reserved pages. */
 			max += high_wmark_pages(zone);
 
-			if (max > zone->managed_pages)
-				max = zone->managed_pages;
+			if (max > managed_pages)
+				max = managed_pages;
 
 			pgdat->totalreserve_pages += max;
 
diff --git a/mm/shmem.c b/mm/shmem.c
index ea26d7a..6b91eab 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -114,7 +114,8 @@  static unsigned long shmem_default_max_blocks(void)
 
 static unsigned long shmem_default_max_inodes(void)
 {
-	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
+	unsigned long totalram_pgs = totalram_pages;
+	return min(totalram_pgs - totalhigh_pages, totalram_pgs / 2);
 }
 #endif
 
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 43733ac..f27daa1 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1131,6 +1131,7 @@  static inline void dccp_mib_exit(void)
 static int __init dccp_init(void)
 {
 	unsigned long goal;
+	unsigned long totalram_pgs = totalram_pages;
 	int ehash_order, bhash_order, i;
 	int rc;
 
@@ -1154,10 +1155,10 @@  static int __init dccp_init(void)
 	 *
 	 * The methodology is similar to that of the buffer cache.
 	 */
-	if (totalram_pages >= (128 * 1024))
-		goal = totalram_pages >> (21 - PAGE_SHIFT);
+	if (totalram_pgs >= (128 * 1024))
+		goal = totalram_pgs >> (21 - PAGE_SHIFT);
 	else
-		goal = totalram_pages >> (23 - PAGE_SHIFT);
+		goal = totalram_pgs >> (23 - PAGE_SHIFT);
 
 	if (thash_entries)
 		goal = (thash_entries *
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ca1168d..0b1801e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2248,6 +2248,7 @@  static __always_inline unsigned int total_extension_size(void)
 
 int nf_conntrack_init_start(void)
 {
+	unsigned long totalram_pgs = totalram_pages;
 	int max_factor = 8;
 	int ret = -ENOMEM;
 	int i;
@@ -2267,11 +2268,11 @@  int nf_conntrack_init_start(void)
 		 * >= 4GB machines have 65536 buckets.
 		 */
 		nf_conntrack_htable_size
-			= (((totalram_pages << PAGE_SHIFT) / 16384)
+			= (((totalram_pgs << PAGE_SHIFT) / 16384)
 			   / sizeof(struct hlist_head));
-		if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
+		if (totalram_pgs > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
 			nf_conntrack_htable_size = 65536;
-		else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
+		else if (totalram_pgs > (1024 * 1024 * 1024 / PAGE_SIZE))
 			nf_conntrack_htable_size = 16384;
 		if (nf_conntrack_htable_size < 32)
 			nf_conntrack_htable_size = 32;
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 3e7d259..6cb9a74 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -274,14 +274,15 @@  static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
 	struct xt_hashlimit_htable *hinfo;
 	const struct seq_operations *ops;
 	unsigned int size, i;
+	unsigned long totalram_pgs = totalram_pages;
 	int ret;
 
 	if (cfg->size) {
 		size = cfg->size;
 	} else {
-		size = (totalram_pages << PAGE_SHIFT) / 16384 /
+		size = (totalram_pgs << PAGE_SHIFT) / 16384 /
 		       sizeof(struct hlist_head);
-		if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
+		if (totalram_pgs > 1024 * 1024 * 1024 / PAGE_SIZE)
 			size = 8192;
 		if (size < 16)
 			size = 16;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 9b277bd..7128f85 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1368,6 +1368,7 @@  static __init int sctp_init(void)
 	int status = -EINVAL;
 	unsigned long goal;
 	unsigned long limit;
+	unsigned long totalram_pages;
 	int max_share;
 	int order;
 	int num_entries;
@@ -1426,10 +1427,10 @@  static __init int sctp_init(void)
 	 * The methodology is similar to that of the tcp hash tables.
 	 * Though not identical.  Start by getting a goal size
 	 */
-	if (totalram_pages >= (128 * 1024))
-		goal = totalram_pages >> (22 - PAGE_SHIFT);
+	if (totalram_pgs >= (128 * 1024))
+		goal = totalram_pgs >> (22 - PAGE_SHIFT);
 	else
-		goal = totalram_pages >> (24 - PAGE_SHIFT);
+		goal = totalram_pgs >> (24 - PAGE_SHIFT);
 
 	/* Then compute the page order for said goal */
 	order = get_order(goal);