
[v2,2/4] mm/hugetlb: clean up hstate::max_huge_pages

Message ID 20230806074853.317203-3-xueshi.hu@smartx.com (mailing list archive)
State New
Series mm/hugetlb: fix /sys and /proc fs dealing with persistent hugepages

Commit Message

Xueshi Hu Aug. 6, 2023, 7:48 a.m. UTC
Currently, the only remaining users of hstate::max_huge_pages are
hugetlb_sysctl_handler_common() and hugetlbfs_size_to_hpages().
The former has already been converted to hstate::nr_huge_pages, and the
latter can be converted just as easily.
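
For context, hugetlbfs_size_to_hpages() handles the SIZE_PERCENT case of
the hugetlbfs size= mount option, e.g. (the mount point is only
illustrative):

    mount -t hugetlbfs -o size=50% none /mnt/huge

and converts that percentage into a number of huge pages.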

After the hugetlb subsystem has been initialized, hstate::max_huge_pages
always equals persistent_huge_pages(), and keeping that invariant up to
date has already caused bugs[1][2].
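
For reference, the value can always be derived from counters that the pool
code already keeps in sync (the macro below is the existing one from
mm/hugetlb.c; the invariant comment is only illustrative):

    #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)

    /*
     * Invariant that currently has to be re-established by hand after
     * every pool resize, demote and dissolve:
     *
     *     h->max_huge_pages == persistent_huge_pages(h)
     */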

After this patch, hstate::max_huge_pages is only used in kernel command
line parameter parsing.
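
That is the boot-time path, e.g.:

    hugepagesz=2M hugepages=512

where (roughly speaking) hugepages_setup() records the requested count in
h->max_huge_pages before hugetlb_hstate_alloc_pages() performs the early
allocations.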

Rename set_max_huge_pages() to set_nr_huge_pages() so that the name matches
the counter it actually adjusts.

[1]: Commit a43a83c79b4f ("mm/hugetlb: fix incorrect update of
max_huge_pages")
[2]: Commit c1470b33bb6e ("mm/hugetlb: fix incorrect hugepages count
during mem hotplug")

Signed-off-by: Xueshi Hu <xueshi.hu@smartx.com>
---
 fs/hugetlbfs/inode.c |  2 +-
 mm/hugetlb.c         | 24 +++++-------------------
 2 files changed, 6 insertions(+), 20 deletions(-)

Comments

David Hildenbrand Aug. 7, 2023, 3:17 p.m. UTC | #1
On 06.08.23 09:48, Xueshi Hu wrote:
> Currently, the only remaining users of hstate::max_huge_pages are
> hugetlb_sysctl_handler_common() and hugetlbfs_size_to_hpages().
> The former has already been converted to hstate::nr_huge_pages, and the

Most probably that conversion is wrong as it changes documented 
behavior, and therefore, this patch is not applicable.

Patch

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 316c4cebd3f3..cd1a3e4bf8fb 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1375,7 +1375,7 @@  hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
 
 	if (val_type == SIZE_PERCENT) {
 		size_opt <<= huge_page_shift(h);
-		size_opt *= h->max_huge_pages;
+		size_opt *= (h->nr_huge_pages - h->surplus_huge_pages);
 		do_div(size_opt, 100);
 	}
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 76af189053f0..56647235ab21 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2343,14 +2343,13 @@  int dissolve_free_huge_page(struct page *page)
 		}
 
 		remove_hugetlb_folio(h, folio, false);
-		h->max_huge_pages--;
 		spin_unlock_irq(&hugetlb_lock);
 
 		/*
 		 * Normally update_and_free_hugtlb_folio will allocate required vmemmmap
 		 * before freeing the page.  update_and_free_hugtlb_folio will fail to
 		 * free the page if it can not allocate required vmemmap.  We
-		 * need to adjust max_huge_pages if the page is not freed.
+		 * need to adjust nr_huge_pages if the page is not freed.
 		 * Attempt to allocate vmemmmap here so that we can take
 		 * appropriate action on failure.
 		 */
@@ -2360,7 +2359,6 @@  int dissolve_free_huge_page(struct page *page)
 		} else {
 			spin_lock_irq(&hugetlb_lock);
 			add_hugetlb_folio(h, folio, false);
-			h->max_huge_pages++;
 			spin_unlock_irq(&hugetlb_lock);
 		}
 
@@ -3274,8 +3272,6 @@  static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
 	string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
 	pr_warn("HugeTLB: allocating %u of page size %s failed node%d.  Only allocated %lu hugepages.\n",
 		h->max_huge_pages_node[nid], buf, nid, i);
-	h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
-	h->max_huge_pages_node[nid] = i;
 }
 
 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
@@ -3336,7 +3332,6 @@  static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
 		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
 			h->max_huge_pages, buf, i);
-		h->max_huge_pages = i;
 	}
 	kfree(node_alloc_noretry);
 }
@@ -3460,7 +3455,7 @@  static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
 }
 
 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
-static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
+static int set_nr_huge_pages(struct hstate *h, unsigned long count, int nid,
 			      nodemask_t *nodes_allowed)
 {
 	unsigned long min_count, ret;
@@ -3601,7 +3596,6 @@  static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 			break;
 	}
 out:
-	h->max_huge_pages = persistent_huge_pages(h);
 	spin_unlock_irq(&hugetlb_lock);
 	mutex_unlock(&h->resize_lock);
 
@@ -3639,7 +3633,7 @@  static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
 	destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));
 
 	/*
-	 * Taking target hstate mutex synchronizes with set_max_huge_pages.
+	 * Taking target hstate mutex synchronizes with set_nr_huge_pages.
 	 * Without the mutex, pages added to target hstate could be marked
 	 * as surplus.
 	 *
@@ -3664,14 +3658,6 @@  static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
 
 	spin_lock_irq(&hugetlb_lock);
 
-	/*
-	 * Not absolutely necessary, but for consistency update max_huge_pages
-	 * based on pool changes for the demoted page.
-	 */
-	h->max_huge_pages--;
-	target_hstate->max_huge_pages +=
-		pages_per_huge_page(h) / pages_per_huge_page(target_hstate);
-
 	return rc;
 }
 
@@ -3770,13 +3756,13 @@  static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
 	} else {
 		/*
 		 * Node specific request.  count adjustment happens in
-		 * set_max_huge_pages() after acquiring hugetlb_lock.
+		 * set_nr_huge_pages() after acquiring hugetlb_lock.
 		 */
 		init_nodemask_of_node(&nodes_allowed, nid);
 		n_mask = &nodes_allowed;
 	}
 
-	err = set_max_huge_pages(h, count, nid, n_mask);
+	err = set_nr_huge_pages(h, count, nid, n_mask);
 
 	return err ? err : len;
 }