
[RFC,v2,12/47] hugetlb: add hugetlb_hgm_walk and hugetlb_walk_step

Message ID: 20221021163703.3218176-13-jthoughton@google.com
State: New
Series: hugetlb: introduce HugeTLB high-granularity mapping

Commit Message

James Houghton Oct. 21, 2022, 4:36 p.m. UTC
hugetlb_hgm_walk implements high-granularity page table walks for
HugeTLB. It is safe to call on non-HGM-enabled VMAs; it will return
immediately.

hugetlb_walk_step implements how we step forwards in the walk.
Architectures that don't use GENERAL_HUGETLB will need to provide
their own implementation.

Signed-off-by: James Houghton <jthoughton@google.com>
---
 include/linux/hugetlb.h |  13 +++++
 mm/hugetlb.c            | 125 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 138 insertions(+)
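
For orientation, a minimal sketch of how a caller might drive these two
functions follows. This is hypothetical caller code, not part of the patch:
it assumes hugetlb_pte_populate() from earlier in the series, an HGM-enabled
VMA, and that the caller already holds the VMA lock.

    struct hugetlb_pte hpte;
    pte_t *ptep;
    int ret;

    /* The walk must start from the hstate-level PTE. */
    ptep = huge_pte_offset(mm, addr, huge_page_size(h));
    if (!ptep)
            return -ENOMEM;
    hugetlb_pte_populate(&hpte, ptep, huge_page_shift(h),
                         hpage_size_to_level(huge_page_size(h)));

    /*
     * stop_at_none=false: allocate page tables as needed, stepping down
     * one level at a time via hugetlb_walk_step(), until the entry is
     * PAGE_SIZE-sized, or until a present leaf (or a non-none,
     * non-present entry) stops the walk.
     */
    ret = hugetlb_hgm_walk(mm, vma, &hpte, addr, PAGE_SIZE, false);
    if (ret)
            return ret;     /* -ENOMEM or -EEXIST; hpte remains valid */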

Comments

Peter Xu Nov. 16, 2022, 10:02 p.m. UTC | #1
On Fri, Oct 21, 2022 at 04:36:28PM +0000, James Houghton wrote:
> +/* hugetlb_hgm_walk - walks a high-granularity HugeTLB page table to resolve
> + * the page table entry for @addr.
> + *
> + * @hpte must always be pointing at an hstate-level PTE (or deeper).
> + *
> + * This function will never walk further if it encounters a PTE of a size
> + * less than or equal to @sz.
> + *
> + * @stop_at_none determines what we do when we encounter an empty PTE.

IIUC it is not about empty PTE but swap-or-empty pte?

I'm not sure whether it'll be more straightforward to have "bool alloc"
just to show whether the caller would like to allocate pgtables when
walking the sub-level pgtable until the level specified.

In the final version of the code I also think we should drop all the "/*
stop_at_none */" comments in the callers. Maybe that already means the
meaning of the bool is confusing, so we always need a hint.
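
For illustration, the suggested prototype would then read roughly as follows
(hypothetical; the rename only lands in a later revision of the series):

    int hugetlb_hgm_walk(struct mm_struct *mm, struct vm_area_struct *vma,
                         struct hugetlb_pte *hpte, unsigned long addr,
                         unsigned long sz, bool alloc);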
James Houghton Nov. 17, 2022, 1:39 a.m. UTC | #2
On Wed, Nov 16, 2022 at 2:02 PM Peter Xu <peterx@redhat.com> wrote:
>
> On Fri, Oct 21, 2022 at 04:36:28PM +0000, James Houghton wrote:
> > +/* hugetlb_hgm_walk - walks a high-granularity HugeTLB page table to resolve
> > + * the page table entry for @addr.
> > + *
> > + * @hpte must always be pointing at an hstate-level PTE (or deeper).
> > + *
> > + * This function will never walk further if it encounters a PTE of a size
> > + * less than or equal to @sz.
> > + *
> > + * @stop_at_none determines what we do when we encounter an empty PTE.
>
> IIUC it is not about empty PTE but swap-or-empty pte?
>
> I'm not sure whether it'll be more straightforward to have "bool alloc"
> just to show whether the caller would like to allocate pgtables when
> walking the sub-level pgtable until the level specified.

I think "bool alloc" is cleaner. I'll do that. Thanks for the suggestion.

>
> In the final version of the code I also think we should drop all the "/*
> stop_at_none */" comments in the callers. Maybe that already means the
> meaning of the bool is confusing, so we always need a hint.

I did that to hopefully make things easier to read. I'll remove it.

- James

>
> --
> Peter Xu
>
Mike Kravetz Dec. 14, 2022, 12:47 a.m. UTC | #3
On 10/21/22 16:36, James Houghton wrote:
> hugetlb_hgm_walk implements high-granularity page table walks for
> HugeTLB. It is safe to call on non-HGM-enabled VMAs; it will return
> immediately.
> 
> hugetlb_walk_step implements how we step forwards in the walk.
> Architectures that don't use GENERAL_HUGETLB will need to provide
> their own implementation.
> 
> Signed-off-by: James Houghton <jthoughton@google.com>
> ---
>  include/linux/hugetlb.h |  13 +++++
>  mm/hugetlb.c            | 125 ++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 138 insertions(+)
> 
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index 003255b0e40f..4b1548adecde 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -276,6 +276,10 @@ u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
>  pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
>  		      unsigned long addr, pud_t *pud);
>  
> +int hugetlb_hgm_walk(struct mm_struct *mm, struct vm_area_struct *vma,
> +		     struct hugetlb_pte *hpte, unsigned long addr,
> +		     unsigned long sz, bool stop_at_none);
> +
>  struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
>  
>  extern int sysctl_hugetlb_shm_group;
> @@ -288,6 +292,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
>  pte_t *huge_pte_offset(struct mm_struct *mm,
>  		       unsigned long addr, unsigned long sz);
>  unsigned long hugetlb_mask_last_page(struct hstate *h);
> +int hugetlb_walk_step(struct mm_struct *mm, struct hugetlb_pte *hpte,
> +		      unsigned long addr, unsigned long sz);
>  int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
>  				unsigned long addr, pte_t *ptep);
>  void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
> @@ -1066,6 +1072,8 @@ void hugetlb_register_node(struct node *node);
>  void hugetlb_unregister_node(struct node *node);
>  #endif
>  
> +enum hugetlb_level hpage_size_to_level(unsigned long sz);
> +
>  #else	/* CONFIG_HUGETLB_PAGE */
>  struct hstate {};
>  
> @@ -1253,6 +1261,11 @@ static inline void hugetlb_register_node(struct node *node)
>  static inline void hugetlb_unregister_node(struct node *node)
>  {
>  }
> +
> +static inline enum hugetlb_level hpage_size_to_level(unsigned long sz)
> +{
> +	return HUGETLB_LEVEL_PTE;
> +}
>  #endif	/* CONFIG_HUGETLB_PAGE */
>  
>  #ifdef CONFIG_HUGETLB_HIGH_GRANULARITY_MAPPING
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index e3733388adee..90db59632559 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -95,6 +95,29 @@ static void hugetlb_vma_data_free(struct vm_area_struct *vma);
>  static int hugetlb_vma_data_alloc(struct vm_area_struct *vma);
>  static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
>  
> +/*
> + * hpage_size_to_level() - convert @sz to the corresponding page table level
> + *
> + * @sz must be less than or equal to a valid hugepage size.
> + */
> +enum hugetlb_level hpage_size_to_level(unsigned long sz)
> +{
> +	/*
> +	 * We order the conditionals from smallest to largest to pick the
> +	 * smallest level when multiple levels have the same size (i.e.,
> +	 * when levels are folded).
> +	 */
> +	if (sz < PMD_SIZE)
> +		return HUGETLB_LEVEL_PTE;
> +	if (sz < PUD_SIZE)
> +		return HUGETLB_LEVEL_PMD;
> +	if (sz < P4D_SIZE)
> +		return HUGETLB_LEVEL_PUD;
> +	if (sz < PGDIR_SIZE)
> +		return HUGETLB_LEVEL_P4D;
> +	return HUGETLB_LEVEL_PGD;
> +}
> +
>  static inline bool subpool_is_free(struct hugepage_subpool *spool)
>  {
>  	if (spool->count)
> @@ -7321,6 +7344,70 @@ bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
>  }
>  #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
>  
> +/* hugetlb_hgm_walk - walks a high-granularity HugeTLB page table to resolve
> + * the page table entry for @addr.
> + *
> + * @hpte must always be pointing at an hstate-level PTE (or deeper).
> + *
> + * This function will never walk further if it encounters a PTE of a size
> + * less than or equal to @sz.
> + *
> + * @stop_at_none determines what we do when we encounter an empty PTE. If true,
> + * we return that PTE. If false and @sz is less than the current PTE's size,
> + * we make that PTE point to the next level down, going until @sz is the same
> + * as our current PTE.

I was a bit confused about 'we return that PTE' when the function is of type
int.  TBH, I am not a fan of the current scheme of passing in *hpte and having
the hpte modified by the function.

> + *
> + * If @stop_at_none is true and @sz is PAGE_SIZE, this function will always
> + * succeed, but that does not guarantee that hugetlb_pte_size(hpte) is @sz.
> + *
> + * Return:
> + *	-ENOMEM if we couldn't allocate new PTEs.
> + *	-EEXIST if the caller wanted to walk further than a migration PTE,
> + *		poison PTE, or a PTE marker. The caller needs to manually deal
> + *		with this scenario.
> + *	-EINVAL if called with invalid arguments (@sz invalid, @hpte not
> + *		initialized).
> + *	0 otherwise.
> + *
> + *	Even if this function fails, @hpte is guaranteed to always remain
> + *	valid.
> + */
> +int hugetlb_hgm_walk(struct mm_struct *mm, struct vm_area_struct *vma,
> +		     struct hugetlb_pte *hpte, unsigned long addr,
> +		     unsigned long sz, bool stop_at_none)

Since we are potentially populating lower-level page tables, we may want a
different function name.  It may just be me, but I think of a walk as a
read-only operation.  I would suggest putting "populate" in the name, but as
Peter pointed out elsewhere, that has other implications.  Sorry, I cannot
think of something better right now.
Jane Chu Jan. 5, 2023, 12:57 a.m. UTC | #4
> + * @stop_at_none determines what we do when we encounter an empty PTE. If true,
> + * we return that PTE. If false and @sz is less than the current PTE's size,
> + * we make that PTE point to the next level down, going until @sz is the same
> + * as our current PTE.
[..]
> +int hugetlb_hgm_walk(struct mm_struct *mm, struct vm_area_struct *vma,
> +		     struct hugetlb_pte *hpte, unsigned long addr,
> +		     unsigned long sz, bool stop_at_none)
> +{
[..]
> +	while (hugetlb_pte_size(hpte) > sz && !ret) {
> +		pte = huge_ptep_get(hpte->ptep);
> +		if (!pte_present(pte)) {
> +			if (stop_at_none)
> +				return 0;
> +			if (unlikely(!huge_pte_none(pte)))
> +				return -EEXIST;

If 'stop_at_none' means settling down on the just-encountered empty PTE,
should the above two "if" clauses switch order?  I thought Peter had
raised this question too, but I'm not seeing a response.

Regards,
-jane


> +		} else if (hugetlb_pte_present_leaf(hpte, pte))
> +			return 0;
> +		ret = hugetlb_walk_step(mm, hpte, addr, sz);
> +	}
> +
> +	return ret;
> +}
> +
Jane Chu Jan. 5, 2023, 1:12 a.m. UTC | #5
On 1/4/2023 4:57 PM, Jane Chu wrote:
>> + * @stop_at_none determines what we do when we encounter an empty PTE. If true,
>> + * we return that PTE. If false and @sz is less than the current PTE's size,
>> + * we make that PTE point to the next level down, going until @sz is the same
>> + * as our current PTE.
> [..]
>> +int hugetlb_hgm_walk(struct mm_struct *mm, struct vm_area_struct *vma,
>> +             struct hugetlb_pte *hpte, unsigned long addr,
>> +             unsigned long sz, bool stop_at_none)
>> +{

Also here below, the way 'stop_at_none' is used when HGM isn't enabled
is puzzling.  Could you elaborate please?

+	if (!hugetlb_hgm_enabled(vma)) {
+		if (stop_at_none)
+			return 0;
+		return sz == huge_page_size(hstate_vma(vma)) ? 0 : -EINVAL;
+	}

> [..]
>> +    while (hugetlb_pte_size(hpte) > sz && !ret) {
>> +        pte = huge_ptep_get(hpte->ptep);
>> +        if (!pte_present(pte)) {
>> +            if (stop_at_none)
>> +                return 0;
>> +            if (unlikely(!huge_pte_none(pte)))
>> +                return -EEXIST;
> 
> If 'stop_at_none' means settling down on the just-encountered empty PTE,
> should the above two "if" clauses switch order?  I thought Peter had
> raised this question too, but I'm not seeing a response.
> 
> Regards,
> -jane
> 
> 
>> +        } else if (hugetlb_pte_present_leaf(hpte, pte))
>> +            return 0;
>> +        ret = hugetlb_walk_step(mm, hpte, addr, sz);
>> +    }
>> +
>> +    return ret;
>> +}
>> +
> 
> 

thanks,
-jane
James Houghton Jan. 5, 2023, 1:23 a.m. UTC | #6
On Thu, Jan 5, 2023 at 12:58 AM Jane Chu <jane.chu@oracle.com> wrote:
>
> > + * @stop_at_none determines what we do when we encounter an empty PTE. If true,
> > + * we return that PTE. If false and @sz is less than the current PTE's size,
> > + * we make that PTE point to the next level down, going until @sz is the same
> > + * as our current PTE.
> [..]
> > +int hugetlb_hgm_walk(struct mm_struct *mm, struct vm_area_struct *vma,
> > +                  struct hugetlb_pte *hpte, unsigned long addr,
> > +                  unsigned long sz, bool stop_at_none)
> > +{
> [..]
> > +     while (hugetlb_pte_size(hpte) > sz && !ret) {
> > +             pte = huge_ptep_get(hpte->ptep);
> > +             if (!pte_present(pte)) {
> > +                     if (stop_at_none)
> > +                             return 0;
> > +                     if (unlikely(!huge_pte_none(pte)))
> > +                             return -EEXIST;
>
> If 'stop_at_none' means settling down on the just-encountered empty PTE,
> should the above two "if" clauses switch order?  I thought Peter had
> raised this question too, but I'm not seeing a response.

A better name for "stop_at_none" would be "dont_allocate"; it will be
changed in the next version. The idea is that, with "stop_at_none" set,
we simply do a walk, and the caller will deal with what it finds. If we
can't continue the walk for any reason, we just return 0. So in this
case, if we land on a non-present, non-none PTE, we can't continue the
walk, so we just return 0.

Another way to justify this order: we want to ensure that calls to
this function with stop_at_none=1 and sz=PAGE_SIZE will never fail,
and that gives us the order that you see. (This requirement is
documented in the comment above the definition of hugetlb_hgm_walk().
This guarantee makes it easier to write code that uses HGM walks.)
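
As a hypothetical illustration of that guarantee (assuming hpte has already
been initialized to the hstate-level PTE, as elsewhere in the series):

    /*
     * With stop_at_none=true and sz == PAGE_SIZE, this never allocates
     * and never fails: it walks only as deep as the existing page
     * tables go, leaving hpte at the smallest mapping level that
     * covers addr.
     */
    hugetlb_hgm_walk(mm, vma, &hpte, addr, PAGE_SIZE,
                     true /* stop_at_none */);
    /* hugetlb_pte_size(&hpte) may still be huge_page_size(h). */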

> Also here below, the way 'stop_at_none' is used when HGM isn't enabled
> is puzzling.  Could you elaborate please?
>
> > +       if (!hugetlb_hgm_enabled(vma)) {
> > +               if (stop_at_none)
> > +                       return 0;
> > +               return sz == huge_page_size(hstate_vma(vma)) ? 0 : -EINVAL;
> > +       }

This is for the same reason; if "stop_at_none" is true, we need to
guarantee that this function won't fail. If "stop_at_none" is false
and sz != huge_page_size(), then the caller is attempting to use HGM
without having enabled it, hence -EINVAL.
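
Spelled out, the three possible outcomes of that branch are (hypothetical
calls; h is the VMA's hstate):

    /* On a VMA without HGM enabled: */
    hugetlb_hgm_walk(mm, vma, &hpte, addr, PAGE_SIZE, true);          /* 0 */
    hugetlb_hgm_walk(mm, vma, &hpte, addr, huge_page_size(h), false); /* 0 */
    hugetlb_hgm_walk(mm, vma, &hpte, addr, PAGE_SIZE, false);   /* -EINVAL */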

Both of these bits will be cleaned up with the next version of this series. :)

Thanks!

- James

Patch

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 003255b0e40f..4b1548adecde 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -276,6 +276,10 @@  u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 		      unsigned long addr, pud_t *pud);
 
+int hugetlb_hgm_walk(struct mm_struct *mm, struct vm_area_struct *vma,
+		     struct hugetlb_pte *hpte, unsigned long addr,
+		     unsigned long sz, bool stop_at_none);
+
 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
 
 extern int sysctl_hugetlb_shm_group;
@@ -288,6 +292,8 @@  pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 pte_t *huge_pte_offset(struct mm_struct *mm,
 		       unsigned long addr, unsigned long sz);
 unsigned long hugetlb_mask_last_page(struct hstate *h);
+int hugetlb_walk_step(struct mm_struct *mm, struct hugetlb_pte *hpte,
+		      unsigned long addr, unsigned long sz);
 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 				unsigned long addr, pte_t *ptep);
 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
@@ -1066,6 +1072,8 @@  void hugetlb_register_node(struct node *node);
 void hugetlb_unregister_node(struct node *node);
 #endif
 
+enum hugetlb_level hpage_size_to_level(unsigned long sz);
+
 #else	/* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 
@@ -1253,6 +1261,11 @@  static inline void hugetlb_register_node(struct node *node)
 static inline void hugetlb_unregister_node(struct node *node)
 {
 }
+
+static inline enum hugetlb_level hpage_size_to_level(unsigned long sz)
+{
+	return HUGETLB_LEVEL_PTE;
+}
 #endif	/* CONFIG_HUGETLB_PAGE */
 
 #ifdef CONFIG_HUGETLB_HIGH_GRANULARITY_MAPPING
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e3733388adee..90db59632559 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -95,6 +95,29 @@  static void hugetlb_vma_data_free(struct vm_area_struct *vma);
 static int hugetlb_vma_data_alloc(struct vm_area_struct *vma);
 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
 
+/*
+ * hpage_size_to_level() - convert @sz to the corresponding page table level
+ *
+ * @sz must be less than or equal to a valid hugepage size.
+ */
+enum hugetlb_level hpage_size_to_level(unsigned long sz)
+{
+	/*
+	 * We order the conditionals from smallest to largest to pick the
+	 * smallest level when multiple levels have the same size (i.e.,
+	 * when levels are folded).
+	 */
+	if (sz < PMD_SIZE)
+		return HUGETLB_LEVEL_PTE;
+	if (sz < PUD_SIZE)
+		return HUGETLB_LEVEL_PMD;
+	if (sz < P4D_SIZE)
+		return HUGETLB_LEVEL_PUD;
+	if (sz < PGDIR_SIZE)
+		return HUGETLB_LEVEL_P4D;
+	return HUGETLB_LEVEL_PGD;
+}
+
 static inline bool subpool_is_free(struct hugepage_subpool *spool)
 {
 	if (spool->count)
@@ -7321,6 +7344,70 @@  bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
 }
 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
 
+/* hugetlb_hgm_walk - walks a high-granularity HugeTLB page table to resolve
+ * the page table entry for @addr.
+ *
+ * @hpte must always be pointing at an hstate-level PTE (or deeper).
+ *
+ * This function will never walk further if it encounters a PTE of a size
+ * less than or equal to @sz.
+ *
+ * @stop_at_none determines what we do when we encounter an empty PTE. If true,
+ * we return that PTE. If false and @sz is less than the current PTE's size,
+ * we make that PTE point to the next level down, going until @sz is the same
+ * as our current PTE.
+ *
+ * If @stop_at_none is true and @sz is PAGE_SIZE, this function will always
+ * succeed, but that does not guarantee that hugetlb_pte_size(hpte) is @sz.
+ *
+ * Return:
+ *	-ENOMEM if we couldn't allocate new PTEs.
+ *	-EEXIST if the caller wanted to walk further than a migration PTE,
+ *		poison PTE, or a PTE marker. The caller needs to manually deal
+ *		with this scenario.
+ *	-EINVAL if called with invalid arguments (@sz invalid, @hpte not
+ *		initialized).
+ *	0 otherwise.
+ *
+ *	Even if this function fails, @hpte is guaranteed to always remain
+ *	valid.
+ */
+int hugetlb_hgm_walk(struct mm_struct *mm, struct vm_area_struct *vma,
+		     struct hugetlb_pte *hpte, unsigned long addr,
+		     unsigned long sz, bool stop_at_none)
+{
+	int ret = 0;
+	pte_t pte;
+
+	if (WARN_ON_ONCE(sz < PAGE_SIZE))
+		return -EINVAL;
+
+	if (!hugetlb_hgm_enabled(vma)) {
+		if (stop_at_none)
+			return 0;
+		return sz == huge_page_size(hstate_vma(vma)) ? 0 : -EINVAL;
+	}
+
+	hugetlb_vma_assert_locked(vma);
+
+	if (WARN_ON_ONCE(!hpte->ptep))
+		return -EINVAL;
+
+	while (hugetlb_pte_size(hpte) > sz && !ret) {
+		pte = huge_ptep_get(hpte->ptep);
+		if (!pte_present(pte)) {
+			if (stop_at_none)
+				return 0;
+			if (unlikely(!huge_pte_none(pte)))
+				return -EEXIST;
+		} else if (hugetlb_pte_present_leaf(hpte, pte))
+			return 0;
+		ret = hugetlb_walk_step(mm, hpte, addr, sz);
+	}
+
+	return ret;
+}
+
 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, unsigned long sz)
@@ -7388,6 +7475,44 @@  pte_t *huge_pte_offset(struct mm_struct *mm,
 	return (pte_t *)pmd;
 }
 
+/*
+ * hugetlb_walk_step() - Walk the page table one step to resolve the page
+ * (hugepage or subpage) entry at address @addr.
+ *
+ * @sz always points at the final target PTE size (e.g. PAGE_SIZE for the
+ * lowest level PTE).
+ *
+ * @hpte will always remain valid, even if this function fails.
+ */
+int hugetlb_walk_step(struct mm_struct *mm, struct hugetlb_pte *hpte,
+		      unsigned long addr, unsigned long sz)
+{
+	pte_t *ptep;
+	spinlock_t *ptl;
+
+	switch (hpte->level) {
+	case HUGETLB_LEVEL_PUD:
+		ptep = (pte_t *)hugetlb_pmd_alloc(mm, hpte, addr);
+		if (IS_ERR(ptep))
+			return PTR_ERR(ptep);
+		hugetlb_pte_populate(hpte, ptep, PMD_SHIFT, HUGETLB_LEVEL_PMD);
+		break;
+	case HUGETLB_LEVEL_PMD:
+		ptep = hugetlb_pte_alloc(mm, hpte, addr);
+		if (IS_ERR(ptep))
+			return PTR_ERR(ptep);
+		ptl = pte_lockptr(mm, (pmd_t *)hpte->ptep);
+		hugetlb_pte_populate(hpte, ptep, PAGE_SHIFT, HUGETLB_LEVEL_PTE);
+		hpte->ptl = ptl;
+		break;
+	default:
+		WARN_ONCE(1, "%s: got invalid level: %d (shift: %d)\n",
+				__func__, hpte->level, hpte->shift);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /*
  * Return a mask that can be used to update an address to the last huge
  * page in a page table page mapping size.  Used to skip non-present