Message ID | 20240916094309.1226908-2-dev.jain@arm.com (mailing list archive)
---|---
State | New
Series | Do not shatter hugezeropage on wp-fault
On 16.09.24 11:43, Dev Jain wrote:
> In preparation for the second patch, abstract away the THP allocation
> logic present in the create_huge_pmd() path, which corresponds to the
> faulting case when no page is present.
>
> There should be no functional change as a result of applying this patch,
> except that, as David notes at [1], a PMD-aligned address should
> be passed to update_mmu_cache_pmd().
>
> [1]: https://lore.kernel.org/all/ddd3fcd2-48b3-4170-bcaa-2fe66e093f43@redhat.com/
>
> Signed-off-by: Dev Jain <dev.jain@arm.com>
> ---
>  mm/huge_memory.c | 108 +++++++++++++++++++++++++++++------------------
>  1 file changed, 66 insertions(+), 42 deletions(-)
>
> [...]
>
> +static void __pmd_thp_fault_success_stats(struct vm_area_struct *vma)
> +{
> +        count_vm_event(THP_FAULT_ALLOC);
> +        count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
> +        count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
> +}

Just inline that into map_anon_folio_pmd(), please. map_anon_folio_pmd
is perfectly readable ;)

> +static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
> +{
> +        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
> +        struct vm_area_struct *vma = vmf->vma;
> +        pgtable_t pgtable = NULL;
> +        struct folio *folio;
> +        vm_fault_t ret = 0;
> +
> +        folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
> +        if (unlikely(!folio)) {
> +                ret = VM_FAULT_FALLBACK;
> +                goto release;

Why not simply "return VM_FAULT_FALLBACK;"? There is nothing to release.
Then you can avoid the "if (folio)" below and even stop initializing
pgtable to NULL.

With these things taken care of,

Acked-by: David Hildenbrand <david@redhat.com>
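Taken together, the two suggestions would reshape the new fault path roughly as sketched below. This is only an illustration of the review comments applied to the helpers introduced by this patch, not the revision that was actually posted later:

static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
        struct vm_area_struct *vma = vmf->vma;
        struct folio *folio;
        pgtable_t pgtable;      /* no NULL initializer needed with the early return */
        vm_fault_t ret = 0;

        /* Nothing allocated yet, so there is nothing to release on failure. */
        folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
        if (unlikely(!folio))
                return VM_FAULT_FALLBACK;

        pgtable = pte_alloc_one(vma->vm_mm);
        if (unlikely(!pgtable)) {
                ret = VM_FAULT_OOM;
                goto release;
        }

        /*
         * ... pmd_lock(), check_stable_address_space(), pgtable deposit and
         * map_anon_folio_pmd(folio, vmf->pmd, vma, haddr) as in the patch,
         * returning 0 on success ...
         */

release:
        if (pgtable)
                pte_free(vma->vm_mm, pgtable);
        folio_put(folio);       /* folio is always valid here, no "if (folio)" check */
        return ret;
}

The early return means the release label can no longer be reached with an unallocated folio, which is exactly what lets both the NULL initialization of pgtable and the "if (folio)" check go away.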
Hi Dev,

kernel test robot noticed the following build warnings:

[auto build test WARNING on akpm-mm/mm-everything]
[also build test WARNING on linus/master v6.11 next-20240918]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Dev-Jain/mm-Abstract-THP-allocation/20240916-174543
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20240916094309.1226908-2-dev.jain%40arm.com
patch subject: [PATCH v4 1/2] mm: Abstract THP allocation
config: i386-allmodconfig (https://download.01.org/0day-ci/archive/20240919/202409191416.9etlfugV-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240919/202409191416.9etlfugV-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202409191416.9etlfugV-lkp@intel.com/

All warnings (new ones prefixed by >>):

   mm/huge_memory.c: In function 'vma_alloc_anon_folio_pmd':
>> mm/huge_memory.c:1152:23: warning: unused variable 'haddr' [-Wunused-variable]
    1152 |         unsigned long haddr = addr & HPAGE_PMD_MASK;
         |                       ^~~~~

Kconfig warnings: (for reference only)
   WARNING: unmet direct dependencies detected for GET_FREE_REGION
   Depends on [n]: SPARSEMEM [=n]
   Selected by [m]:
   - RESOURCE_KUNIT_TEST [=m] && RUNTIME_TESTING_MENU [=y] && KUNIT [=m]

vim +/haddr +1152 mm/huge_memory.c

  1148
  1149  static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
  1150                  unsigned long addr)
  1151  {
> 1152          unsigned long haddr = addr & HPAGE_PMD_MASK;
  1153          gfp_t gfp = vma_thp_gfp_mask(vma);
  1154          const int order = HPAGE_PMD_ORDER;
  1155          struct folio *folio = vma_alloc_folio(gfp, order, vma, haddr, true);
  1156
  1157          if (unlikely(!folio)) {
  1158                  count_vm_event(THP_FAULT_FALLBACK);
  1159                  count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
  1160                  goto out;
  1161          }
  1162
  1163          VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
  1164          if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
  1165                  folio_put(folio);
  1166                  count_vm_event(THP_FAULT_FALLBACK);
  1167                  count_vm_event(THP_FAULT_FALLBACK_CHARGE);
  1168                  count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
  1169                  count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
  1170                  return NULL;
  1171          }
  1172          folio_throttle_swaprate(folio, gfp);
  1173
  1174          folio_zero_user(folio, addr);
  1175          /*
  1176           * The memory barrier inside __folio_mark_uptodate makes sure that
  1177           * folio_zero_user writes become visible before the set_pmd_at()
  1178           * write.
  1179           */
  1180          __folio_mark_uptodate(folio);
  1181  out:
  1182          return folio;
  1183  }
  1184
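The flagged local is only consumed by vma_alloc_folio(), which on some configurations (for instance !CONFIG_NUMA builds, where it is a macro that ignores its address argument) can leave haddr unused in a W=1 build. One possible way to silence the warning, sketched here as an assumption rather than the fix that was actually sent, is to drop the local and mask the address at the call site:

static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
                unsigned long addr)
{
        gfp_t gfp = vma_thp_gfp_mask(vma);
        const int order = HPAGE_PMD_ORDER;
        struct folio *folio;

        /* Mask here so no separate haddr local can end up unused. */
        folio = vma_alloc_folio(gfp, order, vma, addr & HPAGE_PMD_MASK, true);
        if (unlikely(!folio)) {
                count_vm_event(THP_FAULT_FALLBACK);
                count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
                goto out;
        }

        /* ... memcg charging, folio_zero_user() and __folio_mark_uptodate() as in the patch ... */
out:
        return folio;
}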
On 9/17/24 17:17, David Hildenbrand wrote:
> On 16.09.24 11:43, Dev Jain wrote:
>> In preparation for the second patch, abstract away the THP allocation
>> logic present in the create_huge_pmd() path, which corresponds to the
>> faulting case when no page is present.
>>
>> [...]
>>
>> +static void __pmd_thp_fault_success_stats(struct vm_area_struct *vma)
>> +{
>> +        count_vm_event(THP_FAULT_ALLOC);
>> +        count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
>> +        count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
>> +}
>
> just inline that into map_anon_folio_pmd(), please. map_anon_folio_pmd
> is perfectly readable ;)

If you are asking me to open code it in map_anon_folio_pmd(), I'll do that.

>> +static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
>> +{
>> [...]
>> +        folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
>> +        if (unlikely(!folio)) {
>> +                ret = VM_FAULT_FALLBACK;
>> +                goto release;
>
> Why not simply "return VM_FAULT_FALLBACK;"? There is nothing to
> release. Then you can avoid the "if (folio)" below and even stop
> initializing pgtable to NULL.

Makes sense.

> With these things taken care of,
>
> Acked-by: David Hildenbrand <david@redhat.com>

Thanks!
On 24.09.24 06:25, Dev Jain wrote:
> On 9/17/24 17:17, David Hildenbrand wrote:
>> On 16.09.24 11:43, Dev Jain wrote:
>>> [...]
>>>
>>> +static void __pmd_thp_fault_success_stats(struct vm_area_struct *vma)
>>> +{
>>> +        count_vm_event(THP_FAULT_ALLOC);
>>> +        count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
>>> +        count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
>>> +}
>>
>> just inline that into map_anon_folio_pmd(), please. map_anon_folio_pmd
>> is perfectly readable ;)
>
> If you are asking me to open code it in map_anon_folio_pmd(), I'll do that.

Yes, there will be a single user, so just keep it in the caller.
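Concretely, keeping the statistics in their single caller would leave the helper looking roughly like this. This is a sketch of the agreed direction using only the calls already present in the patch, not the later v5 posting itself:

static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
                struct vm_area_struct *vma, unsigned long haddr)
{
        pmd_t entry;

        entry = mk_huge_pmd(&folio->page, vma->vm_page_prot);
        entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
        folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
        folio_add_lru_vma(folio, vma);
        set_pmd_at(vma->vm_mm, haddr, pmd, entry);
        update_mmu_cache_pmd(vma, haddr, pmd);
        add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
        /* Success statistics, open-coded now that this is the only user. */
        count_vm_event(THP_FAULT_ALLOC);
        count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
        count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
}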
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2a73efea02d7..cdc632b8dc9c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1146,47 +1146,88 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 }
 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
 
-static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
-                        struct page *page, gfp_t gfp)
+static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
+                unsigned long addr)
 {
-        struct vm_area_struct *vma = vmf->vma;
-        struct folio *folio = page_folio(page);
-        pgtable_t pgtable;
-        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
-        vm_fault_t ret = 0;
+        unsigned long haddr = addr & HPAGE_PMD_MASK;
+        gfp_t gfp = vma_thp_gfp_mask(vma);
+        const int order = HPAGE_PMD_ORDER;
+        struct folio *folio = vma_alloc_folio(gfp, order, vma, haddr, true);
 
-        VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+        if (unlikely(!folio)) {
+                count_vm_event(THP_FAULT_FALLBACK);
+                count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
+                goto out;
+        }
 
+        VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
         if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
                 folio_put(folio);
                 count_vm_event(THP_FAULT_FALLBACK);
                 count_vm_event(THP_FAULT_FALLBACK_CHARGE);
-                count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK);
-                count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
-                return VM_FAULT_FALLBACK;
+                count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
+                count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
+                return NULL;
         }
         folio_throttle_swaprate(folio, gfp);
 
-        pgtable = pte_alloc_one(vma->vm_mm);
-        if (unlikely(!pgtable)) {
-                ret = VM_FAULT_OOM;
-                goto release;
-        }
-
-        folio_zero_user(folio, vmf->address);
+        folio_zero_user(folio, addr);
         /*
          * The memory barrier inside __folio_mark_uptodate makes sure that
          * folio_zero_user writes become visible before the set_pmd_at()
          * write.
          */
         __folio_mark_uptodate(folio);
+out:
+        return folio;
+}
+
+static void __pmd_thp_fault_success_stats(struct vm_area_struct *vma)
+{
+        count_vm_event(THP_FAULT_ALLOC);
+        count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
+        count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
+}
+
+static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
+                struct vm_area_struct *vma, unsigned long haddr)
+{
+        pmd_t entry;
+
+        entry = mk_huge_pmd(&folio->page, vma->vm_page_prot);
+        entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+        folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
+        folio_add_lru_vma(folio, vma);
+        set_pmd_at(vma->vm_mm, haddr, pmd, entry);
+        update_mmu_cache_pmd(vma, haddr, pmd);
+        add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+        __pmd_thp_fault_success_stats(vma);
+}
+
+static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
+{
+        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+        struct vm_area_struct *vma = vmf->vma;
+        pgtable_t pgtable = NULL;
+        struct folio *folio;
+        vm_fault_t ret = 0;
+
+        folio = vma_alloc_anon_folio_pmd(vma, vmf->address);
+        if (unlikely(!folio)) {
+                ret = VM_FAULT_FALLBACK;
+                goto release;
+        }
+
+        pgtable = pte_alloc_one(vma->vm_mm);
+        if (unlikely(!pgtable)) {
+                ret = VM_FAULT_OOM;
+                goto release;
+        }
 
         vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
         if (unlikely(!pmd_none(*vmf->pmd))) {
                 goto unlock_release;
         } else {
-                pmd_t entry;
-
                 ret = check_stable_address_space(vma->vm_mm);
                 if (ret)
                         goto unlock_release;
@@ -1200,21 +1241,11 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
                         VM_BUG_ON(ret & VM_FAULT_FALLBACK);
                         return ret;
                 }
-
-                entry = mk_huge_pmd(page, vma->vm_page_prot);
-                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-                folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
-                folio_add_lru_vma(folio, vma);
                 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
-                set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
-                update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
-                add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+                map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
                 mm_inc_nr_ptes(vma->vm_mm);
                 deferred_split_folio(folio, false);
                 spin_unlock(vmf->ptl);
-                count_vm_event(THP_FAULT_ALLOC);
-                count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
-                count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
         }
 
         return 0;
@@ -1223,7 +1254,8 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 release:
         if (pgtable)
                 pte_free(vma->vm_mm, pgtable);
-        folio_put(folio);
+        if (folio)
+                folio_put(folio);
         return ret;
 }
 
@@ -1281,8 +1313,6 @@ static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 {
         struct vm_area_struct *vma = vmf->vma;
-        gfp_t gfp;
-        struct folio *folio;
         unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
         vm_fault_t ret;
 
@@ -1333,14 +1363,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
                 }
                 return ret;
         }
-        gfp = vma_thp_gfp_mask(vma);
-        folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
-        if (unlikely(!folio)) {
-                count_vm_event(THP_FAULT_FALLBACK);
-                count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK);
-                return VM_FAULT_FALLBACK;
-        }
-        return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
+
+        return __do_huge_pmd_anonymous_page(vmf);
 }
 
 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
In preparation for the second patch, abstract away the THP allocation
logic present in the create_huge_pmd() path, which corresponds to the
faulting case when no page is present.

There should be no functional change as a result of applying this patch,
except that, as David notes at [1], a PMD-aligned address should be
passed to update_mmu_cache_pmd().

[1]: https://lore.kernel.org/all/ddd3fcd2-48b3-4170-bcaa-2fe66e093f43@redhat.com/

Signed-off-by: Dev Jain <dev.jain@arm.com>
---
 mm/huge_memory.c | 108 +++++++++++++++++++++++++++++------------------
 1 file changed, 66 insertions(+), 42 deletions(-)