@@ -5471,7 +5471,20 @@ int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
unsigned long addr, int *flags,
bool writable, int *last_cpupid)
{
- struct vm_area_struct *vma = vmf->vma;
+ if (vmf) {
+ struct vm_area_struct *vma = vmf->vma;
+ const vm_flags_t vmflags = vma->vm_flags;
+
+ /*
+ * Flag if the folio is shared between multiple address spaces.
+	 * This is used later when determining whether to group tasks.
+ */
+ if (folio_likely_mapped_shared(folio))
+ *flags |= vmflags & VM_SHARED ? TNF_SHARED : 0;
+
+		/* Record the current PID accessing VMA */
+ vma_set_access_pid_bit(vma);
+ }
/*
* Avoid grouping on RO pages in general. RO pages shouldn't hurt as
@@ -5484,12 +5497,6 @@ int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
if (!writable)
*flags |= TNF_NO_GROUP;
- /*
- * Flag if the folio is shared between multiple address spaces. This
- * is later used when determining whether to group tasks together
- */
- if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
- *flags |= TNF_SHARED;
/*
* For memory tiering mode, cpupid of slow memory page is used
* to record page access time. So use default value.
@@ -5499,17 +5506,14 @@ int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
else
*last_cpupid = folio_last_cpupid(folio);
- /* Record the current PID acceesing VMA */
- vma_set_access_pid_bit(vma);
-
- count_vm_numa_event(NUMA_HINT_FAULTS);
#ifdef CONFIG_NUMA_BALANCING
+ count_vm_numa_event(NUMA_HINT_FAULTS);
count_memcg_folio_events(folio, NUMA_HINT_FAULTS, 1);
-#endif
if (folio_nid(folio) == numa_node_id()) {
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
*flags |= TNF_FAULT_LOCAL;
}
+#endif
return mpol_misplaced(folio, vmf, addr);
}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2727,12 +2727,16 @@ static void sp_free(struct sp_node *n)
* mpol_misplaced - check whether current folio node is valid in policy
*
* @folio: folio to be checked
- * @vmf: structure describing the fault
+ * @vmf: structure describing the fault (NULL if called outside fault path)
* @addr: virtual address in @vma for shared policy lookup and interleave policy
+ * Ignored if vmf is NULL.
*
* Lookup current policy node id for vma,addr and "compare to" folio's
- * node id. Policy determination "mimics" alloc_page_vma().
- * Called from fault path where we know the vma and faulting address.
+ * node id - or task's policy node id if vmf is NULL. Policy determination
+ * "mimics" alloc_page_vma().
+ *
+ * vmf must be non-NULL if called from the fault path, where we know the vma
+ * and faulting address. The caller must hold the PTL if vmf is not NULL.
*
* Return: NUMA_NO_NODE if the page is in a node that is valid for this
* policy, or a suitable node ID to allocate a replacement folio from.
@@ -2744,7 +2748,6 @@ int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
pgoff_t ilx;
struct zoneref *z;
int curnid = folio_nid(folio);
- struct vm_area_struct *vma = vmf->vma;
int thiscpu = raw_smp_processor_id();
int thisnid = numa_node_id();
int polnid = NUMA_NO_NODE;
@@ -2754,18 +2757,24 @@ int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
* Make sure ptl is held so that we don't preempt and we
* have a stable smp processor id
*/
- lockdep_assert_held(vmf->ptl);
- pol = get_vma_policy(vma, addr, folio_order(folio), &ilx);
+ if (vmf) {
+ lockdep_assert_held(vmf->ptl);
+ pol = get_vma_policy(vmf->vma, addr, folio_order(folio), &ilx);
+ } else {
+ pol = get_task_policy(current);
+ }
if (!(pol->flags & MPOL_F_MOF))
goto out;
switch (pol->mode) {
case MPOL_INTERLEAVE:
- polnid = interleave_nid(pol, ilx);
+ polnid = vmf ? interleave_nid(pol, ilx) :
+ interleave_nodes(pol);
break;
case MPOL_WEIGHTED_INTERLEAVE:
- polnid = weighted_interleave_nid(pol, ilx);
+ polnid = vmf ? weighted_interleave_nid(pol, ilx) :
+ weighted_interleave_nodes(pol);
break;
case MPOL_PREFERRED:
numa_migrate_check and mpol_misplaced presume callers are in the fault
path with access to a VMA. To enable migrations from the page cache,
re-using the same logic to handle migration prep is preferable.

Mildly refactor numa_migrate_check and mpol_misplaced so that they may
be called with (vmf = NULL) from non-faulting paths. Also move the NUMA
balancing fault accounting inside the appropriate CONFIG_NUMA_BALANCING
ifdef.

Signed-off-by: Gregory Price <gourry@gourry.net>
---
 mm/memory.c    | 28 ++++++++++++++++------------
 mm/mempolicy.c | 25 +++++++++++++++++--------
 2 files changed, 33 insertions(+), 20 deletions(-)
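
For reviewers, a minimal sketch (not part of this patch) of how a
non-fault-path caller could use the refactored interface, e.g. a
hypothetical page cache promotion scan. The helper name
pagecache_promotion_target() is invented for illustration and assumes
the mm-internal declaration of numa_migrate_check() is visible; only
numa_migrate_check(), mpol_misplaced() and NUMA_NO_NODE come from the
kernel/this patch:

/*
 * Hypothetical caller, illustration only.  With vmf == NULL,
 * numa_migrate_check() skips the VMA-specific work (TNF_SHARED,
 * vma_set_access_pid_bit()) and mpol_misplaced() falls back to the
 * calling task's policy via get_task_policy(current), so no VMA or
 * PTL is required here.
 */
static int pagecache_promotion_target(struct folio *folio)
{
	int flags = 0;
	int last_cpupid;
	int target_nid;

	/* Non-faulting context: vmf == NULL, addr is ignored. */
	target_nid = numa_migrate_check(folio, NULL, 0, &flags,
					false, &last_cpupid);

	/* NUMA_NO_NODE means the folio already sits on an acceptable node. */
	return target_nid;
}

The sketch only picks a candidate node; an actual promotion path would
then hand the folio off to the migration machinery.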