
[RFC,V1,1/1] sched/numa: Enhance vma scanning logic

Message ID 67bf778d592c39d02444825c416c2ed11d2ef4b2.1673610485.git.raghavendra.kt@amd.com (mailing list archive)
State New
Series [RFC,V1,1/1] sched/numa: Enhance vma scanning logic

Commit Message

Raghavendra K T Jan. 16, 2023, 1:35 a.m. UTC
During NUMA scanning, make sure only the relevant VMAs of the
tasks are scanned.

Logic:
1) For the first two times, allow unconditional scanning of VMAs.
2) Store the recent 4 unique tasks (last 8 bits of their PIDs) that accessed the VMA.
   False negatives in case of collision should be fine here.
3) If more than 4 PIDs exist, assume the task indeed accessed the VMA
   to avoid false negatives.

Co-developed-by: Bharata B Rao <bharata@amd.com>
(initial patch to store pid information)

Suggested-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Bharata B Rao <bharata@amd.com>
Signed-off-by: Raghavendra K T <raghavendra.kt@amd.com>
---
 include/linux/mm_types.h |  2 ++
 kernel/sched/fair.c      | 32 ++++++++++++++++++++++++++++++++
 mm/memory.c              | 21 +++++++++++++++++++++
 3 files changed, 55 insertions(+)

Comments

David Hildenbrand Jan. 17, 2023, 11:14 a.m. UTC | #1
On 16.01.23 03:25, Raghavendra K T wrote:
>   During the Numa scanning make sure only relevant vmas of the
> tasks are scanned.
> 
> Logic:
> 1) For the first two time allow unconditional scanning of vmas
> 2) Store recent 4 unique tasks (last 8bits of PIDs) accessed the vma.
>    False negetives in case of collison should be fine here.
> 3) If more than 4 pids exist assume task indeed accessed vma to
>   to avoid false negetives
> 
> Co-developed-by: Bharata B Rao <bharata@amd.com>
> (initial patch to store pid information)
> 
> Suggested-by: Mel Gorman <mgorman@techsingularity.net>
> Signed-off-by: Bharata B Rao <bharata@amd.com>
> Signed-off-by: Raghavendra K T <raghavendra.kt@amd.com>
> ---
>   include/linux/mm_types.h |  2 ++
>   kernel/sched/fair.c      | 32 ++++++++++++++++++++++++++++++++
>   mm/memory.c              | 21 +++++++++++++++++++++
>   3 files changed, 55 insertions(+)
> 
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 500e536796ca..07feae37b8e6 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -506,6 +506,8 @@ struct vm_area_struct {
>   	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
>   #endif
>   	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
> +	unsigned int accessing_pids;
> +	int next_pid_slot;
>   } __randomize_layout;

What immediately jumps out at me is the unconditional growth of every VMA by 8
bytes. A process with 64k mappings consumes 512 KiB more memory,
possibly completely unnecessarily.

This at least needs to be fenced by CONFIG_NUMA_BALANCING.
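
(For reference, the arithmetic behind that estimate, taking "64k mappings" as
65,536 VMAs and the two new fields as 8 bytes of per-VMA state:

	65,536 VMAs x 8 bytes/VMA = 524,288 bytes = 512 KiB.)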
Raghavendra K T Jan. 17, 2023, 1:09 p.m. UTC | #2
On 1/17/2023 4:44 PM, David Hildenbrand wrote:
> On 16.01.23 03:25, Raghavendra K T wrote:
>>   During the Numa scanning make sure only relevant vmas of the
>> tasks are scanned.
>>
>> Logic:
>> 1) For the first two time allow unconditional scanning of vmas
>> 2) Store recent 4 unique tasks (last 8bits of PIDs) accessed the vma.
>>    False negetives in case of collison should be fine here.
>> 3) If more than 4 pids exist assume task indeed accessed vma to
>>   to avoid false negetives
>>
>> Co-developed-by: Bharata B Rao <bharata@amd.com>
>> (initial patch to store pid information)
>>
>> Suggested-by: Mel Gorman <mgorman@techsingularity.net>
>> Signed-off-by: Bharata B Rao <bharata@amd.com>
>> Signed-off-by: Raghavendra K T <raghavendra.kt@amd.com>
>> ---
>>   include/linux/mm_types.h |  2 ++
>>   kernel/sched/fair.c      | 32 ++++++++++++++++++++++++++++++++
>>   mm/memory.c              | 21 +++++++++++++++++++++
>>   3 files changed, 55 insertions(+)
>>
>> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
>> index 500e536796ca..07feae37b8e6 100644
>> --- a/include/linux/mm_types.h
>> +++ b/include/linux/mm_types.h
>> @@ -506,6 +506,8 @@ struct vm_area_struct {
>>       struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
>>   #endif
>>       struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
>> +    unsigned int accessing_pids;
>> +    int next_pid_slot;
>>   } __randomize_layout;
> 
> What immediately jumps at me is the unconditional grow of a VMA by 8 
> bytes. A process with 64k mappings consumes 512 KiB more of memory, 
> possibly completely unnecessarily.
> 
> This at least needs to be fenced by CONFIG_NUMA_BALANCING.
> 

Thanks for the review, David. Good point, I do agree. I see I will have
to add the fencing in memory.c as well, since fair.c is already taken care of.
Mel Gorman Jan. 17, 2023, 2:59 p.m. UTC | #3
Note that the cc list is excessive for the topic.

On Mon, Jan 16, 2023 at 07:05:34AM +0530, Raghavendra K T wrote:
>  During the Numa scanning make sure only relevant vmas of the
> tasks are scanned.
> 
> Logic:
> 1) For the first two time allow unconditional scanning of vmas
> 2) Store recent 4 unique tasks (last 8bits of PIDs) accessed the vma.
>   False negetives in case of collison should be fine here.
> 3) If more than 4 pids exist assume task indeed accessed vma to
>  to avoid false negetives
> 
> Co-developed-by: Bharata B Rao <bharata@amd.com>
> (initial patch to store pid information)
> 
> Suggested-by: Mel Gorman <mgorman@techsingularity.net>
> Signed-off-by: Bharata B Rao <bharata@amd.com>
> Signed-off-by: Raghavendra K T <raghavendra.kt@amd.com>
> ---
>  include/linux/mm_types.h |  2 ++
>  kernel/sched/fair.c      | 32 ++++++++++++++++++++++++++++++++
>  mm/memory.c              | 21 +++++++++++++++++++++
>  3 files changed, 55 insertions(+)
> 
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 500e536796ca..07feae37b8e6 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -506,6 +506,8 @@ struct vm_area_struct {
>  	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
>  #endif
>  	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
> +	unsigned int accessing_pids;
> +	int next_pid_slot;
>  } __randomize_layout;
>  

This should be behind CONFIG_NUMA_BALANCING but per-vma state should also be
tracked in its own struct and allocated on demand iff the state is required.

>  struct kioctx_table;
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index e4a0b8bd941c..944d2e3b0b3c 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -2916,6 +2916,35 @@ static void reset_ptenuma_scan(struct task_struct *p)
>  	p->mm->numa_scan_offset = 0;
>  }
>  
> +static bool vma_is_accessed(struct vm_area_struct *vma)
> +{
> +	int i;
> +	bool more_pids_exist;
> +	unsigned long pid, max_pids;
> +	unsigned long current_pid = current->pid & LAST__PID_MASK;
> +
> +	max_pids = sizeof(unsigned int) * BITS_PER_BYTE / LAST__PID_SHIFT;
> +
> +	/* By default we assume >= max_pids exist */
> +	more_pids_exist = true;
> +
> +	if (READ_ONCE(current->mm->numa_scan_seq) < 2)
> +		return true;
> +
> +	for (i = 0; i < max_pids; i++) {
> +		pid = (vma->accessing_pids >> i * LAST__PID_SHIFT) &
> +			LAST__PID_MASK;
> +		if (pid == current_pid)
> +			return true;
> +		if (pid == 0) {
> +			more_pids_exist = false;
> +			break;
> +		}
> +	}
> +
> +	return more_pids_exist;
> +}

I get that the intent is to avoid a PID scanning VMAs that it has never faulted
within, but it seems unnecessarily complex to search on every fault to track
just 4 pids with no recent access information. The pid modulo BITS_PER_WORD
could be used to set a bit on an unsigned long to track approximate recent
accesses and skip VMAs that do not have the bit set. That would allow more
recent PIDs to be tracked, although false positives would still exist. It
would be necessary to reset the mask periodically.

Even tracking 4 pids, a reset is periodically needed. Otherwise it'll
be vulnerable to changes in phase behaviour causing all pids to scan all
VMAs again.
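
A minimal sketch of that scheme might look like the following (not the posted
patch; the field name pids_active and the reset interval are illustrative
assumptions):

/* Fault path: hash the faulting task's PID to one bit of a per-VMA word. */
static inline void vma_mark_active_pid(unsigned long *pids_active, pid_t pid)
{
	*pids_active |= 1UL << (pid % BITS_PER_LONG);
}

/* Scan path: false positives possible on collision, no false negatives. */
static inline bool vma_pid_maybe_active(unsigned long pids_active, pid_t pid)
{
	return pids_active & (1UL << (pid % BITS_PER_LONG));
}

/* Periodic reset so phase changes do not leave stale bits behind. */
static inline void vma_reset_active_pids(unsigned long *pids_active,
					 int scan_seq, int reset_interval)
{
	if ((scan_seq % reset_interval) == 0)
		*pids_active = 0;
}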

> @@ -3015,6 +3044,9 @@ static void task_numa_work(struct callback_head *work)
>  		if (!vma_is_accessible(vma))
>  			continue;
>  
> +		if (!vma_is_accessed(vma))
> +			continue;
> +
>  		do {
>  			start = max(start, vma->vm_start);
>  			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
> diff --git a/mm/memory.c b/mm/memory.c
> index 8c8420934d60..fafd78d87a51 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4717,7 +4717,28 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>  	pte_t pte, old_pte;
>  	bool was_writable = pte_savedwrite(vmf->orig_pte);
>  	int flags = 0;
> +	int pid_slot = vma->next_pid_slot;
>  
> +	int i;
> +	unsigned long pid, max_pids;
> +	unsigned long current_pid = current->pid & LAST__PID_MASK;
> +
> +	max_pids = sizeof(unsigned int) * BITS_PER_BYTE / LAST__PID_SHIFT;
> +

Won't build on defconfig

> +	/* Avoid duplicate PID updation */
> +	for (i = 0; i < max_pids; i++) {
> +		pid = (vma->accessing_pids >> i * LAST__PID_SHIFT) &
> +			LAST__PID_MASK;
> +		if (pid == current_pid)
> +			goto skip_update;
> +	}
> +
> +	vma->next_pid_slot = (++pid_slot) % max_pids;
> +	vma->accessing_pids &= ~(LAST__PID_MASK << (pid_slot * LAST__PID_SHIFT));
> +	vma->accessing_pids |= ((current_pid) <<
> +			(pid_slot * LAST__PID_SHIFT));
> +

The PID tracking and clearing should probably be split out but that aside,
what about do_huge_pmd_numa_page?

First off though, expanding VMA size by more than a word for NUMA balancing
is probably a no-go.

This is a build-tested only prototype to illustrate how VMA could track
NUMA balancing state. It starts with applying the scan delay to every VMA
instead of every task to avoid scanning new or very short-lived VMAs. I
went back to my old notes on how I hoped to reduce excessive scanning in
NUMA balancing and it happened to be second on my list and straight-forward
to prototype in a few minutes.

diff --git a/include/linux/mm.h b/include/linux/mm.h
index f3f196e4d66d..3cebda5cc8a7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -620,6 +620,9 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
 	vma->vm_mm = mm;
 	vma->vm_ops = &dummy_vm_ops;
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
+#ifdef CONFIG_NUMA_BALANCING
+	vma->numab = NULL;
+#endif
 }
 
 static inline void vma_set_anonymous(struct vm_area_struct *vma)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3b8475007734..3c0cfdde33e0 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -526,6 +526,10 @@ struct anon_vma_name {
 	char name[];
 };
 
+struct vma_numab {
+	unsigned long next_scan;
+};
+
 /*
  * This struct describes a virtual memory area. There is one of these
  * per VM-area/task. A VM area is any part of the process virtual memory
@@ -593,6 +597,9 @@ struct vm_area_struct {
 #endif
 #ifdef CONFIG_NUMA
 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
+#endif
+#ifdef CONFIG_NUMA_BALANCING
+	struct vma_numab *numab;	/* NUMA Balancing state */
 #endif
 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
 } __randomize_layout;
diff --git a/kernel/fork.c b/kernel/fork.c
index 9f7fe3541897..2d34c484553d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -481,6 +481,9 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 
 void vm_area_free(struct vm_area_struct *vma)
 {
+#ifdef CONFIG_NUMA_BALANCING
+	kfree(vma->numab);
+#endif
 	free_anon_vma_name(vma);
 	kmem_cache_free(vm_area_cachep, vma);
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c36aa54ae071..6a1cffdfc76b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3027,6 +3027,23 @@ static void task_numa_work(struct callback_head *work)
 		if (!vma_is_accessible(vma))
 			continue;
 
+		/* Initialise new per-VMA NUMAB state. */
+		if (!vma->numab) {
+			vma->numab = kzalloc(sizeof(struct vma_numab), GFP_KERNEL);
+			if (!vma->numab)
+				continue;
+
+			vma->numab->next_scan = now +
+				msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
+		}
+
+		/*
+		 * After the first scan is complete, delay the balancing scan
+		 * for new VMAs.
+		 */
+		if (mm->numa_scan_seq && time_before(jiffies, vma->numab->next_scan))
+			continue;
+
 		do {
 			start = max(start, vma->vm_start);
 			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
Raghavendra K T Jan. 17, 2023, 5:45 p.m. UTC | #4
On 1/17/2023 8:29 PM, Mel Gorman wrote:
> Note that the cc list is excessive for the topic.
> 

Thank you, Mel, for the review. Sorry for the long list (generated by
get_maintainer). Will trim the list for V2.

> On Mon, Jan 16, 2023 at 07:05:34AM +0530, Raghavendra K T wrote:
>>   During the Numa scanning make sure only relevant vmas of the
>> tasks are scanned.
>>
>> Logic:
>> 1) For the first two time allow unconditional scanning of vmas
>> 2) Store recent 4 unique tasks (last 8bits of PIDs) accessed the vma.
>>    False negetives in case of collison should be fine here.
>> 3) If more than 4 pids exist assume task indeed accessed vma to
>>   to avoid false negetives
>>
>> Co-developed-by: Bharata B Rao <bharata@amd.com>
>> (initial patch to store pid information)
>>
>> Suggested-by: Mel Gorman <mgorman@techsingularity.net>
>> Signed-off-by: Bharata B Rao <bharata@amd.com>
>> Signed-off-by: Raghavendra K T <raghavendra.kt@amd.com>
>> ---
>>   include/linux/mm_types.h |  2 ++
>>   kernel/sched/fair.c      | 32 ++++++++++++++++++++++++++++++++
>>   mm/memory.c              | 21 +++++++++++++++++++++
>>   3 files changed, 55 insertions(+)
>>
>> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
>> index 500e536796ca..07feae37b8e6 100644
>> --- a/include/linux/mm_types.h
>> +++ b/include/linux/mm_types.h
>> @@ -506,6 +506,8 @@ struct vm_area_struct {
>>   	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
>>   #endif
>>   	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
>> +	unsigned int accessing_pids;
>> +	int next_pid_slot;
>>   } __randomize_layout;
>>   
> 
> This should be behind CONFIG_NUMA_BALANCING but per-vma state should also be
> tracked in its own struct and allocated on demand iff the state is required.
> 

Agree, as David also pointed out. I will take your patch below as the base
to develop the per-VMA struct on its own.

>>   struct kioctx_table;
>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>> index e4a0b8bd941c..944d2e3b0b3c 100644
>> --- a/kernel/sched/fair.c
>> +++ b/kernel/sched/fair.c
>> @@ -2916,6 +2916,35 @@ static void reset_ptenuma_scan(struct task_struct *p)
>>   	p->mm->numa_scan_offset = 0;
>>   }
>>   
>> +static bool vma_is_accessed(struct vm_area_struct *vma)
>> +{
>> +	int i;
>> +	bool more_pids_exist;
>> +	unsigned long pid, max_pids;
>> +	unsigned long current_pid = current->pid & LAST__PID_MASK;
>> +
>> +	max_pids = sizeof(unsigned int) * BITS_PER_BYTE / LAST__PID_SHIFT;
>> +
>> +	/* By default we assume >= max_pids exist */
>> +	more_pids_exist = true;
>> +
>> +	if (READ_ONCE(current->mm->numa_scan_seq) < 2)
>> +		return true;
>> +
>> +	for (i = 0; i < max_pids; i++) {
>> +		pid = (vma->accessing_pids >> i * LAST__PID_SHIFT) &
>> +			LAST__PID_MASK;
>> +		if (pid == current_pid)
>> +			return true;
>> +		if (pid == 0) {
>> +			more_pids_exist = false;
>> +			break;
>> +		}
>> +	}
>> +
>> +	return more_pids_exist;
>> +}
> 
> I get the intent is to avoid PIDs scanning VMAs that it has never faulted
> within but it seems unnecessarily complex to search on every fault to track
> just 4 pids with no recent access information. The pid modulo BITS_PER_WORD
> couls be used to set a bit on an unsigned long to track approximate recent
> acceses and skip VMAs that do not have the bit set. That would allow more
> recent PIDs to be tracked although false positives would still exist. It
> would be necessary to reset the mask periodically.

Got the idea, but I lost you on "pid modulo BITS_PER_WORD" (is it
extracting the last 5 or 8 bits of the PID?). Or
do you intend to say we can just do

vma->accessing_pids |= current_pid;

so that later we can just check

if ((vma->accessing_pids | current_pid) == vma->accessing_pids) then it is
a hit?
This becomes simple and we avoid iteration, duplicate tracking, etc.

> 
> Even tracking 4 pids, a reset is periodically needed. Otherwise it'll
> be vulnerable to changes in phase behaviour causing all pids to scan all
> VMAs again.
> 

Agree. Yes, this will be the key thing to do. On a related note, I saw
a huge increase in numa_scan_seq because we visit the scanning path more
frequently after the patch.

>> @@ -3015,6 +3044,9 @@ static void task_numa_work(struct callback_head *work)
>>   		if (!vma_is_accessible(vma))
>>   			continue;
>>   
>> +		if (!vma_is_accessed(vma))
>> +			continue;
>> +
>>   		do {
>>   			start = max(start, vma->vm_start);
>>   			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
>> diff --git a/mm/memory.c b/mm/memory.c
>> index 8c8420934d60..fafd78d87a51 100644
>> --- a/mm/memory.c
>> +++ b/mm/memory.c
>> @@ -4717,7 +4717,28 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>   	pte_t pte, old_pte;
>>   	bool was_writable = pte_savedwrite(vmf->orig_pte);
>>   	int flags = 0;
>> +	int pid_slot = vma->next_pid_slot;
>>   
>> +	int i;
>> +	unsigned long pid, max_pids;
>> +	unsigned long current_pid = current->pid & LAST__PID_MASK;
>> +
>> +	max_pids = sizeof(unsigned int) * BITS_PER_BYTE / LAST__PID_SHIFT;
>> +
> 
> Won't build on defconfig
> 

Oops! Sorry. This also should ideally have gone behind
CONFIG_NUMA_BALANCING.

>> +	/* Avoid duplicate PID updation */
>> +	for (i = 0; i < max_pids; i++) {
>> +		pid = (vma->accessing_pids >> i * LAST__PID_SHIFT) &
>> +			LAST__PID_MASK;
>> +		if (pid == current_pid)
>> +			goto skip_update;
>> +	}
>> +
>> +	vma->next_pid_slot = (++pid_slot) % max_pids;
>> +	vma->accessing_pids &= ~(LAST__PID_MASK << (pid_slot * LAST__PID_SHIFT));
>> +	vma->accessing_pids |= ((current_pid) <<
>> +			(pid_slot * LAST__PID_SHIFT));
>> +
> 
> The PID tracking and clearing should probably be split out but that aside,

Sure will do.

> what about do_huge_pmd_numa_page?

Will target this eventually (ASAP if it is less complicated) :)

> 
> First off though, expanding VMA size by more than a word for NUMA balancing
> is probably a no-go.
> 
Agree

> This is a build-tested only prototype to illustrate how VMA could track
> NUMA balancing state. It starts with applying the scan delay to every VMA
> instead of every task to avoid scanning new or very short-lived VMAs. I
> went back to my old notes on how I hoped to reduce excessive scanning in
> NUMA balancing and it happened to be second on my list and straight-forward
> to prototype in a few minutes.
> 

Nice idea, thanks again. I will take this as a base patch for expansion.

> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index f3f196e4d66d..3cebda5cc8a7 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -620,6 +620,9 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
>   	vma->vm_mm = mm;
>   	vma->vm_ops = &dummy_vm_ops;
>   	INIT_LIST_HEAD(&vma->anon_vma_chain);
> +#ifdef CONFIG_NUMA_BALANCING
> +	vma->numab = NULL;
> +#endif
>   }
>   
>   static inline void vma_set_anonymous(struct vm_area_struct *vma)
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 3b8475007734..3c0cfdde33e0 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -526,6 +526,10 @@ struct anon_vma_name {
>   	char name[];
>   };
>   
> +struct vma_numab {
> +	unsigned long next_scan;
> +};
> +
>   /*
>    * This struct describes a virtual memory area. There is one of these
>    * per VM-area/task. A VM area is any part of the process virtual memory
> @@ -593,6 +597,9 @@ struct vm_area_struct {
>   #endif
>   #ifdef CONFIG_NUMA
>   	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
> +#endif
> +#ifdef CONFIG_NUMA_BALANCING
> +	struct vma_numab *numab;	/* NUMA Balancing state */
>   #endif
>   	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
>   } __randomize_layout;
> diff --git a/kernel/fork.c b/kernel/fork.c
> index 9f7fe3541897..2d34c484553d 100644
> --- a/kernel/fork.c
> +++ b/kernel/fork.c
> @@ -481,6 +481,9 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
>   
>   void vm_area_free(struct vm_area_struct *vma)
>   {
> +#ifdef CONFIG_NUMA_BALANCING
> +	kfree(vma->numab);
> +#endif
>   	free_anon_vma_name(vma);
>   	kmem_cache_free(vm_area_cachep, vma);
>   }
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index c36aa54ae071..6a1cffdfc76b 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -3027,6 +3027,23 @@ static void task_numa_work(struct callback_head *work)
>   		if (!vma_is_accessible(vma))
>   			continue;
>   
> +		/* Initialise new per-VMA NUMAB state. */
> +		if (!vma->numab) {
> +			vma->numab = kzalloc(sizeof(struct vma_numab), GFP_KERNEL);
> +			if (!vma->numab)
> +				continue;
> +
> +			vma->numab->next_scan = now +
> +				msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
> +		}
> +
> +		/*
> +		 * After the first scan is complete, delay the balancing scan
> +		 * for new VMAs.
> +		 */
> +		if (mm->numa_scan_seq && time_before(jiffies, vma->numab->next_scan))
> +			continue;
> +
>   		do {
>   			start = max(start, vma->vm_start);
>   			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
> 

Thanks
- Raghu
Bharata B Rao Jan. 18, 2023, 4:43 a.m. UTC | #5
On 1/17/2023 8:29 PM, Mel Gorman wrote:
> Note that the cc list is excessive for the topic.

(Wasn't sure about pruning the CC list mid-thread, hence continuing with it)

<snip>

> 
> This is a build-tested only prototype to illustrate how VMA could track
> NUMA balancing state. It starts with applying the scan delay to every VMA
> instead of every task to avoid scanning new or very short-lived VMAs. I
> went back to my old notes on how I hoped to reduce excessive scanning in
> NUMA balancing and it happened to be second on my list and straight-forward
> to prototype in a few minutes.

While on the topic of improving NUMA balancer scanning relevancy, the following
additional points may be worth noting:

Recently there have been reports about NUMA balancing induced scanning and
subsequent MMU notifier invalidations causing problems in different scenarios.

1. Currently NUMA balancing won't check at scan time if a page (or a VMA) is
not migratable because the page (or the address range) is pinned. It will go ahead
with MMU invalidation notifications and change the PTE protection to PAGE_NONE,
only to realize later, before reinstalling the original PTE, that the pinned
pages can't be migrated.

This was found to cause issues to SEV guests whose pages are completely pinned.
This was discussed here - https://lore.kernel.org/all/20220927000729.498292-1-Ashish.Kalra@amd.com/

We could probably use page_maybe_dma_pinned() to determine if the page is
long-term pinned and avoid the MMU invalidation and protection change for such
a page. However, we would then have to do per-page invalidations (as opposed to
the one-time PMD-range invalidation that is done currently), which is probably
not desirable.

Also, MMU invalidations are expected to be issued in a sleepable context (mostly,
except for the OOM notification which uses the nonblocking version, AFAICS). This
makes it difficult to check the pinned state of the page prior to MMU invalidation.
Some of this is discussed here: https://lore.kernel.org/linux-arm-kernel/YuEMkKY2RU%2F2KiZW@monolith.localdoman/

The current patchset, where we attempt to restrict scanning to relevant VMAs, may
help the above case partially, but are there any ideas on addressing this issue
comprehensively? It would be ideal if we could clearly identify such
non-migratable (long-term pinned) pages and exclude them entirely from scanning
and protection changes.

2. Applications that run on GPUs may like to avoid NUMA balancing activity
completely, and they benefit from per-process enabling/disabling of NUMA balancing.
The patchset that helps with this (which has a different use case for per-process
control) is here - https://lore.kernel.org/all/49ed07b1-e167-7f94-9986-8e86fb60bb09@nvidia.com/

Improvements to scanning relevancy can help this case to an extent,
but per-process NUMA balancing control should still be a useful control to have.

Regards,
Bharata.
Raghavendra K T Jan. 18, 2023, 5:47 a.m. UTC | #6
On 1/17/2023 11:15 PM, Raghavendra K T wrote:
> On 1/17/2023 8:29 PM, Mel Gorman wrote:
>> Note that the cc list is excessive for the topic.
>>
[...]
> 
>>>   struct kioctx_table;
>>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>>> index e4a0b8bd941c..944d2e3b0b3c 100644
>>> --- a/kernel/sched/fair.c
>>> +++ b/kernel/sched/fair.c
>>> @@ -2916,6 +2916,35 @@ static void reset_ptenuma_scan(struct 
>>> task_struct *p)
>>>       p->mm->numa_scan_offset = 0;
>>>   }
>>> +static bool vma_is_accessed(struct vm_area_struct *vma)
>>> +{
>>> +    int i;
>>> +    bool more_pids_exist;
>>> +    unsigned long pid, max_pids;
>>> +    unsigned long current_pid = current->pid & LAST__PID_MASK;
>>> +
>>> +    max_pids = sizeof(unsigned int) * BITS_PER_BYTE / LAST__PID_SHIFT;
>>> +
>>> +    /* By default we assume >= max_pids exist */
>>> +    more_pids_exist = true;
>>> +
>>> +    if (READ_ONCE(current->mm->numa_scan_seq) < 2)
>>> +        return true;
>>> +
>>> +    for (i = 0; i < max_pids; i++) {
>>> +        pid = (vma->accessing_pids >> i * LAST__PID_SHIFT) &
>>> +            LAST__PID_MASK;
>>> +        if (pid == current_pid)
>>> +            return true;
>>> +        if (pid == 0) {
>>> +            more_pids_exist = false;
>>> +            break;
>>> +        }
>>> +    }
>>> +
>>> +    return more_pids_exist;
>>> +}
>>
>> I get the intent is to avoid PIDs scanning VMAs that it has never faulted
>> within but it seems unnecessarily complex to search on every fault to 
>> track
>> just 4 pids with no recent access information. The pid modulo 
>> BITS_PER_WORD
>> couls be used to set a bit on an unsigned long to track approximate 
>> recent
>> acceses and skip VMAs that do not have the bit set. That would allow more
>> recent PIDs to be tracked although false positives would still exist. It
>> would be necessary to reset the mask periodically.
> 
> Got the idea but I lost you on pid modulo BITS_PER_WORD, (is it
> extracting last 5 or 8 bits of PID?) OR
> Do you intend to say we can just do
> 
> vma->accessing_pids | = current_pid..
> 
> so that later we can just check
> if (vma->accessing_pids | current_pid) == vma->accessing_pids then it is
> a hit..
> This becomes simple and we avoid iteration, duplicate tracking etc
> 

Did some more brainstorming/thought on this; I see that you meant

active_bit = (current_pid % BITS_PER_LONG);
accessing_pids |= (1UL << active_bit);

In the scan path:
active_bit = (current_pid % BITS_PER_LONG);
if (!(accessing_pids & (1UL << active_bit)))
	goto skip_scanning;

My approach above would perhaps give more false positives; this seems
the better thing to do.

Thanks, I will come up with numbers for this patch + your vma scan delay
patch.

>>
>> Even tracking 4 pids, a reset is periodically needed. Otherwise it'll
>> be vulnerable to changes in phase behaviour causing all pids to scan all
>> VMAs again.
>>
> 
> Agree. Yes this will be the key thing to do. On a related note I saw
> huge increment in numa_scan_seq because we frequently visit scanning
> after the patch
> 
[...]
Mike Rapoport Jan. 19, 2023, 9:39 a.m. UTC | #7
Hi,

On Mon, Jan 16, 2023 at 07:05:34AM +0530, Raghavendra K T wrote:
>  During the Numa scanning make sure only relevant vmas of the
> tasks are scanned.

Please add a more detailed description of the issues with the
current scanning that this patch aims to solve.

> Logic:
> 1) For the first two time allow unconditional scanning of vmas
> 2) Store recent 4 unique tasks (last 8bits of PIDs) accessed the vma.
>   False negetives in case of collison should be fine here.

         ^ negatives

> 3) If more than 4 pids exist assume task indeed accessed vma to
>  to avoid false negetives
> 
> Co-developed-by: Bharata B Rao <bharata@amd.com>
> (initial patch to store pid information)
> 
> Suggested-by: Mel Gorman <mgorman@techsingularity.net>
> Signed-off-by: Bharata B Rao <bharata@amd.com>
> Signed-off-by: Raghavendra K T <raghavendra.kt@amd.com>
> ---
>  include/linux/mm_types.h |  2 ++
>  kernel/sched/fair.c      | 32 ++++++++++++++++++++++++++++++++
>  mm/memory.c              | 21 +++++++++++++++++++++
>  3 files changed, 55 insertions(+)
> 
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 500e536796ca..07feae37b8e6 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -506,6 +506,8 @@ struct vm_area_struct {
>  	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
>  #endif
>  	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
> +	unsigned int accessing_pids;
> +	int next_pid_slot;
>  } __randomize_layout;
>  
>  struct kioctx_table;
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index e4a0b8bd941c..944d2e3b0b3c 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -2916,6 +2916,35 @@ static void reset_ptenuma_scan(struct task_struct *p)
>  	p->mm->numa_scan_offset = 0;
>  }
>  
> +static bool vma_is_accessed(struct vm_area_struct *vma)
> +{
> +	int i;
> +	bool more_pids_exist;
> +	unsigned long pid, max_pids;
> +	unsigned long current_pid = current->pid & LAST__PID_MASK;
> +
> +	max_pids = sizeof(unsigned int) * BITS_PER_BYTE / LAST__PID_SHIFT;
> +
> +	/* By default we assume >= max_pids exist */
> +	more_pids_exist = true;
> +
> +	if (READ_ONCE(current->mm->numa_scan_seq) < 2)
> +		return true;
> +
> +	for (i = 0; i < max_pids; i++) {
> +		pid = (vma->accessing_pids >> i * LAST__PID_SHIFT) &
> +			LAST__PID_MASK;
> +		if (pid == current_pid)
> +			return true;
> +		if (pid == 0) {
> +			more_pids_exist = false;
> +			break;
> +		}
> +	}
> +
> +	return more_pids_exist;
> +}
> +
>  /*
>   * The expensive part of numa migration is done from task_work context.
>   * Triggered from task_tick_numa().
> @@ -3015,6 +3044,9 @@ static void task_numa_work(struct callback_head *work)
>  		if (!vma_is_accessible(vma))
>  			continue;
>  
> +		if (!vma_is_accessed(vma))
> +			continue;
> +
>  		do {
>  			start = max(start, vma->vm_start);
>  			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
> diff --git a/mm/memory.c b/mm/memory.c
> index 8c8420934d60..fafd78d87a51 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4717,7 +4717,28 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>  	pte_t pte, old_pte;
>  	bool was_writable = pte_savedwrite(vmf->orig_pte);
>  	int flags = 0;
> +	int pid_slot = vma->next_pid_slot;
>  
> +	int i;
> +	unsigned long pid, max_pids;
> +	unsigned long current_pid = current->pid & LAST__PID_MASK;
> +
> +	max_pids = sizeof(unsigned int) * BITS_PER_BYTE / LAST__PID_SHIFT;
> +
> +	/* Avoid duplicate PID updation */
> +	for (i = 0; i < max_pids; i++) {
> +		pid = (vma->accessing_pids >> i * LAST__PID_SHIFT) &
> +			LAST__PID_MASK;
> +		if (pid == current_pid)
> +			goto skip_update;
> +	}
> +
> +	vma->next_pid_slot = (++pid_slot) % max_pids;
> +	vma->accessing_pids &= ~(LAST__PID_MASK << (pid_slot * LAST__PID_SHIFT));
> +	vma->accessing_pids |= ((current_pid) <<
> +			(pid_slot * LAST__PID_SHIFT));
> +
> +skip_update:
>  	/*
>  	 * The "pte" at this point cannot be used safely without
>  	 * validation through pte_unmap_same(). It's of NUMA type but
> -- 
> 2.34.1
> 
>
Raghavendra K T Jan. 19, 2023, 10:24 a.m. UTC | #8
On 1/19/2023 3:09 PM, Mike Rapoport wrote:
> Hi,
> 
> On Mon, Jan 16, 2023 at 07:05:34AM +0530, Raghavendra K T wrote:
>>   During the Numa scanning make sure only relevant vmas of the
>> tasks are scanned.
> 
> Please add more detailed description about what are the issues with the
> current scanning this patch aims to solve.

Thank you for the review, Mike. Sure, will add more detail to the
commit message in V2.

> 
>> Logic:
>> 1) For the first two time allow unconditional scanning of vmas
>> 2) Store recent 4 unique tasks (last 8bits of PIDs) accessed the vma.
>>    False negetives in case of collison should be fine here.
> 
>           ^ negatives

Will take care of this and the one below.

>> 3) If more than 4 pids exist assume task indeed accessed vma to
>>   to avoid false negetives
>>
>> Co-developed-by: Bharata B Rao <bharata@amd.com>
>> (initial patch to store pid information)
>>
>> Suggested-by: Mel Gorman <mgorman@techsingularity.net>
>> Signed-off-by: Bharata B Rao <bharata@amd.com>
>> Signed-off-by: Raghavendra K T <raghavendra.kt@amd.com>
>> ---
[...]
Raghavendra K T Jan. 24, 2023, 7:18 p.m. UTC | #9
On 1/17/2023 11:15 PM, Raghavendra K T wrote:
> On 1/17/2023 8:29 PM, Mel Gorman wrote:
>> Note that the cc list is excessive for the topic.
>>
> 
> Thank you Mel for the review. Sorry for the long list. (got by
> get_maintainer). Will trim the list for V2.
>
(trimming the list early)
[...]
> 
> Nice idea. Thanks again.. I will take this as a base patch for expansion.
> 
>> diff --git a/include/linux/mm.h b/include/linux/mm.h
>> index f3f196e4d66d..3cebda5cc8a7 100644
>> --- a/include/linux/mm.h
>> +++ b/include/linux/mm.h
>> @@ -620,6 +620,9 @@ static inline void vma_init(struct vm_area_struct 
>> *vma, struct mm_struct *mm)
>>       vma->vm_mm = mm;
>>       vma->vm_ops = &dummy_vm_ops;
>>       INIT_LIST_HEAD(&vma->anon_vma_chain);
>> +#ifdef CONFIG_NUMA_BALANCING
>> +    vma->numab = NULL;
>> +#endif
>>   }
>>   static inline void vma_set_anonymous(struct vm_area_struct *vma)
>> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
>> index 3b8475007734..3c0cfdde33e0 100644
>> --- a/include/linux/mm_types.h
>> +++ b/include/linux/mm_types.h
>> @@ -526,6 +526,10 @@ struct anon_vma_name {
>>       char name[];
>>   };
>> +struct vma_numab {
>> +    unsigned long next_scan;
>> +};
>> +
>>   /*
>>    * This struct describes a virtual memory area. There is one of these
>>    * per VM-area/task. A VM area is any part of the process virtual 
>> memory
>> @@ -593,6 +597,9 @@ struct vm_area_struct {
>>   #endif
>>   #ifdef CONFIG_NUMA
>>       struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
>> +#endif
>> +#ifdef CONFIG_NUMA_BALANCING
>> +    struct vma_numab *numab;    /* NUMA Balancing state */
>>   #endif
>>       struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
>>   } __randomize_layout;
>> diff --git a/kernel/fork.c b/kernel/fork.c
>> index 9f7fe3541897..2d34c484553d 100644
>> --- a/kernel/fork.c
>> +++ b/kernel/fork.c
>> @@ -481,6 +481,9 @@ struct vm_area_struct *vm_area_dup(struct 
>> vm_area_struct *orig)
>>   void vm_area_free(struct vm_area_struct *vma)
>>   {
>> +#ifdef CONFIG_NUMA_BALANCING
>> +    kfree(vma->numab);
>> +#endif >>       free_anon_vma_name(vma);
>>       kmem_cache_free(vm_area_cachep, vma);
>>   }

While running mmtests kernbench on a 256-pCPU machine, I hit the BUG()
below (not reproducible in the normal boot flow otherwise).

[  716.825398] kernel BUG at mm/slub.c:419!
[  716.825736] invalid opcode: 0000 [#146] PREEMPT SMP NOPTI
[  716.826042] CPU: 232 PID: 364844 Comm: cc1 Tainted: G      D W 
    6.1.0-test-snp-host-a7065246cf78+ #44
[  716.826345] Hardware name: Dell Inc. PowerEdge R6525/024PW1, BIOS 
2.6.6 01/13/2022
[  716.826645] RIP: 0010:__kmem_cache_free+0x2a4/0x2c0
[  716.826941] Code: ff e9 32 ff ff ff 49 8b 47 08 f0 48 83 28 01 0f 85 
9b fe ff ff 49 8b 47 08 4c 89 ff 48 8b 40 08 e8 a1 c5 cc 00 e9 86 fe ff 
ff <0f> 0b 48 8b 15 63 d6 4d 01 e9 85 fd ff ff 66 66 2e 0f 1f 84 00 00
[  716.827550] RSP: 0018:ffffb0b070547c28 EFLAGS: 00010246
[  716.827865] RAX: ffff990fa6bf1310 RBX: ffff990fa6bf1310 RCX: 
ffff990fa6bf1310
[  716.828180] RDX: 00000000001000e8 RSI: 0000000000000000 RDI: 
ffff98d000044200
[  716.828503] RBP: ffffb0b070547c50 R08: ffff98d030f222e0 R09: 
0000000000000001
[  716.828821] R10: ffff990ff6d298b0 R11: ffff98d030f226a0 R12: 
ffff98d000044200
[  716.829139] R13: ffffd605c29afc40 R14: ffffffff9e89c20f R15: 
ffffb0b070547d58
[  716.829458] FS:  00007f05f4cebac0(0000) GS:ffff994e00800000(0000) 
knlGS:0000000000000000
[  716.829781] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[  716.830105] CR2: 00007f05e9cbc002 CR3: 00000040eea7c005 CR4: 
0000000000770ee0
[  716.830432] PKRU: 55555554
[  716.830749] Call Trace:
[  716.831057]  <TASK>
[  716.831360]  kfree+0x79/0x120
[  716.831664]  vm_area_free+0x1f/0x50
[  716.831970]  vma_expand+0x311/0x3e0
[  716.832274]  mmap_region+0x772/0x900
[  716.832571]  do_mmap+0x3c0/0x5e0
[  716.832866]  ? __this_cpu_preempt_check+0x13/0x20
[  716.833165]  ? security_mmap_file+0xa1/0xc0
[  716.833458]  vm_mmap_pgoff+0xd5/0x170
[  716.833745]  ksys_mmap_pgoff+0x46/0x210
[  716.834022]  __x64_sys_mmap+0x33/0x50
[  716.834291]  do_syscall_64+0x3b/0x90
[  716.834549]  entry_SYSCALL_64_after_hwframe+0x63/0xcd
[  716.834806] RIP: 0033:0x7f05f471ebd7
[  716.835054] Code: 00 00 00 89 ef e8 59 ae ff ff eb e4 e8 62 7b 01 00 
66 90 f3 0f 1e fa 41 89 ca 41 f7 c1 ff 0f 00 00 75 10 b8 09 00 00 00 0f 
05 <48> 3d 00 f0 ff ff 77 21 c3 48 8b 05 29 a2 0f 00 64 c7 00 16 00 00
[  716.835567] RSP: 002b:00007fff24c27ae8 EFLAGS: 00000246 ORIG_RAX: 
0000000000000009
[  716.835826] RAX: ffffffffffffffda RBX: 0000000000200000 RCX: 
00007f05f471ebd7
[  716.836077] RDX: 0000000000000003 RSI: 0000000000200000 RDI: 
0000000000000000
[  716.836323] RBP: 0000000000000000 R08: 00000000ffffffff R09: 
0000000000000000
[  716.836567] R10: 0000000000000022 R11: 0000000000000246 R12: 
0000000000000038
[  716.836808] R13: 0000000000001fff R14: 0000000000000044 R15: 
0000000000000048
[  716.837049]  </TASK>
[  716.837285] Modules linked in: tls ipmi_ssif binfmt_misc 
nls_iso8859_1 joydev input_leds intel_rapl_msr intel_rapl_common 
amd64_edac edac_mce_amd hid_generic kvm_amd dell_smbios dcdbas wmi_bmof 
dell_wmi_descriptor kvm usbhid hid ccp k10temp wmi ipmi_si ipmi_devintf 
ipmi_msghandler acpi_power_meter mac_hid sch_fq_codel dm_multipath 
scsi_dh_rdac scsi_dh_emc scsi_dh_alua msr efi_pstore ip_tables x_tables 
autofs4 btrfs blake2b_generic zstd_compress raid10 raid456 
async_raid6_recov async_memcpy async_pq async_xor async_tx xor raid6_pq 
libcrc32c raid1 raid0 multipath linear mgag200 drm_kms_helper 
syscopyarea sysfillrect sysimgblt fb_sys_fops crct10dif_pclmul 
i2c_algo_bit crc32_pclmul drm_shmem_helper ghash_clmulni_intel nvme 
aesni_intel crypto_simd cryptd tg3 drm nvme_core megaraid_sas ahci 
xhci_pci i2c_piix4 xhci_pci_renesas libahci
[  716.839185] ---[ end trace 0000000000000000 ]---

Looks like we additionally have to handle numab initialization in the
vm_area_dup() code path. Something like the below fixed it (copy-pasted
from the tty):

diff --git a/kernel/fork.c b/kernel/fork.c
index 08969f5aa38d..f5b2e41296c7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -475,12 +475,18 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
                 *new = data_race(*orig);
                 INIT_LIST_HEAD(&new->anon_vma_chain);
                 dup_anon_vma_name(orig, new);
+#ifdef CONFIG_NUMA_BALANCING
+               new->numab = NULL;
+#endif
         }
         return new;
  }

Does this look okay? If so, I will fold it into the V2 spin (in the
vma_scan_delay patch), hoping you are okay with this change and do not
see any other changes required.

>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>> index c36aa54ae071..6a1cffdfc76b 100644
>> --- a/kernel/sched/fair.c
>> +++ b/kernel/sched/fair.c
>> @@ -3027,6 +3027,23 @@ static void task_numa_work(struct callback_head 
>> *work)
>>           if (!vma_is_accessible(vma))
>>               continue;
>> +        /* Initialise new per-VMA NUMAB state. */
>> +        if (!vma->numab) {
>> +            vma->numab = kzalloc(sizeof(struct vma_numab), GFP_KERNEL);
>> +            if (!vma->numab)
>> +                continue;
>> +
>> +            vma->numab->next_scan = now +
>> +                msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
>> +        }
>> +
>> +        /*
>> +         * After the first scan is complete, delay the balancing scan
>> +         * for new VMAs.
>> +         */
>> +        if (mm->numa_scan_seq && time_before(jiffies, 
>> vma->numab->next_scan))
>> +            continue;
>> +
>>           do {
>>               start = max(start, vma->vm_start);
>>               end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
>>
>
Mel Gorman Jan. 27, 2023, 10:17 a.m. UTC | #10
On Wed, Jan 25, 2023 at 12:48:16AM +0530, Raghavendra K T wrote:
> looks like we have to additionally handle numab initialization in
> vm_area_dup() code path. something like below fixed it (copied pasted
> from tty):
> 

Yep, it wasn't even boot tested. A better approach is something like this,
still not actually tested:

 include/linux/mm.h       |  9 +++++++++
 include/linux/mm_types.h |  7 +++++++
 kernel/fork.c            |  2 ++
 kernel/sched/fair.c      | 17 +++++++++++++++++
 4 files changed, 35 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8f857163ac89..481f90dc1983 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -612,6 +612,14 @@ struct vm_operations_struct {
 					  unsigned long addr);
 };
 
+#ifdef CONFIG_NUMA_BALANCING
+#define vma_numab_init(vma) do { (vma)->numab = NULL; } while (0)
+#define vma_numab_free(vma) do { kfree((vma)->numab); } while (0)
+#else
+static inline void vma_numab_init(struct vm_area_struct *vma) {}
+static inline void vma_numab_free(struct vm_area_struct *vma) {}
+#endif /* CONFIG_NUMA_BALANCING */
+
 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
 {
 	static const struct vm_operations_struct dummy_vm_ops = {};
@@ -620,6 +628,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
 	vma->vm_mm = mm;
 	vma->vm_ops = &dummy_vm_ops;
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
+	vma_numab_init(vma);
 }
 
 static inline void vma_set_anonymous(struct vm_area_struct *vma)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 9757067c3053..43ce363d5124 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -526,6 +526,10 @@ struct anon_vma_name {
 	char name[];
 };
 
+struct vma_numab {
+	unsigned long next_scan;
+};
+
 /*
  * This struct describes a virtual memory area. There is one of these
  * per VM-area/task. A VM area is any part of the process virtual memory
@@ -593,6 +597,9 @@ struct vm_area_struct {
 #endif
 #ifdef CONFIG_NUMA
 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
+#endif
+#ifdef CONFIG_NUMA_BALANCING
+	struct vma_numab *numab;	/* NUMA Balancing state */
 #endif
 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
 } __randomize_layout;
diff --git a/kernel/fork.c b/kernel/fork.c
index 9f7fe3541897..5a2e8c3cc410 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -474,6 +474,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 		 */
 		*new = data_race(*orig);
 		INIT_LIST_HEAD(&new->anon_vma_chain);
+		vma_numab_init(new);
 		dup_anon_vma_name(orig, new);
 	}
 	return new;
@@ -481,6 +482,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 
 void vm_area_free(struct vm_area_struct *vma)
 {
+	vma_numab_free(vma);
 	free_anon_vma_name(vma);
 	kmem_cache_free(vm_area_cachep, vma);
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c36aa54ae071..6a1cffdfc76b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3027,6 +3027,23 @@ static void task_numa_work(struct callback_head *work)
 		if (!vma_is_accessible(vma))
 			continue;
 
+		/* Initialise new per-VMA NUMAB state. */
+		if (!vma->numab) {
+			vma->numab = kzalloc(sizeof(struct vma_numab), GFP_KERNEL);
+			if (!vma->numab)
+				continue;
+
+			vma->numab->next_scan = now +
+				msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
+		}
+
+		/*
+		 * After the first scan is complete, delay the balancing scan
+		 * for new VMAs.
+		 */
+		if (mm->numa_scan_seq && time_before(jiffies, vma->numab->next_scan))
+			continue;
+
 		do {
 			start = max(start, vma->vm_start);
 			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
Raghavendra K T Jan. 27, 2023, 3:27 p.m. UTC | #11
On 1/27/2023 3:47 PM, Mel Gorman wrote:
> On Wed, Jan 25, 2023 at 12:48:16AM +0530, Raghavendra K T wrote:
>> looks like we have to additionally handle numab initialization in
>> vm_area_dup() code path. something like below fixed it (copied pasted
>> from tty):
>>
> 
> Yep, it wasn't even boot tested. Better approach is something like this,
> still not actually tested
> 
>   include/linux/mm.h       |  9 +++++++++
>   include/linux/mm_types.h |  7 +++++++
>   kernel/fork.c            |  2 ++
>   kernel/sched/fair.c      | 17 +++++++++++++++++
>   4 files changed, 35 insertions(+)
> 
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 8f857163ac89..481f90dc1983 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -612,6 +612,14 @@ struct vm_operations_struct {
>   					  unsigned long addr);
>   };
>   
> +#ifdef CONFIG_NUMA_BALANCING
> +#define vma_numab_init(vma) do { (vma)->numab = NULL; } while (0)
> +#define vma_numab_free(vma) do { kfree((vma)->numab); } while (0)
> +#else
> +static inline void vma_numab_init(struct vm_area_struct *vma) {}
> +static inline void vma_numab_free(struct vm_area_struct *vma) {}
> +#endif /* CONFIG_NUMA_BALANCING */
> +
>   static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
>   {
>   	static const struct vm_operations_struct dummy_vm_ops = {};
> @@ -620,6 +628,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
>   	vma->vm_mm = mm;
>   	vma->vm_ops = &dummy_vm_ops;
>   	INIT_LIST_HEAD(&vma->anon_vma_chain);
> +	vma_numab_init(vma);
>   }
>   
>   static inline void vma_set_anonymous(struct vm_area_struct *vma)
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 9757067c3053..43ce363d5124 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -526,6 +526,10 @@ struct anon_vma_name {
>   	char name[];
>   };
>   
> +struct vma_numab {
> +	unsigned long next_scan;
> +};
> +
>   /*
>    * This struct describes a virtual memory area. There is one of these
>    * per VM-area/task. A VM area is any part of the process virtual memory
> @@ -593,6 +597,9 @@ struct vm_area_struct {
>   #endif
>   #ifdef CONFIG_NUMA
>   	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
> +#endif
> +#ifdef CONFIG_NUMA_BALANCING
> +	struct vma_numab *numab;	/* NUMA Balancing state */
>   #endif
>   	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
>   } __randomize_layout;
> diff --git a/kernel/fork.c b/kernel/fork.c
> index 9f7fe3541897..5a2e8c3cc410 100644
> --- a/kernel/fork.c
> +++ b/kernel/fork.c
> @@ -474,6 +474,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
>   		 */
>   		*new = data_race(*orig);
>   		INIT_LIST_HEAD(&new->anon_vma_chain);
> +		vma_numab_init(new);
>   		dup_anon_vma_name(orig, new);
>   	}
>   	return new;
> @@ -481,6 +482,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
>   
>   void vm_area_free(struct vm_area_struct *vma)
>   {
> +	vma_numab_free(vma);
>   	free_anon_vma_name(vma);
>   	kmem_cache_free(vm_area_cachep, vma);
>   }
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index c36aa54ae071..6a1cffdfc76b 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -3027,6 +3027,23 @@ static void task_numa_work(struct callback_head *work)
>   		if (!vma_is_accessible(vma))
>   			continue;
>   
> +		/* Initialise new per-VMA NUMAB state. */
> +		if (!vma->numab) {
> +			vma->numab = kzalloc(sizeof(struct vma_numab), GFP_KERNEL);
> +			if (!vma->numab)
> +				continue;
> +
> +			vma->numab->next_scan = now +
> +				msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
> +		}
> +
> +		/*
> +		 * After the first scan is complete, delay the balancing scan
> +		 * for new VMAs.
> +		 */
> +		if (mm->numa_scan_seq && time_before(jiffies, vma->numab->next_scan))
> +			continue;
> +
>   		do {
>   			start = max(start, vma->vm_start);
>   			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);


Thank you, Mel. This looks better now.
Yes, we would have moved to mm.h eventually to avoid #if clutter.

Also, for PATCH 2, the function common to memory.c and huge_memory.c would
need the same treatment to handle do_huge_pmd_numa_page, as suggested by you.
Working on gathering numbers and the PID clearing logic now; will post V2 soon.

Thanks
- Raghu
Kalra, Ashish Feb. 21, 2023, 12:38 a.m. UTC | #12
Hello Mingwei, Sean,

Looking forward to your thoughts/feedback on the MMU invalidation
notifier issues with SEV guests, as mentioned below.

Thanks,
Ashish

On 1/17/2023 10:43 PM, Bharata B Rao wrote:
> On 1/17/2023 8:29 PM, Mel Gorman wrote:
>> Note that the cc list is excessive for the topic.
> 
> (Wasn't sure about pruning the CC list mid-thread, hence continuing with it)
> 
> <snip>
> 
>>
>> This is a build-tested only prototype to illustrate how VMA could track
>> NUMA balancing state. It starts with applying the scan delay to every VMA
>> instead of every task to avoid scanning new or very short-lived VMAs. I
>> went back to my old notes on how I hoped to reduce excessive scanning in
>> NUMA balancing and it happened to be second on my list and straight-forward
>> to prototype in a few minutes.
> 
> While on the topic of improving NUMA balancer scanning relevancy, the following
> additional points may be worth noting:
> 
> Recently there have been reports about NUMA balancing induced scanning and
> subsequent MMU notifier invalidations causing problems in different scenarios.
> 
> 1. Currently NUMA balancing won't check at scan time, if a page (or a VMA )is
> not migratable since the page (or the address range) is pinned. It will go ahead
> with MMU invalidation notifications and changes the PTE protection to PAGE_NONE
> only to realize later that the pinned pages can't be migrated before reinstalling
> the original PTE.
> 
> This was found to cause issues to SEV guests whose pages are completely pinned.
> This was discussed here - https://lore.kernel.org/all/20220927000729.498292-1-Ashish.Kalra@amd.com/
> 
> We could probably use page_maybe_dma_pinned() to determine if the page is long
> term pinned and avoid MMU invalidation and protection change for such a page.
> However then we would have to do per-page invalidations (as against one time
> PMD range invalidation that is done currently) which is probably not desirable.
> 
> Also MMU invalidations are expected to be issued under sleepable context (mostly
> except in the OOM notification which uses nonblock verion, AFAICS). This makes it
> difficult to check the pinned state of the page prior to MMU invalidation. Some of
> this is discussed here: https://lore.kernel.org/linux-arm-kernel/YuEMkKY2RU%2F2KiZW@monolith.localdoman/
> 
> This current patchset where we attempt to restrict scanning to relevant VMAs may
> help the above case partially, but any ideas on addressing this issue
> comprehensively? It would have been ideal if we could identify such non-migratable
> pages (long term pinned) clearly and avoid them entirely from scanning and protection
> change.
> 
> 2. Applications that run on GPUs may like to avoid the NUMA balancing activity
> completely and they benefit from per-process enabling/disabling of NUMA balancing.
> The patchset (which has a different use case for per-process control) that helps
> this is here - https://lore.kernel.org/all/49ed07b1-e167-7f94-9986-8e86fb60bb09@nvidia.com/
> 
> Improvements to increase the relevant scanning can help this case to an extent
> but per-process NUMA balancing control should be a useful control to have.
> 
> Regards,
> Bharata.
>

Patch

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 500e536796ca..07feae37b8e6 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -506,6 +506,8 @@  struct vm_area_struct {
 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
 #endif
 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
+	unsigned int accessing_pids;
+	int next_pid_slot;
 } __randomize_layout;
 
 struct kioctx_table;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e4a0b8bd941c..944d2e3b0b3c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2916,6 +2916,35 @@  static void reset_ptenuma_scan(struct task_struct *p)
 	p->mm->numa_scan_offset = 0;
 }
 
+static bool vma_is_accessed(struct vm_area_struct *vma)
+{
+	int i;
+	bool more_pids_exist;
+	unsigned long pid, max_pids;
+	unsigned long current_pid = current->pid & LAST__PID_MASK;
+
+	max_pids = sizeof(unsigned int) * BITS_PER_BYTE / LAST__PID_SHIFT;
+
+	/* By default we assume >= max_pids exist */
+	more_pids_exist = true;
+
+	if (READ_ONCE(current->mm->numa_scan_seq) < 2)
+		return true;
+
+	for (i = 0; i < max_pids; i++) {
+		pid = (vma->accessing_pids >> i * LAST__PID_SHIFT) &
+			LAST__PID_MASK;
+		if (pid == current_pid)
+			return true;
+		if (pid == 0) {
+			more_pids_exist = false;
+			break;
+		}
+	}
+
+	return more_pids_exist;
+}
+
 /*
  * The expensive part of numa migration is done from task_work context.
  * Triggered from task_tick_numa().
@@ -3015,6 +3044,9 @@  static void task_numa_work(struct callback_head *work)
 		if (!vma_is_accessible(vma))
 			continue;
 
+		if (!vma_is_accessed(vma))
+			continue;
+
 		do {
 			start = max(start, vma->vm_start);
 			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
diff --git a/mm/memory.c b/mm/memory.c
index 8c8420934d60..fafd78d87a51 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4717,7 +4717,28 @@  static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	pte_t pte, old_pte;
 	bool was_writable = pte_savedwrite(vmf->orig_pte);
 	int flags = 0;
+	int pid_slot = vma->next_pid_slot;
 
+	int i;
+	unsigned long pid, max_pids;
+	unsigned long current_pid = current->pid & LAST__PID_MASK;
+
+	max_pids = sizeof(unsigned int) * BITS_PER_BYTE / LAST__PID_SHIFT;
+
+	/* Avoid duplicate PID updation */
+	for (i = 0; i < max_pids; i++) {
+		pid = (vma->accessing_pids >> i * LAST__PID_SHIFT) &
+			LAST__PID_MASK;
+		if (pid == current_pid)
+			goto skip_update;
+	}
+
+	vma->next_pid_slot = (++pid_slot) % max_pids;
+	vma->accessing_pids &= ~(LAST__PID_MASK << (pid_slot * LAST__PID_SHIFT));
+	vma->accessing_pids |= ((current_pid) <<
+			(pid_slot * LAST__PID_SHIFT));
+
+skip_update:
 	/*
 	 * The "pte" at this point cannot be used safely without
 	 * validation through pte_unmap_same(). It's of NUMA type but