
[v4,6/7] hugetlb: parallelize 2M hugetlb allocation and initialization

Message ID 20240118123911.88833-7-gang.li@linux.dev (mailing list archive)
State New
Series hugetlb: parallelize hugetlb page init on boot

Commit Message

Gang Li Jan. 18, 2024, 12:39 p.m. UTC
By distributing both the allocation and the initialization tasks across
multiple threads, the initialization of 2M hugetlb will be faster,
thereby improving the boot speed.

Here are some test results:
        test          no patch(ms)   patched(ms)   saved
 ------------------- -------------- ------------- --------
  256c2t(4 node) 2M           3336          1051   68.52%
  128c1t(2 node) 2M           1943           716   63.15%

Signed-off-by: Gang Li <gang.li@linux.dev>
Tested-by: David Rientjes <rientjes@google.com>
---
 mm/hugetlb.c | 70 ++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 52 insertions(+), 18 deletions(-)
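
For context, this boot path is exercised when hugetlb pages are reserved on the kernel command line. A typical invocation for 2M pages is shown below; the page count is only a placeholder, since the series does not state the exact values used for the tests above:

```
hugepagesz=2M hugepages=<N>
```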

Comments

Muchun Song Jan. 22, 2024, 7:10 a.m. UTC | #1
On 2024/1/18 20:39, Gang Li wrote:
> By distributing both the allocation and the initialization tasks across
> multiple threads, the initialization of 2M hugetlb will be faster,
> thereby improving the boot speed.
>
> Here are some test results:
>          test          no patch(ms)   patched(ms)   saved
>   ------------------- -------------- ------------- --------
>    256c2t(4 node) 2M           3336          1051   68.52%
>    128c1t(2 node) 2M           1943           716   63.15%
>
> Signed-off-by: Gang Li <gang.li@linux.dev>
> Tested-by: David Rientjes <rientjes@google.com>
> ---
>   mm/hugetlb.c | 70 ++++++++++++++++++++++++++++++++++++++--------------
>   1 file changed, 52 insertions(+), 18 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index effe5539e545..9b348ba418f5 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -35,6 +35,7 @@
>   #include <linux/delayacct.h>
>   #include <linux/memory.h>
>   #include <linux/mm_inline.h>
> +#include <linux/padata.h>
>   
>   #include <asm/page.h>
>   #include <asm/pgalloc.h>
> @@ -3510,43 +3511,76 @@ static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated,
>   	}
>   }
>   
> -static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
> +static void __init hugetlb_alloc_node(unsigned long start, unsigned long end, void *arg)
>   {
> -	unsigned long i;
> +	struct hstate *h = (struct hstate *)arg;
> +	int i, num = end - start;
> +	nodemask_t node_alloc_noretry;
> +	unsigned long flags;
> +	int next_node = 0;

This should be first_online_node, which may not be zero.

>   
> -	for (i = 0; i < h->max_huge_pages; ++i) {
> -		if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
> +	/* Bit mask controlling how hard we retry per-node allocations.*/
> +	nodes_clear(node_alloc_noretry);
> +
> +	for (i = 0; i < num; ++i) {
> +		struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
> +						&node_alloc_noretry, &next_node);
> +		if (!folio)
>   			break;
> +		spin_lock_irqsave(&hugetlb_lock, flags);

I suspect there will be more contention on this lock when parallelizing.
I want to know why you chose to drop the prep_and_add_allocated_folios()
call in the original hugetlb_pages_alloc_boot().

> +		__prep_account_new_huge_page(h, folio_nid(folio));
> +		enqueue_hugetlb_folio(h, folio);
> +		spin_unlock_irqrestore(&hugetlb_lock, flags);
>   		cond_resched();
>   	}
> +}
>   
> -	return i;
> +static void __init hugetlb_vmemmap_optimize_node(unsigned long start, unsigned long end, void *arg)
> +{
> +	struct hstate *h = (struct hstate *)arg;
> +	int nid = start;
> +
> +	hugetlb_vmemmap_optimize_folios(h, &h->hugepage_freelists[nid]);
>   }
>   
> -static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
> +static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
>   {
>   	unsigned long i;
> -	struct folio *folio;
> -	LIST_HEAD(folio_list);
> -	nodemask_t node_alloc_noretry;
> -
> -	/* Bit mask controlling how hard we retry per-node allocations.*/
> -	nodes_clear(node_alloc_noretry);
>   
>   	for (i = 0; i < h->max_huge_pages; ++i) {
> -		folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
> -						&node_alloc_noretry);
> -		if (!folio)
> +		if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
>   			break;
> -		list_add(&folio->lru, &folio_list);
>   		cond_resched();
>   	}
>   
> -	prep_and_add_allocated_folios(h, &folio_list);
> -
>   	return i;
>   }
>   
> +static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
> +{
> +	struct padata_mt_job job = {
> +		.fn_arg		= h,
> +		.align		= 1,
> +		.numa_aware	= true
> +	};
> +
> +	job.thread_fn	= hugetlb_alloc_node;
> +	job.start	= 0;
> +	job.size	= h->max_huge_pages;
> +	job.min_chunk	= h->max_huge_pages / num_node_state(N_MEMORY) / 2;
> +	job.max_threads	= num_node_state(N_MEMORY) * 2;

I am curious about the magic number 2 used in the assignments of ->min_chunk
and ->max_threads. Does it come from your experiments? I think it should
be explained in a comment here.

And I am also sceptical about the optimization for a small number of hugepage
allocations. Given 4 hugepages to be allocated on a UMA system, job.min_chunk
will be 2 and job.max_threads will be 2. Then 2 workers will be scheduled,
yet each worker will only allocate 2 pages. How much does the scheduling cost?
What if a single worker allocated all 4 pages? Do you have any numbers on
parallelism vs non-parallelism in the small-allocation case? If we cannot gain
anything in this case, I think we should assign a reasonable value to
->min_chunk based on experiments.

Thanks.

> +	padata_do_multithreaded(&job);
> +
> +	job.thread_fn	= hugetlb_vmemmap_optimize_node;
> +	job.start	= 0;
> +	job.size	= num_node_state(N_MEMORY);
> +	job.min_chunk	= 1;
> +	job.max_threads	= num_node_state(N_MEMORY);
> +	padata_do_multithreaded(&job);
> +
> +	return h->nr_huge_pages;
> +}
> +
>   /*
>    * NOTE: this routine is called in different contexts for gigantic and
>    * non-gigantic pages.
Gang Li Jan. 22, 2024, 10:12 a.m. UTC | #2
On 2024/1/22 15:10, Muchun Song wrote:
> On 2024/1/18 20:39, Gang Li wrote:
>> +static void __init hugetlb_alloc_node(unsigned long start, unsigned long end, void *arg)
>>   {
>> -    unsigned long i;
>> +    struct hstate *h = (struct hstate *)arg;
>> +    int i, num = end - start;
>> +    nodemask_t node_alloc_noretry;
>> +    unsigned long flags;
>> +    int next_node = 0;
> 
> This should be first_online_node, which may not be zero.
> 

That's right. Thanks!

>> -    for (i = 0; i < h->max_huge_pages; ++i) {
>> -        if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
>> +    /* Bit mask controlling how hard we retry per-node allocations.*/
>> +    nodes_clear(node_alloc_noretry);
>> +
>> +    for (i = 0; i < num; ++i) {
>> +        struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
>> +                        &node_alloc_noretry, &next_node);
>> +        if (!folio)
>>               break;
>> +        spin_lock_irqsave(&hugetlb_lock, flags);
> I suspect there will be more contention on this lock when parallelizing.

In the worst case, there are only as many threads in contention as there are
NUMA nodes. And in my testing, it doesn't degrade performance, but rather
improves it due to the finer-grained locking.

> I want to know why you chose to drop the prep_and_add_allocated_folios()
> call in the original hugetlb_pages_alloc_boot().

I split it out so that hugetlb_vmemmap_optimize_folios() can be parallelized.

>> +static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
>> +{
>> +    struct padata_mt_job job = {
>> +        .fn_arg        = h,
>> +        .align        = 1,
>> +        .numa_aware    = true
>> +    };
>> +
>> +    job.thread_fn    = hugetlb_alloc_node;
>> +    job.start    = 0;
>> +    job.size    = h->max_huge_pages;
>> +    job.min_chunk    = h->max_huge_pages / num_node_state(N_MEMORY) / 2;
>> +    job.max_threads    = num_node_state(N_MEMORY) * 2;
> 
> I am curious about the magic number 2 used in the assignments of ->min_chunk
> and ->max_threads. Does it come from your experiments? I think it should
> be explained in a comment here.
> 

This is tested and I can perform more detailed tests and provide data.

> And I am also sceptical about the optimization for a small number of hugepage
> allocations. Given 4 hugepages to be allocated on a UMA system, job.min_chunk
> will be 2 and job.max_threads will be 2. Then 2 workers will be scheduled,
> yet each worker will only allocate 2 pages. How much does the scheduling cost?
> What if a single worker allocated all 4 pages? Do you have any numbers on
> parallelism vs non-parallelism in the small-allocation case? If we cannot gain
> anything in this case, I think we should assign a reasonable value to
> ->min_chunk based on experiments.
> 
> Thanks.
>

That's a good suggestion, I'll run some tests and choose the best
values.
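
For reference, one direction the ->min_chunk tuning discussed above could take is sketched below. This is an illustration only, not code from this series: the floor value and the HUGETLB_BOOT_MIN_CHUNK name are hypothetical and would have to come out of the measurements mentioned above.

```c
/* Hypothetical floor on the per-worker chunk size, to be tuned from boot-time measurements. */
#define HUGETLB_BOOT_MIN_CHUNK	1024UL

	job.thread_fn	= hugetlb_alloc_node;
	job.start	= 0;
	job.size	= h->max_huge_pages;
	/*
	 * Allow up to two workers per memory node, but never hand out chunks
	 * smaller than HUGETLB_BOOT_MIN_CHUNK pages, so a request for only a
	 * handful of hugepages stays in one worker instead of paying the
	 * thread-scheduling cost for no measurable gain.
	 */
	job.min_chunk	= max(h->max_huge_pages / num_node_state(N_MEMORY) / 2,
			      HUGETLB_BOOT_MIN_CHUNK);
	job.max_threads	= num_node_state(N_MEMORY) * 2;
	padata_do_multithreaded(&job);
```

With a floor like this, the 4-page UMA example above should collapse to a single worker, while large requests still fan out across nodes.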
Muchun Song Jan. 22, 2024, 11:30 a.m. UTC | #3
> On Jan 22, 2024, at 18:12, Gang Li <gang.li@linux.dev> wrote:
> 
> On 2024/1/22 15:10, Muchun Song wrote:
>> On 2024/1/18 20:39, Gang Li wrote:
>>> +static void __init hugetlb_alloc_node(unsigned long start, unsigned long end, void *arg)
>>>   {
>>> -    unsigned long i;
>>> +    struct hstate *h = (struct hstate *)arg;
>>> +    int i, num = end - start;
>>> +    nodemask_t node_alloc_noretry;
>>> +    unsigned long flags;
>>> +    int next_node = 0;
>> This should be first_online_node which may be not zero.
> 
> That's right. Thanks!
> 
>>> -    for (i = 0; i < h->max_huge_pages; ++i) {
>>> -        if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
>>> +    /* Bit mask controlling how hard we retry per-node allocations.*/
>>> +    nodes_clear(node_alloc_noretry);
>>> +
>>> +    for (i = 0; i < num; ++i) {
>>> +        struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
>>> +                        &node_alloc_noretry, &next_node);
>>> +        if (!folio)
>>>               break;
>>> +        spin_lock_irqsave(&hugetlb_lock, flags);
>> > I suspect there will more contention on this lock when parallelizing.
> 
> In the worst case, there are only as many threads in contention as there are
> NUMA nodes. And in my testing, it doesn't degrade performance, but rather
> improves it due to the finer-grained locking.

So, the performance does not change if you move the lock out of the
loop?

> 
>> I want to know why you chose to drop prep_and_add_allocated_folios()
>> call in the original hugetlb_pages_alloc_boot()?
> 
> I split it out so that hugetlb_vmemmap_optimize_folios() can be parallelized.

Unfortunately, HVO should be enabled before pages go to the pool list.

> 
>>> +static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
>>> +{
>>> +    struct padata_mt_job job = {
>>> +        .fn_arg        = h,
>>> +        .align        = 1,
>>> +        .numa_aware    = true
>>> +    };
>>> +
>>> +    job.thread_fn    = hugetlb_alloc_node;
>>> +    job.start    = 0;
>>> +    job.size    = h->max_huge_pages;
>>> +    job.min_chunk    = h->max_huge_pages / num_node_state(N_MEMORY) / 2;
>>> +    job.max_threads    = num_node_state(N_MEMORY) * 2;
>> I am curious the magic number of 2 used in assignments of ->min_chunk
>> and ->max_threads, does it from your experiment? I thinke it should
>> be a comment here.
> 
> This is tested and I can perform more detailed tests and provide data.
> 
>> And I am also sceptical about the optimization for a small amount of
>> allocation of hugepages. Given 4 hugepags needed to be allocated on UMA
>> system, job.min_chunk will be 2, job.max_threads will be 2. Then, 2
>> workers will be scheduled, however each worker will just allocate 2 pages,
>> how much the cost of scheduling? What if allocate 4 pages in single
>> worker? Do you have any numbers on parallelism vs non-parallelism in
>> a small allocation case? If we cannot gain from this case, I think we shold
>> assign a reasonable value to ->min_chunk based on experiment.
>> Thanks.
>> 
> 
> That's a good suggestion, I'll run some tests and choose the best
> values.
> 
>
Gang Li Jan. 23, 2024, 2:12 a.m. UTC | #4
On 2024/1/22 19:30, Muchun Song wrote:
>> On Jan 22, 2024, at 18:12, Gang Li <gang.li@linux.dev> wrote:
>>
>> On 2024/1/22 15:10, Muchun Song wrote:
>>> On 2024/1/18 20:39, Gang Li wrote:
>>>> +static void __init hugetlb_alloc_node(unsigned long start, unsigned long end, void *arg)
>>>>    {
>>>> -    unsigned long i;
>>>> +    struct hstate *h = (struct hstate *)arg;
>>>> +    int i, num = end - start;
>>>> +    nodemask_t node_alloc_noretry;
>>>> +    unsigned long flags;
>>>> +    int next_node = 0;
>>> This should be first_online_node which may be not zero.
>>
>> That's right. Thanks!
>>
>>>> -    for (i = 0; i < h->max_huge_pages; ++i) {
>>>> -        if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
>>>> +    /* Bit mask controlling how hard we retry per-node allocations.*/
>>>> +    nodes_clear(node_alloc_noretry);
>>>> +
>>>> +    for (i = 0; i < num; ++i) {
>>>> +        struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
>>>> +                        &node_alloc_noretry, &next_node);
>>>> +        if (!folio)
>>>>                break;
>>>> +        spin_lock_irqsave(&hugetlb_lock, flags);
>>>> I suspect there will more contention on this lock when parallelizing.
>>
>> In the worst case, there are only 'numa node number' of threads in
>> contention. And in my testing, it doesn't degrade performance, but
>> rather improves performance due to the reduced granularity.
> 
> So, the performance does not change if you move the lock out of the
> loop?
>

If we move the lock out of the loop, then multi-threading effectively becomes
single-threading, which definitely reduces performance.

```
+       spin_lock_irqsave(&hugetlb_lock, flags);
         for (i = 0; i < num; ++i) {
                struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
                                                &node_alloc_noretry, &next_node);
                 if (!folio)
                         break;
-               spin_lock_irqsave(&hugetlb_lock, flags);
                 __prep_account_new_huge_page(h, folio_nid(folio));
                 enqueue_hugetlb_folio(h, folio);
-               spin_unlock_irqrestore(&hugetlb_lock, flags);
                 cond_resched();
         }
+       spin_unlock_irqrestore(&hugetlb_lock, flags);
  }
```
Muchun Song Jan. 23, 2024, 3:32 a.m. UTC | #5
> On Jan 23, 2024, at 10:12, Gang Li <gang.li@linux.dev> wrote:
> 
> On 2024/1/22 19:30, Muchun Song wrote:
>>> On Jan 22, 2024, at 18:12, Gang Li <gang.li@linux.dev> wrote:
>>> 
>>> On 2024/1/22 15:10, Muchun Song wrote:
>>>> On 2024/1/18 20:39, Gang Li wrote:
>>>>> +static void __init hugetlb_alloc_node(unsigned long start, unsigned long end, void *arg)
>>>>>   {
>>>>> -    unsigned long i;
>>>>> +    struct hstate *h = (struct hstate *)arg;
>>>>> +    int i, num = end - start;
>>>>> +    nodemask_t node_alloc_noretry;
>>>>> +    unsigned long flags;
>>>>> +    int next_node = 0;
>>>> This should be first_online_node which may be not zero.
>>> 
>>> That's right. Thanks!
>>> 
>>>>> -    for (i = 0; i < h->max_huge_pages; ++i) {
>>>>> -        if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
>>>>> +    /* Bit mask controlling how hard we retry per-node allocations.*/
>>>>> +    nodes_clear(node_alloc_noretry);
>>>>> +
>>>>> +    for (i = 0; i < num; ++i) {
>>>>> +        struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
>>>>> +                        &node_alloc_noretry, &next_node);
>>>>> +        if (!folio)
>>>>>               break;
>>>>> +        spin_lock_irqsave(&hugetlb_lock, flags);
>>>>> I suspect there will more contention on this lock when parallelizing.
>>> 
>>> In the worst case, there are only 'numa node number' of threads in
>>> contention. And in my testing, it doesn't degrade performance, but
>>> rather improves performance due to the reduced granularity.
>> So, the performance does not change if you move the lock out of
>> loop?
>> 
> 
> If we move the lock out of the loop, then multi-threading effectively becomes single-threading, which definitely reduces performance.

No. I mean batching the pages into the pool list, just like
prep_and_add_allocated_folios() does.

> 
> ```
> +       spin_lock_irqsave(&hugetlb_lock, flags);
>        for (i = 0; i < num; ++i) {
>                struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
>                                                &node_alloc_noretry, &next_node);
>                if (!folio)
>                        break;
> -               spin_lock_irqsave(&hugetlb_lock, flags);
>                __prep_account_new_huge_page(h, folio_nid(folio));
>                enqueue_hugetlb_folio(h, folio);
> -               spin_unlock_irqrestore(&hugetlb_lock, flags);
>                cond_resched();
>        }
> +       spin_unlock_irqrestore(&hugetlb_lock, flags);
> }
> ```
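
To make the batching suggested above concrete, here is a minimal sketch of a per-worker batched variant. It is an illustration only, not code from this series; it assumes prep_and_add_allocated_folios() keeps its current behavior of running vmemmap optimization before taking hugetlb_lock to account and enqueue the folios, and it starts next_node at first_online_node as discussed earlier in the thread.

```c
static void __init hugetlb_alloc_node(unsigned long start, unsigned long end, void *arg)
{
	struct hstate *h = (struct hstate *)arg;
	int i, num = end - start;
	nodemask_t node_alloc_noretry;
	int next_node = first_online_node;
	LIST_HEAD(folio_list);

	/* Bit mask controlling how hard we retry per-node allocations. */
	nodes_clear(node_alloc_noretry);

	for (i = 0; i < num; ++i) {
		struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
						&node_alloc_noretry, &next_node);
		if (!folio)
			break;
		/* Collect on a per-worker list; no hugetlb_lock taken per folio. */
		list_add(&folio->lru, &folio_list);
		cond_resched();
	}

	/*
	 * One batched pass per worker: optimize vmemmap first, then a single
	 * hugetlb_lock section to account and enqueue the whole list.
	 */
	prep_and_add_allocated_folios(h, &folio_list);
}
```

If the allocation path batches like this, the separate hugetlb_vmemmap_optimize_node pass over the free lists would presumably no longer be needed.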

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index effe5539e545..9b348ba418f5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -35,6 +35,7 @@ 
 #include <linux/delayacct.h>
 #include <linux/memory.h>
 #include <linux/mm_inline.h>
+#include <linux/padata.h>
 
 #include <asm/page.h>
 #include <asm/pgalloc.h>
@@ -3510,43 +3511,76 @@  static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated,
 	}
 }
 
-static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
+static void __init hugetlb_alloc_node(unsigned long start, unsigned long end, void *arg)
 {
-	unsigned long i;
+	struct hstate *h = (struct hstate *)arg;
+	int i, num = end - start;
+	nodemask_t node_alloc_noretry;
+	unsigned long flags;
+	int next_node = 0;
 
-	for (i = 0; i < h->max_huge_pages; ++i) {
-		if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
+	/* Bit mask controlling how hard we retry per-node allocations.*/
+	nodes_clear(node_alloc_noretry);
+
+	for (i = 0; i < num; ++i) {
+		struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
+						&node_alloc_noretry, &next_node);
+		if (!folio)
 			break;
+		spin_lock_irqsave(&hugetlb_lock, flags);
+		__prep_account_new_huge_page(h, folio_nid(folio));
+		enqueue_hugetlb_folio(h, folio);
+		spin_unlock_irqrestore(&hugetlb_lock, flags);
 		cond_resched();
 	}
+}
 
-	return i;
+static void __init hugetlb_vmemmap_optimize_node(unsigned long start, unsigned long end, void *arg)
+{
+	struct hstate *h = (struct hstate *)arg;
+	int nid = start;
+
+	hugetlb_vmemmap_optimize_folios(h, &h->hugepage_freelists[nid]);
 }
 
-static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
+static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
 {
 	unsigned long i;
-	struct folio *folio;
-	LIST_HEAD(folio_list);
-	nodemask_t node_alloc_noretry;
-
-	/* Bit mask controlling how hard we retry per-node allocations.*/
-	nodes_clear(node_alloc_noretry);
 
 	for (i = 0; i < h->max_huge_pages; ++i) {
-		folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
-						&node_alloc_noretry);
-		if (!folio)
+		if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
 			break;
-		list_add(&folio->lru, &folio_list);
 		cond_resched();
 	}
 
-	prep_and_add_allocated_folios(h, &folio_list);
-
 	return i;
 }
 
+static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
+{
+	struct padata_mt_job job = {
+		.fn_arg		= h,
+		.align		= 1,
+		.numa_aware	= true
+	};
+
+	job.thread_fn	= hugetlb_alloc_node;
+	job.start	= 0;
+	job.size	= h->max_huge_pages;
+	job.min_chunk	= h->max_huge_pages / num_node_state(N_MEMORY) / 2;
+	job.max_threads	= num_node_state(N_MEMORY) * 2;
+	padata_do_multithreaded(&job);
+
+	job.thread_fn	= hugetlb_vmemmap_optimize_node;
+	job.start	= 0;
+	job.size	= num_node_state(N_MEMORY);
+	job.min_chunk	= 1;
+	job.max_threads	= num_node_state(N_MEMORY);
+	padata_do_multithreaded(&job);
+
+	return h->nr_huge_pages;
+}
+
 /*
  * NOTE: this routine is called in different contexts for gigantic and
  * non-gigantic pages.