
[v3,1/3] vmalloc: Choose a better start address in vm_area_register_early()

Message ID 20210809093750.131091-2-wangkefeng.wang@huawei.com (mailing list archive)
State New
Series arm64: support page mapping percpu first chunk allocator

Commit Message

Kefeng Wang Aug. 9, 2021, 9:37 a.m. UTC
There are some fixed locations in the vmalloc area that are reserved
in ARM (see iotable_init()) and ARM64 (see map_kernel()), but
pcpu_page_first_chunk() calls vm_area_register_early(), which picks
VMALLOC_START as the start address of the vmap area. That choice can
conflict with the reserved addresses above and then trigger the
BUG_ON() in vm_area_add_early().

Let's choose a start address past the end of the existing address
ranges in vmlist instead of VMALLOC_START, so the BUG_ON() is avoided.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/vmalloc.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)
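
(For illustration only, not part of the patch: a hypothetical
early-boot sequence showing the conflict described above. The fixed
region's placement and size are made up, as is the demo_conflict()
helper; the point is that a reservation at the bottom of the vmalloc
area overlaps the address the old code computes.)

static void __init demo_conflict(struct vm_struct *pcpu_vm)
{
	static struct vm_struct fixed_vm __initdata;

	/*
	 * A fixed mapping reserved early, the way ARM's iotable_init()
	 * or arm64's map_kernel() would reserve one.
	 */
	fixed_vm.addr = (void *)VMALLOC_START;
	fixed_vm.size = SZ_2M;
	vm_area_add_early(&fixed_vm);

	/*
	 * Old behaviour: vm_area_register_early() computes
	 * addr = ALIGN(VMALLOC_START + vm_init_off, align) with
	 * vm_init_off initially 0, so addr lands inside fixed_vm and
	 * the BUG_ON() in vm_area_add_early() fires when the new area
	 * is linked into vmlist.
	 */
	vm_area_register_early(pcpu_vm, PAGE_SIZE);
}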

Comments

Catalin Marinas Aug. 25, 2021, 5:59 p.m. UTC | #1
On Mon, Aug 09, 2021 at 05:37:48PM +0800, Kefeng Wang wrote:
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index d5cd52805149..1e8fe08725b8 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -2238,11 +2238,17 @@ void __init vm_area_add_early(struct vm_struct *vm)
>   */
>  void __init vm_area_register_early(struct vm_struct *vm, size_t align)
>  {
> -	static size_t vm_init_off __initdata;
> -	unsigned long addr;
> -
> -	addr = ALIGN(VMALLOC_START + vm_init_off, align);
> -	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
> +	struct vm_struct *head = vmlist, *curr, *next;
> +	unsigned long addr = ALIGN(VMALLOC_START, align);
> +
> +	while (head != NULL) {

Nitpick: I'd use the same pattern as in vm_area_add_early(), i.e. a
'for' loop. You might as well insert it directly than calling the add
function and going through the loop again. Not a strong preference
either way.
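
(For context, the 'for' loop pattern being referred to:
vm_area_add_early() as it reads in mm/vmalloc.c at the time of this
series, reproduced here for reference.)

void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}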

> +		next = head->next;
> +		curr = head;
> +		head = next;
> +		addr = ALIGN((unsigned long)curr->addr + curr->size, align);
> +		if (next && (unsigned long)next->addr - addr > vm->size)

Is greater or equal sufficient?

> +			break;
> +	}
>  
>  	vm->addr = (void *)addr;

Another nitpick: it's very unlikely on a 64-bit architecture but not
impossible on 32-bit to hit VMALLOC_END here. Maybe some BUG_ON.
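
(One illustrative form, written as a subtraction so that addr + vm->size
cannot wrap around on 32-bit: BUG_ON(addr > VMALLOC_END - vm->size);)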
Kefeng Wang Aug. 27, 2021, 8:36 a.m. UTC | #2
On 2021/8/26 1:59, Catalin Marinas wrote:
> On Mon, Aug 09, 2021 at 05:37:48PM +0800, Kefeng Wang wrote:
>> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
>> index d5cd52805149..1e8fe08725b8 100644
>> --- a/mm/vmalloc.c
>> +++ b/mm/vmalloc.c
>> @@ -2238,11 +2238,17 @@ void __init vm_area_add_early(struct vm_struct *vm)
>>    */
>>   void __init vm_area_register_early(struct vm_struct *vm, size_t align)
>>   {
>> -	static size_t vm_init_off __initdata;
>> -	unsigned long addr;
>> -
>> -	addr = ALIGN(VMALLOC_START + vm_init_off, align);
>> -	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
>> +	struct vm_struct *head = vmlist, *curr, *next;
>> +	unsigned long addr = ALIGN(VMALLOC_START, align);
>> +
>> +	while (head != NULL) {
> Nitpick: I'd use the same pattern as in vm_area_add_early(), i.e. a
> 'for' loop. You might as well insert it directly than calling the add
> function and going through the loop again. Not a strong preference
> either way.
>
>> +		next = head->next;
>> +		curr = head;
>> +		head = next;
>> +		addr = ALIGN((unsigned long)curr->addr + curr->size, align);
>> +		if (next && (unsigned long)next->addr - addr > vm->size)
> Is greater or equal sufficient?
>
>> +			break;
>> +	}
>>   
>>   	vm->addr = (void *)addr;
> Another nitpick: it's very unlikely on a 64-bit architecture but not
> impossible on 32-bit to hit VMALLOC_END here. Maybe some BUG_ON.

Hi Catalin, thanks for your review, I will update in the next version.

Could you take a look at the following change, is it OK?

void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	struct vm_struct *next, *cur, **p;
	unsigned long addr = ALIGN(VMALLOC_START, align);

	BUG_ON(vmap_initialized);

	for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
		next = cur->next;
		addr = ALIGN((unsigned long)cur->addr + cur->size, align);
		if (next && (unsigned long)next->addr - addr >= vm->size) {
			p = &cur->next;
			break;
		}
	}

	BUG_ON(addr > VMALLOC_END - vm->size);
	vm->addr = (void *)addr;
	vm->next = *p;
	*p = vm;
}
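
(Two notes on the loop above: p is pointed at &cur->next before the
break because breaking skips the for loop's increment and *p is later
used as the insertion link; and the gap test uses >=, per the review
comment, since a gap of exactly vm->size bytes is enough to hold the
new area.)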



Patch

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d5cd52805149..1e8fe08725b8 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2238,11 +2238,17 @@  void __init vm_area_add_early(struct vm_struct *vm)
  */
 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
 {
-	static size_t vm_init_off __initdata;
-	unsigned long addr;
-
-	addr = ALIGN(VMALLOC_START + vm_init_off, align);
-	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
+	struct vm_struct *head = vmlist, *curr, *next;
+	unsigned long addr = ALIGN(VMALLOC_START, align);
+
+	while (head != NULL) {
+		next = head->next;
+		curr = head;
+		head = next;
+		addr = ALIGN((unsigned long)curr->addr + curr->size, align);
+		if (next && (unsigned long)next->addr - addr > vm->size)
+			break;
+	}
 
 	vm->addr = (void *)addr;