
[RFC,V2,2/3] arm64/mm: Define arch_get_mappable_range()

Message ID 1606706992-26656-3-git-send-email-anshuman.khandual@arm.com (mailing list archive)
State New, archived
Series mm/hotplug: Pre-validate the address range with platform

Commit Message

Anshuman Khandual Nov. 30, 2020, 3:29 a.m. UTC
This overrides arch_get_mappable_range() on the arm64 platform, which will be
used by the recently added generic framework. It drops inside_linear_region()
and the subsequent check in arch_add_memory(), which are no longer required.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 arch/arm64/mm/mmu.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)
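
For context, a minimal sketch of how the generic framework referenced above
might consume this hook. The __weak fallback and the memhp_range_allowed()
helper shown here are assumptions for illustration only; their exact names
and placement come from the other patches in this series, not from this one:

#include <linux/types.h>
#include <linux/range.h>
#include <linux/compiler.h>

/* Generic default: no restriction unless an arch overrides this hook. */
struct range __weak arch_get_mappable_range(void)
{
	return (struct range) {
		.start = 0UL,
		.end   = -1ULL,
	};
}

/* Generic pre-validation used by hotplug callers (name assumed). */
static bool memhp_range_allowed(u64 start, u64 size)
{
	struct range range = arch_get_mappable_range();
	u64 end = start + size - 1;

	/* Reject zero-sized or wrapping requests, then check containment. */
	if (!size || end < start)
		return false;

	return start >= range.start && end <= range.end;
}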

Comments

David Hildenbrand Dec. 2, 2020, 9:26 a.m. UTC | #1
On 30.11.20 04:29, Anshuman Khandual wrote:
> This overrides arch_get_mappable_range() on the arm64 platform, which will be
> used by the recently added generic framework. It drops inside_linear_region()
> and the subsequent check in arch_add_memory(), which are no longer required.
> 
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Cc: Will Deacon <will@kernel.org>
> Cc: Ard Biesheuvel <ardb@kernel.org>
> Cc: Mark Rutland <mark.rutland@arm.com>
> Cc: David Hildenbrand <david@redhat.com>
> Cc: linux-arm-kernel@lists.infradead.org
> Cc: linux-kernel@vger.kernel.org
> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
> ---
>  arch/arm64/mm/mmu.c | 14 ++++++--------
>  1 file changed, 6 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index ca692a815731..49ec8f2838f2 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -1444,16 +1444,19 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
>  	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
>  }
>  
> -static bool inside_linear_region(u64 start, u64 size)
> +struct range arch_get_mappable_range(void)
>  {
> +	struct range memhp_range;
> +
>  	/*
>  	 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
>  	 * accommodating both its ends but excluding PAGE_END. Max physical
>  	 * range which can be mapped inside this linear mapping range, must
>  	 * also be derived from its end points.
>  	 */
> -	return start >= __pa(_PAGE_OFFSET(vabits_actual)) &&
> -	       (start + size - 1) <= __pa(PAGE_END - 1);
> +	memhp_range.start = __pa(_PAGE_OFFSET(vabits_actual));
> +	memhp_range.end =  __pa(PAGE_END - 1);
> +	return memhp_range;
>  }
>  
>  int arch_add_memory(int nid, u64 start, u64 size,
> @@ -1461,11 +1464,6 @@ int arch_add_memory(int nid, u64 start, u64 size,
>  {
>  	int ret, flags = 0;
>  
> -	if (!inside_linear_region(start, size)) {
> -		pr_err("[%llx %llx] is outside linear mapping region\n", start, start + size);
> -		return -EINVAL;
> -	}

As discussed, I think something like a VM_BUG_ON() here might make
sense, indicating that we require the caller to validate upfront. The same
applies to the s390x variant.

Thanks!

> -
>  	if (rodata_full || debug_pagealloc_enabled())
>  		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>  
>
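
A minimal sketch of the placement being suggested, assuming the generic
pre-validation helper ends up being called memhp_range_allowed() (the name is
an assumption here, not something this thread settles):

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	int ret, flags = 0;

	/*
	 * The generic hotplug path is expected to have validated the range
	 * already; catch any caller that skipped it, in debug builds only.
	 */
	VM_BUG_ON(!memhp_range_allowed(start, size));

	if (rodata_full || debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	/* ... remainder of arch_add_memory() unchanged ... */
}
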
Anshuman Khandual Dec. 2, 2020, 12:17 p.m. UTC | #2
On 12/2/20 2:56 PM, David Hildenbrand wrote:
> On 30.11.20 04:29, Anshuman Khandual wrote:
>> This overrides arch_get_mappable_range() on the arm64 platform, which will be
>> used by the recently added generic framework. It drops inside_linear_region()
>> and the subsequent check in arch_add_memory(), which are no longer required.
>>
>> Cc: Catalin Marinas <catalin.marinas@arm.com>
>> Cc: Will Deacon <will@kernel.org>
>> Cc: Ard Biesheuvel <ardb@kernel.org>
>> Cc: Mark Rutland <mark.rutland@arm.com>
>> Cc: David Hildenbrand <david@redhat.com>
>> Cc: linux-arm-kernel@lists.infradead.org
>> Cc: linux-kernel@vger.kernel.org
>> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
>> ---
>>  arch/arm64/mm/mmu.c | 14 ++++++--------
>>  1 file changed, 6 insertions(+), 8 deletions(-)
>>
>> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
>> index ca692a815731..49ec8f2838f2 100644
>> --- a/arch/arm64/mm/mmu.c
>> +++ b/arch/arm64/mm/mmu.c
>> @@ -1444,16 +1444,19 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
>>  	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
>>  }
>>  
>> -static bool inside_linear_region(u64 start, u64 size)
>> +struct range arch_get_mappable_range(void)
>>  {
>> +	struct range memhp_range;
>> +
>>  	/*
>>  	 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
>>  	 * accommodating both its ends but excluding PAGE_END. Max physical
>>  	 * range which can be mapped inside this linear mapping range, must
>>  	 * also be derived from its end points.
>>  	 */
>> -	return start >= __pa(_PAGE_OFFSET(vabits_actual)) &&
>> -	       (start + size - 1) <= __pa(PAGE_END - 1);
>> +	memhp_range.start = __pa(_PAGE_OFFSET(vabits_actual));
>> +	memhp_range.end =  __pa(PAGE_END - 1);
>> +	return memhp_range;
>>  }
>>  
>>  int arch_add_memory(int nid, u64 start, u64 size,
>> @@ -1461,11 +1464,6 @@ int arch_add_memory(int nid, u64 start, u64 size,
>>  {
>>  	int ret, flags = 0;
>>  
>> -	if (!inside_linear_region(start, size)) {
>> -		pr_err("[%llx %llx] is outside linear mapping region\n", start, start + size);
>> -		return -EINVAL;
>> -	}
> As discussed, I think something like a VM_BUG_ON() here might make
> sense, indicating that we require the caller to validate upfront. The same
> applies to the s390x variant.

Sure, will do.

> 
> Thanks!
>

Patch

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index ca692a815731..49ec8f2838f2 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1444,16 +1444,19 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
 	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
 }
 
-static bool inside_linear_region(u64 start, u64 size)
+struct range arch_get_mappable_range(void)
 {
+	struct range memhp_range;
+
 	/*
 	 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
 	 * accommodating both its ends but excluding PAGE_END. Max physical
 	 * range which can be mapped inside this linear mapping range, must
 	 * also be derived from its end points.
 	 */
-	return start >= __pa(_PAGE_OFFSET(vabits_actual)) &&
-	       (start + size - 1) <= __pa(PAGE_END - 1);
+	memhp_range.start = __pa(_PAGE_OFFSET(vabits_actual));
+	memhp_range.end =  __pa(PAGE_END - 1);
+	return memhp_range;
 }
 
 int arch_add_memory(int nid, u64 start, u64 size,
@@ -1461,11 +1464,6 @@ int arch_add_memory(int nid, u64 start, u64 size,
 {
 	int ret, flags = 0;
 
-	if (!inside_linear_region(start, size)) {
-		pr_err("[%llx %llx] is outside linear mapping region\n", start, start + size);
-		return -EINVAL;
-	}
-
 	if (rodata_full || debug_pagealloc_enabled())
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;