[PATCHv6,7/8] arm64: efi: Use ioremap_exec for code sections

Message ID 1416606645-25633-8-git-send-email-lauraa@codeaurora.org (mailing list archive)
State New, archived

Commit Message

Laura Abbott Nov. 21, 2014, 9:50 p.m. UTC
ioremap is not guaranteed to return memory with proper
execution permissions. Introduce ioremap_exec which will
ensure that permission bits are set as expected for EFI
code sections.

Tested-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
---
 arch/arm64/include/asm/io.h      |  1 +
 arch/arm64/include/asm/pgtable.h |  1 +
 arch/arm64/kernel/efi.c          | 12 +++++++++++-
 arch/arm64/mm/ioremap.c          | 11 +++++++++++
 4 files changed, 24 insertions(+), 1 deletion(-)
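
For illustration, a minimal sketch of the intended call pattern (a sketch
only: the descriptor fields follow the kernel's efi_memory_desc_t, and the
error handling is abbreviated):

    /*
     * Map a runtime services code region so the kernel can branch into
     * it.  A plain ioremap()/ioremap_cache() mapping may carry PTE_PXN,
     * in which case the instruction fetch would fault.
     */
    void __iomem *base = ioremap_exec(md->phys_addr,
                                      md->num_pages << EFI_PAGE_SHIFT);
    if (!base)
        return -ENOMEM;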

Comments

Mark Rutland Nov. 25, 2014, 5:26 p.m. UTC | #1
Hi Laura,

On Fri, Nov 21, 2014 at 09:50:44PM +0000, Laura Abbott wrote:
> ioremap is not guaranteed to return memory with proper
> execution permissions. Introduce ioremap_exec which will
> ensure that permission bits are set as expected for EFI
> code sections.
> 
> Tested-by: Kees Cook <keescook@chromium.org>
> Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
> ---
>  arch/arm64/include/asm/io.h      |  1 +
>  arch/arm64/include/asm/pgtable.h |  1 +
>  arch/arm64/kernel/efi.c          | 12 +++++++++++-
>  arch/arm64/mm/ioremap.c          | 11 +++++++++++
>  4 files changed, 24 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
> index 79f1d519..7dd8465 100644
> --- a/arch/arm64/include/asm/io.h
> +++ b/arch/arm64/include/asm/io.h
> @@ -230,6 +230,7 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
>  extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
>  extern void __iounmap(volatile void __iomem *addr);
>  extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
> +extern void __iomem *ioremap_exec(phys_addr_t phys_addr, size_t size);
>  
>  #define ioremap(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
>  #define ioremap_nocache(addr, size)	__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index 41a43bf..9b1d9d0 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -65,6 +65,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
>  #define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
>  #define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
>  #define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
> +#define PROT_NORMAL_EXEC	(PROT_DEFAULT | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
>  
>  #define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
>  #define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
> diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
> index 95c49eb..9e41f95 100644
> --- a/arch/arm64/kernel/efi.c
> +++ b/arch/arm64/kernel/efi.c
> @@ -47,6 +47,14 @@ static int __init is_normal_ram(efi_memory_desc_t *md)
>  	return 0;
>  }
>  
> +static int __init is_code(efi_memory_desc_t *md)
> +{
> +	if (md->attribute & EFI_RUNTIME_SERVICES_CODE)
> +		return 1;
> +	return 0;
> +}
> +
> +
>  static void __init efi_setup_idmap(void)
>  {
>  	struct memblock_region *r;
> @@ -338,7 +346,9 @@ static int __init remap_region(efi_memory_desc_t *md, void **new)
>  	memrange_efi_to_native(&paddr, &npages);
>  	size = npages << PAGE_SHIFT;
>  
> -	if (is_normal_ram(md))
> +	if (is_code(md))
> +		vaddr = (__force u64)ioremap_exec(paddr, size);
> +	else if (is_normal_ram(md))
>  		vaddr = (__force u64)ioremap_cache(paddr, size);
>  	else
>  		vaddr = (__force u64)ioremap(paddr, size);

All of the above looks fine to me.

> diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
> index cbb99c8..b998441 100644
> --- a/arch/arm64/mm/ioremap.c
> +++ b/arch/arm64/mm/ioremap.c
> @@ -103,6 +103,17 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
>  }
>  EXPORT_SYMBOL(ioremap_cache);
>  
> +void __iomem *ioremap_exec(phys_addr_t phys_addr, size_t size)
> +{
> +	/* For normal memory we already have a cacheable mapping. */
> +	if (pfn_valid(__phys_to_pfn(phys_addr)))
> +		return (void __iomem *)__phys_to_virt(phys_addr);

Is this guaranteed to be executable in all cases once the stricter page
permissions are in force?

Thanks,
Mark.

> +
> +	return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL_EXEC),
> +				__builtin_return_address(0));
> +}
> +EXPORT_SYMBOL(ioremap_exec);
> +
>  /*
>   * Must be called after early_fixmap_init
>   */
> -- 
> Qualcomm Innovation Center, Inc.
> Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum, a Linux Foundation Collaborative Project
> 
>
Laura Abbott Nov. 25, 2014, 6:57 p.m. UTC | #2
On 11/25/2014 9:26 AM, Mark Rutland wrote:
> Hi Laura,
>
> On Fri, Nov 21, 2014 at 09:50:44PM +0000, Laura Abbott wrote:
>> ioremap is not guaranteed to return memory with proper
>> execution permissions. Introduce ioremap_exec which will
>> ensure that permission bits are set as expected for EFI
>> code sections.
>>
>> Tested-by: Kees Cook <keescook@chromium.org>
>> Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
>> ---
>>   arch/arm64/include/asm/io.h      |  1 +
>>   arch/arm64/include/asm/pgtable.h |  1 +
>>   arch/arm64/kernel/efi.c          | 12 +++++++++++-
>>   arch/arm64/mm/ioremap.c          | 11 +++++++++++
>>   4 files changed, 24 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
>> index 79f1d519..7dd8465 100644
>> --- a/arch/arm64/include/asm/io.h
>> +++ b/arch/arm64/include/asm/io.h
>> @@ -230,6 +230,7 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
>>   extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
>>   extern void __iounmap(volatile void __iomem *addr);
>>   extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
>> +extern void __iomem *ioremap_exec(phys_addr_t phys_addr, size_t size);
>>
>>   #define ioremap(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
>>   #define ioremap_nocache(addr, size)	__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
>> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
>> index 41a43bf..9b1d9d0 100644
>> --- a/arch/arm64/include/asm/pgtable.h
>> +++ b/arch/arm64/include/asm/pgtable.h
>> @@ -65,6 +65,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
>>   #define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
>>   #define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
>>   #define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
>> +#define PROT_NORMAL_EXEC	(PROT_DEFAULT | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
>>
>>   #define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
>>   #define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
>> diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
>> index 95c49eb..9e41f95 100644
>> --- a/arch/arm64/kernel/efi.c
>> +++ b/arch/arm64/kernel/efi.c
>> @@ -47,6 +47,14 @@ static int __init is_normal_ram(efi_memory_desc_t *md)
>>   	return 0;
>>   }
>>
>> +static int __init is_code(efi_memory_desc_t *md)
>> +{
>> +	if (md->attribute & EFI_RUNTIME_SERVICES_CODE)
>> +		return 1;
>> +	return 0;
>> +}
>> +
>> +
>>   static void __init efi_setup_idmap(void)
>>   {
>>   	struct memblock_region *r;
>> @@ -338,7 +346,9 @@ static int __init remap_region(efi_memory_desc_t *md, void **new)
>>   	memrange_efi_to_native(&paddr, &npages);
>>   	size = npages << PAGE_SHIFT;
>>
>> -	if (is_normal_ram(md))
>> +	if (is_code(md))
>> +		vaddr = (__force u64)ioremap_exec(paddr, size);
>> +	else if (is_normal_ram(md))
>>   		vaddr = (__force u64)ioremap_cache(paddr, size);
>>   	else
>>   		vaddr = (__force u64)ioremap(paddr, size);
>
> All of the above looks fine to me.
>
>> diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
>> index cbb99c8..b998441 100644
>> --- a/arch/arm64/mm/ioremap.c
>> +++ b/arch/arm64/mm/ioremap.c
>> @@ -103,6 +103,17 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
>>   }
>>   EXPORT_SYMBOL(ioremap_cache);
>>
>> +void __iomem *ioremap_exec(phys_addr_t phys_addr, size_t size)
>> +{
>> +	/* For normal memory we already have a cacheable mapping. */
>> +	if (pfn_valid(__phys_to_pfn(phys_addr)))
>> +		return (void __iomem *)__phys_to_virt(phys_addr);
>
> Is this guaranteed to be executable in all cases once the stricter page
> permissions are in force?
>

No, but the updated version of this patch, which adds the stricter page
permissions, also adds a call to update the page permissions to be executable.
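
A rough sketch of what that update might look like (a sketch only; the
set_memory_x()-style helper and its use here are assumptions, not the
actual code from the updated series):

    /*
     * Hypothetical: the region is already covered by the linear mapping,
     * so clear PTE_PXN on the pages backing it instead of remapping.
     */
    unsigned long vaddr = (unsigned long)__phys_to_virt(paddr);
    int ret = set_memory_x(vaddr, size >> PAGE_SHIFT);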
  
> Thanks,
> Mark.
>

Thanks,
Laura
Catalin Marinas Dec. 2, 2014, 5:15 p.m. UTC | #3
On Fri, Nov 21, 2014 at 09:50:44PM +0000, Laura Abbott wrote:
> diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
> index 95c49eb..9e41f95 100644
> --- a/arch/arm64/kernel/efi.c
> +++ b/arch/arm64/kernel/efi.c
> @@ -47,6 +47,14 @@ static int __init is_normal_ram(efi_memory_desc_t *md)
>  	return 0;
>  }
>  
> +static int __init is_code(efi_memory_desc_t *md)
> +{
> +	if (md->attribute & EFI_RUNTIME_SERVICES_CODE)
> +		return 1;
> +	return 0;
> +}
> +
> +
>  static void __init efi_setup_idmap(void)
>  {
>  	struct memblock_region *r;
> @@ -338,7 +346,9 @@ static int __init remap_region(efi_memory_desc_t *md, void **new)
>  	memrange_efi_to_native(&paddr, &npages);
>  	size = npages << PAGE_SHIFT;
>  
> -	if (is_normal_ram(md))
> +	if (is_code(md))
> +		vaddr = (__force u64)ioremap_exec(paddr, size);
> +	else if (is_normal_ram(md))
>  		vaddr = (__force u64)ioremap_cache(paddr, size);
>  	else
>  		vaddr = (__force u64)ioremap(paddr, size);

What I don't understand is that currently the above remap_region()
function only uses ioremap_cache() and ignores any code attributes. So
we end up with PROT_NORMAL which is non-executable.
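
Concretely, from the pgtable.h hunk above, the two protections differ only
in the privileged execute-never bit:

    /*
     * PROT_NORMAL      = PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL)
     * PROT_NORMAL_EXEC = PROT_DEFAULT |           PTE_UXN | PTE_ATTRINDX(MT_NORMAL)
     *
     * so a PROT_NORMAL mapping faults on kernel instruction fetch, while
     * PROT_NORMAL_EXEC keeps user execution forbidden (UXN) but lets the
     * kernel execute from the mapping:
     */
    BUILD_BUG_ON(PROT_NORMAL_EXEC != (PROT_NORMAL & ~PTE_PXN));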

Is the current remap_region() function broken, or do we not expect it to
be called with any EFI_RUNTIME_SERVICES_CODE region?

> diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
> index cbb99c8..b998441 100644
> --- a/arch/arm64/mm/ioremap.c
> +++ b/arch/arm64/mm/ioremap.c
> @@ -103,6 +103,17 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
>  }
>  EXPORT_SYMBOL(ioremap_cache);
>  
> +void __iomem *ioremap_exec(phys_addr_t phys_addr, size_t size)
> +{
> +	/* For normal memory we already have a cacheable mapping. */
> +	if (pfn_valid(__phys_to_pfn(phys_addr)))
> +		return (void __iomem *)__phys_to_virt(phys_addr);
> +
> +	return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL_EXEC),
> +				__builtin_return_address(0));

In addition to what I said above, do we expect ioremap_exec() to be
called on non-pfn_valid() pages?
Ard Biesheuvel Dec. 2, 2014, 5:20 p.m. UTC | #4
On 2 December 2014 at 18:15, Catalin Marinas <catalin.marinas@arm.com> wrote:
> On Fri, Nov 21, 2014 at 09:50:44PM +0000, Laura Abbott wrote:
>> diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
>> index 95c49eb..9e41f95 100644
>> --- a/arch/arm64/kernel/efi.c
>> +++ b/arch/arm64/kernel/efi.c
>> @@ -47,6 +47,14 @@ static int __init is_normal_ram(efi_memory_desc_t *md)
>>       return 0;
>>  }
>>
>> +static int __init is_code(efi_memory_desc_t *md)
>> +{
>> +     if (md->attribute & EFI_RUNTIME_SERVICES_CODE)
>> +             return 1;
>> +     return 0;
>> +}
>> +
>> +
>>  static void __init efi_setup_idmap(void)
>>  {
>>       struct memblock_region *r;
>> @@ -338,7 +346,9 @@ static int __init remap_region(efi_memory_desc_t *md, void **new)
>>       memrange_efi_to_native(&paddr, &npages);
>>       size = npages << PAGE_SHIFT;
>>
>> -     if (is_normal_ram(md))
>> +     if (is_code(md))
>> +             vaddr = (__force u64)ioremap_exec(paddr, size);
>> +     else if (is_normal_ram(md))
>>               vaddr = (__force u64)ioremap_cache(paddr, size);
>>       else
>>               vaddr = (__force u64)ioremap(paddr, size);
>
> What I don't understand is that currently the above remap_region()
> function only uses ioremap_cache() and ignores any code attributes. So
> we end up with PROT_NORMAL which is non-executable.
>

ioremap_cache() reuses the linear mapping if it covers the ioremapped
physical address.
That is why it currently works by accident.
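
For reference, the relevant logic in arm64's ioremap_cache() (mirrored by
the ioremap_exec() hunk below) is the pfn_valid() short-circuit:

    /*
     * RAM covered by the linear mapping is returned as-is, so the
     * caller inherits whatever permissions the linear map has.
     */
    if (pfn_valid(__phys_to_pfn(phys_addr)))
        return (void __iomem *)__phys_to_virt(phys_addr);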

> Is the current remap_region() function broken, or do we not expect it to
> be called with any EFI_RUNTIME_SERVICES_CODE region?
>

It is called with such regions, but once we remove the UEFI reserved
regions from the linear mapping, this breaks down.
My virtmap series for 3.20 (for kexec with efi, mostly) removes all of
this code, in fact, so this patch will not even be necessary in that
case.

>> diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
>> index cbb99c8..b998441 100644
>> --- a/arch/arm64/mm/ioremap.c
>> +++ b/arch/arm64/mm/ioremap.c
>> @@ -103,6 +103,17 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
>>  }
>>  EXPORT_SYMBOL(ioremap_cache);
>>
>> +void __iomem *ioremap_exec(phys_addr_t phys_addr, size_t size)
>> +{
>> +     /* For normal memory we already have a cacheable mapping. */
>> +     if (pfn_valid(__phys_to_pfn(phys_addr)))
>> +             return (void __iomem *)__phys_to_virt(phys_addr);
>> +
>> +     return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL_EXEC),
>> +                             __builtin_return_address(0));
>
> In addition to what I said above, do we expect ioremap_exec() to be
> called on non-pfn_valid() pages?
>

I think introducing ioremap_exec() strictly for EFI at this point may
be redundant.
I intend to send the next version of the EFI virtmap for kexec series tomorrow.

Patch

diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 79f1d519..7dd8465 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -230,6 +230,7 @@  extern void __memset_io(volatile void __iomem *, int, size_t);
 extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
 extern void __iounmap(volatile void __iomem *addr);
 extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
+extern void __iomem *ioremap_exec(phys_addr_t phys_addr, size_t size);
 
 #define ioremap(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_nocache(addr, size)	__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 41a43bf..9b1d9d0 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -65,6 +65,7 @@  extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
 #define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_NORMAL_EXEC	(PROT_DEFAULT | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
 
 #define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 95c49eb..9e41f95 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -47,6 +47,14 @@  static int __init is_normal_ram(efi_memory_desc_t *md)
 	return 0;
 }
 
+static int __init is_code(efi_memory_desc_t *md)
+{
+	if (md->attribute & EFI_RUNTIME_SERVICES_CODE)
+		return 1;
+	return 0;
+}
+
+
 static void __init efi_setup_idmap(void)
 {
 	struct memblock_region *r;
@@ -338,7 +346,9 @@  static int __init remap_region(efi_memory_desc_t *md, void **new)
 	memrange_efi_to_native(&paddr, &npages);
 	size = npages << PAGE_SHIFT;
 
-	if (is_normal_ram(md))
+	if (is_code(md))
+		vaddr = (__force u64)ioremap_exec(paddr, size);
+	else if (is_normal_ram(md))
 		vaddr = (__force u64)ioremap_cache(paddr, size);
 	else
 		vaddr = (__force u64)ioremap(paddr, size);
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index cbb99c8..b998441 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -103,6 +103,17 @@  void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
 }
 EXPORT_SYMBOL(ioremap_cache);
 
+void __iomem *ioremap_exec(phys_addr_t phys_addr, size_t size)
+{
+	/* For normal memory we already have a cacheable mapping. */
+	if (pfn_valid(__phys_to_pfn(phys_addr)))
+		return (void __iomem *)__phys_to_virt(phys_addr);
+
+	return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL_EXEC),
+				__builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_exec);
+
 /*
  * Must be called after early_fixmap_init
  */