Patchwork [v1,1/3] x86/mm: Centralize PMD flags in sme_encrypt_kernel()

login
register
mail settings
Submitter Tom Lendacky
Date Dec. 7, 2017, 11:33 p.m.
Message ID <20171207233352.29646.52076.stgit@tlendack-t1.amdoffice.net>
Download mbox | patch
Permalink /patch/10101255/
State New
Headers show

Comments

Tom Lendacky - Dec. 7, 2017, 11:33 p.m.
In preparation for encrypting more than just the kernel during early
boot processing, centralize the use of the PMD flag settings based
on the type of mapping desired.  When 4KB aligned encryption is added,
this will allow either PTE flags or large page PMD flags to be used
without requiring the caller to adjust.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
 arch/x86/mm/mem_encrypt.c |  109 +++++++++++++++++++++++++--------------------
 1 file changed, 60 insertions(+), 49 deletions(-)
Borislav Petkov - Dec. 20, 2017, 7:13 p.m.
On Thu, Dec 07, 2017 at 05:33:52PM -0600, Tom Lendacky wrote:
> In preparation for encrypting more than just the kernel during early
> boot processing, centralize the use of the PMD flag settings based
> on the type of mapping desired.  When 4KB aligned encryption is added,
> this will allow either PTE flags or large page PMD flags to be used
> without requiring the caller to adjust.
> 
> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
> ---
>  arch/x86/mm/mem_encrypt.c |  109 +++++++++++++++++++++++++--------------------
>  1 file changed, 60 insertions(+), 49 deletions(-)
> 
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index d9a9e9f..2d8404b 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -464,6 +464,8 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
>  	set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
>  }
>  
> +static void *pgtable_area;

Ewww, a global variable which gets manipulated by functions. Can we not
do that pls?

sme_populate_pgd() used to return it. Why change that?

> +
>  static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
>  				 unsigned long end)
>  {
> @@ -484,10 +486,16 @@ static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
>  #define PGD_FLAGS	_KERNPG_TABLE_NOENC
>  #define P4D_FLAGS	_KERNPG_TABLE_NOENC
>  #define PUD_FLAGS	_KERNPG_TABLE_NOENC
> -#define PMD_FLAGS	(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
>  
> -static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
> -				     unsigned long vaddr, pmdval_t pmd_val)
> +#define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
> +
> +#define PMD_FLAGS_DEC		PMD_FLAGS_LARGE
> +#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
> +				 (_PAGE_PAT | _PAGE_PWT))
> +#define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)

Align vertically.

Rest looks ok.
Tom Lendacky - Dec. 20, 2017, 7:59 p.m.
On 12/20/2017 1:13 PM, Borislav Petkov wrote:
> On Thu, Dec 07, 2017 at 05:33:52PM -0600, Tom Lendacky wrote:
>> In preparation for encrypting more than just the kernel during early
>> boot processing, centralize the use of the PMD flag settings based
>> on the type of mapping desired.  When 4KB aligned encryption is added,
>> this will allow either PTE flags or large page PMD flags to be used
>> without requiring the caller to adjust.
>>
>> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
>> ---
>>  arch/x86/mm/mem_encrypt.c |  109 +++++++++++++++++++++++++--------------------
>>  1 file changed, 60 insertions(+), 49 deletions(-)
>>
>> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
>> index d9a9e9f..2d8404b 100644
>> --- a/arch/x86/mm/mem_encrypt.c
>> +++ b/arch/x86/mm/mem_encrypt.c
>> @@ -464,6 +464,8 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
>>  	set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
>>  }
>>  
>> +static void *pgtable_area;
> 
> Ewww, a global variable which gets manipulated by functions. Can we not
> do that pls?
> 
> sme_populate_pgd() used to return it. Why change that?

It was starting to get pretty hairy with all the parameters and return
values that were needed once the second patch was introduced.  I'll look at
what I can do to avoid the global, maybe pass in the address of the
variable for updating within the function or combining the parameters into
a struct.

> 
>> +
>>  static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
>>  				 unsigned long end)
>>  {
>> @@ -484,10 +486,16 @@ static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
>>  #define PGD_FLAGS	_KERNPG_TABLE_NOENC
>>  #define P4D_FLAGS	_KERNPG_TABLE_NOENC
>>  #define PUD_FLAGS	_KERNPG_TABLE_NOENC
>> -#define PMD_FLAGS	(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
>>  
>> -static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
>> -				     unsigned long vaddr, pmdval_t pmd_val)
>> +#define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
>> +
>> +#define PMD_FLAGS_DEC		PMD_FLAGS_LARGE
>> +#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
>> +				 (_PAGE_PAT | _PAGE_PWT))
>> +#define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)
> 
> Align vertically.

Ok

Thanks,
Tom

> 
> Rest looks ok.
>

Patch

diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index d9a9e9f..2d8404b 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -464,6 +464,8 @@  void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
 	set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
 }
 
+static void *pgtable_area;
+
 static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
 				 unsigned long end)
 {
@@ -484,10 +486,16 @@  static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
 #define PGD_FLAGS	_KERNPG_TABLE_NOENC
 #define P4D_FLAGS	_KERNPG_TABLE_NOENC
 #define PUD_FLAGS	_KERNPG_TABLE_NOENC
-#define PMD_FLAGS	(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
 
-static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
-				     unsigned long vaddr, pmdval_t pmd_val)
+#define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
+
+#define PMD_FLAGS_DEC		PMD_FLAGS_LARGE
+#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
+				 (_PAGE_PAT | _PAGE_PWT))
+#define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)
+
+static void __init sme_populate_pgd(pgd_t *pgd_base, unsigned long vaddr,
+				    unsigned long paddr, pmdval_t pmd_flags)
 {
 	pgd_t *pgd_p;
 	p4d_t *p4d_p;
@@ -538,7 +546,7 @@  static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
 	pud_p += pud_index(vaddr);
 	if (native_pud_val(*pud_p)) {
 		if (native_pud_val(*pud_p) & _PAGE_PSE)
-			goto out;
+			return;
 
 		pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
 	} else {
@@ -554,10 +562,43 @@  static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
 
 	pmd_p += pmd_index(vaddr);
 	if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
-		native_set_pmd(pmd_p, native_make_pmd(pmd_val));
+		native_set_pmd(pmd_p, native_make_pmd(paddr | pmd_flags));
+}
 
-out:
-	return pgtable_area;
+static void __init __sme_map_range(pgd_t *pgd, unsigned long vaddr,
+				   unsigned long vaddr_end,
+				   unsigned long paddr, pmdval_t pmd_flags)
+{
+	while (vaddr < vaddr_end) {
+		sme_populate_pgd(pgd, vaddr, paddr, pmd_flags);
+
+		vaddr += PMD_PAGE_SIZE;
+		paddr += PMD_PAGE_SIZE;
+	}
+}
+
+static void __init sme_map_range_encrypted(pgd_t *pgd,
+					   unsigned long vaddr,
+					   unsigned long vaddr_end,
+					   unsigned long paddr)
+{
+	__sme_map_range(pgd, vaddr, vaddr_end, paddr, PMD_FLAGS_ENC);
+}
+
+static void __init sme_map_range_decrypted(pgd_t *pgd,
+					   unsigned long vaddr,
+					   unsigned long vaddr_end,
+					   unsigned long paddr)
+{
+	__sme_map_range(pgd, vaddr, vaddr_end, paddr, PMD_FLAGS_DEC);
+}
+
+static void __init sme_map_range_decrypted_wp(pgd_t *pgd,
+					      unsigned long vaddr,
+					      unsigned long vaddr_end,
+					      unsigned long paddr)
+{
+	__sme_map_range(pgd, vaddr, vaddr_end, paddr, PMD_FLAGS_DEC_WP);
 }
 
 static unsigned long __init sme_pgtable_calc(unsigned long len)
@@ -616,9 +657,7 @@  void __init sme_encrypt_kernel(void)
 	unsigned long execute_start, execute_end, execute_len;
 	unsigned long kernel_start, kernel_end, kernel_len;
 	unsigned long pgtable_area_len;
-	unsigned long paddr, pmd_flags;
 	unsigned long decrypted_base;
-	void *pgtable_area;
 	pgd_t *pgd;
 
 	if (!sme_active())
@@ -690,14 +729,8 @@  void __init sme_encrypt_kernel(void)
 	 * addressing the workarea.
 	 */
 	pgd = (pgd_t *)native_read_cr3_pa();
-	paddr = workarea_start;
-	while (paddr < workarea_end) {
-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-						paddr,
-						paddr + PMD_FLAGS);
-
-		paddr += PMD_PAGE_SIZE;
-	}
+	sme_map_range_decrypted(pgd, workarea_start, workarea_end,
+				workarea_start);
 
 	/* Flush the TLB - no globals so cr3 is enough */
 	native_write_cr3(__native_read_cr3());
@@ -712,17 +745,6 @@  void __init sme_encrypt_kernel(void)
 	memset(pgd, 0, sizeof(*pgd) * PTRS_PER_PGD);
 	pgtable_area += sizeof(*pgd) * PTRS_PER_PGD;
 
-	/* Add encrypted kernel (identity) mappings */
-	pmd_flags = PMD_FLAGS | _PAGE_ENC;
-	paddr = kernel_start;
-	while (paddr < kernel_end) {
-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-						paddr,
-						paddr + pmd_flags);
-
-		paddr += PMD_PAGE_SIZE;
-	}
-
 	/*
 	 * A different PGD index/entry must be used to get different
 	 * pagetable entries for the decrypted mapping. Choose the next
@@ -732,30 +754,19 @@  void __init sme_encrypt_kernel(void)
 	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
 	decrypted_base <<= PGDIR_SHIFT;
 
-	/* Add decrypted, write-protected kernel (non-identity) mappings */
-	pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT);
-	paddr = kernel_start;
-	while (paddr < kernel_end) {
-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-						paddr + decrypted_base,
-						paddr + pmd_flags);
+	/* Add encrypted kernel (identity) mappings */
+	sme_map_range_encrypted(pgd, kernel_start, kernel_end, kernel_start);
 
-		paddr += PMD_PAGE_SIZE;
-	}
+	/* Add decrypted, write-protected kernel (non-identity) mappings */
+	sme_map_range_decrypted_wp(pgd, kernel_start + decrypted_base,
+				   kernel_end + decrypted_base, kernel_start);
 
 	/* Add decrypted workarea mappings to both kernel mappings */
-	paddr = workarea_start;
-	while (paddr < workarea_end) {
-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-						paddr,
-						paddr + PMD_FLAGS);
-
-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-						paddr + decrypted_base,
-						paddr + PMD_FLAGS);
-
-		paddr += PMD_PAGE_SIZE;
-	}
+	sme_map_range_decrypted(pgd, workarea_start, workarea_end,
+				workarea_start);
+	sme_map_range_decrypted(pgd, workarea_start + decrypted_base,
+				workarea_end + decrypted_base,
+				workarea_start);
 
 	/* Perform the encryption */
 	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,