
[RFC,v4,16/28] x86: Add support for changing memory encryption attribute

Message ID 20170216154535.19244.6294.stgit@tlendack-t1.amdoffice.net (mailing list archive)
State New, archived

Commit Message

Tom Lendacky Feb. 16, 2017, 3:45 p.m. UTC
Add support for changing the memory encryption attribute for one or more
memory pages.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
 arch/x86/include/asm/cacheflush.h |    3 ++
 arch/x86/mm/pageattr.c            |   66 +++++++++++++++++++++++++++++++++++++
 2 files changed, 69 insertions(+)
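
For illustration only (not part of the patch), a hypothetical caller: a driver that needs to share a page-aligned buffer with a device in the clear could drop the encryption attribute before the device touches it and restore it afterwards. The buf and size variables below are assumed, not taken from this series.

	unsigned long vaddr = (unsigned long)buf;	/* page-aligned kernel buffer */
	int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int rc;

	rc = set_memory_decrypted(vaddr, npages);	/* clear _PAGE_ENC on the range */
	if (rc)
		return rc;

	/* ... device accesses the buffer unencrypted ... */

	rc = set_memory_encrypted(vaddr, npages);	/* restore _PAGE_ENC */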

Comments

Borislav Petkov Feb. 22, 2017, 6:52 p.m. UTC | #1
On Thu, Feb 16, 2017 at 09:45:35AM -0600, Tom Lendacky wrote:
> Add support for changing the memory encryption attribute for one or more
> memory pages.

"This will be useful when we, ...., for example."

> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
> ---
>  arch/x86/include/asm/cacheflush.h |    3 ++
>  arch/x86/mm/pageattr.c            |   66 +++++++++++++++++++++++++++++++++++++
>  2 files changed, 69 insertions(+)
> 
> diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
> index 872877d..33ae60a 100644
> --- a/arch/x86/include/asm/cacheflush.h
> +++ b/arch/x86/include/asm/cacheflush.h
> @@ -12,6 +12,7 @@
>   * Executability : eXeutable, NoteXecutable
>   * Read/Write    : ReadOnly, ReadWrite
>   * Presence      : NotPresent
> + * Encryption    : Encrypted, Decrypted
>   *
>   * Within a category, the attributes are mutually exclusive.
>   *
> @@ -47,6 +48,8 @@
>  int set_memory_rw(unsigned long addr, int numpages);
>  int set_memory_np(unsigned long addr, int numpages);
>  int set_memory_4k(unsigned long addr, int numpages);
> +int set_memory_encrypted(unsigned long addr, int numpages);
> +int set_memory_decrypted(unsigned long addr, int numpages);
>  
>  int set_memory_array_uc(unsigned long *addr, int addrinarray);
>  int set_memory_array_wc(unsigned long *addr, int addrinarray);
> diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
> index 91c5c63..9710f5c 100644
> --- a/arch/x86/mm/pageattr.c
> +++ b/arch/x86/mm/pageattr.c
> @@ -1742,6 +1742,72 @@ int set_memory_4k(unsigned long addr, int numpages)
>  					__pgprot(0), 1, 0, NULL);
>  }
>  
> +static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
> +{
> +	struct cpa_data cpa;
> +	unsigned long start;
> +	int ret;
> +
> +	/* Nothing to do if the _PAGE_ENC attribute is zero */
> +	if (_PAGE_ENC == 0)

Why not:

	if (!sme_active())

?

> +		return 0;
> +
> +	/* Save original start address since it will be modified */

That's obvious - it is a small-enough function to fit on the screen. No
need for the comment.

> +	start = addr;
> +
> +	memset(&cpa, 0, sizeof(cpa));
> +	cpa.vaddr = &addr;
> +	cpa.numpages = numpages;
> +	cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0);
> +	cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC);
> +	cpa.pgd = init_mm.pgd;
> +
> +	/* Should not be working on unaligned addresses */
> +	if (WARN_ONCE(*cpa.vaddr & ~PAGE_MASK,
> +		      "misaligned address: %#lx\n", *cpa.vaddr))

Use addr here so that you don't have to deref. gcc is probably smart
enough but the code should look more readable this way too.

> +		*cpa.vaddr &= PAGE_MASK;

I know, you must use cpa.vaddr here, but if you move that alignment check
above the cpa assignment, you can use addr throughout.
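
A minimal sketch of that reordering (an illustration of the suggestion, not the posted patch), hoisting the check above the cpa setup so that only addr is touched:

	/* Should not be working on unaligned addresses */
	if (WARN_ONCE(addr & ~PAGE_MASK,
		      "misaligned address: %#lx\n", addr))
		addr &= PAGE_MASK;

	start = addr;

	memset(&cpa, 0, sizeof(cpa));
	cpa.vaddr = &addr;
	/* rest of the cpa setup as before */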

> +
> +	/* Must avoid aliasing mappings in the highmem code */
> +	kmap_flush_unused();
> +	vm_unmap_aliases();
> +
> +	/*
> +	 * Before changing the encryption attribute, we need to flush caches.
> +	 */
> +	if (static_cpu_has(X86_FEATURE_CLFLUSH))
> +		cpa_flush_range(start, numpages, 1);
> +	else
> +		cpa_flush_all(1);

I guess we don't really need the distinction since a SME CPU most
definitely implies CLFLUSH support but ok, let's be careful.

> +
> +	ret = __change_page_attr_set_clr(&cpa, 1);
> +
> +	/*
> +	 * After changing the encryption attribute, we need to flush TLBs
> +	 * again in case any speculative TLB caching occurred (but no need
> +	 * to flush caches again).  We could just use cpa_flush_all(), but
> +	 * in case TLB flushing gets optimized in the cpa_flush_range()
> +	 * path use the same logic as above.
> +	 */
> +	if (static_cpu_has(X86_FEATURE_CLFLUSH))
> +		cpa_flush_range(start, numpages, 0);
> +	else
> +		cpa_flush_all(0);
> +
> +	return ret;
> +}
Tom Lendacky Feb. 28, 2017, 10:46 p.m. UTC | #2
On 2/22/2017 12:52 PM, Borislav Petkov wrote:
> On Thu, Feb 16, 2017 at 09:45:35AM -0600, Tom Lendacky wrote:
>> Add support for changing the memory encryption attribute for one or more
>> memory pages.
>
> "This will be useful when we, ...., for example."

Yup, will expand on the "why".

>
>> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
>> ---
>>  arch/x86/include/asm/cacheflush.h |    3 ++
>>  arch/x86/mm/pageattr.c            |   66 +++++++++++++++++++++++++++++++++++++
>>  2 files changed, 69 insertions(+)
>>
>> diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
>> index 872877d..33ae60a 100644
>> --- a/arch/x86/include/asm/cacheflush.h
>> +++ b/arch/x86/include/asm/cacheflush.h
>> @@ -12,6 +12,7 @@
>>   * Executability : eXeutable, NoteXecutable
>>   * Read/Write    : ReadOnly, ReadWrite
>>   * Presence      : NotPresent
>> + * Encryption    : Encrypted, Decrypted
>>   *
>>   * Within a category, the attributes are mutually exclusive.
>>   *
>> @@ -47,6 +48,8 @@
>>  int set_memory_rw(unsigned long addr, int numpages);
>>  int set_memory_np(unsigned long addr, int numpages);
>>  int set_memory_4k(unsigned long addr, int numpages);
>> +int set_memory_encrypted(unsigned long addr, int numpages);
>> +int set_memory_decrypted(unsigned long addr, int numpages);
>>
>>  int set_memory_array_uc(unsigned long *addr, int addrinarray);
>>  int set_memory_array_wc(unsigned long *addr, int addrinarray);
>> diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
>> index 91c5c63..9710f5c 100644
>> --- a/arch/x86/mm/pageattr.c
>> +++ b/arch/x86/mm/pageattr.c
>> @@ -1742,6 +1742,72 @@ int set_memory_4k(unsigned long addr, int numpages)
>>  					__pgprot(0), 1, 0, NULL);
>>  }
>>
>> +static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
>> +{
>> +	struct cpa_data cpa;
>> +	unsigned long start;
>> +	int ret;
>> +
>> +	/* Nothing to do if the _PAGE_ENC attribute is zero */
>> +	if (_PAGE_ENC == 0)
>
> Why not:
>
> 	if (!sme_active())
>
> ?

Yup, it would be more clear.

>
>> +		return 0;
>> +
>> +	/* Save original start address since it will be modified */
>
> That's obvious - it is a small-enough function to fit on the screen. No
> need for the comment.

Ok.

>
>> +	start = addr;
>> +
>> +	memset(&cpa, 0, sizeof(cpa));
>> +	cpa.vaddr = &addr;
>> +	cpa.numpages = numpages;
>> +	cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0);
>> +	cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC);
>> +	cpa.pgd = init_mm.pgd;
>> +
>> +	/* Should not be working on unaligned addresses */
>> +	if (WARN_ONCE(*cpa.vaddr & ~PAGE_MASK,
>> +		      "misaligned address: %#lx\n", *cpa.vaddr))
>
> Use addr here so that you don't have to deref. gcc is probably smart
> enough but the code should look more readable this way too.
>

Ok.

>> +		*cpa.vaddr &= PAGE_MASK;
>
> I know, you must use cpa.vaddr here, but if you move that alignment check
> above the cpa assignment, you can use addr throughout.

Ok.

>
>> +
>> +	/* Must avoid aliasing mappings in the highmem code */
>> +	kmap_flush_unused();
>> +	vm_unmap_aliases();
>> +
>> +	/*
>> +	 * Before changing the encryption attribute, we need to flush caches.
>> +	 */
>> +	if (static_cpu_has(X86_FEATURE_CLFLUSH))
>> +		cpa_flush_range(start, numpages, 1);
>> +	else
>> +		cpa_flush_all(1);
>
> I guess we don't really need the distinction since a SME CPU most
> definitely implies CLFLUSH support but ok, let's be careful.
>
>> +
>> +	ret = __change_page_attr_set_clr(&cpa, 1);
>> +
>> +	/*
>> +	 * After changing the encryption attribute, we need to flush TLBs
>> +	 * again in case any speculative TLB caching occurred (but no need
>> +	 * to flush caches again).  We could just use cpa_flush_all(), but
>> +	 * in case TLB flushing gets optimized in the cpa_flush_range()
>> +	 * path use the same logic as above.
>> +	 */
>> +	if (static_cpu_has(X86_FEATURE_CLFLUSH))
>> +		cpa_flush_range(start, numpages, 0);
>> +	else
>> +		cpa_flush_all(0);
>> +
>> +	return ret;
>> +}
>

Patch

diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 872877d..33ae60a 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -12,6 +12,7 @@ 
  * Executability : eXeutable, NoteXecutable
  * Read/Write    : ReadOnly, ReadWrite
  * Presence      : NotPresent
+ * Encryption    : Encrypted, Decrypted
  *
  * Within a category, the attributes are mutually exclusive.
  *
@@ -47,6 +48,8 @@ 
 int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_np(unsigned long addr, int numpages);
 int set_memory_4k(unsigned long addr, int numpages);
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
 
 int set_memory_array_uc(unsigned long *addr, int addrinarray);
 int set_memory_array_wc(unsigned long *addr, int addrinarray);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 91c5c63..9710f5c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1742,6 +1742,72 @@ int set_memory_4k(unsigned long addr, int numpages)
 					__pgprot(0), 1, 0, NULL);
 }
 
+static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
+{
+	struct cpa_data cpa;
+	unsigned long start;
+	int ret;
+
+	/* Nothing to do if the _PAGE_ENC attribute is zero */
+	if (_PAGE_ENC == 0)
+		return 0;
+
+	/* Save original start address since it will be modified */
+	start = addr;
+
+	memset(&cpa, 0, sizeof(cpa));
+	cpa.vaddr = &addr;
+	cpa.numpages = numpages;
+	cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0);
+	cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC);
+	cpa.pgd = init_mm.pgd;
+
+	/* Should not be working on unaligned addresses */
+	if (WARN_ONCE(*cpa.vaddr & ~PAGE_MASK,
+		      "misaligned address: %#lx\n", *cpa.vaddr))
+		*cpa.vaddr &= PAGE_MASK;
+
+	/* Must avoid aliasing mappings in the highmem code */
+	kmap_flush_unused();
+	vm_unmap_aliases();
+
+	/*
+	 * Before changing the encryption attribute, we need to flush caches.
+	 */
+	if (static_cpu_has(X86_FEATURE_CLFLUSH))
+		cpa_flush_range(start, numpages, 1);
+	else
+		cpa_flush_all(1);
+
+	ret = __change_page_attr_set_clr(&cpa, 1);
+
+	/*
+	 * After changing the encryption attribute, we need to flush TLBs
+	 * again in case any speculative TLB caching occurred (but no need
+	 * to flush caches again).  We could just use cpa_flush_all(), but
+	 * in case TLB flushing gets optimized in the cpa_flush_range()
+	 * path use the same logic as above.
+	 */
+	if (static_cpu_has(X86_FEATURE_CLFLUSH))
+		cpa_flush_range(start, numpages, 0);
+	else
+		cpa_flush_all(0);
+
+	return ret;
+}
+
+int set_memory_encrypted(unsigned long addr, int numpages)
+{
+	return __set_memory_enc_dec(addr, numpages, true);
+}
+EXPORT_SYMBOL(set_memory_encrypted);
+
+int set_memory_decrypted(unsigned long addr, int numpages)
+{
+	return __set_memory_enc_dec(addr, numpages, false);
+}
+EXPORT_SYMBOL(set_memory_decrypted);
+
 int set_pages_uc(struct page *page, int numpages)
 {
 	unsigned long addr = (unsigned long)page_address(page);