
[PATCHv3,10/17] x86/mm: Implement prep_encrypted_page() and arch_free_page()

Message ID 20180612143915.68065-11-kirill.shutemov@linux.intel.com (mailing list archive)
State New, archived

Commit Message

Kirill A. Shutemov June 12, 2018, 2:39 p.m. UTC
The hardware/CPU does not enforce coherency between mappings of the same
physical page with different KeyIDs or encryption keys.
We are responsible for cache management.

Flush the cache when allocating an encrypted page and when returning the
page to the free pool.

prep_encrypted_page() also takes care of zeroing the page. We have to
do this after the KeyID is set for the page.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 arch/x86/include/asm/mktme.h |  6 ++++++
 arch/x86/mm/mktme.c          | 39 ++++++++++++++++++++++++++++++++++++
 2 files changed, 45 insertions(+)
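
As a quick illustration of how the new hook is meant to be used, here is a
minimal caller-side sketch. It is illustrative only: the real call sites live
in earlier patches of the series, and example_prep_for_vma() is a hypothetical
helper, not something this patch adds.

#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/mktme.h>

/*
 * Hypothetical caller: assign the VMA's KeyID to a freshly allocated
 * page and let prep_encrypted_page() do the deferred zeroing.
 * vma_keyid() is introduced earlier in the series.
 */
static void example_prep_for_vma(struct page *page, int order,
				 struct vm_area_struct *vma, gfp_t gfp)
{
	int keyid = vma_keyid(vma);

	/* KeyID 0 means the page is not MKTME-encrypted; nothing to prepare. */
	if (keyid)
		prep_encrypted_page(page, order, keyid, gfp & __GFP_ZERO);
}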

Comments

Dave Hansen June 13, 2018, 6:26 p.m. UTC | #1
On 06/12/2018 07:39 AM, Kirill A. Shutemov wrote:
> prep_encrypted_page() also takes care of zeroing the page. We have to
> do this after the KeyID is set for the page.

This is an implementation detail that has gone unmentioned until now but
has impacted at least half a dozen locations in previous patches.  Can
you rectify that, please?


> +void prep_encrypted_page(struct page *page, int order, int keyid, bool zero)
> +{
> +	int i;
> +
> +	/*
> +	 * The hardware/CPU does not enforce coherency between mappings of the
> +	 * same physical page with different KeyIDs or encrypt ion keys.

What are "encrypt ion"s? :)

> +	 * We are responsible for cache management.
> +	 *
> +	 * We flush cache before allocating encrypted page
> +	 */
> +	clflush_cache_range(page_address(page), PAGE_SIZE << order);
> +
> +	for (i = 0; i < (1 << order); i++) {
> +		WARN_ON_ONCE(lookup_page_ext(page + i)->keyid);

/* All pages coming out of the allocator should have KeyID 0 */

> +		lookup_page_ext(page + i)->keyid = keyid;
> +		/* Clear the page after the KeyID is set. */
> +		if (zero)
> +			clear_highpage(page + i);
> +	}
> +}

How expensive is this?

> +void arch_free_page(struct page *page, int order)
> +{
> +	int i;
> 

	/* KeyId-0 pages were not used for MKTME and need no work */

... or something

> +	if (!page_keyid(page))
> +		return;

Is page_keyid() optimized so that all this goes away automatically when
MKTME is compiled out or unsupported?

> +	for (i = 0; i < (1 << order); i++) {
> +		WARN_ON_ONCE(lookup_page_ext(page + i)->keyid > mktme_nr_keyids);
> +		lookup_page_ext(page + i)->keyid = 0;
> +	}
> +
> +	clflush_cache_range(page_address(page), PAGE_SIZE << order);
> +}
Kirill A. Shutemov June 18, 2018, 10:18 a.m. UTC | #2
On Wed, Jun 13, 2018 at 06:26:10PM +0000, Dave Hansen wrote:
> On 06/12/2018 07:39 AM, Kirill A. Shutemov wrote:
> > prep_encrypted_page() also takes care of zeroing the page. We have to
> > do this after the KeyID is set for the page.
> 
> This is an implementation detail that has gone unmentioned until now but
> has impacted at least half a dozen locations in previous patches.  Can
> you rectify that, please?

It was mentioned in the commit message of 04/17.

> > +void prep_encrypted_page(struct page *page, int order, int keyid, bool zero)
> > +{
> > +	int i;
> > +
> > +	/*
> > +	 * The hardware/CPU does not enforce coherency between mappings of the
> > +	 * same physical page with different KeyIDs or encrypt ion keys.
> 
> What are "encrypt ion"s? :)

:P

> > +	 * We are responsible for cache management.
> > +	 *
> > +	 * We flush cache before allocating encrypted page
> > +	 */
> > +	clflush_cache_range(page_address(page), PAGE_SIZE << order);
> > +
> > +	for (i = 0; i < (1 << order); i++) {
> > +		WARN_ON_ONCE(lookup_page_ext(page + i)->keyid);
> 
> /* All pages coming out of the allocator should have KeyID 0 */
> 

Okay.

> > +		lookup_page_ext(page + i)->keyid = keyid;
> > +		/* Clear the page after the KeyID is set. */
> > +		if (zero)
> > +			clear_highpage(page + i);
> > +	}
> > +}
> 
> How expensive is this?

It just shifts the cost of zeroing from the page allocator to here. It
should not have a huge effect.
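
To make the cost shift concrete, here is a hedged sketch of the
allocation-side logic. The function name and its placement are assumptions;
the real hook is added by an earlier patch in the series and may look
different.

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <asm/mktme.h>

/*
 * Hypothetical sketch: for KeyID-0 pages the allocator clears as usual;
 * for encrypted pages the same clear_highpage() work happens inside
 * prep_encrypted_page() instead, after the KeyID is set.
 */
static void example_post_alloc_zero(struct page *page, int order,
				    gfp_t gfp, int keyid)
{
	int i;

	if (keyid) {
		prep_encrypted_page(page, order, keyid, gfp & __GFP_ZERO);
		return;
	}

	if (gfp & __GFP_ZERO)
		for (i = 0; i < (1 << order); i++)
			clear_highpage(page + i);
}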

> > +void arch_free_page(struct page *page, int order)
> > +{
> > +	int i;
> > 
> 
> 	/* KeyId-0 pages were not used for MKTME and need no work */
> 
> ... or something

Okay.

> > +	if (!page_keyid(page))
> > +		return;
> 
> Is page_keyid() optimized so that all this goes away automatically when
> MKTME is compiled out or unsupported?

If MKTME is not enabled at compile time, this translation unit is not
compiled at all.

I have not yet optimized the run-time unsupported case. I'll optimize it
based on performance measurements.
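
A minimal sketch of what such a run-time optimization could look like,
assuming page_keyid() is gated on a static key (the key name below is
hypothetical; nothing in this series defines it yet):

#include <linux/jump_label.h>
#include <linux/page_ext.h>

/* Hypothetical: flipped on during boot only when MKTME is usable. */
DEFINE_STATIC_KEY_FALSE(mktme_enabled_key);

static inline int page_keyid(struct page *page)
{
	/*
	 * With the key disabled this is the straight-line path: return 0
	 * immediately, so arch_free_page() bails out without touching
	 * page_ext.
	 */
	if (!static_branch_unlikely(&mktme_enabled_key))
		return 0;

	return lookup_page_ext(page)->keyid;
}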

> > +	for (i = 0; i < (1 << order); i++) {
> > +		WARN_ON_ONCE(lookup_page_ext(page + i)->keyid > mktme_nr_keyids);
> > +		lookup_page_ext(page + i)->keyid = 0;
> > +	}
> > +
> > +	clflush_cache_range(page_address(page), PAGE_SIZE << order);
> > +}

Patch

diff --git a/arch/x86/include/asm/mktme.h b/arch/x86/include/asm/mktme.h
index 0fe0db424e48..ec7036abdb3f 100644
--- a/arch/x86/include/asm/mktme.h
+++ b/arch/x86/include/asm/mktme.h
@@ -11,6 +11,12 @@  extern phys_addr_t mktme_keyid_mask;
 extern int mktme_nr_keyids;
 extern int mktme_keyid_shift;
 
+#define prep_encrypted_page prep_encrypted_page
+void prep_encrypted_page(struct page *page, int order, int keyid, bool zero);
+
+#define HAVE_ARCH_FREE_PAGE
+void arch_free_page(struct page *page, int order);
+
 #define vma_is_encrypted vma_is_encrypted
 bool vma_is_encrypted(struct vm_area_struct *vma);
 
diff --git a/arch/x86/mm/mktme.c b/arch/x86/mm/mktme.c
index b02d5b9d4339..1821b87abb2f 100644
--- a/arch/x86/mm/mktme.c
+++ b/arch/x86/mm/mktme.c
@@ -1,4 +1,5 @@ 
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <asm/mktme.h>
 
 phys_addr_t mktme_keyid_mask;
@@ -30,6 +31,44 @@  int vma_keyid(struct vm_area_struct *vma)
 	return (prot & mktme_keyid_mask) >> mktme_keyid_shift;
 }
 
+void prep_encrypted_page(struct page *page, int order, int keyid, bool zero)
+{
+	int i;
+
+	/*
+	 * The hardware/CPU does not enforce coherency between mappings of the
+	 * same physical page with different KeyIDs or encrypt ion keys.
+	 * We are responsible for cache management.
+	 *
+	 * We flush cache before allocating encrypted page
+	 */
+	clflush_cache_range(page_address(page), PAGE_SIZE << order);
+
+	for (i = 0; i < (1 << order); i++) {
+		WARN_ON_ONCE(lookup_page_ext(page + i)->keyid);
+		lookup_page_ext(page + i)->keyid = keyid;
+
+		/* Clear the page after the KeyID is set. */
+		if (zero)
+			clear_highpage(page + i);
+	}
+}
+
+void arch_free_page(struct page *page, int order)
+{
+	int i;
+
+	if (!page_keyid(page))
+		return;
+
+	for (i = 0; i < (1 << order); i++) {
+		WARN_ON_ONCE(lookup_page_ext(page + i)->keyid > mktme_nr_keyids);
+		lookup_page_ext(page + i)->keyid = 0;
+	}
+
+	clflush_cache_range(page_address(page), PAGE_SIZE << order);
+}
+
 static bool need_page_mktme(void)
 {
 	/* Make sure keyid doesn't collide with extended page flags */