
[PATCHv5,12/19] x86/mm: Implement prep_encrypted_page() and arch_free_page()

Message ID 20180717112029.42378-13-kirill.shutemov@linux.intel.com (mailing list archive)

Commit Message

kirill.shutemov@linux.intel.com July 17, 2018, 11:20 a.m. UTC
The hardware/CPU does not enforce coherency between mappings of the same
physical page with different KeyIDs or encryption keys.
We are responsible for cache management.

Flush the cache when allocating an encrypted page and when returning the
page to the free pool.

prep_encrypted_page() also takes care of zeroing the page. This has to be
done after the KeyID is set for the page.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 arch/x86/include/asm/mktme.h |  6 +++++
 arch/x86/mm/mktme.c          | 49 ++++++++++++++++++++++++++++++++++++
 2 files changed, 55 insertions(+)

Comments

Dave Hansen July 18, 2018, 11:53 p.m. UTC | #1
The description doesn't mention the potential performance implications
of this patch.  That's criminal at this point.

> --- a/arch/x86/mm/mktme.c
> +++ b/arch/x86/mm/mktme.c
> @@ -1,4 +1,5 @@
>  #include <linux/mm.h>
> +#include <linux/highmem.h>
>  #include <asm/mktme.h>
>  
>  phys_addr_t mktme_keyid_mask;
> @@ -49,3 +50,51 @@ int vma_keyid(struct vm_area_struct *vma)
>  	prot = pgprot_val(vma->vm_page_prot);
>  	return (prot & mktme_keyid_mask) >> mktme_keyid_shift;
>  }
> +
> +void prep_encrypted_page(struct page *page, int order, int keyid, bool zero)
> +{
> +	int i;
> +
> +	/* It's not encrypted page: nothing to do */
> +	if (!keyid)
> +		return;

prep_encrypted_page() is called in the page allocator's fast path.
This out-of-line copy costs a function call for all users, and the
compiler has no way of knowing that keyid != 0 is unlikely.

I think this needs the inline-in-the-header treatment.

> +	/*
> +	 * The hardware/CPU does not enforce coherency between mappings of the
> +	 * same physical page with different KeyIDs or encryption keys.
> +	 * We are responsible for cache management.
> +	 *
> +	 * We flush cache before allocating encrypted page
> +	 */
> +	clflush_cache_range(page_address(page), PAGE_SIZE << order);

It's also worth pointing out that this must be done on the keyid alias
direct map, not the normal one.

Wait a sec...  How do we know which direct map to use?

> +	for (i = 0; i < (1 << order); i++) {
> +		/* All pages coming out of the allocator should have KeyID 0 */
> +		WARN_ON_ONCE(lookup_page_ext(page)->keyid);
> +		lookup_page_ext(page)->keyid = keyid;
> +
> +		/* Clear the page after the KeyID is set. */
> +		if (zero)
> +			clear_highpage(page);
> +
> +		page++;
> +	}
> +}
> +
> +void arch_free_page(struct page *page, int order)
> +{
> +	int i;
> +
> +	/* It's not encrypted page: nothing to do */
> +	if (!page_keyid(page))
> +		return;

Ditto on pushing this to a header.

> +	clflush_cache_range(page_address(page), PAGE_SIZE << order);

OK, how do we know which copy of the direct map to use, here?

> +	for (i = 0; i < (1 << order); i++) {
> +		/* Check if the page has reasonable KeyID */
> +		WARN_ON_ONCE(lookup_page_ext(page)->keyid > mktme_nr_keyids);
> +		lookup_page_ext(page)->keyid = 0;
> +		page++;
> +	}
> +}
>
Kirill A . Shutemov July 23, 2018, 9:50 a.m. UTC | #2
On Wed, Jul 18, 2018 at 04:53:27PM -0700, Dave Hansen wrote:
> The description doesn't mention the potential performance implications
> of this patch.  That's criminal at this point.
> 
> > --- a/arch/x86/mm/mktme.c
> > +++ b/arch/x86/mm/mktme.c
> > @@ -1,4 +1,5 @@
> >  #include <linux/mm.h>
> > +#include <linux/highmem.h>
> >  #include <asm/mktme.h>
> >  
> >  phys_addr_t mktme_keyid_mask;
> > @@ -49,3 +50,51 @@ int vma_keyid(struct vm_area_struct *vma)
> >  	prot = pgprot_val(vma->vm_page_prot);
> >  	return (prot & mktme_keyid_mask) >> mktme_keyid_shift;
> >  }
> > +
> > +void prep_encrypted_page(struct page *page, int order, int keyid, bool zero)
> > +{
> > +	int i;
> > +
> > +	/* It's not encrypted page: nothing to do */
> > +	if (!keyid)
> > +		return;
> 
> prep_encrypted_page() is called in the page allocator's fast path.
> This out-of-line copy costs a function call for all users, and the
> compiler has no way of knowing that keyid != 0 is unlikely.
> 
> I think this needs the inline-in-the-header treatment.

Okay. Again, as a macro.
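
Something along these lines, as a sketch only (the __prep_encrypted_page()
and __arch_free_page() names are made up here; whether the wrappers end up
as macros or static inlines is an implementation detail):

/* arch/x86/include/asm/mktme.h */
void __prep_encrypted_page(struct page *page, int order, int keyid, bool zero);
void __arch_free_page(struct page *page, int order);

#define prep_encrypted_page prep_encrypted_page
static inline void prep_encrypted_page(struct page *page, int order,
				       int keyid, bool zero)
{
	/* Common case, KeyID 0: no cache flush, no function call. */
	if (keyid)
		__prep_encrypted_page(page, order, keyid, zero);
}

#define HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order)
{
	/* Only encrypted pages need the cache flushed on free. */
	if (page_keyid(page))
		__arch_free_page(page, order);
}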

> > +	/*
> > +	 * The hardware/CPU does not enforce coherency between mappings of the
> > +	 * same physical page with different KeyIDs or encryption keys.
> > +	 * We are responsible for cache management.
> > +	 *
> > +	 * We flush cache before allocating encrypted page
> > +	 */
> > +	clflush_cache_range(page_address(page), PAGE_SIZE << order);
> 
> It's also worth pointing out that this must be done on the keyid alias
> direct map, not the normal one.
> 
> Wait a sec...  How do we know which direct map to use?

page_address() -> lowmem_page_address() -> page_to_virt()

page_to_virt() returns the virtual address from the right direct mapping.
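
Roughly (a sketch of the idea only, not the exact definition from this
series; direct_mapping_size here stands for the span of one per-KeyID
alias of the direct mapping):

#define page_to_virt(x) \
	(__va(PFN_PHYS(page_to_pfn(x))) + page_keyid(x) * direct_mapping_size)

Each KeyID gets its own alias of the direct mapping, so a flush through
page_address() goes through the alias that matches the page's current KeyID.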

> > +	for (i = 0; i < (1 << order); i++) {
> > +		/* All pages coming out of the allocator should have KeyID 0 */
> > +		WARN_ON_ONCE(lookup_page_ext(page)->keyid);
> > +		lookup_page_ext(page)->keyid = keyid;
> > +
> > +		/* Clear the page after the KeyID is set. */
> > +		if (zero)
> > +			clear_highpage(page);
> > +
> > +		page++;
> > +	}
> > +}
> > +
> > +void arch_free_page(struct page *page, int order)
> > +{
> > +	int i;
> > +
> > +	/* It's not encrypted page: nothing to do */
> > +	if (!page_keyid(page))
> > +		return;
> 
> Ditto on pushing this to a header.
> 
> > +	clflush_cache_range(page_address(page), PAGE_SIZE << order);
> 
> OK, how do we know which copy of the direct map to use, here?

The same way as above.

> > +	for (i = 0; i < (1 << order); i++) {
> > +		/* Check if the page has reasonable KeyID */
> > +		WARN_ON_ONCE(lookup_page_ext(page)->keyid > mktme_nr_keyids);
> > +		lookup_page_ext(page)->keyid = 0;
> > +		page++;
> > +	}
> > +}
> > 
>

Patch

diff --git a/arch/x86/include/asm/mktme.h b/arch/x86/include/asm/mktme.h
index f0b7844e36a4..44409b8bbaca 100644
--- a/arch/x86/include/asm/mktme.h
+++ b/arch/x86/include/asm/mktme.h
@@ -19,6 +19,12 @@  int page_keyid(const struct page *page);
 #define vma_keyid vma_keyid
 int vma_keyid(struct vm_area_struct *vma);
 
+#define prep_encrypted_page prep_encrypted_page
+void prep_encrypted_page(struct page *page, int order, int keyid, bool zero);
+
+#define HAVE_ARCH_FREE_PAGE
+void arch_free_page(struct page *page, int order);
+
 #else
 #define mktme_keyid_mask	((phys_addr_t)0)
 #define mktme_nr_keyids		0
diff --git a/arch/x86/mm/mktme.c b/arch/x86/mm/mktme.c
index a1f40ee61b25..1194496633ce 100644
--- a/arch/x86/mm/mktme.c
+++ b/arch/x86/mm/mktme.c
@@ -1,4 +1,5 @@ 
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <asm/mktme.h>
 
 phys_addr_t mktme_keyid_mask;
@@ -49,3 +50,51 @@  int vma_keyid(struct vm_area_struct *vma)
 	prot = pgprot_val(vma->vm_page_prot);
 	return (prot & mktme_keyid_mask) >> mktme_keyid_shift;
 }
+
+void prep_encrypted_page(struct page *page, int order, int keyid, bool zero)
+{
+	int i;
+
+	/* It's not encrypted page: nothing to do */
+	if (!keyid)
+		return;
+
+	/*
+	 * The hardware/CPU does not enforce coherency between mappings of the
+	 * same physical page with different KeyIDs or encryption keys.
+	 * We are responsible for cache management.
+	 *
+	 * We flush cache before allocating encrypted page
+	 */
+	clflush_cache_range(page_address(page), PAGE_SIZE << order);
+
+	for (i = 0; i < (1 << order); i++) {
+		/* All pages coming out of the allocator should have KeyID 0 */
+		WARN_ON_ONCE(lookup_page_ext(page)->keyid);
+		lookup_page_ext(page)->keyid = keyid;
+
+		/* Clear the page after the KeyID is set. */
+		if (zero)
+			clear_highpage(page);
+
+		page++;
+	}
+}
+
+void arch_free_page(struct page *page, int order)
+{
+	int i;
+
+	/* It's not encrypted page: nothing to do */
+	if (!page_keyid(page))
+		return;
+
+	clflush_cache_range(page_address(page), PAGE_SIZE << order);
+
+	for (i = 0; i < (1 << order); i++) {
+		/* Check if the page has reasonable KeyID */
+		WARN_ON_ONCE(lookup_page_ext(page)->keyid > mktme_nr_keyids);
+		lookup_page_ext(page)->keyid = 0;
+		page++;
+	}
+}