Message ID | 20161110003655.3280.57333.stgit@tlendack-t1.amdoffice.net (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On Wed, Nov 09, 2016 at 06:36:55PM -0600, Tom Lendacky wrote: > This patch adds support to be change the memory encryption attribute for > one or more memory pages. "Add support for changing ..." > Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com> > --- > arch/x86/include/asm/cacheflush.h | 3 + > arch/x86/include/asm/mem_encrypt.h | 13 ++++++ > arch/x86/mm/mem_encrypt.c | 43 +++++++++++++++++++++ > arch/x86/mm/pageattr.c | 73 ++++++++++++++++++++++++++++++++++++ > 4 files changed, 132 insertions(+) ... > diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c > index 411210d..41cfdf9 100644 > --- a/arch/x86/mm/mem_encrypt.c > +++ b/arch/x86/mm/mem_encrypt.c > @@ -18,6 +18,7 @@ > #include <asm/fixmap.h> > #include <asm/setup.h> > #include <asm/bootparam.h> > +#include <asm/cacheflush.h> > > extern pmdval_t early_pmd_flags; > int __init __early_make_pgtable(unsigned long, pmdval_t); > @@ -33,6 +34,48 @@ EXPORT_SYMBOL_GPL(sme_me_mask); > /* Buffer used for early in-place encryption by BSP, no locking needed */ > static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE); > > +int sme_set_mem_enc(void *vaddr, unsigned long size) > +{ > + unsigned long addr, numpages; > + > + if (!sme_me_mask) > + return 0; So those interfaces look duplicated to me: you have exported sme_set_mem_enc/sme_set_mem_unenc which take @size and then you have set_memory_enc/set_memory_dec which take numpages. And then you're testing sme_me_mask in both. What I'd prefer to have is only *two* set_memory_enc/set_memory_dec which take size in bytes and one workhorse __set_memory_enc_dec() which does it all. The user shouldn't have to care about numpages or size or whatever. Ok? > + > + addr = (unsigned long)vaddr & PAGE_MASK; > + numpages = PAGE_ALIGN(size) >> PAGE_SHIFT; > + > + /* > + * The set_memory_xxx functions take an integer for numpages, make > + * sure it doesn't exceed that. 
> + */ > + if (numpages > INT_MAX) > + return -EINVAL; > + > + return set_memory_enc(addr, numpages); > +} > +EXPORT_SYMBOL_GPL(sme_set_mem_enc); > + > +int sme_set_mem_unenc(void *vaddr, unsigned long size) > +{ > + unsigned long addr, numpages; > + > + if (!sme_me_mask) > + return 0; > + > + addr = (unsigned long)vaddr & PAGE_MASK; > + numpages = PAGE_ALIGN(size) >> PAGE_SHIFT; > + > + /* > + * The set_memory_xxx functions take an integer for numpages, make > + * sure it doesn't exceed that. > + */ > + if (numpages > INT_MAX) > + return -EINVAL; > + > + return set_memory_dec(addr, numpages); > +} > +EXPORT_SYMBOL_GPL(sme_set_mem_unenc); > + > /* > * This routine does not change the underlying encryption setting of the > * page(s) that map this memory. It assumes that eventually the memory is > diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c > index b8e6bb5..babf3a6 100644 > --- a/arch/x86/mm/pageattr.c > +++ b/arch/x86/mm/pageattr.c > @@ -1729,6 +1729,79 @@ int set_memory_4k(unsigned long addr, int numpages) > __pgprot(0), 1, 0, NULL); > } > > +static int __set_memory_enc_dec(struct cpa_data *cpa) > +{ > + unsigned long addr; > + int numpages; > + int ret; > + > + /* People should not be passing in unaligned addresses */ > + if (WARN_ONCE(*cpa->vaddr & ~PAGE_MASK, > + "misaligned address: %#lx\n", *cpa->vaddr)) > + *cpa->vaddr &= PAGE_MASK; > + > + addr = *cpa->vaddr; > + numpages = cpa->numpages; > + > + /* Must avoid aliasing mappings in the highmem code */ > + kmap_flush_unused(); > + vm_unmap_aliases(); > + > + ret = __change_page_attr_set_clr(cpa, 1); > + > + /* Check whether we really changed something */ > + if (!(cpa->flags & CPA_FLUSHTLB)) > + goto out; That label is used only once - just "return ret;" here. > + /* > + * On success we use CLFLUSH, when the CPU supports it to > + * avoid the WBINVD. 
> + */ > + if (!ret && static_cpu_has(X86_FEATURE_CLFLUSH)) > + cpa_flush_range(addr, numpages, 1); > + else > + cpa_flush_all(1); > + > +out: > + return ret; > +} > + > +int set_memory_enc(unsigned long addr, int numpages) > +{ > + struct cpa_data cpa; > + > + if (!sme_me_mask) > + return 0; > + > + memset(&cpa, 0, sizeof(cpa)); > + cpa.vaddr = &addr; > + cpa.numpages = numpages; > + cpa.mask_set = __pgprot(_PAGE_ENC); > + cpa.mask_clr = __pgprot(0); > + cpa.pgd = init_mm.pgd; You could move that... > + > + return __set_memory_enc_dec(&cpa); > +} > +EXPORT_SYMBOL(set_memory_enc); > + > +int set_memory_dec(unsigned long addr, int numpages) > +{ > + struct cpa_data cpa; > + > + if (!sme_me_mask) > + return 0; > + > + memset(&cpa, 0, sizeof(cpa)); > + cpa.vaddr = &addr; > + cpa.numpages = numpages; > + cpa.mask_set = __pgprot(0); > + cpa.mask_clr = __pgprot(_PAGE_ENC); > + cpa.pgd = init_mm.pgd; ... and that into __set_memory_enc_dec() too and pass in a "bool dec" or "bool enc" or so which presets mask_set and mask_clr properly. See above. I think two functions exported to other in-kernel users are more than enough.
On 11/17/2016 11:39 AM, Borislav Petkov wrote: > On Wed, Nov 09, 2016 at 06:36:55PM -0600, Tom Lendacky wrote: >> This patch adds support to be change the memory encryption attribute for >> one or more memory pages. > > "Add support for changing ..." Yeah, I kind of messed up that description a bit! > >> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com> >> --- >> arch/x86/include/asm/cacheflush.h | 3 + >> arch/x86/include/asm/mem_encrypt.h | 13 ++++++ >> arch/x86/mm/mem_encrypt.c | 43 +++++++++++++++++++++ >> arch/x86/mm/pageattr.c | 73 ++++++++++++++++++++++++++++++++++++ >> 4 files changed, 132 insertions(+) > > ... > >> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c >> index 411210d..41cfdf9 100644 >> --- a/arch/x86/mm/mem_encrypt.c >> +++ b/arch/x86/mm/mem_encrypt.c >> @@ -18,6 +18,7 @@ >> #include <asm/fixmap.h> >> #include <asm/setup.h> >> #include <asm/bootparam.h> >> +#include <asm/cacheflush.h> >> >> extern pmdval_t early_pmd_flags; >> int __init __early_make_pgtable(unsigned long, pmdval_t); >> @@ -33,6 +34,48 @@ EXPORT_SYMBOL_GPL(sme_me_mask); >> /* Buffer used for early in-place encryption by BSP, no locking needed */ >> static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE); >> >> +int sme_set_mem_enc(void *vaddr, unsigned long size) >> +{ >> + unsigned long addr, numpages; >> + >> + if (!sme_me_mask) >> + return 0; > > So those interfaces look duplicated to me: you have exported > sme_set_mem_enc/sme_set_mem_unenc which take @size and then you have > set_memory_enc/set_memory_dec which take numpages. > > And then you're testing sme_me_mask in both. > > What I'd prefer to have is only *two* set_memory_enc/set_memory_dec > which take size in bytes and one workhorse __set_memory_enc_dec() which > does it all. The user shouldn't have to care about numpages or size or > whatever. > > Ok? Yup, makes sense. I'll redo this. 
> >> + >> + addr = (unsigned long)vaddr & PAGE_MASK; >> + numpages = PAGE_ALIGN(size) >> PAGE_SHIFT; >> + >> + /* >> + * The set_memory_xxx functions take an integer for numpages, make >> + * sure it doesn't exceed that. >> + */ >> + if (numpages > INT_MAX) >> + return -EINVAL; >> + >> + return set_memory_enc(addr, numpages); >> +} >> +EXPORT_SYMBOL_GPL(sme_set_mem_enc); >> + >> +int sme_set_mem_unenc(void *vaddr, unsigned long size) >> +{ >> + unsigned long addr, numpages; >> + >> + if (!sme_me_mask) >> + return 0; >> + >> + addr = (unsigned long)vaddr & PAGE_MASK; >> + numpages = PAGE_ALIGN(size) >> PAGE_SHIFT; >> + >> + /* >> + * The set_memory_xxx functions take an integer for numpages, make >> + * sure it doesn't exceed that. >> + */ >> + if (numpages > INT_MAX) >> + return -EINVAL; >> + >> + return set_memory_dec(addr, numpages); >> +} >> +EXPORT_SYMBOL_GPL(sme_set_mem_unenc); >> + >> /* >> * This routine does not change the underlying encryption setting of the >> * page(s) that map this memory. 
It assumes that eventually the memory is >> diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c >> index b8e6bb5..babf3a6 100644 >> --- a/arch/x86/mm/pageattr.c >> +++ b/arch/x86/mm/pageattr.c >> @@ -1729,6 +1729,79 @@ int set_memory_4k(unsigned long addr, int numpages) >> __pgprot(0), 1, 0, NULL); >> } >> >> +static int __set_memory_enc_dec(struct cpa_data *cpa) >> +{ >> + unsigned long addr; >> + int numpages; >> + int ret; >> + >> + /* People should not be passing in unaligned addresses */ >> + if (WARN_ONCE(*cpa->vaddr & ~PAGE_MASK, >> + "misaligned address: %#lx\n", *cpa->vaddr)) >> + *cpa->vaddr &= PAGE_MASK; >> + >> + addr = *cpa->vaddr; >> + numpages = cpa->numpages; >> + >> + /* Must avoid aliasing mappings in the highmem code */ >> + kmap_flush_unused(); >> + vm_unmap_aliases(); >> + >> + ret = __change_page_attr_set_clr(cpa, 1); >> + >> + /* Check whether we really changed something */ >> + if (!(cpa->flags & CPA_FLUSHTLB)) >> + goto out; > > That label is used only once - just "return ret;" here. Yup, will do. > >> + /* >> + * On success we use CLFLUSH, when the CPU supports it to >> + * avoid the WBINVD. >> + */ >> + if (!ret && static_cpu_has(X86_FEATURE_CLFLUSH)) >> + cpa_flush_range(addr, numpages, 1); >> + else >> + cpa_flush_all(1); >> + >> +out: >> + return ret; >> +} >> + >> +int set_memory_enc(unsigned long addr, int numpages) >> +{ >> + struct cpa_data cpa; >> + >> + if (!sme_me_mask) >> + return 0; >> + >> + memset(&cpa, 0, sizeof(cpa)); >> + cpa.vaddr = &addr; >> + cpa.numpages = numpages; >> + cpa.mask_set = __pgprot(_PAGE_ENC); >> + cpa.mask_clr = __pgprot(0); >> + cpa.pgd = init_mm.pgd; > > You could move that... 
> >> + >> + return __set_memory_enc_dec(&cpa); >> +} >> +EXPORT_SYMBOL(set_memory_enc); >> + >> +int set_memory_dec(unsigned long addr, int numpages) >> +{ >> + struct cpa_data cpa; >> + >> + if (!sme_me_mask) >> + return 0; >> + >> + memset(&cpa, 0, sizeof(cpa)); >> + cpa.vaddr = &addr; >> + cpa.numpages = numpages; >> + cpa.mask_set = __pgprot(0); >> + cpa.mask_clr = __pgprot(_PAGE_ENC); >> + cpa.pgd = init_mm.pgd; > > ... and that into __set_memory_enc_dec() too and pass in a "bool dec" or > "bool enc" or so which presets mask_set and mask_clr properly. > > See above. I think two functions exported to other in-kernel users are > more than enough. Should I move this functionality into the sme_set_mem_* functions or remove the sme_set_mem_* functions and use the set_memory_* functions directly. The latter means calculating the number of pages, but makes it clear that this works on a page level while the former keeps everything the mem_encrypt.c file (and I can change that to take in a page count so that it is clear about the page boundary usage). Thanks, Tom > -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On Sat, Nov 19, 2016 at 12:48:27PM -0600, Tom Lendacky wrote: > Should I move this functionality into the sme_set_mem_* functions or > remove the sme_set_mem_* functions and use the set_memory_* functions > directly. The latter means calculating the number of pages, but makes > it clear that this works on a page level while the former keeps > everything the mem_encrypt.c file (and I can change that to take in a > page count so that it is clear about the page boundary usage). A user of that interface doesn't care, right? All she wants to do is pass in an address and size and the code will figure out everything. And I think address and size is the simplest two args you can pass. numpages can be calculated from it. As you do in sme_set_mem_*. And you need to do it all in pageattr.c because it uses the cpa wankery in there so you probably want to define int set_memory_dec(unsigned long addr, size_t size) int set_memory_enc(unsigned long addr, size_t size) there which both simply call __set_memory_enc_dec(unsigned long addr, size_t size, bool enc) and it goes and figures out everything, builds the cpa_data and does the mapping. That looks very simple and clean to me.
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h index 61518cf..bfb08e5 100644 --- a/arch/x86/include/asm/cacheflush.h +++ b/arch/x86/include/asm/cacheflush.h @@ -13,6 +13,7 @@ * Executability : eXeutable, NoteXecutable * Read/Write : ReadOnly, ReadWrite * Presence : NotPresent + * Encryption : ENCrypted, DECrypted * * Within a category, the attributes are mutually exclusive. * @@ -48,6 +49,8 @@ int set_memory_ro(unsigned long addr, int numpages); int set_memory_rw(unsigned long addr, int numpages); int set_memory_np(unsigned long addr, int numpages); int set_memory_4k(unsigned long addr, int numpages); +int set_memory_enc(unsigned long addr, int numpages); +int set_memory_dec(unsigned long addr, int numpages); int set_memory_array_uc(unsigned long *addr, int addrinarray); int set_memory_array_wc(unsigned long *addr, int addrinarray); diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 0b40f79..d544481 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -21,6 +21,9 @@ extern unsigned long sme_me_mask; +int sme_set_mem_enc(void *vaddr, unsigned long size); +int sme_set_mem_unenc(void *vaddr, unsigned long size); + void __init sme_early_mem_enc(resource_size_t paddr, unsigned long size); void __init sme_early_mem_dec(resource_size_t paddr, @@ -39,6 +42,16 @@ void __init sme_early_init(void); #define sme_me_mask 0UL +static inline int sme_set_mem_enc(void *vaddr, unsigned long size) +{ + return 0; +} + +static inline int sme_set_mem_unenc(void *vaddr, unsigned long size) +{ + return 0; +} + static inline void __init sme_early_mem_enc(resource_size_t paddr, unsigned long size) { diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 411210d..41cfdf9 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -18,6 +18,7 @@ #include <asm/fixmap.h> #include <asm/setup.h> #include <asm/bootparam.h> +#include <asm/cacheflush.h> 
extern pmdval_t early_pmd_flags; int __init __early_make_pgtable(unsigned long, pmdval_t); @@ -33,6 +34,48 @@ EXPORT_SYMBOL_GPL(sme_me_mask); /* Buffer used for early in-place encryption by BSP, no locking needed */ static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE); +int sme_set_mem_enc(void *vaddr, unsigned long size) +{ + unsigned long addr, numpages; + + if (!sme_me_mask) + return 0; + + addr = (unsigned long)vaddr & PAGE_MASK; + numpages = PAGE_ALIGN(size) >> PAGE_SHIFT; + + /* + * The set_memory_xxx functions take an integer for numpages, make + * sure it doesn't exceed that. + */ + if (numpages > INT_MAX) + return -EINVAL; + + return set_memory_enc(addr, numpages); +} +EXPORT_SYMBOL_GPL(sme_set_mem_enc); + +int sme_set_mem_unenc(void *vaddr, unsigned long size) +{ + unsigned long addr, numpages; + + if (!sme_me_mask) + return 0; + + addr = (unsigned long)vaddr & PAGE_MASK; + numpages = PAGE_ALIGN(size) >> PAGE_SHIFT; + + /* + * The set_memory_xxx functions take an integer for numpages, make + * sure it doesn't exceed that. + */ + if (numpages > INT_MAX) + return -EINVAL; + + return set_memory_dec(addr, numpages); +} +EXPORT_SYMBOL_GPL(sme_set_mem_unenc); + /* * This routine does not change the underlying encryption setting of the * page(s) that map this memory. 
It assumes that eventually the memory is diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index b8e6bb5..babf3a6 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -1729,6 +1729,79 @@ int set_memory_4k(unsigned long addr, int numpages) __pgprot(0), 1, 0, NULL); } +static int __set_memory_enc_dec(struct cpa_data *cpa) +{ + unsigned long addr; + int numpages; + int ret; + + /* People should not be passing in unaligned addresses */ + if (WARN_ONCE(*cpa->vaddr & ~PAGE_MASK, + "misaligned address: %#lx\n", *cpa->vaddr)) + *cpa->vaddr &= PAGE_MASK; + + addr = *cpa->vaddr; + numpages = cpa->numpages; + + /* Must avoid aliasing mappings in the highmem code */ + kmap_flush_unused(); + vm_unmap_aliases(); + + ret = __change_page_attr_set_clr(cpa, 1); + + /* Check whether we really changed something */ + if (!(cpa->flags & CPA_FLUSHTLB)) + goto out; + + /* + * On success we use CLFLUSH, when the CPU supports it to + * avoid the WBINVD. + */ + if (!ret && static_cpu_has(X86_FEATURE_CLFLUSH)) + cpa_flush_range(addr, numpages, 1); + else + cpa_flush_all(1); + +out: + return ret; +} + +int set_memory_enc(unsigned long addr, int numpages) +{ + struct cpa_data cpa; + + if (!sme_me_mask) + return 0; + + memset(&cpa, 0, sizeof(cpa)); + cpa.vaddr = &addr; + cpa.numpages = numpages; + cpa.mask_set = __pgprot(_PAGE_ENC); + cpa.mask_clr = __pgprot(0); + cpa.pgd = init_mm.pgd; + + return __set_memory_enc_dec(&cpa); +} +EXPORT_SYMBOL(set_memory_enc); + +int set_memory_dec(unsigned long addr, int numpages) +{ + struct cpa_data cpa; + + if (!sme_me_mask) + return 0; + + memset(&cpa, 0, sizeof(cpa)); + cpa.vaddr = &addr; + cpa.numpages = numpages; + cpa.mask_set = __pgprot(0); + cpa.mask_clr = __pgprot(_PAGE_ENC); + cpa.pgd = init_mm.pgd; + + return __set_memory_enc_dec(&cpa); +} +EXPORT_SYMBOL(set_memory_dec); + int set_pages_uc(struct page *page, int numpages) { unsigned long addr = (unsigned long)page_address(page);
This patch adds support to change the memory encryption attribute for one or more memory pages. Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com> --- arch/x86/include/asm/cacheflush.h | 3 + arch/x86/include/asm/mem_encrypt.h | 13 ++++++ arch/x86/mm/mem_encrypt.c | 43 +++++++++++++++++++++ arch/x86/mm/pageattr.c | 73 ++++++++++++++++++++++++++++++++++++ 4 files changed, 132 insertions(+) -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html