Message ID | 20180717112029.42378-12-kirill.shutemov@linux.intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
> --- a/arch/x86/mm/mktme.c > +++ b/arch/x86/mm/mktme.c > @@ -1,3 +1,4 @@ > +#include <linux/mm.h> > #include <asm/mktme.h> > > phys_addr_t mktme_keyid_mask; > @@ -37,3 +38,14 @@ struct page_ext_operations page_mktme_ops = { > .need = need_page_mktme, > .init = init_page_mktme, > }; > + > +int vma_keyid(struct vm_area_struct *vma) > +{ > + pgprotval_t prot; > + > + if (!mktme_enabled()) > + return 0; > + > + prot = pgprot_val(vma->vm_page_prot); > + return (prot & mktme_keyid_mask) >> mktme_keyid_shift; > +} I'm a bit surprised this isn't inlined. Not that function calls are expensive, but we *could* entirely avoid them using the normal pattern of: // In the header: static inline vma_keyid(...) { if (!mktme_enabled()) return 0; return __vma_keyid(...); // <- the .c file version }
On Wed, Jul 18, 2018 at 04:40:14PM -0700, Dave Hansen wrote: > > --- a/arch/x86/mm/mktme.c > > +++ b/arch/x86/mm/mktme.c > > @@ -1,3 +1,4 @@ > > +#include <linux/mm.h> > > #include <asm/mktme.h> > > > > phys_addr_t mktme_keyid_mask; > > @@ -37,3 +38,14 @@ struct page_ext_operations page_mktme_ops = { > > .need = need_page_mktme, > > .init = init_page_mktme, > > }; > > + > > +int vma_keyid(struct vm_area_struct *vma) > > +{ > > + pgprotval_t prot; > > + > > + if (!mktme_enabled()) > > + return 0; > > + > > + prot = pgprot_val(vma->vm_page_prot); > > + return (prot & mktme_keyid_mask) >> mktme_keyid_shift; > > +} > > I'm a bit surprised this isn't inlined. Not that function calls are > expensive, but we *could* entirely avoid them using the normal pattern of: > > // In the header: > static inline vma_keyid(...) > { > if (!mktme_enabled()) > return 0; > > return __vma_keyid(...); // <- the .c file version > } Okay. I'll do this. But it would be a macro: <asm/mktme.h> gets included very early, so we cannot really use jump label code there directly.
diff --git a/arch/x86/include/asm/mktme.h b/arch/x86/include/asm/mktme.h index 7266494b4f0a..f0b7844e36a4 100644 --- a/arch/x86/include/asm/mktme.h +++ b/arch/x86/include/asm/mktme.h @@ -4,6 +4,8 @@ #include <linux/types.h> #include <linux/page_ext.h> +struct vm_area_struct; + #ifdef CONFIG_X86_INTEL_MKTME extern phys_addr_t mktme_keyid_mask; extern int mktme_nr_keyids; @@ -14,6 +16,9 @@ extern struct page_ext_operations page_mktme_ops; #define page_keyid page_keyid int page_keyid(const struct page *page); +#define vma_keyid vma_keyid +int vma_keyid(struct vm_area_struct *vma); + #else #define mktme_keyid_mask ((phys_addr_t)0) #define mktme_nr_keyids 0 diff --git a/arch/x86/mm/mktme.c b/arch/x86/mm/mktme.c index 09cbff678b9f..a1f40ee61b25 100644 --- a/arch/x86/mm/mktme.c +++ b/arch/x86/mm/mktme.c @@ -1,3 +1,4 @@ +#include <linux/mm.h> #include <asm/mktme.h> phys_addr_t mktme_keyid_mask; @@ -37,3 +38,14 @@ struct page_ext_operations page_mktme_ops = { .need = need_page_mktme, .init = init_page_mktme, }; + +int vma_keyid(struct vm_area_struct *vma) +{ + pgprotval_t prot; + + if (!mktme_enabled()) + return 0; + + prot = pgprot_val(vma->vm_page_prot); + return (prot & mktme_keyid_mask) >> mktme_keyid_shift; +}
We store the KeyID in the upper bits of vm_page_prot that match the position of the KeyID in the PTE. vma_keyid() extracts the KeyID from vm_page_prot. With the KeyID in vm_page_prot we don't need to modify any page table helper to propagate the KeyID to page table entries. Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> --- arch/x86/include/asm/mktme.h | 5 +++++ arch/x86/mm/mktme.c | 12 ++++++++++++ 2 files changed, 17 insertions(+)