Message ID | 20210416154106.23721-5-kirill.shutemov@linux.intel.com
State | New, archived
Series | TDX and guest memory unmapping
On 4/16/21 8:40 AM, Kirill A. Shutemov wrote:
> Mirror SEV, use SWIOTLB always if KVM memory protection is enabled.
...
>  arch/x86/mm/mem_encrypt.c        | 44 ---------------------------
>  arch/x86/mm/mem_encrypt_common.c | 48 ++++++++++++++++++++++++++++++

The changelog needs to at least mention what's going on here. It
doesn't prepare me at all for having code move around.

> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> index d197b3beb904..c51d14db5620 100644
> --- a/arch/x86/Kconfig
> +++ b/arch/x86/Kconfig
> @@ -812,6 +812,7 @@ config KVM_GUEST
>  	select ARCH_CPUIDLE_HALTPOLL
>  	select X86_HV_CALLBACK_VECTOR
>  	select X86_MEM_ENCRYPT_COMMON
> +	select SWIOTLB
>  	default y
>  	help
>  	  This option enables various optimizations for running under the KVM

So, this feature is always compiled in with KVM. Could you say a
couple of things about that? Why did you decide not to have a Kconfig
option for it?

> diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
> index 31c4df123aa0..a748b30c2f23 100644
> --- a/arch/x86/include/asm/mem_encrypt.h
> +++ b/arch/x86/include/asm/mem_encrypt.h
> @@ -47,10 +47,8 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
>
>  void __init mem_encrypt_free_decrypted_mem(void);
>
> -/* Architecture __weak replacement functions */
> -void __init mem_encrypt_init(void);
> -
>  void __init sev_es_init_vc_handling(void);
> +
>  bool sme_active(void);
>  bool sev_active(void);
>  bool sev_es_active(void);
> @@ -91,6 +89,9 @@ static inline void mem_encrypt_free_decrypted_mem(void) { }
>
>  #endif /* CONFIG_AMD_MEM_ENCRYPT */
>
> +/* Architecture __weak replacement functions */
> +void __init mem_encrypt_init(void);

FWIW, I'd rather have the code movement in separate patches from the
functional changes.

>  /*
>   * The __sme_pa() and __sme_pa_nodebug() macros are meant for use when
>   * writing to or comparing values from the cr3 register. Having the
> diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
> index aed6034fcac1..ba179f5ca198 100644
> --- a/arch/x86/kernel/kvm.c
> +++ b/arch/x86/kernel/kvm.c
> @@ -26,6 +26,7 @@
>  #include <linux/kprobes.h>
>  #include <linux/nmi.h>
>  #include <linux/swait.h>
> +#include <linux/swiotlb.h>
>  #include <asm/timer.h>
>  #include <asm/cpu.h>
>  #include <asm/traps.h>
> @@ -765,6 +766,7 @@ static void __init kvm_init_platform(void)
>  		pr_info("KVM memory protection enabled\n");
>  		mem_protected = true;
>  		setup_force_cpu_cap(X86_FEATURE_KVM_MEM_PROTECTED);
> +		swiotlb_force = SWIOTLB_FORCE;
>  	}
>  }
>
> diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
> index c2cfa5e7c152..814060a6ceb0 100644
> --- a/arch/x86/kernel/pci-swiotlb.c
> +++ b/arch/x86/kernel/pci-swiotlb.c
> @@ -13,6 +13,7 @@
>  #include <asm/dma.h>
>  #include <asm/xen/swiotlb-xen.h>
>  #include <asm/iommu_table.h>
> +#include <asm/kvm_para.h>
>
>  int swiotlb __read_mostly;
>
> @@ -49,7 +50,7 @@ int __init pci_swiotlb_detect_4gb(void)
>  	 * buffers are allocated and used for devices that do not support
>  	 * the addressing range required for the encryption mask.
>  	 */
> -	if (sme_active())
> +	if (sme_active() || kvm_mem_protected())
>  		swiotlb = 1;
>
>  	return swiotlb;

While I don't doubt you got it right, it would be nice to also explain
in the changelog why you manipulate both 'swiotlb_force' and 'swiotlb'.
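As background on that question: in roughly the v5.12-era kernel this series
targets, the two knobs feed different consumers. The sketch below is a
condensed paraphrase, not verbatim kernel source — 'swiotlb' only tells the
x86 init code to keep the bounce buffer after IOMMU detection, while
'swiotlb_force' makes dma-direct bounce every streaming mapping regardless
of the device's DMA mask.

#include <linux/swiotlb.h>
#include <linux/dma-direct.h>

/*
 * arch/x86/kernel/pci-swiotlb.c (paraphrased): 'swiotlb' decides whether
 * the bounce buffer survives IOMMU detection at all.
 */
void __init pci_swiotlb_late_init(void)
{
	if (!swiotlb)
		swiotlb_exit();		/* an IOMMU took over; free the buffer */
	else
		swiotlb_print_info();
}

/*
 * kernel/dma/direct.h (paraphrased, simplified): 'swiotlb_force' makes
 * every streaming mapping bounce, even for devices whose DMA mask covers
 * all of memory.
 */
static inline dma_addr_t dma_direct_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;

	if (unlikely(swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_map(dev, phys, size, dir, attrs);

	/* simplified: the real code also checks dma_capable() and friends */
	return phys_to_dma(dev, phys);
}

So 'swiotlb = 1' in pci_swiotlb_detect_4gb() keeps the buffer allocated, and
'swiotlb_force = SWIOTLB_FORCE' is what actually routes DMA through it for
every device — the same pairing SEV guests use, since devices cannot reach
guest-private memory directly.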
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index 9ca477b9b8ba..3478f20fb46f 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -409,47 +409,3 @@ void __init mem_encrypt_free_decrypted_mem(void)
>
>  	free_init_pages("unused decrypted", vaddr, vaddr_end);
>  }
> -
> -static void print_mem_encrypt_feature_info(void)
> -{
> -	pr_info("AMD Memory Encryption Features active:");
> -
> -	/* Secure Memory Encryption */
> -	if (sme_active()) {
> -		/*
> -		 * SME is mutually exclusive with any of the SEV
> -		 * features below.
> -		 */
> -		pr_cont(" SME\n");
> -		return;
> -	}
> -
> -	/* Secure Encrypted Virtualization */
> -	if (sev_active())
> -		pr_cont(" SEV");
> -
> -	/* Encrypted Register State */
> -	if (sev_es_active())
> -		pr_cont(" SEV-ES");
> -
> -	pr_cont("\n");
> -}
> -
> -/* Architecture __weak replacement functions */
> -void __init mem_encrypt_init(void)
> -{
> -	if (!sme_me_mask)
> -		return;
> -
> -	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
> -	swiotlb_update_mem_attributes();
> -
> -	/*
> -	 * With SEV, we need to unroll the rep string I/O instructions.
> -	 */
> -	if (sev_active())
> -		static_branch_enable(&sev_enable_key);
> -
> -	print_mem_encrypt_feature_info();
> -}
> -
> diff --git a/arch/x86/mm/mem_encrypt_common.c b/arch/x86/mm/mem_encrypt_common.c
> index 6bf0718bb72a..351b77361a5d 100644
> --- a/arch/x86/mm/mem_encrypt_common.c
> +++ b/arch/x86/mm/mem_encrypt_common.c
> @@ -11,6 +11,7 @@
>  #include <linux/mem_encrypt.h>
>  #include <linux/dma-direct.h>
>  #include <asm/kvm_para.h>
> +#include <asm/mem_encrypt.h>
>
>  /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
>  bool force_dma_unencrypted(struct device *dev)
> @@ -37,3 +38,50 @@ bool force_dma_unencrypted(struct device *dev)
>
>  	return false;
>  }
> +
> +static void print_mem_encrypt_feature_info(void)
> +{
> +	if (kvm_mem_protected()) {
> +		pr_info("KVM memory protection enabled\n");
> +		return;
> +	}

I understand that they're touching similar areas of code, but I'm a bit
unnerved with memory protection being in all these "encryption"
functions and files. I think some thoughtful renaming is in order.

> +	pr_info("AMD Memory Encryption Features active:");
> +
> +	/* Secure Memory Encryption */
> +	if (sme_active()) {
> +		/*
> +		 * SME is mutually exclusive with any of the SEV
> +		 * features below.
> +		 */
> +		pr_cont(" SME\n");
> +		return;
> +	}
> +
> +	/* Secure Encrypted Virtualization */
> +	if (sev_active())
> +		pr_cont(" SEV");
> +
> +	/* Encrypted Register State */
> +	if (sev_es_active())
> +		pr_cont(" SEV-ES");
> +
> +	pr_cont("\n");
> +}

This, for instance, really shouldn't be in common code. It should be
in an AMD-specific area.

> +void __init mem_encrypt_init(void)
> +{
> +	if (!sme_me_mask && !kvm_mem_protected())
> +		return;
> +
> +	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
> +	swiotlb_update_mem_attributes();
> +
> +	/*
> +	 * With SEV, we need to unroll the rep string I/O instructions.
> +	 */
> +	if (sev_active())
> +		static_branch_enable(&sev_enable_key);
> +
> +	print_mem_encrypt_feature_info();
> +}

This function is called like this:

> 	/*
> 	 * This needs to be called before any devices perform DMA
> 	 * operations that might use the SWIOTLB bounce buffers. It will
> 	 * mark the bounce buffers as decrypted so that their usage will
> 	 * not cause "plain-text" data to be decrypted when accessed.
> 	 */
> 	mem_encrypt_init();

So, maybe this should be x86_swiotlb_init() or something.
Then, move the print_mem_encrypt_feature_info() elsewhere, probably back out to mem_init(). Maybe even just call it print_arch_mem_features() or something.
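To make the suggested end state easier to picture, here is a rough sketch
along those lines. It is illustrative only: x86_swiotlb_init() and
print_arch_mem_features() are the names floated above, while
amd_print_mem_encrypt_features() and the exact file placement are
hypothetical, not anything in the patch.

#include <linux/swiotlb.h>
#include <asm/mem_encrypt.h>
#include <asm/kvm_para.h>

/*
 * arch/x86/mm/mem_encrypt_common.c (sketch): only the genuinely shared
 * SWIOTLB setup, under the name suggested above.
 */
void __init x86_swiotlb_init(void)
{
	if (!sme_me_mask && !kvm_mem_protected())
		return;

	/* Mark the SWIOTLB bounce buffers as decrypted/shared. */
	swiotlb_update_mem_attributes();

	/* SEV rep-string I/O unrolling is AMD-specific and could also move. */
	if (sev_active())
		static_branch_enable(&sev_enable_key);
}

/*
 * arch/x86/mm/mem_encrypt.c (sketch): the AMD feature banner stays in
 * AMD-only code. The helper name here is hypothetical.
 */
void __init amd_print_mem_encrypt_features(void)
{
	pr_info("AMD Memory Encryption Features active:");

	if (sme_active()) {
		/* SME is mutually exclusive with the SEV features below. */
		pr_cont(" SME\n");
		return;
	}
	if (sev_active())
		pr_cont(" SEV");
	if (sev_es_active())
		pr_cont(" SEV-ES");
	pr_cont("\n");
}

/* Called from mem_init() (sketch), per the suggestion above. */
void __init print_arch_mem_features(void)
{
	if (kvm_mem_protected())
		pr_info("KVM memory protection enabled\n");
	else if (sme_me_mask)
		amd_print_mem_encrypt_features();
}

With a split like this, the start_kernel() call site quoted above would only
be doing SWIOTLB work, and the banner printing would move out to x86's
mem_init(), which is what the renaming suggestion is getting at.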
Mirror SEV, use SWIOTLB always if KVM memory protection is enabled.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 arch/x86/Kconfig                   |  1 +
 arch/x86/include/asm/mem_encrypt.h |  7 +++--
 arch/x86/kernel/kvm.c              |  2 ++
 arch/x86/kernel/pci-swiotlb.c      |  3 +-
 arch/x86/mm/mem_encrypt.c          | 44 ---------------------------
 arch/x86/mm/mem_encrypt_common.c   | 48 ++++++++++++++++++++++++++++++
 6 files changed, 57 insertions(+), 48 deletions(-)