[RFCv2,16/16] mm: Do not use zero page for VM_KVM_PROTECTED VMAs

Message ID: 20201020061859.18385-17-kirill.shutemov@linux.intel.com
State New, archived
Series: KVM protected memory extension

Commit Message

Kirill A. Shutemov, Oct. 20, 2020, 6:18 a.m. UTC
Presence of zero pages in the mapping would disclose the content of the
mapping: any page backed by the shared zero page is known to read as
zeroes. Don't use zero pages if KVM memory protection is enabled.
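
The leak exists because the zero page is a single, globally shared frame:
every read fault on untouched anonymous memory installs a mapping of the
same physical page, so anything that can observe the physical mappings
learns which pages of the VMA read as zeroes. A minimal userspace sketch
of that sharing (an illustration added for this write-up, not part of the
patch; run as root, since unprivileged reads of /proc/self/pagemap return
PFN 0):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* The PFN lives in bits 0-54 of a pagemap entry (root only). */
    static uint64_t pfn_of(int fd, void *addr)
    {
            uint64_t entry = 0;
            off_t off = (uintptr_t)addr / sysconf(_SC_PAGESIZE) * 8;

            if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry))
                    return 0;
            return entry & ((1ULL << 55) - 1);
    }

    int main(void)
    {
            int fd = open("/proc/self/pagemap", O_RDONLY);
            char *a = mmap(NULL, 4096, PROT_READ,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            char *b = mmap(NULL, 4096, PROT_READ,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            /* Read faults: both pages get backed by the shared zero page. */
            (void)*(volatile char *)a;
            (void)*(volatile char *)b;

            uint64_t pa = pfn_of(fd, a), pb = pfn_of(fd, b);
            printf("pfn(a)=%#llx pfn(b)=%#llx%s\n",
                   (unsigned long long)pa, (unsigned long long)pb,
                   pa && pa == pb ? " (same frame: the shared zero page)" : "");
            return 0;
    }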

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 arch/s390/include/asm/pgtable.h | 2 +-
 include/linux/mm.h              | 4 ++--
 mm/huge_memory.c                | 3 +--
 mm/memory.c                     | 3 +--
 4 files changed, 5 insertions(+), 7 deletions(-)
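
The mechanical change is an interface switch: the per-mm hook
mm_forbids_zeropage(mm) becomes a per-VMA hook vma_forbids_zeropage(vma),
since KVM memory protection is a property of the individual mapping, not
of the whole address space. The vma_is_kvm_protected() helper used by the
new generic default is introduced earlier in this series; a plausible
sketch, assuming VM_KVM_PROTECTED is the vm_flags bit the series adds
(the exact definition lives in a preceding patch, not shown here):

    static inline bool vma_is_kvm_protected(struct vm_area_struct *vma)
    {
            return vma->vm_flags & VM_KVM_PROTECTED;
    }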

Patch

diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index b55561cc8786..72ca3b3f04cb 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -543,7 +543,7 @@ static inline int mm_alloc_pgste(struct mm_struct *mm)
  * In the case that a guest uses storage keys
  * faults should no longer be backed by zero pages
  */
-#define mm_forbids_zeropage mm_has_pgste
+#define vma_forbids_zeropage(vma) mm_has_pgste(vma->vm_mm)
 static inline int mm_uses_skeys(struct mm_struct *mm)
 {
 #ifdef CONFIG_PGSTE
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 74efc51e63f0..ee713b7c2819 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -130,8 +130,8 @@ extern int mmap_rnd_compat_bits __read_mostly;
  * s390 does this to prevent multiplexing of hardware bits
  * related to the physical page in case of virtualization.
  */
-#ifndef mm_forbids_zeropage
-#define mm_forbids_zeropage(X)	(0)
+#ifndef vma_forbids_zeropage
+#define vma_forbids_zeropage(vma) vma_is_kvm_protected(vma)
 #endif
 
 /*
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 40974656cb43..383614b24c4f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -709,8 +709,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 		return VM_FAULT_OOM;
 	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
 		return VM_FAULT_OOM;
-	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
-			!mm_forbids_zeropage(vma->vm_mm) &&
+	if (!(vmf->flags & FAULT_FLAG_WRITE) && !vma_forbids_zeropage(vma) &&
 			transparent_hugepage_use_zero_page()) {
 		pgtable_t pgtable;
 		struct page *zero_page;
diff --git a/mm/memory.c b/mm/memory.c
index e28bd5f902a7..9907ffe00490 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3495,8 +3495,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 		return 0;
 
 	/* Use the zero-page for reads */
-	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
-			!mm_forbids_zeropage(vma->vm_mm)) {
+	if (!(vmf->flags & FAULT_FLAG_WRITE) && !vma_forbids_zeropage(vma)) {
 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
 						vma->vm_page_prot));
 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
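
A note on how the two definitions coexist: the generic default in
include/linux/mm.h is wrapped in #ifndef, so an architecture that defines
the macro in its own headers, as s390 does above to keep its pgste-based
rule, wins over the fallback. Schematically (a sketch of the include-order
mechanism, not verbatim kernel code):

    /* arch/s390/include/asm/pgtable.h: seen first via the arch headers */
    #define vma_forbids_zeropage(vma) mm_has_pgste(vma->vm_mm)

    /* include/linux/mm.h: generic fallback, compiled out on s390 */
    #ifndef vma_forbids_zeropage
    #define vma_forbids_zeropage(vma) vma_is_kvm_protected(vma)
    #endif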