Presence of zero pages in the mapping would disclose content of the
mapping. Don't use them if KVM memory protection is enabled.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 arch/s390/include/asm/pgtable.h | 2 +-
 include/linux/mm.h              | 4 ++--
 mm/huge_memory.c                | 3 +--
 mm/memory.c                     | 3 +--
 4 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -543,7 +543,7 @@ static inline int mm_alloc_pgste(struct mm_struct *mm)
  * In the case that a guest uses storage keys
  * faults should no longer be backed by zero pages
  */
-#define mm_forbids_zeropage mm_has_pgste
+#define vma_forbids_zeropage(vma) mm_has_pgste((vma)->vm_mm)
 static inline int mm_uses_skeys(struct mm_struct *mm)
 {
 #ifdef CONFIG_PGSTE

diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -130,8 +130,8 @@ extern int mmap_rnd_compat_bits __read_mostly;
  * s390 does this to prevent multiplexing of hardware bits
  * related to the physical page in case of virtualization.
  */
-#ifndef mm_forbids_zeropage
-#define mm_forbids_zeropage(X)	(0)
+#ifndef vma_forbids_zeropage
+#define vma_forbids_zeropage(vma) vma_is_kvm_protected(vma)
 #endif
 
 /*
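
The generic fallback above relies on vma_is_kvm_protected(), which is
not defined in this patch; it comes from an earlier patch in the
series. As a rough sketch of what the helper is expected to look like
(the VM_KVM_PROTECTED flag name and the exact definition are
assumptions about the rest of the series, not something this patch
establishes):

	/*
	 * Sketch only, not part of this patch: assumes the series marks
	 * protected guest mappings with a dedicated vm_flags bit.
	 */
	static inline bool vma_is_kvm_protected(struct vm_area_struct *vma)
	{
		return vma->vm_flags & VM_KVM_PROTECTED;
	}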

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -709,8 +709,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 		return VM_FAULT_OOM;
 	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
 		return VM_FAULT_OOM;
-	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
-			!mm_forbids_zeropage(vma->vm_mm) &&
+	if (!(vmf->flags & FAULT_FLAG_WRITE) && !vma_forbids_zeropage(vma) &&
 			transparent_hugepage_use_zero_page()) {
 		pgtable_t pgtable;
 		struct page *zero_page;

diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3495,8 +3495,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 		return 0;
 
 	/* Use the zero-page for reads */
-	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
-			!mm_forbids_zeropage(vma->vm_mm)) {
+	if (!(vmf->flags & FAULT_FLAG_WRITE) && !vma_forbids_zeropage(vma)) {
 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
 						vma->vm_page_prot));
 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
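
Both fault paths now ask the same question; purely as a reading aid,
not as part of the patch, the shared predicate could be written as:

	/*
	 * Sketch only: a read fault may be backed by the shared zero page
	 * only when nothing forbids it for this particular vma.
	 */
	static inline bool zero_page_ok_for_read_fault(struct vm_fault *vmf)
	{
		return !(vmf->flags & FAULT_FLAG_WRITE) &&
		       !vma_forbids_zeropage(vmf->vma);
	}

The switch from mm_forbids_zeropage(vma->vm_mm) to a vma-based hook is
presumably needed because KVM memory protection is a per-mapping
property, whereas the existing s390 PGSTE case is per-mm; the s390
definition simply forwards to the owning mm.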