Message ID | 20181205030931.12037-2-aneesh.kumar@linux.ibm.com (mailing list archive)
---|---
State | New, archived
Series | NestMMU pte upgrade workaround for mprotect
Hi Aneesh,

I love your patch! Yet something to improve:

[auto build test ERROR on linus/master]
[also build test ERROR on v4.20-rc5 next-20181206]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Aneesh-Kumar-K-V/NestMMU-pte-upgrade-workaround-for-mprotect/20181207-040417
config: x86_64-allmodconfig (attached as .config)
compiler: gcc-7 (Debian 7.3.0-1) 7.3.0
reproduce:
        # save the attached .config to linux build tree
        make ARCH=x86_64

All errors (new ones prefixed by >>):

   In file included from arch/x86/include/asm/msr.h:246:0,
                    from arch/x86/include/asm/processor.h:21,
                    from arch/x86/include/asm/cpufeature.h:8,
                    from arch/x86/include/asm/thread_info.h:53,
                    from include/linux/thread_info.h:38,
                    from arch/x86/include/asm/preempt.h:7,
                    from include/linux/preempt.h:81,
                    from include/linux/spinlock.h:51,
                    from include/linux/mmzone.h:8,
                    from include/linux/gfp.h:6,
                    from include/linux/slab.h:15,
                    from include/linux/crypto.h:24,
                    from arch/x86/kernel/asm-offsets.c:9:
   arch/x86/include/asm/paravirt.h: In function 'ptep_modify_prot_start':
>> arch/x86/include/asm/paravirt.h:424:28: error: dereferencing pointer to incomplete type 'struct vm_area_struct'
      struct mm_struct *mm = vma->vm_mm;
                                ^~
   make[2]: *** [arch/x86/kernel/asm-offsets.s] Error 1
   make[2]: Target '__build' not remade because of errors.
   make[1]: *** [prepare0] Error 2
   make[1]: Target 'prepare' not remade because of errors.
   make: *** [sub-make] Error 2

vim +424 arch/x86/include/asm/paravirt.h

   418
   419  #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
   420  static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
   421                                             pte_t *ptep)
   422  {
   423          pteval_t ret;
 > 424          struct mm_struct *mm = vma->vm_mm;
   425
   426          ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, mm, addr, ptep);
   427
   428          return (pte_t) { .pte = ret };
   429  }
   430

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
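The error is straightforward to reproduce outside the kernel tree: a header that only sees a forward declaration of struct vm_area_struct may pass the pointer along, but it cannot look inside it. A minimal standalone sketch (plain C, not the kernel headers; names reused purely for illustration):

struct mm_struct;               /* only forward declarations are visible, */
struct vm_area_struct;          /* as in paravirt.h                       */

/* Passing the pointer through compiles: no struct layout is needed. */
static inline struct vm_area_struct *pass_through(struct vm_area_struct *vma)
{
        return vma;
}

/*
 * Dereferencing it does not: the compiler must know the layout of
 * struct vm_area_struct to locate vm_mm, hence the error above.
 *
 *      static inline struct mm_struct *get_mm(struct vm_area_struct *vma)
 *      {
 *              return vma->vm_mm;   // dereferencing pointer to incomplete type
 *      }
 */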
kbuild test robot <lkp@intel.com> writes:

> Hi Aneesh,
>
> I love your patch! Yet something to improve:
>
> [auto build test ERROR on linus/master]
> [also build test ERROR on v4.20-rc5 next-20181206]
> [if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
>
> url:    https://github.com/0day-ci/linux/commits/Aneesh-Kumar-K-V/NestMMU-pte-upgrade-workaround-for-mprotect/20181207-040417
> config: x86_64-allmodconfig (attached as .config)
> compiler: gcc-7 (Debian 7.3.0-1) 7.3.0
> reproduce:
>         # save the attached .config to linux build tree
>         make ARCH=x86_64
>
> All errors (new ones prefixed by >>):
>
>    In file included from arch/x86/include/asm/msr.h:246:0,
>                     from arch/x86/include/asm/processor.h:21,
>                     from arch/x86/include/asm/cpufeature.h:8,
>                     from arch/x86/include/asm/thread_info.h:53,
>                     from include/linux/thread_info.h:38,
>                     from arch/x86/include/asm/preempt.h:7,
>                     from include/linux/preempt.h:81,
>                     from include/linux/spinlock.h:51,
>                     from include/linux/mmzone.h:8,
>                     from include/linux/gfp.h:6,
>                     from include/linux/slab.h:15,
>                     from include/linux/crypto.h:24,
>                     from arch/x86/kernel/asm-offsets.c:9:
>    arch/x86/include/asm/paravirt.h: In function 'ptep_modify_prot_start':
>>> arch/x86/include/asm/paravirt.h:424:28: error: dereferencing pointer to incomplete type 'struct vm_area_struct'
>       struct mm_struct *mm = vma->vm_mm;
>                                 ^~
>    make[2]: *** [arch/x86/kernel/asm-offsets.s] Error 1
>    make[2]: Target '__build' not remade because of errors.
>    make[1]: *** [prepare0] Error 2
>    make[1]: Target 'prepare' not remade because of errors.
>    make: *** [sub-make] Error 2
>

The fix turned out to be more code changes than I expected. Instead of
adding mm_types.h to paravirt.h, I changed most of the related functions
to take vm_area_struct. That brings all ptep_modify_prot_start variants
to take vm_area_struct as an argument, and IMHO that is better. What do
you think?
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 0d75a4f60500..28152236a65b 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -421,9 +421,8 @@ static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned
 					   pte_t *ptep)
 {
 	pteval_t ret;
-	struct mm_struct *mm = vma->vm_mm;
 
-	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, mm, addr, ptep);
+	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);
 
 	return (pte_t) { .pte = ret };
 }
@@ -431,14 +430,13 @@ static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned
 static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
 					   pte_t *ptep, pte_t old_pte, pte_t pte)
 {
-	struct mm_struct *mm = vma->vm_mm;
 
 	if (sizeof(pteval_t) > sizeof(long))
 		/* 5 arg words */
-		pv_ops.mmu.ptep_modify_prot_commit(mm, addr, ptep, pte);
+		pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte);
 	else
 		PVOP_VCALL4(mmu.ptep_modify_prot_commit,
-			    mm, addr, ptep, pte.pte);
+			    vma, addr, ptep, pte.pte);
 }
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 26942ad63830..609a728ec809 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -55,6 +55,7 @@ struct task_struct;
 struct cpumask;
 struct flush_tlb_info;
 struct mmu_gather;
+struct vm_area_struct;
 
 /*
  * Wrapper type for pointers to code which uses the non-standard
@@ -254,9 +255,9 @@ struct pv_mmu_ops {
 			   pte_t *ptep, pte_t pteval);
 	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
 
-	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
+	pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr,
 					pte_t *ptep);
-	void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
+	void (*ptep_modify_prot_commit)(struct vm_area_struct *vma, unsigned long addr,
 					pte_t *ptep, pte_t pte);
 
 	struct paravirt_callee_save pte_val;
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index a7e47cf7ec6c..6e4c6bd62203 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -17,8 +17,8 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 
 void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
 
-pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
+pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
+void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
 				 pte_t *ptep, pte_t pte);
 
 unsigned long xen_read_cr2_direct(void);
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index a5d7ed125337..b7c89619cfc9 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -306,20 +306,20 @@ static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 	__xen_set_pte(ptep, pteval);
 }
 
-pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
+pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
 				 unsigned long addr, pte_t *ptep)
 {
 	/* Just return the pte as-is.  We preserve the bits on commit */
-	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
+	trace_xen_mmu_ptep_modify_prot_start(vma->vm_mm, addr, ptep, *ptep);
 	return *ptep;
 }
 
-void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
+void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
 				 pte_t *ptep, pte_t pte)
 {
 	struct mmu_update u;
 
-	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
+	trace_xen_mmu_ptep_modify_prot_commit(vma->vm_mm, addr, ptep, pte);
 	xen_mc_batch();
 
 	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 37039e918f17..3ff8b1c3f003 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -568,7 +568,7 @@ static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 	return 0;
 }
 
-static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
+static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
 					     unsigned long addr,
 					     pte_t *ptep)
 {
@@ -577,10 +577,10 @@ static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
 	 * non-present, preventing the hardware from asynchronously
 	 * updating it.
 	 */
-	return ptep_get_and_clear(mm, addr, ptep);
+	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
 }
 
-static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
+static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
 					     unsigned long addr,
 					     pte_t *ptep, pte_t pte)
 {
@@ -588,7 +588,7 @@ static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
 	 * The pte is non-present, so there's no hardware state to
 	 * preserve.
 	 */
-	set_pte_at(mm, addr, ptep, pte);
+	set_pte_at(vma->vm_mm, addr, ptep, pte);
 }
 
 #ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
@@ -610,7 +610,7 @@ static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
 					   unsigned long addr,
 					   pte_t *ptep)
 {
-	return __ptep_modify_prot_start(vma->vm_mm, addr, ptep);
+	return __ptep_modify_prot_start(vma, addr, ptep);
 }
 
 /*
@@ -621,7 +621,7 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
 					   unsigned long addr,
 					   pte_t *ptep, pte_t old_pte, pte_t pte)
 {
-	__ptep_modify_prot_commit(vma->vm_mm, addr, ptep, pte);
+	__ptep_modify_prot_commit(vma, addr, ptep, pte);
 }
 #endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
 #endif /* CONFIG_MMU */
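The diff above relies on the same property as the failure: the indirection layers (paravirt_types.h, the pv_ops hooks, the generic wrappers) only forward the vma pointer, and vma->vm_mm is dereferenced in code that already sees the full definition. A simplified, self-contained sketch of that pattern, with made-up names standing in for the real ops table:

/* "header" side: only a forward declaration, as in paravirt_types.h */
struct vm_area_struct;

struct mmu_ops {
        /* the hook takes the vma itself; the header never looks inside it */
        void (*ptep_prot_commit)(struct vm_area_struct *vma, unsigned long addr);
};

/* "implementation" side: the full definitions are visible here */
struct mm_struct { int users; };
struct vm_area_struct { struct mm_struct *vm_mm; };

static void demo_prot_commit(struct vm_area_struct *vma, unsigned long addr)
{
        struct mm_struct *mm = vma->vm_mm;      /* fine: complete type here */

        (void)mm;
        (void)addr;
}

struct mmu_ops demo_ops = { .ptep_prot_commit = demo_prot_commit };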
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 063732414dfb..5d730199e37b 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1069,8 +1069,8 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 }
 
 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
-pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *);
-void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t);
+pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
+void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long, pte_t *, pte_t);
 
 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
 static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index f2cc7da473e4..29c0a21cd34a 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -301,12 +301,13 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 }
 EXPORT_SYMBOL(ptep_xchg_lazy);
 
-pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
+pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
 			     pte_t *ptep)
 {
 	pgste_t pgste;
 	pte_t old;
 	int nodat;
+	struct mm_struct *mm = vma->vm_mm;
 
 	preempt_disable();
 	pgste = ptep_xchg_start(mm, addr, ptep);
@@ -320,10 +321,11 @@ pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
 }
 EXPORT_SYMBOL(ptep_modify_prot_start);
 
-void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
+void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
 			     pte_t *ptep, pte_t pte)
 {
 	pgste_t pgste;
+	struct mm_struct *mm = vma->vm_mm;
 
 	if (!MACHINE_HAS_NX)
 		pte_val(pte) &= ~_PAGE_NOEXEC;
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 4bf42f9e4eea..1154f154025d 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -417,19 +417,22 @@ static inline pgdval_t pgd_val(pgd_t pgd)
 }
 
 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
-static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
+static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
 					   pte_t *ptep)
 {
 	pteval_t ret;
+	struct mm_struct *mm = vma->vm_mm;
 
 	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, mm, addr, ptep);
 
 	return (pte_t) { .pte = ret };
 }
 
-static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
+static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
 					   pte_t *ptep, pte_t pte)
 {
+	struct mm_struct *mm = vma->vm_mm;
+
 	if (sizeof(pteval_t) > sizeof(long))
 		/* 5 arg words */
 		pv_ops.mmu.ptep_modify_prot_commit(mm, addr, ptep, pte);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 47c3764c469b..9952d7185170 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -940,10 +940,10 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
 	pte_t ptent = *pte;
 
 	if (pte_present(ptent)) {
-		ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte);
+		ptent = ptep_modify_prot_start(vma, addr, pte);
 		ptent = pte_wrprotect(ptent);
 		ptent = pte_clear_soft_dirty(ptent);
-		ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent);
+		ptep_modify_prot_commit(vma, addr, pte, ptent);
 	} else if (is_swap_pte(ptent)) {
 		ptent = pte_swp_clear_soft_dirty(ptent);
 		set_pte_at(vma->vm_mm, addr, pte, ptent);
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 359fb935ded6..c9897dcc46c4 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -606,22 +606,22 @@ static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
  * queue the update to be done at some later time.  The update must be
  * actually committed before the pte lock is released, however.
  */
-static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
+static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
 					   unsigned long addr,
 					   pte_t *ptep)
 {
-	return __ptep_modify_prot_start(mm, addr, ptep);
+	return __ptep_modify_prot_start(vma->vm_mm, addr, ptep);
 }
 
 /*
  * Commit an update to a pte, leaving any hardware-controlled bits in
  * the PTE unmodified.
  */
-static inline void ptep_modify_prot_commit(struct mm_struct *mm,
+static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
 					   unsigned long addr,
 					   pte_t *ptep, pte_t pte)
 {
-	__ptep_modify_prot_commit(mm, addr, ptep, pte);
+	__ptep_modify_prot_commit(vma->vm_mm, addr, ptep, pte);
 }
 #endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
 #endif /* CONFIG_MMU */
diff --git a/mm/memory.c b/mm/memory.c
index 4ad2d293ddc2..d36b0eaa7862 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3588,12 +3588,12 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	 * Make it present again, Depending on how arch implementes non
 	 * accessible ptes, some can allow access by kernel mode.
 	 */
-	pte = ptep_modify_prot_start(vma->vm_mm, vmf->address, vmf->pte);
+	pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
 	pte = pte_modify(pte, vma->vm_page_prot);
 	pte = pte_mkyoung(pte);
 	if (was_writable)
 		pte = pte_mkwrite(pte);
-	ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte);
+	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, pte);
 	update_mmu_cache(vma, vmf->address, vmf->pte);
 
 	page = vm_normal_page(vma, vmf->address, pte);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 6d331620b9e5..a301d4c83d3c 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -110,7 +110,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				continue;
 			}
 
-			ptent = ptep_modify_prot_start(mm, addr, pte);
+			ptent = ptep_modify_prot_start(vma, addr, pte);
 			ptent = pte_modify(ptent, newprot);
 			if (preserve_write)
 				ptent = pte_mk_savedwrite(ptent);
@@ -121,7 +121,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 			    !(vma->vm_flags & VM_SOFTDIRTY))) {
 				ptent = pte_mkwrite(ptent);
 			}
-			ptep_modify_prot_commit(mm, addr, pte, ptent);
+			ptep_modify_prot_commit(vma, addr, pte, ptent);
 			pages++;
 		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
 			swp_entry_t entry = pte_to_swp_entry(oldpte);
Some architectures may want to call flush_tlb_range() from these helpers.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 arch/s390/include/asm/pgtable.h | 4 ++--
 arch/s390/mm/pgtable.c          | 6 ++++--
 arch/x86/include/asm/paravirt.h | 7 +++++--
 fs/proc/task_mmu.c              | 4 ++--
 include/asm-generic/pgtable.h   | 8 ++++----
 mm/memory.c                     | 4 ++--
 mm/mprotect.c                   | 4 ++--
 7 files changed, 21 insertions(+), 16 deletions(-)
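With the vma in hand, an architecture-private version of these helpers can issue such a range flush itself. A hedged sketch of what an override might look like; arch_needs_pte_upgrade_flush() is a made-up placeholder, not the actual powerpc/NestMMU code:

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
                                           unsigned long addr, pte_t *ptep)
{
        /* make the pte non-present for the duration of the transaction */
        pte_t old = ptep_get_and_clear(vma->vm_mm, addr, ptep);

        /*
         * Hypothetical predicate: flush the mapping so no stale
         * translation survives the protection upgrade.
         */
        if (arch_needs_pte_upgrade_flush())
                flush_tlb_range(vma, addr, addr + PAGE_SIZE);

        return old;
}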