Message ID | 20230612210423.18611-9-vishal.moola@gmail.com (mailing list archive) |
---|---|
State | Superseded |
Series | Split ptdesc from struct page |
Context | Check | Description |
---|---|---|
conchuod/tree_selection | fail | Failed to apply to next/pending-fixes, riscv/for-next or riscv/master |
On Mon, Jun 12, 2023 at 02:03:57PM -0700, Vishal Moola (Oracle) wrote:
> This removes some direct accesses to struct page, working towards
> splitting out struct ptdesc from struct page.
>
> Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>

Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>

> ---
>  arch/x86/xen/mmu_pv.c |  2 +-
>  include/linux/mm.h    | 14 +++++++-------
>  2 files changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
> index b3b8d289b9ab..f469862e3ef4 100644
> --- a/arch/x86/xen/mmu_pv.c
> +++ b/arch/x86/xen/mmu_pv.c
> @@ -651,7 +651,7 @@ static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
>  	spinlock_t *ptl = NULL;
>
>  #if USE_SPLIT_PTE_PTLOCKS
> -	ptl = ptlock_ptr(page);
> +	ptl = ptlock_ptr(page_ptdesc(page));
>  	spin_lock_nest_lock(ptl, &mm->page_table_lock);
>  #endif
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index e6f1be2a405e..bb934d51390f 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -2828,9 +2828,9 @@ void __init ptlock_cache_init(void);
>  bool ptlock_alloc(struct ptdesc *ptdesc);
>  extern void ptlock_free(struct page *page);
>
> -static inline spinlock_t *ptlock_ptr(struct page *page)
> +static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
>  {
> -	return page->ptl;
> +	return ptdesc->ptl;
>  }
>  #else /* ALLOC_SPLIT_PTLOCKS */
>  static inline void ptlock_cache_init(void)
> @@ -2846,15 +2846,15 @@ static inline void ptlock_free(struct page *page)
>  {
>  }
>
> -static inline spinlock_t *ptlock_ptr(struct page *page)
> +static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
>  {
> -	return &page->ptl;
> +	return &ptdesc->ptl;
>  }
>  #endif /* ALLOC_SPLIT_PTLOCKS */
>
>  static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
>  {
> -	return ptlock_ptr(pmd_page(*pmd));
> +	return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
>  }
>
>  static inline bool ptlock_init(struct page *page)
> @@ -2869,7 +2869,7 @@ static inline bool ptlock_init(struct page *page)
>  	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
>  	if (!ptlock_alloc(page_ptdesc(page)))
>  		return false;
> -	spin_lock_init(ptlock_ptr(page));
> +	spin_lock_init(ptlock_ptr(page_ptdesc(page)));
>  	return true;
>  }
>
> @@ -2939,7 +2939,7 @@ static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd)
>
>  static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
>  {
> -	return ptlock_ptr(ptdesc_page(pmd_ptdesc(pmd)));
> +	return ptlock_ptr(pmd_ptdesc(pmd));
>  }
>
>  static inline bool pmd_ptlock_init(struct page *page)
> --
> 2.40.1
>
>
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index b3b8d289b9ab..f469862e3ef4 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -651,7 +651,7 @@ static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
 	spinlock_t *ptl = NULL;
 
 #if USE_SPLIT_PTE_PTLOCKS
-	ptl = ptlock_ptr(page);
+	ptl = ptlock_ptr(page_ptdesc(page));
 	spin_lock_nest_lock(ptl, &mm->page_table_lock);
 #endif
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e6f1be2a405e..bb934d51390f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2828,9 +2828,9 @@ void __init ptlock_cache_init(void);
 bool ptlock_alloc(struct ptdesc *ptdesc);
 extern void ptlock_free(struct page *page);
 
-static inline spinlock_t *ptlock_ptr(struct page *page)
+static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
 {
-	return page->ptl;
+	return ptdesc->ptl;
 }
 #else /* ALLOC_SPLIT_PTLOCKS */
 static inline void ptlock_cache_init(void)
@@ -2846,15 +2846,15 @@ static inline void ptlock_free(struct page *page)
 {
 }
 
-static inline spinlock_t *ptlock_ptr(struct page *page)
+static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
 {
-	return &page->ptl;
+	return &ptdesc->ptl;
 }
 #endif /* ALLOC_SPLIT_PTLOCKS */
 
 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
 {
-	return ptlock_ptr(pmd_page(*pmd));
+	return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
 }
 
 static inline bool ptlock_init(struct page *page)
@@ -2869,7 +2869,7 @@ static inline bool ptlock_init(struct page *page)
 	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
 	if (!ptlock_alloc(page_ptdesc(page)))
 		return false;
-	spin_lock_init(ptlock_ptr(page));
+	spin_lock_init(ptlock_ptr(page_ptdesc(page)));
 	return true;
 }
 
@@ -2939,7 +2939,7 @@ static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd)
 
 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
 {
-	return ptlock_ptr(ptdesc_page(pmd_ptdesc(pmd)));
+	return ptlock_ptr(pmd_ptdesc(pmd));
 }
 
 static inline bool pmd_ptlock_init(struct page *page)
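A note for readers comparing the two ptlock_ptr() variants in the hunks above: one branch returns ptdesc->ptl and the other &ptdesc->ptl because the lock is either allocated separately or embedded in the descriptor. The struct below is an illustrative sketch only, not the actual struct ptdesc definition from this series, and exists just to show the assumption behind each branch:

```c
#include <linux/spinlock.h>

/*
 * Illustrative layout (hypothetical name ptdesc_sketch): with
 * ALLOC_SPLIT_PTLOCKS (e.g. when lockdep makes spinlock_t too large to
 * embed), ->ptl is a pointer to a lock allocated by ptlock_alloc(), so
 * ptlock_ptr() returns it directly; otherwise the lock is embedded and
 * ptlock_ptr() returns its address.
 */
#if ALLOC_SPLIT_PTLOCKS
struct ptdesc_sketch {
	spinlock_t *ptl;	/* separately allocated split PTE lock */
};
#else
struct ptdesc_sketch {
	spinlock_t ptl;		/* lock embedded directly in the descriptor */
};
#endif
```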
This removes some direct accesses to struct page, working towards
splitting out struct ptdesc from struct page.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 arch/x86/xen/mmu_pv.c |  2 +-
 include/linux/mm.h    | 14 +++++++-------
 2 files changed, 8 insertions(+), 8 deletions(-)
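As a usage note (not part of the patch itself): after this change, a caller that holds the struct page backing a PTE table converts it with page_ptdesc() before asking for the split lock, exactly as xen_pte_lock() does above. A minimal sketch, assuming split PTE locks are enabled and using a hypothetical helper name:

```c
#include <linux/mm.h>

/* Hypothetical example, mirroring xen_pte_lock(): take a PTE table's lock. */
static spinlock_t *example_lock_pte_table(struct page *page)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTE_PTLOCKS
	/* Convert the page to its descriptor, then fetch the per-table lock. */
	ptl = ptlock_ptr(page_ptdesc(page));
	spin_lock(ptl);
#endif
	return ptl;
}
```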