[PATCH,V2,3/6] arm: mm: Enable HAVE_RCU_TABLE_FREE logic

Message ID 1408635812-31584-4-git-send-email-steve.capper@linaro.org (mailing list archive)
State New, archived

Commit Message

Steve Capper Aug. 21, 2014, 3:43 p.m. UTC
In order to implement fast_get_user_pages we need to ensure that the
page table walker is protected from page table pages being freed from
under it.

This patch enables HAVE_RCU_TABLE_FREE; any page table pages belonging
to address spaces with multiple users will be freed via call_rcu_sched.
This means that disabling interrupts will block the free and thus protect
the fast gup page walker.
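
As an illustrative aside (not part of the patch; the function name below is
hypothetical), this is roughly how a fast gup walker leans on disabled
interrupts: the table pages are freed via call_rcu_sched(), and a CPU with
IRQs disabled cannot pass through an RCU-sched quiescent state, so the
deferred free cannot run while the walk is in progress.

static int example_fast_gup_walk(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;
	int nr = 0;

	/* Holds off the call_rcu_sched() callback that frees table pages. */
	local_irq_save(flags);
	/* ... walk pgd/pud/pmd/pte of mm at addr without taking mmap_sem ... */
	local_irq_restore(flags);

	return nr;
}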

Signed-off-by: Steve Capper <steve.capper@linaro.org>
---
 arch/arm/Kconfig           |  1 +
 arch/arm/include/asm/tlb.h | 38 ++++++++++++++++++++++++++++++++++++--
 2 files changed, 37 insertions(+), 2 deletions(-)

Comments

Catalin Marinas Aug. 27, 2014, 11:50 a.m. UTC | #1
On Thu, Aug 21, 2014 at 04:43:29PM +0100, Steve Capper wrote:
> --- a/arch/arm/include/asm/tlb.h
> +++ b/arch/arm/include/asm/tlb.h
> @@ -35,12 +35,39 @@
>  
>  #define MMU_GATHER_BUNDLE	8
>  
> +#ifdef CONFIG_HAVE_RCU_TABLE_FREE
> +static inline void __tlb_remove_table(void *_table)
> +{
> +	free_page_and_swap_cache((struct page *)_table);
> +}
> +
> +struct mmu_table_batch {
> +	struct rcu_head		rcu;
> +	unsigned int		nr;
> +	void			*tables[0];
> +};
> +
> +#define MAX_TABLE_BATCH		\
> +	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
> +
> +extern void tlb_table_flush(struct mmu_gather *tlb);
> +extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
> +
> +#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
> +#else
> +#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
> +#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
> +
>  /*
>   * TLB handling.  This allows us to remove pages from the page
>   * tables, and efficiently handle the TLB issues.
>   */
>  struct mmu_gather {
>  	struct mm_struct	*mm;
> +#ifdef CONFIG_HAVE_RCU_TABLE_FREE
> +	struct mmu_table_batch	*batch;
> +	unsigned int		need_flush;
> +#endif

We add need_flush here just because it is set by tlb_remove_table() but
it won't actually be checked by anything since arch/arm uses its own
version of tlb_flush_mmu(). But I wouldn't go for #ifdefs in the core
code either.

We should (as a separate patchset) convert arch/arm to generic
mmu_gather. I know Russell had objections in the past but mmu_gather has
evolved since and it's no longer inefficient (I think the only case is
shift_arg_pages but that's pretty much lost in the noise).

For this patch:

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
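
(For reference, a rough paraphrase of the generic tlb_remove_table() in
mm/memory.c at the time, not verbatim; it shows where need_flush gets set
and why the field must exist in arm's mmu_gather even though arm's own
tlb_flush_mmu() never reads it:)

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->need_flush = 1;	/* set here, never checked by arm's tlb_flush_mmu() */

	/* A single-user mm cannot have a concurrent gup walker: free at once. */
	if (atomic_read(&tlb->mm->mm_users) < 2) {
		__tlb_remove_table(table);
		return;
	}

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			/* No memory for a batch: synchronise and free immediately. */
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}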
Steve Capper Aug. 27, 2014, 12:59 p.m. UTC | #2
On Wed, Aug 27, 2014 at 12:50:10PM +0100, Catalin Marinas wrote:
> On Thu, Aug 21, 2014 at 04:43:29PM +0100, Steve Capper wrote:
> > --- a/arch/arm/include/asm/tlb.h
> > +++ b/arch/arm/include/asm/tlb.h
> > @@ -35,12 +35,39 @@
> >  
> >  #define MMU_GATHER_BUNDLE	8
> >  
> > +#ifdef CONFIG_HAVE_RCU_TABLE_FREE
> > +static inline void __tlb_remove_table(void *_table)
> > +{
> > +	free_page_and_swap_cache((struct page *)_table);
> > +}
> > +
> > +struct mmu_table_batch {
> > +	struct rcu_head		rcu;
> > +	unsigned int		nr;
> > +	void			*tables[0];
> > +};
> > +
> > +#define MAX_TABLE_BATCH		\
> > +	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
> > +
> > +extern void tlb_table_flush(struct mmu_gather *tlb);
> > +extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
> > +
> > +#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
> > +#else
> > +#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
> > +#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
> > +
> >  /*
> >   * TLB handling.  This allows us to remove pages from the page
> >   * tables, and efficiently handle the TLB issues.
> >   */
> >  struct mmu_gather {
> >  	struct mm_struct	*mm;
> > +#ifdef CONFIG_HAVE_RCU_TABLE_FREE
> > +	struct mmu_table_batch	*batch;
> > +	unsigned int		need_flush;
> > +#endif
> 
> We add need_flush here just because it is set by tlb_remove_table() but
> it won't actually be checked by anything since arch/arm uses its own
> version of tlb_flush_mmu(). But I wouldn't go for #ifdefs in the core
> code either.
> 
> We should (as a separate patchset) convert arch/arm to generic
> mmu_gather. I know Russell had objections in the past but mmu_gather has
> evolved since and it's no longer inefficient (I think the only case is
> shift_arg_pages but that's pretty much lost in the noise).

I would be happy to help out with a conversion to generic mmu_gather if
it's wanted for arm.

> 
> For this patch:
> 
> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>

Cheers.

Patch

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c49a775..cc740d2 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -60,6 +60,7 @@  config ARM
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UID16
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index f1a0dac..3cadb72 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -35,12 +35,39 @@ 
 
 #define MMU_GATHER_BUNDLE	8
 
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+static inline void __tlb_remove_table(void *_table)
+{
+	free_page_and_swap_cache((struct page *)_table);
+}
+
+struct mmu_table_batch {
+	struct rcu_head		rcu;
+	unsigned int		nr;
+	void			*tables[0];
+};
+
+#define MAX_TABLE_BATCH		\
+	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
+
+extern void tlb_table_flush(struct mmu_gather *tlb);
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+
+#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
+#else
+#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+
 /*
  * TLB handling.  This allows us to remove pages from the page
  * tables, and efficiently handle the TLB issues.
  */
 struct mmu_gather {
 	struct mm_struct	*mm;
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	struct mmu_table_batch	*batch;
+	unsigned int		need_flush;
+#endif
 	unsigned int		fullmm;
 	struct vm_area_struct	*vma;
 	unsigned long		start, end;
@@ -101,6 +128,9 @@  static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb_table_flush(tlb);
+#endif
 }
 
 static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
@@ -129,6 +159,10 @@  tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 	tlb->pages = tlb->local;
 	tlb->nr = 0;
 	__tlb_alloc_page(tlb);
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb->batch = NULL;
+#endif
 }
 
 static inline void
@@ -205,7 +239,7 @@  static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	tlb_add_flush(tlb, addr + SZ_1M);
 #endif
 
-	tlb_remove_page(tlb, pte);
+	tlb_remove_entry(tlb, pte);
 }
 
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
@@ -213,7 +247,7 @@  static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 {
 #ifdef CONFIG_ARM_LPAE
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, virt_to_page(pmdp));
+	tlb_remove_entry(tlb, virt_to_page(pmdp));
 #endif
 }