diff mbox

[For,Xen-4.10,Resend,1/3] Allow control of icache invalidations when calling flush_page_to_ram()

Message ID 20170515141012.6612-2-punit.agrawal@arm.com (mailing list archive)
State New, archived
Headers show

Commit Message

Punit Agrawal May 15, 2017, 2:10 p.m. UTC
flush_page_to_ram() unconditionally drops the icache. In certain
situations this leads to excessive icache flushes when
flush_page_to_ram() ends up being repeatedly called in a loop.

Introduce a parameter to allow callers of flush_page_to_ram() to take
responsibility of synchronising the icache. This is in preparation for
adding logic to make the callers perform the necessary icache
maintenance operations.

Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>
---
 xen/arch/arm/mm.c              | 5 +++--
 xen/arch/arm/p2m.c             | 2 +-
 xen/common/page_alloc.c        | 2 +-
 xen/include/asm-arm/page.h     | 2 +-
 xen/include/asm-x86/flushtlb.h | 2 +-
 5 files changed, 7 insertions(+), 6 deletions(-)

Comments

Jan Beulich May 17, 2017, 3:45 p.m. UTC | #1
>>> On 15.05.17 at 16:10, <punit.agrawal@arm.com> wrote:
> flush_page_to_ram() unconditionally drops the icache. In certain
> situations this leads to excessive icache flushes when
> flush_page_to_ram() ends up being repeatedly called in a loop.
> 
> Introduce a parameter to allow callers of flush_page_to_ram() to take
> responsibility of synchronising the icache. This is in preparation for
> adding logic to make the callers perform the necessary icache
> maintenance operations.
> 
> Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>

Non-ARM bits
Acked-by: Jan Beulich <jbeulich@suse.com>
provided the ARM maintainers agree with the ARM side (including
the use of this in subsequent patches).

Jan
Stefano Stabellini May 23, 2017, 9:45 p.m. UTC | #2
On Mon, 15 May 2017, Punit Agrawal wrote:
> flush_page_to_ram() unconditionally drops the icache. In certain
> situations this leads to excessive icache flushes when
> flush_page_to_ram() ends up being repeatedly called in a loop.
> 
> Introduce a parameter to allow callers of flush_page_to_ram() to take
> responsibility of synchronising the icache. This is in preparation for
> adding logic to make the callers perform the necessary icache
> maintenance operations.
> 
> Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>

> ---
>  xen/arch/arm/mm.c              | 5 +++--
>  xen/arch/arm/p2m.c             | 2 +-
>  xen/common/page_alloc.c        | 2 +-
>  xen/include/asm-arm/page.h     | 2 +-
>  xen/include/asm-x86/flushtlb.h | 2 +-
>  5 files changed, 7 insertions(+), 6 deletions(-)
> 
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 48f74f6e65..082c872c72 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -420,7 +420,7 @@ unsigned long domain_page_map_to_mfn(const void *ptr)
>  }
>  #endif
>  
> -void flush_page_to_ram(unsigned long mfn)
> +void flush_page_to_ram(unsigned long mfn, bool sync_icache)
>  {
>      void *v = map_domain_page(_mfn(mfn));
>  
> @@ -435,7 +435,8 @@ void flush_page_to_ram(unsigned long mfn)
>       * I-Cache (See D4.9.2 in ARM DDI 0487A.k_iss10775). Instead of using flush
>       * by VA on select platforms, we just flush the entire cache here.
>       */
> -    invalidate_icache();
> +    if ( sync_icache )
> +        invalidate_icache();
>  }
>  
>  void __init arch_init_memory(void)
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index 34d57760d7..29f2e2fad3 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -1392,7 +1392,7 @@ int p2m_cache_flush(struct domain *d, gfn_t start, unsigned long nr)
>          /* XXX: Implement preemption */
>          while ( gfn_x(start) < gfn_x(next_gfn) )
>          {
> -            flush_page_to_ram(mfn_x(mfn));
> +            flush_page_to_ram(mfn_x(mfn), true);
>  
>              start = gfn_add(start, 1);
>              mfn = mfn_add(mfn, 1);
> diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
> index 9e41fb4cd3..eba78f1a3d 100644
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -833,7 +833,7 @@ static struct page_info *alloc_heap_pages(
>          /* Ensure cache and RAM are consistent for platforms where the
>           * guest can control its own visibility of/through the cache.
>           */
> -        flush_page_to_ram(page_to_mfn(&pg[i]));
> +        flush_page_to_ram(page_to_mfn(&pg[i]), true);
>      }
>  
>      spin_unlock(&heap_lock);
> diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
> index 4b46e8831c..497b4c86ad 100644
> --- a/xen/include/asm-arm/page.h
> +++ b/xen/include/asm-arm/page.h
> @@ -407,7 +407,7 @@ static inline void flush_xen_data_tlb_range_va(unsigned long va,
>  }
>  
>  /* Flush the dcache for an entire page. */
> -void flush_page_to_ram(unsigned long mfn);
> +void flush_page_to_ram(unsigned long mfn, bool sync_icache);
>  
>  /*
>   * Print a walk of a page table or p2m
> diff --git a/xen/include/asm-x86/flushtlb.h b/xen/include/asm-x86/flushtlb.h
> index 8b7adef7c5..bd2be7e482 100644
> --- a/xen/include/asm-x86/flushtlb.h
> +++ b/xen/include/asm-x86/flushtlb.h
> @@ -118,7 +118,7 @@ void flush_area_mask(const cpumask_t *, const void *va, unsigned int flags);
>  #define flush_tlb_one_all(v)                    \
>      flush_tlb_one_mask(&cpu_online_map, v)
>  
> -static inline void flush_page_to_ram(unsigned long mfn) {}
> +static inline void flush_page_to_ram(unsigned long mfn, bool sync_icache) {}
>  static inline int invalidate_dcache_va_range(const void *p,
>                                               unsigned long size)
>  { return -EOPNOTSUPP; }
> -- 
> 2.11.0
>
diff mbox

Patch

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 48f74f6e65..082c872c72 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -420,7 +420,7 @@  unsigned long domain_page_map_to_mfn(const void *ptr)
 }
 #endif
 
-void flush_page_to_ram(unsigned long mfn)
+void flush_page_to_ram(unsigned long mfn, bool sync_icache)
 {
     void *v = map_domain_page(_mfn(mfn));
 
@@ -435,7 +435,8 @@  void flush_page_to_ram(unsigned long mfn)
      * I-Cache (See D4.9.2 in ARM DDI 0487A.k_iss10775). Instead of using flush
      * by VA on select platforms, we just flush the entire cache here.
      */
-    invalidate_icache();
+    if ( sync_icache )
+        invalidate_icache();
 }
 
 void __init arch_init_memory(void)
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 34d57760d7..29f2e2fad3 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1392,7 +1392,7 @@  int p2m_cache_flush(struct domain *d, gfn_t start, unsigned long nr)
         /* XXX: Implement preemption */
         while ( gfn_x(start) < gfn_x(next_gfn) )
         {
-            flush_page_to_ram(mfn_x(mfn));
+            flush_page_to_ram(mfn_x(mfn), true);
 
             start = gfn_add(start, 1);
             mfn = mfn_add(mfn, 1);
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 9e41fb4cd3..eba78f1a3d 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -833,7 +833,7 @@  static struct page_info *alloc_heap_pages(
         /* Ensure cache and RAM are consistent for platforms where the
          * guest can control its own visibility of/through the cache.
          */
-        flush_page_to_ram(page_to_mfn(&pg[i]));
+        flush_page_to_ram(page_to_mfn(&pg[i]), true);
     }
 
     spin_unlock(&heap_lock);
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 4b46e8831c..497b4c86ad 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -407,7 +407,7 @@  static inline void flush_xen_data_tlb_range_va(unsigned long va,
 }
 
 /* Flush the dcache for an entire page. */
-void flush_page_to_ram(unsigned long mfn);
+void flush_page_to_ram(unsigned long mfn, bool sync_icache);
 
 /*
  * Print a walk of a page table or p2m
diff --git a/xen/include/asm-x86/flushtlb.h b/xen/include/asm-x86/flushtlb.h
index 8b7adef7c5..bd2be7e482 100644
--- a/xen/include/asm-x86/flushtlb.h
+++ b/xen/include/asm-x86/flushtlb.h
@@ -118,7 +118,7 @@  void flush_area_mask(const cpumask_t *, const void *va, unsigned int flags);
 #define flush_tlb_one_all(v)                    \
     flush_tlb_one_mask(&cpu_online_map, v)
 
-static inline void flush_page_to_ram(unsigned long mfn) {}
+static inline void flush_page_to_ram(unsigned long mfn, bool sync_icache) {}
 static inline int invalidate_dcache_va_range(const void *p,
                                              unsigned long size)
 { return -EOPNOTSUPP; }