
xen/x86/shadow: adjust barriers around gtable_dirty_version.

Message ID 20170818142344.GD7137@deinos.phlegethon.org (mailing list archive)
State New, archived

Commit Message

Tim Deegan Aug. 18, 2017, 2:23 p.m. UTC
Use the smp_ variants, as we're only synchronizing against other CPUs.

Add a write barrier before incrementing the version.

x86's memory ordering rules and the presence of various out-of-unit
function calls (which the compiler cannot reorder across) mean that
this code worked correctly before; the barriers are mostly decorative.

Signed-off-by: Tim Deegan <tim@xen.org>
---
 xen/arch/x86/mm/shadow/multi.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
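
The pattern being annotated is a version counter used like a lightweight
seqlock: the writer publishes its modifications before bumping
gtable_dirty_version, and the reader snapshots the version before walking
the guest tables, re-checking it later under the paging lock. Below is a
minimal standalone sketch of that pairing, assuming C11 atomics;
atomic_thread_fence() stands in for Xen's smp_rmb()/smp_wmb(), and every
name other than gtable_dirty_version is hypothetical, not Xen's.

    /*
     * Sketch of the version-counter pattern, in C11 atomics.
     * atomic_thread_fence() stands in for Xen's smp_rmb()/smp_wmb();
     * shadow_state and the function names are hypothetical.
     */
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int gtable_dirty_version;
    static int shadow_state;  /* stands in for the (shadow) page tables */

    /* Writer: publish the modification before bumping the version. */
    void publish_change(int new_state)
    {
        shadow_state = new_state;
        atomic_thread_fence(memory_order_release);   /* cf. smp_wmb()    */
        atomic_fetch_add(&gtable_dirty_version, 1);  /* cf. atomic_inc() */
    }

    /* Reader: snapshot the version before consuming the data. */
    bool read_and_check(int *out_state)
    {
        int version = atomic_load_explicit(&gtable_dirty_version,
                                           memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire);   /* cf. smp_rmb()    */
        *out_state = shadow_state;                   /* cf. the guest walk */
        /* Re-check, as shadow_check_gwalk() does under the paging lock:
         * the snapshot is stale iff the version moved underneath us. */
        return version == atomic_load_explicit(&gtable_dirty_version,
                                               memory_order_relaxed);
    }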

Comments

Andrew Cooper Aug. 18, 2017, 2:26 p.m. UTC | #1
On 18/08/17 15:23, Tim Deegan wrote:
> Use the smp_ variants, as we're only synchronizing against other CPUs.
>
> Add a write barrier before incrementing the version.
>
> x86's memory ordering rules and the presence of various out-of-unit
> function calls (which the compiler cannot reorder across) mean that
> this code worked correctly before; the barriers are mostly decorative.
>
> Signed-off-by: Tim Deegan <tim@xen.org>

Thanks!

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

> ---
>  xen/arch/x86/mm/shadow/multi.c | 4 +++-
>  1 file changed, 3 insertions(+), 1 deletion(-)
>
> diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
> index c9c2252..f8a8928 100644
> --- a/xen/arch/x86/mm/shadow/multi.c
> +++ b/xen/arch/x86/mm/shadow/multi.c
> @@ -206,6 +206,7 @@ shadow_check_gwalk(struct vcpu *v, unsigned long va, walk_t *gw, int version)
>  
>      ASSERT(paging_locked_by_me(d));
>  
> +    /* No need for smp_rmb() here; taking the paging lock was enough. */
>      if ( version == atomic_read(&d->arch.paging.shadow.gtable_dirty_version) )
>           return 1;
>  
> @@ -3112,7 +3113,7 @@ static int sh_page_fault(struct vcpu *v,
>       * will make sure no inconsistent mapping being translated into
>       * shadow page table. */
>      version = atomic_read(&d->arch.paging.shadow.gtable_dirty_version);
> -    rmb();
> +    smp_rmb();
>      walk_ok = sh_walk_guest_tables(v, va, &gw, error_code);
>  
>  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
> @@ -3188,6 +3189,7 @@ static int sh_page_fault(struct vcpu *v,
>           * overlapping with this one may be inconsistent
>           */
>          perfc_incr(shadow_rm_write_flush_tlb);
> +        smp_wmb();
>          atomic_inc(&d->arch.paging.shadow.gtable_dirty_version);
>          flush_tlb_mask(d->domain_dirty_cpumask);
>      }

Patch

diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index c9c2252..f8a8928 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -206,6 +206,7 @@ shadow_check_gwalk(struct vcpu *v, unsigned long va, walk_t *gw, int version)
 
     ASSERT(paging_locked_by_me(d));
 
+    /* No need for smp_rmb() here; taking the paging lock was enough. */
     if ( version == atomic_read(&d->arch.paging.shadow.gtable_dirty_version) )
          return 1;
 
@@ -3112,7 +3113,7 @@ static int sh_page_fault(struct vcpu *v,
      * will make sure no inconsistent mapping being translated into
      * shadow page table. */
     version = atomic_read(&d->arch.paging.shadow.gtable_dirty_version);
-    rmb();
+    smp_rmb();
     walk_ok = sh_walk_guest_tables(v, va, &gw, error_code);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
@@ -3188,6 +3189,7 @@ static int sh_page_fault(struct vcpu *v,
          * overlapping with this one may be inconsistent
          */
         perfc_incr(shadow_rm_write_flush_tlb);
+        smp_wmb();
         atomic_inc(&d->arch.paging.shadow.gtable_dirty_version);
         flush_tlb_mask(d->domain_dirty_cpumask);
     }
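
The barriers pair up as follows: the smp_wmb() before atomic_inc() ensures
the guest-table modifications are globally visible no later than the new
version, while the smp_rmb() after the version read in sh_page_fault()
ensures the subsequent guest walk reads data at least as new as the version
snapshot. shadow_check_gwalk() then only needs to re-read the version: as
the new comment notes, acquiring the paging lock already provides the read
ordering that an smp_rmb() would otherwise have to supply.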