
[v2,05/18] x86/mm: switch destroy_perdomain_mapping() parameter from domain to vCPU

Message ID 20250108142659.99490-6-roger.pau@citrix.com
State New
Series x86: adventures in Address Space Isolation

Commit Message

Roger Pau Monné Jan. 8, 2025, 2:26 p.m. UTC
In preparation for the per-domain area being populated with per-vCPU mappings,
change the parameter of destroy_perdomain_mapping() to be a vCPU instead of a
domain, and also update the function logic to allow manipulation of per-domain
mappings using the linear page table mappings.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
 xen/arch/x86/include/asm/mm.h |  2 +-
 xen/arch/x86/mm.c             | 24 +++++++++++++++++++++++-
 xen/arch/x86/pv/domain.c      |  3 +--
 xen/arch/x86/x86_64/mm.c      |  2 +-
 4 files changed, 26 insertions(+), 5 deletions(-)
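For context, the fast path introduced below relies on Xen's recursive
("linear") page-table slot: an L4 entry that maps the L4 table onto itself,
exposing every L1 entry of the currently loaded address space as one flat
array. A minimal sketch of the lookup, using the names that appear in the
patch (the wrapper function itself is illustrative and not part of the
patch):

    #include <asm/page.h>   /* __linear_l1_table, l1_linear_offset() */

    /*
     * Illustrative helper: return a pointer to the L1 entry that maps
     * @va in the page tables currently loaded on this CPU.  Indexing
     * __linear_l1_table by va's page number resolves, via the recursive
     * L4 slot (L4 -> L4 -> L3 -> L2), to the L1 entry for va.  The
     * result is only meaningful for the active address space.
     */
    static l1_pgentry_t *active_l1_entry(unsigned long va)
    {
        return &__linear_l1_table[l1_linear_offset(va)];
    }

This is also why the fast path must first ensure the loaded page tables
belong to the target vCPU, which is what the sync_local_execstate() call in
the hunk below is for.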

Comments

Alejandro Vallejo Jan. 9, 2025, 10:02 a.m. UTC | #1
On Wed Jan 8, 2025 at 2:26 PM GMT, Roger Pau Monne wrote:
> In preparation for the per-domain area being populated with per-vCPU mappings,
> change the parameter of destroy_perdomain_mapping() to be a vCPU instead of a
> domain, and also update the function logic to allow manipulation of per-domain
> mappings using the linear page table mappings.
>
> Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
> ---
>  xen/arch/x86/include/asm/mm.h |  2 +-
>  xen/arch/x86/mm.c             | 24 +++++++++++++++++++++++-
>  xen/arch/x86/pv/domain.c      |  3 +--
>  xen/arch/x86/x86_64/mm.c      |  2 +-
>  4 files changed, 26 insertions(+), 5 deletions(-)
>
> diff --git a/xen/arch/x86/include/asm/mm.h b/xen/arch/x86/include/asm/mm.h
> index b50a51327b2b..65cd751087dc 100644
> --- a/xen/arch/x86/include/asm/mm.h
> +++ b/xen/arch/x86/include/asm/mm.h
> @@ -605,7 +605,7 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
>                               struct page_info **ppg);
>  void populate_perdomain_mapping(const struct vcpu *v, unsigned long va,
>                                  mfn_t *mfn, unsigned long nr);
> -void destroy_perdomain_mapping(struct domain *d, unsigned long va,
> +void destroy_perdomain_mapping(const struct vcpu *v, unsigned long va,
>                                 unsigned int nr);
>  void free_perdomain_mappings(struct domain *d);
>  
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index 0abea792486c..713ae8dd6fa3 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -6511,10 +6511,11 @@ void populate_perdomain_mapping(const struct vcpu *v, unsigned long va,
>      unmap_domain_page(l3tab);
>  }
>  
> -void destroy_perdomain_mapping(struct domain *d, unsigned long va,
> +void destroy_perdomain_mapping(const struct vcpu *v, unsigned long va,
>                                 unsigned int nr)
>  {
>      const l3_pgentry_t *l3tab, *pl3e;
> +    const struct domain *d = v->domain;
>  
>      ASSERT(va >= PERDOMAIN_VIRT_START &&
>             va < PERDOMAIN_VIRT_SLOT(PERDOMAIN_SLOTS));
> @@ -6523,6 +6524,27 @@ void destroy_perdomain_mapping(struct domain *d, unsigned long va,
>      if ( !d->arch.perdomain_l3_pg )
>          return;
>  
> +    /* Use likely to force the optimization for the fast path. */
> +    if ( likely(v == current) )

As in the previous patch, doesn't using curr_vcpu here...

> +    {
> +        l1_pgentry_t *pl1e;
> +
> +        /* Ensure page-tables are from current (if current != curr_vcpu). */
> +        sync_local_execstate();

... avoid the need for this? (A sketch of this alternative follows the
reply below.)

> +
> +        pl1e = &__linear_l1_table[l1_linear_offset(va)];
> +
> +        /* Fast path: zap L1 entries using the recursive linear mappings. */
> +        for ( ; nr--; pl1e++ )
> +        {
> +            if ( perdomain_l1e_needs_freeing(*pl1e) )
> +                free_domheap_page(l1e_get_page(*pl1e));
> +            l1e_write(pl1e, l1e_empty());
> +        }
> +
> +        return;
> +    }
> +
>      l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
>      pl3e = l3tab + l3_table_offset(va);
>  
> diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
> index bc7cd0c62f0e..7e8bffaae9a0 100644
> --- a/xen/arch/x86/pv/domain.c
> +++ b/xen/arch/x86/pv/domain.c
> @@ -285,8 +285,7 @@ static int pv_create_gdt_ldt_l1tab(struct vcpu *v)
>  
>  static void pv_destroy_gdt_ldt_l1tab(struct vcpu *v)
>  {
> -    destroy_perdomain_mapping(v->domain, GDT_VIRT_START(v),
> -                              1U << GDT_LDT_VCPU_SHIFT);
> +    destroy_perdomain_mapping(v, GDT_VIRT_START(v), 1U << GDT_LDT_VCPU_SHIFT);
>  }
>  
>  void pv_vcpu_destroy(struct vcpu *v)
> diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
> index 389d813ebe63..c08b28d9693b 100644
> --- a/xen/arch/x86/x86_64/mm.c
> +++ b/xen/arch/x86/x86_64/mm.c
> @@ -737,7 +737,7 @@ int setup_compat_arg_xlat(struct vcpu *v)
>  
>  void free_compat_arg_xlat(struct vcpu *v)
>  {
> -    destroy_perdomain_mapping(v->domain, ARG_XLAT_START(v),
> +    destroy_perdomain_mapping(v, ARG_XLAT_START(v),
>                                PFN_UP(COMPAT_ARG_XLAT_SIZE));
>  }
>  

Cheers,
Alejandro
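To make the question concrete: with Xen's lazy context switching, current
(the vCPU scheduled on this pCPU) and the per-CPU curr_vcpu (the vCPU whose
state, page tables included, is still loaded) can differ, e.g. while the
idle vCPU runs. A hedged sketch of the variant being suggested, reusing the
patch's surrounding code (illustrative only, not an applied change):

    /*
     * Hypothetical variant: key the fast path on curr_vcpu instead of
     * current.  If v is the vCPU whose page tables are loaded on this
     * pCPU, the linear mappings already reach v's per-domain area, so
     * no sync_local_execstate() call is needed before using them.
     */
    if ( likely(v == this_cpu(curr_vcpu)) )
    {
        l1_pgentry_t *pl1e = &__linear_l1_table[l1_linear_offset(va)];

        for ( ; nr--; pl1e++ )
        {
            if ( perdomain_l1e_needs_freeing(*pl1e) )
                free_domheap_page(l1e_get_page(*pl1e));
            l1e_write(pl1e, l1e_empty());
        }

        return;
    }

Whether this suffices depends on the same considerations raised against the
previous patch in the series.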

Patch

diff --git a/xen/arch/x86/include/asm/mm.h b/xen/arch/x86/include/asm/mm.h
index b50a51327b2b..65cd751087dc 100644
--- a/xen/arch/x86/include/asm/mm.h
+++ b/xen/arch/x86/include/asm/mm.h
@@ -605,7 +605,7 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
                              struct page_info **ppg);
 void populate_perdomain_mapping(const struct vcpu *v, unsigned long va,
                                 mfn_t *mfn, unsigned long nr);
-void destroy_perdomain_mapping(struct domain *d, unsigned long va,
+void destroy_perdomain_mapping(const struct vcpu *v, unsigned long va,
                                unsigned int nr);
 void free_perdomain_mappings(struct domain *d);
 
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 0abea792486c..713ae8dd6fa3 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -6511,10 +6511,11 @@ void populate_perdomain_mapping(const struct vcpu *v, unsigned long va,
     unmap_domain_page(l3tab);
 }
 
-void destroy_perdomain_mapping(struct domain *d, unsigned long va,
+void destroy_perdomain_mapping(const struct vcpu *v, unsigned long va,
                                unsigned int nr)
 {
     const l3_pgentry_t *l3tab, *pl3e;
+    const struct domain *d = v->domain;
 
     ASSERT(va >= PERDOMAIN_VIRT_START &&
            va < PERDOMAIN_VIRT_SLOT(PERDOMAIN_SLOTS));
@@ -6523,6 +6524,27 @@ void destroy_perdomain_mapping(struct domain *d, unsigned long va,
     if ( !d->arch.perdomain_l3_pg )
         return;
 
+    /* Use likely to force the optimization for the fast path. */
+    if ( likely(v == current) )
+    {
+        l1_pgentry_t *pl1e;
+
+        /* Ensure page-tables are from current (if current != curr_vcpu). */
+        sync_local_execstate();
+
+        pl1e = &__linear_l1_table[l1_linear_offset(va)];
+
+        /* Fast path: zap L1 entries using the recursive linear mappings. */
+        for ( ; nr--; pl1e++ )
+        {
+            if ( perdomain_l1e_needs_freeing(*pl1e) )
+                free_domheap_page(l1e_get_page(*pl1e));
+            l1e_write(pl1e, l1e_empty());
+        }
+
+        return;
+    }
+
     l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
     pl3e = l3tab + l3_table_offset(va);
 
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index bc7cd0c62f0e..7e8bffaae9a0 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -285,8 +285,7 @@ static int pv_create_gdt_ldt_l1tab(struct vcpu *v)
 
 static void pv_destroy_gdt_ldt_l1tab(struct vcpu *v)
 {
-    destroy_perdomain_mapping(v->domain, GDT_VIRT_START(v),
-                              1U << GDT_LDT_VCPU_SHIFT);
+    destroy_perdomain_mapping(v, GDT_VIRT_START(v), 1U << GDT_LDT_VCPU_SHIFT);
 }
 
 void pv_vcpu_destroy(struct vcpu *v)
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 389d813ebe63..c08b28d9693b 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -737,7 +737,7 @@ int setup_compat_arg_xlat(struct vcpu *v)
 
 void free_compat_arg_xlat(struct vcpu *v)
 {
-    destroy_perdomain_mapping(v->domain, ARG_XLAT_START(v),
+    destroy_perdomain_mapping(v, ARG_XLAT_START(v),
                               PFN_UP(COMPAT_ARG_XLAT_SIZE));
 }