
[06/16] x86/shadow: purge {write,cmpxchg}_guest_entry() hooks

Message ID 4d36db89-cd87-06b8-bb02-395ec79662c7@suse.com (mailing list archive)
State New, archived
Series x86: assorted (mostly) shadow mode adjustments

Commit Message

Jan Beulich March 22, 2023, 9:32 a.m. UTC
These aren't mode dependent (see 06f04f54ba97 ["x86/shadow:
sh_{write,cmpxchg}_guest_entry() are PV-only"], where they were moved
out of multi.c) and hence there's no need to have pointers to the
functions in struct shadow_paging_mode. Due to include dependencies,
however, the "paging" wrappers need to move out of paging.h; they're
needed from PV memory management code only anyway, so by moving them
their exposure is reduced at the same time.

By carefully placing the (moved and renamed) shadow function
declarations, #ifdef can also be dropped from the "paging" wrappers
(paging_mode_shadow() is constant false when !SHADOW_PAGING).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
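
To illustrate the #ifdef point: with !CONFIG_SHADOW_PAGING, paging_mode_shadow()
is a compile-time 0, so the shadow branch in the wrapper is folded away as dead
code and the (merely declared) shadow helper is never referenced at link time.
An annotated sketch of the new wrapper from xen/arch/x86/pv/mm.h (illustration
only, not an extra hunk of the patch):

static inline void paging_write_guest_entry(
    struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
{
    /* Constant false when !CONFIG_SHADOW_PAGING, so this branch is discarded. */
    if ( unlikely(paging_mode_shadow(v->domain)) )
        /* Never emitted in that configuration; the declaration alone suffices. */
        shadow_write_guest_entry(v, p, new, gmfn);
    else
        write_atomic(p, new);
}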

Comments

Andrew Cooper March 23, 2023, 1:13 p.m. UTC | #1
On 22/03/2023 9:32 am, Jan Beulich wrote:
> --- a/xen/arch/x86/pv/mm.h
> +++ b/xen/arch/x86/pv/mm.h
> @@ -32,6 +34,35 @@ static inline l1_pgentry_t guest_get_eff
>  }
>  
>  /*
> + * Write a new value into the guest pagetable, and update the
> + * paging-assistance state appropriately.  Returns false if we page-faulted,
> + * true for success.
> + */

I know you're just moving the comments as they are, but more than half of
this is definitely wrong now, and another part is wholly redundant.

"Write a new value into the guest pagetable" is about the best I can
think of, but it is borderline whether it even needs a comment.
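
A sketch of how that could read on the moved wrapper (illustrative wording
only; the committed comment may differ):

/* Write a new value into the guest pagetable. */
static inline void paging_write_guest_entry(
    struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
{
    /* Body unchanged from the hunk quoted below. */
}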

> +static inline void paging_write_guest_entry(
> +    struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
> +{
> +    if ( unlikely(paging_mode_shadow(v->domain)) )
> +        shadow_write_guest_entry(v, p, new, gmfn);
> +    else
> +        write_atomic(p, new);
> +}
> +
> +
> +/*
> + * Cmpxchg a new value into the guest pagetable, and update the
> + * paging-assistance state appropriately.  Returns false if we page-faulted,
> + * true if not.  N.B. caller should check the value of "old" to see if the
> + * cmpxchg itself was successful.
> + */

"Compare and exchange a guest pagetable entry.  Returns the old value." 
We don't need to teach people how to use cmpxchg as a primitive here...

The comment next to shadow_cmpxchg_guest_entry() ideally wants the
grammar fix in the first clause too, and this is probably the right
patch to do it in.
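
A sketch of both adjustments (illustrative wording only; the committed text
may differ). For the pv/mm.h wrapper:

/* Compare and exchange a guest pagetable entry.  Returns the old value. */
static inline intpte_t paging_cmpxchg_guest_entry(
    struct vcpu *v, intpte_t *p, intpte_t old, intpte_t new, mfn_t gmfn)
{
    /* Body unchanged from the patch below. */
}

And, with the same first-clause fix, ahead of shadow_cmpxchg_guest_entry() in
shadow/pv.c:

/*
 * Compare and exchange a guest pagetable entry, and update the shadows
 * appropriately.  Returns the previous entry found, which the caller is
 * expected to check to see if the cmpxchg was successful.
 */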

For the content of the change, definitely an improvement.  With the
comments suitably adjusted, Reviewed-by: Andrew Cooper
<andrew.cooper3@citrix.com>

Patch

--- a/xen/arch/x86/include/asm/paging.h
+++ b/xen/arch/x86/include/asm/paging.h
@@ -98,13 +98,6 @@ 
 
 struct shadow_paging_mode {
 #ifdef CONFIG_SHADOW_PAGING
-#ifdef CONFIG_PV
-    void          (*write_guest_entry     )(struct vcpu *v, intpte_t *p,
-                                            intpte_t new, mfn_t gmfn);
-    intpte_t      (*cmpxchg_guest_entry   )(struct vcpu *v, intpte_t *p,
-                                            intpte_t old, intpte_t new,
-                                            mfn_t gmfn);
-#endif
 #ifdef CONFIG_HVM
     int           (*guess_wrmap           )(struct vcpu *v, 
                                             unsigned long vaddr, mfn_t gmfn);
@@ -326,44 +319,6 @@  static inline void paging_update_paging_
     v->domain->arch.paging.update_paging_modes(v);
 }
 
-#ifdef CONFIG_PV
-
-/*
- * Write a new value into the guest pagetable, and update the
- * paging-assistance state appropriately.  Returns false if we page-faulted,
- * true for success.
- */
-static inline void paging_write_guest_entry(
-    struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
-{
-#ifdef CONFIG_SHADOW_PAGING
-    if ( unlikely(paging_mode_shadow(v->domain)) && paging_get_hostmode(v) )
-        paging_get_hostmode(v)->shadow.write_guest_entry(v, p, new, gmfn);
-    else
-#endif
-        write_atomic(p, new);
-}
-
-
-/*
- * Cmpxchg a new value into the guest pagetable, and update the
- * paging-assistance state appropriately.  Returns false if we page-faulted,
- * true if not.  N.B. caller should check the value of "old" to see if the
- * cmpxchg itself was successful.
- */
-static inline intpte_t paging_cmpxchg_guest_entry(
-    struct vcpu *v, intpte_t *p, intpte_t old, intpte_t new, mfn_t gmfn)
-{
-#ifdef CONFIG_SHADOW_PAGING
-    if ( unlikely(paging_mode_shadow(v->domain)) && paging_get_hostmode(v) )
-        return paging_get_hostmode(v)->shadow.cmpxchg_guest_entry(v, p, old,
-                                                                  new, gmfn);
-#endif
-    return cmpxchg(p, old, new);
-}
-
-#endif /* CONFIG_PV */
-
 /* Helper function that writes a pte in such a way that a concurrent read 
  * never sees a half-written entry that has _PAGE_PRESENT set */
 static inline void safe_write_pte(l1_pgentry_t *p, l1_pgentry_t new)
--- a/xen/arch/x86/include/asm/shadow.h
+++ b/xen/arch/x86/include/asm/shadow.h
@@ -248,6 +248,12 @@  static inline void pv_l1tf_domain_destro
 #endif
 }
 
+/* Functions that atomically write PV guest PT entries */
+void shadow_write_guest_entry(
+    struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn);
+intpte_t shadow_cmpxchg_guest_entry(
+    struct vcpu *v, intpte_t *p, intpte_t old, intpte_t new, mfn_t gmfn);
+
 #endif /* CONFIG_PV */
 
 /* Remove all shadows of the guest mfn. */
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -4131,10 +4131,6 @@  const struct paging_mode sh_paging_mode
 #endif
     .update_cr3                    = sh_update_cr3,
     .guest_levels                  = GUEST_PAGING_LEVELS,
-#ifdef CONFIG_PV
-    .shadow.write_guest_entry      = sh_write_guest_entry,
-    .shadow.cmpxchg_guest_entry    = sh_cmpxchg_guest_entry,
-#endif
 #ifdef CONFIG_HVM
 #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
     .shadow.guess_wrmap            = sh_guess_wrmap,
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -428,12 +428,6 @@  static inline int sh_remove_write_access
 }
 #endif
 
-/* Functions that atomically write PV guest PT entries */
-void cf_check sh_write_guest_entry(
-    struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn);
-intpte_t cf_check sh_cmpxchg_guest_entry(
-    struct vcpu *v, intpte_t *p, intpte_t old, intpte_t new, mfn_t gmfn);
-
 /* Unhook the non-Xen mappings in this top-level shadow mfn.
  * With user_only == 1, unhooks only the user-mode mappings. */
 void shadow_unhook_mappings(struct domain *d, mfn_t smfn, int user_only);
--- a/xen/arch/x86/mm/shadow/pv.c
+++ b/xen/arch/x86/mm/shadow/pv.c
@@ -28,8 +28,8 @@ 
  * Write a new value into the guest pagetable, and update the shadows
  * appropriately.
  */
-void cf_check
-sh_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
+void
+shadow_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
 {
     paging_lock(v->domain);
     write_atomic(p, new);
@@ -42,9 +42,9 @@  sh_write_guest_entry(struct vcpu *v, int
  * appropriately.  Returns the previous entry found, which the caller is
  * expected to check to see if the cmpxchg was successful.
  */
-intpte_t cf_check
-sh_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t old,
-                       intpte_t new, mfn_t gmfn)
+intpte_t
+shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t old,
+                           intpte_t new, mfn_t gmfn)
 {
     intpte_t t;
 
--- a/xen/arch/x86/pv/mm.h
+++ b/xen/arch/x86/pv/mm.h
@@ -1,6 +1,8 @@ 
 #ifndef __PV_MM_H__
 #define __PV_MM_H__
 
+#include <asm/shadow.h>
+
 l1_pgentry_t *map_guest_l1e(unsigned long linear, mfn_t *gl1mfn);
 
 int new_guest_cr3(mfn_t mfn);
@@ -32,6 +34,35 @@  static inline l1_pgentry_t guest_get_eff
 }
 
 /*
+ * Write a new value into the guest pagetable, and update the
+ * paging-assistance state appropriately.  Returns false if we page-faulted,
+ * true for success.
+ */
+static inline void paging_write_guest_entry(
+    struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
+{
+    if ( unlikely(paging_mode_shadow(v->domain)) )
+        shadow_write_guest_entry(v, p, new, gmfn);
+    else
+        write_atomic(p, new);
+}
+
+
+/*
+ * Cmpxchg a new value into the guest pagetable, and update the
+ * paging-assistance state appropriately.  Returns false if we page-faulted,
+ * true if not.  N.B. caller should check the value of "old" to see if the
+ * cmpxchg itself was successful.
+ */
+static inline intpte_t paging_cmpxchg_guest_entry(
+    struct vcpu *v, intpte_t *p, intpte_t old, intpte_t new, mfn_t gmfn)
+{
+    if ( unlikely(paging_mode_shadow(v->domain)) )
+        return shadow_cmpxchg_guest_entry(v, p, old, new, gmfn);
+    return cmpxchg(p, old, new);
+}
+
+/*
  * PTE updates can be done with ordinary writes except:
  *  1. Debug builds get extra checking by using CMPXCHG[8B].
  */