[01/22] x86/mm: drop l{1,2,3,4}e_write_atomic()

Message ID 20240726152206.28411-2-roger.pau@citrix.com (mailing list archive)
State New
Series x86: adventures in Address Space Isolation

Commit Message

Roger Pau Monné July 26, 2024, 3:21 p.m. UTC
The l{1,2,3,4}e_write_atomic() helpers and their non-_atomic-suffixed
counterparts share the same implementation, so it seems pointless and
possibly confusing to have both.

Remove the l{1,2,3,4}e_write_atomic() helpers and switch their users to
l{1,2,3,4}e_write(), as that's also atomic.  While there, also remove
pte_write{,_atomic}() and just use write_atomic() in the wrappers.

No functional change intended.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
 xen/arch/x86/include/asm/page.h        | 21 +++-----------
 xen/arch/x86/include/asm/x86_64/page.h |  2 --
 xen/arch/x86/mm.c                      | 39 +++++++++++---------------
 3 files changed, 20 insertions(+), 42 deletions(-)
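
For reference, every surviving l{1,2,3,4}e_write() wrapper now expands to
write_atomic(), i.e. a single aligned 64-bit store. A minimal sketch of why
that suffices on x86-64 (simplified for illustration; pte_store() is a
hypothetical name, not the actual Xen macro):

    #include <stdint.h>

    typedef uint64_t intpte_t;

    /*
     * On x86-64 an aligned 8-byte store is architecturally atomic, so a
     * 64-bit PTE can be updated with a single volatile write: concurrent
     * readers observe either the old or the new entry, never a mix of
     * the two.
     */
    static inline void pte_store(intpte_t *ptep, intpte_t pte)
    {
        *(volatile intpte_t *)ptep = pte;
    }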

Comments

Jan Beulich July 29, 2024, 7:52 a.m. UTC | #1
On 26.07.2024 17:21, Roger Pau Monne wrote:
> The l{1,2,3,4}e_write_atomic() helpers and their non-_atomic-suffixed
> counterparts share the same implementation, so it seems pointless and
> possibly confusing to have both.
> 
> Remove the l{1,2,3,4}e_write_atomic() helpers and switch their users to
> l{1,2,3,4}e_write(), as that's also atomic.  While there, also remove
> pte_write{,_atomic}() and just use write_atomic() in the wrappers.
> 
> No functional change intended.
> 
> Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>

In the description, can we perhaps mention the historical aspect of why
these were there (and separate)? Happy to add a sentence when committing,
as long as you agree.

Jan
Roger Pau Monné July 29, 2024, 12:53 p.m. UTC | #2
On Mon, Jul 29, 2024 at 09:52:50AM +0200, Jan Beulich wrote:
> On 26.07.2024 17:21, Roger Pau Monne wrote:
> > The l{1,2,3,4}e_write_atomic() helpers and their non-_atomic-suffixed
> > counterparts share the same implementation, so it seems pointless and
> > possibly confusing to have both.
> > 
> > Remove the l{1,2,3,4}e_write_atomic() helpers and switch their users to
> > l{1,2,3,4}e_write(), as that's also atomic.  While there, also remove
> > pte_write{,_atomic}() and just use write_atomic() in the wrappers.
> > 
> > No functional change intended.
> > 
> > Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
> 
> Reviewed-by: Jan Beulich <jbeulich@suse.com>
> 
> In the description, can we perhaps mention the historical aspect of why
> these were there (and separate)? Happy to add a sentence when committing,
> as long as you agree.

Sure:

"x86 32bit mode used to have a non-atomic PTE write that would split
the write in two halves, but with Xen only supporting x86 64bit
that's no longer present."

Would be fine?  Possibly added after the first paragraph IMO.

Thanks, Roger.
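
For context, a sketch of what such a split PTE write looked like on 32-bit
PAE builds (reconstructed for illustration, not the exact historical Xen
code; pte_write_nonatomic() and the local barrier() are stand-in names):

    #include <stdint.h>

    /* Compiler barrier: keep the two 32-bit halves ordered. */
    #define barrier() __asm__ __volatile__("" ::: "memory")

    /*
     * In 32-bit (PAE) mode a 64-bit PTE could not be written with one
     * plain store, so the entry was zapped first to avoid a window in
     * which the stale low half paired with the new high half.
     */
    static inline void pte_write_nonatomic(uint64_t *ptep, uint64_t pte)
    {
        uint32_t *words = (uint32_t *)ptep;

        words[0] = 0;                      /* entry temporarily not-present */
        barrier();
        words[1] = (uint32_t)(pte >> 32);  /* install the new high half */
        barrier();
        words[0] = (uint32_t)pte;          /* new low half: entry live again */
    }

The PTE hence became temporarily not-present, which is the caveat the comment
removed from page.h below warned about; the _atomic variant presumably relied
on a cmpxchg8b-style update to change both halves in one shot, hence the two
separate flavours.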

Patch

diff --git a/xen/arch/x86/include/asm/page.h b/xen/arch/x86/include/asm/page.h
index 350d1fb1100f..3d20ee507a33 100644
--- a/xen/arch/x86/include/asm/page.h
+++ b/xen/arch/x86/include/asm/page.h
@@ -26,27 +26,14 @@ 
     l4e_from_intpte(pte_read_atomic(&l4e_get_intpte(*(l4ep))))
 
 /* Write a pte atomically to memory. */
-#define l1e_write_atomic(l1ep, l1e) \
-    pte_write_atomic(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e))
-#define l2e_write_atomic(l2ep, l2e) \
-    pte_write_atomic(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e))
-#define l3e_write_atomic(l3ep, l3e) \
-    pte_write_atomic(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e))
-#define l4e_write_atomic(l4ep, l4e) \
-    pte_write_atomic(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))
-
-/*
- * Write a pte safely but non-atomically to memory.
- * The PTE may become temporarily not-present during the update.
- */
 #define l1e_write(l1ep, l1e) \
-    pte_write(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e))
+    write_atomic(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e))
 #define l2e_write(l2ep, l2e) \
-    pte_write(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e))
+    write_atomic(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e))
 #define l3e_write(l3ep, l3e) \
-    pte_write(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e))
+    write_atomic(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e))
 #define l4e_write(l4ep, l4e) \
-    pte_write(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))
+    write_atomic(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))
 
 /* Get direct integer representation of a pte's contents (intpte_t). */
 #define l1e_get_intpte(x)          ((x).l1)
diff --git a/xen/arch/x86/include/asm/x86_64/page.h b/xen/arch/x86/include/asm/x86_64/page.h
index 19ca64d79223..03fcce61c052 100644
--- a/xen/arch/x86/include/asm/x86_64/page.h
+++ b/xen/arch/x86/include/asm/x86_64/page.h
@@ -70,8 +70,6 @@  typedef l4_pgentry_t root_pgentry_t;
 #endif /* !__ASSEMBLY__ */
 
 #define pte_read_atomic(ptep)       read_atomic(ptep)
-#define pte_write_atomic(ptep, pte) write_atomic(ptep, pte)
-#define pte_write(ptep, pte)        write_atomic(ptep, pte)
 
 /* Given a virtual address, get an entry offset into a linear page table. */
 #define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> L1_PAGETABLE_SHIFT)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 95795567f2a5..fab2de5fae27 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5253,7 +5253,7 @@  int map_pages_to_xen(
              !(flags & (_PAGE_PAT | MAP_SMALL_PAGES)) )
         {
             /* 1GB-page mapping. */
-            l3e_write_atomic(pl3e, l3e_from_mfn(mfn, l1f_to_lNf(flags)));
+            l3e_write(pl3e, l3e_from_mfn(mfn, l1f_to_lNf(flags)));
 
             if ( (l3e_get_flags(ol3e) & _PAGE_PRESENT) )
             {
@@ -5353,8 +5353,7 @@  int map_pages_to_xen(
             if ( (l3e_get_flags(*pl3e) & _PAGE_PRESENT) &&
                  (l3e_get_flags(*pl3e) & _PAGE_PSE) )
             {
-                l3e_write_atomic(pl3e,
-                                 l3e_from_mfn(l2mfn, __PAGE_HYPERVISOR));
+                l3e_write(pl3e, l3e_from_mfn(l2mfn, __PAGE_HYPERVISOR));
                 l2mfn = INVALID_MFN;
             }
             if ( locking )
@@ -5375,7 +5374,7 @@  int map_pages_to_xen(
         {
             /* Super-page mapping. */
             ol2e = *pl2e;
-            l2e_write_atomic(pl2e, l2e_from_mfn(mfn, l1f_to_lNf(flags)));
+            l2e_write(pl2e, l2e_from_mfn(mfn, l1f_to_lNf(flags)));
 
             if ( (l2e_get_flags(ol2e) & _PAGE_PRESENT) )
             {
@@ -5457,8 +5456,7 @@  int map_pages_to_xen(
                 if ( (l2e_get_flags(*pl2e) & _PAGE_PRESENT) &&
                      (l2e_get_flags(*pl2e) & _PAGE_PSE) )
                 {
-                    l2e_write_atomic(pl2e, l2e_from_mfn(l1mfn,
-                                                        __PAGE_HYPERVISOR));
+                    l2e_write(pl2e, l2e_from_mfn(l1mfn, __PAGE_HYPERVISOR));
                     l1mfn = INVALID_MFN;
                 }
                 if ( locking )
@@ -5471,7 +5469,7 @@  int map_pages_to_xen(
             if ( !pl1e )
                 pl1e = map_l1t_from_l2e(*pl2e) + l1_table_offset(virt);
             ol1e  = *pl1e;
-            l1e_write_atomic(pl1e, l1e_from_mfn(mfn, flags));
+            l1e_write(pl1e, l1e_from_mfn(mfn, flags));
             UNMAP_DOMAIN_PAGE(pl1e);
             if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) )
             {
@@ -5524,8 +5522,7 @@  int map_pages_to_xen(
                 UNMAP_DOMAIN_PAGE(l1t);
                 if ( i == L1_PAGETABLE_ENTRIES )
                 {
-                    l2e_write_atomic(pl2e, l2e_from_pfn(base_mfn,
-                                                        l1f_to_lNf(flags)));
+                    l2e_write(pl2e, l2e_from_pfn(base_mfn, l1f_to_lNf(flags)));
                     if ( locking )
                         spin_unlock(&map_pgdir_lock);
                     flush_area(virt - PAGE_SIZE,
@@ -5574,8 +5571,7 @@  int map_pages_to_xen(
             UNMAP_DOMAIN_PAGE(l2t);
             if ( i == L2_PAGETABLE_ENTRIES )
             {
-                l3e_write_atomic(pl3e, l3e_from_pfn(base_mfn,
-                                                    l1f_to_lNf(flags)));
+                l3e_write(pl3e, l3e_from_pfn(base_mfn, l1f_to_lNf(flags)));
                 if ( locking )
                     spin_unlock(&map_pgdir_lock);
                 flush_area(virt - PAGE_SIZE,
@@ -5674,7 +5670,7 @@  int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
                     : l3e_from_pfn(l3e_get_pfn(*pl3e),
                                    (l3e_get_flags(*pl3e) & ~FLAGS_MASK) | nf);
 
-                l3e_write_atomic(pl3e, nl3e);
+                l3e_write(pl3e, nl3e);
                 v += 1UL << L3_PAGETABLE_SHIFT;
                 continue;
             }
@@ -5696,8 +5692,7 @@  int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
             if ( (l3e_get_flags(*pl3e) & _PAGE_PRESENT) &&
                  (l3e_get_flags(*pl3e) & _PAGE_PSE) )
             {
-                l3e_write_atomic(pl3e,
-                                 l3e_from_mfn(l2mfn, __PAGE_HYPERVISOR));
+                l3e_write(pl3e, l3e_from_mfn(l2mfn, __PAGE_HYPERVISOR));
                 l2mfn = INVALID_MFN;
             }
             if ( locking )
@@ -5732,7 +5727,7 @@  int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
                     : l2e_from_pfn(l2e_get_pfn(*pl2e),
                                    (l2e_get_flags(*pl2e) & ~FLAGS_MASK) | nf);
 
-                l2e_write_atomic(pl2e, nl2e);
+                l2e_write(pl2e, nl2e);
                 v += 1UL << L2_PAGETABLE_SHIFT;
             }
             else
@@ -5755,8 +5750,7 @@  int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
                 if ( (l2e_get_flags(*pl2e) & _PAGE_PRESENT) &&
                      (l2e_get_flags(*pl2e) & _PAGE_PSE) )
                 {
-                    l2e_write_atomic(pl2e, l2e_from_mfn(l1mfn,
-                                                        __PAGE_HYPERVISOR));
+                    l2e_write(pl2e, l2e_from_mfn(l1mfn, __PAGE_HYPERVISOR));
                     l1mfn = INVALID_MFN;
                 }
                 if ( locking )
@@ -5785,7 +5779,7 @@  int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
                 : l1e_from_pfn(l1e_get_pfn(*pl1e),
                                (l1e_get_flags(*pl1e) & ~FLAGS_MASK) | nf);
 
-            l1e_write_atomic(pl1e, nl1e);
+            l1e_write(pl1e, nl1e);
             UNMAP_DOMAIN_PAGE(pl1e);
             v += PAGE_SIZE;
 
@@ -5824,7 +5818,7 @@  int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
             if ( i == L1_PAGETABLE_ENTRIES )
             {
                 /* Empty: zap the L2E and free the L1 page. */
-                l2e_write_atomic(pl2e, l2e_empty());
+                l2e_write(pl2e, l2e_empty());
                 if ( locking )
                     spin_unlock(&map_pgdir_lock);
                 flush_area(NULL, FLUSH_TLB_GLOBAL); /* flush before free */
@@ -5868,7 +5862,7 @@  int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
             if ( i == L2_PAGETABLE_ENTRIES )
             {
                 /* Empty: zap the L3E and free the L2 page. */
-                l3e_write_atomic(pl3e, l3e_empty());
+                l3e_write(pl3e, l3e_empty());
                 if ( locking )
                     spin_unlock(&map_pgdir_lock);
                 flush_area(NULL, FLUSH_TLB_GLOBAL); /* flush before free */
@@ -5940,7 +5934,7 @@  void init_or_livepatch modify_xen_mappings_lite(
         {
             ASSERT(IS_ALIGNED(v, 1UL << L2_PAGETABLE_SHIFT));
 
-            l2e_write_atomic(pl2e, l2e_from_intpte((l2e.l2 & ~fm) | flags));
+            l2e_write(pl2e, l2e_from_intpte((l2e.l2 & ~fm) | flags));
 
             v += 1UL << L2_PAGETABLE_SHIFT;
             continue;
@@ -5958,8 +5952,7 @@  void init_or_livepatch modify_xen_mappings_lite(
 
                 ASSERT(l1f & _PAGE_PRESENT);
 
-                l1e_write_atomic(pl1e,
-                                 l1e_from_intpte((l1e.l1 & ~fm) | flags));
+                l1e_write(pl1e, l1e_from_intpte((l1e.l1 & ~fm) | flags));
 
                 v += 1UL << L1_PAGETABLE_SHIFT;