[RFC,4/4] mm/x86: Define __ARCH_SWP_OFFSET_BITS

Message ID 20220729014041.21292-5-peterx@redhat.com
State New
Series mm: Remember young bit for migration entries

Commit Message

Peter Xu July 29, 2022, 1:40 a.m. UTC
This enables the new migration young bit on all x86 systems, both 32-bit
and 64-bit (including 32-bit PAE).

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 arch/x86/include/asm/pgtable-2level.h | 6 ++++++
 arch/x86/include/asm/pgtable-3level.h | 7 +++++++
 arch/x86/include/asm/pgtable_64.h     | 5 +++++
 3 files changed, 18 insertions(+)
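
To make the bit accounting concrete, here is a small self-contained C demo
of the arithmetic the three definitions below perform. The DEMO_* names are
mine; DEMO_SWP_TYPE_BITS mirrors the x86 value (5 bits), while the 64-bit
offset shift of 14 is only an assumption for illustration, since it depends
on pte layout constants not shown in this patch:

#include <stdio.h>
#include <stdint.h>

/* Assumed constants mirroring the x86 headers (not defined in this patch). */
#define DEMO_BITS_PER_BYTE	8
#define DEMO_BITS_PER_LONG	64
#define DEMO_SWP_TYPE_BITS	5	/* x86 swap type field width */
#define DEMO_SWP_OFFSET_SHIFT	14	/* assumed 64-bit pte layout */

int main(void)
{
	/*
	 * 2-level and PAE: the swap entry itself is a 32-bit value, so the
	 * offset gets whatever remains after the type field.  Note that
	 * sizeof() yields bytes, hence the BITS_PER_BYTE scaling.
	 */
	unsigned int offset_bits_32 =
		sizeof(uint32_t) * DEMO_BITS_PER_BYTE - DEMO_SWP_TYPE_BITS;

	/*
	 * 64-bit: the offset is encoded shifted all the way up, so the
	 * usable width is what survives the round trip.
	 */
	unsigned int offset_bits_64 =
		DEMO_BITS_PER_LONG - DEMO_SWP_OFFSET_SHIFT;

	printf("32-bit swp offset bits: %u\n", offset_bits_32);	/* 27 */
	printf("64-bit swp offset bits: %u\n", offset_bits_64);	/* 50 */
	return 0;
}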

Patch

diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index 60d0f9015317..6e70833feb69 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -95,6 +95,12 @@  static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshi
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
 #define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
 
+/*
+ * This defines how many bits we have in the arch-specific swp offset.
+ * On vanilla 32-bit systems the pte and the swap entry have the same size.
+ */
+#define __ARCH_SWP_OFFSET_BITS	(sizeof(swp_entry_t) * BITS_PER_BYTE - SWP_TYPE_BITS)
+
 /* No inverted PFNs on 2 level page tables */
 
 static inline u64 protnone_mask(u64 val)
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 28421a887209..8dbf29b51f8b 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -287,6 +287,13 @@  static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
 #define __pte_to_swp_entry(pte)	(__swp_entry(__pteval_swp_type(pte), \
 					     __pteval_swp_offset(pte)))
 
+/*
+ * This defines how many bits we have in the arch-specific swp offset.
+ * Since we're putting the 32-bit swap entry into a 64-bit pte here, the
+ * limit is the 32-bit swap entry width minus the swap type field.
+ */
+#define __ARCH_SWP_OFFSET_BITS	(sizeof(swp_entry_t) * BITS_PER_BYTE - SWP_TYPE_BITS)
+
 #include <asm/pgtable-invert.h>
 
 #endif /* _ASM_X86_PGTABLE_3LEVEL_H */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index e479491da8d5..1714f0ded1db 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -217,6 +217,11 @@  static inline void native_pgd_clear(pgd_t *pgd)
 
 /* We always extract/encode the offset by shifting it all the way up, and then down again */
 #define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS)
+/*
+ * This defines how many bits we have in the arch-specific swp offset.
+ * On 64-bit systems both swp_entry_t and the pte are 64 bits wide.
+ */
+#define __ARCH_SWP_OFFSET_BITS	(BITS_PER_LONG - SWP_OFFSET_SHIFT)
 
 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
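
As a closing note: since the point of exporting __ARCH_SWP_OFFSET_BITS is to
let generic code decide whether a spare bit exists for the young marker, a
compile-time sanity check on the consumer side would catch unit mistakes
(bytes vs. bits) immediately. A hedged kernel-style sketch, assuming kernel
context; migration_young_bit_available() and the pfn-width reasoning are
hypothetical and not part of this series:

/*
 * Hypothetical consumer-side check, illustrative only: verify at build
 * time that the architecture reports a sane offset width, then test
 * whether one bit is left over after the page offset it must carry.
 */
#include <linux/build_bug.h>
#include <linux/types.h>

static inline bool migration_young_bit_available(void)
{
	/* Guards against broken definitions, e.g. a bytes-vs-bits slip. */
	BUILD_BUG_ON(__ARCH_SWP_OFFSET_BITS < 1);

	/*
	 * Assumption: the offset must hold a pfn of up to
	 * MAX_PHYSMEM_BITS - PAGE_SHIFT bits; anything beyond that is
	 * spare and could host the young bit.
	 */
	return __ARCH_SWP_OFFSET_BITS > (MAX_PHYSMEM_BITS - PAGE_SHIFT);
}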