--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -95,6 +95,12 @@ static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshi
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
+/*
+ * This defines how many bits we have in the arch specific swp offset.
+ * On vanilla 32-bit systems the pte and the swap entry have the same size.
+ */
+#define __ARCH_SWP_OFFSET_BITS (sizeof(swp_entry_t) * BITS_PER_BYTE - SWP_TYPE_BITS)
+
/* No inverted PFNs on 2 level page tables */
static inline u64 protnone_mask(u64 val)
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -287,6 +287,13 @@ static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
#define __pte_to_swp_entry(pte) (__swp_entry(__pteval_swp_type(pte), \
__pteval_swp_offset(pte)))
+/*
+ * This defines how many bits we have in the arch specific swp offset.
+ * Since we are putting the 32-bit swap entry into the 64-bit pte, the
+ * limit is the 32-bit swap entry width minus the swap type field.
+ */
+#define __ARCH_SWP_OFFSET_BITS (sizeof(swp_entry_t) * BITS_PER_BYTE - SWP_TYPE_BITS)
+
#include <asm/pgtable-invert.h>
#endif /* _ASM_X86_PGTABLE_3LEVEL_H */
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -217,6 +217,11 @@ static inline void native_pgd_clear(pgd_t *pgd)
/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS)
+/*
+ * This defines how many bits we have in the arch specific swp offset.
+ * On 64-bit systems both swp_entry_t and the pte are 64 bits wide.
+ */
+#define __ARCH_SWP_OFFSET_BITS (BITS_PER_LONG - SWP_OFFSET_SHIFT)
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
This will enable the new migration young bit for all x86 systems, both
32-bit and 64-bit (including PAE).

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 arch/x86/include/asm/pgtable-2level.h | 6 ++++++
 arch/x86/include/asm/pgtable-3level.h | 7 +++++++
 arch/x86/include/asm/pgtable_64.h     | 5 +++++
 3 files changed, 18 insertions(+)
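
As a side note (not part of the patch): the arithmetic that __ARCH_SWP_OFFSET_BITS
captures can be sanity-checked outside the kernel. Below is a standalone userspace
sketch; the numeric values for the swap offset width, MAX_PHYSMEM_BITS and
PAGE_SHIFT are illustrative stand-ins rather than the real per-config kernel
constants. It only shows how many offset bits each page table layout leaves and
whether one spare bit, such as the migration young bit, still fits above the PFN.

/*
 * Standalone sketch, not kernel code.  The numbers below are illustrative
 * stand-ins for the per-config kernel constants; only the arithmetic mirrors
 * what __ARCH_SWP_OFFSET_BITS is meant to expose.
 */
#include <stdio.h>

#define SWP_TYPE_BITS	5	/* swap type field width used on x86 */

struct layout {
	const char *name;
	unsigned int offset_bits;	/* stand-in for __ARCH_SWP_OFFSET_BITS */
	unsigned int pfn_bits;		/* MAX_PHYSMEM_BITS - PAGE_SHIFT (illustrative) */
};

int main(void)
{
	/* Rough stand-ins: 32-bit non-PAE, 32-bit PAE, 64-bit 4-level */
	static const struct layout layouts[] = {
		{ "pgtable-2level", 32 - SWP_TYPE_BITS, 32 - 12 },
		{ "pgtable-3level", 32 - SWP_TYPE_BITS, 36 - 12 },
		{ "pgtable_64",     50,                 46 - 12 },
	};

	for (unsigned int i = 0; i < sizeof(layouts) / sizeof(layouts[0]); i++) {
		const struct layout *l = &layouts[i];
		/* Spare room above the PFN inside the swap offset */
		int spare = (int)l->offset_bits - (int)l->pfn_bits;

		printf("%-16s offset bits %2u, pfn bits %2u, spare bits %2d => %s\n",
		       l->name, l->offset_bits, l->pfn_bits, spare,
		       spare >= 1 ? "room for an extra (young) bit" : "no room");
	}
	return 0;
}

If a check like this were wanted in-tree, it would more naturally be expressed
as a BUILD_BUG_ON() against the real macros, in the same spirit as the existing
MAX_SWAPFILES_CHECK() shown above.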