@@ -151,8 +151,6 @@ bool __read_mostly machine_to_phys_mapping_valid;
struct rangeset *__read_mostly mmio_ro_ranges;
-#define PAGE_CACHE_ATTRS (_PAGE_PAT|_PAGE_PCD|_PAGE_PWT)
-
bool __read_mostly opt_allow_superpage;
boolean_param("allowsuperpage", opt_allow_superpage);
@@ -315,6 +315,8 @@ void efi_update_l4_pgtable(unsigned int l4idx, l4_pgentry_t);
#define _PAGE_AVAIL_HIGH (_AC(0x7ff, U) << 12)
#define _PAGE_NX (cpu_has_nx ? _PAGE_NX_BIT : 0)
+#define PAGE_CACHE_ATTRS (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)
+
/*
* Debug option: Ensure that granted mappings are not implicitly unmapped.
* WARNING: This will need to be disabled to run OSes that use the spare PTE
Currently all the users are within x86/mm.c. But that will change once we split PV-specific mm code into another file. Lift the definition to page.h alongside the other _PAGE_* macros in preparation for later patches. No functional change. Add some spaces around "|" while moving. Signed-off-by: Wei Liu <wei.liu2@citrix.com> --- xen/arch/x86/mm.c | 2 -- xen/include/asm-x86/page.h | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-)