@@ -6294,6 +6294,12 @@ void __iomem *__init ioremap_wc(paddr_t pa, size_t len)
return (void __force __iomem *)(va + offs);
}
+static bool perdomain_l1e_needs_freeing(l1_pgentry_t l1e)
+{
+ return (l1e_get_flags(l1e) & (_PAGE_PRESENT | _PAGE_AVAIL0)) ==
+ (_PAGE_PRESENT | _PAGE_AVAIL0);
+}
+
int create_perdomain_mapping(struct domain *d, unsigned long va,
unsigned int nr, l1_pgentry_t **pl1tab,
struct page_info **ppg)
@@ -6446,9 +6452,7 @@ void destroy_perdomain_mapping(struct domain *d, unsigned long va,
for ( ; nr && i < L1_PAGETABLE_ENTRIES; --nr, ++i )
{
- if ( (l1e_get_flags(l1tab[i]) &
- (_PAGE_PRESENT | _PAGE_AVAIL0)) ==
- (_PAGE_PRESENT | _PAGE_AVAIL0) )
+ if ( perdomain_l1e_needs_freeing(l1tab[i]) )
free_domheap_page(l1e_get_page(l1tab[i]));
l1tab[i] = l1e_empty();
}
@@ -6498,9 +6502,7 @@ void free_perdomain_mappings(struct domain *d)
unsigned int k;
for ( k = 0; k < L1_PAGETABLE_ENTRIES; ++k )
- if ( (l1e_get_flags(l1tab[k]) &
- (_PAGE_PRESENT | _PAGE_AVAIL0)) ==
- (_PAGE_PRESENT | _PAGE_AVAIL0) )
+ if ( perdomain_l1e_needs_freeing(l1tab[k]) )
free_domheap_page(l1e_get_page(l1tab[k]));
unmap_domain_page(l1tab);
L1 present entries that require the underlying page to be freed have the _PAGE_AVAIL0 bit set; introduce a helper to unify the checking logic into a single place. No functional change intended. Signed-off-by: Roger Pau Monné <roger.pau@citrix.com> --- xen/arch/x86/mm.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-)