Allocating a page to userspace that was previously allocated to the
kernel requires an expensive TLB shootdown. To minimize this, we only
put non-kernel pages into the hot cache to favor their allocation.

Signed-off-by: Juerg Haefliger <juerg.haefliger@hpe.com>
---
 include/linux/xpfo.h | 2 ++
 mm/page_alloc.c      | 8 +++++++-
 mm/xpfo.c            | 8 ++++++++
 3 files changed, 17 insertions(+), 1 deletion(-)

--- a/include/linux/xpfo.h
+++ b/include/linux/xpfo.h
@@ -24,6 +24,7 @@ extern void xpfo_alloc_page(struct page *page, int order, gfp_t gfp);
extern void xpfo_free_page(struct page *page, int order);
extern bool xpfo_page_is_unmapped(struct page *page);
+extern bool xpfo_page_is_kernel(struct page *page);
#else /* !CONFIG_XPFO */
@@ -33,6 +34,7 @@ static inline void xpfo_alloc_page(struct page *page, int order, gfp_t gfp) { }
static inline void xpfo_free_page(struct page *page, int order) { }
static inline bool xpfo_page_is_unmapped(struct page *page) { return false; }
+static inline bool xpfo_page_is_kernel(struct page *page) { return false; }
#endif /* CONFIG_XPFO */
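
The #else stubs above follow the usual kernel convention: with CONFIG_XPFO disabled, the helpers become constant-false inlines, so call sites such as the free_hot_cold_page() change below need no #ifdef guards and the compiler folds the dead branches away. A minimal standalone sketch of the pattern (CONFIG_FEATURE_X and the helper name are invented for this example):

#include <stdbool.h>
#include <stdio.h>

#ifdef CONFIG_FEATURE_X
bool feature_x_active(void);	/* real implementation lives elsewhere */
#else
static inline bool feature_x_active(void) { return false; }
#endif

int main(void)
{
	/*
	 * No #ifdef at the call site: with the feature disabled, the
	 * stub is a compile-time constant and the branch folds away.
	 */
	if (feature_x_active())
		puts("feature path");
	else
		puts("fallback path");
	return 0;
}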
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2440,7 +2440,13 @@ void free_hot_cold_page(struct page *page, bool cold)
}
pcp = &this_cpu_ptr(zone->pageset)->pcp;
- if (!cold)
+ /*
+ * XPFO: Allocating a page to userspace that was previously allocated
+ * to the kernel requires an expensive TLB shootdown. To minimize this,
+ * we only put non-kernel pages into the hot cache to favor their
+ * allocation.
+ */
+ if (!cold && !xpfo_page_is_kernel(page))
list_add(&page->lru, &pcp->lists[migratetype]);
else
list_add_tail(&page->lru, &pcp->lists[migratetype]);
pcp->count++;
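
The effect of the one-line policy change above is easiest to see in a toy model. The following userspace sketch (all type and function names are invented; this is not the kernel's per-cpu pageset machinery) shows that a page flagged as previously kernel-owned lands at the cold tail even when freed hot, so the next allocation prefers the never-kernel page and avoids the TLB shootdown:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct page_model {
	bool was_kernel;	/* stand-in for the PAGE_EXT_XPFO_KERNEL bit */
	const char *name;
	struct page_model *next;
};

struct pcp_model {
	struct page_model *head;	/* hot end: handed out first */
	struct page_model *tail;	/* cold end: handed out last */
};

/* Mirrors the patched policy: only never-kernel pages go in hot. */
static void free_page_model(struct pcp_model *pcp, struct page_model *page,
			    bool cold)
{
	page->next = NULL;
	if (!cold && !page->was_kernel) {
		page->next = pcp->head;	/* hot: cheap to hand back out */
		pcp->head = page;
		if (!pcp->tail)
			pcp->tail = page;
	} else {
		if (pcp->tail)		/* cold: reuse is deferred */
			pcp->tail->next = page;
		else
			pcp->head = page;
		pcp->tail = page;
	}
}

int main(void)
{
	struct page_model kern = { true, "kernel-page", NULL };
	struct page_model user = { false, "user-page", NULL };
	struct pcp_model pcp = { NULL, NULL };

	/* Both freed "hot", but the kernel page is demoted to the tail. */
	free_page_model(&pcp, &kern, false);
	free_page_model(&pcp, &user, false);

	for (struct page_model *p = pcp.head; p; p = p->next)
		printf("%s\n", p->name);	/* user-page, then kernel-page */
	return 0;
}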
--- a/mm/xpfo.c
+++ b/mm/xpfo.c
@@ -204,3 +204,11 @@ inline bool xpfo_page_is_unmapped(struct page *page)
return test_bit(PAGE_EXT_XPFO_UNMAPPED, &lookup_page_ext(page)->flags);
}
EXPORT_SYMBOL(xpfo_page_is_unmapped);
+
+inline bool xpfo_page_is_kernel(struct page *page)
+{
+ if (!static_branch_unlikely(&xpfo_inited))
+ return false;
+
+ return test_bit(PAGE_EXT_XPFO_KERNEL, &lookup_page_ext(page)->flags);
+}
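
For readers unfamiliar with static keys, the fast path in xpfo_page_is_kernel() is worth noting: static_branch_unlikely() compiles to a runtime-patched jump, so until XPFO is initialized the page_ext lookup is skipped entirely. A rough userspace analogue (the flag, bit position, and type names are invented for illustration):

#include <stdbool.h>

/*
 * Models static_branch_unlikely(&xpfo_inited). In the kernel this is a
 * runtime-patched jump label; here it is just a global flag.
 */
static bool xpfo_inited_model;

#define XPFO_KERNEL_BIT 1	/* invented bit position for this sketch */

struct page_ext_model {
	unsigned long flags;	/* models lookup_page_ext(page)->flags */
};

static bool page_is_kernel_model(const struct page_ext_model *ext)
{
	if (!xpfo_inited_model)	/* cheap early-out while XPFO is off */
		return false;

	return ext->flags & (1UL << XPFO_KERNEL_BIT);
}

int main(void)
{
	struct page_ext_model ext = { 1UL << XPFO_KERNEL_BIT };

	/* Before init the helper always says "not kernel". */
	bool before = page_is_kernel_model(&ext);

	xpfo_inited_model = true;
	bool after = page_is_kernel_model(&ext);

	return (!before && after) ? 0 : 1;
}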