@@ -36,6 +36,7 @@ struct xpfo {
};
DEFINE_STATIC_KEY_FALSE(xpfo_inited);
/*
 * When enabled (via the "xpfotlbflush" early parameter, see
 * xpfotlbflush_param()), the kernel TLB is flushed whenever an XPFO page
 * changes state; see xpfo_cond_flush_kernel_tlb(). Off by default.
 */
DEFINE_STATIC_KEY_FALSE(xpfo_do_tlb_flush);

static bool xpfo_disabled __initdata;
@@ -46,7 +47,15 @@ static int __init noxpfo_param(char *str)
return 0;
}
/*
 * Handler for the "xpfotlbflush" early parameter: enables the
 * xpfo_do_tlb_flush static key so that XPFO page state changes flush the
 * kernel TLB. The parameter is a bare flag, so @str is ignored.
 * Returns 0 to indicate the parameter was handled.
 */
static int __init xpfotlbflush_param(char *str)
{
	static_branch_enable(&xpfo_do_tlb_flush);

	return 0;
}
+
early_param("noxpfo", noxpfo_param);
/* Opt-in TLB flushing on XPFO state changes; handled by xpfotlbflush_param(). */
early_param("xpfotlbflush", xpfotlbflush_param);
static bool __init need_xpfo(void)
{
@@ -76,6 +85,13 @@ bool __init xpfo_enabled(void)
}
EXPORT_SYMBOL(xpfo_enabled);
+
+static void xpfo_cond_flush_kernel_tlb(struct page *page, int order)
+{
+ if (static_branch_unlikely(&xpfo_do_tlb_flush))
+ xpfo_flush_kernel_tlb(page, order);
+}
+
static inline struct xpfo *lookup_xpfo(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
@@ -114,12 +130,17 @@ void xpfo_alloc_pages(struct page *page, int order, gfp_t gfp)
"xpfo: already mapped page being allocated\n");
if ((gfp & GFP_HIGHUSER) == GFP_HIGHUSER) {
- /*
- * Tag the page as a user page and flush the TLB if it
- * was previously allocated to the kernel.
- */
- if (!test_and_set_bit(XPFO_PAGE_USER, &xpfo->flags))
- flush_tlb = 1;
+ if (static_branch_unlikely(&xpfo_do_tlb_flush)) {
+ /*
+ * Tag the page as a user page and flush the TLB if it
+ * was previously allocated to the kernel.
+ */
+ if (!test_and_set_bit(XPFO_PAGE_USER, &xpfo->flags))
+ flush_tlb = 1;
+ } else {
+ set_bit(XPFO_PAGE_USER, &xpfo->flags);
+ }
+
} else {
/* Tag the page as a non-user (kernel) page */
clear_bit(XPFO_PAGE_USER, &xpfo->flags);
@@ -127,7 +148,7 @@ void xpfo_alloc_pages(struct page *page, int order, gfp_t gfp)
}
if (flush_tlb)
- xpfo_flush_kernel_tlb(page, order);
+ xpfo_cond_flush_kernel_tlb(page, order);
}
void xpfo_free_pages(struct page *page, int order)
@@ -221,7 +242,7 @@ void xpfo_kunmap(void *kaddr, struct page *page)
"xpfo: unmapping already unmapped page\n");
set_bit(XPFO_PAGE_UNMAPPED, &xpfo->flags);
set_kpte(kaddr, page, __pgprot(0));
- xpfo_flush_kernel_tlb(page, 0);
+ xpfo_cond_flush_kernel_tlb(page, 0);
}
spin_unlock(&xpfo->maplock);