@@ -87,6 +87,11 @@ inline void set_kpte(void *kaddr, struct page *page, pgprot_t prot)
}
+void xpfo_flush_tlb_all(void)
+{
+ xpfo_flush_tlb_kernel_range(0, TLB_FLUSH_ALL); /* flush the entire kernel address range */
+}
+
inline void xpfo_flush_kernel_tlb(struct page *page, int order)
{
int level;
@@ -406,9 +406,11 @@ PAGEFLAG(Idle, idle, PF_ANY)
PAGEFLAG(XpfoUser, xpfo_user, PF_ANY)
TESTCLEARFLAG(XpfoUser, xpfo_user, PF_ANY)
TESTSETFLAG(XpfoUser, xpfo_user, PF_ANY)
+#define __PG_XPFO_USER (1UL << PG_xpfo_user) /* mask form, for PAGE_FLAGS_CHECK_AT_PREP */
PAGEFLAG(XpfoUnmapped, xpfo_unmapped, PF_ANY)
TESTCLEARFLAG(XpfoUnmapped, xpfo_unmapped, PF_ANY)
TESTSETFLAG(XpfoUnmapped, xpfo_unmapped, PF_ANY)
+#define __PG_XPFO_UNMAPPED (1UL << PG_xpfo_unmapped) /* mask form, for PAGE_FLAGS_CHECK_AT_PREP */
#endif
/*
@@ -787,7 +789,8 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP \
- (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
+ (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON & ~__PG_XPFO_USER & \
+ ~__PG_XPFO_UNMAPPED) /* XPFO flags are allowed to persist across allocations */
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
@@ -34,6 +34,7 @@ void set_kpte(void *kaddr, struct page *page, pgprot_t prot);
void xpfo_dma_map_unmap_area(bool map, const void *addr, size_t size,
enum dma_data_direction dir);
void xpfo_flush_kernel_tlb(struct page *page, int order);
+void xpfo_flush_tlb_all(void); /* flush the whole kernel TLB */
void xpfo_kmap(void *kaddr, struct page *page);
void xpfo_kunmap(void *kaddr, struct page *page);
@@ -55,6 +56,8 @@ bool xpfo_enabled(void);
phys_addr_t user_virt_to_phys(unsigned long addr);
+bool xpfo_pcp_refill(struct page *page, enum migratetype migratetype,
+ int order); /* true => caller must flush the kernel TLB */
#else /* !CONFIG_XPFO */
static inline void xpfo_init_single_page(struct page *page) { }
@@ -82,6 +85,12 @@ static inline bool xpfo_enabled(void) { return false; }
static inline phys_addr_t user_virt_to_phys(unsigned long addr) { return 0; }
+static inline bool xpfo_pcp_refill(struct page *page,
+				   enum migratetype migratetype, int order)
+{
+	/* XPFO disabled: nothing was unmapped, so no TLB flush is needed */
+	return false;
+}
#endif /* CONFIG_XPFO */
#endif /* _LINUX_XPFO_H */
@@ -2478,6 +2478,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
int migratetype)
{
int i, alloced = 0;
+ bool flush_tlb = false; /* set when XPFO newly unmapped a page below */
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
@@ -2503,6 +2504,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
if (is_migrate_cma(get_pcppage_migratetype(page)))
__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
-(1 << order));
+ flush_tlb |= xpfo_pcp_refill(page, migratetype, order); /* may mark page XPFO-unmapped */
}
@@ -2513,6 +2515,8 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 */
__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
spin_unlock(&zone->lock);
+ if (flush_tlb)
+ xpfo_flush_tlb_all(); /* flush only after zone->lock is dropped */
return alloced;
}
@@ -47,7 +47,8 @@ void __meminit xpfo_init_single_page(struct page *page)
void xpfo_alloc_pages(struct page *page, int order, gfp_t gfp)
{
- int i, flush_tlb = 0;
+ int i;
+ bool flush_tlb = false; /* any page newly tagged as an XPFO user page? */
if (!static_branch_unlikely(&xpfo_inited))
return;
@@ -65,7 +66,7 @@ void xpfo_alloc_pages(struct page *page, int order, gfp_t gfp)
 * was previously allocated to the kernel.
 */
if (!TestSetPageXpfoUser(page + i))
- flush_tlb = 1;
+ flush_tlb = true;
} else {
/* Tag the page as a non-user (kernel) page */
ClearPageXpfoUser(page + i);
@@ -190,3 +193,32 @@ void xpfo_temp_unmap(const void *addr, size_t size, void **mapping,
kunmap_atomic(mapping[i]);
}
EXPORT_SYMBOL(xpfo_temp_unmap);
+
+/* Returns true if the caller needs to flush the kernel TLB afterwards. */
+bool xpfo_pcp_refill(struct page *page, enum migratetype migratetype,
+		     int order)
+{
+	int i;
+	bool flush_tlb = false;
+
+	if (!static_branch_unlikely(&xpfo_inited))
+		return false;
+
+	for (i = 0; i < 1 << order; i++) {
+		if (migratetype == MIGRATE_MOVABLE) {
+			/*
+			 * GFP_HIGHUSER: tag the page as a user page, mark
+			 * it as unmapped in kernel space and flush the TLB
+			 * if it was previously allocated to the kernel.
+			 */
+			if (!TestSetPageXpfoUnmapped(page + i))
+				flush_tlb = true;
+			SetPageXpfoUser(page + i);
+		} else {
+			/* Tag the page as a non-user (kernel) page */
+			ClearPageXpfoUser(page + i);
+		}
+	}
+
+	return flush_tlb;
+}