@@ -124,28 +124,35 @@ EXPORT_SYMBOL(xpfo_kmap);
void xpfo_kunmap(void *kaddr, struct page *page)
{
+ bool flush_tlb = false;
+
if (!static_branch_unlikely(&xpfo_inited))
return;
if (!PageXpfoUser(page))
return;
- spin_lock(&page->xpfo_lock);
-
/*
* The page is to be allocated back to user space, so unmap it from the
* kernel, flush the TLB and tag it as a user page.
*/
if (atomic_dec_return(&page->xpfo_mapcount) == 0) {
-#ifdef CONFIG_XPFO_DEBUG
- BUG_ON(PageXpfoUnmapped(page));
-#endif
- SetPageXpfoUnmapped(page);
- set_kpte(kaddr, page, __pgprot(0));
- xpfo_flush_kernel_tlb(page, 0);
+ spin_lock(&page->xpfo_lock);
+
+ /*
+ * If we raced with a kmap after the atomic_dec_return
+ * above, the page is mapped again; we must not nuke
+ * the mapping.
+ */
+ if (atomic_read(&page->xpfo_mapcount) == 0) {
+ SetPageXpfoUnmapped(page);
+ set_kpte(kaddr, page, __pgprot(0));
+ flush_tlb = true;
+ }
+ spin_unlock(&page->xpfo_lock);
}
- spin_unlock(&page->xpfo_lock);
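+ /*
+ * The TLB flush can be expensive (it may interrupt other
+ * CPUs), so do it after dropping the per-page lock.
+ */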
+ if (flush_tlb)
+ xpfo_flush_kernel_tlb(page, 0);
}
EXPORT_SYMBOL(xpfo_kunmap);
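
For reference, the window that the locked recheck closes: a concurrent
xpfo_kmap() can bump xpfo_mapcount and restore the kernel mapping after
this function's atomic_dec_return() has observed zero, but before it
takes xpfo_lock. A minimal sketch of that kmap side under the same
locking scheme (the exact body is an assumption, not part of this hunk):

void xpfo_kmap(void *kaddr, struct page *page)
{
        if (!static_branch_unlikely(&xpfo_inited))
                return;

        if (!PageXpfoUser(page))
                return;

        /*
         * Map the page back into the kernel. Taking xpfo_lock
         * serializes this against the recheck in xpfo_kunmap(), so
         * a kunmap that raced past its atomic_dec_return() sees
         * xpfo_mapcount != 0 and leaves the mapping alone.
         */
        spin_lock(&page->xpfo_lock);

        if ((atomic_inc_return(&page->xpfo_mapcount) == 1) &&
            TestClearPageXpfoUnmapped(page))
                set_kpte(kaddr, page, PAGE_KERNEL);

        spin_unlock(&page->xpfo_lock);
}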