@@ -52,7 +52,7 @@ static inline void *encoded_page_address(unsigned long encoded_va)
static inline void page_frag_cache_init(struct page_frag_cache *nc)
{
- nc->encoded_va = 0;
+ memset(nc, 0, sizeof(*nc));
}
static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
@@ -19,8 +19,27 @@
#include <linux/page_frag_cache.h>
#include "internal.h"
-static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
- gfp_t gfp_mask)
+static bool __page_frag_cache_reuse(unsigned long encoded_va,
+ unsigned int pagecnt_bias)
+{
+ struct page *page;
+
+ page = virt_to_page((void *)encoded_va);
+ if (!page_ref_sub_and_test(page, pagecnt_bias))
+ return false;
+
+ if (unlikely(encoded_page_pfmemalloc(encoded_va))) {
+ free_unref_page(page, encoded_page_order(encoded_va));
+ return false;
+ }
+
+ /* OK, page count is 0, we can safely set it */
+ set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
+ return true;
+}
+
+static bool __page_frag_cache_refill(struct page_frag_cache *nc,
+ gfp_t gfp_mask)
{
unsigned long order = PAGE_FRAG_CACHE_MAX_ORDER;
struct page *page = NULL;
@@ -35,8 +54,8 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
if (unlikely(!page)) {
page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
if (unlikely(!page)) {
- nc->encoded_va = 0;
- return NULL;
+ memset(nc, 0, sizeof(*nc));
+ return false;
}
order = 0;
@@ -45,7 +64,33 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
nc->encoded_va = encode_aligned_va(page_address(page), order,
page_is_pfmemalloc(page));
- return page;
+ /* Even if we own the page, we do not use atomic_set().
+ * This would break get_page_unless_zero() users.
+ */
+ page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
+
+ return true;
+}
+
+/* Reload the cache by reusing the old page if possible, or by refilling
+ * it from the page allocator.
+ */
+static bool __page_frag_cache_reload(struct page_frag_cache *nc,
+ gfp_t gfp_mask)
+{
+ if (likely(nc->encoded_va)) {
+ if (__page_frag_cache_reuse(nc->encoded_va, nc->pagecnt_bias))
+ goto out;
+ }
+
+ if (unlikely(!__page_frag_cache_refill(nc, gfp_mask)))
+ return false;
+
+out:
+ /* reset page count bias and remaining to start of new frag */
+ nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
+ nc->remaining = page_frag_cache_page_size(nc->encoded_va);
+ return true;
}
void page_frag_cache_drain(struct page_frag_cache *nc)
@@ -55,7 +100,7 @@ void page_frag_cache_drain(struct page_frag_cache *nc)
__page_frag_cache_drain(virt_to_head_page((void *)nc->encoded_va),
nc->pagecnt_bias);
- nc->encoded_va = 0;
+ memset(nc, 0, sizeof(*nc));
}
EXPORT_SYMBOL(page_frag_cache_drain);
@@ -73,67 +118,44 @@ void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
unsigned int align_mask)
{
unsigned long encoded_va = nc->encoded_va;
- unsigned int size, remaining;
- struct page *page;
-
- if (unlikely(!encoded_va)) {
-refill:
- page = __page_frag_cache_refill(nc, gfp_mask);
- if (!page)
- return NULL;
-
- encoded_va = nc->encoded_va;
- size = page_frag_cache_page_size(encoded_va);
-
- /* Even if we own the page, we do not use atomic_set().
- * This would break get_page_unless_zero() users.
- */
- page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
-
- /* reset page count bias and remaining to start of new frag */
- nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
- nc->remaining = size;
- } else {
- size = page_frag_cache_page_size(encoded_va);
- }
+ unsigned int remaining;
remaining = nc->remaining & align_mask;
- if (unlikely(remaining < fragsz)) {
- if (unlikely(fragsz > PAGE_SIZE)) {
- /*
- * The caller is trying to allocate a fragment
- * with fragsz > PAGE_SIZE but the cache isn't big
- * enough to satisfy the request, this may
- * happen in low memory conditions.
- * We don't release the cache page because
- * it could make memory pressure worse
- * so we simply return NULL here.
- */
- return NULL;
- }
-
- page = virt_to_page((void *)encoded_va);
- if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
- goto refill;
-
- if (unlikely(encoded_page_pfmemalloc(encoded_va))) {
- free_unref_page(page, encoded_page_order(encoded_va));
- goto refill;
- }
+ /* As remaining is guaranteed to be zero both when the cache is
+ * initialized and when the old cache is drained, checking
+ * 'remaining >= fragsz' is enough to tell whether there is enough
+ * space left for the new fragment allocation.
+ */
+ if (likely(remaining >= fragsz)) {
+ nc->pagecnt_bias--;
+ nc->remaining = remaining - fragsz;
- /* OK, page count is 0, we can safely set it */
- set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
+ return encoded_page_address(encoded_va) +
+ (page_frag_cache_page_size(encoded_va) - remaining);
+ }
- /* reset page count bias and remaining to start of new frag */
- nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
- remaining = size;
+ if (unlikely(fragsz > PAGE_SIZE)) {
+ /*
+ * The caller is trying to allocate a fragment with
+ * fragsz > PAGE_SIZE but the cache isn't big enough to
+ * satisfy the request; this may happen in low memory
+ * conditions. We don't release the cache page because doing
+ * so could make memory pressure worse, so we simply return
+ * NULL here.
+ */
+ return NULL;
}
+ if (unlikely(!__page_frag_cache_reload(nc, gfp_mask)))
+ return NULL;
+
+ /* As fragments are allocated from the cache in a count-up manner, the
+ * offset of a fragment taken from a freshly reloaded cache is zero, so
+ * no extra alignment or offset calculation is needed here.
+ */
nc->pagecnt_bias--;
- nc->remaining = remaining - fragsz;
+ nc->remaining -= fragsz;
- return encoded_page_address(encoded_va) + (size - remaining);
+ return encoded_page_address(nc->encoded_va);
}
EXPORT_SYMBOL(__page_frag_alloc_va_align);
Refactor common code from __page_frag_alloc_va_align() into
__page_frag_cache_reload(), so that the new API can make use of it.

CC: Alexander Duyck <alexander.duyck@gmail.com>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 include/linux/page_frag_cache.h |   2 +-
 mm/page_frag_cache.c            | 138 ++++++++++++++++++--------------
 2 files changed, 81 insertions(+), 59 deletions(-)
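
For reference, a minimal caller sketch (not part of the patch) built only
from the helpers visible in this diff: page_frag_cache_init(),
__page_frag_alloc_va_align() and page_frag_cache_drain(). The free-side
helper name (page_frag_free_va() below) and the exact allocator parameter
order are assumptions and may differ in the tree this series applies to:

	static int page_frag_example(void)
	{
		struct page_frag_cache nc;
		void *va;

		page_frag_cache_init(&nc);

		/* Fast path: served from 'remaining' while decrementing
		 * 'pagecnt_bias'; the slow path goes through the
		 * __page_frag_cache_reload() helper added by this patch.
		 * align_mask is ~(align - 1), here SMP_CACHE_BYTES alignment.
		 */
		va = __page_frag_alloc_va_align(&nc, 256, GFP_KERNEL,
						~(SMP_CACHE_BYTES - 1));
		if (!va) {
			page_frag_cache_drain(&nc);
			return -ENOMEM;
		}

		/* ... use the 256-byte fragment ... */

		/* Release this fragment's reference (assumed free-side helper). */
		page_frag_free_va(va);

		/* Drop the bias references still held by the cache itself. */
		page_frag_cache_drain(&nc);
		return 0;
	}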