From patchwork Mon Aug 26 12:40:13 2024
X-Patchwork-Submitter: Yunsheng Lin
X-Patchwork-Id: 13777874
X-Patchwork-Delegate: kuba@kernel.org
From: Yunsheng Lin
To: , ,
CC: , , Yunsheng Lin , Alexander Duyck , Andrew Morton ,
Subject: [PATCH net-next v15 06/13] mm: page_frag: reuse existing space for 'size' and 'pfmemalloc'
Date: Mon, 26 Aug 2024 20:40:13 +0800
Message-ID: <20240826124021.2635705-7-linyunsheng@huawei.com>
X-Mailer: git-send-email 2.30.0
In-Reply-To: <20240826124021.2635705-1-linyunsheng@huawei.com>
References: <20240826124021.2635705-1-linyunsheng@huawei.com>
X-Mailing-List: netdev@vger.kernel.org

Currently there is one 'struct page_frag' for every 'struct sock' and
'struct task_struct'; we are about to replace 'struct page_frag' with
'struct page_frag_cache' for both of them. Before beginning the
replacement, we need to ensure that 'struct page_frag_cache' is no
bigger than 'struct page_frag', as there may be tens of thousands of
'struct sock' and 'struct task_struct' instances in the system.

By OR'ing the page order and the pfmemalloc flag into the unused lower
bits of 'va', instead of spending a 'u16' or 'u32' on the page size and
a 'u8' on pfmemalloc, we avoid wasting 3 or 5 bytes per instance.
As the page address, pfmemalloc flag and order are unchanged for the
same page within the same 'page_frag_cache' instance, it makes sense to
fit them together. After this patch, the size of 'struct page_frag_cache'
should be the same as the size of 'struct page_frag'.

CC: Alexander Duyck
Signed-off-by: Yunsheng Lin
---
 include/linux/mm_types_task.h   | 19 ++++++-----
 include/linux/page_frag_cache.h | 60 +++++++++++++++++++++++++++++++--
 mm/page_frag_cache.c            | 51 +++++++++++++++-------------
 3 files changed, 97 insertions(+), 33 deletions(-)

diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index cdc1e3696439..a8635460e027 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -50,18 +50,21 @@ struct page_frag {
 #define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
 #define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
 struct page_frag_cache {
-	void *va;
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+	/* encoded_page consists of the virtual address, pfmemalloc bit and
+	 * order of a page.
+	 */
+	unsigned long encoded_page;
+
+	/* we maintain a pagecount bias, so that we don't dirty cache line
+	 * containing page->_refcount every time we allocate a fragment.
+	 */
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
 	__u16 offset;
-	__u16 size;
+	__u16 pagecnt_bias;
 #else
 	__u32 offset;
+	__u32 pagecnt_bias;
 #endif
-	/* we maintain a pagecount bias, so that we dont dirty cache line
-	 * containing page->_refcount every time we allocate a fragment.
-	 */
-	unsigned int pagecnt_bias;
-	bool pfmemalloc;
 };

 /* Track pages that require TLB flushes */
diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
index 0a52f7a179c8..372d6ed7e20a 100644
--- a/include/linux/page_frag_cache.h
+++ b/include/linux/page_frag_cache.h
@@ -3,18 +3,74 @@
 #ifndef _LINUX_PAGE_FRAG_CACHE_H
 #define _LINUX_PAGE_FRAG_CACHE_H

+#include <linux/bits.h>
+#include <linux/build_bug.h>
 #include <linux/log2.h>
+#include <linux/mm.h>
 #include <linux/mm_types_task.h>
 #include <linux/types.h>

+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+/* Use a full byte here to enable assembler optimization as the shift
+ * operation is usually expecting a byte.
+ */
+#define PAGE_FRAG_CACHE_ORDER_MASK		GENMASK(7, 0)
+#define PAGE_FRAG_CACHE_PFMEMALLOC_SHIFT	8
+#define PAGE_FRAG_CACHE_PFMEMALLOC_BIT		BIT(PAGE_FRAG_CACHE_PFMEMALLOC_SHIFT)
+#else
+/* Compiler should be able to figure out we don't read things as any value
+ * ANDed with 0 is 0.
+ */
+#define PAGE_FRAG_CACHE_ORDER_MASK		0
+#define PAGE_FRAG_CACHE_PFMEMALLOC_SHIFT	0
+#define PAGE_FRAG_CACHE_PFMEMALLOC_BIT		BIT(PAGE_FRAG_CACHE_PFMEMALLOC_SHIFT)
+#endif
+
+static inline unsigned long page_frag_encode_page(struct page *page,
+						  unsigned int order,
+						  bool pfmemalloc)
+{
+	BUILD_BUG_ON(PAGE_FRAG_CACHE_MAX_ORDER > PAGE_FRAG_CACHE_ORDER_MASK);
+	BUILD_BUG_ON(PAGE_FRAG_CACHE_PFMEMALLOC_BIT >= PAGE_SIZE);
+
+	return (unsigned long)page_address(page) |
+		(order & PAGE_FRAG_CACHE_ORDER_MASK) |
+		((unsigned long)pfmemalloc << PAGE_FRAG_CACHE_PFMEMALLOC_SHIFT);
+}
+
+static inline unsigned long page_frag_encoded_page_order(unsigned long encoded_page)
+{
+	return encoded_page & PAGE_FRAG_CACHE_ORDER_MASK;
+}
+
+static inline bool page_frag_encoded_page_pfmemalloc(unsigned long encoded_page)
+{
+	return !!(encoded_page & PAGE_FRAG_CACHE_PFMEMALLOC_BIT);
+}
+
+static inline void *page_frag_encoded_page_address(unsigned long encoded_page)
+{
+	return (void *)(encoded_page & PAGE_MASK);
+}
+
+static inline struct page *page_frag_encoded_page_ptr(unsigned long encoded_page)
+{
+	return virt_to_page((void *)encoded_page);
+}
+
 static inline void page_frag_cache_init(struct page_frag_cache *nc)
 {
-	nc->va = NULL;
+	nc->encoded_page = 0;
 }

 static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
 {
-	return !!nc->pfmemalloc;
+	return page_frag_encoded_page_pfmemalloc(nc->encoded_page);
+}
+
+static inline unsigned int page_frag_cache_page_size(unsigned long encoded_page)
+{
+	return PAGE_SIZE << page_frag_encoded_page_order(encoded_page);
 }

 void page_frag_cache_drain(struct page_frag_cache *nc);
diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
index 4c8e04379cb3..228cff9a4cdb 100644
--- a/mm/page_frag_cache.c
+++ b/mm/page_frag_cache.c
@@ -15,13 +15,13 @@
 #include <linux/export.h>
 #include <linux/gfp_types.h>
 #include <linux/init.h>
-#include <linux/mm.h>
 #include <linux/page_frag_cache.h>
 #include "internal.h"

 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
 					     gfp_t gfp_mask)
 {
+	unsigned long order = PAGE_FRAG_CACHE_MAX_ORDER;
 	struct page *page = NULL;
 	gfp_t gfp = gfp_mask;

@@ -30,23 +30,31 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
 		   __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
 				PAGE_FRAG_CACHE_MAX_ORDER);
-	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
 #endif
-	if (unlikely(!page))
+	if (unlikely(!page)) {
 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
+		if (unlikely(!page)) {
+			nc->encoded_page = 0;
+			return NULL;
+		}

-	nc->va = page ? page_address(page) : NULL;
+		order = 0;
+	}
+
+	nc->encoded_page = page_frag_encode_page(page, order,
+						 page_is_pfmemalloc(page));

 	return page;
 }

 void page_frag_cache_drain(struct page_frag_cache *nc)
 {
-	if (!nc->va)
+	if (!nc->encoded_page)
 		return;

-	__page_frag_cache_drain(virt_to_head_page(nc->va), nc->pagecnt_bias);
-	nc->va = NULL;
+	__page_frag_cache_drain(page_frag_encoded_page_ptr(nc->encoded_page),
+				nc->pagecnt_bias);
+	nc->encoded_page = 0;
 }
 EXPORT_SYMBOL(page_frag_cache_drain);

@@ -63,31 +71,27 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
 			      unsigned int fragsz, gfp_t gfp_mask,
 			      unsigned int align_mask)
 {
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
-	unsigned int size = nc->size;
-#else
-	unsigned int size = PAGE_SIZE;
-#endif
-	unsigned int offset;
+	unsigned long encoded_page = nc->encoded_page;
+	unsigned int size, offset;
 	struct page *page;

-	if (unlikely(!nc->va)) {
+	size = page_frag_cache_page_size(encoded_page);
+
+	if (unlikely(!encoded_page)) {
 refill:
 		page = __page_frag_cache_refill(nc, gfp_mask);
 		if (!page)
 			return NULL;

-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
-		/* if size can vary use size else just use PAGE_SIZE */
-		size = nc->size;
-#endif
+		encoded_page = nc->encoded_page;
+		size = page_frag_cache_page_size(encoded_page);
+
 		/* Even if we own the page, we do not use atomic_set().
 		 * This would break get_page_unless_zero() users.
 		 */
 		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);

 		/* reset page count bias and offset to start of new frag */
-		nc->pfmemalloc = page_is_pfmemalloc(page);
 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
 		nc->offset = 0;
 	}
@@ -107,13 +111,14 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
 		return NULL;
 	}

-	page = virt_to_page(nc->va);
+	page = page_frag_encoded_page_ptr(encoded_page);

 	if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
 		goto refill;

-	if (unlikely(nc->pfmemalloc)) {
-		free_unref_page(page, compound_order(page));
+	if (unlikely(page_frag_encoded_page_pfmemalloc(encoded_page))) {
+		free_unref_page(page,
+				page_frag_encoded_page_order(encoded_page));
 		goto refill;
 	}

@@ -128,7 +133,7 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
 	nc->pagecnt_bias--;
 	nc->offset = offset + fragsz;

-	return nc->va + offset;
+	return page_frag_encoded_page_address(encoded_page) + offset;
 }
 EXPORT_SYMBOL(__page_frag_alloc_align);
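
For readers following the encoding above: the trick works because
page_address() always returns a page-aligned pointer, so the low
PAGE_SHIFT bits of 'va' are known to be zero and can carry the order
(low byte) and the pfmemalloc flag (bit 8). Below is a minimal
userspace sketch of the same idea, not kernel code: the demo_* names,
the fixed 4 KiB page size and the posix_memalign() stand-in for page
allocation are illustrative assumptions.

#define _POSIX_C_SOURCE 200112L

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE		4096UL			/* assumed 4 KiB pages */
#define DEMO_PAGE_MASK		(~(DEMO_PAGE_SIZE - 1))	/* clears the low 12 bits */
#define DEMO_ORDER_MASK		0xffUL			/* order lives in bits 7..0 */
#define DEMO_PFMEMALLOC_BIT	(1UL << 8)		/* flag lives in bit 8 */

/* Pack a page-aligned address, an order and a flag into a single word. */
static unsigned long demo_encode(void *va, unsigned int order, bool pfmemalloc)
{
	/* the low bits must be free, i.e. 'va' must be page-aligned */
	assert(((unsigned long)(uintptr_t)va & ~DEMO_PAGE_MASK) == 0);
	return (unsigned long)(uintptr_t)va |
	       (order & DEMO_ORDER_MASK) |
	       (pfmemalloc ? DEMO_PFMEMALLOC_BIT : 0);
}

static void *demo_address(unsigned long encoded)
{
	return (void *)(uintptr_t)(encoded & DEMO_PAGE_MASK);
}

static unsigned int demo_order(unsigned long encoded)
{
	return encoded & DEMO_ORDER_MASK;
}

static bool demo_pfmemalloc(unsigned long encoded)
{
	return !!(encoded & DEMO_PFMEMALLOC_BIT);
}

int main(void)
{
	unsigned long encoded;
	void *va;

	/* a page-aligned buffer stands in for page_address(page) */
	if (posix_memalign(&va, DEMO_PAGE_SIZE, DEMO_PAGE_SIZE << 3))
		return 1;

	encoded = demo_encode(va, 3, true);	/* "order-3 pfmemalloc page" */
	assert(demo_address(encoded) == va);
	assert(demo_order(encoded) == 3);
	assert(demo_pfmemalloc(encoded));

	free(va);
	return 0;
}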
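
And to make the size claim in the commit message concrete: on an LP64
build, the reworked 'struct page_frag_cache' packs into the same
16 bytes as 'struct page_frag'. A compile-time sketch with simplified
stand-in types (assumed here for illustration; the kernel's real
definitions vary with PAGE_SIZE and BITS_PER_LONG):

#include <stdint.h>

/* Stand-in for 'struct page_frag' (the BITS_PER_LONG > 32 branch). */
struct demo_page_frag {
	void *page;			/* 'struct page *' in the kernel */
	uint32_t offset;
	uint32_t size;
};

/* Stand-in for the reworked 'struct page_frag_cache'. */
struct demo_page_frag_cache {
	unsigned long encoded_page;	/* va | order | pfmemalloc bit */
	uint32_t offset;
	uint32_t pagecnt_bias;
};

/* Both are 8 + 4 + 4 = 16 bytes on LP64, so the replacement adds no bloat. */
_Static_assert(sizeof(struct demo_page_frag_cache) ==
	       sizeof(struct demo_page_frag),
	       "page_frag_cache must not outgrow page_frag");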