@@ -51,13 +51,13 @@ struct pp_alloc_cache {
};

struct page_pool_params {
- unsigned int flags;
+ unsigned int flags:30;
+ enum dma_data_direction dma_dir:2; /* DMA mapping direction */
unsigned int order;
unsigned int pool_size;
int nid; /* Numa node id to allocate from pages from */
struct device *dev; /* device, for DMA pre-mapping purposes */
struct napi_struct *napi; /* Sole consumer of pages, otherwise NULL */
- enum dma_data_direction dma_dir; /* DMA mapping direction */
unsigned int max_len; /* max DMA sync memory size */
unsigned int offset; /* DMA addr offset */
void (*init_callback)(struct page *page, void *arg);
@@ -129,6 +129,7 @@ static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)

struct page_pool {
struct page_pool_params p;
+ long pad;

struct delayed_work release_dw;
void (*disconnect)(void *);
For now, this structure takes a whole 64-byte cacheline on x86_64. But in
fact, it has a 4-byte hole before ::init_callback() (not sufficient to
change its sizeof(), though). ::dma_dir occupies a whole 4 bytes, although
its values can only be 0 and 2.
Merge it into ::flags, so that its slot gets freed and the structure's
size is reduced to 56 bytes. This adds an instruction when reading that
field, but the upcoming change will make those reads happen way less
often. Pad the freed slot explicitly in &page_pool to not alter the
cacheline layout while it's not used.

Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
---
 include/net/page_pool.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
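
Not part of the patch, just an illustration: the size effect described
above can be reproduced with a standalone userspace sketch. The
structures below are simplified stand-ins for &page_pool_params (the
demo enum and field names are made up for this example), showing how
carving two bits out of ::flags for a 0/2-valued direction enum drops
the separate 4-byte slot and the alignment hole before the callback
pointer on x86_64:

#include <stdio.h>

/* Illustrative stand-in for the 0/2-valued enum dma_data_direction. */
enum demo_dma_dir {
	DEMO_DMA_BIDIRECTIONAL	= 0,
	DEMO_DMA_FROM_DEVICE	= 2,
};

/* Rough stand-in for the old layout: 64 bytes on x86_64. */
struct demo_params_old {
	unsigned int flags;
	unsigned int order;
	unsigned int pool_size;
	int nid;
	void *dev;
	void *napi;
	enum demo_dma_dir dma_dir;	/* its own 4-byte slot */
	unsigned int max_len;
	unsigned int offset;
	/* 4-byte hole here, before the pointer below */
	void (*init_callback)(void *page, void *arg);
	void *init_arg;
};

/* Same fields with ::dma_dir merged into ::flags: 56 bytes on x86_64. */
struct demo_params_new {
	unsigned int flags:30;
	enum demo_dma_dir dma_dir:2;	/* 2 bits are enough for 0 and 2 */
	unsigned int order;
	unsigned int pool_size;
	int nid;
	void *dev;
	void *napi;
	unsigned int max_len;
	unsigned int offset;
	void (*init_callback)(void *page, void *arg);
	void *init_arg;
};

int main(void)
{
	printf("old: %zu bytes\n", sizeof(struct demo_params_old));	/* 64 */
	printf("new: %zu bytes\n", sizeof(struct demo_params_new));	/* 56 */
	return 0;
}

Note that bit-field layout for enum types is compiler-specific; the
64- and 56-byte figures assume GCC/Clang on x86_64, matching the numbers
quoted in the commit message above.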