new file mode 100644
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* libie internal declarations not to be used in drivers.
+ *
+ * Copyright(c) 2023 Intel Corporation.
+ */
+
+#ifndef __LIBIE_INTERNAL_H
+#define __LIBIE_INTERNAL_H
+
+struct libie_rq_stats;
+struct page_pool;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+void libie_rq_stats_sync_pp(struct libie_rq_stats *stats,
+ struct page_pool *pool);
+#else
+static inline void libie_rq_stats_sync_pp(struct libie_rq_stats *stats,
+ struct page_pool *pool)
+{
+}
+#endif
+
+#endif /* __LIBIE_INTERNAL_H */
@@ -3,6 +3,8 @@
#include <linux/net/intel/libie/rx.h>
+#include "internal.h"
+
/* O(1) converting i40e/ice/iavf's 8/10-bit hardware packet type to a parsed
* bitfield struct.
*/
@@ -133,6 +135,24 @@ struct page_pool *libie_rx_page_pool_create(struct napi_struct *napi,
}
EXPORT_SYMBOL_NS_GPL(libie_rx_page_pool_create, LIBIE);
+/**
+ * libie_rx_page_pool_destroy - destroy a &page_pool created by libie
+ * @pool: pool to destroy
+ * @stats: RQ stats from the ring (or %NULL to skip updating PP stats)
+ *
+ * The stats container usually has the same lifetime as the device itself,
+ * while the PP is created and destroyed on each ifup/ifdown. To avoid
+ * losing the counters accumulated during the last ifup, the PP stats are
+ * folded into the driver's stats container before the pool is destroyed.
+ */
+void libie_rx_page_pool_destroy(struct page_pool *pool,
+ struct libie_rq_stats *stats)
+{
+ libie_rq_stats_sync_pp(stats, pool);
+ page_pool_destroy(pool);
+}
+EXPORT_SYMBOL_NS_GPL(libie_rx_page_pool_destroy, LIBIE);
+
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Ethernet common library");
MODULE_LICENSE("GPL");
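[ Aside: a minimal sketch of the intended driver-side pairing on ifdown.
  example_ring and its ->pp / ->stats fields are illustrative assumptions,
  not taken from any in-tree driver: ]

/* The stats container lives in the driver's persistent per-queue state,
 * so it survives the page_pool destroyed here and keeps the accumulated
 * PP counters across ifup/ifdown cycles.
 */
static void example_ring_destroy(struct example_ring *ring)
{
	libie_rx_page_pool_destroy(ring->pp, &ring->stats);
	ring->pp = NULL;
}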
@@ -3,6 +3,9 @@
#include <linux/ethtool.h>
#include <linux/net/intel/libie/stats.h>
+#include <net/page_pool.h>
+
+#include "internal.h"
/* Rx per-queue stats */
@@ -14,6 +17,70 @@ static const char * const libie_rq_stats_str[] = {
#define LIBIE_RQ_STATS_NUM ARRAY_SIZE(libie_rq_stats_str)
+#ifdef CONFIG_PAGE_POOL_STATS
+/**
+ * libie_rq_stats_get_pp - get the current stats from a &page_pool
+ * @sarr: local array to add stats to
+ * @pool: pool to get the stats from
+ *
+ * Adds the current "live" stats from an online PP to the stats read from
+ * the RQ container, so that the actual totals will be returned.
+ */
+static void libie_rq_stats_get_pp(u64 *sarr, struct page_pool *pool)
+{
+ struct page_pool_stats *pps;
+ /* Used only to calculate pos below */
+ struct libie_rq_stats tmp;
+ u32 pos;
+
+	/* Validate that libie's PP stats group can be cast <-> the PP struct */
+ static_assert(sizeof(tmp.pp) == sizeof(*pps));
+
+ if (!pool)
+ return;
+
+ /* Position of the first Page Pool stats field */
+ pos = (u64_stats_t *)&tmp.pp - tmp.raw;
+ pps = (typeof(pps))&sarr[pos];
+
+ page_pool_get_stats(pool, pps);
+}
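[ Note: the pointer subtraction above is just the index of the first PP
  counter within ->raw. An equivalent formulation via offsetof(), as a
  sketch assuming the union layout in stats.h where ->raw aliases the
  named counters (the helper name is illustrative only): ]

static inline u32 example_pp_stats_pos(void)
{
	/* Index of the pp group inside the raw[] overlay */
	return (offsetof(struct libie_rq_stats, pp) -
		offsetof(struct libie_rq_stats, raw)) / sizeof(u64_stats_t);
}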
+
+/**
+ * libie_rq_stats_sync_pp - add the current PP stats to the RQ stats container
+ * @stats: stats structure to update
+ * @pool: pool to read the stats from
+ *
+ * Called by libie_rx_page_pool_destroy() to save the stats before destroying
+ * the pool.
+ */
+void libie_rq_stats_sync_pp(struct libie_rq_stats *stats,
+ struct page_pool *pool)
+{
+	struct page_pool_stats pps = { };
+	u64 *sarr = (u64 *)&pps;
+	u64_stats_t *qarr;
+
+	if (!stats)
+		return;
+
+	qarr = (u64_stats_t *)&stats->pp;
+
+ page_pool_get_stats(pool, &pps);
+
+ u64_stats_update_begin(&stats->syncp);
+
+ for (u32 i = 0; i < sizeof(pps) / sizeof(*sarr); i++)
+ u64_stats_add(&qarr[i], sarr[i]);
+
+ u64_stats_update_end(&stats->syncp);
+}
+#else
+static void libie_rq_stats_get_pp(u64 *sarr, struct page_pool *pool)
+{
+}
+
+/* static inline void libie_rq_stats_sync_pp() is defined in "internal.h" */
+#endif
+
/**
* libie_rq_stats_get_sset_count - get the number of Ethtool RQ stats provided
*
@@ -41,8 +108,10 @@ EXPORT_SYMBOL_NS_GPL(libie_rq_stats_get_strings, LIBIE);
* libie_rq_stats_get_data - get the RQ stats in Ethtool format
* @data: reference to the cursor pointing to the output array
* @stats: RQ stats container from the queue
+ * @pool: &page_pool from the queue (%NULL to ignore PP "live" stats)
*/
-void libie_rq_stats_get_data(u64 **data, const struct libie_rq_stats *stats)
+void libie_rq_stats_get_data(u64 **data, const struct libie_rq_stats *stats,
+ struct page_pool *pool)
{
u64 sarr[LIBIE_RQ_STATS_NUM];
u32 start;
@@ -54,6 +123,8 @@ void libie_rq_stats_get_data(u64 **data, const struct libie_rq_stats *stats)
sarr[i] = u64_stats_read(&stats->raw[i]);
} while (u64_stats_fetch_retry(&stats->syncp, start));
+ libie_rq_stats_get_pp(sarr, pool);
+
for (u32 i = 0; i < LIBIE_RQ_STATS_NUM; i++)
(*data)[i] += sarr[i];
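[ A sketch of a driver-side .get_ethtool_stats handler feeding this.
  example_priv, ->rings and ->num_rxq are assumptions for illustration,
  and it's assumed the helper advances the @data cursor past one queue's
  block per call, as its "cursor" kernel-doc wording suggests: ]

static void example_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats,
				      u64 *data)
{
	struct example_priv *priv = netdev_priv(dev);

	for (u32 i = 0; i < priv->num_rxq; i++)
		/* Pass the live pool to fold its current counters in,
		 * or NULL when the queue is down.
		 */
		libie_rq_stats_get_data(&data, &priv->rings[i].stats,
					priv->rings[i].pp);
}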
@@ -160,7 +160,11 @@ static inline void libie_skb_set_hash(struct sk_buff *skb, u32 hash,
/* Maximum frame size minus LL overhead */
#define LIBIE_MAX_MTU (LIBIE_MAX_RX_FRM_LEN - LIBIE_RX_LL_LEN)
+struct libie_rq_stats;
+
struct page_pool *libie_rx_page_pool_create(struct napi_struct *napi,
u32 size);
+void libie_rx_page_pool_destroy(struct page_pool *pool,
+ struct libie_rq_stats *stats);
#endif /* __LIBIE_RX_H */
@@ -49,6 +49,17 @@
* fragments: number of processed descriptors carrying only a fragment
* alloc_page_fail: number of Rx page allocation fails
* build_skb_fail: number of build_skb() fails
+ * pp_alloc_fast: pages taken from the cache or ring
+ * pp_alloc_slow: actual page allocations
+ * pp_alloc_slow_ho: non-order-0 page allocations
+ * pp_alloc_empty: number of times the pool was empty
+ * pp_alloc_refill: number of cache refills
+ * pp_alloc_waive: NUMA node mismatches during recycling
+ * pp_recycle_cached: direct recyclings into the cache
+ * pp_recycle_cache_full: number of times the cache was full
+ * pp_recycle_ring: recyclings into the ring
+ * pp_recycle_ring_full: number of times the ring was full
+ * pp_recycle_released_ref: pages released due to elevated refcnt
*/
#define DECLARE_LIBIE_RQ_NAPI_STATS(act) \
@@ -60,9 +71,29 @@
act(alloc_page_fail) \
act(build_skb_fail)
+#ifdef CONFIG_PAGE_POOL_STATS
+#define DECLARE_LIBIE_RQ_PP_STATS(act) \
+ act(pp_alloc_fast) \
+ act(pp_alloc_slow) \
+ act(pp_alloc_slow_ho) \
+ act(pp_alloc_empty) \
+ act(pp_alloc_refill) \
+ act(pp_alloc_waive) \
+ act(pp_recycle_cached) \
+ act(pp_recycle_cache_full) \
+ act(pp_recycle_ring) \
+ act(pp_recycle_ring_full) \
+ act(pp_recycle_released_ref)
+#else
+#define DECLARE_LIBIE_RQ_PP_STATS(act)
+#endif
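[ Note: the list above mirrors struct page_pool_stats from
  <net/page_pool.h> field for field and in order -- the casts and the
  static_assert() in stats.c depend on exactly this. The mapping, for
  reference: ]

	pp_alloc_fast           <-> alloc_stats.fast
	pp_alloc_slow           <-> alloc_stats.slow
	pp_alloc_slow_ho        <-> alloc_stats.slow_high_order
	pp_alloc_empty          <-> alloc_stats.empty
	pp_alloc_refill         <-> alloc_stats.refill
	pp_alloc_waive          <-> alloc_stats.waive
	pp_recycle_cached       <-> recycle_stats.cached
	pp_recycle_cache_full   <-> recycle_stats.cache_full
	pp_recycle_ring         <-> recycle_stats.ring
	pp_recycle_ring_full    <-> recycle_stats.ring_full
	pp_recycle_released_ref <-> recycle_stats.released_refcnt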
+
#define DECLARE_LIBIE_RQ_STATS(act) \
DECLARE_LIBIE_RQ_NAPI_STATS(act) \
- DECLARE_LIBIE_RQ_FAIL_STATS(act)
+ DECLARE_LIBIE_RQ_FAIL_STATS(act) \
+ DECLARE_LIBIE_RQ_PP_STATS(act)
+
+struct page_pool;
struct libie_rq_stats {
struct u64_stats_sync syncp;
@@ -72,6 +103,9 @@ struct libie_rq_stats {
#define act(s) u64_stats_t s;
DECLARE_LIBIE_RQ_NAPI_STATS(act);
DECLARE_LIBIE_RQ_FAIL_STATS(act);
+ struct_group(pp,
+ DECLARE_LIBIE_RQ_PP_STATS(act);
+ );
#undef act
};
DECLARE_FLEX_ARRAY(u64_stats_t, raw);
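[ Simplified view of the storage the raw[] accessors rely on: the named
  counters and ->raw alias the same memory. The enclosing union sits
  outside this diff's context, so the sketch assumes the surrounding
  layout: ]

	union {
		struct {
			/* ...NAPI counters... */
			u64_stats_t alloc_page_fail;
			u64_stats_t build_skb_fail;
			struct_group(pp,
				u64_stats_t pp_alloc_fast;
				/* ...the remaining PP counters... */
			);
		};
		DECLARE_FLEX_ARRAY(u64_stats_t, raw);
	};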
@@ -110,7 +144,8 @@ libie_rq_napi_stats_add(struct libie_rq_stats *qs,
u32 libie_rq_stats_get_sset_count(void);
void libie_rq_stats_get_strings(u8 **data, u32 qid);
-void libie_rq_stats_get_data(u64 **data, const struct libie_rq_stats *stats);
+void libie_rq_stats_get_data(u64 **data, const struct libie_rq_stats *stats,
+ struct page_pool *pool);
/* Tx per-queue stats:
* packets: packets sent from this queue