@@ -2478,6 +2478,10 @@ void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
enum bpf_dynptr_type type, u32 offset, u32 size);
void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
+
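+/* CONFIG_DEBUG_NET helpers: detect redirect work left unflushed by NAPI. */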
+bool dev_check_flush(void);
+bool cpu_map_check_flush(void);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
@@ -109,4 +109,14 @@ static inline void __xsk_map_flush(void)

#endif /* CONFIG_XDP_SOCKETS */

+#if defined(CONFIG_XDP_SOCKETS) && defined(CONFIG_DEBUG_NET)
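+/* Flushes leftover xskmap entries; returns true if any were pending. */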
+bool xsk_map_check_flush(void);
+#else
+static inline bool xsk_map_check_flush(void)
+{
+ return false;
+}
+#endif
+
#endif /* _LINUX_XDP_SOCK_H */
@@ -764,6 +764,19 @@ void __cpu_map_flush(void)
}
}

+#ifdef CONFIG_DEBUG_NET
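+/* Flush entries left on the per-CPU cpu_map_flush_list; returns true if
+ * the list was not empty, i.e. xdp_do_flush() was missed by NAPI.
+ */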
+bool cpu_map_check_flush(void)
+{
+ if (list_empty(this_cpu_ptr(&cpu_map_flush_list)))
+ return false;
+ __cpu_map_flush();
+ return true;
+}
+#endif
+
static int __init cpu_map_init(void)
{
int cpu;
@@ -418,6 +418,19 @@ void __dev_flush(void)
}
}

+#ifdef CONFIG_DEBUG_NET
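+/* Flush entries left on the per-CPU dev_flush_list; returns true if
+ * the list was not empty, i.e. xdp_do_flush() was missed by NAPI.
+ */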
+bool dev_check_flush(void)
+{
+ if (list_empty(this_cpu_ptr(&dev_flush_list)))
+ return false;
+ __dev_flush();
+ return true;
+}
+#endif
+
/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
* by local_bh_disable() (from XDP calls inside NAPI). The
* rcu_read_lock_bh_held() below makes lockdep accept both.
@@ -6526,6 +6526,11 @@ static int __napi_poll(struct napi_struct *n, bool *repoll)
if (test_bit(NAPI_STATE_SCHED, &n->state)) {
work = n->poll(n, weight);
trace_napi_poll(n, work, weight);
+
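+		/* With CONFIG_DEBUG_NET, catch drivers whose poll callback
+		 * left XDP redirect work queued without xdp_do_flush().
+		 */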
+ xdp_do_check_flushed(n);
}

if (unlikely(work > weight))
@@ -136,4 +136,11 @@ static inline void netif_set_gro_ipv4_max_size(struct net_device *dev,
}

int rps_cpumask_housekeeping(struct cpumask *mask);
+
+#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
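+/* Warn if the NAPI poll callback missed an xdp_do_flush() invocation. */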
+void xdp_do_check_flushed(struct napi_struct *napi);
+#else
+static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
+#endif
#endif
@@ -83,6 +83,8 @@
#include <net/netfilter/nf_conntrack_bpf.h>
#include <linux/un.h>

+#include "dev.h"
+
static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id);

@@ -4208,6 +4210,24 @@ void xdp_do_flush(void)
}
EXPORT_SYMBOL_GPL(xdp_do_flush);

+#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
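+/* Runs from __napi_poll() after the driver's poll callback returns.
+ * Anything still queued on a devmap, cpumap or xskmap flush list means
+ * xdp_do_flush() was missed; flush it and warn once.
+ */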
+void xdp_do_check_flushed(struct napi_struct *napi)
+{
+ bool ret;
+
+ ret = dev_check_flush();
+ ret |= cpu_map_check_flush();
+ ret |= xsk_map_check_flush();
+
+ WARN_ONCE(ret, "Missing xdp_do_flush() invocation after NAPI by %ps\n",
+ napi->poll);
+}
+#endif
+
void bpf_clear_redirect_map(struct bpf_map *map)
{
struct bpf_redirect_info *ri;
@@ -391,6 +391,19 @@ void __xsk_map_flush(void)
}
}

+#ifdef CONFIG_DEBUG_NET
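+/* Flush entries left on the per-CPU xskmap_flush_list; returns true if
+ * the list was not empty, i.e. xdp_do_flush() was missed by NAPI.
+ */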
+bool xsk_map_check_flush(void)
+{
+ if (list_empty(this_cpu_ptr(&xskmap_flush_list)))
+ return false;
+ __xsk_map_flush();
+ return true;
+}
+#endif
+
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
xskq_prod_submit_n(pool->cq, nb_entries);