--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -47,6 +47,8 @@ bool sk_busy_loop_end(void *p, unsigned long start_time);
 void napi_busy_loop(unsigned int napi_id,
 		    bool (*loop_end)(void *, unsigned long),
 		    void *loop_end_arg, bool prefer_busy_poll, u16 budget);
+void napi_execute(unsigned int napi_id,
+		  void (*cb)(void *), void *cb_arg);
 
 #else /* CONFIG_NET_RX_BUSY_POLL */
 static inline unsigned long net_busy_loop_on(void)
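
Note that napi_execute() is declared only under CONFIG_NET_RX_BUSY_POLL, so any caller must also depend on busy polling being enabled, or the #else branch above needs a stub. A minimal sketch of such a stub follows, under the assumption that the fallback should simply run the callback inline; the patch itself does not define these fallback semantics:

static inline void napi_execute(unsigned int napi_id,
				void (*cb)(void *), void *cb_arg)
{
	/* Assumed fallback semantics: with busy polling compiled out
	 * there is no busy-poll claim to take, so invoke the callback
	 * directly.
	 */
	cb(cb_arg);
}
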
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6291,6 +6291,74 @@ void napi_busy_loop(unsigned int napi_id,
 }
 EXPORT_SYMBOL(napi_busy_loop);
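 
+/**
+ * napi_execute - run a callback with the napi claimed for busy polling
+ * @napi_id: id of the napi instance to claim
+ * @cb: callback to run once the napi is claimed
+ * @cb_arg: argument passed to @cb
+ *
+ * Spin until the napi matching @napi_id can be claimed for busy polling,
+ * then run @cb with softirqs disabled, so the napi poll routine cannot
+ * run concurrently. Gives up without running @cb if rescheduling is
+ * needed before the napi could be claimed.
+ */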
+void napi_execute(unsigned int napi_id,
+		  void (*cb)(void *), void *cb_arg)
+{
+	void *have_poll_lock = NULL;
+	struct napi_struct *napi;
+	unsigned long val;
+	bool done = false;
+
+	rcu_read_lock();
+	napi = napi_by_id(napi_id);
+	if (!napi)
+		goto out;
+
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		preempt_disable();
+	for (;;) {
+		local_bh_disable();
+		val = READ_ONCE(napi->state);
+
+		/* If multiple threads are competing for this napi,
+		 * we avoid dirtying napi->state as much as we can.
+		 */
+		if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
+			   NAPIF_STATE_IN_BUSY_POLL))
+			goto restart;
+
+		/* Claim the napi: SCHED keeps the softirq poller away,
+		 * IN_BUSY_POLL keeps other busy pollers away.
+		 */
+		if (cmpxchg(&napi->state, val,
+			    val | NAPIF_STATE_IN_BUSY_POLL |
+			    NAPIF_STATE_SCHED) != val)
+			goto restart;
+
+		have_poll_lock = netpoll_poll_lock(napi);
+		cb(cb_arg);
+		done = true;
+		gro_normal_list(napi);
+		local_bh_enable();
+		break;
+restart:
+		local_bh_enable();
+		if (unlikely(need_resched()))
+			break;
+		cpu_relax();
+	}
+	/* Release the claim: busy_poll_stop() polls once more with a
+	 * minimal budget to let the driver re-enable device interrupts.
+	 */
+	if (done)
+		busy_poll_stop(napi, have_poll_lock, false, 1);
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		preempt_enable();
+out:
+	rcu_read_unlock();
+}
+
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 static void napi_hash_add(struct napi_struct *napi)
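
For context, a caller is expected to look roughly like the sketch below: the callback runs with the napi claimed and softirqs disabled, so it must not sleep. All my_* names are hypothetical, invented for illustration; they are not part of this patch.

/* Hypothetical example: refill an RX ring from process context
 * without racing the napi poll loop.
 */
static void my_refill_cb(void *arg)
{
	struct my_rx_queue *q = arg;

	my_rx_ring_refill(q);	/* napi poll cannot run concurrently */
}

static void my_queue_refill(struct my_rx_queue *q)
{
	napi_execute(q->napi_id, my_refill_cb, q);
}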