@@ -388,6 +388,20 @@ static void skb_recv_done(struct virtque
}
}

+static void virtnet_napi_enable(struct virtnet_info *vi)
+{
+ napi_enable(&vi->napi);
+
+ /* If all buffers were filled by the other side before we enabled NAPI,
+ * we won't get another interrupt, so process any outstanding packets
+ * now. virtnet_poll wants to re-enable the queue, so we disable here.
+ * We synchronize against interrupts via NAPI_STATE_SCHED. */
+ if (napi_schedule_prep(&vi->napi)) {
+ vi->rvq->vq_ops->disable_cb(vi->rvq);
+ __napi_schedule(&vi->napi);
+ }
+}
+
static void refill_work(struct work_struct *work)
{
struct virtnet_info *vi;
@@ -397,7 +411,7 @@ static void refill_work(struct work_stru
napi_disable(&vi->napi);
try_fill_recv(vi, GFP_KERNEL);
still_empty = (vi->num == 0);
- napi_enable(&vi->napi);
+ virtnet_napi_enable(vi);

/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again. */
@@ -589,16 +603,7 @@ static int virtnet_open(struct net_devic
{
struct virtnet_info *vi = netdev_priv(dev);

- napi_enable(&vi->napi);
-
- /* If all buffers were filled by other side before we napi_enabled, we
- * won't get another interrupt, so process any outstanding packets
- * now. virtnet_poll wants re-enable the queue, so we disable here.
- * We synchronize against interrupts via NAPI_STATE_SCHED */
- if (napi_schedule_prep(&vi->napi)) {
- vi->rvq->vq_ops->disable_cb(vi->rvq);
- __napi_schedule(&vi->napi);
- }
+ virtnet_napi_enable(vi);
return 0;
}
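
[Editor's note, not part of the patch.] The new helper covers one side of the race; virtnet_poll() covers the other by re-enabling virtqueue callbacks once the ring is drained and re-polling if buffers slipped in meanwhile, using the same NAPI_STATE_SCHED handshake. The fragment below is a simplified, hypothetical sketch of that counterpart (the name example_poll and the exact control flow are illustrative, assuming this era's vq_ops->enable_cb()/disable_cb() interface), not the driver's verbatim code:

/* Sketch only: illustrates the NAPI_STATE_SCHED handshake that
 * virtnet_napi_enable() relies on; not the driver's exact poll routine.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
        struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
        int received = 0;

        /* ... pull up to 'budget' buffers from vi->rvq, bumping 'received' ... */

        if (received < budget) {
                /* Ring looks empty: leave polling mode. */
                napi_complete(napi);

                /* If the host queued more buffers before callbacks were
                 * re-enabled, no interrupt will arrive for them, so re-arm
                 * polling ourselves. napi_schedule_prep() only succeeds
                 * while NAPI_STATE_SCHED is clear, which is the same
                 * synchronization virtnet_napi_enable() depends on.
                 */
                if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) &&
                    napi_schedule_prep(napi)) {
                        vi->rvq->vq_ops->disable_cb(vi->rvq);
                        __napi_schedule(napi);
                }
        }
        return received;
}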