@@ -631,7 +631,8 @@ mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
         return ret;
 }
 
-int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+static int
+mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
                      bool allow_direct)
 {
         int len = SKB_WITH_OVERHEAD(q->buf_size);
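The first hunk demotes the refill loop to a file-local helper: the exported mt76_dma_rx_fill() becomes static mt76_dma_rx_fill_buf(), with the same argument list and, so far, the same body.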
@@ -640,8 +641,6 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
         if (!q->ndesc)
                 return 0;
 
-        spin_lock_bh(&q->lock);
-
         while (q->queued < q->ndesc - 1) {
                 struct mt76_queue_buf qbuf = {};
                 enum dma_data_direction dir;
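The helper no longer takes the queue lock itself: spin_lock_bh() is dropped here and, as the next hunk shows, moves into a thin wrapper that keeps the old name and the old external behaviour.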
@@ -674,6 +673,19 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
         if (frames || mt76_queue_is_wed_rx(q))
                 mt76_dma_kick_queue(dev, q);
 
+        return frames;
+}
+
+int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+                     bool allow_direct)
+{
+        int frames;
+
+        if (!q->ndesc)
+                return 0;
+
+        spin_lock_bh(&q->lock);
+        frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
         spin_unlock_bh(&q->lock);
 
         return frames;
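This hunk completes the split: the loop body now ends the unlocked mt76_dma_rx_fill_buf(), and mt76_dma_rx_fill() is reintroduced as a locking wrapper around it. Below is a minimal userspace sketch of that unlocked-helper/locking-wrapper pattern; struct queue, queue_fill_buf() and queue_fill() are illustrative stand-ins rather than mt76 code, and a pthread mutex stands in for the spin_lock_bh()-protected queue lock. Build with cc -pthread.

#include <pthread.h>
#include <stdio.h>

struct queue {
        pthread_mutex_t lock;
        int queued;
        int ndesc;
};

/* Unlocked helper: does the actual refill work. Callers must either
 * hold q->lock or know the queue cannot be touched concurrently
 * (e.g. during init or reset). */
static int queue_fill_buf(struct queue *q)
{
        int frames = 0;

        if (!q->ndesc)
                return 0;

        /* stand-in for the buffer allocation/queueing loop */
        while (q->queued < q->ndesc - 1) {
                q->queued++;
                frames++;
        }

        return frames;
}

/* Locking wrapper: the entry point used at runtime. */
static int queue_fill(struct queue *q)
{
        int frames;

        if (!q->ndesc)
                return 0;

        pthread_mutex_lock(&q->lock);
        frames = queue_fill_buf(q);
        pthread_mutex_unlock(&q->lock);

        return frames;
}

int main(void)
{
        struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .ndesc = 8 };

        /* init-style path: single-threaded, helper called directly */
        printf("init fill: %d frames\n", queue_fill_buf(&q));

        q.queued = 0;

        /* runtime path: goes through the locking wrapper */
        printf("runtime fill: %d frames\n", queue_fill(&q));

        return 0;
}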
@@ -796,7 +808,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
                 return;
 
         mt76_dma_sync_idx(dev, q);
-        mt76_dma_rx_fill(dev, q, false);
+        mt76_dma_rx_fill_buf(dev, q, false);
 }
 
 static void
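In mt76_dma_rx_reset(), the refill now goes through the unlocked helper; presumably the queue is quiesced while the reset runs, so taking q->lock there is unnecessary. (The commit message is not shown, so treat that rationale as an inference from the diff.)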
@@ -969,7 +981,7 @@ mt76_dma_init(struct mt76_dev *dev,
 
         mt76_for_each_q_rx(dev, i) {
                 netif_napi_add(dev->napi_dev, &dev->napi[i], poll);
-                mt76_dma_rx_fill_buf(dev, &dev->q_rx[i], false);
+                mt76_dma_rx_fill_buf(dev, &dev->q_rx[i], false);
                 napi_enable(&dev->napi[i]);
         }
 
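The initial refill in mt76_dma_init() likewise uses the unlocked helper: it runs before napi_enable(), so no concurrent refill can be in flight yet. Runtime callers keep going through the locking mt76_dma_rx_fill() wrapper.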