@@ -351,6 +351,7 @@ struct wl1271 {
 #define WL1271_FLAG_IDLE_REQUESTED (11)
 #define WL1271_FLAG_PSPOLL_FAILURE (12)
 #define WL1271_FLAG_STA_STATE_SENT (13)
+#define WL1271_FLAG_FW_TX_BUSY (14)
 	unsigned long flags;

 	struct wl1271_partition_set part;
@@ -481,9 +481,9 @@ static void wl1271_fw_status(struct wl1271 *wl,
 		total += cnt;
 	}

-	/* if more blocks are available now, schedule some tx work */
-	if (total && !skb_queue_empty(&wl->tx_queue))
-		ieee80211_queue_work(wl->hw, &wl->tx_work);
+	/* if more blocks are available now, tx work can be scheduled */
+	if (total)
+		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

 	/* update the host-chipset time offset */
 	getnstimeofday(&ts);
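With this hunk wl1271_fw_status() no longer queues tx_work when the firmware reports freed blocks; it only clears WL1271_FLAG_FW_TX_BUSY. No wake-up is lost, because the firmware status is read from the interrupt work, and the same pass later checks the flag and runs the TX path directly (see the wl1271_irq_work() hunk below). A condensed sketch of that ordering follows; everything else in the function is elided, and the exact wl1271_fw_status() arguments are assumed from the wl->fw_status usage elsewhere in this diff:

	/*
	 * Rough ordering inside wl1271_irq_work(); the real function
	 * handles many more interrupt causes.
	 */
	wl1271_fw_status(wl, wl->fw_status);	/* may clear FW_TX_BUSY */

	wl1271_tx_complete(wl);			/* when new tx results arrived */

	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    !skb_queue_empty(&wl->tx_queue))
		wl1271_tx_work_locked(wl);	/* drain wl->tx_queue now */

	wl1271_rx(wl, wl->fw_status);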
@@ -537,6 +537,16 @@ static void wl1271_irq_work(struct work_struct *work)
 			    (wl->tx_results_count & 0xff))
 				wl1271_tx_complete(wl);

+			/* Check if any tx blocks were freed */
+			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
+			    !skb_queue_empty(&wl->tx_queue)) {
+				/*
+				 * In order to avoid starvation of the TX path,
+				 * call the work function directly.
+				 */
+				wl1271_tx_work_locked(wl);
+			}
+
 			wl1271_rx(wl, wl->fw_status);
 		}
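Calling wl1271_tx_work_locked() inline here saves a round trip through the mac80211 workqueue for every interrupt that frees TX blocks, which is what the starvation comment refers to. The direct call presumes that wl1271_irq_work() already holds wl->mutex at this point; the _locked suffix and the wl1271_tx_work() wrapper added further down (which takes the mutex before calling the helper) both point that way. For contrast, a sketch of the slower alternative this replaces:

	/*
	 * Equivalent but slower: hand the job to the mac80211 workqueue
	 * and let wl1271_tx_work() re-acquire wl->mutex later.
	 */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    !skb_queue_empty(&wl->tx_queue))
		ieee80211_queue_work(wl->hw, &wl->tx_work);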
@@ -867,7 +877,8 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	 * before that, the tx_work will not be initialized!
 	 */

-	ieee80211_queue_work(wl->hw, &wl->tx_work);
+	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
+		ieee80211_queue_work(wl->hw, &wl->tx_work);

 	/*
 	 * The workqueue is slow to process the tx_queue and we need stop
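In wl1271_op_tx() only the wake-up is skipped, not the packet: the skb is assumed to already sit on wl->tx_queue at this point (the high-watermark comment above refers to that queue), so while the firmware is known to be out of blocks there is nothing useful for tx_work to do. The frame is picked up once wl1271_fw_status() clears the flag and the interrupt work drains the queue. A condensed sketch of the resulting logic, under that assumption:

	/* inside wl1271_op_tx(); the skb was queued to wl->tx_queue above */

	/*
	 * Kick the workqueue only while the firmware can take frames;
	 * otherwise the queued skb waits for the next "blocks freed"
	 * interrupt, which calls wl1271_tx_work_locked() directly.
	 */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->tx_work);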
@@ -204,9 +204,8 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
 	return enabled_rates;
 }

-void wl1271_tx_work(struct work_struct *work)
+void wl1271_tx_work_locked(struct wl1271 *wl)
 {
-	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
 	struct sk_buff *skb;
 	bool woken_up = false;
 	u32 sta_rates = 0;
@@ -223,8 +222,6 @@ void wl1271_tx_work(struct work_struct *work)
 		spin_unlock_irqrestore(&wl->wl_lock, flags);
 	}

-	mutex_lock(&wl->mutex);
-
 	if (unlikely(wl->state == WL1271_STATE_OFF))
 		goto out;
@@ -260,6 +257,8 @@ void wl1271_tx_work(struct work_struct *work)
 			 * Queue back last skb, and stop aggregating.
 			 */
 			skb_queue_head(&wl->tx_queue, skb);
+			/* No work left, avoid scheduling redundant tx work */
+			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
 			goto out_ack;
 		} else if (ret < 0) {
 			dev_kfree_skb(skb);
@@ -283,7 +282,14 @@ out_ack:
 out:
 	if (woken_up)
 		wl1271_ps_elp_sleep(wl);
+}

+void wl1271_tx_work(struct work_struct *work)
+{
+	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
+
+	mutex_lock(&wl->mutex);
+	wl1271_tx_work_locked(wl);
 	mutex_unlock(&wl->mutex);
 }
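The split above is the usual pattern for letting a caller that already holds the lock reuse a work function: wl1271_tx_work_locked() does the actual work and expects wl->mutex to be held, while wl1271_tx_work() remains the workqueue entry point and only handles locking. A generic, stand-alone illustration of the same pattern in user-space C; all names below are invented for the example, only the structure mirrors the patch:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pending;			/* state protected by 'lock' */

/* Does the real work; the caller must already hold 'lock'. */
static void tx_work_locked(void)
{
	while (pending > 0) {
		pending--;
		printf("sent one frame, %d left\n", pending);
	}
}

/* Deferred-work entry point: takes the lock, then calls the helper. */
static void tx_work(void)
{
	pthread_mutex_lock(&lock);
	tx_work_locked();
	pthread_mutex_unlock(&lock);
}

/* An interrupt-like path that already holds the lock calls the helper
 * directly instead of scheduling tx_work() and waiting for it. */
static void irq_path(void)
{
	pthread_mutex_lock(&lock);
	pending += 3;			/* pretend new frames were queued */
	tx_work_locked();		/* no extra scheduling round trip */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	irq_path();			/* direct-call path */

	pending = 2;
	tx_work();			/* deferred path via the wrapper */
	return 0;
}

Build with -pthread; the point is only that the lock is taken exactly once on either path.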
@@ -140,6 +140,7 @@ static inline int wl1271_tx_get_queue(int queue)
 }

 void wl1271_tx_work(struct work_struct *work);
+void wl1271_tx_work_locked(struct wl1271 *wl);
 void wl1271_tx_complete(struct wl1271 *wl);
 void wl1271_tx_reset(struct wl1271 *wl);
 void wl1271_tx_flush(struct wl1271 *wl);
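Taken together, the hunks implement a small handshake around WL1271_FLAG_FW_TX_BUSY: the TX path sets it when the firmware buffer fills up, wl1271_fw_status() clears it when blocks are freed, and both wl1271_op_tx() and the interrupt work consult it before scheduling or running TX work. The stand-alone program below models that handshake; the bit helpers are simplified, non-atomic stand-ins for the kernel's set_bit()/clear_bit()/test_bit(), and the queue and block counters are plain integers, so only the control flow mirrors the driver:

#include <stdbool.h>
#include <stdio.h>

#define FLAG_FW_TX_BUSY 14

static unsigned long flags;
static int tx_queue_len;	/* frames waiting in the tx queue */
static int fw_free_blocks;	/* TX blocks the firmware can accept */

static void set_bit_s(int nr, unsigned long *f)   { *f |= 1UL << nr; }
static void clear_bit_s(int nr, unsigned long *f) { *f &= ~(1UL << nr); }
static bool test_bit_s(int nr, unsigned long *f)  { return *f & (1UL << nr); }

/* Model of wl1271_tx_work_locked(): push frames until the fw is full. */
static void tx_work_locked(void)
{
	while (tx_queue_len > 0) {
		if (fw_free_blocks == 0) {
			/* fw buffer full: remember it and stop (-EBUSY case) */
			set_bit_s(FLAG_FW_TX_BUSY, &flags);
			return;
		}
		fw_free_blocks--;
		tx_queue_len--;
	}
}

/* Model of wl1271_fw_status(): freed blocks clear the busy flag. */
static void fw_status(int freed)
{
	fw_free_blocks += freed;
	if (freed)
		clear_bit_s(FLAG_FW_TX_BUSY, &flags);
}

/* Model of the interrupt work: run TX directly when it can progress. */
static void irq_work(int freed)
{
	fw_status(freed);
	if (!test_bit_s(FLAG_FW_TX_BUSY, &flags) && tx_queue_len > 0)
		tx_work_locked();
}

int main(void)
{
	tx_queue_len = 5;
	fw_free_blocks = 2;

	tx_work_locked();	/* sends 2 frames, then sets FW_TX_BUSY */
	printf("busy=%d queued=%d\n",
	       (int)test_bit_s(FLAG_FW_TX_BUSY, &flags), tx_queue_len);

	irq_work(3);		/* firmware freed 3 blocks: drains the rest */
	printf("busy=%d queued=%d\n",
	       (int)test_bit_s(FLAG_FW_TX_BUSY, &flags), tx_queue_len);
	return 0;
}

Expected output is "busy=1 queued=3" followed by "busy=0 queued=0": the stalled queue is drained only after the model firmware reports freed blocks.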