new file mode 100644
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: MIT
+/* Copyright(c) 2019-2021, Celeno Communications Ltd. */
+
+#include "tx/agg_cfm.h"
+#include "sta.h"
+#include "enhanced_tim.h"
+#include "tx/sw_txhdr.h"
+#include "chip.h"
+#include "tx/tx_inject.h"
+#include "tx/tx_amsdu.h"
+
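+/*
+ * Maximum number of polling iterations in cl_agg_cfm_poll_empty().
+ * With a 20 ms sleep between iterations this gives up after ~1 second.
+ */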
+#define AGG_POLL_TIMEOUT 50
+
+/*
+ * cl_hw->agg_cfm_queues:
+ * These queues hold pointers to skb's that were sent as part of an
+ * aggregation and are still waiting for a confirmation from firmware.
+ */
+
+void cl_agg_cfm_init(struct cl_hw *cl_hw)
+{
+ int i = 0;
+
+ for (i = 0; i < IPC_MAX_BA_SESSIONS; i++)
+ INIT_LIST_HEAD(&cl_hw->agg_cfm_queues[i].head);
+}
+
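+/* Queue a sw_txhdr on the per-BA-session confirmation list until firmware confirms it */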
+void cl_agg_cfm_add(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr, u8 agg_idx)
+{
+ spin_lock(&cl_hw->tx_lock_cfm_agg);
+ list_add_tail(&sw_txhdr->cfm_list, &cl_hw->agg_cfm_queues[agg_idx].head);
+ spin_unlock(&cl_hw->tx_lock_cfm_agg);
+}
+
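+/*
+ * Unmap and free every sub-MSDU skb chained to this sw_txhdr;
+ * injected sub-frames are confirmed via cl_tx_inject_cfm() first.
+ */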
+static void cl_agg_cfm_amsdu_free(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr)
+{
+ struct cl_amsdu_txhdr *amsdu_txhdr = NULL;
+ struct cl_amsdu_txhdr *tmp = NULL;
+ struct sk_buff *sub_skb = NULL;
+ struct ieee80211_tx_info *tx_info_sub_skb = NULL;
+
+ list_for_each_entry_safe(amsdu_txhdr, tmp, &sw_txhdr->amsdu_txhdr.list, list) {
+ sub_skb = amsdu_txhdr->skb;
+ tx_info_sub_skb = IEEE80211_SKB_CB(sub_skb);
+
+ if (cl_tx_ctrl_is_inject(tx_info_sub_skb))
+ cl_tx_inject_cfm(cl_hw);
+
+ list_del(&amsdu_txhdr->list);
+ dma_unmap_single(cl_hw->chip->dev, amsdu_txhdr->dma_addr,
+ (size_t)sub_skb->len, DMA_TO_DEVICE);
+ kfree_skb(sub_skb);
+ cl_tx_amsdu_txhdr_free(cl_hw, amsdu_txhdr);
+ }
+}
+
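+/*
+ * Release the frame at the head of a confirmation queue: unmap its DMA
+ * buffer, free any A-MSDU sub-frames and release the skb with
+ * consume_skb().
+ */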
+void cl_agg_cfm_free_head_skb(struct cl_hw *cl_hw,
+ struct cl_agg_cfm_queue *cfm_queue,
+ u8 ba_queue_idx)
+{
+ struct cl_sw_txhdr *sw_txhdr = list_first_entry(&cfm_queue->head,
+ struct cl_sw_txhdr,
+ cfm_list);
+ struct sk_buff *skb = sw_txhdr->skb;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ dma_addr_t dma_addr = le32_to_cpu(sw_txhdr->txdesc.umacdesc.packet_addr[0]);
+
+ dma_unmap_single(cl_hw->chip->dev, dma_addr, sw_txhdr->map_len, DMA_TO_DEVICE);
+
+ /* If the A-MSDU list is not empty, free the sub-MSDU frames (and their amsdu_txhdr) first */
+ if (cl_tx_ctrl_is_amsdu(tx_info))
+ if (!list_empty(&sw_txhdr->amsdu_txhdr.list))
+ cl_agg_cfm_amsdu_free(cl_hw, sw_txhdr);
+
+ if (cl_tx_ctrl_is_inject(tx_info))
+ cl_tx_inject_cfm(cl_hw);
+
+ consume_skb(skb);
+ list_del(&sw_txhdr->cfm_list);
+ cl_sw_txhdr_free(cl_hw, sw_txhdr);
+}
+
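+/*
+ * Drop every frame still waiting for confirmation on this BA queue:
+ * unmap the DMA buffers, free A-MSDU sub-frames, update the firmware
+ * confirmation counter and free the skb's with kfree_skb().
+ */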
+static void cl_agg_cfm_flush_queue(struct cl_hw *cl_hw, u8 agg_idx)
+{
+ struct cl_agg_cfm_queue *cfm_queue = &cl_hw->agg_cfm_queues[agg_idx];
+ struct cl_tx_queue *tx_queue = cfm_queue->tx_queue;
+ struct sk_buff *skb = NULL;
+ struct cl_sw_txhdr *sw_txhdr = NULL;
+ dma_addr_t dma_addr = 0;
+ struct ieee80211_tx_info *tx_info;
+
+ if (!tx_queue)
+ return;
+
+ if (list_empty(&cfm_queue->head))
+ return;
+
+ do {
+ sw_txhdr = list_first_entry(&cfm_queue->head, struct cl_sw_txhdr, cfm_list);
+ skb = sw_txhdr->skb;
+
+ dma_addr = le32_to_cpu(sw_txhdr->txdesc.umacdesc.packet_addr[0]);
+ dma_unmap_single(cl_hw->chip->dev, dma_addr, sw_txhdr->map_len, DMA_TO_DEVICE);
+
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ /* If the A-MSDU list is not empty, free the sub-MSDU frames (and their amsdu_txhdr) first */
+ if (cl_tx_ctrl_is_amsdu(tx_info))
+ if (!list_empty(&sw_txhdr->amsdu_txhdr.list))
+ cl_agg_cfm_amsdu_free(cl_hw, sw_txhdr);
+
+ tx_queue->total_fw_cfm++;
+
+ if (cl_tx_ctrl_is_inject(tx_info))
+ cl_tx_inject_cfm(cl_hw);
+
+ kfree_skb(skb);
+ list_del(&sw_txhdr->cfm_list);
+ cl_sw_txhdr_free(cl_hw, sw_txhdr);
+ } while (!list_empty(&cfm_queue->head));
+
+ /*
+ * Set fw_free_space back to maximum after flushing the queue
+ * and clear the enhanced TIM.
+ */
+ tx_queue->fw_free_space = tx_queue->fw_max_size;
+ cl_enhanced_tim_clear_tx_agg(cl_hw, agg_idx, tx_queue->hw_index,
+ tx_queue->cl_sta, tx_queue->tid);
+
+ cfm_queue->tx_queue = NULL;
+}
+
+void cl_agg_cfm_flush_all(struct cl_hw *cl_hw)
+{
+ int i = 0;
+
+ /* No BH lock here - cl_agg_cfm_flush_all() is called with BH already disabled */
+ spin_lock(&cl_hw->tx_lock_cfm_agg);
+
+ for (i = 0; i < IPC_MAX_BA_SESSIONS; i++)
+ cl_agg_cfm_flush_queue(cl_hw, i);
+
+ spin_unlock(&cl_hw->tx_lock_cfm_agg);
+}
+
+static void cl_agg_cfm_poll_timeout(struct cl_hw *cl_hw, struct cl_tx_queue *tx_queue,
+ u8 agg_idx, bool flush)
+{
+ /*
+ * When polling fails, clear the enhanced TIM so that the firmware
+ * will not try to transmit these packets.
+ * If flush is set, cl_enhanced_tim_clear_tx_agg() is called inside
+ * cl_agg_cfm_flush_queue() instead.
+ */
+ cl_dbg_err(cl_hw, "Polling timeout (queue_idx = %u)\n", agg_idx);
+
+ spin_lock_bh(&cl_hw->tx_lock_cfm_agg);
+
+ if (flush)
+ cl_agg_cfm_flush_queue(cl_hw, agg_idx);
+ else
+ cl_enhanced_tim_clear_tx_agg(cl_hw, agg_idx, tx_queue->hw_index,
+ tx_queue->cl_sta, tx_queue->tid);
+
+ spin_unlock_bh(&cl_hw->tx_lock_cfm_agg);
+}
+
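+/*
+ * Wait for the confirmation queue of a BA session to drain, polling every
+ * 20 ms for up to AGG_POLL_TIMEOUT iterations. On timeout either flush the
+ * queue or only clear the enhanced TIM, depending on 'flush'.
+ * Returns immediately if the firmware is in error state.
+ */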
+void cl_agg_cfm_poll_empty(struct cl_hw *cl_hw, u8 agg_idx, bool flush)
+{
+ struct cl_agg_cfm_queue *cfm_queue = &cl_hw->agg_cfm_queues[agg_idx];
+ bool empty = false;
+ int i = 0;
+
+ if (test_bit(CL_DEV_FW_ERROR, &cl_hw->drv_flags))
+ return;
+
+ while (true) {
+ spin_lock_bh(&cl_hw->tx_lock_cfm_agg);
+ empty = list_empty(&cfm_queue->head);
+ spin_unlock_bh(&cl_hw->tx_lock_cfm_agg);
+
+ if (empty)
+ return;
+
+ if (++i == AGG_POLL_TIMEOUT) {
+ cl_agg_cfm_poll_timeout(cl_hw, cfm_queue->tx_queue, agg_idx, flush);
+ return;
+ }
+
+ msleep(20);
+ }
+}
+
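+/*
+ * Wait for all aggregation confirmation queues of a station to drain,
+ * without flushing them on timeout.
+ */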
+void cl_agg_cfm_poll_empty_sta(struct cl_hw *cl_hw, struct cl_sta *cl_sta)
+{
+ int i = 0;
+ struct cl_tx_queue *tx_queue = NULL;
+
+ for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
+ tx_queue = cl_sta->agg_tx_queues[i];
+
+ if (tx_queue)
+ cl_agg_cfm_poll_empty(cl_hw, tx_queue->index, false);
+ }
+}
+
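+/* Record the SSN (starting sequence number) for this BA session's confirmation queue */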
+void cl_agg_cfm_set_ssn(struct cl_hw *cl_hw, u16 ssn, u8 idx)
+{
+ spin_lock_bh(&cl_hw->tx_lock_cfm_agg);
+ cl_hw->agg_cfm_queues[idx].ssn = ssn;
+ spin_unlock_bh(&cl_hw->tx_lock_cfm_agg);
+}
+
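+/* Attach the TX queue that feeds this BA session (cleared by cl_agg_cfm_flush_queue()) */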
+void cl_agg_cfm_set_tx_queue(struct cl_hw *cl_hw, struct cl_tx_queue *tx_queue, u8 idx)
+{
+ spin_lock_bh(&cl_hw->tx_lock_cfm_agg);
+ cl_hw->agg_cfm_queues[idx].tx_queue = tx_queue;
+ spin_unlock_bh(&cl_hw->tx_lock_cfm_agg);
+}