diff mbox series

[RFC,v1,206/256] cl8k: add tx/bcmc_cfm.c

Message ID 20210617160223.160998-207-viktor.barna@celeno.com (mailing list archive)
State RFC
Delegated to: Kalle Valo
Headers show
Series wireless: cl8k driver for Celeno IEEE 802.11ax devices | expand

Commit Message

Viktor Barna June 17, 2021, 4:01 p.m. UTC
From: Viktor Barna <viktor.barna@celeno.com>

(Part of the split. Please, take a look at the cover letter for more
details).

Signed-off-by: Viktor Barna <viktor.barna@celeno.com>
---
 .../net/wireless/celeno/cl8k/tx/bcmc_cfm.c    | 64 +++++++++++++++++++
 1 file changed, 64 insertions(+)
 create mode 100644 drivers/net/wireless/celeno/cl8k/tx/bcmc_cfm.c

--
2.30.0
diff mbox series

Patch

diff --git a/drivers/net/wireless/celeno/cl8k/tx/bcmc_cfm.c b/drivers/net/wireless/celeno/cl8k/tx/bcmc_cfm.c
new file mode 100644
index 000000000000..bc4bbdf40f54
--- /dev/null
+++ b/drivers/net/wireless/celeno/cl8k/tx/bcmc_cfm.c
@@ -0,0 +1,64 @@ 
+// SPDX-License-Identifier: MIT
+/* Copyright(c) 2019-2021, Celeno Communications Ltd. */
+
+#include "tx/bcmc_cfm.h"
+#include "chip.h"
+#include "tx/sw_txhdr.h"
+
+/*
+ * cl_hw->bcmc_cfm_queue:
+ * This queue is used to keep pointers to already sent
+ * beacon skbs that are waiting for confirmation.
+ */
+
+/* Initialize the broadcast/multicast (BCMC) confirmation queue list head. */
+void cl_bcmc_cfm_init(struct cl_hw *cl_hw)
+{
+       INIT_LIST_HEAD(&cl_hw->bcmc_cfm_queue.head);
+}
+
+/*
+ * Queue a sent sw_txhdr at the tail of the BCMC confirmation queue,
+ * where it waits until the firmware confirms transmission.
+ */
+void cl_bcmc_cfm_add(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr)
+{
+       list_add_tail(&sw_txhdr->cfm_list, &cl_hw->bcmc_cfm_queue.head);
+}
+
+/*
+ * Look up a pending BCMC sw_txhdr by the DMA address of its packet buffer.
+ *
+ * Returns the matching sw_txhdr, or NULL if no entry is queued for
+ * @dma_addr. When @keep_in_list is false the matching entry is unlinked
+ * from the queue before being returned (ownership passes to the caller).
+ *
+ * The plain list_for_each_entry() is sufficient here: iteration stops
+ * immediately after the (optional) list_del(), so the "safe" variant
+ * that pre-fetches the next element is unnecessary.
+ */
+struct cl_sw_txhdr *cl_bcmc_cfm_find(struct cl_hw *cl_hw, dma_addr_t dma_addr, bool keep_in_list)
+{
+       struct cl_single_cfm_queue *cfm_queue = &cl_hw->bcmc_cfm_queue;
+       struct cl_sw_txhdr *sw_txhdr;
+
+       list_for_each_entry(sw_txhdr, &cfm_queue->head, cfm_list) {
+               if (le32_to_cpu(sw_txhdr->txdesc.umacdesc.packet_addr[0]) == dma_addr) {
+                       if (!keep_in_list)
+                               list_del(&sw_txhdr->cfm_list);
+
+                       return sw_txhdr;
+               }
+       }
+
+       return NULL;
+}
+
+/*
+ * Release every entry still waiting for a BCMC confirmation.
+ *
+ * For each queued sw_txhdr: unmap its DMA buffer, free the skb with the
+ * IRQ-safe variant (this path may run in atomic context), unlink the
+ * entry and recycle the sw_txhdr. Finally restore the firmware
+ * free-space counter to its maximum, since no frames remain in flight.
+ *
+ * The original code fetched IEEE80211_SKB_CB(skb) into a local tx_info
+ * that was never used; the dead local and the call are removed.
+ */
+void cl_bcmc_cfm_flush_queue(struct cl_hw *cl_hw)
+{
+       struct cl_single_cfm_queue *cfm_queue = &cl_hw->bcmc_cfm_queue;
+       struct cl_sw_txhdr *sw_txhdr;
+       struct sk_buff *skb;
+       dma_addr_t dma_addr;
+
+       while (!list_empty(&cfm_queue->head)) {
+               sw_txhdr = list_first_entry(&cfm_queue->head, struct cl_sw_txhdr, cfm_list);
+               dma_addr = le32_to_cpu(sw_txhdr->txdesc.umacdesc.packet_addr[0]);
+               skb = sw_txhdr->skb;
+
+               dma_unmap_single(cl_hw->chip->dev, dma_addr, sw_txhdr->map_len, DMA_TO_DEVICE);
+               dev_kfree_skb_irq(skb);
+               list_del(&sw_txhdr->cfm_list);
+               cl_sw_txhdr_free(cl_hw, sw_txhdr);
+       }
+
+       /* Set fw_free_space back to maximum after flushing the queue */
+       cl_hw->tx_queues.bcmc.fw_free_space = cl_hw->tx_queues.bcmc.fw_max_size;
+}