new file mode 100644
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: MIT
+/* Copyright(c) 2019-2021, Celeno Communications Ltd. */
+
+#include "tx/bcmc_cfm.h"
+#include "chip.h"
+#include "tx/sw_txhdr.h"
+
+/*
+ * cl_hw->bcmc_cfm_queue:
+ * This queue is used to keep pointers to already-sent
+ * beacon skbs that are waiting for confirmation.
+ */
+
+void cl_bcmc_cfm_init(struct cl_hw *cl_hw)
+{
+ INIT_LIST_HEAD(&cl_hw->bcmc_cfm_queue.head);
+}
+
+void cl_bcmc_cfm_add(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr)
+{
+ list_add_tail(&sw_txhdr->cfm_list, &cl_hw->bcmc_cfm_queue.head);
+}
+
+struct cl_sw_txhdr *cl_bcmc_cfm_find(struct cl_hw *cl_hw, dma_addr_t dma_addr, bool keep_in_list)
+{
+ struct cl_single_cfm_queue *cfm_queue = &cl_hw->bcmc_cfm_queue;
+ struct cl_sw_txhdr *sw_txhdr = NULL;
+ struct cl_sw_txhdr *tmp = NULL;
+
+ list_for_each_entry_safe(sw_txhdr, tmp, &cfm_queue->head, cfm_list) {
+ if (le32_to_cpu(sw_txhdr->txdesc.umacdesc.packet_addr[0]) == dma_addr) {
+ if (!keep_in_list)
+ list_del(&sw_txhdr->cfm_list);
+
+ return sw_txhdr;
+ }
+ }
+
+ return NULL;
+}
+
+void cl_bcmc_cfm_flush_queue(struct cl_hw *cl_hw)
+{
+ struct cl_single_cfm_queue *cfm_queue = &cl_hw->bcmc_cfm_queue;
+ struct cl_sw_txhdr *sw_txhdr = NULL;
+ struct sk_buff *skb = NULL;
+ struct ieee80211_tx_info *tx_info = NULL;
+ dma_addr_t dma_addr;
+
+ while (!list_empty(&cfm_queue->head)) {
+ sw_txhdr = list_first_entry(&cfm_queue->head, struct cl_sw_txhdr, cfm_list);
+ dma_addr = le32_to_cpu(sw_txhdr->txdesc.umacdesc.packet_addr[0]);
+ skb = sw_txhdr->skb;
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ dma_unmap_single(cl_hw->chip->dev, dma_addr, sw_txhdr->map_len, DMA_TO_DEVICE);
+ dev_kfree_skb_irq(skb);
+ list_del(&sw_txhdr->cfm_list);
+ cl_sw_txhdr_free(cl_hw, sw_txhdr);
+ }
+
+ /* Set fw_free_space back to maximum after flushing the queue */
+ cl_hw->tx_queues.bcmc.fw_free_space = cl_hw->tx_queues.bcmc.fw_max_size;
+}