new file mode 100644
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: MIT
+/* Copyright(c) 2019-2021, Celeno Communications Ltd. */
+
+#include "enhanced_tim.h"
+#include "bus/pci/ipc.h"
+#include "utils/utils.h"
+
+/*
+ * The kernel's test_and_set_bit() takes an unsigned long * as an argument, but we actually
+ * pass a pointer to u32, which causes an alignment fault on 64-bit platforms.
+ * This function takes a pointer to u32 to prevent this alignment fault.
+ * Notice that the kernel's function sets the bit as an atomic operation,
+ * and our function doesn't. But it's not an issue since we set the bit from one context only.
+ */
+/*
+ * Non-atomic test-and-set over a u32-based bitmap (see the comment above for
+ * why the kernel's test_and_set_bit() cannot be used here).
+ *
+ * Returns non-zero if bit @nr was already set, 0 otherwise.
+ */
+static int cl_test_and_set_bit(unsigned long nr, u32 *addr)
+{
+	u32 *word, mask, old;
+
+	/* Each u32 word holds 32 bits; nr >> 5 selects the word */
+	word = addr + (nr >> 5);
+	/* 1U: "1 << 31" would be signed-integer overflow (UB) when nr & 31 == 31 */
+	mask = 1U << (nr & 31);
+	old = *word & mask;
+	*word |= mask;
+
+	return (old != 0);
+}
+
+/*
+ * Non-atomic test-and-clear counterpart of cl_test_and_set_bit().
+ *
+ * Returns non-zero if bit @nr was set before clearing, 0 otherwise.
+ */
+static int CFM_TEST_AND_CLEAR_BIT(unsigned long nr, u32 *addr)
+{
+	u32 *word, mask, old;
+
+	/* Each u32 word holds 32 bits; nr >> 5 selects the word */
+	word = addr + (nr >> 5);
+	/* 1U: "1 << 31" would be signed-integer overflow (UB) when nr & 31 == 31 */
+	mask = 1U << (nr & 31);
+	old = *word & mask;
+	*word &= ~mask;
+
+	return (old != 0);
+}
+
+void cl_enhanced_tim_reset(struct cl_hw *cl_hw)
+{
+	/*
+	 * Only the host-side copy is cleared here; the shared
+	 * cl_hw->ipc_env->shared->enhanced_tim is reset as part of
+	 * ipc_shared_env_init().
+	 */
+	memset(&cl_hw->ipc_env->enhanced_tim, 0,
+	       sizeof(cl_hw->ipc_env->enhanced_tim));
+}
+
+/*
+ * NOTE: the UMAC DRAM starts with the enhanced TIM element structures.
+ * This is hard coded in the FW; this memory allocation should be changed in
+ * the driver module .ELF file.
+ */
+
+void cl_enhanced_tim_clear_tx_agg(struct cl_hw *cl_hw, u32 ipc_queue_idx,
+				  u8 ac, struct cl_sta *cl_sta, u8 tid)
+{
+	/* Host-side enhanced TIM bitmap for aggregated TX/RX */
+	u32 *host_tim = cl_hw->ipc_env->enhanced_tim.tx_rx_agg[ac];
+	u32 common_idx = IPC_TX_QUEUE_IDX_TO_COMMON_QUEUE_IDX(ipc_queue_idx);
+	u32 *umac_tim;
+	u32 word_idx;
+
+	/*
+	 * The UMAC enhanced TIM needs an update only when the host-side
+	 * TIM bit was actually set; otherwise there is nothing to do.
+	 */
+	if (!CFM_TEST_AND_CLEAR_BIT(common_idx, host_tim))
+		return;
+
+	/* UMAC enhanced TIM, and the word holding this queue's bit */
+	umac_tim = (u32 *)cl_hw->ipc_env->shared->enhanced_tim.tx_rx_agg[ac];
+	word_idx = common_idx / (BITS_PER_BYTE * sizeof(u32));
+
+	/* Tell mac80211 this TID no longer has buffered frames for a PS station */
+	if (cl_sta && test_sta_flag(cl_sta->stainfo, WLAN_STA_PS_STA))
+		ieee80211_sta_set_buffered(&cl_sta->stainfo->sta, tid, false);
+
+	umac_tim[word_idx] = cpu_to_le32(host_tim[word_idx]);
+}
+
+void cl_enhanced_tim_clear_tx_single(struct cl_hw *cl_hw, u32 ipc_queue_idx, u8 ac,
+				     bool no_ps_buffer, struct cl_sta *cl_sta, u8 tid)
+{
+	/* Host-side enhanced TIM bitmap for single (non-aggregated) TX */
+	u32 *host_tim = cl_hw->ipc_env->enhanced_tim.tx_single[ac];
+	/* Station index: 0 - 128 (do not use cl_sta->sta_idx which is 0 - 127) */
+	u32 sta_idx = ipc_queue_idx % FW_MAX_NUM_STA;
+	u32 *umac_tim;
+	u32 word_idx;
+
+	/*
+	 * The UMAC enhanced TIM needs an update only when the host-side
+	 * TIM bit was actually set; otherwise there is nothing to do.
+	 */
+	if (!CFM_TEST_AND_CLEAR_BIT(sta_idx, host_tim))
+		return;
+
+	/* UMAC enhanced TIM, and the word holding this station's bit */
+	umac_tim = (u32 *)cl_hw->ipc_env->shared->enhanced_tim.tx_single[ac];
+	word_idx = sta_idx / (BITS_PER_BYTE * sizeof(u32));
+
+	/* Tell mac80211 this TID no longer has buffered frames for a PS station */
+	if (!no_ps_buffer && cl_sta &&
+	    test_sta_flag(cl_sta->stainfo, WLAN_STA_PS_STA))
+		ieee80211_sta_set_buffered(&cl_sta->stainfo->sta, tid, false);
+
+	umac_tim[word_idx] = cpu_to_le32(host_tim[word_idx]);
+}
+
+void cl_enhanced_tim_set_tx_agg(struct cl_hw *cl_hw, u32 ipc_queue_idx, u8 ac,
+				bool no_ps_buffer, struct cl_sta *cl_sta, u8 tid)
+{
+	/* Host-side enhanced TIM bitmap for aggregated TX/RX */
+	u32 *host_tim = cl_hw->ipc_env->enhanced_tim.tx_rx_agg[ac];
+	u32 common_idx = IPC_TX_QUEUE_IDX_TO_COMMON_QUEUE_IDX(ipc_queue_idx);
+	u32 *umac_tim;
+	u32 word_idx;
+
+	/*
+	 * The UMAC enhanced TIM needs an update only when the host-side
+	 * TIM bit was clear; if it was already set, nothing to do.
+	 */
+	if (cl_test_and_set_bit(common_idx, host_tim))
+		return;
+
+	/* UMAC enhanced TIM, and the word holding this queue's bit */
+	umac_tim = (u32 *)cl_hw->ipc_env->shared->enhanced_tim.tx_rx_agg[ac];
+	word_idx = common_idx / (BITS_PER_BYTE * sizeof(u32));
+
+	/* Tell mac80211 this TID now has buffered frames for a PS station */
+	if (!no_ps_buffer && cl_sta &&
+	    test_sta_flag(cl_sta->stainfo, WLAN_STA_PS_STA))
+		ieee80211_sta_set_buffered(&cl_sta->stainfo->sta, tid, true);
+
+	umac_tim[word_idx] = cpu_to_le32(host_tim[word_idx]);
+}
+
+void cl_enhanced_tim_set_tx_single(struct cl_hw *cl_hw, u32 ipc_queue_idx, u8 ac,
+				   bool no_ps_buffer, struct cl_sta *cl_sta, u8 tid)
+{
+	/* Host-side enhanced TIM bitmap for single (non-aggregated) TX */
+	u32 *host_tim = cl_hw->ipc_env->enhanced_tim.tx_single[ac];
+	/* Station index: 0 - 128 (do not use cl_sta->sta_idx which is 0 - 127) */
+	u32 sta_idx = ipc_queue_idx % FW_MAX_NUM_STA;
+	u32 *umac_tim;
+	u32 word_idx;
+
+	/*
+	 * The UMAC enhanced TIM needs an update only when the host-side
+	 * TIM bit was clear; if it was already set, nothing to do.
+	 */
+	if (cl_test_and_set_bit(sta_idx, host_tim))
+		return;
+
+	/* UMAC enhanced TIM, and the word holding this station's bit */
+	umac_tim = (u32 *)cl_hw->ipc_env->shared->enhanced_tim.tx_single[ac];
+	word_idx = sta_idx / (BITS_PER_BYTE * sizeof(u32));
+
+	/* Tell mac80211 this TID now has buffered frames for a PS station */
+	if (!no_ps_buffer && cl_sta &&
+	    test_sta_flag(cl_sta->stainfo, WLAN_STA_PS_STA))
+		ieee80211_sta_set_buffered(&cl_sta->stainfo->sta, tid, true);
+
+	umac_tim[word_idx] = cpu_to_le32(host_tim[word_idx]);
+}
+
+void cl_enhanced_tim_clear_rx(struct cl_hw *cl_hw, u8 ac, u8 sta_idx)
+{
+	/* Host-side enhanced TIM bitmap for aggregated TX/RX */
+	u32 *host_tim = cl_hw->ipc_env->enhanced_tim.tx_rx_agg[ac];
+	u32 common_idx = IPC_RX_QUEUE_IDX_TO_COMMON_QUEUE_IDX(sta_idx);
+	u32 *umac_tim;
+	u32 word_idx;
+
+	/*
+	 * Only propagate to the UMAC enhanced TIM when the host-side
+	 * bit was actually set.
+	 */
+	if (!CFM_TEST_AND_CLEAR_BIT(common_idx, host_tim))
+		return;
+
+	/* Copy the updated word to the UMAC enhanced TIM */
+	umac_tim = (u32 *)cl_hw->ipc_env->shared->enhanced_tim.tx_rx_agg[ac];
+	word_idx = common_idx / (BITS_PER_BYTE * sizeof(u32));
+	umac_tim[word_idx] = cpu_to_le32(host_tim[word_idx]);
+}
+
+void cl_enhanced_tim_set_rx(struct cl_hw *cl_hw, u8 ac, u8 sta_idx)
+{
+	/* Host-side enhanced TIM bitmap for aggregated TX/RX */
+	u32 *host_tim = cl_hw->ipc_env->enhanced_tim.tx_rx_agg[ac];
+	u32 common_idx = IPC_RX_QUEUE_IDX_TO_COMMON_QUEUE_IDX(sta_idx);
+	u32 *umac_tim;
+	u32 word_idx;
+
+	/*
+	 * Only propagate to the UMAC enhanced TIM when the host-side
+	 * bit was clear; if it was already set, nothing to do.
+	 */
+	if (cl_test_and_set_bit(common_idx, host_tim))
+		return;
+
+	/* Copy the updated word to the UMAC enhanced TIM */
+	umac_tim = (u32 *)cl_hw->ipc_env->shared->enhanced_tim.tx_rx_agg[ac];
+	word_idx = common_idx / (BITS_PER_BYTE * sizeof(u32));
+	umac_tim[word_idx] = cpu_to_le32(host_tim[word_idx]);
+
+	/* Raise the A2E interrupt so the firmware re-reads the RX station map */
+	cl_hw->ipc_host2xmac_trigger_set(cl_hw->chip, BIT(IPC_IRQ_A2E_RX_STA_MAP(ac)));
+}
+
+void cl_enhanced_tim_clear_rx_sta(struct cl_hw *cl_hw, u8 sta_idx)
+{
+	u8 ac = 0;
+
+	/* Clear this station's RX TIM bit for every access category */
+	while (ac < AC_MAX) {
+		cl_enhanced_tim_clear_rx(cl_hw, ac, sta_idx);
+		ac++;
+	}
+}
+
+void cl_enhanced_tim_set_rx_sta(struct cl_hw *cl_hw, u8 sta_idx)
+{
+	u8 ac = 0;
+
+	/* Set this station's RX TIM bit for every access category */
+	while (ac < AC_MAX) {
+		cl_enhanced_tim_set_rx(cl_hw, ac, sta_idx);
+		ac++;
+	}
+}
+