[RFC,v1,216/256] cl8k: add tx/tx_inject.c

Message ID 20210617160223.160998-217-viktor.barna@celeno.com (mailing list archive)
State RFC
Delegated to: Kalle Valo
Series: wireless: cl8k driver for Celeno IEEE 802.11ax devices

Commit Message

Viktor Barna June 17, 2021, 4:01 p.m. UTC
From: Viktor Barna <viktor.barna@celeno.com>

(Part of the split. Please take a look at the cover letter for more
details.)

Signed-off-by: Viktor Barna <viktor.barna@celeno.com>
---
 .../net/wireless/celeno/cl8k/tx/tx_inject.c   | 364 ++++++++++++++++++
 1 file changed, 364 insertions(+)
 create mode 100644 drivers/net/wireless/celeno/cl8k/tx/tx_inject.c

--
2.30.0

Patch

diff --git a/drivers/net/wireless/celeno/cl8k/tx/tx_inject.c b/drivers/net/wireless/celeno/cl8k/tx/tx_inject.c
new file mode 100644
index 000000000000..a311b7b8406a
--- /dev/null
+++ b/drivers/net/wireless/celeno/cl8k/tx/tx_inject.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: MIT
+/* Copyright(c) 2019-2021, Celeno Communications Ltd. */
+
+#include "tx/tx_inject.h"
+#include "tx/tx.h"
+#include "edca.h"
+#include "reg/reg_access.h"
+#include "rate_ctrl.h"
+#include "edca.h"
+#include "tx/tx_queue.h"
+#include "ate.h"
+#include "tx/single_cfm.h"
+#include "mac_addr.h"
+#include "tx/baw.h"
+#include "ampdu.h"
+#include "key.h"
+
+#define TX_BA_SESSION_TIMEOUT 10
+
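+/*
+ * 802.11 QoS Data header template for injected frames. The placeholder
+ * Addr1/Addr2/Addr3 fields are overwritten with the real station and vif
+ * addresses in cl_tx_inject_alloc_skb() when a station is targeted.
+ */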
+static const u8 skb_inject_prefix_single[] = {
+       0x88, 0x02,                         /* Frame control - DATA, QOS-DATA, FROM-DS */
+       0x00, 0x00,                         /* Duration / ID */
+       0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* Addr1 - RA = DA */
+       0x00, 0x11, 0x22, 0x33, 0x44, 0x55, /* Addr2 - TA = BSSID */
+       0x00, 0x11, 0x22, 0x33, 0x44, 0x55, /* Addr3 - SA */
+       0x00, 0x00,                         /* Sequence control */
+       0x00, 0x00,                         /* QoS control */
+};
+
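+/*
+ * Build one injected frame: the QoS Data header template followed by a
+ * cyclic 0x00..0xFF payload, padded up to tx_inject->packet_len. Frames
+ * aimed at a station go out on the BE queue; frames without a station go
+ * out on the VO queue with NO_ACK set.
+ */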
+struct sk_buff *cl_tx_inject_alloc_skb(struct cl_hw *cl_hw, struct cl_sta *cl_sta)
+{
+       u8 *skb_data = NULL;
+       u8 cyclic_data = 0;
+       u32 i;
+       struct sk_buff *skb = NULL;
+       struct ieee80211_tx_info *tx_info = NULL;
+       struct cl_tx_inject *tx_inject = &cl_hw->tx_inject;
+       u32 prefix_size = sizeof(skb_inject_prefix_single);
+
+       skb = dev_alloc_skb(tx_inject->packet_len);
+
+       if (!skb)
+               return NULL;
+
+       tx_info = IEEE80211_SKB_CB(skb);
+       memset(tx_info, 0, sizeof(struct ieee80211_tx_info));
+
+       /* Copy data */
+       skb_data = skb_put(skb, tx_inject->packet_len);
+
+       memcpy(skb_data, skb_inject_prefix_single, prefix_size);
+
+       if (cl_sta) {
+               struct ieee80211_qos_hdr *qos_hdr = (struct ieee80211_qos_hdr *)skb->data;
+
+               skb->dev = cl_sta->cl_vif->dev;
+
+               cl_mac_addr_copy(qos_hdr->addr1, cl_sta->addr);
+               cl_mac_addr_copy(qos_hdr->addr2, cl_sta->cl_vif->vif->addr);
+               cl_mac_addr_copy(qos_hdr->addr3, cl_sta->cl_vif->vif->addr);
+       }
+
+       for (i = prefix_size; i < tx_inject->packet_len; i++) {
+               *(skb_data + i) = cyclic_data;
+               cyclic_data++;
+       }
+
+       tx_info->band = cl_hw->nl_band;
+       tx_info->flags = IEEE80211_TX_CTL_INJECTED;
+
+       if (cl_sta) {
+               tx_info->hw_queue = CL_HWQ_BE;
+               tx_info->control.vif = cl_sta->cl_vif->vif;
+               tx_info->control.hw_key = cl_key_get(cl_sta);
+       } else {
+               struct cl_vif *cl_vif = cl_vif_get_first(cl_hw);
+
+               if (!cl_vif) {
+                       kfree_skb(skb);
+                       return NULL;
+               }
+
+               tx_info->hw_queue = CL_HWQ_VO;
+               tx_info->flags |= IEEE80211_TX_CTL_NO_ACK;
+               tx_info->control.vif = cl_vif->vif;
+       }
+
+       tx_inject->alloc_counter++;
+
+       return skb;
+}
+
+static struct cl_sta *get_first_sta(struct cl_hw *cl_hw)
+{
+       return list_first_entry_or_null(&cl_hw->cl_sta_db.head, struct cl_sta, list);
+}
+
+static struct cl_sta *get_next_sta(struct cl_hw *cl_hw, struct cl_sta *cl_sta)
+{
+       if (list_is_last(&cl_sta->list, &cl_hw->cl_sta_db.head))
+               return get_first_sta(cl_hw);
+       else
+               return list_next_entry(cl_sta, list);
+}
+
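+/*
+ * Injection engine: keep queueing frames until the requested count is
+ * reached, or forever in continuous mode. It backs off when the single
+ * queue is full or TX_INJECT_MAX_SKBS frames are already in flight, and
+ * is rescheduled from cl_tx_inject_cfm() as confirmations arrive. When a
+ * station is targeted, transmission rotates over all known stations.
+ */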
+static void cl_tx_inject_tasklet(unsigned long data)
+{
+       struct cl_hw *cl_hw = (struct cl_hw *)data;
+       struct cl_tx_inject *tx_inject = &cl_hw->tx_inject;
+       struct sk_buff *skb = NULL;
+
+       while ((tx_inject->current_counter < tx_inject->max_counter) || tx_inject->continuous) {
+               u16 queue_idx = tx_inject->cl_sta ?
+                       QUEUE_IDX(tx_inject->cl_sta->sta_idx, TX_INJECT_SINGLE_AC) :
+                       HIGH_PRIORITY_QUEUE;
+
+               if (cl_txq_single_is_full(cl_hw, queue_idx))
+                       return;
+
+               if (tx_inject->alloc_counter == TX_INJECT_MAX_SKBS)
+                       return;
+
+               skb = cl_tx_inject_alloc_skb(cl_hw, tx_inject->cl_sta);
+
+               if (!skb)
+                       return;
+
+               cl_tx_single(cl_hw, tx_inject->cl_sta, skb, false, true);
+
+               if (tx_inject->cl_sta)
+                       tx_inject->cl_sta = get_next_sta(cl_hw, tx_inject->cl_sta);
+
+               if (!tx_inject->continuous)
+                       tx_inject->current_counter++;
+       }
+}
+
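+/*
+ * Minimize contention while injecting: aifsn = 1, cw_min = 0, cw_max = 1
+ * and txop = 0 on the VO access category. edca_restore_default() undoes
+ * this when injection stops.
+ */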
+static void edca_set_aggressive(struct cl_hw *cl_hw)
+{
+       struct cl_tx_inject *tx_inject = &cl_hw->tx_inject;
+
+       if (!tx_inject->aggressive_edca) {
+               struct edca_params aggressive_params = {
+                       .aifsn = 1, .cw_min = 0, .cw_max = 1, .txop = 0
+               };
+
+               cl_edca_set(cl_hw, EDCA_AC_VO, &aggressive_params, NULL);
+               tx_inject->aggressive_edca = true;
+       }
+}
+
+static void edca_restore_default(struct cl_hw *cl_hw)
+{
+       struct cl_tx_inject *tx_inject = &cl_hw->tx_inject;
+
+       if (tx_inject->aggressive_edca) {
+               cl_edca_restore_conf(cl_hw, EDCA_AC_VO);
+               tx_inject->aggressive_edca = false;
+       }
+}
+
+void cl_tx_inject_init(struct cl_hw *cl_hw)
+{
+       tasklet_init(&cl_hw->tx_inject.tasklet, cl_tx_inject_tasklet, (unsigned long)cl_hw);
+}
+
+void cl_tx_inject_close(struct cl_hw *cl_hw)
+{
+       struct cl_tx_inject *tx_inject = &cl_hw->tx_inject;
+
+       tasklet_kill(&tx_inject->tasklet);
+}
+
+void cl_tx_inject_reset(struct cl_hw *cl_hw)
+{
+       struct cl_tx_inject *tx_inject = &cl_hw->tx_inject;
+
+       /* Return packet_len to default */
+       tx_inject->packet_len = TX_INJECT_SKB_LEN_DEFAULT;
+       tx_inject->cl_sta = NULL;
+}
+
+static void _cl_tx_inject_start(struct cl_tx_inject *tx_inject,
+                               u32 max_counter,
+                               bool continuous)
+{
+       tx_inject->current_counter = 0;
+       tx_inject->max_counter = max_counter;
+       tx_inject->continuous = continuous;
+       tx_inject->is_running = true;
+}
+
+void cl_tx_inject_start(struct cl_hw *cl_hw, u32 tx_cnt)
+{
+       struct cl_tx_inject *tx_inject = &cl_hw->tx_inject;
+
+       edca_set_aggressive(cl_hw);
+
+       _cl_tx_inject_start(tx_inject, tx_cnt, false);
+
+       if (!tx_inject->cl_sta)
+               tx_inject->cl_sta = get_first_sta(cl_hw);
+
+       tasklet_schedule(&tx_inject->tasklet);
+}
+
+void cl_tx_inject_start_continuous(struct cl_hw *cl_hw)
+{
+       struct cl_tx_inject *tx_inject = &cl_hw->tx_inject;
+
+       edca_set_aggressive(cl_hw);
+
+       _cl_tx_inject_start(tx_inject, 0, true);
+
+       if (!tx_inject->cl_sta)
+               tx_inject->cl_sta = get_first_sta(cl_hw);
+
+       tasklet_schedule(&tx_inject->tasklet);
+}
+
+static void _cl_tx_inject_stop(struct cl_tx_inject *tx_inject)
+{
+       tx_inject->current_counter = 0;
+       tx_inject->max_counter = 0;
+       tx_inject->continuous = false;
+       tx_inject->is_running = false;
+}
+
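+/*
+ * Stop injection: restore the default EDCA configuration, reset the
+ * counters, then flush the single queues used for injection and poll
+ * until all outstanding confirmations have drained.
+ */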
+void cl_tx_inject_stop(struct cl_hw *cl_hw)
+{
+       struct cl_tx_inject *tx_inject = &cl_hw->tx_inject;
+
+       /* Return to default EDCA */
+       edca_restore_default(cl_hw);
+
+       _cl_tx_inject_stop(tx_inject);
+
+       if (tx_inject->cl_sta) {
+               struct cl_sta *cl_sta = NULL;
+
+               list_for_each_entry(cl_sta, &cl_hw->cl_sta_db.head, list) {
+                       u16 queue_idx = QUEUE_IDX(cl_sta->sta_idx, TX_INJECT_SINGLE_AC);
+
+                       cl_txq_flush_single(cl_hw, queue_idx);
+                       cl_single_cfm_poll_empty(cl_hw, queue_idx);
+               }
+       } else {
+               cl_txq_flush_single(cl_hw, HIGH_PRIORITY_QUEUE);
+               cl_single_cfm_poll_empty(cl_hw, HIGH_PRIORITY_QUEUE);
+       }
+}
+
+void cl_tx_inject_stop_in_recovery(struct cl_hw *cl_hw)
+{
+       /*
+        * When recovery starts:
+        *  - change edca back to default
+        *  - stop traffic
+        *  - kill tasklet
+        *  - free stations
+        */
+       struct cl_tx_inject *tx_inject = &cl_hw->tx_inject;
+
+       if (!tx_inject->is_running)
+               return;
+
+       pr_debug("[TX inject] Stop due to recovery\n");
+
+       edca_restore_default(cl_hw);
+
+       _cl_tx_inject_stop(tx_inject);
+
+       cl_tx_inject_close(cl_hw);
+
+       cl_hw->ate_db.active = false;
+       cl_hw->entry_fixed_rate = false;
+}
+
+void cl_tx_inject_stop_traffic(struct cl_hw *cl_hw)
+{
+       struct cl_tx_inject *tx_inject = &cl_hw->tx_inject;
+
+       _cl_tx_inject_stop(tx_inject);
+}
+
+bool cl_tx_inject_is_running(struct cl_hw *cl_hw)
+{
+       return cl_hw->tx_inject.is_running;
+}
+
+static void cl_tx_inject_cfm_single(struct cl_hw *cl_hw)
+{
+       struct cl_sta *cl_sta = NULL;
+
+       cl_sta_lock(cl_hw);
+
+       list_for_each_entry(cl_sta, &cl_hw->cl_sta_db.head, list) {
+               u16 queue_idx = QUEUE_IDX(cl_sta->sta_idx, TX_INJECT_SINGLE_AC);
+
+               cl_txq_single_sched(cl_hw, queue_idx);
+       }
+
+       cl_sta_unlock(cl_hw);
+
+       cl_txq_single_sched(cl_hw, HIGH_PRIORITY_QUEUE);
+}
+
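+/*
+ * Transmit confirmation hook: release one in-flight frame, reschedule the
+ * tasklet while frames remain to be sent, and once the last confirmation
+ * arrives resume the regular single queues and end the injection run.
+ */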
+void cl_tx_inject_cfm(struct cl_hw *cl_hw)
+{
+       struct cl_tx_inject *tx_inject = &cl_hw->tx_inject;
+
+       tx_inject->alloc_counter--;
+
+       if (tx_inject->current_counter < tx_inject->max_counter || tx_inject->continuous)
+               tasklet_schedule(&tx_inject->tasklet);
+       else
+               cl_tx_inject_cfm_single(cl_hw);
+
+       if (tx_inject->is_running &&
+           tx_inject->alloc_counter == 0 &&
+           tx_inject->current_counter == tx_inject->max_counter) {
+               pr_debug("[TX inject] Complete - %u packets\n", tx_inject->max_counter);
+               _cl_tx_inject_stop(tx_inject);
+       }
+}
+
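+/*
+ * A station is being removed: advance the injection target to the next
+ * station. If the removed station was the only one, stop the ATE session.
+ */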
+void cl_tx_inject_sta_remove(struct cl_hw *cl_hw, struct cl_sta *cl_sta)
+{
+       struct cl_tx_inject *tx_inject = &cl_hw->tx_inject;
+       bool stop_ate = false;
+
+       tasklet_disable(&tx_inject->tasklet);
+
+       if (cl_tx_inject_is_running(cl_hw)) {
+               if (tx_inject->cl_sta == cl_sta) {
+                       tx_inject->cl_sta = get_next_sta(cl_hw, cl_sta);
+
+                       /*
+                        * If the next STA is the same one, then only the
+                        * current cl_sta exists. In this case, stop ATE.
+                        */
+                       if (tx_inject->cl_sta == cl_sta)
+                               stop_ate = true;
+               }
+       }
+
+       if (stop_ate)
+               cl_ate_stop(cl_hw->hw->wiphy, NULL, NULL, 0);
+
+       tasklet_enable(&tx_inject->tasklet);
+}
+
+int cl_tx_inject_set_length(struct cl_hw *cl_hw, u32 length)
+{
+       if (length >= TX_INJECT_SKB_LEN_MIN && length <= TX_INJECT_SKB_LEN_MAX) {
+               cl_hw->tx_inject.packet_len = length;
+               return 0;
+       }
+
+       pr_debug("[TX inject] Packet length must be between %u and %u\n",
+                TX_INJECT_SKB_LEN_MIN, TX_INJECT_SKB_LEN_MAX);
+
+       return -EINVAL;
+}