
[RFC,v1,096/256] cl8k: add fw/msg_tx.c

Message ID: 20210617160223.160998-97-viktor.barna@celeno.com (mailing list archive)
State: RFC
Delegated to: Kalle Valo
Series: wireless: cl8k driver for Celeno IEEE 802.11ax devices

Commit Message

Viktor Barna June 17, 2021, 3:59 p.m. UTC
From: Viktor Barna <viktor.barna@celeno.com>

(Part of the split. Please take a look at the cover letter for more
details.)

Signed-off-by: Viktor Barna <viktor.barna@celeno.com>
---
 drivers/net/wireless/celeno/cl8k/fw/msg_tx.c | 1800 ++++++++++++++++++
 1 file changed, 1800 insertions(+)
 create mode 100644 drivers/net/wireless/celeno/cl8k/fw/msg_tx.c

--
2.30.0

Patch

diff --git a/drivers/net/wireless/celeno/cl8k/fw/msg_tx.c b/drivers/net/wireless/celeno/cl8k/fw/msg_tx.c
new file mode 100644
index 000000000000..b63d5be24660
--- /dev/null
+++ b/drivers/net/wireless/celeno/cl8k/fw/msg_tx.c
@@ -0,0 +1,1800 @@ 
+// SPDX-License-Identifier: MIT
+/* Copyright(c) 2019-2021, Celeno Communications Ltd. */
+
+#include "chip.h"
+#include "tx/tx.h"
+#include "rx/rx.h"
+#include "fw/msg_tx.h"
+#include "fw/msg_cfm.h"
+#include "fw/fw_msg.h"
+#include "drv_ops.h"
+#include "temperature.h"
+#include "chan_info.h"
+#include "power.h"
+#include "env_det.h"
+#include "rx/rx_filter.h"
+#include "prot_mode.h"
+#include "rate_ctrl.h"
+#include "utils/utils.h"
+#include "calib.h"
+#include "band.h"
+#include "reg/reg_riu.h"
+#include "reg/reg_ricu.h"
+#include "recovery.h"
+#include "utils/math.h"
+#include "fem.h"
+#include "agc_params.h"
+#include "mac_addr.h"
+#include "cap.h"
+#include "ampdu.h"
+#include "phy/phy_common_lut.h"
+#include "channel.h"
+
+#define DRV_TASK_ID 100
+
+#define CL_DEF_ANT_BITMAP 0x55
+
+/* No scale-down on ASIC platform */
+#define CL_ASIC_FW_SCALEDOWN 1
+
+struct cl_msg_tx_work {
+       struct work_struct ws;
+
+       /* Background message info */
+       struct cl_hw *cl_hw;
+       void *msg_params;
+};
+
+void cl_msg_tx_free_cfm_params(struct cl_hw *cl_hw, u16 id)
+{
+       /* Free message and set pointer to NULL */
+       kfree(cl_hw->msg_cfm_params[id]);
+       cl_hw->msg_cfm_params[id] = NULL;
+}
+
+static inline void *cl_msg_zalloc(struct cl_hw *cl_hw, u16 msg_id, u8 dst_task_id, u16 param_len)
+{
+       struct fw_msg *msg;
+       u32 total_size = ALIGN(sizeof(struct fw_msg) + param_len, sizeof(u32));
+       u32 max_size = sizeof(u32) * IPC_A2E_MSG_BUF_SIZE;
+
+       if (total_size > max_size) {
+               cl_dbg_err(cl_hw, "total size (%u) > max size (%u)\n",
+                          total_size, max_size);
+               return NULL;
+       }
+
+       /* The msg is freed outside the scope of this function */
+       msg = kzalloc(total_size, GFP_ATOMIC);
+       if (!msg)
+               return NULL;
+
+       msg->msg_id = cpu_to_le16(msg_id);
+       msg->dst_kern_id = cl_hw->fw_dst_kern_id;
+       msg->dst_task_id = dst_task_id;
+       msg->src_kern_id = KERN_HOST;
+       msg->src_task_id = DRV_TASK_ID;
+       msg->param_len = cpu_to_le16(param_len);
+
+       return msg->param;
+}
+
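+/*
+ * cl_msg_zalloc() hands out a pointer to msg->param, so recover the
+ * enclosing struct fw_msg before freeing the allocation.
+ */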
+static inline void cl_msg_free(const void *msg_param)
+{
+       kfree(container_of((void *)msg_param, struct fw_msg, param));
+}
+
+static void cl_send_msg_background_handler(struct work_struct *ws)
+{
+       struct cl_msg_tx_work *msg_tx_work = container_of(ws, struct cl_msg_tx_work, ws);
+
+       cl_drv_ops_msg_fw_send(msg_tx_work->cl_hw, msg_tx_work->msg_params, true);
+       kfree(msg_tx_work);
+}
+
+static int cl_send_msg_background(struct cl_hw *cl_hw,
+                                 const void *msg_params)
+{
+       /* Generate & populate the work struct wrapper for the background msg */
+       struct cl_msg_tx_work *msg_tx_work = kzalloc(sizeof(*msg_tx_work), GFP_ATOMIC);
+
+       if (msg_tx_work) {
+               INIT_WORK(&msg_tx_work->ws, cl_send_msg_background_handler);
+               msg_tx_work->cl_hw = cl_hw;
+               msg_tx_work->msg_params = (void *)msg_params;
+
+               /* Schedule work, the work will be executed in the background */
+               queue_work(cl_hw->drv_workqueue, &msg_tx_work->ws);
+
+               return 0;
+       }
+
+       cl_dbg_err(cl_hw, "msg_tx_work allocation failed\n");
+       cl_msg_free(msg_params);
+
+       return -ENOMEM;
+}
+
+static int cl_send_request(struct cl_hw *cl_hw, const void *msg_params)
+{
+       int ret;
+       bool background = (preempt_count() != 0);
+
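+       /*
+        * A non-zero preempt_count() means we are running in atomic
+        * context (hardirq, softirq or preemption disabled), where we
+        * must not block waiting for the firmware, so the message is
+        * deferred to a workqueue instead.
+        */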
+       if (background) {
+               /*
+                * Asynchronous operation mode - the message is sent from
+                * a workqueue in the background.
+                */
+               ret = cl_send_msg_background(cl_hw, msg_params);
+       } else {
+               /*
+                * Synchronous operation mode - the message is sent
+                * immediately and feedback is returned to the caller
+                * immediately.
+                */
+               ret = cl_drv_ops_msg_fw_send(cl_hw, msg_params, false);
+       }
+
+       /*
+        * In synchronous mode a successful return value means that the
+        * message was transmitted, whereas in asynchronous mode it means
+        * that the message was successfully queued for background sending.
+        */
+       return ret;
+}
+
+int cl_msg_tx_reset(struct cl_hw *cl_hw)
+{
+       void *void_param;
+
+       /* RESET REQ has no parameter */
+       void_param = cl_msg_zalloc(cl_hw, MM_RESET_REQ, TASK_MM, 0);
+       if (!void_param)
+               return -ENOMEM;
+
+       return cl_send_request(cl_hw, void_param);
+}
+
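+/* E.g. copy_mask_bits(0x0b, 2) returns 0x03 - the two lowest set bits. */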
+static u8 copy_mask_bits(u8 mask, u8 num_bits)
+{
+       /* Copy first X bits that are set in mask to new_mask */
+       u8 i = 0, cntr = 0, new_mask = 0;
+
+       for (i = 0; i < MAX_ANTENNAS; i++) {
+               if (mask & (1 << i)) {
+                       new_mask |= (1 << i);
+
+                       cntr++;
+                       if (cntr == num_bits)
+                               break;
+               }
+       }
+
+       return new_mask;
+}
+
+static void cl_fill_ant_config(struct cl_hw *cl_hw,
+                              struct cl_antenna_config *ant_config,
+                              u8 num_antennas, u8 mask_antennas,
+                              u8 tx_mask_cck, u8 rx_mask_cck)
+{
+       struct cl_chip *chip = cl_hw->chip;
+       u8 ricu_cdb = 0;
+       u8 ant_shift = cl_hw_ant_shift(cl_hw);
+
+       ant_config->num_tx_he = num_antennas;
+       ant_config->num_rx = num_antennas;
+       ant_config->mask_tx_he = mask_antennas << ant_shift;
+       ant_config->mask_rx = mask_antennas << ant_shift;
+
+       /* Configuration for TX OFDM/HT/VHT (limited to 4 antennas) */
+       if (num_antennas <= MAX_ANTENNAS_OFDM_HT_VHT) {
+               ant_config->num_tx_ofdm_ht_vht = num_antennas;
+               ant_config->mask_tx_ofdm_ht_vht = mask_antennas << ant_shift;
+       } else {
+               ant_config->num_tx_ofdm_ht_vht = MAX_ANTENNAS_OFDM_HT_VHT;
+               ant_config->mask_tx_ofdm_ht_vht =
+                       copy_mask_bits(mask_antennas, MAX_ANTENNAS_OFDM_HT_VHT) << ant_shift;
+       }
+
+       /* Antenna configuration for CCK */
+       if (cl_band_is_24g(cl_hw)) {
+               ant_config->mask_tx_cck = tx_mask_cck << ant_shift;
+               ant_config->mask_rx_cck = rx_mask_cck << ant_shift;
+       }
+
+       ricu_cdb = ricu_static_conf_0_cdb_mode_maj_getf(chip);
+
+       /*
+        * In the current implementation cdb_mode equals the number of
+        * antennas assigned to SX1. In cdb_mask, a 0 bit selects the SX0
+        * chain and a 1 bit selects the SX1 chain.
+        */
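+       /*
+        * Example, assuming MAX_ANTENNAS_CHIP = 8 and ANT_MASK(n) being
+        * the n lowest bits: with two SX1 antennas, 8 - 2 = 6 is the SX0
+        * antenna count, ANT_MASK(6) = 0x3f covers the SX0 chains, and
+        * the inverted mask 0xc0 marks the SX1 chains.
+        */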
+       ricu_cdb = MAX_ANTENNAS_CHIP - ricu_cdb;
+       ricu_cdb = ANT_MASK(ricu_cdb);
+       ricu_cdb = ~ricu_cdb;
+
+       ant_config->cdb_mask = ricu_cdb;
+}
+
+static void cl_fill_fem_config(struct cl_hw *cl_hw, struct cl_fem_config *fem_conf)
+{
+       int i;
+
+       cl_fem_get_registers(cl_hw, fem_conf->reg);
+
+       for (i = 0; i < ARRAY_SIZE(fem_conf->reg); i++)
+               fem_conf->reg[i] = cpu_to_le32(fem_conf->reg[i]);
+}
+
+static void cl_fill_calib_config(struct cl_hw *cl_hw, struct cl_calib_param *calib_param,
+                                u16 primary, u16 center, u8 mode)
+{
+       struct cl_hw *cl_hw_other = cl_hw_other_tcv(cl_hw);
+       struct cl_tcv_conf *conf = cl_hw->conf;
+       u8 ant = 0;
+       u8 calib_bitmap = cl_hw->mask_num_antennas;
+       u8 ant_shift = cl_hw_ant_shift(cl_hw);
+
+       memset(calib_param->ant_tx_pairs, U8_MAX, sizeof(calib_param->ant_tx_pairs));
+       memset(calib_param->ant_rx_pairs, U8_MAX, sizeof(calib_param->ant_rx_pairs));
+
+       ant_for_each(ant) {
+               if (calib_bitmap & (1 << ant)) {
+                       calib_param->ant_tx_pairs[ant] = conf->ci_calib_ant_tx[ant - ant_shift];
+                       if (mode & SET_CHANNEL_MODE_CALIB_IQ)
+                               calib_param->ant_rx_pairs[ant] =
+                                       conf->ci_calib_ant_rx[ant - ant_shift];
+               }
+       }
+
+       if (IS_PHY_ATHOS(cl_hw->chip)) {
+               calib_param->conf.initial_rx_gain = CALIB_RX_GAIN_DEFAULT_ATHOS;
+               calib_param->conf.rx_gain_upper_limit = CALIB_RX_GAIN_UPPER_LIMIT_ATHOS;
+               calib_param->conf.rx_gain_lower_limit = CALIB_RX_GAIN_LOWER_LIMIT_ATHOS;
+       } else {
+               calib_param->conf.initial_rx_gain = CALIB_RX_GAIN_DEFAULT;
+               calib_param->conf.rx_gain_upper_limit = CALIB_RX_GAIN_UPPER_LIMIT;
+               calib_param->conf.rx_gain_lower_limit = CALIB_RX_GAIN_LOWER_LIMIT;
+       }
+
+       calib_param->conf.initial_tx_gain = CALIB_TX_GAIN_DEFAULT;
+       calib_param->conf.nco_freq = cpu_to_le16(CALIB_NCO_FREQ_DEFAULT);
+       calib_param->conf.nco_amp = CALIB_NCO_AMP_DEFAULT;
+       calib_param->conf.sleeve_trshld = GAIN_SLEEVE_TRSHLD_DEFAULT;
+       calib_param->conf.n_samples_exp_lolc = N_SAMPLES_EXP_LOLC;
+       calib_param->conf.n_samples_exp_iqc = N_SAMPLES_EXP_IQC;
+       calib_param->conf.p_thresh = cpu_to_le32(LO_P_THRESH);
+       calib_param->conf.n_bit_fir_scale = N_BIT_FIR_SCALE;
+       calib_param->conf.n_bit_amp_scale = N_BIT_AMP_SCALE;
+       calib_param->conf.n_bit_phase_scale = N_BIT_PHASE_SCALE;
+
+       cl_calib_iq_get_tone_vector(cl_hw->bw, calib_param->conf.tone_vector);
+
+       calib_param->conf.gp_rad_trshld = cpu_to_le32(GP_RAD_TRSHLD_DEFAULT);
+       calib_param->conf.ga_lin_upper_trshld = cpu_to_le32(GA_LIN_UPPER_TRSHLD_DEFAULT);
+       calib_param->conf.ga_lin_lower_trshld = cpu_to_le32(GA_LIN_LOWER_TRSHLD_DEFAULT);
+       calib_param->conf.comp_filter_len = COMP_FILTER_LEN_DEFAULT;
+       calib_param->conf.singletons_num = SINGLETONS_NUM_DEFAULT;
+       calib_param->conf.tones_num = IQ_NUM_TONES_REQ;
+       calib_param->conf.rampup_time = cpu_to_le16(RAMPUP_TIME);
+       calib_param->conf.lo_coarse_step = cpu_to_le16(LO_COARSE_STEP);
+       calib_param->conf.lo_fine_step = cpu_to_le16(LO_FINE_STEP);
+
+       calib_param->other_tcv.prim20_freq = cpu_to_le16(primary + SX_FREQ_OFFSET_Q2);
+       cl_phy_oly_lut_update(cl_hw->nl_band,
+                             center + SX_FREQ_OFFSET_Q2,
+                             &calib_param->other_tcv.center1_freq_lut);
+
+       if (cl_chip_is_both_enabled(cl_hw->chip)) {
+               calib_param->other_tcv.mask_tx_he = cl_hw_other->mask_num_antennas;
+               calib_param->other_tcv.num_tx_he = cl_hw_other->num_antennas;
+               calib_param->other_tcv.band = cl_band_to_fw_idx(cl_hw_other);
+       } else {
+               calib_param->other_tcv.mask_tx_he = cl_hw->mask_num_antennas;
+               calib_param->other_tcv.num_tx_he = cl_hw->num_antennas;
+               calib_param->other_tcv.band = cl_band_to_fw_idx(cl_hw);
+       }
+}
+
+int cl_msg_tx_start(struct cl_hw *cl_hw)
+{
+       struct mm_start_req *req;
+       struct cl_phy_cfg *phy_cfg;
+       struct cl_start_param *param;
+       struct cl_cca_config *cca_config;
+       struct dbg_meta_data *dbg_metadata;
+       struct cl_chip *chip = cl_hw->chip;
+       struct cl_tcv_conf *tcv_conf = cl_hw->conf;
+       struct cl_chip_conf *chip_conf = chip->conf;
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+       u8 bw = 0, ant = 0;
+
+       req = cl_msg_zalloc(cl_hw, MM_START_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       phy_cfg = &req->phy_cfg;
+       param = &req->param;
+       cca_config = &phy_cfg->cca_config;
+       dbg_metadata = &param->dbg_metadata;
+
+       phy_cfg->band = cl_band_to_fw_idx(cl_hw);
+       phy_cfg->channel_bandwidth = tcv_conf->ce_channel_bandwidth;
+       phy_cfg->ht_rxldpc_en = tcv_conf->ce_ht_rxldpc_en;
+       phy_cfg->freq_offset = cpu_to_le16(chip->eeprom_cache->calib.freq_offset);
+       phy_cfg->vns_tx_power_mode = chip_conf->ce_production_mode ? 0 : tcv_conf->ci_vns_pwr_mode;
+       phy_cfg->vns_rssi_suto_resp_th = tcv_conf->ci_vns_rssi_auto_resp_thr;
+       phy_cfg->afe_config_en = true;
+       phy_cfg->no_capture_noise_sleep = chip_conf->ci_no_capture_noise_sleep;
+       phy_cfg->gain_update_enable = tcv_conf->ci_gain_update_enable;
+       phy_cfg->mcs_sig_b = tcv_conf->ci_mcs_sig_b;
+       phy_cfg->ofdm_only = tcv_conf->ci_ofdm_only;
+       phy_cfg->hr_factor = tcv_conf->ci_hr_factor[phy_cfg->channel_bandwidth];
+       phy_cfg->td_csd_en = tcv_conf->ci_csd_en;
+       phy_cfg->pe_duration_bcast = tcv_conf->ci_pe_duration_bcast;
+       phy_cfg->tx_digital_gain = cpu_to_le32(tcv_conf->ci_tx_digital_gain);
+       phy_cfg->tx_digital_gain_cck = cpu_to_le32(tcv_conf->ci_tx_digital_gain_cck);
+       phy_cfg->ofdm_cck_power_offset = (u8)tcv_conf->ci_ofdm_cck_power_offset;
+       phy_cfg->phy_clk_gating_en = tcv_conf->ci_phy_clk_gating_en;
+
+       /*
+        * Set rx_sensitivity according to the number of antennas.
+        * For all remaining antennas set 0xff, which is equal to -1.
+        */
+       memcpy(phy_cfg->rx_sensitivity, cl_hw->rx_sensitivity, cl_hw->num_antennas);
+       if (cl_hw->num_antennas < ARRAY_SIZE(phy_cfg->rx_sensitivity))
+               memset(&phy_cfg->rx_sensitivity[cl_hw->num_antennas], U8_MAX,
+                      ARRAY_SIZE(phy_cfg->rx_sensitivity) - cl_hw->num_antennas);
+
+       if (!cl_hw->fw_send_start) {
+               cl_hw->fw_send_start = true;
+               phy_cfg->first_start = true;
+       }
+
+       cl_fill_ant_config(cl_hw, &phy_cfg->ant_config, cl_hw->num_antennas,
+                          cl_hw->mask_num_antennas, tcv_conf->ce_cck_tx_ant_mask,
+                          tcv_conf->ce_cck_rx_ant_mask);
+       cl_fill_fem_config(cl_hw, &phy_cfg->fem_conf);
+
+       cca_config->ed_rise_thr_dbm = (u8)tcv_conf->ci_cca_ed_rise_thr_dbm;
+       cca_config->ed_fall_thr_dbm = (u8)tcv_conf->ci_cca_ed_fall_thr_dbm;
+       cca_config->cs_en = tcv_conf->ci_cca_cs_en;
+       cca_config->modem_en = tcv_conf->ci_cca_modem_en;
+       cca_config->main_ant = tcv_conf->ci_cca_main_ant;
+       cca_config->second_ant = tcv_conf->ci_cca_second_ant;
+       cca_config->flag0_ctrl = tcv_conf->ci_cca_flag0_ctrl;
+       cca_config->flag1_ctrl = tcv_conf->ci_cca_flag1_ctrl;
+       cca_config->flag2_ctrl = tcv_conf->ci_cca_flag2_ctrl;
+       cca_config->flag3_ctrl = tcv_conf->ci_cca_flag3_ctrl;
+       cca_config->gi_rise_thr_dbm = (u8)tcv_conf->ci_cca_gi_rise_thr_dbm;
+       cca_config->gi_fall_thr_dbm = (u8)tcv_conf->ci_cca_gi_fall_thr_dbm;
+       cca_config->gi_pow_lim_dbm = (u8)tcv_conf->ci_cca_gi_pow_lim_dbm;
+       cca_config->ed_en = cpu_to_le16(tcv_conf->ci_cca_ed_en);
+       cca_config->gi_en = tcv_conf->ci_cca_gi_en;
+
+       param->prot_log_nav_en = tcv_conf->ce_prot_log_nav_en;
+       param->prot_mode = cl_prot_mode_get(cl_hw);
+       param->prot_rate_format = tcv_conf->ce_prot_rate_format;
+       param->prot_rate_mcs = tcv_conf->ce_prot_rate_mcs;
+       param->prot_rate_pre_type = tcv_conf->ce_prot_rate_pre_type;
+       param->bw_signaling_mode = tcv_conf->ce_bw_signaling_mode;
+       param->cfm_size = cpu_to_le16(IPC_CFM_SIZE);
+       param->cfm_dma_base_addr = cpu_to_le32(ipc_env->cfm_dma_base_addr);
+       param->phy_dev = cpu_to_le16(chip_conf->ci_phy_dev);
+       param->fw_scale_down = cpu_to_le16(CL_ASIC_FW_SCALEDOWN);
+       param->hal_timeout.idle = cpu_to_le32(tcv_conf->ci_hal_idle_to);
+       param->hal_timeout.ac0 = cpu_to_le32(tcv_conf->ci_tx_ac0_to);
+       param->hal_timeout.ac1 = cpu_to_le32(tcv_conf->ci_tx_ac1_to);
+       param->hal_timeout.ac2 = cpu_to_le32(tcv_conf->ci_tx_ac2_to);
+       param->hal_timeout.ac3 = cpu_to_le32(tcv_conf->ci_tx_ac3_to);
+       param->hal_timeout.bcn = cpu_to_le32(tcv_conf->ci_tx_bcn_to);
+
+       /* Update rxbuf/txqueue & ring_indices that hold the array metadata */
+       param->ipc_ring_indices_base = cpu_to_le32(ipc_env->ring_indices_elem->dma_addr);
+       param->host_rxbuf_base_addr[CL_RX_BUF_RXM] =
+               ipc_env->rx_hostbuf_array[CL_RX_BUF_RXM].dma_payload_base_addr;
+       param->host_rxbuf_base_addr[CL_RX_BUF_FW] =
+               ipc_env->rx_hostbuf_array[CL_RX_BUF_FW].dma_payload_base_addr;
+
+       /*
+        * The FW needs to be aware of the DMA addresses of the
+        * TX queues so it could fetch txdesc from the host.
+        */
+       param->ipc_host_tx_queues_dma_addr = cpu_to_le32(cl_hw->ipc_env->tx_queues.dma_addr);
+
+       /*
+        * Compilation flags match check - list here all compilation
+        * flags that must be enabled on both the driver and the firmware.
+        */
+       param->comp_flags = cpu_to_le32(BIT(CENX_CFG_CE_TX_CFM));
+
+       param->dbg_test_mode_max = DBG_TEST_MODE_MAX;
+
+       param->ipc_rxbuf_size[CL_RX_BUF_RXM] =
+               cpu_to_le16(tcv_conf->ci_ipc_rxbuf_size[CL_RX_BUF_RXM]);
+       param->ipc_rxbuf_size[CL_RX_BUF_FW] =
+               cpu_to_le16(tcv_conf->ci_ipc_rxbuf_size[CL_RX_BUF_FW]);
+
+       param->ipc_rxbuf_extra_headroom = cpu_to_le32(IPC_RXBUF_EXTRA_HEADROOM);
+       param->host_pci_gen_ver = chip_conf->ce_host_pci_gen_ver;
+       param->dma_lli_max_chan[0] = chip_conf->ci_dma_lli_max_chan[0];
+       param->dma_lli_max_chan[1] = chip_conf->ci_dma_lli_max_chan[1];
+       param->production_mode = chip_conf->ce_production_mode;
+       param->mult_ampdu_in_txop_en = tcv_conf->ci_mult_ampdu_in_txop_en;
+       param->cca_timeout = cpu_to_le32(tcv_conf->ci_cca_timeout);
+       param->long_retry_limit = tcv_conf->ce_long_retry_limit;
+       param->short_retry_limit = tcv_conf->ce_short_retry_limit;
+       param->assoc_auth_retry_limit = tcv_conf->ci_assoc_auth_retry_limit;
+       param->bcn_tx_path_min_time = cpu_to_le16(tcv_conf->ce_bcn_tx_path_min_time);
+       param->backup_bcn_en = tcv_conf->ci_backup_bcn_en;
+       param->tx_txop_cut_en = tcv_conf->ce_tx_txop_cut_en;
+       param->ac_with_bcns_flushed_cnt_thr = tcv_conf->ci_bcns_flushed_cnt_thr;
+       param->txl_statistics_struct_size = cpu_to_le32(sizeof(struct cl_txl_statistics));
+       param->rxl_statistics_struct_size = cpu_to_le32(sizeof(struct cl_rxl_statistics));
+       param->phy_err_prevents_phy_dump = tcv_conf->ci_phy_err_prevents_phy_dump;
+       param->tx_rx_delay = tcv_conf->ci_tx_rx_delay;
+       param->assert_storm_detect_thd = tcv_conf->ci_fw_assert_storm_detect_thd;
+       param->assert_time_diff_sec = tcv_conf->ci_fw_assert_time_diff_sec;
+       param->ps_ctrl_enabled = tcv_conf->ce_ps_ctrl_enabled;
+       param->phy_data_dma_addr = cpu_to_le32(cl_hw->phy_data_info.dma_addr);
+       param->phy_remote_rom_dma_addr = cpu_to_le32(cl_hw->fw_remote_rom.dma_addr);
+       param->iq_dcoc_calib_tables_dma_addr = cpu_to_le32(cl_hw->iq_dcoc_data_info.dma_addr);
+       param->power_table_dma_addr = cpu_to_le32(cl_hw->power_table_info.dma_addr);
+       param->tf_info_dma_addr = 0;
+       param->min_ant_pwr_q1 = cl_power_min_ant_q1(cl_hw);
+
+       for (bw = 0; bw < ARRAY_SIZE(param->bw_factor_q2); bw++) {
+               cl_hw->power_db.bw_factor_q2[bw] = cl_power_bw_factor_q2(cl_hw, bw);
+               param->bw_factor_q2[bw] =
+                       cl_convert_signed_to_reg_value(cl_hw->power_db.bw_factor_q2[bw]);
+       }
+
+       for (ant = 0; ant < ARRAY_SIZE(param->ant_factor_q2); ant++) {
+               cl_hw->power_db.ant_factor_q2[ant] = cl_power_array_gain_q2(cl_hw, ant + 1);
+               param->ant_factor_q2[ant] = cl_hw->power_db.ant_factor_q2[ant];
+       }
+
+       param->default_distance.auto_resp_all = tcv_conf->ci_distance_auto_resp_all;
+       param->default_distance.auto_resp_msta = tcv_conf->ci_distance_auto_resp_msta;
+       param->su_force_min_spacing_usec = tcv_conf->ci_su_force_min_spacing;
+       param->mu_force_min_spacing_usec = tcv_conf->ci_mu_force_min_spacing;
+       param->force_tcv0_only = false;
+       param->rx_padding = tcv_conf->ci_rx_padding_en;
+       param->bar_cap_disable = tcv_conf->ci_bar_disable;
+       param->hw_bsr = 0; /* FIXME */
+       param->drop_to_lower_bw = tcv_conf->ci_drop_to_lower_bw;
+       param->dra_enable = cl_chip_is_both_enabled(chip); /* DRA enable only in CDB mode */
+       param->mac_clk_gating_en = tcv_conf->ci_mac_clk_gating_en;
+       param->imaging_blocker = tcv_conf->ci_imaging_blocker;
+       param->fec_coding = tcv_conf->ce_he_rxldpc_en;
+       param->cs_required = tcv_conf->ci_cs_required;
+
+       if (!chip->fw_first_tcv) {
+               chip->fw_first_tcv = true;
+               param->first_tcv = true;
+       }
+
+       dbg_metadata->lmac_req_buf_size = cpu_to_le32(sizeof(struct dbg_error_trace_info_drv));
+       dbg_metadata->physical_queue_cnt = CL_MAX_BA_PHYSICAL_QUEUE_CNT;
+       dbg_metadata->agg_index_max = AGG_IDX_MAX;
+       dbg_metadata->ce_ac_max = CE_AC_MAX;
+       dbg_metadata->mu_user_max = MU_MAX_STREAMS;
+       dbg_metadata->txl_exch_trace_depth = DBG_TXL_FRAME_EXCH_TRACE_DEPTH;
+       dbg_metadata->mac_hw_regs_max = cpu_to_le16(HAL_MACHW_REG_NUM);
+       dbg_metadata->phy_hw_regs_max = cpu_to_le16(PHY_HW_DBG_REGS_CNT);
+       dbg_metadata->thd_chains_data_size = cpu_to_le16(DBG_THD_CHAINS_INFO_ARRAY_SIZE);
+       dbg_metadata->chains_info_elem_cnt = DBG_CHAINS_INFO_ELEM_CNT;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_version(struct cl_hw *cl_hw)
+{
+       void *void_param;
+
+       /* VERSION REQ has no parameter */
+       void_param = cl_msg_zalloc(cl_hw, MM_VERSION_REQ, TASK_MM, 0);
+       if (!void_param)
+               return -ENOMEM;
+
+       return cl_send_request(cl_hw, void_param);
+}
+
+int cl_msg_tx_add_if(struct cl_hw *cl_hw, struct ieee80211_vif *vif,
+                    u8 vif_index)
+{
+       struct mm_add_if_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_ADD_IF_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       cl_mac_addr_copy(req->addr.array, vif->addr);
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_P2P_CLIENT:
+               req->type = MM_STA;
+               break;
+
+       case NL80211_IFTYPE_ADHOC:
+               req->type = MM_IBSS;
+               break;
+
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_P2P_GO:
+               req->type = MM_AP;
+               break;
+
+       case NL80211_IFTYPE_MONITOR:
+               req->type = MM_MONITOR;
+               break;
+
+       case NL80211_IFTYPE_MESH_POINT:
+               req->type = MM_MESH_POINT;
+               break;
+
+       default:
+               req->type = MM_STA;
+               break;
+       }
+
+       req->tx_strip_vlan = 1;
+       req->mac_addr_hi_mask = cpu_to_le32(cl_hw->mask_hi);
+       req->mac_addr_low_mask = cpu_to_le32(cl_hw->mask_low);
+       req->inst_nbr = vif_index;
+
+       if (vif->type == NL80211_IFTYPE_AP) {
+               struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+               struct ps_data *ps = &sdata->u.ap.ps;
+
+               req->start_dtim_count = (u8)(ps->dtim_count);
+       }
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_remove_if(struct cl_hw *cl_hw, u8 vif_index)
+{
+       struct mm_remove_if_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_REMOVE_IF_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->inst_nbr = vif_index;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_sta_add(struct cl_hw *cl_hw, struct ieee80211_sta *sta,
+                     struct cl_vif *cl_vif, u8 recovery_sta_idx,
+                     u32 rate_ctrl_info)
+{
+       struct mm_sta_add_req *req;
+       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+       struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+       u16 my_aid = 0;
+       u8 inst_nbr = cl_vif->vif_index;
+       bool is_6g = cl_band_is_6g(cl_hw);
+       struct cl_sta *cl_sta = IEEE80211_STA_TO_CL_STA(sta);
+
+       req = cl_msg_zalloc(cl_hw, MM_STA_ADD_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       cl_mac_addr_copy(req->mac_addr.array, sta->addr);
+
+       if (cl_vif->vif->type == NL80211_IFTYPE_STATION)
+               my_aid = cl_vif->vif->bss_conf.aid;
+
+       if (is_6g) {
+               u8 phy_cap_info4 = he_cap->he_cap_elem.phy_cap_info[4];
+
+               req->su_bfee = (phy_cap_info4 & IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE) ? 1 : 0;
+               req->mu_bfee = (phy_cap_info4 & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER) ? 1 : 0;
+       } else if (vht_cap->vht_supported) {
+               req->su_bfee = (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) ? 1 : 0;
+               req->mu_bfee = (vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) ? 1 : 0;
+       }
+
+       req->ampdu_min_spacing = cl_sta->ampdu_min_spacing;
+
+       if (he_cap->has_he) {
+               u8 mac_cap_info1 = he_cap->he_cap_elem.mac_cap_info[1];
+               u8 mac_cap_info3 = he_cap->he_cap_elem.mac_cap_info[3];
+
+               req->he_tf_mac_padding_duration =
+                       (mac_cap_info1 & IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK);
+
+               req->he_rx_ctrl_frm_to_mbss =
+                       (mac_cap_info3 & IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS) ?
+                       true : false;
+
+               /* Fill PE duration table */
+               cl_cap_ppe_duration(cl_hw, sta, req->pe_duration);
+       }
+
+       cl_ampdu_size_exp(cl_hw, sta, &req->ampdu_size_exp_he,
+                         &req->ampdu_size_exp_vht, &req->ampdu_size_exp_ht);
+
+       if (cl_hw->conf->ce_txldpc_en) {
+               if (he_cap->has_he)
+                       req->ldpc_enabled = (he_cap->he_cap_elem.phy_cap_info[1] &
+                                            IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD) ? 1 : 0;
+               else if (vht_cap->vht_supported)
+                       req->ldpc_enabled = (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC) ? 1 : 0;
+               else if (ht_cap->ht_supported)
+                       req->ldpc_enabled = (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) ? 1 : 0;
+       }
+
+       /* TODO Set the interface index from the vif structure */
+       req->inst_nbr = inst_nbr;
+
+       req->aid = cpu_to_le16(sta->aid);
+       req->my_aid = cpu_to_le16(my_aid);
+       req->recovery_sta_idx = recovery_sta_idx;
+
+       /* Station power save configuration */
+       req->uapsd_queues = sta->uapsd_queues;
+       req->max_sp = sta->max_sp;
+
+       /* Set WRS default parameters for rate control */
+       req->tx_params.rate = cpu_to_le32(rate_ctrl_info);
+
+       /* Fill TX antenna with default value */
+       req->tx_params.ant_set = CL_DEF_ANT_BITMAP;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_sta_del(struct cl_hw *cl_hw, u8 sta_idx)
+{
+       struct mm_sta_del_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_STA_DEL_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->sta_idx = sta_idx;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_set_filter(struct cl_hw *cl_hw, u32 filter, bool force)
+{
+       struct mm_set_filter_req *req;
+       u32 rx_filter = 0;
+
+       if (cl_channel_is_scan_active(cl_hw)) {
+               cl_dbg_trace(cl_hw, "Set filter ignored due to active channel scan\n");
+               return 0;
+       }
+
+       if (force)
+               rx_filter = filter;
+       else
+               rx_filter = cl_rx_filter_update_flags(cl_hw, filter);
+
+       if (rx_filter == cl_hw->rx_filter) {
+               cl_dbg_trace(cl_hw, "Rx filter 0x%x already set - return\n", rx_filter);
+               return 0;
+       }
+
+       req = cl_msg_zalloc(cl_hw, MM_SET_FILTER_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       /* Now copy all the flags into the message parameter */
+       req->filter = cpu_to_le32(rx_filter);
+       cl_hw->rx_filter = rx_filter;
+
+       cl_dbg_trace(cl_hw, "new total_flags = 0x%08x, rx filter set to 0x%08x\n",
+                    filter, rx_filter);
+
+       return cl_send_request(cl_hw, req);
+}
+
+u8 cl_mark_calib_flags(struct cl_hw *cl_hw, u8 mode)
+{
+       int lna = 0;
+       int ant = 0;
+       u8 calib_info_set = 0;
+       struct cl_iq_dcoc_info *iq_dcoc_db = &cl_hw->phy_data_info.data->iq_dcoc_db;
+
+       /* In case DCOC is going to be calibrated, no need to raise any calibration flag. */
+       if (mode & SET_CHANNEL_MODE_CALIB_DCOC)
+               return calib_info_set;
+
+       /* Check if DCOC flag should be marked */
+       for (lna = 0; lna < ARRAY_SIZE(iq_dcoc_db->dcoc); lna++) {
+               for (ant = 0; ant < cl_hw->num_antennas; ant++) {
+                       if (iq_dcoc_db->dcoc[lna][ant].i || iq_dcoc_db->dcoc[lna][ant].q) {
+                               calib_info_set |= SET_PHY_DATA_FLAGS_DCOC;
+                               break;
+                       }
+               }
+       }
+
+       /* Check if IQ Tx LOLC flag should be marked */
+       for (ant = 0; ant < cl_hw->num_antennas; ant++) {
+               if (iq_dcoc_db->iq_tx_lolc[ant]) {
+                       calib_info_set |= SET_PHY_DATA_FLAGS_IQ_TX_LOLC;
+                       break;
+               }
+       }
+
+       /* Check if IQ Tx flag should be marked */
+       for (ant = 0; ant < cl_hw->num_antennas; ant++) {
+               if (iq_dcoc_db->iq_tx[ant].coef0 || iq_dcoc_db->iq_tx[ant].coef1 ||
+                   iq_dcoc_db->iq_tx[ant].coef2 || iq_dcoc_db->iq_tx[ant].gain) {
+                       calib_info_set |= SET_PHY_DATA_FLAGS_IQ_TX;
+                       break;
+               }
+       }
+
+       /* Check if IQ Rx flag should be marked */
+       for (ant = 0; ant < cl_hw->num_antennas; ant++) {
+               if (iq_dcoc_db->iq_rx[ant].coef0 || iq_dcoc_db->iq_rx[ant].coef1 ||
+                   iq_dcoc_db->iq_rx[ant].coef2 || iq_dcoc_db->iq_rx[ant].gain) {
+                       calib_info_set |= SET_PHY_DATA_FLAGS_IQ_RX;
+                       break;
+               }
+       }
+       return calib_info_set;
+}
+
+static int __cl_msg_tx_set_channel(struct cl_hw *cl_hw, u32 channel, u8 bw, u16 primary,
+                                  u16 center, u8 mode)
+{
+       struct mm_set_channel_req *req;
+       int res = 0;
+       struct cl_phy_data *data = cl_hw->phy_data_info.data;
+
+       /* Fill AGC parameters - check before we start building the message */
+       res = cl_agc_params_fill(cl_hw, &data->agc_params);
+       if (res)
+               return res;
+
+       req = cl_msg_zalloc(cl_hw, MM_SET_CHANNEL_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->band = cl_band_to_fw_idx(cl_hw);
+       req->bandwidth = bw;
+       req->prim20_freq = cpu_to_le16(primary);
+       cl_phy_oly_lut_update(cl_hw->nl_band, center, &req->center1_freq_lut);
+       req->hr_factor = cl_hw->conf->ci_hr_factor[bw];
+       req->signal_ext = cl_hw->conf->ci_signal_extension_en;
+
+       /* Set power per mcs offset after EIRP truncation */
+       cl_power_tables_update(cl_hw, &data->pwr_tables);
+
+       /* Get antenna power offset from eeprom */
+       cl_calib_power_offset_fill(cl_hw, channel, bw, req->ant_pwr_offset);
+
+       cl_calib_fill_phy_data(cl_hw, &data->iq_dcoc_db, SET_PHY_DATA_FLAGS_ALL);
+
+       if (mode == SET_CHANNEL_MODE_CALIB)
+               req->calib_info_set = SET_PHY_DATA_FLAGS_ALL;
+       else
+               req->calib_info_set = SET_PHY_DATA_FLAGS_NONE;
+
+       req->calib_param.mode = mode;
+
+       if (mode & (SET_CHANNEL_MODE_CALIB_LOLC | SET_CHANNEL_MODE_CALIB_IQ)) {
+               req->sx_freq_offset_mhz = SX_FREQ_OFFSET_Q2;
+               cl_fill_calib_config(cl_hw, &req->calib_param, primary, center, mode);
+       }
+
+       if (mode & SET_CHANNEL_MODE_CALIB_DCOC) {
+               if (IS_PHY_ATHOS(cl_hw->chip))
+                       req->calib_param.dcoc_max_vga = DCOC_MAX_VGA_ATHOS;
+               else
+                       req->calib_param.dcoc_max_vga = DCOC_MAX_VGA;
+       }
+
+       /* Antenna configuration */
+       cl_fill_ant_config(cl_hw, &req->ant_config, cl_hw->num_antennas, cl_hw->mask_num_antennas,
+                          cl_hw->conf->ce_cck_tx_ant_mask, cl_hw->conf->ce_cck_rx_ant_mask);
+       /* FEM configuration */
+       cl_fill_fem_config(cl_hw, &req->fem_conf);
+
+       res = cl_send_request(cl_hw, req);
+
+       cl_temperature_comp_update_calib(cl_hw);
+
+       cl_dbg_info(cl_hw,
+                   "band=%u, channel=%u, bw=%u, primary=%u.%u, center=%u.%u, sx_index=%u\n",
+                   cl_hw->conf->ci_band_num, channel, bw, GET_FREQ_INT(primary),
+                   GET_FREQ_FRAC(primary), GET_FREQ_INT(center), GET_FREQ_FRAC(center),
+                   cl_hw->tcv_idx);
+
+       return res;
+}
+
+int _cl_msg_tx_set_channel(struct cl_hw *cl_hw, u32 channel, u8 bw, u32 primary,
+                          u32 center, u8 mode)
+{
+       int res = 0;
+       u32 primary_q2 = FREQ_TO_Q2(primary);
+       u32 center_q2 = FREQ_TO_Q2(center);
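+       /*
+        * Q2 denotes a fixed-point format with two fractional bits;
+        * assuming FREQ_TO_Q2() shifts the MHz value left by two, the
+        * frequencies are carried in units of 0.25 MHz from here on.
+        */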
+
+       /*
+        * Take the mutex to ensure that no one touches the phy_data DMA
+        * before the firmware has read all of its values.
+        * The mutex is unlocked right after the iq_dcoc_data_info DMA is
+        * handled in cl_calib_handle_set_channel_cfm.
+        */
+       res = mutex_lock_interruptible(&cl_hw->set_channel_mutex);
+       if (res) {
+               cl_dbg_verbose(cl_hw, "Error - mutex_lock_interruptible (%d)\n", res);
+               return res;
+       }
+
+       cl_hw->channel = channel;
+       cl_hw->bw = bw;
+       cl_hw->primary_freq = primary;
+       cl_hw->center_freq = center;
+
+       if (mode & SET_CHANNEL_MODE_CALIB)
+               cl_hw->msg_calib_timeout = true;
+
+       res = __cl_msg_tx_set_channel(cl_hw, channel, bw, primary_q2, center_q2, mode);
+
+       if (mode & SET_CHANNEL_MODE_CALIB) {
+               cl_hw->msg_calib_timeout = false;
+
+               if (!res)
+                       res = cl_calib_handle_cfm(cl_hw, mode);
+       }
+
+       mutex_unlock(&cl_hw->set_channel_mutex);
+
+       return res;
+}
+
+int cl_msg_tx_set_channel(struct cl_hw *cl_hw, u32 channel, u8 bw, u32 primary, u32 center)
+{
+       if (cl_calib_is_needed(cl_hw, channel, bw))
+               return cl_calib_set_channel(cl_hw, channel, bw, primary, center);
+
+       return _cl_msg_tx_set_channel(cl_hw, channel, bw, primary, center,
+                                     SET_CHANNEL_MODE_OPERETIONAL);
+}
+
+int cl_msg_tx_dtim(struct cl_hw *cl_hw, u8 dtim_period)
+{
+       struct mm_set_dtim_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_SET_DTIM_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->dtim_period = dtim_period;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_set_beacon_int(struct cl_hw *cl_hw, u16 beacon_int, u8 vif_idx)
+{
+       struct mm_set_beacon_int_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_SET_BEACON_INT_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->beacon_int = cpu_to_le16(beacon_int);
+       req->inst_nbr = vif_idx;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_set_basic_rates(struct cl_hw *cl_hw, u32 basic_rates)
+{
+       struct mm_set_basic_rates_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_SET_BASIC_RATES_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->rates = cpu_to_le32(basic_rates);
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_set_bssid(struct cl_hw *cl_hw, const u8 *bssid, u8 vif_idx)
+{
+       struct mm_set_bssid_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_SET_BSSID_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       cl_mac_addr_copy(req->bssid.array, bssid);
+       req->inst_nbr = vif_idx;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_set_edca(struct cl_hw *cl_hw, u8 hw_queue, u32 param,
+                      struct ieee80211_he_mu_edca_param_ac_rec *mu_edca)
+{
+       struct mm_set_edca_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_SET_EDCA_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->ac_param = cpu_to_le32(param);
+       req->hw_queue = hw_queue;
+
+       if (mu_edca) {
+               req->mu_edca_aifsn = mu_edca->aifsn;
+               req->mu_edca_ecw_min_max = mu_edca->ecw_min_max;
+               req->mu_edca_timer = mu_edca->mu_edca_timer;
+       }
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_set_associated(struct cl_hw *cl_hw,
+                            struct ieee80211_bss_conf *bss_conf)
+{
+       struct mm_set_associated_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_SET_ASSOCIATED_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->aid = cpu_to_le16(bss_conf->aid);
+
+       /* Multiple BSSID feature support */
+       if (bss_conf->nontransmitted && bss_conf->assoc) {
+               u8 i = 0;
+               u8 mask_addr[ETH_ALEN] = {0};
+               u32 bssid_hi_mask = 0;
+               u32 bssid_low_mask = 0;
+
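+               /*
+                * The non-transmitted BSSID differs from the transmitter
+                * BSSID only in the XOR-ed bits below, so this mask lets
+                * the MAC accept frames addressed to either BSSID.
+                */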
+               for (i = 0; i < ARRAY_SIZE(mask_addr); i++)
+                       mask_addr[i] = (bss_conf->transmitter_bssid[i] ^
+                                       bss_conf->bssid[i]);
+               cl_mac_addr_array_to_nxmac(mask_addr, &bssid_low_mask,
+                                          &bssid_hi_mask);
+               /* Set mask to allow the transmitter BSSID Rx reception */
+               req->bssid_hi_mask = cpu_to_le32(bssid_hi_mask);
+               req->bssid_low_mask = cpu_to_le32(bssid_low_mask);
+       }
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_set_slottime(struct cl_hw *cl_hw, bool use_short_slot)
+{
+       struct mm_set_slottime_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_SET_SLOTTIME_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
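+       /* 802.11 slot time in microseconds: 9 (short slot) or 20 (long slot) */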
+       req->slottime = use_short_slot ? 9 : 20;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_set_idle(struct cl_hw *cl_hw, u8 idle)
+{
+       struct mm_set_idle_req *req;
+
+       if (cl_fem_read_wiring_id(cl_hw->chip)) {
+               cl_dbg_err(cl_hw, "!!! Invalid wiring id [%u] !!! Aborting\n",
+                          cl_hw->chip->fem.wiring_id);
+               return -EINVAL;
+       }
+
+       /*
+        * Rearm last_tbtt_irq so that an error message will not be
+        * printed in cl_irq_status_tbtt()
+        */
+       if (!idle)
+               cl_hw->last_tbtt_irq = jiffies;
+
+       req = cl_msg_zalloc(cl_hw, MM_SET_IDLE_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->hw_idle = idle;
+
+       cl_dbg_info(cl_hw, "idle = %s\n", idle ? "True" : "False");
+
+       return cl_send_request(cl_hw, req);
+}
+
+void cl_msg_tx_idle_async(struct cl_hw *cl_hw)
+{
+       cl_hw->idle_async_set = true;
+       cl_msg_tx_set_idle(cl_hw, MAC_IDLE_ASYNC);
+}
+
+int cl_msg_tx_key_add(struct cl_hw *cl_hw, struct ieee80211_vif *vif,
+                     struct ieee80211_sta *sta,
+                     struct ieee80211_key_conf *key_conf,
+                     u8 cipher_suite)
+{
+       struct mm_key_add_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_KEY_ADD_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       if (sta) {
+               /* Pairwise key */
+               req->sta_idx = ((struct cl_sta *)sta->drv_priv)->sta_idx;
+       } else {
+               /* Default key */
+               req->sta_idx = 0xFF;
+               req->key_idx = (u8)(key_conf->keyidx); /* Only useful for default keys */
+       }
+
+       req->inst_nbr = ((struct cl_vif *)vif->drv_priv)->vif_index;
+       req->key.length = key_conf->keylen;
+
+       /* TODO: check if this works well on big-endian platforms */
+       memcpy(req->key.array, key_conf->key, key_conf->keylen);
+
+       req->cipher_suite = cipher_suite;
+       req->spp = cl_hw->conf->ci_spp_ksr_value;
+
+       cl_dbg_info(cl_hw, "sta_idx:%u, key_idx:%u, inst_nbr:%u, cipher:%u, key_len:%u, spp:%u\n",
+                   req->sta_idx, req->key_idx, req->inst_nbr,
+                   req->cipher_suite, req->key.length, req->spp);
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_key_del(struct cl_hw *cl_hw, u8 hw_key_idx)
+{
+       struct mm_key_del_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_KEY_DEL_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->hw_key_idx = hw_key_idx;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_ba_add(struct cl_hw *cl_hw, u8 type, u8 sta_idx,
+                    u16 tid, u16 bufsz, u16 ssn)
+{
+       struct mm_ba_add_req *req;
+       u16 msg_id = ((type == BA_AGMT_TX) ? MM_BA_ADD_TX_REQ : MM_BA_ADD_RX_REQ);
+
+       req = cl_msg_zalloc(cl_hw, msg_id, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->type = type;
+       req->sta_idx = sta_idx;
+       req->tid = (u8)tid;
+       req->bufsz = cpu_to_le16(bufsz);
+       req->ssn = cpu_to_le16(ssn);
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_ba_del(struct cl_hw *cl_hw, u8 sta_idx, u16 tid)
+{
+       struct mm_ba_del_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_BA_DEL_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->sta_idx = sta_idx;
+       req->tid = (u8)tid;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_phy_reset(struct cl_hw *cl_hw)
+{
+       struct mm_phy_reset_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_PHY_RESET_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_available_ba_txq(struct cl_hw *cl_hw, u8 sta_idx, u16 tid)
+{
+       struct mm_available_ba_txq_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_AVAILABLE_BA_TXQ_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->sta_idx = sta_idx;
+       req->tid = (u8)tid;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_update_rate_dl(struct cl_hw *cl_hw, u8 sta_idx, u32 rate, u32 rate_fallback,
+                            u8 req_bw_tx, u8 op_mode, u8 ltf, u8 ltf_fallback, u32 rate_he)
+{
+       struct mm_update_rate_dl_req *req;
+
+       cl_dbg_info(cl_hw, "sta_idx=%u, rate=0x%x, rate_fallback=0x%x, req_bw_tx=%u, "
+                   "op_mode=%u, ltf=%u, ltf_fallback=%u, rate_he=0x%x\n",
+                   sta_idx, rate, rate_fallback, req_bw_tx, op_mode,
+                   ltf, ltf_fallback, rate_he);
+
+       req = cl_msg_zalloc(cl_hw, MM_UPDATE_RATE_DL_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       /* Populate tx_params */
+       req->tx_params.rate = cpu_to_le32(rate);
+       req->tx_params.rate_he = cpu_to_le32(rate_he);
+       req->tx_params.req_bw_tx = req_bw_tx;
+       req->tx_params.ant_set = CL_DEF_ANT_BITMAP;
+       req->tx_params.ltf = ltf;
+
+       req->op_mode = op_mode;
+       req->sta_idx = sta_idx;
+       req->rate_fallback = cpu_to_le32(rate_fallback);
+       req->ltf_fallback = ltf_fallback;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_set_vns(struct cl_hw *cl_hw, u8 sta_idx, u8 is_vns)
+{
+       struct mm_set_vns_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_SET_VNS_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->sta_idx = sta_idx;
+       req->is_vns = is_vns;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_set_tx_bf(struct cl_hw *cl_hw, u8 sta_idx, u8 is_on, u8 is_on_fallback)
+{
+       struct mm_set_tx_bf_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_SET_TX_BF_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->sta_idx = sta_idx;
+       req->is_on = is_on;
+       req->is_on_fallback = is_on_fallback;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_sounding(struct cl_hw *cl_hw,
+                      struct mm_sounding_req *sounding_req)
+{
+       struct mm_sounding_req *req;
+       u8 i;
+
+       req = cl_msg_zalloc(cl_hw, MM_SOUNDING_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       /* Populate mm_sounding_req */
+       memcpy(req, sounding_req, sizeof(struct mm_sounding_req));
+
+       /* In case of non-TB HE SU/CQI, nc should be set to 0 */
+       if (req->sounding_type == SOUNDING_TYPE_HE_CQI ||
+           req->sounding_type == SOUNDING_TYPE_HE_SU) {
+               for (i = 0; i < req->sta_num; i++)
+                       req->info_per_sta[i].nc = 0;
+       }
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_sounding_pairing(struct cl_hw *cl_hw, u8 sounding_id, u8 sounding_type,
+                              u8 gid, u8 sta_idx)
+{
+       struct mm_sounding_pairing *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_SOUNDING_PAIRING_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->sounding_type = sounding_type;
+       req->sta_idx = sta_idx;
+       req->gid = gid;
+       req->sounding_id = sounding_id;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_sounding_interval(struct cl_hw *cl_hw, u16 interval, u16 lifetime,
+                               u8 sounding_type, u8 sta_idx)
+{
+       struct mm_sounding_interval_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_SOUNDING_INTERVAL_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->interval = cpu_to_le16(interval);
+       req->bfr_lifetime = cpu_to_le16(lifetime);
+       req->sounding_type = sounding_type;
+       req->sta_idx = sta_idx;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_config_cca(struct cl_hw *cl_hw, bool enable)
+{
+       struct mm_config_cca_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_CONFIG_CCA_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->enable = enable;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_set_dfs(struct cl_hw *cl_hw, bool enable, u8 standard,
+                     u8 initial_gain, u8 agc_cd_th)
+{
+       struct mm_set_dfs_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_SET_DFS_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->enable = enable;
+       req->standard_fcc = (standard == CL_STANDARD_FCC);
+       req->initial_gain = initial_gain;
+       req->agc_cd_th = agc_cd_th;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_set_ant_bitmap(struct cl_hw *cl_hw, u8 bitmap)
+{
+       struct mm_set_ant_bitmap_req *req;
+       u8 num_antennas = hweight8(bitmap);
+       u8 bitmap_cck = 0;
+
+       req = cl_msg_zalloc(cl_hw, MM_SET_ANT_BITMAP_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       if (cl_band_is_24g(cl_hw)) {
+               if (num_antennas > MAX_ANTENNAS_CCK)
+                       bitmap_cck = copy_mask_bits(bitmap, MAX_ANTENNAS_CCK);
+               else
+                       bitmap_cck = bitmap;
+       }
+
+       cl_fill_ant_config(cl_hw, &req->ant_config, num_antennas, bitmap, bitmap_cck, bitmap_cck);
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_ndp_tx_control(struct cl_hw *cl_hw, u8 chain_mask, u8 bw, u8 format, u8 num_ltf)
+{
+       struct mm_ndp_tx_control_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_NDP_TX_CONTROL_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->chain_mask = chain_mask;
+       req->bw = bw;
+       req->format = format;
+       req->num_ltf = num_ltf;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_reg_write(struct cl_hw *cl_hw, u32 address, u32 value, u32 mask)
+{
+       struct mm_reg_write_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_REG_WRITE_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->address = cpu_to_le32(address);
+       req->value = cpu_to_le32(value);
+       req->mask = cpu_to_le32(mask);
+
+       cl_dbg_info(cl_hw, "address=0x%x, value=0x%x, mask=0x%x\n", address, value, mask);
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_prot_mode(struct cl_hw *cl_hw, u8 log_nav_en, u8 mode, u8 rate_format,
+                       u8 rate_mcs, u8 rate_pre_type)
+{
+       struct mm_prot_mode_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_PROT_MODE_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->log_nav_en = log_nav_en;
+       req->mode = mode;
+       req->rate_format = rate_format;
+       req->rate_mcs = rate_mcs;
+       req->rate_pre_type = rate_pre_type;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_goto_power_reduction(struct cl_hw *cl_hw, u8 mode)
+{
+       struct mm_goto_power_reduction_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_GOTO_POWER_REDUCTION_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->goto_power_reduction_mode = mode;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_backup_bcn_en(struct cl_hw *cl_hw, bool backup_bcn_en)
+{
+       struct mm_set_backup_bcn_en_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_BACKUP_BCN_EN_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->backup_bcn_en = backup_bcn_en;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_start_periodic_tx_time(struct cl_hw *cl_hw, u16 periodic_tx_time_off,
+                                    u16 periodic_tx_time_on)
+{
+       struct mm_start_periodic_tx_time_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_START_PERIODIC_TX_TIME_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->periodic_tx_time_off = cpu_to_le16(periodic_tx_time_off);
+       req->periodic_tx_time_on = cpu_to_le16(periodic_tx_time_on);
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_anamon_read(struct cl_hw *cl_hw, u8 mode, u8 param1, u8 param2)
+{
+       struct mm_anamon_read_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_ANAMON_READ_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->mode = mode;
+       req->param1 = param1;
+       req->param2 = param2;
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_refresh_power(struct cl_hw *cl_hw)
+{
+       void *void_param;
+
+       /* MM_REFRESH_PWR_REQ has no parameter */
+       void_param = cl_msg_zalloc(cl_hw, MM_REFRESH_PWR_REQ, TASK_MM, 0);
+       if (!void_param)
+               return -ENOMEM;
+
+       return cl_send_request(cl_hw, void_param);
+}
+
+int cl_msg_tx_set_ant_pwr_offset(struct cl_hw *cl_hw, s8 pwr_offset[MAX_ANTENNAS])
+{
+       struct mm_set_ant_pwr_offset_req *req;
+       u8 i = 0;
+
+       req = cl_msg_zalloc(cl_hw, MM_SET_ANT_PWR_OFFSET_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       for (i = 0; i < ARRAY_SIZE(req->pwr_offset); i++) {
+               pwr_offset[i] = cl_power_offset_check_margin(cl_hw, cl_hw->bw, i, pwr_offset[i]);
+               req->pwr_offset[i] = cl_convert_signed_to_reg_value(pwr_offset[i]);
+       }
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_set_rate_fallback(struct cl_hw *cl_hw)
+{
+       struct mm_rate_fallback_req *req;
+       u8 *fb_conf = cl_hw->conf->ci_rate_fallback;
+
+       req = cl_msg_zalloc(cl_hw, MM_SET_RATE_FALLBACK_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->fallback_count_su = fb_conf[CL_RATE_FALLBACK_COUNT_SU];
+       req->fallback_count_mu = fb_conf[CL_RATE_FALLBACK_COUNT_MU];
+       req->retry_count_thr = fb_conf[CL_RATE_FALLBACK_RETRY_COUNT_THR];
+       req->ba_per_thr = fb_conf[CL_RATE_FALLBACK_BA_PER_THR];
+       req->ba_not_received_thr = fb_conf[CL_RATE_FALLBACK_BA_NOT_RECEIVED_THR];
+       req->disable_mcs0 = fb_conf[CL_RATE_FALLBACK_DISABLE_MCS];
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_twt_setup(struct cl_hw *cl_hw, struct mm_twt_setup_req *params)
+{
+       struct mm_twt_setup_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_TWT_SETUP_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->sta_idx = params->sta_idx;
+       req->twt_flow_id = params->twt_flow_id;
+       req->announced = params->announced;
+       req->triggered = params->triggered;
+       req->min_wake_duration_us = cpu_to_le32(params->min_wake_duration_us);
+       req->twt_interval_us = cpu_to_le64(params->twt_interval_us);
+       req->twt_start_time_tsf = cpu_to_le64(params->twt_start_time_tsf);
+
+       cl_dbg_info(cl_hw,
+                   "sta_idx %u, flow_id %u, interval_us %llu, min_wake_duration_us %u, "
+                   "start_time %llu, announced %u, triggered %u\n",
+                   params->sta_idx, params->twt_flow_id, params->twt_interval_us,
+                   params->min_wake_duration_us, params->twt_start_time_tsf,
+                   params->announced, params->triggered);
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_twt_teardown(struct cl_hw *cl_hw, struct mm_twt_teardown_req *params)
+{
+       struct mm_twt_teardown_req *req;
+
+       req = cl_msg_zalloc(cl_hw, MM_TWT_TEARDOWN_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->sta_idx = params->sta_idx;
+       req->twt_flow_id = params->twt_flow_id;
+
+       cl_dbg_info(cl_hw, "sta_idx %u, flow_id %u\n",
+                   req->sta_idx, req->twt_flow_id);
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_rsrc_mgmt_traffic_event(struct cl_hw *cl_hw, u8 event_type,
+                                     enum cl_traffic_level level,
+                                     enum cl_traffic_direction direction,
+                                     u8 active_sta_cnt)
+{
+       struct mm_rsrc_mgmt_req *req = NULL;
+       struct cl_sta *cl_sta = NULL;
+       int curr_cnt = 0;
+       size_t size = sizeof(*req) + active_sta_cnt * sizeof(struct mm_rsrc_mgmt_active_sta);
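+       /*
+        * The request ends with a variable-length array of per-STA
+        * records, so reserve room for active_sta_cnt entries up front.
+        */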
+
+       req = cl_msg_zalloc(cl_hw, MM_RSRC_MGMT_REQ, TASK_MM, size);
+       if (!req)
+               return -ENOMEM;
+
+       req->subtype = event_type;
+       req->u.traffic_event.level = level;
+       req->u.traffic_event.direction = direction;
+
+       cl_sta_lock_bh(cl_hw);
+       list_for_each_entry(cl_sta, &cl_hw->cl_sta_db.head, list) {
+               struct cl_wrs_rate *max_rate_cap = &cl_sta->wrs_sta.max_rate_cap;
+               struct cl_wrs_tx_params *su_tx_params = &cl_sta->wrs_sta.su_params.tx_params;
+
+               if (!cl_sta->traffic_db[direction].activity_db[level].is_active)
+                       continue;
+
+               if (curr_cnt == active_sta_cnt) {
+                       WARN_ONCE(active_sta_cnt != 0,
+                                 "Synchronization failure between actual and "
+                                 "preallocated station entities!");
+                       break;
+               }
+
+               req->u.traffic_event.active_sta.list[curr_cnt] = (struct mm_rsrc_mgmt_active_sta) {
+                   .idx = cl_sta->sta_idx,
+                   .su_rate = {
+                       .bw = su_tx_params->bw,
+                       .nss = su_tx_params->nss,
+                       .mcs = su_tx_params->mcs
+                   },
+                   .max_rate = {
+                       .bw = max_rate_cap->bw,
+                       .nss = max_rate_cap->nss,
+                       .mcs = max_rate_cap->mcs
+                   },
+               };
+
+               curr_cnt++;
+       }
+       req->u.traffic_event.active_sta.cnt = curr_cnt;
+       cl_sta_unlock_bh(cl_hw);
+
+       return cl_send_request(cl_hw, req);
+}
+
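+/* Report a single station's current SU rate and maximum rate capability */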
+int cl_msg_tx_rsrc_mgmt_rates_event(struct cl_hw *cl_hw, u8 event_type,
+                                   struct cl_sta *cl_sta)
+{
+       struct mm_rsrc_mgmt_req *req;
+       struct cl_wrs_rate *max_rate_cap = &cl_sta->wrs_sta.max_rate_cap;
+       struct cl_wrs_tx_params *su_tx_params = &cl_sta->wrs_sta.su_params.tx_params;
+
+       req = cl_msg_zalloc(cl_hw, MM_RSRC_MGMT_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->subtype = event_type;
+       req->u.rate_event.sta = (struct mm_rsrc_mgmt_active_sta) {
+           .idx = cl_sta->sta_idx,
+           .su_rate = {
+               .bw = su_tx_params->bw,
+               .nss = su_tx_params->nss,
+               .mcs = su_tx_params->mcs
+           },
+           .max_rate = {
+               .bw = max_rate_cap->bw,
+               .nss = max_rate_cap->nss,
+               .mcs = max_rate_cap->mcs
+           },
+       };
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_set_freq_offset(struct cl_hw *cl_hw, u16 val)
+{
+       struct mm_set_freq_offset_req *req;
+
+       /* Build the MM_SET_FREQ_OFFSET_REQ message */
+       req = cl_msg_zalloc(cl_hw, MM_SET_FREQ_OFFSET_REQ, TASK_MM, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       /* Set parameters for the MM_SET_FREQ_OFFSET_REQ message */
+       req->val = cpu_to_le16(val);
+
+       /* Send the MM_SET_FREQ_OFFSET_REQ message to firmware */
+       return cl_send_request(cl_hw, req);
+}
+
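+/* Set the firmware debug-trace module filter */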
+int cl_msg_tx_dbg_set_mod_filter(struct cl_hw *cl_hw, u32 filter)
+{
+       struct dbg_set_mod_filter_req *req;
+
+       req = cl_msg_zalloc(cl_hw, DBG_SET_MOD_FILTER_REQ, TASK_DBG, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->mod_filter = cpu_to_le32(filter);
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_dbg_set_ce_mod_filter(struct cl_hw *cl_hw, u32 filter)
+{
+       struct dbg_set_mod_filter_req *req;
+
+       req = cl_msg_zalloc(cl_hw, DBG_CE_SET_MOD_FILTER_REQ, TASK_DBG, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->mod_filter = cpu_to_le32(filter);
+
+       return cl_send_request(cl_hw, req);
+}
+
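+/* Set the firmware debug-trace severity filter */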
+int cl_msg_tx_dbg_set_sev_filter(struct cl_hw *cl_hw, u32 filter)
+{
+       struct dbg_set_sev_filter_req *req;
+
+       req = cl_msg_zalloc(cl_hw, DBG_SET_SEV_FILTER_REQ, TASK_DBG, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->sev_filter = cpu_to_le32(filter);
+
+       return cl_send_request(cl_hw, req);
+}
+
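+/* Issue a beamforming TX debug command */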
+int cl_msg_tx_dbg_beamforming_tx(struct cl_hw *cl_hw, u32 param32)
+{
+       struct dbg_beamforming_tx_req *req;
+
+       req = cl_msg_zalloc(cl_hw, DBG_BEAMFORMING_TX_REQ, TASK_DBG, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->bf_cmd = cpu_to_le32(param32);
+
+       return cl_send_request(cl_hw, req);
+}
+
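+/* Fetch the firmware E2W statistics, optionally clearing them */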
+int cl_msg_tx_dbg_get_e2w_stats(struct cl_hw *cl_hw, bool clear)
+{
+       struct dbg_e2w_stats_req *req;
+
+       req = cl_msg_zalloc(cl_hw, DBG_GET_E2W_STATS_REQ, TASK_DBG, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->clear = cpu_to_le32(clear);
+
+       return cl_send_request(cl_hw, req);
+}
+
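+/*
+ * The DBG_SET_LA_* requests below configure the embedded logic analyzer (LA):
+ * the MPIF signal mask to capture, the trigger point, the MPIF debug mode and
+ * the trigger rules.
+ */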
+int cl_msg_tx_dbg_set_la_mpif_mask(struct cl_hw *cl_hw, u32 mask)
+{
+       struct dbg_set_la_mpif_mask_req *req;
+
+       req = cl_msg_zalloc(cl_hw, DBG_SET_LA_MPIF_MASK_REQ, TASK_DBG, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->mpif_mask = cpu_to_le32(mask);
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_dbg_set_la_trig_point(struct cl_hw *cl_hw, u32 trigger_point)
+{
+       struct dbg_set_la_trig_point_req *req;
+
+       req = cl_msg_zalloc(cl_hw, DBG_SET_LA_TRIG_POINT_REQ, TASK_DBG, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->trigger_point = cpu_to_le32(trigger_point);
+
+       return cl_send_request(cl_hw, req);
+}
+
+int cl_msg_tx_dbg_set_la_mpif_debug_mode(struct cl_hw *cl_hw, u8 mode)
+{
+       struct dbg_set_la_mpif_debug_mode_req *req;
+
+       req = cl_msg_zalloc(cl_hw, DBG_SET_LA_MPIF_DEBUG_MODE_REQ, TASK_DBG,
+                           sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->mode = mode;
+
+       return cl_send_request(cl_hw, req);
+}
+
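+/* Program a single LA trigger rule (address/value/mask comparison applied by firmware) */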
+int cl_msg_tx_dbg_set_la_trig_rule(struct cl_hw *cl_hw, u8 idx, bool enable, u32 address,
+                                  u8 oper, u32 value, u32 mask, u32 duration)
+{
+       struct dbg_set_la_trig_rule_req *req;
+
+       req = cl_msg_zalloc(cl_hw, DBG_SET_LA_TRIG_RULE_REQ, TASK_DBG, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->rule_id = idx;
+       req->oper = oper;
+       req->enable = enable;
+       req->address = cpu_to_le32(address);
+       req->value = cpu_to_le32(value);
+       req->mask = cpu_to_le32(mask);
+       req->duration = cpu_to_le32(duration);
+
+       return cl_send_request(cl_hw, req);
+}
+
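+/* Read or write the TX trace debug-flag bitmap; w_r_cmd selects the direction */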
+int cl_msg_tx_dbg_tx_trace_debug_flag(struct cl_hw *cl_hw, u32 bitmap, u8 w_r_cmd)
+{
+       struct dbg_tx_trace_debug_flag_req *req;
+
+       req = cl_msg_zalloc(cl_hw, DBG_TX_TRACE_DEBUG_FLAG_REQ, TASK_DBG, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->read_write_flag = w_r_cmd;
+       req->bitmap = cpu_to_le32(bitmap);
+
+       return cl_send_request(cl_hw, req);
+}
+
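+/* Ask firmware to print a statistics block; command selects the block and param0..3 qualify it */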
+int cl_msg_tx_dbg_print_stats(struct cl_hw *cl_hw, u32 command,
+                             u32 param0, u32 param1, u32 param2, u32 param3)
+{
+       struct dbg_print_stats_req *req;
+
+       req = cl_msg_zalloc(cl_hw, DBG_PRINT_STATS_REQ, TASK_DBG, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->command = cpu_to_le32(command);
+       req->param[0] = cpu_to_le32(param0);
+       req->param[1] = cpu_to_le32(param1);
+       req->param[2] = cpu_to_le32(param2);
+       req->param[3] = cpu_to_le32(param3);
+
+       cl_dbg_verbose(cl_hw, "param0 = 0x%x, param1 = 0x%x, param2 = 0x%x, param3 = 0x%x\n",
+                      req->param[0], req->param[1], req->param[2], req->param[3]);
+
+       return cl_send_request(cl_hw, req);
+}
+
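+/* Trigger a firmware debug/error event, tagged with the supplied message */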
+int cl_msg_tx_dbg_trigger(struct cl_hw *cl_hw, char *msg)
+{
+       struct dbg_trigger_req *req;
+       size_t msg_len = min(strlen(msg), sizeof(req->error) - 1);
+
+       req = cl_msg_zalloc(cl_hw, DBG_TRIGGER_REQ, TASK_DBG, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       strncpy(req->error, msg, msg_len);
+
+       return cl_send_request(cl_hw, req);
+}
+
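+/* Run a firmware test-mode command: params[0] is the command, the rest are its arguments */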
+int cl_msg_tx_dbg_test_mode(struct cl_hw *cl_hw, u32 *params)
+{
+       struct dbg_test_mode_req *req;
+       int i;
+
+       req = cl_msg_zalloc(cl_hw, DBG_TEST_MODE_REQ, TASK_DBG, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       req->command = cpu_to_le32(params[0]);
+
+       /* params[0] holds the command, so the remaining arguments start at params[1] */
+       for (i = 0; i < ARRAY_SIZE(req->params); i++)
+               req->params[i] = cpu_to_le32(params[i + 1]);
+
+       return cl_send_request(cl_hw, req);
+}
+
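+/* Forward a channel-sounding debug command, fixing up the multi-byte fields for endianness */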
+int cl_msg_tx_dbg_sounding_cmd(struct cl_hw *cl_hw, struct dbg_sounding_cmd_param *params)
+{
+       struct dbg_sounding_cmd_param *req;
+       int i;
+
+       req = cl_msg_zalloc(cl_hw, DBG_SOUNDING_CMD_REQ, TASK_DBG, sizeof(*req));
+       if (!req)
+               return -ENOMEM;
+
+       memcpy(req, params, sizeof(*req));
+       req->sounding_cmd_index = cpu_to_le32(params->sounding_cmd_index);
+
+       for (i = 0; i < ARRAY_SIZE(req->param); i++)
+               req->param[i] = cpu_to_le32(params->param[i]);
+
+       return cl_send_request(cl_hw, req);
+}