
[RFC,v1,019/256] cl8k: add bus/pci/irq.c

Message ID 20210617160223.160998-20-viktor.barna@celeno.com (mailing list archive)
State RFC
Delegated to: Kalle Valo
Series wireless: cl8k driver for Celeno IEEE 802.11ax devices

Commit Message

Viktor Barna June 17, 2021, 3:58 p.m. UTC
From: Viktor Barna <viktor.barna@celeno.com>

(Part of the split. Please take a look at the cover letter for more
details).

Signed-off-by: Viktor Barna <viktor.barna@celeno.com>
---
 .../net/wireless/celeno/cl8k/bus/pci/irq.c    | 331 ++++++++++++++++++
 1 file changed, 331 insertions(+)
 create mode 100644 drivers/net/wireless/celeno/cl8k/bus/pci/irq.c

--
2.30.0

Patch

diff --git a/drivers/net/wireless/celeno/cl8k/bus/pci/irq.c b/drivers/net/wireless/celeno/cl8k/bus/pci/irq.c
new file mode 100644
index 000000000000..8ef5d2dba9ac
--- /dev/null
+++ b/drivers/net/wireless/celeno/cl8k/bus/pci/irq.c
@@ -0,0 +1,331 @@ 
+// SPDX-License-Identifier: MIT
+/* Copyright(c) 2019-2021, Celeno Communications Ltd. */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include "reg/reg_access.h"
+#include "reg/reg_ipc.h"
+#include "bus/pci/ipc.h"
+#include "fw/msg_rx.h"
+#include "bus/pci/irq.h"
+#include "chip.h"
+#include "hw.h"
+#include "tx/tx.h"
+#include "dfs/radar.h"
+#include "recovery.h"
+#include "radio.h"
+#include "bus/pci/rx_pci.h"
+
+static void cl_irq_status_rxdesc(struct cl_hw *cl_hw, struct cl_ipc_host_env *ipc_env)
+{
+       /* Handle the reception of an Rx descriptor */
+
+       /*
+        * Disable the RX interrupt until the rx element/skb has been handled.
+        * This avoids a redundant context switch and redundant tasklet scheduling.
+        */
+       cl_irq_disable(cl_hw, cl_hw->ipc_e2a_irq.rxdesc);
+
+       /* Acknowledge the interrupt BEFORE handling the packet */
+       ipc_xmac_2_host_ack_set(cl_hw->chip, cl_hw->ipc_e2a_irq.rxdesc);
+
+       /*
+        * If more than 50% of the buffers are populated, handle them in the
+        * interrupt; otherwise schedule a tasklet to handle the buffers.
+        */
+       if (cl_rx_process_in_irq(cl_hw))
+               cl_rx_pci_desc_handler(cl_hw);
+       else
+               tasklet_schedule(&ipc_env->rxdesc_tasklet);
+}
+
+static void cl_irq_status_txcfm(struct cl_hw *cl_hw, struct cl_ipc_host_env *ipc_env)
+{
+       /*
+        * Disable the TXCFM interrupt bit - will be enabled
+        * at the end of cl_tx_pci_single_cfm_tasklet()
+        */
+       cl_irq_disable(cl_hw, cl_hw->ipc_e2a_irq.txcfm);
+
+       /* Acknowledge the TXCFM interrupt */
+       ipc_xmac_2_host_ack_set(cl_hw->chip, cl_hw->ipc_e2a_irq.txcfm);
+
+       /* Schedule tasklet to handle the TXCFM */
+       tasklet_schedule(&ipc_env->tx_single_cfm_tasklet);
+}
+
+static void cl_irq_status_tbtt(struct cl_hw *cl_hw)
+{
+       unsigned long tbtt_diff_msec = jiffies_to_msecs(jiffies - cl_hw->last_tbtt_irq);
+
+       /* Acknowledge the interrupt BEFORE handling the request */
+       ipc_xmac_2_host_ack_set(cl_hw->chip, cl_hw->ipc_e2a_irq.tbtt);
+
+       cl_hw->last_tbtt_irq = jiffies;
+       cl_hw->tbtt_cnt++;
+
+       /*
+        * Send beacons only if the radio is on, at least one AP interface is up,
+        * we aren't in the middle of a recovery, and the user didn't disable them.
+        */
+       if (cl_radio_is_off(cl_hw) ||
+           cl_hw->vif_db.num_iface_bcn == 0 ||
+           cl_recovery_in_progress(cl_hw) ||
+           cl_hw->tx_disable_flags ||
+           !test_bit(CL_DEV_STARTED, &cl_hw->drv_flags) ||
+           test_bit(CL_DEV_FW_ERROR, &cl_hw->drv_flags))
+               return;
+
+       if (cl_hw->iface_conf == CL_IFCONF_MESH_ONLY ||
+           (cl_hw->mesh_tbtt_div > 1 &&
+           ((cl_hw->tbtt_cnt % cl_hw->mesh_tbtt_div) == 0))) {
+               tasklet_hi_schedule(&cl_hw->tx_mesh_bcn_task);
+       } else {
+               /*
+                * More than 2 times the beacon interval passed between beacons - WARNING
+                * More than 3 times the beacon interval passed between beacons - ERROR
+                */
+               if (tbtt_diff_msec > (cl_hw->conf->ha_beacon_int * 3))
+                       cl_dbg_err(cl_hw, "last_tbtt_irq=%lu, curr_time=%lu, diff=%lu\n",
+                                  cl_hw->last_tbtt_irq, jiffies, tbtt_diff_msec);
+               else if (tbtt_diff_msec > (cl_hw->conf->ha_beacon_int * 2))
+                       cl_dbg_warn(cl_hw, "last_tbtt_irq=%lu, curr_time=%lu, diff=%lu\n",
+                                   cl_hw->last_tbtt_irq, jiffies, tbtt_diff_msec);
+       }
+
+       cl_tx_bcns(cl_hw);
+}
+
+static void cl_irq_status_msg(struct cl_hw *cl_hw, struct cl_ipc_host_env *ipc_env)
+{
+       /* Acknowledge the interrupt BEFORE handling the request */
+       ipc_xmac_2_host_ack_set(cl_hw->chip, cl_hw->ipc_e2a_irq.msg);
+
+       /* Schedule tasklet to handle the messages */
+       tasklet_schedule(&ipc_env->msg_tasklet);
+}
+
+static int cl_radar_handler(struct cl_hw *cl_hw, ptrdiff_t hostid)
+{
+       struct cl_radar_elem *radar_elem = (struct cl_radar_elem *)hostid;
+       int ret = 0;
+       struct cl_radar_pulse_array *pulses;
+
+       /* Retrieve the radar pulses structure */
+       pulses = (struct cl_radar_pulse_array *)radar_elem->radarbuf_ptr;
+
+       /* A non-zero pulse count means this hostbuf contains radar pulses */
+       if (pulses->cnt == 0) {
+               ret = -1;
+               goto radar_no_push;
+       }
+
+       /* Push pulse information to queue and schedule a tasklet to handle it */
+       cl_radar_push(cl_hw, radar_elem);
+
+       /* Reset the radar element and re-use it */
+       pulses->cnt = 0;
+
+       /* Make sure memory is written before push to HW */
+       wmb();
+
+       /* Push back the buffer to the firmware */
+       cl_ipc_radarbuf_push(cl_hw->ipc_env, (ptrdiff_t)radar_elem, radar_elem->dma_addr);
+
+radar_no_push:
+       return ret;
+}
+
+static void cl_irq_status_radar(struct cl_hw *cl_hw, struct cl_ipc_host_env *ipc_env)
+{
+       /*
+        * The firmware has raised an interrupt indicating that a radar event has
+        * been sent to the upper layer. We first need to check the validity of the
+        * current msg buf, and of the next buffers too, because several buffers may
+        * have been filled within the time needed to handle this irq.
+        */
+
+       /* Disable the RADAR interrupt bit - will be enabled at the end of cl_radar_tasklet() */
+       cl_irq_disable(cl_hw, cl_hw->ipc_e2a_irq.radar);
+
+       /* Acknowledge the RADAR interrupt */
+       ipc_xmac_2_host_ack_set(cl_hw->chip, cl_hw->ipc_e2a_irq.radar);
+
+       /* Push all new radar pulses to queue */
+       while (cl_radar_handler(cl_hw,
+                               ipc_env->radar_hostbuf_array[ipc_env->radar_host_idx].hostid) == 0)
+               ;
+
+       /* Schedule tasklet to handle the radar pulses */
+       cl_radar_tasklet_schedule(cl_hw);
+}
+
+static void cl_irq_status_dbg(struct cl_hw *cl_hw, struct cl_ipc_host_env *ipc_env)
+{
+       /* Disable the DBG interrupt bit - will be enabled at the end of cl_dbgfile_tasklet() */
+       cl_irq_disable(cl_hw, cl_hw->ipc_e2a_irq.dbg);
+
+       /* Acknowledge the DBG interrupt */
+       ipc_xmac_2_host_ack_set(cl_hw->chip, cl_hw->ipc_e2a_irq.dbg);
+
+       /* Schedule tasklet to handle the debug */
+       tasklet_schedule(&ipc_env->dbg_tasklet);
+}
+
+static void cl_irq_status_txdesc_ind(struct cl_hw *cl_hw, struct cl_ipc_host_env *ipc_env)
+{
+       /*
+        * Disable the TXDESC_IND interrupt bit -
+        * will be enabled at the end of cl_tx_pci_agg_cfm_tasklet()
+        */
+       cl_irq_disable(cl_hw, cl_hw->ipc_e2a_irq.txdesc_ind);
+
+       /* Acknowledge the TXDESC_IND interrupt */
+       ipc_xmac_2_host_ack_set(cl_hw->chip, cl_hw->ipc_e2a_irq.txdesc_ind);
+
+       tasklet_schedule(&ipc_env->tx_agg_cfm_tasklet);
+       tasklet_schedule(&cl_hw->tx_task);
+}
+
+static void cl_irq_status_sync(struct cl_hw *cl_hw, struct cl_ipc_host_env *ipc_env)
+{
+       /* Acknowledge the interrupt BEFORE handling the request */
+       ipc_xmac_2_host_ack_set(cl_hw->chip, cl_hw->ipc_e2a_irq.sync);
+
+       set_bit(CL_DEV_FW_SYNC, &cl_hw->drv_flags);
+       wake_up_interruptible(&cl_hw->fw_sync_wq);
+}
+
+void cl_irq_status(struct cl_hw *cl_hw, u32 status)
+{
+       /* Handle all IPC interrupts on the host side */
+       struct cl_ipc_host_env *ipc_env = cl_hw->ipc_env;
+
+       if (status & cl_hw->ipc_e2a_irq.rxdesc)
+               cl_irq_status_rxdesc(cl_hw, ipc_env);
+
+       if (status & cl_hw->ipc_e2a_irq.txcfm)
+               cl_irq_status_txcfm(cl_hw, ipc_env);
+
+       if (status & cl_hw->ipc_e2a_irq.tbtt)
+               cl_irq_status_tbtt(cl_hw);
+
+       if (status & cl_hw->ipc_e2a_irq.msg)
+               cl_irq_status_msg(cl_hw, ipc_env);
+
+       if (status & cl_hw->ipc_e2a_irq.radar)
+               cl_irq_status_radar(cl_hw, ipc_env);
+
+       if (status & cl_hw->ipc_e2a_irq.dbg)
+               cl_irq_status_dbg(cl_hw, ipc_env);
+
+       if (status & cl_hw->ipc_e2a_irq.txdesc_ind)
+               cl_irq_status_txdesc_ind(cl_hw, ipc_env);
+
+       if (status & cl_hw->ipc_e2a_irq.sync)
+               cl_irq_status_sync(cl_hw, ipc_env);
+}
+
+#ifdef CONFIG_CL_PCIE
+static void cl_irq_handler(struct cl_chip *chip)
+{
+       /* Interrupt handler */
+       u32 status, statuses = 0;
+       unsigned long now = jiffies;
+       struct cl_irq_stats *irq_stats = &chip->irq_stats;
+
+       while ((status = ipc_xmac_2_host_status_get(chip))) {
+               statuses |= status;
+
+               if (status & IPC_IRQ_L2H_ALL)
+                       cl_irq_status(chip->cl_hw_tcv0, status);
+
+               if (status & IPC_IRQ_S2H_ALL)
+                       cl_irq_status(chip->cl_hw_tcv1, status);
+       }
+
+       if (statuses & (IPC_IRQ_L2H_RXDESC | IPC_IRQ_S2H_RXDESC))
+               irq_stats->last_rx = now;
+
+       if (statuses & (IPC_IRQ_L2H_TXCFM | IPC_IRQ_S2H_TXCFM))
+               irq_stats->last_tx = now;
+
+       irq_stats->last_isr = now;
+       irq_stats->last_isr_statuses = statuses;
+}
+
+static irqreturn_t cl_irq_request_handler(int irq, void *dev_id)
+{
+       struct cl_chip *chip = (struct cl_chip *)dev_id;
+
+       cl_irq_handler(chip);
+
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_SMP
+static void cl_irq_set_affinity(struct cl_chip *chip, struct pci_dev *pci_dev)
+{
+       s32 irq_smp_affinity = chip->conf->ce_irq_smp_affinity;
+
+       if (irq_smp_affinity != -1) {
+               struct irq_data *data = irq_get_irq_data(pci_dev->irq);
+
+               if (data) {
+                       static struct cpumask mask;
+
+                       cpumask_clear(&mask);
+                       cpumask_set_cpu(irq_smp_affinity, &mask);
+
+                       if (data->chip->irq_set_affinity) {
+                               data->chip->irq_set_affinity(data, &mask, false);
+                               pr_debug("irq=%d, affinity=%d\n", pci_dev->irq, irq_smp_affinity);
+                       }
+               }
+       }
+}
+#endif
+
+int cl_irq_request(struct cl_chip *chip)
+{
+       /*
+        * Allocate the host irq line and
+        * enable PCIe device interrupts.
+        */
+       int ret;
+       /* The PCI interrupt line is requested as shared (IRQF_SHARED) */
+       struct pci_dev *pci_dev = chip->pci_dev;
+
+       ret = request_irq(pci_dev->irq, cl_irq_request_handler, IRQF_SHARED, "cl", chip);
+
+       if (ret) {
+               pr_err("ERROR: could not assign interrupt %d, err=%d\n", pci_dev->irq, ret);
+               return ret;
+       }
+
+#ifdef CONFIG_SMP
+       cl_irq_set_affinity(chip, pci_dev);
+#endif
+
+       return ret;
+}
+
+void cl_irq_free(struct cl_chip *chip)
+{
+       struct pci_dev *pci_dev = chip->pci_dev;
+       /* Disable PCI device interrupts and release the irq line */
+       free_irq(pci_dev->irq, chip);
+}
+#endif /* CONFIG_CL_PCIE */
+
+void cl_irq_enable(struct cl_hw *cl_hw, u32 value)
+{
+       /* Enable IPC interrupts */
+       ipc_xmac_2_host_enable_set_set(cl_hw->chip, value);
+}
+
+void cl_irq_disable(struct cl_hw *cl_hw, u32 value)
+{
+       /* Disable IPC interrupts */
+       ipc_xmac_2_host_enable_clear_set(cl_hw->chip, value);
+}