@@ -229,11 +229,11 @@ struct sched {
unsigned int port; /* port index (round robin ports) */
unsigned int num; /* num skbs in per port queues */
struct sched_port p[MAX_NPORTS];
- struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
+ struct work_struct sched_bh_work;/* bh_work used to run scheduler */
struct sge *sge;
};
-static void restart_sched(struct tasklet_struct *t);
+static void restart_sched(struct work_struct *work);
/*
@@ -270,14 +270,14 @@ static const u8 ch_mac_addr[ETH_ALEN] = {
};
/*
- * stop tasklet and free all pending skb's
+ * Stop the bh work and free all pending skbs.
*/
static void tx_sched_stop(struct sge *sge)
{
struct sched *s = sge->tx_sched;
int i;
- tasklet_kill(&s->sched_tsk);
+ cancel_work_sync(&s->sched_bh_work);
for (i = 0; i < MAX_NPORTS; i++)
__skb_queue_purge(&s->p[s->port].skbq);
@@ -371,7 +371,7 @@ static int tx_sched_init(struct sge *sge)
return -ENOMEM;
pr_debug("tx_sched_init\n");
- tasklet_setup(&s->sched_tsk, restart_sched);
+ INIT_WORK(&s->sched_bh_work, restart_sched);
s->sge = sge;
sge->tx_sched = s;
@@ -1300,12 +1300,12 @@ static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
}
/*
- * Called from tasklet. Checks the scheduler for any
+ * Called from bh context. Checks the scheduler for any
* pending skbs that can be sent.
*/
-static void restart_sched(struct tasklet_struct *t)
+static void restart_sched(struct work_struct *work)
{
- struct sched *s = from_tasklet(s, t, sched_tsk);
+ struct sched *s = from_work(s, work, sched_bh_work);
struct sge *sge = s->sge;
struct adapter *adapter = sge->adapter;
struct cmdQ *q = &sge->cmdQ[0];
@@ -1451,7 +1451,8 @@ static unsigned int update_tx_info(struct adapter *adapter,
writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
}
if (sge->tx_sched)
- tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
+ queue_work(system_bh_highpri_wq,
+ &sge->tx_sched->sched_bh_work);
flags &= ~F_CMDQ0_ENABLE;
}
@@ -53,6 +53,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_classify.h>
#include <linux/crash_dump.h>
+#include <linux/workqueue.h>
#include <linux/thermal.h>
#include <asm/io.h>
#include "t4_chip_type.h"
@@ -880,7 +881,7 @@ struct sge_uld_txq { /* state for an SGE offload Tx queue */
struct sge_txq q;
struct adapter *adap;
struct sk_buff_head sendq; /* list of backpressured packets */
- struct tasklet_struct qresume_tsk; /* restarts the queue */
+ struct work_struct qresume_bh_work; /* restarts the queue */
bool service_ofldq_running; /* service_ofldq() is processing sendq */
u8 full; /* the Tx ring is full */
unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
@@ -890,7 +891,7 @@ struct sge_ctrl_txq { /* state for an SGE control Tx queue */
struct sge_txq q;
struct adapter *adap;
struct sk_buff_head sendq; /* list of backpressured packets */
- struct tasklet_struct qresume_tsk; /* restarts the queue */
+ struct work_struct qresume_bh_work; /* restarts the queue */
u8 full; /* the Tx ring is full */
} ____cacheline_aligned_in_smp;
@@ -946,7 +947,7 @@ struct sge_eosw_txq {
u32 hwqid; /* Underlying hardware queue index */
struct net_device *netdev; /* Pointer to netdevice */
- struct tasklet_struct qresume_tsk; /* Restarts the queue */
+ struct work_struct qresume_bh_work; /* Restarts the queue */
struct completion completion; /* completion for FLOWC rendezvous */
};
@@ -2107,7 +2108,7 @@ void free_tx_desc(struct adapter *adap, struct sge_txq *q,
void cxgb4_eosw_txq_free_desc(struct adapter *adap, struct sge_eosw_txq *txq,
u32 ndesc);
int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc);
-void cxgb4_ethofld_restart(struct tasklet_struct *t);
+void cxgb4_ethofld_restart(struct work_struct *work);
int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *si);
void free_txq(struct adapter *adap, struct sge_txq *q);
@@ -589,7 +589,7 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
struct sge_uld_txq *oq;
oq = container_of(txq, struct sge_uld_txq, q);
- tasklet_schedule(&oq->qresume_tsk);
+ queue_work(system_bh_wq, &oq->qresume_bh_work);
}
} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
const struct cpl_fw6_msg *p = (void *)rsp;
@@ -114,7 +114,7 @@ static int cxgb4_init_eosw_txq(struct net_device *dev,
eosw_txq->cred = adap->params.ofldq_wr_cred;
eosw_txq->hwqid = hwqid;
eosw_txq->netdev = dev;
- tasklet_setup(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart);
+ INIT_WORK(&eosw_txq->qresume_bh_work, cxgb4_ethofld_restart);
return 0;
}
@@ -143,7 +143,7 @@ static void cxgb4_free_eosw_txq(struct net_device *dev,
cxgb4_clean_eosw_txq(dev, eosw_txq);
kfree(eosw_txq->desc);
spin_unlock_bh(&eosw_txq->lock);
- tasklet_kill(&eosw_txq->qresume_tsk);
+ cancel_work_sync(&eosw_txq->qresume_bh_work);
}
static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
@@ -407,7 +407,7 @@ free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
struct sge_uld_txq *txq = &txq_info->uldtxq[i];
if (txq->q.desc) {
- tasklet_kill(&txq->qresume_tsk);
+ cancel_work_sync(&txq->qresume_bh_work);
t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
txq->q.cntxt_id);
free_tx_desc(adap, &txq->q, txq->q.in_use, false);
@@ -2769,15 +2769,15 @@ static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
/**
* restart_ctrlq - restart a suspended control queue
- * @t: pointer to the tasklet associated with this handler
+ * @work: pointer to the work struct associated with this handler
*
* Resumes transmission on a suspended Tx control queue.
*/
-static void restart_ctrlq(struct tasklet_struct *t)
+static void restart_ctrlq(struct work_struct *work)
{
struct sk_buff *skb;
unsigned int written = 0;
- struct sge_ctrl_txq *q = from_tasklet(q, t, qresume_tsk);
+ struct sge_ctrl_txq *q = from_work(q, work, qresume_bh_work);
spin_lock(&q->sendq.lock);
reclaim_completed_tx_imm(&q->q);
@@ -3075,13 +3075,13 @@ static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
/**
* restart_ofldq - restart a suspended offload queue
- * @t: pointer to the tasklet associated with this handler
+ * @work: pointer to the work struct associated with this handler
*
* Resumes transmission on a suspended Tx offload queue.
*/
-static void restart_ofldq(struct tasklet_struct *t)
+static void restart_ofldq(struct work_struct *work)
{
- struct sge_uld_txq *q = from_tasklet(q, t, qresume_tsk);
+ struct sge_uld_txq *q = from_work(q, work, qresume_bh_work);
spin_lock(&q->sendq.lock);
q->full = 0; /* the queue actually is completely empty now */
@@ -4020,10 +4020,10 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
return work_done;
}
-void cxgb4_ethofld_restart(struct tasklet_struct *t)
+void cxgb4_ethofld_restart(struct work_struct *work)
{
- struct sge_eosw_txq *eosw_txq = from_tasklet(eosw_txq, t,
- qresume_tsk);
+ struct sge_eosw_txq *eosw_txq = from_work(eosw_txq, work,
+ qresume_bh_work);
int pktcount;
spin_lock(&eosw_txq->lock);
@@ -4050,7 +4050,7 @@ void cxgb4_ethofld_restart(struct tasklet_struct *t)
* @si: the gather list of packet fragments
*
* Process a ETHOFLD Tx completion. Increment the cidx here, but
- * free up the descriptors in a tasklet later.
+ * free up the descriptors later in bh_work.
*/
int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *si)
@@ -4117,10 +4117,10 @@ int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
spin_unlock(&eosw_txq->lock);
- /* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx,
+ /* Schedule a bh work to reclaim SKBs and restart ETHOFLD Tx,
* if there were packets waiting for completion.
*/
- tasklet_schedule(&eosw_txq->qresume_tsk);
+ queue_work(system_bh_wq, &eosw_txq->qresume_bh_work);
}
out_done:
@@ -4279,7 +4279,7 @@ static void sge_tx_timer_cb(struct timer_list *t)
struct sge_uld_txq *txq = s->egr_map[id];
clear_bit(id, s->txq_maperr);
- tasklet_schedule(&txq->qresume_tsk);
+ queue_work(system_bh_wq, &txq->qresume_bh_work);
}
if (!is_t4(adap->params.chip)) {
@@ -4719,7 +4719,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
- tasklet_setup(&txq->qresume_tsk, restart_ctrlq);
+ INIT_WORK(&txq->qresume_bh_work, restart_ctrlq);
txq->full = 0;
return 0;
}
@@ -4809,7 +4809,7 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
txq->q.q_type = CXGB4_TXQ_ULD;
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
- tasklet_setup(&txq->qresume_tsk, restart_ofldq);
+ INIT_WORK(&txq->qresume_bh_work, restart_ofldq);
txq->full = 0;
txq->mapping_err = 0;
return 0;
@@ -4952,7 +4952,7 @@ void t4_free_sge_resources(struct adapter *adap)
struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
if (cq->q.desc) {
- tasklet_kill(&cq->qresume_tsk);
+ cancel_work_sync(&cq->qresume_bh_work);
t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
cq->q.cntxt_id);
__skb_queue_purge(&cq->sendq);
@@ -5002,7 +5002,7 @@ void t4_sge_start(struct adapter *adap)
* t4_sge_stop - disable SGE operation
* @adap: the adapter
*
- * Stop tasklets and timers associated with the DMA engine. Note that
+ * Stop bh works and timers associated with the DMA engine. Note that
* this is effective only if measures have been taken to disable any HW
* events that may restart them.
*/
@@ -5025,7 +5025,7 @@ void t4_sge_stop(struct adapter *adap)
for_each_ofldtxq(&adap->sge, i) {
if (txq->q.desc)
- tasklet_kill(&txq->qresume_tsk);
+ cancel_work_sync(&txq->qresume_bh_work);
}
}
}
@@ -5039,7 +5039,7 @@ void t4_sge_stop(struct adapter *adap)
for_each_ofldtxq(&adap->sge, i) {
if (txq->q.desc)
- tasklet_kill(&txq->qresume_tsk);
+ cancel_work_sync(&txq->qresume_bh_work);
}
}
}
@@ -5048,7 +5048,7 @@ void t4_sge_stop(struct adapter *adap)
struct sge_ctrl_txq *cq = &s->ctrlq[i];
if (cq->q.desc)
- tasklet_kill(&cq->qresume_tsk);
+ cancel_work_sync(&cq->qresume_bh_work);
}
}
@@ -2587,7 +2587,7 @@ void t4vf_free_sge_resources(struct adapter *adapter)
* t4vf_sge_start - enable SGE operation
* @adapter: the adapter
*
- * Start tasklets and timers associated with the DMA engine.
+ * Start bh works and timers associated with the DMA engine.
*/
void t4vf_sge_start(struct adapter *adapter)
{
@@ -2600,7 +2600,7 @@ void t4vf_sge_start(struct adapter *adapter)
* t4vf_sge_stop - disable SGE operation
* @adapter: the adapter
*
- * Stop tasklets and timers associated with the DMA engine. Note that
+ * Stop bh works and timers associated with the DMA engine. Note that
* this is effective only if measures have been taken to disable any HW
* events that may restart them.
*/
@@ -2692,7 +2692,7 @@ int t4vf_sge_init(struct adapter *adapter)
s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
/*
- * Set up tasklet timers.
+ * Set up the SGE timers.
*/
timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
Migrate tasklet APIs to the new bottom half workqueue mechanism. It replaces all occurrences of tasklet usage with the equivalent BH workqueue APIs throughout the Chelsio drivers. BH workqueue items still execute in softirq context, so the conversion preserves the drivers' existing bottom-half execution semantics while moving them off the deprecated tasklet interface. Signed-off-by: Allen Pais <allen.lkml@gmail.com> --- drivers/net/ethernet/chelsio/cxgb/sge.c | 19 ++++----- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 9 +++-- .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +- .../ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c | 4 +- .../net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/sge.c | 40 +++++++++---------- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 6 +-- 7 files changed, 42 insertions(+), 40 deletions(-)