@@ -925,7 +925,7 @@ int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
if (OCTEON_CN23XX_VF(oct))
dev_err(&oct->pci_dev->dev,
"should not come here should not get rx when poll mode = 0 for vf\n");
- tasklet_schedule(&oct_priv->droq_tasklet);
+ queue_work(system_bh_wq, &oct_priv->droq_bh_work);
return 1;
}
/* this will be flushed periodically by check iq db */
@@ -975,7 +975,7 @@ static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
droq->ops.napi_fn(droq);
oct_priv->napi_mask |= BIT_ULL(oq_no);
} else {
- tasklet_schedule(&oct_priv->droq_tasklet);
+ queue_work(system_bh_wq, &oct_priv->droq_bh_work);
}
}
}
@@ -150,12 +150,12 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;
-static void octeon_droq_bh(struct tasklet_struct *t)
+static void octeon_droq_bh(struct work_struct *work)
{
int q_no;
int reschedule = 0;
- struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t,
- droq_tasklet);
+ struct octeon_device_priv *oct_priv = from_work(oct_priv, work,
+ droq_bh_work);
struct octeon_device *oct = oct_priv->dev;
for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
@@ -180,7 +180,7 @@ static void octeon_droq_bh(struct tasklet_struct *t)
}
if (reschedule)
- tasklet_schedule(&oct_priv->droq_tasklet);
+ queue_work(system_bh_wq, &oct_priv->droq_bh_work);
}
static int lio_wait_for_oq_pkts(struct octeon_device *oct)
@@ -199,7 +199,7 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
}
if (pkt_cnt > 0) {
pending_pkts += pkt_cnt;
- tasklet_schedule(&oct_priv->droq_tasklet);
+ queue_work(system_bh_wq, &oct_priv->droq_bh_work);
}
pkt_cnt = 0;
schedule_timeout_uninterruptible(1);
@@ -1130,7 +1130,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
break;
} /* end switch (oct->status) */
- tasklet_kill(&oct_priv->droq_tasklet);
+ cancel_work_sync(&oct_priv->droq_bh_work);
}
/**
@@ -1234,7 +1234,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
netif_napi_del(napi);
- tasklet_enable(&oct_priv->droq_tasklet);
+ enable_and_queue_work(system_bh_wq, &oct_priv->droq_bh_work);
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
unregister_netdev(netdev);
@@ -1770,7 +1770,7 @@ static int liquidio_open(struct net_device *netdev)
int ret = 0;
if (oct->props[lio->ifidx].napi_enabled == 0) {
- tasklet_disable(&oct_priv->droq_tasklet);
+ disable_work_sync(&oct_priv->droq_bh_work);
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
napi_enable(napi);
@@ -1896,7 +1896,7 @@ static int liquidio_stop(struct net_device *netdev)
if (OCTEON_CN23XX_PF(oct))
oct->droq[0]->ops.poll_mode = 0;
- tasklet_enable(&oct_priv->droq_tasklet);
+ enable_and_queue_work(system_bh_wq, &oct_priv->droq_bh_work);
}
dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
@@ -4204,9 +4204,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
}
}
- /* Initialize the tasklet that handles output queue packet processing.*/
- dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
- tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh);
+ /* Initialize the bh work that handles output queue packet processing.*/
+ dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq bh work\n");
+ INIT_WORK(&oct_priv->droq_bh_work, octeon_droq_bh);
/* Setup the interrupt handler and record the INT SUM register address
*/
@@ -87,7 +87,7 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
}
if (pkt_cnt > 0) {
pending_pkts += pkt_cnt;
- tasklet_schedule(&oct_priv->droq_tasklet);
+ queue_work(system_bh_wq, &oct_priv->droq_bh_work);
}
pkt_cnt = 0;
schedule_timeout_uninterruptible(1);
@@ -584,7 +584,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
break;
}
- tasklet_kill(&oct_priv->droq_tasklet);
+ cancel_work_sync(&oct_priv->droq_bh_work);
}
/**
@@ -687,7 +687,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
netif_napi_del(napi);
- tasklet_enable(&oct_priv->droq_tasklet);
+ enable_and_queue_work(system_bh_wq, &oct_priv->droq_bh_work);
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
unregister_netdev(netdev);
@@ -911,7 +911,7 @@ static int liquidio_open(struct net_device *netdev)
int ret = 0;
if (!oct->props[lio->ifidx].napi_enabled) {
- tasklet_disable(&oct_priv->droq_tasklet);
+ disable_work_sync(&oct_priv->droq_bh_work);
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
napi_enable(napi);
@@ -986,7 +986,7 @@ static int liquidio_stop(struct net_device *netdev)
oct->droq[0]->ops.poll_mode = 0;
- tasklet_enable(&oct_priv->droq_tasklet);
+ enable_and_queue_work(system_bh_wq, &oct_priv->droq_bh_work);
}
cancel_delayed_work_sync(&lio->stats_wk.work);
@@ -96,7 +96,7 @@ u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
last_count = pkt_count - droq->pkt_count;
droq->pkt_count = pkt_count;
- /* we shall write to cnts at napi irq enable or end of droq tasklet */
+ /* we shall write to cnts at napi irq enable or end of droq bh_work */
if (last_count)
atomic_add(last_count, &droq->pkts_pending);
@@ -764,7 +764,7 @@ octeon_droq_process_packets(struct octeon_device *oct,
(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
}
- /* If there are packets pending. schedule tasklet again */
+ /* If there are packets pending. schedule bh_work again */
if (atomic_read(&droq->pkts_pending))
return 1;
@@ -24,6 +24,7 @@
#define _OCTEON_MAIN_H_
#include <linux/sched/signal.h>
+#include <linux/workqueue.h>
#if BITS_PER_LONG == 32
#define CVM_CAST64(v) ((long long)(v))
@@ -36,8 +37,7 @@
#define DRV_NAME "LiquidIO"
struct octeon_device_priv {
- /** Tasklet structures for this device. */
- struct tasklet_struct droq_tasklet;
+ struct work_struct droq_bh_work;
unsigned long napi_mask;
struct octeon_device *dev;
};
Migrate the liquidio driver from tasklet APIs to the new bottom-half workqueue mechanism. All tasklet usage throughout the cavium/liquidio driver is replaced with the equivalent workqueue APIs (INIT_WORK/queue_work on system_bh_wq, cancel_work_sync, disable_work_sync, enable_and_queue_work), following the ongoing kernel-wide effort to retire tasklets. The BH workqueue preserves the atomic (softirq) execution context of tasklets, so no change in behavior is expected. Signed-off-by: Allen Pais <allen.lkml@gmail.com> --- .../net/ethernet/cavium/liquidio/lio_core.c | 4 ++-- .../net/ethernet/cavium/liquidio/lio_main.c | 24 +++++++++---------- .../ethernet/cavium/liquidio/lio_vf_main.c | 10 ++++---- .../ethernet/cavium/liquidio/octeon_droq.c | 4 ++-- .../ethernet/cavium/liquidio/octeon_main.h | 4 ++-- 5 files changed, 23 insertions(+), 23 deletions(-)