diff mbox series

[13/15] net: jme: Convert tasklet API to new bottom half workqueue mechanism

Message ID 20240621050525.3720069-14-allen.lkml@gmail.com (mailing list archive)
State Superseded
Headers show
Series ethernet: Convert from tasklet to BH workqueue | expand

Commit Message

Allen June 21, 2024, 5:05 a.m. UTC
Migrate tasklet APIs to the new bottom half workqueue mechanism. It
replaces all occurrences of tasklet usage with the appropriate workqueue
APIs throughout the jme driver. This transition ensures compatibility
with the latest design and enhances performance.

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
 drivers/net/ethernet/jme.c | 72 +++++++++++++++++++-------------------
 drivers/net/ethernet/jme.h |  8 ++---
 2 files changed, 40 insertions(+), 40 deletions(-)

Comments

Paolo Abeni June 25, 2024, 10:38 a.m. UTC | #1
On Thu, 2024-06-20 at 22:05 -0700, Allen Pais wrote:
> Migrate tasklet APIs to the new bottom half workqueue mechanism. It
> replaces all occurrences of tasklet usage with the appropriate workqueue
> APIs throughout the jme driver. This transition ensures compatibility
> with the latest design and enhances performance.
> 
> Signed-off-by: Allen Pais <allen.lkml@gmail.com>
> ---
>  drivers/net/ethernet/jme.c | 72 +++++++++++++++++++-------------------
>  drivers/net/ethernet/jme.h |  8 ++---
>  2 files changed, 40 insertions(+), 40 deletions(-)
> 
> diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
> index b06e24562973..b1a92b851b3b 100644
> --- a/drivers/net/ethernet/jme.c
> +++ b/drivers/net/ethernet/jme.c
> @@ -1141,7 +1141,7 @@ jme_dynamic_pcc(struct jme_adapter *jme)
>  
>  	if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
>  		if (dpi->attempt < dpi->cur)
> -			tasklet_schedule(&jme->rxclean_task);
> +			queue_work(system_bh_wq, &jme->rxclean_bh_work);
>  		jme_set_rx_pcc(jme, dpi->attempt);
>  		dpi->cur = dpi->attempt;
>  		dpi->cnt = 0;
> @@ -1182,9 +1182,9 @@ jme_shutdown_nic(struct jme_adapter *jme)
>  }
>  
>  static void
> -jme_pcc_tasklet(struct tasklet_struct *t)
> +jme_pcc_bh_work(struct work_struct *work)
>  {
> -	struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
> +	struct jme_adapter *jme = from_work(jme, work, pcc_bh_work);
>  	struct net_device *netdev = jme->dev;
>  
>  	if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
> @@ -1282,9 +1282,9 @@ static void jme_link_change_work(struct work_struct *work)
>  		jme_stop_shutdown_timer(jme);
>  
>  	jme_stop_pcc_timer(jme);
> -	tasklet_disable(&jme->txclean_task);
> -	tasklet_disable(&jme->rxclean_task);
> -	tasklet_disable(&jme->rxempty_task);
> +	disable_work_sync(&jme->txclean_bh_work);
> +	disable_work_sync(&jme->rxclean_bh_work);
> +	disable_work_sync(&jme->rxempty_bh_work);
>  
>  	if (netif_carrier_ok(netdev)) {
>  		jme_disable_rx_engine(jme);
> @@ -1304,7 +1304,7 @@ static void jme_link_change_work(struct work_struct *work)
>  		rc = jme_setup_rx_resources(jme);
>  		if (rc) {
>  			pr_err("Allocating resources for RX error, Device STOPPED!\n");
> -			goto out_enable_tasklet;
> +			goto out_enable_bh_work;
>  		}
>  
>  		rc = jme_setup_tx_resources(jme);
> @@ -1326,22 +1326,22 @@ static void jme_link_change_work(struct work_struct *work)
>  		jme_start_shutdown_timer(jme);
>  	}
>  
> -	goto out_enable_tasklet;
> +	goto out_enable_bh_work;
>  
>  err_out_free_rx_resources:
>  	jme_free_rx_resources(jme);
> -out_enable_tasklet:
> -	tasklet_enable(&jme->txclean_task);
> -	tasklet_enable(&jme->rxclean_task);
> -	tasklet_enable(&jme->rxempty_task);
> +out_enable_bh_work:
> +	enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
> +	enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
> +	enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);

This will unconditionally schedule the rxempty_bh_work and is AFAICS a
different behavior WRT prior this patch.

In turn the rxempty_bh_work() will emit (almost unconditionally) the
'RX Queue Full!' message, so the change should be visible to the user.

I think you should queue the work only if it was queued at cancel time.
You likely need additional status to do that.

Thanks,

Paolo
Allen July 1, 2024, 10:13 a.m. UTC | #2
> > Migrate tasklet APIs to the new bottom half workqueue mechanism. It
> > replaces all occurrences of tasklet usage with the appropriate workqueue
> > APIs throughout the jme driver. This transition ensures compatibility
> > with the latest design and enhances performance.
> >
> > Signed-off-by: Allen Pais <allen.lkml@gmail.com>
> > ---
> >  drivers/net/ethernet/jme.c | 72 +++++++++++++++++++-------------------
> >  drivers/net/ethernet/jme.h |  8 ++---
> >  2 files changed, 40 insertions(+), 40 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
> > index b06e24562973..b1a92b851b3b 100644
> > --- a/drivers/net/ethernet/jme.c
> > +++ b/drivers/net/ethernet/jme.c
> > @@ -1141,7 +1141,7 @@ jme_dynamic_pcc(struct jme_adapter *jme)
> >
> >       if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
> >               if (dpi->attempt < dpi->cur)
> > -                     tasklet_schedule(&jme->rxclean_task);
> > +                     queue_work(system_bh_wq, &jme->rxclean_bh_work);
> >               jme_set_rx_pcc(jme, dpi->attempt);
> >               dpi->cur = dpi->attempt;
> >               dpi->cnt = 0;
> > @@ -1182,9 +1182,9 @@ jme_shutdown_nic(struct jme_adapter *jme)
> >  }
> >
> >  static void
> > -jme_pcc_tasklet(struct tasklet_struct *t)
> > +jme_pcc_bh_work(struct work_struct *work)
> >  {
> > -     struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
> > +     struct jme_adapter *jme = from_work(jme, work, pcc_bh_work);
> >       struct net_device *netdev = jme->dev;
> >
> >       if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
> > @@ -1282,9 +1282,9 @@ static void jme_link_change_work(struct work_struct *work)
> >               jme_stop_shutdown_timer(jme);
> >
> >       jme_stop_pcc_timer(jme);
> > -     tasklet_disable(&jme->txclean_task);
> > -     tasklet_disable(&jme->rxclean_task);
> > -     tasklet_disable(&jme->rxempty_task);
> > +     disable_work_sync(&jme->txclean_bh_work);
> > +     disable_work_sync(&jme->rxclean_bh_work);
> > +     disable_work_sync(&jme->rxempty_bh_work);
> >
> >       if (netif_carrier_ok(netdev)) {
> >               jme_disable_rx_engine(jme);
> > @@ -1304,7 +1304,7 @@ static void jme_link_change_work(struct work_struct *work)
> >               rc = jme_setup_rx_resources(jme);
> >               if (rc) {
> >                       pr_err("Allocating resources for RX error, Device STOPPED!\n");
> > -                     goto out_enable_tasklet;
> > +                     goto out_enable_bh_work;
> >               }
> >
> >               rc = jme_setup_tx_resources(jme);
> > @@ -1326,22 +1326,22 @@ static void jme_link_change_work(struct work_struct *work)
> >               jme_start_shutdown_timer(jme);
> >       }
> >
> > -     goto out_enable_tasklet;
> > +     goto out_enable_bh_work;
> >
> >  err_out_free_rx_resources:
> >       jme_free_rx_resources(jme);
> > -out_enable_tasklet:
> > -     tasklet_enable(&jme->txclean_task);
> > -     tasklet_enable(&jme->rxclean_task);
> > -     tasklet_enable(&jme->rxempty_task);
> > +out_enable_bh_work:
> > +     enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
> > +     enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
> > +     enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
>
> This will unconditionally schedule the rxempty_bh_work and is AFAICS a
> different behavior WRT prior this patch.
>
> In turn the rxempty_bh_work() will emit (almost unconditionally) the
> 'RX Queue Full!' message, so the change should be visible to the user.
>
> I think you should queue the work only if it was queued at cancel time.
> You likely need additional status to do that.
>

 Thank you for taking the time out to review. Now that it's been a week, I was
preparing to send out version 3. Before I do that, I want to make sure
that the below approach is acceptable.

diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b06e24562973..b3fc2e5c379f 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1141,7 +1141,7 @@ jme_dynamic_pcc(struct jme_adapter *jme)

        if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
                if (dpi->attempt < dpi->cur)
-                       tasklet_schedule(&jme->rxclean_task);
+                       queue_work(system_bh_wq, &jme->rxclean_bh_work);
                jme_set_rx_pcc(jme, dpi->attempt);
                dpi->cur = dpi->attempt;
                dpi->cnt = 0;
@@ -1182,9 +1182,9 @@ jme_shutdown_nic(struct jme_adapter *jme)
 }

 static void
-jme_pcc_tasklet(struct tasklet_struct *t)
+jme_pcc_bh_work(struct work_struct *work)
 {
-       struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
+       struct jme_adapter *jme = from_work(jme, work, pcc_bh_work);
        struct net_device *netdev = jme->dev;

        if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
@@ -1282,9 +1282,9 @@ static void jme_link_change_work(struct work_struct *work)
                jme_stop_shutdown_timer(jme);

        jme_stop_pcc_timer(jme);
-       tasklet_disable(&jme->txclean_task);
-       tasklet_disable(&jme->rxclean_task);
-       tasklet_disable(&jme->rxempty_task);
+       disable_work_sync(&jme->txclean_bh_work);
+       disable_work_sync(&jme->rxclean_bh_work);
+       disable_work_sync(&jme->rxempty_bh_work);

        if (netif_carrier_ok(netdev)) {
                jme_disable_rx_engine(jme);
@@ -1304,7 +1304,7 @@ static void jme_link_change_work(struct work_struct *work)
                rc = jme_setup_rx_resources(jme);
                if (rc) {
                        pr_err("Allocating resources for RX error,
Device STOPPED!\n");
-                       goto out_enable_tasklet;
+                       goto out_enable_bh_work;
                }

                rc = jme_setup_tx_resources(jme);
@@ -1326,22 +1326,23 @@ static void jme_link_change_work(struct
work_struct *work)
                jme_start_shutdown_timer(jme);
        }

-       goto out_enable_tasklet;
+       goto out_enable_bh_work;

 err_out_free_rx_resources:
        jme_free_rx_resources(jme);
-out_enable_tasklet:
-       tasklet_enable(&jme->txclean_task);
-       tasklet_enable(&jme->rxclean_task);
-       tasklet_enable(&jme->rxempty_task);
+out_enable_bh_work:
+       enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
+       enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
+       if (jme->rxempty_bh_work_queued)
+               enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
 out:
        atomic_inc(&jme->link_changing);
 }

 static void
-jme_rx_clean_tasklet(struct tasklet_struct *t)
+jme_rx_clean_bh_work(struct work_struct *work)
 {
-       struct jme_adapter *jme = from_tasklet(jme, t, rxclean_task);
+       struct jme_adapter *jme = from_work(jme, work, rxclean_bh_work);
        struct dynpcc_info *dpi = &(jme->dpi);

        jme_process_receive(jme, jme->rx_ring_size);
@@ -1374,9 +1375,9 @@ jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
 }

 static void
-jme_rx_empty_tasklet(struct tasklet_struct *t)
+jme_rx_empty_bh_work(struct work_struct *work)
 {
-       struct jme_adapter *jme = from_tasklet(jme, t, rxempty_task);
+       struct jme_adapter *jme = from_work(jme, work, rxempty_bh_work);

        if (unlikely(atomic_read(&jme->link_changing) != 1))
                return;
@@ -1386,7 +1387,7 @@ jme_rx_empty_tasklet(struct tasklet_struct *t)

        netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");

-       jme_rx_clean_tasklet(&jme->rxclean_task);
+       jme_rx_clean_bh_work(&jme->rxclean_bh_work);

        while (atomic_read(&jme->rx_empty) > 0) {
                atomic_dec(&jme->rx_empty);
@@ -1410,9 +1411,9 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)

 }

-static void jme_tx_clean_tasklet(struct tasklet_struct *t)
+static void jme_tx_clean_bh_work(struct work_struct *work)
 {
-       struct jme_adapter *jme = from_tasklet(jme, t, txclean_task);
+       struct jme_adapter *jme = from_work(jme, work, txclean_bh_work);
        struct jme_ring *txring = &(jme->txring[0]);
        struct txdesc *txdesc = txring->desc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
@@ -1510,12 +1511,12 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat)

        if (intrstat & INTR_TMINTR) {
                jwrite32(jme, JME_IEVE, INTR_TMINTR);
-               tasklet_schedule(&jme->pcc_task);
+               queue_work(system_bh_wq, &jme->pcc_bh_work);
        }

        if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
                jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
-               tasklet_schedule(&jme->txclean_task);
+               queue_work(system_bh_wq, &jme->txclean_bh_work);
        }

        if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
@@ -1538,9 +1539,9 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
        } else {
                if (intrstat & INTR_RX0EMP) {
                        atomic_inc(&jme->rx_empty);
-                       tasklet_hi_schedule(&jme->rxempty_task);
+                       queue_work(system_bh_highpri_wq, &jme->rxempty_bh_work);
                } else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
-                       tasklet_hi_schedule(&jme->rxclean_task);
+                       queue_work(system_bh_highpri_wq, &jme->rxclean_bh_work);
                }
        }

@@ -1826,9 +1827,9 @@ jme_open(struct net_device *netdev)
        jme_clear_pm_disable_wol(jme);
        JME_NAPI_ENABLE(jme);

-       tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
-       tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
-       tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);
+       INIT_WORK(&jme->txclean_bh_work, jme_tx_clean_bh_work);
+       INIT_WORK(&jme->rxclean_bh_work, jme_rx_clean_bh_work);
+       INIT_WORK(&jme->rxempty_bh_work, jme_rx_empty_bh_work);

        rc = jme_request_irq(jme);
        if (rc)
@@ -1914,9 +1915,10 @@ jme_close(struct net_device *netdev)
        JME_NAPI_DISABLE(jme);

        cancel_work_sync(&jme->linkch_task);
-       tasklet_kill(&jme->txclean_task);
-       tasklet_kill(&jme->rxclean_task);
-       tasklet_kill(&jme->rxempty_task);
+       cancel_work_sync(&jme->txclean_bh_work);
+       cancel_work_sync(&jme->rxclean_bh_work);
+       jme->rxempty_bh_work_queued = false;
+       cancel_work_sync(&jme->rxempty_bh_work);

        jme_disable_rx_engine(jme);
        jme_disable_tx_engine(jme);
@@ -3020,7 +3022,7 @@ jme_init_one(struct pci_dev *pdev,
        atomic_set(&jme->tx_cleaning, 1);
        atomic_set(&jme->rx_empty, 1);

-       tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
+       INIT_WORK(&jme->pcc_bh_work, jme_pcc_bh_work);
        INIT_WORK(&jme->linkch_task, jme_link_change_work);
        jme->dpi.cur = PCC_P1;

@@ -3180,9 +3182,9 @@ jme_suspend(struct device *dev)
        netif_stop_queue(netdev);
        jme_stop_irq(jme);

-       tasklet_disable(&jme->txclean_task);
-       tasklet_disable(&jme->rxclean_task);
-       tasklet_disable(&jme->rxempty_task);
+       disable_work_sync(&jme->txclean_bh_work);
+       disable_work_sync(&jme->rxclean_bh_work);
+       disable_work_sync(&jme->rxempty_bh_work);

        if (netif_carrier_ok(netdev)) {
                if (test_bit(JME_FLAG_POLL, &jme->flags))
@@ -3198,9 +3200,10 @@ jme_suspend(struct device *dev)
                jme->phylink = 0;
        }

-       tasklet_enable(&jme->txclean_task);
-       tasklet_enable(&jme->rxclean_task);
-       tasklet_enable(&jme->rxempty_task);
+       enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
+       enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
+       jme->rxempty_bh_work_queued = true;
+       enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);

        jme_powersave_phy(jme);

diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 860494ff3714..44aaf7625dc3 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -406,11 +406,12 @@ struct jme_adapter {
        spinlock_t              phy_lock;
        spinlock_t              macaddr_lock;
        spinlock_t              rxmcs_lock;
-       struct tasklet_struct   rxempty_task;
-       struct tasklet_struct   rxclean_task;
-       struct tasklet_struct   txclean_task;
+       struct work_struct      rxempty_bh_work;
+       struct work_struct      rxclean_bh_work;
+       struct work_struct      txclean_bh_work;
+       bool                    rxempty_bh_work_queued;
        struct work_struct      linkch_task;
-       struct tasklet_struct   pcc_task;
+       struct work_struct      pcc_bh_work;
        unsigned long           flags;
        u32                     reg_txcs;
        u32                     reg_txpfc;



  Do we need a flag for rxclean and txclean too?

Thanks,
Allen

> Thanks,
>
> Paolo
>
Paolo Abeni July 1, 2024, 2:23 p.m. UTC | #3
On Mon, 2024-07-01 at 03:13 -0700, Allen wrote:
> > > @@ -1326,22 +1326,22 @@ static void jme_link_change_work(struct work_struct *work)
> > >               jme_start_shutdown_timer(jme);
> > >       }
> > > 
> > > -     goto out_enable_tasklet;
> > > +     goto out_enable_bh_work;
> > > 
> > >  err_out_free_rx_resources:
> > >       jme_free_rx_resources(jme);
> > > -out_enable_tasklet:
> > > -     tasklet_enable(&jme->txclean_task);
> > > -     tasklet_enable(&jme->rxclean_task);
> > > -     tasklet_enable(&jme->rxempty_task);
> > > +out_enable_bh_work:
> > > +     enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
> > > +     enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
> > > +     enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
> > 
> > This will unconditionally schedule the rxempty_bh_work and is AFAICS a
> > different behavior WRT prior this patch.
> > 
> > In turn the rxempty_bh_work() will emit (almost unconditionally) the
> > 'RX Queue Full!' message, so the change should be visible to the user.
> > 
> > I think you should queue the work only if it was queued at cancel time.
> > You likely need additional status to do that.
> > 
> 
>  Thank you for taking the time out to review. Now that it's been a week, I was
> preparing to send out version 3. Before I do that, I want to make sure
> that the below approach is acceptable.

I _think_ the following does not track the  rxempty_bh_work 'queued'
status fully/correctly.

> @@ -1282,9 +1282,9 @@ static void jme_link_change_work(struct work_struct *work)
>                 jme_stop_shutdown_timer(jme);
> 
>         jme_stop_pcc_timer(jme);
> -       tasklet_disable(&jme->txclean_task);
> -       tasklet_disable(&jme->rxclean_task);
> -       tasklet_disable(&jme->rxempty_task);
> +       disable_work_sync(&jme->txclean_bh_work);
> +       disable_work_sync(&jme->rxclean_bh_work);
> +       disable_work_sync(&jme->rxempty_bh_work);

I think the above should be:

	  jme->rxempty_bh_work_queued = disable_work_sync(&jme->rxempty_bh_work);

[...]
> @@ -1326,22 +1326,23 @@ static void jme_link_change_work(struct
> work_struct *work)
>                 jme_start_shutdown_timer(jme);
>         }
> 
> -       goto out_enable_tasklet;
> +       goto out_enable_bh_work;
> 
>  err_out_free_rx_resources:
>         jme_free_rx_resources(jme);
> -out_enable_tasklet:
> -       tasklet_enable(&jme->txclean_task);
> -       tasklet_enable(&jme->rxclean_task);
> -       tasklet_enable(&jme->rxempty_task);
> +out_enable_bh_work:
> +       enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
> +       enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
> +       if (jme->rxempty_bh_work_queued)
> +               enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);

Missing:

	  else
		enable_work(system_bh_wq, &jme->rxempty_bh_work);

[...]
> @@ -3180,9 +3182,9 @@ jme_suspend(struct device *dev)
>         netif_stop_queue(netdev);
>         jme_stop_irq(jme);
> 
> -       tasklet_disable(&jme->txclean_task);
> -       tasklet_disable(&jme->rxclean_task);
> -       tasklet_disable(&jme->rxempty_task);
> +       disable_work_sync(&jme->txclean_bh_work);
> +       disable_work_sync(&jme->rxclean_bh_work);
> +       disable_work_sync(&jme->rxempty_bh_work);

should be:

	  jme->rxempty_bh_work_queued = disable_work_sync(&jme->rxempty_bh_work);	


> 
> @@ -3198,9 +3200,10 @@ jme_suspend(struct device *dev)
>                 jme->phylink = 0;
>         }
> 
> -       tasklet_enable(&jme->txclean_task);
> -       tasklet_enable(&jme->rxclean_task);
> -       tasklet_enable(&jme->rxempty_task);
> +       enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
> +       enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
> +       jme->rxempty_bh_work_queued = true;
> +       enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);

should be:

	if (jme->rxempty_bh_work_queued)
        	enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
	else
		enable_work(system_bh_wq, &jme->rxempty_bh_work);

I think the above ones are the only places where you need to touch
'rxempty_bh_work_queued'.


[...]
>   Do we need a flag for rxclean and txclean too?

Functionally speaking I don't think it will be necessary, as
rxclean_bh_work() and txclean_bh_work() don't emit warnings on spurious
invocation.

Thanks,

Paolo
Allen July 15, 2024, 5:50 p.m. UTC | #4
> > > > @@ -1326,22 +1326,22 @@ static void jme_link_change_work(struct work_struct *work)
> > > >               jme_start_shutdown_timer(jme);
> > > >       }
> > > >
> > > > -     goto out_enable_tasklet;
> > > > +     goto out_enable_bh_work;
> > > >
> > > >  err_out_free_rx_resources:
> > > >       jme_free_rx_resources(jme);
> > > > -out_enable_tasklet:
> > > > -     tasklet_enable(&jme->txclean_task);
> > > > -     tasklet_enable(&jme->rxclean_task);
> > > > -     tasklet_enable(&jme->rxempty_task);
> > > > +out_enable_bh_work:
> > > > +     enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
> > > > +     enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
> > > > +     enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
> > >
> > > This will unconditionally schedule the rxempty_bh_work and is AFAICS a
> > > different behavior WRT prior this patch.
> > >
> > > In turn the rxempty_bh_work() will emit (almost unconditionally) the
> > > 'RX Queue Full!' message, so the change should be visible to the user.
> > >
> > > I think you should queue the work only if it was queued at cancel time.
> > > You likely need additional status to do that.
> > >
> >
> >  Thank you for taking the time out to review. Now that it's been a week, I was
> > preparing to send out version 3. Before I do that, I want to make sure
> > that the below approach is acceptable.
>
> I _think_ the following does not track the  rxempty_bh_work 'queued'
> status fully/correctly.
>
> > @@ -1282,9 +1282,9 @@ static void jme_link_change_work(struct work_struct *work)
> >                 jme_stop_shutdown_timer(jme);
> >
> >         jme_stop_pcc_timer(jme);
> > -       tasklet_disable(&jme->txclean_task);
> > -       tasklet_disable(&jme->rxclean_task);
> > -       tasklet_disable(&jme->rxempty_task);
> > +       disable_work_sync(&jme->txclean_bh_work);
> > +       disable_work_sync(&jme->rxclean_bh_work);
> > +       disable_work_sync(&jme->rxempty_bh_work);
>
> I think the above should be:
>
>           jme->rxempty_bh_work_queued = disable_work_sync(&jme->rxempty_bh_work);
>
> [...]
> > @@ -1326,22 +1326,23 @@ static void jme_link_change_work(struct
> > work_struct *work)
> >                 jme_start_shutdown_timer(jme);
> >         }
> >
> > -       goto out_enable_tasklet;
> > +       goto out_enable_bh_work;
> >
> >  err_out_free_rx_resources:
> >         jme_free_rx_resources(jme);
> > -out_enable_tasklet:
> > -       tasklet_enable(&jme->txclean_task);
> > -       tasklet_enable(&jme->rxclean_task);
> > -       tasklet_enable(&jme->rxempty_task);
> > +out_enable_bh_work:
> > +       enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
> > +       enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
> > +       if (jme->rxempty_bh_work_queued)
> > +               enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
>
> Missing:
>
>           else
>                 enable_work(system_bh_wq, &jme->rxempty_bh_work);
>
> [...]
> > @@ -3180,9 +3182,9 @@ jme_suspend(struct device *dev)
> >         netif_stop_queue(netdev);
> >         jme_stop_irq(jme);
> >
> > -       tasklet_disable(&jme->txclean_task);
> > -       tasklet_disable(&jme->rxclean_task);
> > -       tasklet_disable(&jme->rxempty_task);
> > +       disable_work_sync(&jme->txclean_bh_work);
> > +       disable_work_sync(&jme->rxclean_bh_work);
> > +       disable_work_sync(&jme->rxempty_bh_work);
>
> should be:
>
>           jme->rxempty_bh_work_queued = disable_work_sync(&jme->rxempty_bh_work);
>
>
> >
> > @@ -3198,9 +3200,10 @@ jme_suspend(struct device *dev)
> >                 jme->phylink = 0;
> >         }
> >
> > -       tasklet_enable(&jme->txclean_task);
> > -       tasklet_enable(&jme->rxclean_task);
> > -       tasklet_enable(&jme->rxempty_task);
> > +       enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
> > +       enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
> > +       jme->rxempty_bh_work_queued = true;
> > +       enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
>
> should be:
>
>         if (jme->rxempty_bh_work_queued)
>                 enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
>         else
>                 enable_work(system_bh_wq, &jme->rxempty_bh_work);
>
> I think the above ones are the only places where you need to touch
> 'rxempty_bh_work_queued'.
>
>
> [...]
> >   Do we need a flag for rxclean and txclean too?
>
> Functionally speaking I don't think it will be necessary, as
> rxclean_bh_work() and txclean_bh_work() don't emit warnings on spurious
> invocation.
>
> Thanks,
>
> Paolo
>

Thank you very much. Will send out v3 later today with these changes.
Note: it will be as follows, since enable_work() does not take a workqueue argument.

+  if (jme->rxempty_bh_work_queued)
+                 enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
+         else
-                 enable_work(system_bh_wq, &jme->rxempty_bh_work);
+                enable_work(&jme->rxempty_bh_work);

Thanks,
Allen
>
Paolo Abeni July 16, 2024, 8:47 a.m. UTC | #5
On 7/15/24 19:50, Allen wrote:
> Thank you very much. Will send out v3 later today with these changes.
> Note: it will be as follows, since enable_work() does not take a workqueue argument.
> 
> +  if (jme->rxempty_bh_work_queued)
> +                 enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
> +         else
> -                 enable_work(system_bh_wq, &jme->rxempty_bh_work);
> +                enable_work(&jme->rxempty_bh_work);

Yup, sorry I was very hasty.

More important topic: net-next is currently closed for the merge window. 
You will have to wait to post the new revision of this series until we 
re-open net-next in ~2w.

Thanks,

Paolo
Allen July 17, 2024, 4:55 p.m. UTC | #6
> > Thank you very much. Will send out v3 later today with these changes.
> > Note, it will be as follows, enable_work() does not have workqueue type.
> >
> > +  if (jme->rxempty_bh_work_queued)
> > +                 enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
> > +         else
> > -                 enable_work(system_bh_wq, &jme->rxempty_bh_work);
> > +                enable_work(&jme->rxempty_bh_work);
>
> Yup, sorry I was very hasty.
>
> More important topic: net-next is currently closed for the merge window.
> You will have to wait to post the new revision of this series until we
> re-open net-next in ~2w.
>

Noted. And Thank you for the heads up.
Meanwhile, I will prepare the second series which can go out for review in
two weeks.

Thanks.


       - Allen
diff mbox series

Patch

diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b06e24562973..b1a92b851b3b 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1141,7 +1141,7 @@  jme_dynamic_pcc(struct jme_adapter *jme)
 
 	if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
 		if (dpi->attempt < dpi->cur)
-			tasklet_schedule(&jme->rxclean_task);
+			queue_work(system_bh_wq, &jme->rxclean_bh_work);
 		jme_set_rx_pcc(jme, dpi->attempt);
 		dpi->cur = dpi->attempt;
 		dpi->cnt = 0;
@@ -1182,9 +1182,9 @@  jme_shutdown_nic(struct jme_adapter *jme)
 }
 
 static void
-jme_pcc_tasklet(struct tasklet_struct *t)
+jme_pcc_bh_work(struct work_struct *work)
 {
-	struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
+	struct jme_adapter *jme = from_work(jme, work, pcc_bh_work);
 	struct net_device *netdev = jme->dev;
 
 	if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
@@ -1282,9 +1282,9 @@  static void jme_link_change_work(struct work_struct *work)
 		jme_stop_shutdown_timer(jme);
 
 	jme_stop_pcc_timer(jme);
-	tasklet_disable(&jme->txclean_task);
-	tasklet_disable(&jme->rxclean_task);
-	tasklet_disable(&jme->rxempty_task);
+	disable_work_sync(&jme->txclean_bh_work);
+	disable_work_sync(&jme->rxclean_bh_work);
+	disable_work_sync(&jme->rxempty_bh_work);
 
 	if (netif_carrier_ok(netdev)) {
 		jme_disable_rx_engine(jme);
@@ -1304,7 +1304,7 @@  static void jme_link_change_work(struct work_struct *work)
 		rc = jme_setup_rx_resources(jme);
 		if (rc) {
 			pr_err("Allocating resources for RX error, Device STOPPED!\n");
-			goto out_enable_tasklet;
+			goto out_enable_bh_work;
 		}
 
 		rc = jme_setup_tx_resources(jme);
@@ -1326,22 +1326,22 @@  static void jme_link_change_work(struct work_struct *work)
 		jme_start_shutdown_timer(jme);
 	}
 
-	goto out_enable_tasklet;
+	goto out_enable_bh_work;
 
 err_out_free_rx_resources:
 	jme_free_rx_resources(jme);
-out_enable_tasklet:
-	tasklet_enable(&jme->txclean_task);
-	tasklet_enable(&jme->rxclean_task);
-	tasklet_enable(&jme->rxempty_task);
+out_enable_bh_work:
+	enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
+	enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
+	enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
 out:
 	atomic_inc(&jme->link_changing);
 }
 
 static void
-jme_rx_clean_tasklet(struct tasklet_struct *t)
+jme_rx_clean_bh_work(struct work_struct *work)
 {
-	struct jme_adapter *jme = from_tasklet(jme, t, rxclean_task);
+	struct jme_adapter *jme = from_work(jme, work, rxclean_bh_work);
 	struct dynpcc_info *dpi = &(jme->dpi);
 
 	jme_process_receive(jme, jme->rx_ring_size);
@@ -1374,9 +1374,9 @@  jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
 }
 
 static void
-jme_rx_empty_tasklet(struct tasklet_struct *t)
+jme_rx_empty_bh_work(struct work_struct *work)
 {
-	struct jme_adapter *jme = from_tasklet(jme, t, rxempty_task);
+	struct jme_adapter *jme = from_work(jme, work, rxempty_bh_work);
 
 	if (unlikely(atomic_read(&jme->link_changing) != 1))
 		return;
@@ -1386,7 +1386,7 @@  jme_rx_empty_tasklet(struct tasklet_struct *t)
 
 	netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");
 
-	jme_rx_clean_tasklet(&jme->rxclean_task);
+	jme_rx_clean_bh_work(&jme->rxclean_bh_work);
 
 	while (atomic_read(&jme->rx_empty) > 0) {
 		atomic_dec(&jme->rx_empty);
@@ -1410,9 +1410,9 @@  jme_wake_queue_if_stopped(struct jme_adapter *jme)
 
 }
 
-static void jme_tx_clean_tasklet(struct tasklet_struct *t)
+static void jme_tx_clean_bh_work(struct work_struct *work)
 {
-	struct jme_adapter *jme = from_tasklet(jme, t, txclean_task);
+	struct jme_adapter *jme = from_work(jme, work, txclean_bh_work);
 	struct jme_ring *txring = &(jme->txring[0]);
 	struct txdesc *txdesc = txring->desc;
 	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
@@ -1510,12 +1510,12 @@  jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
 
 	if (intrstat & INTR_TMINTR) {
 		jwrite32(jme, JME_IEVE, INTR_TMINTR);
-		tasklet_schedule(&jme->pcc_task);
+		queue_work(system_bh_wq, &jme->pcc_bh_work);
 	}
 
 	if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
 		jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
-		tasklet_schedule(&jme->txclean_task);
+		queue_work(system_bh_wq, &jme->txclean_bh_work);
 	}
 
 	if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
@@ -1538,9 +1538,9 @@  jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
 	} else {
 		if (intrstat & INTR_RX0EMP) {
 			atomic_inc(&jme->rx_empty);
-			tasklet_hi_schedule(&jme->rxempty_task);
+			queue_work(system_bh_highpri_wq, &jme->rxempty_bh_work);
 		} else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
-			tasklet_hi_schedule(&jme->rxclean_task);
+			queue_work(system_bh_highpri_wq, &jme->rxclean_bh_work);
 		}
 	}
 
@@ -1826,9 +1826,9 @@  jme_open(struct net_device *netdev)
 	jme_clear_pm_disable_wol(jme);
 	JME_NAPI_ENABLE(jme);
 
-	tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
-	tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
-	tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);
+	INIT_WORK(&jme->txclean_bh_work, jme_tx_clean_bh_work);
+	INIT_WORK(&jme->rxclean_bh_work, jme_rx_clean_bh_work);
+	INIT_WORK(&jme->rxempty_bh_work, jme_rx_empty_bh_work);
 
 	rc = jme_request_irq(jme);
 	if (rc)
@@ -1914,9 +1914,9 @@  jme_close(struct net_device *netdev)
 	JME_NAPI_DISABLE(jme);
 
 	cancel_work_sync(&jme->linkch_task);
-	tasklet_kill(&jme->txclean_task);
-	tasklet_kill(&jme->rxclean_task);
-	tasklet_kill(&jme->rxempty_task);
+	cancel_work_sync(&jme->txclean_bh_work);
+	cancel_work_sync(&jme->rxclean_bh_work);
+	cancel_work_sync(&jme->rxempty_bh_work);
 
 	jme_disable_rx_engine(jme);
 	jme_disable_tx_engine(jme);
@@ -3020,7 +3020,7 @@  jme_init_one(struct pci_dev *pdev,
 	atomic_set(&jme->tx_cleaning, 1);
 	atomic_set(&jme->rx_empty, 1);
 
-	tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
+	INIT_WORK(&jme->pcc_bh_work, jme_pcc_bh_work);
 	INIT_WORK(&jme->linkch_task, jme_link_change_work);
 	jme->dpi.cur = PCC_P1;
 
@@ -3180,9 +3180,9 @@  jme_suspend(struct device *dev)
 	netif_stop_queue(netdev);
 	jme_stop_irq(jme);
 
-	tasklet_disable(&jme->txclean_task);
-	tasklet_disable(&jme->rxclean_task);
-	tasklet_disable(&jme->rxempty_task);
+	disable_work_sync(&jme->txclean_bh_work);
+	disable_work_sync(&jme->rxclean_bh_work);
+	disable_work_sync(&jme->rxempty_bh_work);
 
 	if (netif_carrier_ok(netdev)) {
 		if (test_bit(JME_FLAG_POLL, &jme->flags))
@@ -3198,9 +3198,9 @@  jme_suspend(struct device *dev)
 		jme->phylink = 0;
 	}
 
-	tasklet_enable(&jme->txclean_task);
-	tasklet_enable(&jme->rxclean_task);
-	tasklet_enable(&jme->rxempty_task);
+	enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
+	enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
+	enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
 
 	jme_powersave_phy(jme);
 
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 860494ff3714..73a8a1438340 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -406,11 +406,11 @@  struct jme_adapter {
 	spinlock_t		phy_lock;
 	spinlock_t		macaddr_lock;
 	spinlock_t		rxmcs_lock;
-	struct tasklet_struct	rxempty_task;
-	struct tasklet_struct	rxclean_task;
-	struct tasklet_struct	txclean_task;
+	struct work_struct	rxempty_bh_work;
+	struct work_struct	rxclean_bh_work;
+	struct work_struct	txclean_bh_work;
 	struct work_struct	linkch_task;
-	struct tasklet_struct	pcc_task;
+	struct work_struct	pcc_bh_work;
 	unsigned long		flags;
 	u32			reg_txcs;
 	u32			reg_txpfc;