Message ID | 20230922161603.3461104-1-michal.kubiak@intel.com (mailing list archive) |
---|---|
State | Not Applicable |
Delegated to: | Netdev Maintainers |
Headers | show |
Series | [iwl-next] idpf: set scheduling mode for completion queue | expand |
On 9/22/2023 9:16 AM, Michal Kubiak wrote: > The HW must be programmed differently for queue-based scheduling mode. > To program the completion queue context correctly, the control plane > must know the scheduling mode not only for the Tx queue, but also for > the completion queue. > Unfortunately, currently the driver sets the scheduling mode only for > the Tx queues. > > Propagate the scheduling mode data for the completion queue as > well when sending the queue configuration messages. > > Fixes: 1c325aac10a8 ("idpf: configure resources for TX queues") > Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com> > Signed-off-by: Michal Kubiak <michal.kubiak@intel.com> > --- > drivers/net/ethernet/intel/idpf/idpf_txrx.c | 10 ++++++++-- > drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 8 +++++++- > 2 files changed, 15 insertions(+), 3 deletions(-) > > diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c > index 6fa79898c42c..58c5412d3173 100644 > --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c > +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c > @@ -1160,6 +1160,7 @@ static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q) > */ > static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) > { > + bool flow_sch_en; > int err, i; > > vport->txq_grps = kcalloc(vport->num_txq_grp, > @@ -1167,6 +1168,9 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) > if (!vport->txq_grps) > return -ENOMEM; > > + flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, > + VIRTCHNL2_CAP_SPLITQ_QSCHED); > + > for (i = 0; i < vport->num_txq_grp; i++) { > struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; > struct idpf_adapter *adapter = vport->adapter; > @@ -1195,8 +1199,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) > q->txq_grp = tx_qgrp; > hash_init(q->sched_buf_hash); > > - if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, > - VIRTCHNL2_CAP_SPLITQ_QSCHED)) > + if (flow_sch_en) > set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags); > } > > @@ -1215,6 +1218,9 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) > tx_qgrp->complq->desc_count = vport->complq_desc_count; > tx_qgrp->complq->vport = vport; > tx_qgrp->complq->txq_grp = tx_qgrp; > + > + if (flow_sch_en) > + __set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags); > } > > return 0; > diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c > index 9bc85b2f1709..e276b5360c2e 100644 > --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c > +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c > @@ -1473,7 +1473,7 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) > /* Populate the queue info buffer with all queue context info */ > for (i = 0; i < vport->num_txq_grp; i++) { > struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; > - int j; > + int j, sched_mode; > > for (j = 0; j < tx_qgrp->num_txq; j++, k++) { > qi[k].queue_id = > @@ -1514,6 +1514,12 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) > qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count); > qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma); > > + if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags)) > + sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW; > + else > + sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE; > + qi[k].sched_mode = cpu_to_le16(sched_mode); > + > k++; > } > Reviewed-by: Alan Brady <alan.brady@intel.com>
On 9/22/23 18:16, Michal Kubiak wrote: > The HW must be programmed differently for queue-based scheduling mode. > To program the completion queue context correctly, the control plane > must know the scheduling mode not only for the Tx queue, but also for > the completion queue. > Unfortunately, currently the driver sets the scheduling mode only for > the Tx queues. > > Propagate the scheduling mode data for the completion queue as > well when sending the queue configuration messages. > > Fixes: 1c325aac10a8 ("idpf: configure resources for TX queues") > Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com> > Signed-off-by: Michal Kubiak <michal.kubiak@intel.com> > --- > drivers/net/ethernet/intel/idpf/idpf_txrx.c | 10 ++++++++-- > drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 8 +++++++- > 2 files changed, 15 insertions(+), 3 deletions(-) > Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
> -----Original Message----- > From: Intel-wired-lan <intel-wired-lan-bounces@osuosl.org> On Behalf Of > Michal Kubiak > Sent: Friday, September 22, 2023 9:16 AM > To: intel-wired-lan@lists.osuosl.org > Cc: Tantilov, Emil S <emil.s.tantilov@intel.com>; Zaremba, Larysa > <larysa.zaremba@intel.com>; netdev@vger.kernel.org; Hay, Joshua A > <joshua.a.hay@intel.com>; Lobakin, Aleksander > <aleksander.lobakin@intel.com>; Kubiak, Michal <michal.kubiak@intel.com>; > Brady, Alan <alan.brady@intel.com> > Subject: [Intel-wired-lan] [PATCH iwl-next] idpf: set scheduling mode for > completion queue > > The HW must be programmed differently for queue-based scheduling mode. > To program the completion queue context correctly, the control plane > must know the scheduling mode not only for the Tx queue, but also for > the completion queue. > Unfortunately, currently the driver sets the scheduling mode only for > the Tx queues. > > Propagate the scheduling mode data for the completion queue as > well when sending the queue configuration messages. > > Fixes: 1c325aac10a8 ("idpf: configure resources for TX queues") > Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com> > Signed-off-by: Michal Kubiak <michal.kubiak@intel.com> > --- > drivers/net/ethernet/intel/idpf/idpf_txrx.c | 10 ++++++++-- > drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 8 +++++++- > 2 files changed, 15 insertions(+), 3 deletions(-) > Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 6fa79898c42c..58c5412d3173 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -1160,6 +1160,7 @@ static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q) */ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) { + bool flow_sch_en; int err, i; vport->txq_grps = kcalloc(vport->num_txq_grp, @@ -1167,6 +1168,9 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) if (!vport->txq_grps) return -ENOMEM; + flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, + VIRTCHNL2_CAP_SPLITQ_QSCHED); + for (i = 0; i < vport->num_txq_grp; i++) { struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; struct idpf_adapter *adapter = vport->adapter; @@ -1195,8 +1199,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) q->txq_grp = tx_qgrp; hash_init(q->sched_buf_hash); - if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, - VIRTCHNL2_CAP_SPLITQ_QSCHED)) + if (flow_sch_en) set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags); } @@ -1215,6 +1218,9 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) tx_qgrp->complq->desc_count = vport->complq_desc_count; tx_qgrp->complq->vport = vport; tx_qgrp->complq->txq_grp = tx_qgrp; + + if (flow_sch_en) + __set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags); } return 0; diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index 9bc85b2f1709..e276b5360c2e 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -1473,7 +1473,7 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) /* Populate the queue info buffer with all queue context info */ for (i = 0; i < vport->num_txq_grp; i++) { struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; - int j; + int j, sched_mode; for (j = 0; j < tx_qgrp->num_txq; j++, k++) { qi[k].queue_id = @@ -1514,6 +1514,12 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count); qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma); + if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags)) + sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW; + else + sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE; + qi[k].sched_mode = cpu_to_le16(sched_mode); + k++; }