@@ -3997,6 +3997,62 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
return 0;
}
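+/* Point the ring_mem descriptors of the RX and RX agg rings at this
+ * rxr's own BD pages, DMA mappings and SW buffer arrays. Used to set
+ * up a ring copy whose members were inherited via memcpy().
+ */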
+static void bnxt_init_rx_ring_struct(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_mem_info *rmem;
+ struct bnxt_ring_struct *ring;
+
+ ring = &rxr->rx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->rx_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_desc_ring;
+ rmem->dma_arr = rxr->rx_desc_mapping;
+ rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_buf_ring;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->rx_agg_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
+ rmem->dma_arr = rxr->rx_agg_desc_mapping;
+ rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_agg_ring;
+}
+
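+/* Clear every memory pointer the copy inherited from the live ring so
+ * that it does not alias the live page pool, BD pages or vmem; fresh
+ * backing memory is allocated for the copy afterwards.
+ */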
+static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_mem_info *rmem;
+ struct bnxt_ring_struct *ring;
+ int i;
+
+ rxr->page_pool->p.napi = NULL;
+ rxr->page_pool = NULL;
+
+ ring = &rxr->rx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->pg_tbl = NULL;
+ rmem->pg_tbl_map = 0;
+ for (i = 0; i < rmem->nr_pages; i++) {
+ rmem->pg_arr[i] = NULL;
+ rmem->dma_arr[i] = 0;
+ }
+ *rmem->vmem = NULL;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->pg_tbl = NULL;
+ rmem->pg_tbl_map = 0;
+ for (i = 0; i < rmem->nr_pages; i++) {
+ rmem->pg_arr[i] = NULL;
+ rmem->dma_arr[i] = 0;
+ }
+ *rmem->vmem = NULL;
+}
+
static void bnxt_init_ring_struct(struct bnxt *bp)
{
int i, j;
@@ -14935,6 +14991,253 @@ static const struct netdev_stat_ops bnxt_stat_ops = {
.get_base_stats = bnxt_get_base_stats,
};
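+/* Allocate the aggregation ring bitmap, one bit per agg ring entry
+ * (hence the divide by 8), used to track which SW slots hold a page.
+ */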
+static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ u16 mem_size;
+
+ rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+ if (!rxr->rx_agg_bmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
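+/* Allocate the memory needed to restart RX queue @idx: clone the live
+ * ring info into @qmem, give the clone its own page pool, BD rings
+ * and (with aggregation enabled) agg bitmap, and prefill the rings
+ * with buffers. The live queue keeps running untouched. Only rings
+ * beyond the RSS indirection table (ntuple-steered rings) can be
+ * restarted here.
+ */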
+static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+{
+ struct bnxt_rx_ring_info *rxr, *clone;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ring_struct *ring;
+ int rc;
+
+ if (bnxt_get_max_rss_ring(bp) >= idx)
+ return -EOPNOTSUPP;
+
+ rxr = &bp->rx_ring[idx];
+ clone = qmem;
+ memcpy(clone, rxr, sizeof(*rxr));
+ bnxt_init_rx_ring_struct(bp, clone);
+ bnxt_reset_rx_ring_struct(bp, clone);
+
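+ /* The clone starts with clean producer/consumer state */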
+ clone->rx_prod = 0;
+ clone->rx_agg_prod = 0;
+ clone->rx_sw_agg_prod = 0;
+ clone->rx_next_cons = 0;
+
+ rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
+ if (rc)
+ return rc;
+
+ ring = &clone->rx_ring_struct;
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_ring;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ ring = &clone->rx_agg_ring_struct;
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_agg_ring;
+
+ rc = bnxt_alloc_rx_agg_bmap(bp, clone);
+ if (rc)
+ goto err_free_rx_agg_ring;
+ }
+
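+ /* Initialize the BD rings and prefill them with fresh buffers */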
+ bnxt_init_one_rx_ring_rxbd(bp, clone);
+ bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
+
+ bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bnxt_alloc_one_rx_ring_page(bp, clone, idx);
+
+ return 0;
+
+err_free_rx_agg_ring:
+ bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
+err_free_rx_ring:
+ bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
+ clone->page_pool->p.napi = NULL;
+ page_pool_destroy(clone->page_pool);
+ clone->page_pool = NULL;
+ return rc;
+}
+
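+/* Release everything held by @qmem: buffers, page pool, BD rings and
+ * agg bitmap. Runs both when a restart is aborted and on the old ring
+ * info that bnxt_queue_stop() copied out of the live queue.
+ */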
+static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
+{
+ struct bnxt_rx_ring_info *rxr = qmem;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ring_struct *ring;
+
+ bnxt_free_one_rx_ring(bp, rxr);
+ bnxt_free_one_rx_agg_ring(bp, rxr);
+
+ /* At this point, this NAPI instance has another page pool associated
+ * with it. Disconnect here before freeing the old page pool to avoid
+ * warnings.
+ */
+ rxr->page_pool->p.napi = NULL;
+ page_pool_destroy(rxr->page_pool);
+ rxr->page_pool = NULL;
+
+ ring = &rxr->rx_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+
+ ring = &rxr->rx_agg_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+
+ kfree(rxr->rx_agg_bmap);
+ rxr->rx_agg_bmap = NULL;
+}
+
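+/* Transfer ownership of the BD pages, DMA mappings, vmem and agg
+ * bitmap from @src to @dst. The WARN_ON()s sanity-check that the two
+ * rings were created with identical geometry.
+ */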
+static void bnxt_copy_rx_ring(struct bnxt *bp,
+ struct bnxt_rx_ring_info *dst,
+ struct bnxt_rx_ring_info *src)
+{
+ struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
+ struct bnxt_ring_struct *dst_ring, *src_ring;
+ int i;
+
+ dst_ring = &dst->rx_ring_struct;
+ dst_rmem = &dst_ring->ring_mem;
+ src_ring = &src->rx_ring_struct;
+ src_rmem = &src_ring->ring_mem;
+
+ WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
+ WARN_ON(dst_rmem->page_size != src_rmem->page_size);
+ WARN_ON(dst_rmem->flags != src_rmem->flags);
+ WARN_ON(dst_rmem->depth != src_rmem->depth);
+ WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
+ WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
+
+ dst_rmem->pg_tbl = src_rmem->pg_tbl;
+ dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
+ *dst_rmem->vmem = *src_rmem->vmem;
+ for (i = 0; i < dst_rmem->nr_pages; i++) {
+ dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
+ dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
+ }
+
+ if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+ return;
+
+ dst_ring = &dst->rx_agg_ring_struct;
+ dst_rmem = &dst_ring->ring_mem;
+ src_ring = &src->rx_agg_ring_struct;
+ src_rmem = &src_ring->ring_mem;
+
+ WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
+ WARN_ON(dst_rmem->page_size != src_rmem->page_size);
+ WARN_ON(dst_rmem->flags != src_rmem->flags);
+ WARN_ON(dst_rmem->depth != src_rmem->depth);
+ WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
+ WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
+ WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
+
+ dst_rmem->pg_tbl = src_rmem->pg_tbl;
+ dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
+ *dst_rmem->vmem = *src_rmem->vmem;
+ for (i = 0; i < dst_rmem->nr_pages; i++) {
+ dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
+ dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
+ }
+
+ dst->rx_agg_bmap = src->rx_agg_bmap;
+}
+
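+/* Bring RX queue @idx back up with the buffers pre-allocated in
+ * @qmem: adopt the clone's state, page pool and rings, recreate the
+ * rings in FW, ring the doorbells and re-open the VNIC.
+ */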
+static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rx_ring_info *rxr, *clone;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_vnic_info *vnic;
+ int rc;
+
+ if (bnxt_get_max_rss_ring(bp) >= idx)
+ return -EOPNOTSUPP;
+
+ rxr = &bp->rx_ring[idx];
+ clone = qmem;
+
+ rxr->rx_prod = clone->rx_prod;
+ rxr->rx_agg_prod = clone->rx_agg_prod;
+ rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
+ rxr->rx_next_cons = clone->rx_next_cons;
+ rxr->page_pool = clone->page_pool;
+
+ bnxt_copy_rx_ring(bp, rxr, clone);
+
+ rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
+ if (rc)
+ return rc;
+ rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
+ if (rc)
+ goto err_free_hwrm_rx_ring;
+
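+ /* Publish the prefilled producer indices to the NIC */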
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+
+ napi_enable(&rxr->bnapi->napi);
+
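+ /* A non-zero MRU re-opens the ntuple VNIC for RX traffic; it was
+ * quiesced with an MRU of 0 in bnxt_queue_stop().
+ */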
+ vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
+ vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ rc = bnxt_hwrm_vnic_update(bp, vnic,
+ VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+ if (rc)
+ goto err_free_hwrm_rx_agg_ring;
+
+ cpr = &rxr->bnapi->cp_ring;
+ cpr->sw_stats->rx.rx_resets++;
+
+ return 0;
+
+err_free_hwrm_rx_agg_ring:
+ napi_disable(&rxr->bnapi->napi);
+ bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
+err_free_hwrm_rx_ring:
+ bnxt_hwrm_rx_ring_free(bp, rxr, false);
+ return rc;
+}
+
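+/* Quiesce RX queue @idx and hand its buffers over to @qmem so that
+ * bnxt_queue_mem_free() can release them once the restart completes.
+ */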
+static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_vnic_info *vnic;
+ int rc;
+
+ if (bnxt_get_max_rss_ring(bp) >= idx)
+ return -EOPNOTSUPP;
+
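+ /* Set the VNIC MRU to 0 so no new packets reach the ring before
+ * the HW rings are freed.
+ */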
+ vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
+ vnic->mru = 0;
+ rc = bnxt_hwrm_vnic_update(bp, vnic,
+ VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+ if (rc)
+ return rc;
+
+ rxr = &bp->rx_ring[idx];
+ napi_disable(&rxr->bnapi->napi);
+ bnxt_hwrm_rx_ring_free(bp, rxr, false);
+ bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
+ rxr->rx_next_cons = 0;
+
+ memcpy(qmem, rxr, sizeof(*rxr));
+ bnxt_init_rx_ring_struct(bp, qmem);
+
+ return 0;
+}
+
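+/* Hooks for the core queue management API. The core is expected to
+ * restart a queue by allocating new queue memory, stopping the old
+ * queue, starting it with the new memory and only then freeing the
+ * old memory, so that a failed allocation never takes down a running
+ * queue.
+ */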
+static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info),
+ .ndo_queue_mem_alloc = bnxt_queue_mem_alloc,
+ .ndo_queue_mem_free = bnxt_queue_mem_free,
+ .ndo_queue_start = bnxt_queue_start,
+ .ndo_queue_stop = bnxt_queue_stop,
+};
+
static void bnxt_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -15400,6 +15703,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->stat_ops = &bnxt_stat_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
+ dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
pci_set_drvdata(pdev, dev);
rc = bnxt_alloc_hwrm_resources(bp);