@@ -4146,10 +4146,11 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
}
-static void ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
+static void ena_calc_io_queue_size(struct ena_adapter *adapter,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
- struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
- struct ena_com_dev *ena_dev = ctx->ena_dev;
+ struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
u32 max_tx_queue_size;
u32 max_rx_queue_size;
@@ -4157,7 +4158,7 @@ static void ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
struct ena_admin_queue_ext_feature_fields *max_queue_ext =
- &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
+ &get_feat_ctx->max_queue_ext.max_queue_ext;
max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
max_queue_ext->max_rx_sq_depth);
max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
@@ -4169,13 +4170,13 @@ static void ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
max_tx_queue_size = min_t(u32, max_tx_queue_size,
max_queue_ext->max_tx_sq_depth);
- ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queue_ext->max_per_packet_tx_descs);
- ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queue_ext->max_per_packet_rx_descs);
+ adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queue_ext->max_per_packet_tx_descs);
+ adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queue_ext->max_per_packet_rx_descs);
} else {
struct ena_admin_queue_feature_desc *max_queues =
- &ctx->get_feat_ctx->max_queues;
+ &get_feat_ctx->max_queues;
max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
max_queues->max_sq_depth);
max_tx_queue_size = max_queues->max_cq_depth;
@@ -4187,10 +4188,10 @@ static void ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
max_tx_queue_size = min_t(u32, max_tx_queue_size,
max_queues->max_sq_depth);
- ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queues->max_packet_tx_descs);
- ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queues->max_packet_rx_descs);
+ adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queues->max_packet_tx_descs);
+ adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queues->max_packet_rx_descs);
}
max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
@@ -4204,10 +4205,10 @@ static void ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
tx_queue_size = rounddown_pow_of_two(tx_queue_size);
rx_queue_size = rounddown_pow_of_two(rx_queue_size);
- ctx->max_tx_queue_size = max_tx_queue_size;
- ctx->max_rx_queue_size = max_rx_queue_size;
- ctx->tx_queue_size = tx_queue_size;
- ctx->rx_queue_size = rx_queue_size;
+ adapter->max_tx_ring_size = max_tx_queue_size;
+ adapter->max_rx_ring_size = max_rx_queue_size;
+ adapter->requested_tx_ring_size = tx_queue_size;
+ adapter->requested_rx_ring_size = rx_queue_size;
}
/* ena_probe - Device Initialization Routine
@@ -4222,7 +4223,6 @@ static void ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
*/
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ena_calc_queue_size_ctx calc_queue_ctx = {};
struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_com_dev *ena_dev = NULL;
struct ena_adapter *adapter;
@@ -4307,10 +4307,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_device_destroy;
}
- calc_queue_ctx.ena_dev = ena_dev;
- calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
- calc_queue_ctx.pdev = pdev;
-
/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
* Updated during device initialization with the real granularity
*/
@@ -4318,7 +4314,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
- ena_calc_io_queue_size(&calc_queue_ctx);
+ ena_calc_io_queue_size(adapter, &get_feat_ctx);
if (unlikely(!max_num_io_queues)) {
rc = -EFAULT;
goto err_device_destroy;
@@ -4328,13 +4324,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->reset_reason = ENA_REGS_RESET_NORMAL;
- adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
- adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
- adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
- adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
- adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
- adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
-
adapter->num_io_queues = max_num_io_queues;
adapter->max_num_io_queues = max_num_io_queues;
adapter->last_monitored_tx_qid = 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -140,18 +140,6 @@ struct ena_napi {
struct dim dim;
};
-struct ena_calc_queue_size_ctx {
- struct ena_com_dev_get_features_ctx *get_feat_ctx;
- struct ena_com_dev *ena_dev;
- struct pci_dev *pdev;
- u32 tx_queue_size;
- u32 rx_queue_size;
- u32 max_tx_queue_size;
- u32 max_rx_queue_size;
- u16 max_tx_sgl_size;
- u16 max_rx_sgl_size;
-};
-
struct ena_tx_buffer {
struct sk_buff *skb;
/* num of ena desc for this specific skb