[net-next,02/12] ibmvnic: Introduce indirect subordinate Command Response Queue buffer

Message ID: 1605208207-1896-3-git-send-email-tlfalcon@linux.ibm.com (mailing list archive)
State: Changes Requested
Delegated to: Netdev Maintainers
Series: ibmvnic: Performance improvements and other updates

Commit Message

Thomas Falcon Nov. 12, 2020, 7:09 p.m. UTC
This patch introduces the infrastructure to send batched subordinate
Command Response Queue descriptors, which are used by the ibmvnic
driver to send TX frame and RX buffer descriptors.

Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>
---
 drivers/net/ethernet/ibm/ibmvnic.c | 19 +++++++++++++++++++
 drivers/net/ethernet/ibm/ibmvnic.h | 10 ++++++++++
 2 files changed, 29 insertions(+)
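This patch only adds the indirect buffer itself; later patches in the series
fill it and submit it. As a rough sketch of the intended use (illustrative
only, the helper name below is not part of this patch), staged descriptors
sit in the DMA-coherent indir_arr and are handed to the hypervisor in a
single H_SEND_SUB_CRQ_INDIRECT hcall rather than one H_SEND_SUB_CRQ call per
descriptor:

static int ibmvnic_flush_ind_buf(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_sub_crq_queue *scrq)
{
	/* Illustrative only: submit everything staged so far in one hcall. */
	struct ibmvnic_ind_xmit_queue *ind_buf = &scrq->ind_buf;
	u64 entries = (u64)ind_buf->index;
	int rc;

	if (!entries)
		return 0;

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT,
				adapter->vdev->unit_address,
				scrq->handle, (u64)ind_buf->indir_dma,
				entries);
	ind_buf->index = 0;
	return rc;
}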

Comments

Brian King Nov. 13, 2020, 4:17 p.m. UTC | #1
Reviewed-by: Brian King <brking@linux.vnet.ibm.com>
Jakub Kicinski Nov. 14, 2020, 11:35 p.m. UTC | #2
On Thu, 12 Nov 2020 13:09:57 -0600 Thomas Falcon wrote:
> This patch introduces the infrastructure to send batched subordinate
> Command Response Queue descriptors, which are used by the ibmvnic
> driver to send TX frame and RX buffer descriptors.
> 
> Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>

> @@ -2957,6 +2963,19 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
>  
>  	scrq->adapter = adapter;
>  	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
> +	scrq->ind_buf.index = 0;
> +
> +	scrq->ind_buf.indir_arr =
> +		dma_alloc_coherent(dev,
> +				   IBMVNIC_IND_ARR_SZ,
> +				   &scrq->ind_buf.indir_dma,
> +				   GFP_KERNEL);
> +
> +	if (!scrq->ind_buf.indir_arr) {
> +		dev_err(dev, "Couldn't allocate indirect scrq buffer\n");

This warning/error is not necessary; a failed memory allocation will
already trigger an OOM message.

> +		goto reg_failed;

Don't you have to do something like 

                        rc = plpar_hcall_norets(H_FREE_SUB_CRQ,                 
                                                adapter->vdev->unit_address,    
                                                scrq->crq_num); 

?

> +	}
> +
>  	spin_lock_init(&scrq->lock);
>
Thomas Falcon Nov. 16, 2020, 6:18 p.m. UTC | #3
On 11/14/20 5:35 PM, Jakub Kicinski wrote:
> On Thu, 12 Nov 2020 13:09:57 -0600 Thomas Falcon wrote:
>> This patch introduces the infrastructure to send batched subordinate
>> Command Response Queue descriptors, which are used by the ibmvnic
>> driver to send TX frame and RX buffer descriptors.
>>
>> Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>
>> @@ -2957,6 +2963,19 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
>>   
>>   	scrq->adapter = adapter;
>>   	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
>> +	scrq->ind_buf.index = 0;
>> +
>> +	scrq->ind_buf.indir_arr =
>> +		dma_alloc_coherent(dev,
>> +				   IBMVNIC_IND_ARR_SZ,
>> +				   &scrq->ind_buf.indir_dma,
>> +				   GFP_KERNEL);
>> +
>> +	if (!scrq->ind_buf.indir_arr) {
>> +		dev_err(dev, "Couldn't allocate indirect scrq buffer\n");
> This warning/error is not necessary; a failed memory allocation will
> already trigger an OOM message.
Thanks, I can fix that in a v2.
>
>> +		goto reg_failed;
> Don't you have to do something like
>
>                          rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
>                                                  adapter->vdev->unit_address,
>                                                  scrq->crq_num);
>
> ?

Yes, you're right, I will include that in a v2 also.

>> +	}
>> +
>>   	spin_lock_init(&scrq->lock);
>>
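For reference, the cleanup Jakub asks for would look roughly like the sketch
below (an assumed shape of a v2; the indir_failed label is illustrative).
Because h_reg_sub_crq() has already registered the queue with the hypervisor
by the time dma_alloc_coherent() fails, the queue has to be handed back with
H_FREE_SUB_CRQ before the existing unwinding runs:

	if (!scrq->ind_buf.indir_arr)
		goto indir_failed;

	/* ... rest of init_sub_crq_queue() ... */

indir_failed:
	/* The queue was already registered via h_reg_sub_crq(), so give it
	 * back to the hypervisor before falling through to the existing
	 * reg_failed cleanup.
	 */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_failed: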

Patch

diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5647f54bf387..dd9ca06f355b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2860,6 +2860,7 @@  static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
 	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
 	atomic_set(&scrq->used, 0);
 	scrq->cur = 0;
+	scrq->ind_buf.index = 0;
 
 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
 			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
@@ -2911,6 +2912,11 @@  static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
 		}
 	}
 
+	dma_free_coherent(dev,
+			  IBMVNIC_IND_ARR_SZ,
+			  scrq->ind_buf.indir_arr,
+			  scrq->ind_buf.indir_dma);
+
 	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
 			 DMA_BIDIRECTIONAL);
 	free_pages((unsigned long)scrq->msgs, 2);
@@ -2957,6 +2963,19 @@  static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 
 	scrq->adapter = adapter;
 	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
+	scrq->ind_buf.index = 0;
+
+	scrq->ind_buf.indir_arr =
+		dma_alloc_coherent(dev,
+				   IBMVNIC_IND_ARR_SZ,
+				   &scrq->ind_buf.indir_dma,
+				   GFP_KERNEL);
+
+	if (!scrq->ind_buf.indir_arr) {
+		dev_err(dev, "Couldn't allocate indirect scrq buffer\n");
+		goto reg_failed;
+	}
+
 	spin_lock_init(&scrq->lock);
 
 	netdev_dbg(adapter->netdev,
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 217dcc7ded70..05bf212d387d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -31,6 +31,7 @@ 
 #define IBMVNIC_BUFFS_PER_POOL	100
 #define IBMVNIC_MAX_QUEUES	16
 #define IBMVNIC_MAX_QUEUE_SZ   4096
+#define IBMVNIC_MAX_IND_DESCS  128
 
 #define IBMVNIC_TSO_BUF_SZ	65536
 #define IBMVNIC_TSO_BUFS	64
@@ -861,6 +862,14 @@  union sub_crq {
 	struct ibmvnic_rx_buff_add_desc rx_add;
 };
 
+#define IBMVNIC_IND_ARR_SZ	(IBMVNIC_MAX_IND_DESCS * sizeof(union sub_crq))
+
+struct ibmvnic_ind_xmit_queue {
+	union sub_crq *indir_arr;
+	dma_addr_t indir_dma;
+	int index;
+};
+
 struct ibmvnic_sub_crq_queue {
 	union sub_crq *msgs;
 	int size, cur;
@@ -873,6 +882,7 @@  struct ibmvnic_sub_crq_queue {
 	spinlock_t lock;
 	struct sk_buff *rx_skb_top;
 	struct ibmvnic_adapter *adapter;
+	struct ibmvnic_ind_xmit_queue ind_buf;
 	atomic_t used;
 	char name[32];
 	u64 handle;
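
As a usage sketch for the new structure (not part of this patch; the helper
below is illustrative): the transmit path would claim the next free slot in
indir_arr, fill the descriptor in place, and flush the batch once the frame's
descriptors are staged or all IBMVNIC_MAX_IND_DESCS slots are used.

/* Illustrative only: return the next free slot in the indirect buffer,
 * or NULL if the caller must flush the batch first.
 */
static union sub_crq *ibmvnic_get_ind_slot(struct ibmvnic_sub_crq_queue *scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_buf = &scrq->ind_buf;

	if (ind_buf->index >= IBMVNIC_MAX_IND_DESCS)
		return NULL;

	return &ind_buf->indir_arr[ind_buf->index++];
}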