
[2/2] wifi: ath12k: Add support to allocate MLO global memory region

Message ID 20240730170910.3281816-3-quic_rajkbhag@quicinc.com (mailing list archive)
State Deferred
Delegated to: Kalle Valo
Series wifi: ath12k: Add support to allocate MLO global memory region

Commit Message

Raj Kumar Bhagat July 30, 2024, 5:09 p.m. UTC
From: Karthikeyan Periyasamy <quic_periyasa@quicinc.com>

To enable Multi-Link Operation (MLO), QCN9274 firmware requests an MLO
global memory region (MLO_GLOBAL_MEM_REGION_TYPE). This memory region
is shared across all the firmware instances (SoCs) participating in
MLO.

Hence, add support to allocate and free the MLO global memory region.
Allocate one MLO global memory region per ath12k_hw_group and assign
the same memory to all firmware instances in the same ath12k_hw_group.
WCN7850 firmware does not request this memory type, therefore this
change has no impact on WCN7850 devices.

Tested-on: QCN9274 hw2.0 PCI WLAN.WBE.1.1.1-00210-QCAHKSWPL_SILICONZ-1
Tested-on: WCN7850 hw2.0 PCI WLAN.HMT.1.0.c5-00481-QCAHMTSWPL_V1.0_V2.0_SILICONZ-3

Signed-off-by: Karthikeyan Periyasamy <quic_periyasa@quicinc.com>
Co-developed-by: Raj Kumar Bhagat <quic_rajkbhag@quicinc.com>
Signed-off-by: Raj Kumar Bhagat <quic_rajkbhag@quicinc.com>
---
 drivers/net/wireless/ath/ath12k/core.h |   7 ++
 drivers/net/wireless/ath/ath12k/qmi.c  | 118 ++++++++++++++++++++++---
 drivers/net/wireless/ath/ath12k/qmi.h  |   1 +
 3 files changed, 116 insertions(+), 10 deletions(-)
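
For illustration only, a minimal sketch of the sharing scheme described
above (not part of the patch; mlo_mem_get() is a hypothetical helper
name, and error handling is simplified): the first device in an
ath12k_hw_group allocates the MLO global region, and every other device
in the group is handed the same buffer.

static int mlo_mem_get(struct ath12k_base *ab, struct target_mem_chunk *chunk,
		       int idx)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct target_mem_chunk *mlo_chunk = &ag->mlo_mem.chunk[idx];

	if (!mlo_chunk->v.addr) {
		/* first requester in the group allocates the shared region */
		mlo_chunk->v.addr = dma_alloc_coherent(ab->dev, chunk->size,
						       &mlo_chunk->paddr,
						       GFP_KERNEL);
		if (!mlo_chunk->v.addr)
			return -ENOMEM;
		mlo_chunk->size = chunk->size;
	}

	/* every device in the group points at the same memory */
	chunk->paddr = mlo_chunk->paddr;
	chunk->v.addr = mlo_chunk->v.addr;

	return 0;
}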

Comments

Jeff Johnson July 31, 2024, 4:12 p.m. UTC | #1
On 7/30/2024 10:09 AM, Raj Kumar Bhagat wrote:
> From: Karthikeyan Periyasamy <quic_periyasa@quicinc.com>
> 
> To enable Multi-Link Operation (MLO), QCN9274 firmware requests an MLO
> global memory region (MLO_GLOBAL_MEM_REGION_TYPE). This memory region
> is shared across all the firmware instances (SoCs) participating in
> MLO.
> 
> Hence, add support to allocate and free the MLO global memory region.
> Allocate one MLO global memory region per ath12k_hw_group and assign
> the same memory to all firmware instances in the same ath12k_hw_group.
> WCN7850 firmware does not request this memory type, therefore this
> change has no impact on WCN7850 devices.
> 
> Tested-on: QCN9274 hw2.0 PCI WLAN.WBE.1.1.1-00210-QCAHKSWPL_SILICONZ-1
> Tested-on: WCN7850 hw2.0 PCI WLAN.HMT.1.0.c5-00481-QCAHMTSWPL_V1.0_V2.0_SILICONZ-3
> 
> Signed-off-by: Karthikeyan Periyasamy <quic_periyasa@quicinc.com>
> Co-developed-by: Raj Kumar Bhagat <quic_rajkbhag@quicinc.com>
> Signed-off-by: Raj Kumar Bhagat <quic_rajkbhag@quicinc.com>
> ---
>  drivers/net/wireless/ath/ath12k/core.h |   7 ++
>  drivers/net/wireless/ath/ath12k/qmi.c  | 118 ++++++++++++++++++++++---
>  drivers/net/wireless/ath/ath12k/qmi.h  |   1 +
>  3 files changed, 116 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
> index 27bd0a14b9f4..c8cd687847e7 100644
> --- a/drivers/net/wireless/ath/ath12k/core.h
> +++ b/drivers/net/wireless/ath/ath12k/core.h
> @@ -758,6 +758,12 @@ struct ath12k_soc_dp_stats {
>  	struct ath12k_soc_dp_tx_err_stats tx_err;
>  };
>  
> +struct ath12k_mlo_memory {
> +	struct target_mem_chunk chunk[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
> +	int mlo_mem_size;
> +	bool init_done;
> +};
> +
>  /* Holds info on the group of devices that are registered as a single wiphy */
>  struct ath12k_hw_group {
>  	struct list_head list;
> @@ -786,6 +792,7 @@ struct ath12k_hw_group {
>  	 */
>  	struct ath12k_hw *ah[ATH12K_GROUP_MAX_RADIO];
>  	u8 num_hw;
> +	struct ath12k_mlo_memory mlo_mem;
>  };
>  
>  /**
> diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
> index 11bf16eaadd9..da61ea60e5f8 100644
> --- a/drivers/net/wireless/ath/ath12k/qmi.c
> +++ b/drivers/net/wireless/ath/ath12k/qmi.c
> @@ -2350,19 +2350,61 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
>  	return ret;
>  }
>  
> +static void ath12k_qmi_free_mlo_mem_chunk(struct ath12k_base *ab,
> +					  struct target_mem_chunk *chunk,
> +					  int idx)
> +{
> +	struct ath12k_hw_group *ag = ab->ag;
> +	struct target_mem_chunk *mlo_chunk;
> +
> +	lockdep_assert_held(&ag->mutex_lock);
> +	if (!ag->mlo_mem.init_done || ag->num_started)
> +		return;
> +
> +	if (idx >= ARRAY_SIZE(ag->mlo_mem.chunk)) {
> +		ath12k_warn(ab, "invalid idx %d for MLO memory chunk free\n", idx);
> +		return;
> +	}
> +
> +	mlo_chunk = &ag->mlo_mem.chunk[idx];
> +	if (mlo_chunk->v.addr) {
> +		dma_free_coherent(ab->dev,
> +				  mlo_chunk->size,
> +				  mlo_chunk->v.addr,
> +				  mlo_chunk->paddr);
> +		mlo_chunk->v.addr = NULL;
> +	}
> +	mlo_chunk->paddr = 0;
> +	mlo_chunk->size = 0;
> +	chunk->v.addr = NULL;
> +	chunk->paddr = 0;
> +	chunk->size = 0;

Is there a reason to leave type, prev_size, and prev_type unchanged?
Just curious why you don't zero the entire struct instead of zeroing
only half of the individual members. But then again, why zero paddr and
size at all, given my observation below...
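
For example, a sketch of the alternative being suggested here (clearing
the whole group bookkeeping entry rather than individual members; same
behaviour otherwise):

	if (mlo_chunk->v.addr)
		dma_free_coherent(ab->dev, mlo_chunk->size,
				  mlo_chunk->v.addr, mlo_chunk->paddr);
	/* one memset covers size, type, paddr and v.addr in one go */
	memset(mlo_chunk, 0, sizeof(*mlo_chunk));
	chunk->v.addr = NULL;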

> +}
> +
>  static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
>  {
> -	int i;
> +	struct ath12k_hw_group *ag = ab->ag;
> +	int i, mlo_idx;
>  
> -	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
> +	for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
>  		if (!ab->qmi.target_mem[i].v.addr)
>  			continue;
>  
> -		dma_free_coherent(ab->dev,
> -				  ab->qmi.target_mem[i].prev_size,
> -				  ab->qmi.target_mem[i].v.addr,
> -				  ab->qmi.target_mem[i].paddr);
> -		ab->qmi.target_mem[i].v.addr = NULL;
> +		if (ab->qmi.target_mem[i].type == MLO_GLOBAL_MEM_REGION_TYPE) {
> +			ath12k_qmi_free_mlo_mem_chunk(ab,
> +						      &ab->qmi.target_mem[i],
> +						      mlo_idx++);
> +		} else {
> +			dma_free_coherent(ab->dev,
> +					  ab->qmi.target_mem[i].prev_size,
> +					  ab->qmi.target_mem[i].v.addr,
> +					  ab->qmi.target_mem[i].paddr);
> +			ab->qmi.target_mem[i].v.addr = NULL;

Note that here you only zero v.addr, so why zero paddr and size in the
code I commented on above? It seems there should be consistency.

> +		}
> +	}
> +	if (!ag->num_started && ag->mlo_mem.init_done) {
> +		ag->mlo_mem.init_done = false;
> +		ag->mlo_mem.mlo_mem_size = 0;
>  	}
>  }
>  
> @@ -2409,12 +2451,21 @@ static int ath12k_qmi_alloc_chunk(struct ath12k_base *ab,
>  
>  static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
>  {
> -	int i, ret = 0;
> -	struct target_mem_chunk *chunk;
> +	struct ath12k_hw_group *ag = ab->ag;
> +	struct target_mem_chunk *chunk, *mlo_chunk;
> +	int i, mlo_idx, ret;
> +	int mlo_size = 0;
> +
> +	mutex_lock(&ag->mutex_lock);

Since you hold this for the entire function, it would be a good candidate for:
	guard(mutex)(&ag->mutex_lock);

Then you can get rid of the mutex_unlock() calls and can directly return
in places where no additional cleanup is required.
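
A sketch of what that could look like (assuming the scope-based cleanup
helpers from <linux/cleanup.h>, exposed for mutexes via DEFINE_GUARD in
<linux/mutex.h>; fragment only, the rest of the function is unchanged):

	/* mutex is dropped automatically whenever the function returns */
	guard(mutex)(&ag->mutex_lock);

	...

	if (ret) {
		ath12k_qmi_free_target_mem_chunk(ab);
		return ret;	/* no explicit mutex_unlock() needed */
	}

	return 0;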

> +
> +	if (!ag->mlo_mem.init_done) {
> +		memset(ag->mlo_mem.chunk, 0, sizeof(ag->mlo_mem.chunk));
> +		ag->mlo_mem.init_done = true;
> +	}
>  
>  	ab->qmi.target_mem_delayed = false;
>  
> -	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
> +	for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
>  		chunk = &ab->qmi.target_mem[i];
>  
>  		/* Allocate memory for the region and the functionality supported
> @@ -2427,6 +2478,37 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
>  		case PAGEABLE_MEM_REGION_TYPE:
>  		case CALDB_MEM_REGION_TYPE:
>  			ret = ath12k_qmi_alloc_chunk(ab, chunk);
> +			if (ret)
> +				goto out;
> +			break;
> +		case MLO_GLOBAL_MEM_REGION_TYPE:
> +			mlo_size += chunk->size;
> +			if (ag->mlo_mem.mlo_mem_size &&
> +			    mlo_size > ag->mlo_mem.mlo_mem_size) {
> +				ret = -EINVAL;
> +				ath12k_err(ab, "qmi MLO mem err, req size %d is more than alloc size %d",
> +					   mlo_size, ag->mlo_mem.mlo_mem_size);
> +				goto out;
> +			}
> +			mlo_chunk = &ag->mlo_mem.chunk[mlo_idx];
> +			if (mlo_chunk->paddr) {
> +				if (chunk->size != mlo_chunk->size) {
> +					ret = -EINVAL;
> +					ath12k_err(ab, "qmi MLO mem err, mlo_idx %d, req chunk %d, alloc chunk %d",
> +						   mlo_idx, chunk->size, mlo_chunk->size);
> +					goto out;
> +				}
> +			} else {
> +				mlo_chunk->size = chunk->size;
> +				mlo_chunk->type = chunk->type;
> +				ret = ath12k_qmi_alloc_chunk(ab, mlo_chunk);
> +				if (ret)
> +					goto out;
> +				memset(mlo_chunk->v.addr, 0, mlo_chunk->size);
> +			}
> +			chunk->paddr = mlo_chunk->paddr;
> +			chunk->v.addr = mlo_chunk->v.addr;
> +			mlo_idx++;
>  			break;
>  		default:
>  			ath12k_warn(ab, "memory type %u not supported\n",
> @@ -2436,6 +2518,22 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
>  			break;
>  		}
>  	}
> +
> +	if (!ag->mlo_mem.mlo_mem_size) {
> +		ag->mlo_mem.mlo_mem_size = mlo_size;
> +	} else if (ag->mlo_mem.mlo_mem_size != mlo_size) {
> +		ath12k_err(ab, "qmi MLO mem err, expected size %d, req size %d",
> +			   ag->mlo_mem.mlo_mem_size, mlo_size);
> +		ret = -EINVAL;
> +		goto out;
> +	}
> +
> +	mutex_unlock(&ag->mutex_lock);
> +	return 0;
> +
> +out:
> +	ath12k_qmi_free_target_mem_chunk(ab);
> +	mutex_unlock(&ag->mutex_lock);
>  	return ret;
>  }
>  
> diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
> index 0dfcbd8cb59b..d8f78794e67e 100644
> --- a/drivers/net/wireless/ath/ath12k/qmi.h
> +++ b/drivers/net/wireless/ath/ath12k/qmi.h
> @@ -167,6 +167,7 @@ enum ath12k_qmi_target_mem {
>  	BDF_MEM_REGION_TYPE = 0x2,
>  	M3_DUMP_REGION_TYPE = 0x3,
>  	CALDB_MEM_REGION_TYPE = 0x4,
> +	MLO_GLOBAL_MEM_REGION_TYPE = 0x8,
>  	PAGEABLE_MEM_REGION_TYPE = 0x9,
>  };
>

Patch

diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 27bd0a14b9f4..c8cd687847e7 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -758,6 +758,12 @@  struct ath12k_soc_dp_stats {
 	struct ath12k_soc_dp_tx_err_stats tx_err;
 };
 
+struct ath12k_mlo_memory {
+	struct target_mem_chunk chunk[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+	int mlo_mem_size;
+	bool init_done;
+};
+
 /* Holds info on the group of devices that are registered as a single wiphy */
 struct ath12k_hw_group {
 	struct list_head list;
@@ -786,6 +792,7 @@  struct ath12k_hw_group {
 	 */
 	struct ath12k_hw *ah[ATH12K_GROUP_MAX_RADIO];
 	u8 num_hw;
+	struct ath12k_mlo_memory mlo_mem;
 };
 
 /**
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 11bf16eaadd9..da61ea60e5f8 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -2350,19 +2350,61 @@  static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
 	return ret;
 }
 
+static void ath12k_qmi_free_mlo_mem_chunk(struct ath12k_base *ab,
+					  struct target_mem_chunk *chunk,
+					  int idx)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct target_mem_chunk *mlo_chunk;
+
+	lockdep_assert_held(&ag->mutex_lock);
+	if (!ag->mlo_mem.init_done || ag->num_started)
+		return;
+
+	if (idx >= ARRAY_SIZE(ag->mlo_mem.chunk)) {
+		ath12k_warn(ab, "invalid idx %d for MLO memory chunk free\n", idx);
+		return;
+	}
+
+	mlo_chunk = &ag->mlo_mem.chunk[idx];
+	if (mlo_chunk->v.addr) {
+		dma_free_coherent(ab->dev,
+				  mlo_chunk->size,
+				  mlo_chunk->v.addr,
+				  mlo_chunk->paddr);
+		mlo_chunk->v.addr = NULL;
+	}
+	mlo_chunk->paddr = 0;
+	mlo_chunk->size = 0;
+	chunk->v.addr = NULL;
+	chunk->paddr = 0;
+	chunk->size = 0;
+}
+
 static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
 {
-	int i;
+	struct ath12k_hw_group *ag = ab->ag;
+	int i, mlo_idx;
 
-	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+	for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
 		if (!ab->qmi.target_mem[i].v.addr)
 			continue;
 
-		dma_free_coherent(ab->dev,
-				  ab->qmi.target_mem[i].prev_size,
-				  ab->qmi.target_mem[i].v.addr,
-				  ab->qmi.target_mem[i].paddr);
-		ab->qmi.target_mem[i].v.addr = NULL;
+		if (ab->qmi.target_mem[i].type == MLO_GLOBAL_MEM_REGION_TYPE) {
+			ath12k_qmi_free_mlo_mem_chunk(ab,
+						      &ab->qmi.target_mem[i],
+						      mlo_idx++);
+		} else {
+			dma_free_coherent(ab->dev,
+					  ab->qmi.target_mem[i].prev_size,
+					  ab->qmi.target_mem[i].v.addr,
+					  ab->qmi.target_mem[i].paddr);
+			ab->qmi.target_mem[i].v.addr = NULL;
+		}
+	}
+	if (!ag->num_started && ag->mlo_mem.init_done) {
+		ag->mlo_mem.init_done = false;
+		ag->mlo_mem.mlo_mem_size = 0;
 	}
 }
 
@@ -2409,12 +2451,21 @@  static int ath12k_qmi_alloc_chunk(struct ath12k_base *ab,
 
 static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
 {
-	int i, ret = 0;
-	struct target_mem_chunk *chunk;
+	struct ath12k_hw_group *ag = ab->ag;
+	struct target_mem_chunk *chunk, *mlo_chunk;
+	int i, mlo_idx, ret;
+	int mlo_size = 0;
+
+	mutex_lock(&ag->mutex_lock);
+
+	if (!ag->mlo_mem.init_done) {
+		memset(ag->mlo_mem.chunk, 0, sizeof(ag->mlo_mem.chunk));
+		ag->mlo_mem.init_done = true;
+	}
 
 	ab->qmi.target_mem_delayed = false;
 
-	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+	for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
 		chunk = &ab->qmi.target_mem[i];
 
 		/* Allocate memory for the region and the functionality supported
@@ -2427,6 +2478,37 @@  static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
 		case PAGEABLE_MEM_REGION_TYPE:
 		case CALDB_MEM_REGION_TYPE:
 			ret = ath12k_qmi_alloc_chunk(ab, chunk);
+			if (ret)
+				goto out;
+			break;
+		case MLO_GLOBAL_MEM_REGION_TYPE:
+			mlo_size += chunk->size;
+			if (ag->mlo_mem.mlo_mem_size &&
+			    mlo_size > ag->mlo_mem.mlo_mem_size) {
+				ret = -EINVAL;
+				ath12k_err(ab, "qmi MLO mem err, req size %d is more than alloc size %d",
+					   mlo_size, ag->mlo_mem.mlo_mem_size);
+				goto out;
+			}
+			mlo_chunk = &ag->mlo_mem.chunk[mlo_idx];
+			if (mlo_chunk->paddr) {
+				if (chunk->size != mlo_chunk->size) {
+					ret = -EINVAL;
+					ath12k_err(ab, "qmi MLO mem err, mlo_idx %d, req chunk %d, alloc chunk %d",
+						   mlo_idx, chunk->size, mlo_chunk->size);
+					goto out;
+				}
+			} else {
+				mlo_chunk->size = chunk->size;
+				mlo_chunk->type = chunk->type;
+				ret = ath12k_qmi_alloc_chunk(ab, mlo_chunk);
+				if (ret)
+					goto out;
+				memset(mlo_chunk->v.addr, 0, mlo_chunk->size);
+			}
+			chunk->paddr = mlo_chunk->paddr;
+			chunk->v.addr = mlo_chunk->v.addr;
+			mlo_idx++;
 			break;
 		default:
 			ath12k_warn(ab, "memory type %u not supported\n",
@@ -2436,6 +2518,22 @@  static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
 			break;
 		}
 	}
+
+	if (!ag->mlo_mem.mlo_mem_size) {
+		ag->mlo_mem.mlo_mem_size = mlo_size;
+	} else if (ag->mlo_mem.mlo_mem_size != mlo_size) {
+		ath12k_err(ab, "qmi MLO mem err, expected size %d, req size %d",
+			   ag->mlo_mem.mlo_mem_size, mlo_size);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	mutex_unlock(&ag->mutex_lock);
+	return 0;
+
+out:
+	ath12k_qmi_free_target_mem_chunk(ab);
+	mutex_unlock(&ag->mutex_lock);
 	return ret;
 }
 
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index 0dfcbd8cb59b..d8f78794e67e 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -167,6 +167,7 @@  enum ath12k_qmi_target_mem {
 	BDF_MEM_REGION_TYPE = 0x2,
 	M3_DUMP_REGION_TYPE = 0x3,
 	CALDB_MEM_REGION_TYPE = 0x4,
+	MLO_GLOBAL_MEM_REGION_TYPE = 0x8,
 	PAGEABLE_MEM_REGION_TYPE = 0x9,
 };