
[1/4] media: ipu6: move the l2_unmap() up before l2_map()

Message ID: 1729754679-6402-1-git-send-email-bingbu.cao@intel.com
State: New
Series: [1/4] media: ipu6: move the l2_unmap() up before l2_map()

Commit Message

Bingbu Cao Oct. 24, 2024, 7:24 a.m. UTC
l2_map() and l2_unmap() are better grouped together, and
l2_unmap() could also be called from l2_map() for error handling.

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Signed-off-by: Jianhui Dai <jianhui.j.dai@intel.com>
---
 drivers/media/pci/intel/ipu6/ipu6-mmu.c | 80 ++++++++++++++++-----------------
 1 file changed, 40 insertions(+), 40 deletions(-)
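
For context on why the definition order matters: per the discussion below, a later patch in this series is expected to have l2_map() call l2_unmap() to roll back a partially created mapping when a later step fails, which requires l2_unmap() to be defined before l2_map(). Here is a minimal, self-contained userspace sketch of that rollback pattern (pte[], l2_map_sketch() and l2_unmap_sketch() are illustrative stand-ins, not the IPU6 driver code):

#include <stdio.h>
#include <stddef.h>

#define NPTES 8
static unsigned int pte[NPTES];	/* stand-in for one L2 page table */

/* Clear n entries starting at 'start' (mirrors l2_unmap() writing the
 * dummy pteval back). */
static void l2_unmap_sketch(size_t start, size_t n)
{
	for (size_t i = start; i < start + n; i++)
		pte[i] = 0;
}

/* Map n entries starting at 'start'; on a mid-way failure, unmap
 * whatever was already written and report the error. */
static int l2_map_sketch(size_t start, size_t n)
{
	for (size_t i = start; i < start + n; i++) {
		if (i >= NPTES) {			/* simulated failure */
			l2_unmap_sketch(start, i - start);
			return -1;
		}
		pte[i] = 0xC0DE;			/* "map" the entry */
	}
	return 0;
}

int main(void)
{
	printf("fits:      %d\n", l2_map_sketch(2, 4));	/* -> 0 */
	printf("overflows: %d\n", l2_map_sketch(6, 4));	/* -> -1, rolled back */
	return 0;
}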

Comments

Sakari Ailus Oct. 24, 2024, 10:34 a.m. UTC | #1
Hi Bingbu,

Thank you for the update.

On Thu, Oct 24, 2024 at 03:24:36PM +0800, Bingbu Cao wrote:
> l2_map() and l2_unmap() are better grouped together, and
> l2_unmap() could also be called from l2_map() for error handling.

Can you confirm there are no changes to the function itself? This should be
part of the commit message.
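
For what it's worth, git's move detection can be used to confirm this is
pure code movement, e.g. (the commit reference is a placeholder):

  git show --color-moved=dimmed-zebra <commit> -- drivers/media/pci/intel/ipu6/ipu6-mmu.c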

I'd also add that l2_unmap() will soon be called from l2_map(), rather than
being something that could be done.

Bingbu Cao Oct. 25, 2024, 4:18 a.m. UTC | #2
Sakari,

Thanks for the review.

On 10/24/24 6:34 PM, Sakari Ailus wrote:
> Hi Bingbu,
> 
> Thank you for the update.
> 
> On Thu, Oct 24, 2024 at 03:24:36PM +0800, Bingbu Cao wrote:
>> l2_map() and l2_unmap() are better grouped together, and
>> l2_unmap() could also be called from l2_map() for error handling.
> 
> Can you confirm there are no changes to the function itself? This should be
> part of the commit message.

Yes, l2_unmap() itself is not changed here.

> 
> I'd also add that l2_unmap() will soon be called from l2_map(), rather than
> being something that could be done.

I will add that in v2.

Patch

diff --git a/drivers/media/pci/intel/ipu6/ipu6-mmu.c b/drivers/media/pci/intel/ipu6/ipu6-mmu.c
index c3a20507d6db..9ea6789bca5e 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-mmu.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-mmu.c
@@ -252,6 +252,46 @@ static u32 *alloc_l2_pt(struct ipu6_mmu_info *mmu_info)
 	return pt;
 }
 
+static size_t l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+		       phys_addr_t dummy, size_t size)
+{
+	u32 l1_idx = iova >> ISP_L1PT_SHIFT;
+	u32 iova_start = iova;
+	unsigned int l2_idx;
+	size_t unmapped = 0;
+	unsigned long flags;
+	u32 *l2_pt;
+
+	dev_dbg(mmu_info->dev, "unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n",
+		l1_idx, iova);
+
+	spin_lock_irqsave(&mmu_info->lock, flags);
+	if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
+		spin_unlock_irqrestore(&mmu_info->lock, flags);
+		dev_err(mmu_info->dev,
+			"unmap iova 0x%8.8lx l1 idx %u which was not mapped\n",
+			iova, l1_idx);
+		return 0;
+	}
+
+	for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
+	     (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT)
+		     < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
+		l2_pt = mmu_info->l2_pts[l1_idx];
+		dev_dbg(mmu_info->dev,
+			"unmap l2 index %u with pteval 0x%10.10llx\n",
+			l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx]));
+		l2_pt[l2_idx] = mmu_info->dummy_page_pteval;
+
+		clflush_cache_range((void *)&l2_pt[l2_idx],
+				    sizeof(l2_pt[l2_idx]));
+		unmapped++;
+	}
+	spin_unlock_irqrestore(&mmu_info->lock, flags);
+
+	return unmapped << ISP_PAGE_SHIFT;
+}
+
 static int l2_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
 		  phys_addr_t paddr, size_t size)
 {
@@ -336,46 +376,6 @@ static int __ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
 	return l2_map(mmu_info, iova_start, paddr, size);
 }
 
-static size_t l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
-		       phys_addr_t dummy, size_t size)
-{
-	u32 l1_idx = iova >> ISP_L1PT_SHIFT;
-	u32 iova_start = iova;
-	unsigned int l2_idx;
-	size_t unmapped = 0;
-	unsigned long flags;
-	u32 *l2_pt;
-
-	dev_dbg(mmu_info->dev, "unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n",
-		l1_idx, iova);
-
-	spin_lock_irqsave(&mmu_info->lock, flags);
-	if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
-		spin_unlock_irqrestore(&mmu_info->lock, flags);
-		dev_err(mmu_info->dev,
-			"unmap iova 0x%8.8lx l1 idx %u which was not mapped\n",
-			iova, l1_idx);
-		return 0;
-	}
-
-	for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
-	     (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT)
-		     < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
-		l2_pt = mmu_info->l2_pts[l1_idx];
-		dev_dbg(mmu_info->dev,
-			"unmap l2 index %u with pteval 0x%10.10llx\n",
-			l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx]));
-		l2_pt[l2_idx] = mmu_info->dummy_page_pteval;
-
-		clflush_cache_range((void *)&l2_pt[l2_idx],
-				    sizeof(l2_pt[l2_idx]));
-		unmapped++;
-	}
-	spin_unlock_irqrestore(&mmu_info->lock, flags);
-
-	return unmapped << ISP_PAGE_SHIFT;
-}
-
 static size_t __ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info,
 			       unsigned long iova, size_t size)
 {
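
To make the index math in l2_unmap() concrete, here is a small standalone sketch of the iova-to-index decomposition and the loop bound, assuming the conventional layout of 4 KiB pages and 1024-entry L2 tables. The ISP_* values below are illustrative assumptions; the authoritative definitions live in ipu6-mmu.h.

#include <stdio.h>

/* Assumed layout, for illustration only: 4 KiB pages, 1024 PTEs per L2
 * table, one L2 table per 4 MiB L1 entry. */
#define ISP_PAGE_SHIFT	12
#define ISP_L2PT_SHIFT	ISP_PAGE_SHIFT
#define ISP_L1PT_SHIFT	22
#define ISP_L2PT_PTES	1024
#define ISP_L1PT_MASK	(~((1UL << ISP_L1PT_SHIFT) - 1))
#define ISP_L2PT_MASK	(((1UL << ISP_L1PT_SHIFT) - 1) & ~((1UL << ISP_L2PT_SHIFT) - 1))

int main(void)
{
	unsigned long iova = 0x00ffd000;	/* example IOVA, 3 pages below a 4 MiB boundary */
	size_t size = 5 * 4096;			/* 5 pages, crossing that boundary */
	unsigned int l1_idx = iova >> ISP_L1PT_SHIFT;
	unsigned int l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
	unsigned int n = 0;

	/* Same loop bound as l2_unmap(): stop at the end of the range or at
	 * the end of this L2 table, whichever comes first. */
	for (; (iova & ISP_L1PT_MASK) + ((unsigned long)l2_idx << ISP_PAGE_SHIFT)
		     < iova + size && l2_idx < ISP_L2PT_PTES; l2_idx++)
		n++;

	/* Prints l1_idx=3 and 3 PTEs: only the pages inside this L1 entry are
	 * covered, which is why a single l2_unmap() call never crosses an L1
	 * boundary. */
	printf("l1_idx=%u, PTEs covered by this call=%u\n", l1_idx, n);
	return 0;
}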