[3/3] swiotlb: use the right nslabs-derived sizes in swiotlb_init_late

Message ID 20220511125805.1377025-4-hch@lst.de (mailing list archive)
State Accepted
Commit 1b8e5d1a53696d92374acce2b19a649427f1ec1e
Series [1/3] swiotlb: don't panic when the swiotlb buffer can't be allocated

Commit Message

Christoph Hellwig May 11, 2022, 12:58 p.m. UTC
nslabs can shrink when allocations or the remap don't succeed, so make
sure to use it for all sizing.  To that end, remove the bytes value,
which can go stale, and replace it with local calculations and a
boolean to flag when the originally requested size could not be
allocated.

Fixes: 6424e31b1c05 ("swiotlb: remove swiotlb_init_with_tbl and swiotlb_init_late_with_tbl")
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 kernel/dma/swiotlb.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)
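
To see the hazard the patch removes, consider a minimal userspace
sketch (not kernel code; only the IO_TLB_SHIFT value mirrors the real
constant) of a byte count cached before nslabs shrinks:

    #include <stdio.h>

    #define IO_TLB_SHIFT 11             /* one swiotlb slab is 2 KiB */

    int main(void)
    {
            unsigned long nslabs = 2048;
            /* cached up front, like the removed local variable */
            unsigned long bytes = nslabs << IO_TLB_SHIFT;

            /* stands in for a failed allocation shrinking nslabs */
            nslabs /= 2;

            printf("stale cached size:  %lu\n", bytes);                  /* 4 MiB */
            printf("actual buffer size: %lu\n", nslabs << IO_TLB_SHIFT); /* 2 MiB */
            return 0;
    }

With the patch, every consumer recomputes nslabs << IO_TLB_SHIFT at the
point of use, so set_memory_decrypted() and swiotlb_init_io_tlb_mem()
always see the size that was actually allocated rather than the size
that was originally requested.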

Comments

Stefano Stabellini May 13, 2022, 1:44 a.m. UTC | #1
On Wed, 11 May 2022, Christoph Hellwig wrote:
> nslabs can shrink when allocations or the remap don't succeed, so make
> sure to use it for all sizing.  To that end, remove the bytes value,
> which can go stale, and replace it with local calculations and a
> boolean to flag when the originally requested size could not be
> allocated.
> 
> Fixes: 6424e31b1c05 ("swiotlb: remove swiotlb_init_with_tbl and swiotlb_init_late_with_tbl")
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>



Patch

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 113e1e8aaca37..d6e62a6a42ceb 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -297,9 +297,9 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 {
 	struct io_tlb_mem *mem = &io_tlb_default_mem;
 	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
-	unsigned long bytes;
 	unsigned char *vstart = NULL;
 	unsigned int order;
+	bool retried = false;
 	int rc = 0;
 
 	if (swiotlb_force_disable)
@@ -308,7 +308,6 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 retry:
 	order = get_order(nslabs << IO_TLB_SHIFT);
 	nslabs = SLABS_PER_PAGE << order;
-	bytes = nslabs << IO_TLB_SHIFT;
 
 	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
 		vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
@@ -316,16 +315,13 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 		if (vstart)
 			break;
 		order--;
+		nslabs = SLABS_PER_PAGE << order;
+		retried = true;
 	}
 
 	if (!vstart)
 		return -ENOMEM;
 
-	if (order != get_order(bytes)) {
-		pr_warn("only able to allocate %ld MB\n",
-			(PAGE_SIZE << order) >> 20);
-		nslabs = SLABS_PER_PAGE << order;
-	}
 	if (remap)
 		rc = remap(vstart, nslabs);
 	if (rc) {
@@ -334,9 +330,15 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
 		if (nslabs < IO_TLB_MIN_SLABS)
 			return rc;
+		retried = true;
 		goto retry;
 	}
 
+	if (retried) {
+		pr_warn("only able to allocate %ld MB\n",
+			(PAGE_SIZE << order) >> 20);
+	}
+
 	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 		get_order(array_size(sizeof(*mem->slots), nslabs)));
 	if (!mem->slots) {
@@ -344,7 +346,8 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 		return -ENOMEM;
 	}
 
-	set_memory_decrypted((unsigned long)vstart, bytes >> PAGE_SHIFT);
+	set_memory_decrypted((unsigned long)vstart,
+			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
 	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true);
 
 	swiotlb_print_info();
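
The resulting control flow, condensed into a standalone userspace
sketch (try_alloc() and the constant values are hypothetical stand-ins
for __get_free_pages() and the kernel's IO_TLB_* limits; the real
function additionally jumps back to retry after a failed remap()):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SHIFT     11        /* stand-in for IO_TLB_SHIFT */
    #define MIN_SLABS 512       /* stand-in for IO_TLB_MIN_SLABS */

    /* stand-in for __get_free_pages(): refuse anything above 2 MiB */
    static void *try_alloc(unsigned long nslabs)
    {
            return nslabs > 1024 ? NULL : malloc(nslabs << SHIFT);
    }

    int main(void)
    {
            unsigned long nslabs = 4096;    /* originally requested */
            bool retried = false;
            void *vstart;

            while (!(vstart = try_alloc(nslabs))) {
                    /* kernel: order--; nslabs = SLABS_PER_PAGE << order */
                    nslabs /= 2;
                    retried = true;
                    if (nslabs < MIN_SLABS)
                            return 1;       /* kernel returns -ENOMEM */
            }

            if (retried)    /* one warning, whichever path shrank the buffer */
                    printf("only able to allocate %lu MB\n",
                           (nslabs << SHIFT) >> 20);

            free(vstart);
            return 0;
    }

The design point: the warning no longer compares order against a
get_order(bytes) value computed before the loop; any path that shrank
the request, whether the page allocator or remap() failing, sets the
same retried flag, and the message is printed once, after a size has
actually stuck.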