
[v3,5/7] swiotlb: determine potential physical address limit

Message ID ca2a1e6ef6cc91a932c223a4c0b784267d565680.1687859323.git.petr.tesarik.ext@huawei.com
State Superseded
Series Allow dynamic allocation of software IO TLB bounce buffers

Commit Message

Petr Tesarik June 27, 2023, 9:54 a.m. UTC
From: Petr Tesarik <petr.tesarik.ext@huawei.com>

The value returned by default_swiotlb_limit() should not change at runtime,
because it is used to decide whether DMA can be used. To allow allocating
memory pools on the fly, use the maximum possible physical address rather
than the highest address used by the default pool.

For swiotlb_init_remap(), this is either an arch-specific limit used by
memblock_alloc_low(), or the highest directly mapped physical address if
the initialization flags include SWIOTLB_ANY. For swiotlb_init_late(), the
highest address is determined by the GFP flags.
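
[Editorial sketch, not part of the patch.] Because default_swiotlb_limit()
now reports a constant upper bound, a caller can compare it against a
device's DMA mask once, without worrying about pools that are added later.
The helper below is hypothetical and, for brevity, assumes a 1:1
physical-to-DMA address mapping (no phys_to_dma() translation):

	#include <linux/dma-mapping.h>
	#include <linux/swiotlb.h>

	/* Hypothetical helper: could bounce buffers ever be addressable by @dev? */
	static bool dev_can_use_swiotlb(struct device *dev)
	{
		/* Highest physical address any software IO TLB pool may use. */
		phys_addr_t limit = default_swiotlb_limit();

		/* The device's DMA mask must cover that limit. */
		return dma_get_mask(dev) >= limit;
	}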

Signed-off-by: Petr Tesarik <petr.tesarik.ext@huawei.com>
---
 include/linux/swiotlb.h |  2 ++
 kernel/dma/swiotlb.c    | 11 ++++++++++-
 2 files changed, 12 insertions(+), 1 deletion(-)

Patch

diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index ae1688438850..4a3af1c216d0 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -105,6 +105,7 @@  struct io_tlb_pool {
  * struct io_tlb_mem - Software IO TLB allocator
  * @pool:	IO TLB memory pool descriptor.
  * @nslabs:	Total number of IO TLB slabs in all pools.
+ * @phys_limit:	Maximum allowed physical address.
  * @debugfs:	The dentry to debugfs.
  * @force_bounce: %true if swiotlb bouncing is forced
  * @for_alloc:  %true if the pool is used for memory allocation
@@ -117,6 +118,7 @@  struct io_tlb_pool {
 struct io_tlb_mem {
 	struct io_tlb_pool *pool;
 	unsigned long nslabs;
+	u64 phys_limit;
 	struct dentry *debugfs;
 	bool force_bounce;
 	bool for_alloc;
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 06b4fa7c2e9b..5bb83097ade6 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -333,6 +333,9 @@  void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 
 	io_tlb_default_mem.force_bounce =
 		swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
+	io_tlb_default_mem.phys_limit = flags & SWIOTLB_ANY
+		? virt_to_phys(high_memory - 1)
+		: ARCH_LOW_ADDRESS_LIMIT;
 
 	if (!default_nareas)
 		swiotlb_adjust_nareas(num_possible_cpus());
@@ -400,6 +403,12 @@  int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 		return 0;
 
 	io_tlb_default_mem.force_bounce = swiotlb_force_bounce;
+	io_tlb_default_mem.phys_limit =
+		IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA)
+		? DMA_BIT_MASK(zone_dma_bits)
+		: (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32)
+		   ? DMA_BIT_MASK(32)
+		   : virt_to_phys(high_memory - 1));
 
 	if (!default_nareas)
 		swiotlb_adjust_nareas(num_possible_cpus());
@@ -1308,7 +1317,7 @@  phys_addr_t default_swiotlb_start(void)
  */
 phys_addr_t default_swiotlb_limit(void)
 {
-	return io_tlb_default_pool.end - 1;
+	return io_tlb_default_mem.phys_limit;
 }
 
 #ifdef CONFIG_DEBUG_FS