
arm64: mm: only initialize swiotlb when necessary

Message ID 1465372426-4077-1-git-send-email-jszhang@marvell.com
State New, archived

Commit Message

Jisheng Zhang June 8, 2016, 7:53 a.m. UTC
We only initialize swiotlb when swiotlb_force is true or when not all
system memory is DMA-able; this trivial optimization saves us 64MB when
swiotlb is not necessary.

Signed-off-by: Jisheng Zhang <jszhang@marvell.com>
---
 arch/arm64/mm/dma-mapping.c | 15 ++++++++++++++-
 arch/arm64/mm/init.c        |  3 ++-
 2 files changed, 16 insertions(+), 2 deletions(-)

Comments

Jisheng Zhang June 8, 2016, 8:01 a.m. UTC | #1
Dear all,

On Wed, 8 Jun 2016 15:53:46 +0800 Jisheng Zhang wrote:

> We only initialize swiotlb when swiotlb_force is true or when not all
> system memory is DMA-able; this trivial optimization saves us 64MB when
> swiotlb is not necessary.

Another solution is to call swiotlb_free() as ppc does. Either approach
solves my problem; if the maintainers prefer that one, I can send a v2 patch.
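
For reference, a minimal sketch of the ppc-style approach, modelled on
arch/powerpc/kernel/dma-swiotlb.c as of this writing (ppc_swiotlb_enable
is powerpc's equivalent of the condition this patch tests, so the arm64
version would differ in the check):

	/*
	 * Initialise swiotlb unconditionally at boot, then free the 64MB
	 * bounce buffer from an initcall once it is known to be unnecessary.
	 */
	static int __init check_swiotlb_enabled(void)
	{
		if (ppc_swiotlb_enable)
			swiotlb_print_info();
		else
			swiotlb_free();

		return 0;
	}
	subsys_initcall(check_swiotlb_enabled);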

Thanks,
Jisheng

> 
> Signed-off-by: Jisheng Zhang <jszhang@marvell.com>
> ---
>  arch/arm64/mm/dma-mapping.c | 15 ++++++++++++++-
>  arch/arm64/mm/init.c        |  3 ++-
>  2 files changed, 16 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
> index c566ec8..46a4157 100644
> --- a/arch/arm64/mm/dma-mapping.c
> +++ b/arch/arm64/mm/dma-mapping.c
> @@ -19,6 +19,7 @@
>  
>  #include <linux/gfp.h>
>  #include <linux/acpi.h>
> +#include <linux/bootmem.h>
>  #include <linux/export.h>
>  #include <linux/slab.h>
>  #include <linux/genalloc.h>
> @@ -29,6 +30,8 @@
>  
>  #include <asm/cacheflush.h>
>  
> +static int swiotlb __read_mostly;
> +
>  static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
>  				 bool coherent)
>  {
> @@ -341,6 +344,13 @@ static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
>  	return ret;
>  }
>  
> +static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
> +{
> +	if (swiotlb)
> +		return swiotlb_dma_supported(hwdev, mask);
> +	return 1;
> +}
> +
>  static struct dma_map_ops swiotlb_dma_ops = {
>  	.alloc = __dma_alloc,
>  	.free = __dma_free,
> @@ -354,7 +364,7 @@ static struct dma_map_ops swiotlb_dma_ops = {
>  	.sync_single_for_device = __swiotlb_sync_single_for_device,
>  	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
>  	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
> -	.dma_supported = swiotlb_dma_supported,
> +	.dma_supported = __swiotlb_dma_supported,
>  	.mapping_error = swiotlb_dma_mapping_error,
>  };
>  
> @@ -513,6 +523,9 @@ EXPORT_SYMBOL(dummy_dma_ops);
>  
>  static int __init arm64_dma_init(void)
>  {
> +	if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
> +		swiotlb = 1;
> +
>  	return atomic_pool_init();
>  }
>  arch_initcall(arm64_dma_init);
> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
> index d45f862..7d25b4d 100644
> --- a/arch/arm64/mm/init.c
> +++ b/arch/arm64/mm/init.c
> @@ -403,7 +403,8 @@ static void __init free_unused_memmap(void)
>   */
>  void __init mem_init(void)
>  {
> -	swiotlb_init(1);
> +	if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
> +		swiotlb_init(1);
>  
>  	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
>
Catalin Marinas June 8, 2016, 12:08 p.m. UTC | #2
On Wed, Jun 08, 2016 at 03:53:46PM +0800, Jisheng Zhang wrote:
>  static int __init arm64_dma_init(void)
>  {
> +	if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
> +		swiotlb = 1;
> +
>  	return atomic_pool_init();
>  }

So any platform with RAM larger than 4GB would still initialise swiotlb.
I wouldn't say it's an issue, 64MB is not a significant loss on such
systems.

An alternative would be to defer the freeing until we are aware of all
possible dma masks for the populated devices (e.g. from DT), though I'm
not sure that's enough, drivers may try to change such masks when
probed.
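
Roughly, such deferred freeing might look like the sketch below, where
any_device_needs_bouncing() is a hypothetical helper standing in for the
mask bookkeeping (and, per the caveat above, a driver changing its mask
in probe() would defeat it):

	/* Free the bounce buffer late, once populated devices are known. */
	static int __init swiotlb_late_check(void)
	{
		if (!swiotlb_force && !any_device_needs_bouncing())
			swiotlb_free();
		return 0;
	}
	late_initcall(swiotlb_late_check);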
Catalin Marinas June 8, 2016, 12:13 p.m. UTC | #3
On Wed, Jun 08, 2016 at 03:53:46PM +0800, Jisheng Zhang wrote:
> --- a/arch/arm64/mm/dma-mapping.c
> +++ b/arch/arm64/mm/dma-mapping.c
> @@ -19,6 +19,7 @@
>  
>  #include <linux/gfp.h>
>  #include <linux/acpi.h>
> +#include <linux/bootmem.h>
>  #include <linux/export.h>
>  #include <linux/slab.h>
>  #include <linux/genalloc.h>

Why is this include needed?
Arnd Bergmann June 8, 2016, 3:49 p.m. UTC | #4
On Wednesday, June 8, 2016 1:08:29 PM CEST Catalin Marinas wrote:
> On Wed, Jun 08, 2016 at 03:53:46PM +0800, Jisheng Zhang wrote:
> >  static int __init arm64_dma_init(void)
> >  {
> > +     if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
> > +             swiotlb = 1;
> > +
> >       return atomic_pool_init();
> >  }
> 
> So any platform with RAM larger than 4GB would still initialise swiotlb.
> I wouldn't say it's an issue, 64MB is not a significant loss on such
> systems.
> 
> An alternative would be to defer the freeing until we are aware of all
> possible dma masks for the populated devices (e.g. from DT), though I'm
> not sure that's enough, drivers may try to change such masks when
> probed.

Right, this is a hard problem, because you can in theory have odd devices
that require a DMA mask lower than the limit of ZONE_DMA.

In the kernel sources, I find these (a sketch of the typical driver-side
pattern follows the list):

# git grep DMA_BIT_MASK | grep -v 'DMA_BIT_MASK(\(3[2-9]\|[456][0-9]\))'
arch/arm/mach-ixp4xx/common.c:		dev->coherent_dma_mask = DMA_BIT_MASK(28); /* 64 MB */
drivers/base/isa.c:		isa_dev->dev.coherent_dma_mask = DMA_BIT_MASK(24);
drivers/media/pci/sta2x11/sta2x11_vip.c:	err = dma_set_coherent_mask(&vip->pdev->dev, DMA_BIT_MASK(29));
drivers/media/pci/sta2x11/sta2x11_vip.c:	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(26))) {
drivers/net/ethernet/broadcom/b44.c:		mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
drivers/net/ethernet/broadcom/b44.c:		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
drivers/net/ethernet/broadcom/b44.c:	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
drivers/net/ethernet/broadcom/b44.c:		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
drivers/net/ethernet/broadcom/b44.c:			rx_ring_dma + size > DMA_BIT_MASK(30)) {
drivers/net/ethernet/broadcom/b44.c:			tx_ring_dma + size > DMA_BIT_MASK(30)) {
drivers/net/ethernet/broadcom/b44.c:	if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
drivers/net/wan/wanxl.c:	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(28)) ||
drivers/net/wan/wanxl.c:	    pci_set_dma_mask(pdev, DMA_BIT_MASK(28))) {
drivers/net/wireless/broadcom/b43/dma.c:	return DMA_BIT_MASK(30);
drivers/net/wireless/broadcom/b43/dma.c:	if (dmamask == DMA_BIT_MASK(30))
drivers/net/wireless/broadcom/b43/dma.c:			mask = DMA_BIT_MASK(30);
drivers/net/wireless/broadcom/b43legacy/dma.c:	return DMA_BIT_MASK(30);
drivers/net/wireless/broadcom/b43legacy/dma.c:	if (dmamask == DMA_BIT_MASK(30))
drivers/net/wireless/broadcom/b43legacy/dma.c:			mask = DMA_BIT_MASK(30);
drivers/parport/parport_pc.c:		ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(24));
drivers/pnp/card.c:	card->dev.coherent_dma_mask = DMA_BIT_MASK(24);
drivers/pnp/core.c:	dev->dma_mask = DMA_BIT_MASK(24);
drivers/scsi/aacraid/commsup.c:		if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(31)))) ||
drivers/scsi/aacraid/commsup.c:		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(31)))))
drivers/scsi/aacraid/linit.c:		dmamask = DMA_BIT_MASK(31);
drivers/usb/host/ehci-pci.c:						DMA_BIT_MASK(31)) < 0)
include/linux/blkdev.h:#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))
sound/pci/ali5451/ali5451.c:	if (dma_set_mask(&pci->dev, DMA_BIT_MASK(31)) < 0 ||
sound/pci/als300.c:	if (dma_set_mask(&pci->dev, DMA_BIT_MASK(28)) < 0 ||
sound/pci/als4000.c:	if (dma_set_mask(&pci->dev, DMA_BIT_MASK(24)) < 0 ||
sound/pci/azt3328.c:	if (dma_set_mask(&pci->dev, DMA_BIT_MASK(24)) < 0 ||
sound/pci/emu10k1/emu10k1x.c:	if (pci_set_dma_mask(pci, DMA_BIT_MASK(28)) < 0 ||
sound/pci/es1938.c:	if (dma_set_mask(&pci->dev, DMA_BIT_MASK(24)) < 0 ||
sound/pci/es1968.c:	if (dma_set_mask(&pci->dev, DMA_BIT_MASK(28)) < 0 ||
sound/pci/ice1712/ice1712.c:	if (dma_set_mask(&pci->dev, DMA_BIT_MASK(28)) < 0 ||
sound/pci/maestro3.c:	if (dma_set_mask(&pci->dev, DMA_BIT_MASK(28)) < 0 ||
sound/pci/sis7019.c:	rc = dma_set_mask(&pci->dev, DMA_BIT_MASK(30));
sound/pci/sonicvibes.c:	if (dma_set_mask(&pci->dev, DMA_BIT_MASK(24)) < 0 ||
sound/pci/trident/trident_main.c:	if (dma_set_mask(&pci->dev, DMA_BIT_MASK(30)) < 0 ||
sound/pci/trident/trident_main.c:	    dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(30)) < 0) {
sound/soc/intel/common/sst-firmware.c:	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
sound/soc/intel/haswell/sst-haswell-dsp.c:	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
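
For illustration, the typical pattern in such a driver's probe looks
roughly like this (hypothetical excerpt; e.g. b44 requests a 30-bit mask
and falls back to its own GFP_DMA bounce buffers when the mask cannot be
met):

	static int odd_device_probe(struct pci_dev *pdev,
				    const struct pci_device_id *id)
	{
		int err;

		/* The device can only address the low 1GB (30-bit mask). */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			return err;
		}
		/* ... rest of probe ... */
		return 0;
	}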

	Arnd
Jisheng Zhang June 12, 2016, 7:14 a.m. UTC | #5
Dear Catalin,

On Wed, 8 Jun 2016 13:13:56 +0100 Catalin Marinas wrote:

> On Wed, Jun 08, 2016 at 03:53:46PM +0800, Jisheng Zhang wrote:
> > --- a/arch/arm64/mm/dma-mapping.c
> > +++ b/arch/arm64/mm/dma-mapping.c
> > @@ -19,6 +19,7 @@
> >  
> >  #include <linux/gfp.h>
> >  #include <linux/acpi.h>
> > +#include <linux/bootmem.h>
> >  #include <linux/export.h>
> >  #include <linux/slab.h>
> >  #include <linux/genalloc.h>  
> 
> Why is this include needed?
> 

This is for max_pfn.
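
For reference, the declaration comes from include/linux/bootmem.h
(excerpt, assuming the tree this patch is against):

	/* include/linux/bootmem.h */
	extern unsigned long max_pfn;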

Thanks,
Jisheng
Catalin Marinas June 21, 2016, 4:06 p.m. UTC | #6
On Wed, Jun 08, 2016 at 05:49:59PM +0200, Arnd Bergmann wrote:
> On Wednesday, June 8, 2016 1:08:29 PM CEST Catalin Marinas wrote:
> > On Wed, Jun 08, 2016 at 03:53:46PM +0800, Jisheng Zhang wrote:
> > >  static int __init arm64_dma_init(void)
> > >  {
> > > +     if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
> > > +             swiotlb = 1;
> > > +
> > >       return atomic_pool_init();
> > >  }
> > 
> > So any platform with RAM larger than 4GB would still initialise swiotlb.
> > I wouldn't say it's an issue, 64MB is not a significant loss on such
> > systems.
> > 
> > An alternative would be to defer the freeing until we are aware of all
> > possible dma masks for the populated devices (e.g. from DT), though I'm
> > not sure that's enough, drivers may try to change such masks when
> > probed.
> 
> Right, this is a hard problem, because you can in theory have odd devices
> that require a DMA mask lower than the limit of ZONE_DMA.

I'm not sure what we can do about such devices even with swiotlb. The
bounce buffer is allocated from ZONE_DMA and currently it assumes a
32-bit mask from the start of RAM, so it is not guaranteed to support a
smaller mask. We may need to come up with some DT memory node attribute
to define the minimum DMA-able memory and restrict ZONE_DMA during early
boot but I would rather wait until we hit a real issue in practice.

Anyway, I plan to merge this patch as is, we can improve the logic in
the future if we have a better idea.
Arnd Bergmann June 21, 2016, 8:13 p.m. UTC | #7
On Tuesday, June 21, 2016 5:06:25 PM CEST Catalin Marinas wrote:
> On Wed, Jun 08, 2016 at 05:49:59PM +0200, Arnd Bergmann wrote:
> > On Wednesday, June 8, 2016 1:08:29 PM CEST Catalin Marinas wrote:
> > > On Wed, Jun 08, 2016 at 03:53:46PM +0800, Jisheng Zhang wrote:
> > > >  static int __init arm64_dma_init(void)
> > > >  {
> > > > +     if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
> > > > +             swiotlb = 1;
> > > > +
> > > >       return atomic_pool_init();
> > > >  }
> > > 
> > > So any platform with RAM larger than 4GB would still initialise swiotlb.
> > > I wouldn't say it's an issue, 64MB is not a significant loss on such
> > > systems.
> > > 
> > > An alternative would be to defer the freeing until we are aware of all
> > > possible dma masks for the populated devices (e.g. from DT), though I'm
> > > not sure that's enough, drivers may try to change such masks when
> > > probed.
> > 
> > Right, this is a hard problem, because you can in theory have odd devices
> > that require a DMA mask lower than the limit of ZONE_DMA.
> 
> I'm not sure what we can do about such devices even with swiotlb. The
> bounce buffer is allocated from ZONE_DMA and currently it assumes a
> 32-bit mask from the start of RAM, so it is not guaranteed to support a
> smaller mask. We may need to come up with some DT memory node attribute
> to define the minimum DMA-able memory and restrict ZONE_DMA during early
> boot but I would rather wait until we hit a real issue in practice.

The bounce buffer is allocated at early boot time in order to get an address
as low as possible, in the hope that it is small enough for those cases.

	Arnd
Catalin Marinas June 22, 2016, 4:56 p.m. UTC | #8
On Tue, Jun 21, 2016 at 10:13:45PM +0200, Arnd Bergmann wrote:
> On Tuesday, June 21, 2016 5:06:25 PM CEST Catalin Marinas wrote:
> > On Wed, Jun 08, 2016 at 05:49:59PM +0200, Arnd Bergmann wrote:
> > > On Wednesday, June 8, 2016 1:08:29 PM CEST Catalin Marinas wrote:
> > > > On Wed, Jun 08, 2016 at 03:53:46PM +0800, Jisheng Zhang wrote:
> > > > >  static int __init arm64_dma_init(void)
> > > > >  {
> > > > > +     if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
> > > > > +             swiotlb = 1;
> > > > > +
> > > > >       return atomic_pool_init();
> > > > >  }
> > > > 
> > > > So any platform with RAM larger than 4GB would still initialise swiotlb.
> > > > I wouldn't say it's an issue, 64MB is not a significant loss on such
> > > > systems.
> > > > 
> > > > An alternative would be to defer the freeing until we are aware of all
> > > > possible dma masks for the populated devices (e.g. from DT), though I'm
> > > > not sure that's enough, drivers may try to change such masks when
> > > > probed.
> > > 
> > > Right, this is a hard problem, because you can in theory have odd devices
> > > that require a DMA mask lower than the limit of ZONE_DMA.
> > 
> > I'm not sure what we can do about such devices even with swiotlb. The
> > bounce buffer is allocated from ZONE_DMA and currently it assumes a
> > 32-bit mask from the start of RAM, so it is not guaranteed to support a
> > smaller mask. We may need to come up with some DT memory node attribute
> > to define the minimum DMA-able memory and restrict ZONE_DMA during early
> > boot but I would rather wait until we hit a real issue in practice.
> 
> The bounce buffer is allocated at early boot time in order to get an address
> as low as possible, in the hope that it is small enough for those cases.

The swiotlb allocation is triggered from mem_init() and that's well
after the memblock has been initialised. The swiotlb_init() function
uses memblock_virt_alloc_low_nopanic() which is a top-down allocator
with an upper bound of ARCH_LOW_ADDRESS_LIMIT. The latter is set by the
arm64 code to the top 32-bit of RAM (plus some offset), so it's likely
that the swiotlb bounce buffer will be placed in the upper range of the
32-bit mask.
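
For reference, the relevant pieces (as of the tree this patch is against;
paraphrased rather than quoted verbatim) are roughly:

	/* arch/arm64/include/asm/processor.h */
	#define ARCH_LOW_ADDRESS_LIMIT	(arm64_dma_phys_limit)

	/* lib/swiotlb.c, swiotlb_init(): top-down allocation bounded by
	 * ARCH_LOW_ADDRESS_LIMIT, so the buffer tends to land near the
	 * top of the 32-bit DMA range. */
	vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);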

Patch

diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index c566ec8..46a4157 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -19,6 +19,7 @@ 
 
 #include <linux/gfp.h>
 #include <linux/acpi.h>
+#include <linux/bootmem.h>
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/genalloc.h>
@@ -29,6 +30,8 @@ 
 
 #include <asm/cacheflush.h>
 
+static int swiotlb __read_mostly;
+
 static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
 				 bool coherent)
 {
@@ -341,6 +344,13 @@ static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
 	return ret;
 }
 
+static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
+{
+	if (swiotlb)
+		return swiotlb_dma_supported(hwdev, mask);
+	return 1;
+}
+
 static struct dma_map_ops swiotlb_dma_ops = {
 	.alloc = __dma_alloc,
 	.free = __dma_free,
@@ -354,7 +364,7 @@ static struct dma_map_ops swiotlb_dma_ops = {
 	.sync_single_for_device = __swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
-	.dma_supported = swiotlb_dma_supported,
+	.dma_supported = __swiotlb_dma_supported,
 	.mapping_error = swiotlb_dma_mapping_error,
 };
 
@@ -513,6 +523,9 @@ EXPORT_SYMBOL(dummy_dma_ops);
 
 static int __init arm64_dma_init(void)
 {
+	if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
+		swiotlb = 1;
+
 	return atomic_pool_init();
 }
 arch_initcall(arm64_dma_init);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index d45f862..7d25b4d 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -403,7 +403,8 @@ static void __init free_unused_memmap(void)
  */
 void __init mem_init(void)
 {
-	swiotlb_init(1);
+	if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
+		swiotlb_init(1);
 
 	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);