
arm64: avoid increasing DMA masks above what hardware supports

Message ID 1484056844-9567-1-git-send-email-nikita.yoush@cogentembedded.com (mailing list archive)
State New, archived

Commit Message

Nikita Yushchenko Jan. 10, 2017, 2 p.m. UTC
There are cases when a device supports DMA addresses wider than its
connection supports.

In this case the driver sets the DMA mask based on its knowledge of the
device's capabilities. That call must succeed to allow the driver to
initialize.

However, swiotlb and iommu still need to know the actual addressing
capability. To avoid breakage, the mask that is actually set must not be
wider than what the device's connection allows.
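
For illustration (a hypothetical probe-time fragment, not taken from any
particular driver), the sequence that must keep working is:

	/* The device itself can address 64 bits. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;	/* must not fail just because the bus is narrower */

	/*
	 * With this patch the call above still succeeds behind e.g. a
	 * 32-bit connection; the masks recorded for the device are
	 * clamped to the parent bus limit, so swiotlb/iommu keep seeing
	 * the real limit.
	 */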

Signed-off-by: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
CC: Arnd Bergmann <arnd@arndb.de>
CC: Robin Murphy <robin.murphy@arm.com>
CC: Will Deacon <will.deacon@arm.com>
---
 arch/arm64/Kconfig                   |  3 +++
 arch/arm64/include/asm/device.h      |  1 +
 arch/arm64/include/asm/dma-mapping.h |  3 +++
 arch/arm64/mm/dma-mapping.c          | 43 ++++++++++++++++++++++++++++++++++++
 4 files changed, 50 insertions(+)

Comments

Robin Murphy Jan. 10, 2017, 5:14 p.m. UTC | #1
On 10/01/17 14:00, Nikita Yushchenko wrote:
> There are cases when a device supports DMA addresses wider than its
> connection supports.
> 
> In this case the driver sets the DMA mask based on its knowledge of the
> device's capabilities. That call must succeed to allow the driver to
> initialize.
> 
> However, swiotlb and iommu still need to know the actual addressing
> capability. To avoid breakage, the mask that is actually set must not be
> wider than what the device's connection allows.
> 
> Signed-off-by: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
> CC: Arnd Bergmann <arnd@arndb.de>
> CC: Robin Murphy <robin.murphy@arm.com>
> CC: Will Deacon <will.deacon@arm.com>
> ---
>  arch/arm64/Kconfig                   |  3 +++
>  arch/arm64/include/asm/device.h      |  1 +
>  arch/arm64/include/asm/dma-mapping.h |  3 +++
>  arch/arm64/mm/dma-mapping.c          | 43 ++++++++++++++++++++++++++++++++++++
>  4 files changed, 50 insertions(+)
> 
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 1117421..afb2c08 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -216,6 +216,9 @@ config NEED_DMA_MAP_STATE
>  config NEED_SG_DMA_LENGTH
>  	def_bool y
>  
> +config ARCH_HAS_DMA_SET_COHERENT_MASK
> +	def_bool y
> +
>  config SMP
>  	def_bool y
>  
> diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
> index 243ef25..a57e7bb 100644
> --- a/arch/arm64/include/asm/device.h
> +++ b/arch/arm64/include/asm/device.h
> @@ -22,6 +22,7 @@ struct dev_archdata {
>  	void *iommu;			/* private IOMMU data */
>  #endif
>  	bool dma_coherent;
> +	u64 parent_dma_mask;
>  };
>  
>  struct pdev_archdata {
> diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
> index ccea82c..eab36d2 100644
> --- a/arch/arm64/include/asm/dma-mapping.h
> +++ b/arch/arm64/include/asm/dma-mapping.h
> @@ -51,6 +51,9 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
>  			const struct iommu_ops *iommu, bool coherent);
>  #define arch_setup_dma_ops	arch_setup_dma_ops
>  
> +#define HAVE_ARCH_DMA_SET_MASK 1
> +extern int dma_set_mask(struct device *dev, u64 dma_mask);
> +
>  #ifdef CONFIG_IOMMU_DMA
>  void arch_teardown_dma_ops(struct device *dev);
>  #define arch_teardown_dma_ops	arch_teardown_dma_ops
> diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
> index e040827..7b1bb87 100644
> --- a/arch/arm64/mm/dma-mapping.c
> +++ b/arch/arm64/mm/dma-mapping.c
> @@ -203,6 +203,37 @@ static void __dma_free(struct device *dev, size_t size,
>  	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
>  }
>  
> +int dma_set_mask(struct device *dev, u64 mask)
> +{
> +	const struct dma_map_ops *ops = get_dma_ops(dev);
> +
> +	if (mask > dev->archdata.parent_dma_mask)
> +		mask = dev->archdata.parent_dma_mask;
> +
> +	if (ops->set_dma_mask)
> +		return ops->set_dma_mask(dev, mask);
> +
> +	if (!dev->dma_mask || !dma_supported(dev, mask))
> +		return -EIO;
> +
> +	*dev->dma_mask = mask;
> +	return 0;
> +}
> +EXPORT_SYMBOL(dma_set_mask);
> +
> +int dma_set_coherent_mask(struct device *dev, u64 mask)
> +{
> +	if (mask > dev->archdata.parent_dma_mask)
> +		mask = dev->archdata.parent_dma_mask;
> +
> +	if (!dma_supported(dev, mask))
> +		return -EIO;
> +
> +	dev->coherent_dma_mask = mask;
> +	return 0;
> +}
> +EXPORT_SYMBOL(dma_set_coherent_mask);
> +
>  static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
>  				     unsigned long offset, size_t size,
>  				     enum dma_data_direction dir,
> @@ -958,6 +989,18 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
>  	if (!dev->archdata.dma_ops)
>  		dev->archdata.dma_ops = &swiotlb_dma_ops;
>  
> +	/*
> +	 * we don't yet support buses that have a non-zero mapping.
> +	 *  Let's hope we won't need it
> +	 */
> +	WARN_ON(dma_base != 0);

I believe we now accommodate the bus remap bits on BCM2837 as a DMA
offset, so unfortunately I think this is no longer true.

> +	/*
> +	 * Whatever the parent bus can set. A device must not set
> +	 * a DMA mask larger than this.
> +	 */
> +	dev->archdata.parent_dma_mask = size - 1;

This will effectively constrain *all* DMA masks to be 32-bit, since for
99% of devices we're going to see a size derived from the default mask
passed in here. I worry that that's liable to lead to performance and
stability regressions (now that the block layer can apparently generate
sufficient readahead to overflow a typical SWIOTLB bounce buffer[1]).
Whilst DT users would be able to mitigate that by putting explicit
"dma-ranges" properties on every device node, it's less clear what we'd
do for ACPI.

I reckon the easiest way forward would be to pass in some flag to
arch_setup_dma_ops to indicate whether it's an explicitly-configured
range or not - then simply initialising parent_dma_mask to ~0 for the
default case *should* keep things working as before.
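
Something like this, perhaps (the 'explicit_range' flag name is just for
illustration):

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent,
			bool explicit_range);

with the arm64 implementation then doing:

	dev->archdata.parent_dma_mask = explicit_range ? size - 1 : ~0ULL;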

Robin.

[1]:https://www.mail-archive.com/virtualization@lists.linux-foundation.org/msg26532.html

> +
>  	dev->archdata.dma_coherent = coherent;
>  	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
>  }
>
Nikita Yushchenko Jan. 11, 2017, 7:59 a.m. UTC | #2
>> +	/*
>> +	 * we don't yet support buses that have a non-zero mapping.
>> +	 *  Let's hope we won't need it
>> +	 */
>> +	WARN_ON(dma_base != 0);
> 
> I believe we now accommodate the bus remap bits on BCM2837 as a DMA
> offset, so unfortunately I think this is no longer true.

Arnd, this check came from you. Any updates? Perhaps it can simply be
dropped?

In the swiotlb code, the dma address (i.e. with the offset already
applied) is checked against the mask.  I'm not sure what 'dma_base'
means in the iommu case.
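
Roughly this kind of check (a sketch with an illustrative helper name;
the actual helper on that path, as far as I recall, is dma_capable()):

	/* dma_addr already includes any bus offset at this point */
	static inline bool fits_streaming_mask(struct device *dev,
					       dma_addr_t dma_addr, size_t size)
	{
		return dev->dma_mask && dma_addr + size - 1 <= *dev->dma_mask;
	}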

>> +	/*
>> +	 * Whatever the parent bus can set. A device must not set
>> +	 * a DMA mask larger than this.
>> +	 */
>> +	dev->archdata.parent_dma_mask = size - 1;
> 
> This will effectively constrain *all* DMA masks to be 32-bit, since for
> 99% of devices we're going to see a size derived from the default mask
> passed in here. I worry that that's liable to lead to performance and
> stability regressions

That was exactly my concern when I first tried to address this issue. My
first attempt altered only the exact configuration where the problem
shows up, while ensuring that everything else stays as is. See
https://lkml.org/lkml/2016/12/29/218

But it looks like people want a generic solution.

> I reckon the easiest way forward would be to pass in some flag to
> arch_setup_dma_ops to indicate whether it's an explicitly-configured
> range or not - then simply initialising parent_dma_mask to ~0 for the
> default case *should* keep things working as before.

Currently only arm, arm64 and mips define arch_setup_dma_ops().
The mips version only checks the 'coherent' argument; 'size' is used
only by arm and arm64.

Maybe move setting the default from the caller to the callee?
I.e. pass size=0 if no explicit information exists, and let the
architecture handle that?
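
On the caller side that could look roughly like this in of_dma_configure()
(just a sketch of the idea, not tested):

	ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
	if (ret < 0) {
		/* No usable dma-ranges: tell the arch "no explicit info". */
		dma_addr = offset = 0;
		size = 0;
	}

	/* rest of of_dma_configure() unchanged, ending in: */
	arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent);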
Robin Murphy Jan. 11, 2017, 11:54 a.m. UTC | #3
On 11/01/17 07:59, Nikita Yushchenko wrote:
>>> +	/*
>>> +	 * we don't yet support buses that have a non-zero mapping.
>>> +	 *  Let's hope we won't need it
>>> +	 */
>>> +	WARN_ON(dma_base != 0);
>>
>> I believe we now accommodate the bus remap bits on BCM2837 as a DMA
>> offset, so unfortunately I think this is no longer true.
> 
> Arnd, this check came from you. Any updates? Perhaps it can simply be
> dropped?
> 
> In the swiotlb code, the dma address (i.e. with the offset already
> applied) is checked against the mask.  I'm not sure what 'dma_base'
> means in the iommu case.
> 
>>> +	/*
>>> +	 * Whatever the parent bus can set. A device must not set
>>> +	 * a DMA mask larger than this.
>>> +	 */
>>> +	dev->archdata.parent_dma_mask = size - 1;
>>
>> This will effectively constrain *all* DMA masks to be 32-bit, since for
>> 99% of devices we're going to see a size derived from the default mask
>> passed in here. I worry that that's liable to lead to performance and
>> stability regressions
> 
> That was exactly my concern when I first tried to address this issue. My
> first attempt altered only the exact configuration where the problem
> shows up, while ensuring that everything else stays as is. See
> https://lkml.org/lkml/2016/12/29/218
> 
> But it looks like people want a generic solution.
> 
>> I reckon the easiest way forward would be to pass in some flag to
>> arch_setup_dma_ops to indicate whether it's an explicitly-configured
>> range or not - then simply initialising parent_dma_mask to ~0 for the
>> default case *should* keep things working as before.
> 
> Currently only arm, arm64 and mips define arch_setup_dma_ops().
> The mips version only checks the 'coherent' argument; 'size' is used
> only by arm and arm64.
> 
> Maybe move setting the default from the caller to the callee?
> I.e. pass size=0 if no explicit information exists, and let the
> architecture handle that?

Yes, I think that ought to work, although the __iommu_setup_dma_ops()
call will still want a real size reflecting the default mask, so
something like:

if (size == 0) {
	dev->archdata.parent_dma_mask = ~0;
	size = 1ULL << 32;
} else {
	dev->archdata.parent_dma_mask = size - 1;
}

should probably do the trick.

Robin.
Nikita Yushchenko Jan. 11, 2017, 1:41 p.m. UTC | #4
> Yes, I think that ought to work, although the __iommu_setup_dma_ops()
> call will still want a real size reflecting the default mask

I see that iommu_dma_ops does not define set_dma_mask.

So what if the setup was done for a size reflecting one mask and then
the driver later changes the mask?  Will things still operate correctly?
Robin Murphy Jan. 11, 2017, 2:50 p.m. UTC | #5
On 11/01/17 13:41, Nikita Yushchenko wrote:
>> Yes, I think that ought to work, although the __iommu_setup_dma_ops()
>> call will still want a real size reflecting the default mask
> 
> I see that iommu_dma_ops does not define set_dma_mask.
> 
> So what if the setup was done for a size reflecting one mask and then
> the driver later changes the mask?  Will things still operate correctly?

We've overridden dma_set_mask() at the function level, so it should
always apply regardless. Besides, none of the arm64 ops implement
.set_dma_mask anyway, so we could possibly drop the references to it
altogether.

Conversely, I suppose we could just implement said callback for
swiotlb_dma_ops and iommu_dma_ops with the parent_dma_mask-checking
function and drop the HAVE_ARCH_DMA_SET_MASK override instead. I'm not
sure which approach is preferable - the latter seems arguably cleaner in
isolation, but would also be less consistent with how the coherent mask
has to be handled. Ho hum.
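
For the record, the latter would look roughly like this (function name
and the exact hook-up are illustrative only):

static int __arm64_dma_set_mask(struct device *dev, u64 mask)
{
	if (mask > dev->archdata.parent_dma_mask)
		mask = dev->archdata.parent_dma_mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}

with swiotlb_dma_ops/iommu_dma_ops then gaining:

	.set_dma_mask	= __arm64_dma_set_mask,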

Robin.
Nikita Yushchenko Jan. 11, 2017, 4:03 p.m. UTC | #6
On 11.01.2017 17:50, Robin Murphy wrote:
> On 11/01/17 13:41, Nikita Yushchenko wrote:
>>> Yes, I think that ought to work, although the __iommu_setup_dma_ops()
>>> call will still want a real size reflecting the default mask
>>
>> I see that iommu_dma_ops does not define set_dma_mask.
>>
>> So what if the setup was done for a size reflecting one mask and then
>> the driver later changes the mask?  Will things still operate correctly?
> 
> We've overridden dma_set_mask() at the function level, so it should
> always apply regardless. Besides, none of the arm64 ops implement
> .set_dma_mask anyway, so we could possibly drop the references to it
> altogether.
> 
> Conversely, I suppose we could just implement said callback for
> swiotlb_dma_ops and iommu_dma_ops with the parent_dma_mask-checking
> function and drop the HAVE_ARCH_DMA_SET_MASK override instead. I'm not
> sure which approach is preferable - the latter seems arguably cleaner in
> isolation, but would also be less consistent with how the coherent mask
> has to be handled. Ho hum.

I mean before the patch is applied.

In the current mainline codebase, the arm64 iommu does its setup based
on the [default] dma_mask, but does not react to a dma mask change in
any way.

I don't know many details of the arm64 iommu, but from a distance this
combination looks incorrect:
- if the behavior of this hardware should depend on the device's
dma_mask, then it should handle mask changes,
- if the behavior of this hardware should not depend on the device's
dma_mask, then why pass size to its setup at all?
Robin Murphy Jan. 11, 2017, 4:50 p.m. UTC | #7
On 11/01/17 16:03, Nikita Yushchenko wrote:
> 
> 
>> On 11.01.2017 17:50, Robin Murphy wrote:
>> On 11/01/17 13:41, Nikita Yushchenko wrote:
>>>> Yes, I think that ought to work, although the __iommu_setup_dma_ops()
>>>> call will still want a real size reflecting the default mask
>>>
>>> I see that iommu_dma_ops does not define set_dma_mask.
>>>
>>> So what if the setup was done for a size reflecting one mask and then
>>> the driver later changes the mask?  Will things still operate correctly?
>>
>> We've overridden dma_set_mask() at the function level, so it should
>> always apply regardless. Besides, none of the arm64 ops implement
>> .set_dma_mask anyway, so we could possibly drop the references to it
>> altogether.
>>
>> Conversely, I suppose we could just implement said callback for
>> swiotlb_dma_ops and iommu_dma_ops with the parent_dma_mask-checking
>> function and drop the HAVE_ARCH_DMA_SET_MASK override instead. I'm not
>> sure which approach is preferable - the latter seems arguably cleaner in
>> isolation, but would also be less consistent with how the coherent mask
>> has to be handled. Ho hum.
> 
> I mean before the patch is applied.
> 
> In the current mainline codebase, the arm64 iommu does its setup based
> on the [default] dma_mask, but does not react to a dma mask change in
> any way.
> 
> I don't know many details of the arm64 iommu, but from a distance this
> combination looks incorrect:
> - if the behavior of this hardware should depend on the device's
> dma_mask, then it should handle mask changes,
> - if the behavior of this hardware should not depend on the device's
> dma_mask, then why pass size to its setup at all?

Ah, right, I did indeed misunderstand. The IOMMU code is happy with the
DMA masks changing, since it always checks the appropriate one at the
point of IOVA allocation. The initial size given to
__iommu_setup_dma_ops() really just sets up a caching threshold in the
IOVA domain - if there is an explicitly-configured mask, then it's
generally good to set the limit based on that, but otherwise the default
32-bit limit is fine even if the driver subsequently alters the device's
mask(s) before making DMA API calls. Your patch won't actually change
any behaviour in this regard, as long as it still passes a 4GB size by
default.

Robin.
Nikita Yushchenko Jan. 11, 2017, 6:31 p.m. UTC | #8
> I reckon the easiest way forward would be to pass in some flag to
> arch_setup_dma_ops to indicate whether it's an explicitly-configured
> range or not - then simply initialising parent_dma_mask to ~0 for the
> default case *should* keep things working as before.

Tried to do that.

---

Nikita Yushchenko (2):
  dma-mapping: let arch know origin of dma range passed to arch_setup_dma_ops()
  arm64: avoid increasing DMA masks above what hardware supports

 arch/arm/include/asm/dma-mapping.h             |  1 +
 arch/arm/mm/dma-mapping.c                      |  3 +-
 arch/arm64/Kconfig                             |  3 ++
 arch/arm64/include/asm/device.h                |  1 +
 arch/arm64/include/asm/dma-mapping.h           |  6 +++-
 arch/arm64/mm/dma-mapping.c                    | 43 +++++++++++++++++++++++++-
 arch/mips/include/asm/dma-mapping.h            |  3 +-
 drivers/acpi/scan.c                            |  2 +-
 drivers/iommu/rockchip-iommu.c                 |  2 +-
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c |  2 +-
 drivers/of/device.c                            |  5 ++-
 drivers/staging/fsl-mc/bus/fsl-mc-bus.c        |  2 +-
 12 files changed, 64 insertions(+), 9 deletions(-)

Patch

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1117421..afb2c08 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -216,6 +216,9 @@  config NEED_DMA_MAP_STATE
 config NEED_SG_DMA_LENGTH
 	def_bool y
 
+config ARCH_HAS_DMA_SET_COHERENT_MASK
+	def_bool y
+
 config SMP
 	def_bool y
 
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 243ef25..a57e7bb 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -22,6 +22,7 @@  struct dev_archdata {
 	void *iommu;			/* private IOMMU data */
 #endif
 	bool dma_coherent;
+	u64 parent_dma_mask;
 };
 
 struct pdev_archdata {
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index ccea82c..eab36d2 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -51,6 +51,9 @@  void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent);
 #define arch_setup_dma_ops	arch_setup_dma_ops
 
+#define HAVE_ARCH_DMA_SET_MASK 1
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
+
 #ifdef CONFIG_IOMMU_DMA
 void arch_teardown_dma_ops(struct device *dev);
 #define arch_teardown_dma_ops	arch_teardown_dma_ops
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index e040827..7b1bb87 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -203,6 +203,37 @@  static void __dma_free(struct device *dev, size_t size,
 	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
 }
 
+int dma_set_mask(struct device *dev, u64 mask)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (mask > dev->archdata.parent_dma_mask)
+		mask = dev->archdata.parent_dma_mask;
+
+	if (ops->set_dma_mask)
+		return ops->set_dma_mask(dev, mask);
+
+	if (!dev->dma_mask || !dma_supported(dev, mask))
+		return -EIO;
+
+	*dev->dma_mask = mask;
+	return 0;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+	if (mask > dev->archdata.parent_dma_mask)
+		mask = dev->archdata.parent_dma_mask;
+
+	if (!dma_supported(dev, mask))
+		return -EIO;
+
+	dev->coherent_dma_mask = mask;
+	return 0;
+}
+EXPORT_SYMBOL(dma_set_coherent_mask);
+
 static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
 				     unsigned long offset, size_t size,
 				     enum dma_data_direction dir,
@@ -958,6 +989,18 @@  void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 	if (!dev->archdata.dma_ops)
 		dev->archdata.dma_ops = &swiotlb_dma_ops;
 
+	/*
+	 * we don't yet support buses that have a non-zero mapping.
+	 *  Let's hope we won't need it
+	 */
+	WARN_ON(dma_base != 0);
+
+	/*
+	 * Whatever the parent bus can set. A device must not set
+	 * a DMA mask larger than this.
+	 */
+	dev->archdata.parent_dma_mask = size - 1;
+
 	dev->archdata.dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
 }