--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -216,6 +216,9 @@ config NEED_DMA_MAP_STATE
config NEED_SG_DMA_LENGTH
def_bool y
+config ARCH_HAS_DMA_SET_COHERENT_MASK
+ def_bool y
+
config SMP
def_bool y
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -22,6 +22,7 @@ struct dev_archdata {
void *iommu; /* private IOMMU data */
#endif
bool dma_coherent;
+ u64 parent_dma_mask;
};
struct pdev_archdata {
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -352,6 +352,31 @@ static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
return 1;
}
+static int __swiotlb_set_dma_mask(struct device *dev, u64 mask)
+{
+ /* device is not DMA capable */
+ if (!dev->dma_mask)
+ return -EIO;
+
+ /* mask is below swiotlb bounce buffer, so fail */
+ if (!swiotlb_dma_supported(dev, mask))
+ return -EIO;
+
+ /*
+ * because of the swiotlb, we can return success for
+ * larger masks, but need to ensure that bounce buffers
+ * are used above parent_dma_mask, so set that as
+ * the effective mask.
+ */
+ if (mask > dev->archdata.parent_dma_mask)
+ mask = dev->archdata.parent_dma_mask;
+
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
+
static struct dma_map_ops swiotlb_dma_ops = {
.alloc = __dma_alloc,
.free = __dma_free,
@@ -367,8 +392,23 @@ static struct dma_map_ops swiotlb_dma_ops = {
.sync_sg_for_device = __swiotlb_sync_sg_for_device,
.dma_supported = __swiotlb_dma_supported,
.mapping_error = swiotlb_dma_mapping_error,
+ .set_dma_mask = __swiotlb_set_dma_mask,
};
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+ if (!dma_supported(dev, mask))
+ return -EIO;
+
+ if (get_dma_ops(dev) == &swiotlb_dma_ops &&
+ mask > dev->archdata.parent_dma_mask)
+ mask = dev->archdata.parent_dma_mask;
+
+ dev->coherent_dma_mask = mask;
+ return 0;
+}
+EXPORT_SYMBOL(dma_set_coherent_mask);
+
static int __init atomic_pool_init(void)
{
pgprot_t prot = __pgprot(PROT_NORMAL_NC);
It is possible that a device is capable of 64-bit DMA addresses and its
driver tries to set a wide DMA mask, while the bridge or bus used to
connect the device to the system can't handle wide addresses. With
swiotlb, memory above 4G can still be used by drivers for streaming DMA,
but *dev->dma_mask and dev->coherent_dma_mask must still keep values
that the hardware can handle physically. This patch enforces that.

Based on an original version by Arnd Bergmann <arnd@arndb.de>, extended
with coherent mask handling.

Signed-off-by: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
CC: Arnd Bergmann <arnd@arndb.de>
---
 arch/arm64/Kconfig              |  3 +++
 arch/arm64/include/asm/device.h |  1 +
 arch/arm64/mm/dma-mapping.c     | 40 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 44 insertions(+)
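
For illustration only (not part of the patch): a minimal sketch of what a
driver sees once this is applied, assuming bus/platform setup code has
already filled in dev->archdata.parent_dma_mask with the bridge's limit
(that wiring is not in this diff). The foo_probe() function and the
32-bit bridge in the comments are hypothetical placeholders.

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

/* Hypothetical probe fragment; "foo" names are placeholders. */
static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	/* The device itself can generate 64-bit addresses. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/*
	 * If archdata.parent_dma_mask records e.g. a 32-bit bridge, the
	 * call above still succeeds, but the stored masks are clamped:
	 * streaming DMA to memory above the limit is bounced through
	 * swiotlb, while *dev->dma_mask and dev->coherent_dma_mask keep
	 * values the hardware can address.
	 */
	dev_info(dev, "dma_mask=%llx coherent_dma_mask=%llx\n",
		 (unsigned long long)*dev->dma_mask,
		 (unsigned long long)dev->coherent_dma_mask);

	return 0;
}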