@@ -217,6 +217,9 @@ config NEED_DMA_MAP_STATE
config NEED_SG_DMA_LENGTH
def_bool y
+config ARCH_HAS_DMA_SET_COHERENT_MASK
+ def_bool y
+
config SMP
def_bool y
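
Selecting ARCH_HAS_DMA_SET_COHERENT_MASK compiles out the generic
dma_set_coherent_mask() fallback in include/linux/dma-mapping.h, so the
arm64 definition added later in this series takes over. For reference,
the fallback being replaced looks roughly like this (paraphrased from
this kernel generation, not part of the patch):

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif
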
@@ -20,6 +20,7 @@ struct dev_archdata {
#ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */
#endif
+ u64 parent_dma_mask;
bool dma_coherent;
};
@@ -564,6 +564,7 @@ static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
__dma_flush_area(virt, PAGE_SIZE);
}
+
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp,
unsigned long attrs)
@@ -795,6 +796,20 @@ static void __iommu_unmap_sg_attrs(struct device *dev,
iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}
+static int __iommu_set_dma_mask(struct device *dev, u64 mask)
+{
+ /* device is not DMA capable */
+ if (!dev->dma_mask)
+ return -EIO;
+
+ if (mask > dev->archdata.parent_dma_mask)
+ mask = dev->archdata.parent_dma_mask;
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
+
static const struct dma_map_ops iommu_dma_ops = {
.alloc = __iommu_alloc_attrs,
.free = __iommu_free_attrs,
@@ -811,8 +826,20 @@ static void __iommu_unmap_sg_attrs(struct device *dev,
.map_resource = iommu_dma_map_resource,
.unmap_resource = iommu_dma_unmap_resource,
.mapping_error = iommu_dma_mapping_error,
+ .set_dma_mask = __iommu_set_dma_mask,
};
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+ if (get_dma_ops(dev) == &iommu_dma_ops &&
+ mask > dev->archdata.parent_dma_mask)
+ mask = dev->archdata.parent_dma_mask;
+
+ dev->coherent_dma_mask = mask;
+ return 0;
+}
+EXPORT_SYMBOL(dma_set_coherent_mask);
+
/*
* TODO: Right now __iommu_setup_dma_ops() gets called too early to do
* everything it needs to - the device is only partially created and the
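
Installing .set_dma_mask in iommu_dma_ops is enough to intercept driver
mask requests, because the generic dma_set_mask() dispatches to the ops
hook when one is provided. Roughly, paraphrased from
include/linux/dma-mapping.h of this era (again, not part of the patch):

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/* Arch/bus-specific hook wins when one is registered */
	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
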
@@ -975,6 +1003,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
if (!dev->dma_ops)
dev->dma_ops = &swiotlb_dma_ops;
+ dev->archdata.parent_dma_mask = size - 1;
+
dev->archdata.dma_coherent = coherent;
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
}
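
End to end, a driver requesting a wide mask is clamped to the parent
bus limit recorded in arch_setup_dma_ops() above. A minimal sketch of a
consumer; foo_probe and the device are hypothetical, only the clamping
behaviour is from the patch:

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	/* Routed through __iommu_set_dma_mask()/dma_set_coherent_mask() */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/* *pdev->dev.dma_mask now holds min(DMA_BIT_MASK(64), parent_dma_mask) */
	return 0;
}
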
@@ -283,6 +283,54 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
return err;
}
EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
+
+int of_pci_get_dma_ranges(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
+{
+	struct device_node *node = of_node_get(np);
+	struct of_pci_range_parser parser;
+	struct of_pci_range range;
+	const int na = 3, ns = 2;
+	int rlen;
+	int ret;
+
+	if (!node)
+		return -EINVAL;
+
+	parser.node = node;
+	parser.pna = of_n_addr_cells(node);
+	parser.np = parser.pna + na + ns;
+
+	parser.range = of_get_property(node, "dma-ranges", &rlen);
+	if (!parser.range) {
+		pr_debug("PCI node %s has no dma-ranges property\n",
+			 np->full_name);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	parser.end = parser.range + rlen / sizeof(__be32);
+
+	/*
+	 * TODO: only the last dma-ranges entry is returned; supporting
+	 * multiple DMA windows needs a richer interface.
+	 */
+	ret = -ENODEV;
+	for_each_of_pci_range(&parser, &range) {
+		*dma_addr = range.pci_addr;
+		*paddr = range.cpu_addr;
+		*size = range.size;
+		ret = 0;
+	}
+
+	if (!ret)
+		pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
+			 *dma_addr, *paddr, *size);
+
+out:
+	of_node_put(node);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(of_pci_get_dma_ranges);
#endif /* CONFIG_OF_ADDRESS */
#ifdef CONFIG_PCI_MSI
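
A host bridge driver can then use the new helper to discover its
inbound window before programming address translation. A minimal
sketch, with foo_pcie_parse_dma() and the window programming left
hypothetical:

static int foo_pcie_parse_dma(struct platform_device *pdev)
{
	u64 dma_addr, paddr, size;
	int ret;

	ret = of_pci_get_dma_ranges(pdev->dev.of_node, &dma_addr,
				    &paddr, &size);
	if (ret)
		return ret;

	/* Program one inbound window: bus dma_addr -> CPU paddr */
	dev_info(&pdev->dev, "inbound: pci %#llx -> cpu %#llx, size %#llx\n",
		 dma_addr, paddr, size);
	return 0;
}
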
@@ -76,6 +76,7 @@ static inline void of_pci_check_probe_only(void) { }
int of_pci_get_host_bridge_resources(struct device_node *dev,
unsigned char busno, unsigned char bus_max,
struct list_head *resources, resource_size_t *io_base);
+int of_pci_get_dma_ranges(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size);
#else
static inline int of_pci_get_host_bridge_resources(struct device_node *dev,
unsigned char busno, unsigned char bus_max,
@@ -83,6 +84,11 @@ static inline int of_pci_get_host_bridge_resources(struct device_node *dev,
{
return -EINVAL;
}
+
+static inline int of_pci_get_dma_ranges(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
+{
+ return -EINVAL;
+}
#endif
#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)