@@ -163,6 +163,19 @@ bool xen_arch_need_swiotlb(struct device *dev,
 		!is_device_dma_coherent(dev));
 }
 
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ */
+static int xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+				void *cpu_addr, dma_addr_t dma_addr, size_t size,
+				unsigned long attrs)
+{
+	if (__generic_dma_ops(dev)->mmap)
+		return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+
+	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
 				 unsigned int address_bits,
 				 dma_addr_t *dma_handle)
@@ -198,6 +211,7 @@ static struct dma_map_ops xen_swiotlb_dma_ops = {
 	.unmap_page = xen_swiotlb_unmap_page,
 	.dma_supported = xen_swiotlb_dma_supported,
 	.set_dma_mask = xen_swiotlb_set_dma_mask,
+	.mmap = xen_swiotlb_dma_mmap,
 };
 
 int __init xen_mm_init(void)
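
For reference, the new .mmap hook is reached through the generic DMA mapping API: dma_mmap_coherent()/dma_mmap_attrs() look up the device's dma_map_ops and call ops->mmap, so on a Xen domain that installs xen_swiotlb_dma_ops the call lands in xen_swiotlb_dma_mmap() above. Below is a minimal sketch of how a driver might exercise it; my_dev, my_mmap and my_fops are hypothetical names used for illustration, not part of this patch.

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>

/* Hypothetical per-device state holding a buffer from dma_alloc_coherent(). */
struct my_dev {
	struct device *dev;
	void *cpu_addr;		/* kernel virtual address of the coherent buffer */
	dma_addr_t dma_handle;	/* bus address programmed into the hardware */
	size_t size;
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *md = file->private_data;

	/*
	 * dma_mmap_coherent() dispatches to the device's dma_map_ops->mmap,
	 * i.e. xen_swiotlb_dma_mmap() when xen_swiotlb_dma_ops is in use,
	 * which in turn defers to the arch dma_ops or dma_common_mmap().
	 */
	return dma_mmap_coherent(md->dev, vma, md->cpu_addr,
				 md->dma_handle, md->size);
}

static const struct file_operations my_fops = {
	.owner = THIS_MODULE,
	.mmap  = my_mmap,
};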