@@ -38,6 +38,39 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
return bus;
}
+#ifdef CONFIG_HAS_DMA_P2P
+static dma_peer_addr_t nommu_map_peer_resource(struct device *dev,
+ struct device *peer,
+ struct resource *res,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct pci_dev *pdev;
+ struct pci_dev *ppeer;
+ struct pci_bus_region region;
+ dma_peer_addr_t dma_address;
+
+ if (!dev_is_pci(dev) || !dev_is_pci(peer))
+ return DMA_ERROR_CODE;
+
+ pdev = to_pci_dev(dev);
+ ppeer = to_pci_dev(peer);
+
+ if (!pci_peer_traffic_supported(pdev, ppeer))
+ return DMA_ERROR_CODE;
+
+ pcibios_resource_to_bus(pdev->bus, &region, res);
+ dma_address = region.start + offset;
+ WARN_ON(size == 0);
+ if (!check_addr("map_peer_resource", dev, dma_address, size))
+ return DMA_ERROR_CODE;
+ flush_write_buffers();
+ return dma_address;
+}
+#endif
+
/* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scatter-gather version of the
* above pci_map_single interface. Here the scatter gather list
@@ -93,6 +126,9 @@ struct dma_map_ops nommu_dma_ops = {
.free = dma_generic_free_coherent,
.map_sg = nommu_map_sg,
.map_page = nommu_map_page,
+#ifdef CONFIG_HAS_DMA_P2P
+ .map_peer_resource = nommu_map_peer_resource,
+#endif
.sync_single_for_device = nommu_sync_single_for_device,
.sync_sg_for_device = nommu_sync_sg_for_device,
.is_phys = 1,
Add Intel nommu implementation of 'map_peer_resource', which simply checks whether peer traffic should be allowed and converts the resource to a bus address. Add behind CONFIG_HAS_DMA_P2P guards, since the dma_map_ops members are behind them as well. Signed-off-by: Will Davis <wdavis@nvidia.com> --- arch/x86/kernel/pci-nommu.c | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+)