@@ -1043,6 +1043,16 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
sg_dma_address(s) = DMA_MAPPING_ERROR;
sg_dma_len(s) = 0;
+ if (is_pci_p2pdma_page(sg_page(s)) && !s_iova_len) {
+ if (i > 0)
+ cur = sg_next(cur);
+
+ pci_p2pdma_map_bus_segment(s, cur);
+ count++;
+ cur_len = 0;
+ continue;
+ }
+
/*
* Now fill in the real DMA data. If...
* - there is a valid output segment to append to
@@ -1139,6 +1149,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
struct iova_domain *iovad = &cookie->iovad;
struct scatterlist *s, *prev = NULL;
int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
+ struct dev_pagemap *pgmap = NULL;
+ enum pci_p2pdma_map_type map_type;
dma_addr_t iova;
size_t iova_len = 0;
unsigned long mask = dma_get_seg_boundary(dev);
@@ -1174,6 +1186,35 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
s_length = iova_align(iovad, s_length + s_iova_off);
s->length = s_length;
+ if (is_pci_p2pdma_page(sg_page(s))) {
+ if (sg_page(s)->pgmap != pgmap) {
+ pgmap = sg_page(s)->pgmap;
+ map_type = pci_p2pdma_map_type(pgmap, dev);
+ }
+
+ switch (map_type) {
+ case PCI_P2PDMA_MAP_BUS_ADDR:
+ /*
+ * A zero length will be ignored by
+ * iommu_map_sg() and then can be detected
+ * in __finalise_sg() to actually map the
+ * bus address.
+ */
+ s->length = 0;
+ continue;
+ case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ /*
+ * P2P transactions via the host bridge are
+ * mapped with regular IOVAs, so there is
+ * nothing to do here; continue below.
+ */
+ break;
+ default:
+ ret = -EREMOTEIO;
+ goto out_restore_sg;
+ }
+ }
+
/*
* Due to the alignment of our single IOVA allocation, we can
* depend on these assumptions about the segment boundary mask:
@@ -1196,6 +1237,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
prev = s;
}
+ if (!iova_len)
+ return __finalise_sg(dev, sg, nents, 0);
+
iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
if (!iova) {
ret = -ENOMEM;
@@ -1217,7 +1261,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
out_restore_sg:
__invalidate_sg(sg, nents);
out:
- if (ret != -ENOMEM)
+ if (ret != -ENOMEM && ret != -EREMOTEIO)
return -EINVAL;
return ret;
}
@@ -1225,7 +1269,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
- dma_addr_t start, end;
+ dma_addr_t end, start = DMA_MAPPING_ERROR;
struct scatterlist *tmp;
int i;
@@ -1241,14 +1285,22 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
* The scatterlist segments are mapped into a single
* contiguous IOVA allocation, so this is incredibly easy.
*/
- start = sg_dma_address(sg);
- for_each_sg(sg_next(sg), tmp, nents - 1, i) {
+ for_each_sg(sg, tmp, nents, i) {
+ if (sg_is_dma_bus_address(tmp)) {
+ sg_dma_unmark_bus_address(tmp);
+ continue;
+ }
if (sg_dma_len(tmp) == 0)
break;
- sg = tmp;
+
+ if (start == DMA_MAPPING_ERROR)
+ start = sg_dma_address(tmp);
+
+ end = sg_dma_address(tmp) + sg_dma_len(tmp);
}
- end = sg_dma_address(sg) + sg_dma_len(sg);
- __iommu_dma_unmap(dev, start, end - start);
+
+ if (start != DMA_MAPPING_ERROR)
+ __iommu_dma_unmap(dev, start, end - start);
}
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
@@ -1441,6 +1493,7 @@ static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
}
static const struct dma_map_ops iommu_dma_ops = {
+ .flags = DMA_F_PCI_P2PDMA_SUPPORTED,
.alloc = iommu_dma_alloc,
.free = iommu_dma_free,
.alloc_pages = dma_common_alloc_pages,