diff mbox series

[v7,08/21] iommu/dma: support PCI P2PDMA pages in dma-iommu map_sg

Message ID 20220615161233.17527-9-logang@deltatee.com (mailing list archive)
State New, archived
Headers show
Series Userspace P2PDMA with O_DIRECT NVMe devices | expand

Commit Message

Logan Gunthorpe June 15, 2022, 4:12 p.m. UTC
When a PCI P2PDMA page is seen, set the IOVA length of the segment
to zero so that it is not mapped into the IOVA. Then, in finalise_sg(),
apply the appropriate bus address to the segment. The IOVA is not
created if the scatterlist only consists of P2PDMA pages.

A P2PDMA page may have three possible outcomes when being mapped:
  1) If the data path between the two devices doesn't go through
     the root port, then it should be mapped with a PCI bus address
  2) If the data path goes through the host bridge, it should be mapped
     normally with an IOMMU IOVA.
  3) It is not possible for the two devices to communicate and thus
     the mapping operation should fail (and it will return -EREMOTEIO).

Similar to dma-direct, the sg_dma_mark_pci_p2pdma() flag is used to
indicate bus address segments. On unmap, P2PDMA segments are skipped
over when determining the start and end IOVA addresses.

With this change, the flags variable in the dma_map_ops is set to
DMA_F_PCI_P2PDMA_SUPPORTED to indicate support for P2PDMA pages.

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/iommu/dma-iommu.c | 68 +++++++++++++++++++++++++++++++++++----
 1 file changed, 61 insertions(+), 7 deletions(-)

Comments

Robin Murphy June 29, 2022, 12:07 p.m. UTC | #1
On 2022-06-15 17:12, Logan Gunthorpe wrote:
> When a PCI P2PDMA page is seen, set the IOVA length of the segment
> to zero so that it is not mapped into the IOVA. Then, in finalise_sg(),
> apply the appropriate bus address to the segment. The IOVA is not
> created if the scatterlist only consists of P2PDMA pages.
> 
> A P2PDMA page may have three possible outcomes when being mapped:
>    1) If the data path between the two devices doesn't go through
>       the root port, then it should be mapped with a PCI bus address
>    2) If the data path goes through the host bridge, it should be mapped
>       normally with an IOMMU IOVA.
>    3) It is not possible for the two devices to communicate and thus
>       the mapping operation should fail (and it will return -EREMOTEIO).
> 
> Similar to dma-direct, the sg_dma_mark_pci_p2pdma() flag is used to
> indicate bus address segments. On unmap, P2PDMA segments are skipped
> over when determining the start and end IOVA addresses.
> 
> With this change, the flags variable in the dma_map_ops is set to
> DMA_F_PCI_P2PDMA_SUPPORTED to indicate support for P2PDMA pages.
> 
> Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
> ---
>   drivers/iommu/dma-iommu.c | 68 +++++++++++++++++++++++++++++++++++----
>   1 file changed, 61 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index f90251572a5d..b01ca0c6a7ab 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -21,6 +21,7 @@
>   #include <linux/iova.h>
>   #include <linux/irq.h>
>   #include <linux/list_sort.h>
> +#include <linux/memremap.h>
>   #include <linux/mm.h>
>   #include <linux/mutex.h>
>   #include <linux/pci.h>
> @@ -1062,6 +1063,16 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
>   		sg_dma_address(s) = DMA_MAPPING_ERROR;
>   		sg_dma_len(s) = 0;
>   
> +		if (is_pci_p2pdma_page(sg_page(s)) && !s_iova_len) {

Logically, should we not be able to use sg_is_dma_bus_address() here? I 
think it should be feasible, and simpler, to prepare the p2p segments 
up-front, such that at this point all we need to do is restore the 
original length (if even that, see below).

> +			if (i > 0)
> +				cur = sg_next(cur);
> +
> +			pci_p2pdma_map_bus_segment(s, cur);
> +			count++;
> +			cur_len = 0;
> +			continue;
> +		}
> +
>   		/*
>   		 * Now fill in the real DMA data. If...
>   		 * - there is a valid output segment to append to
> @@ -1158,6 +1169,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
>   	struct iova_domain *iovad = &cookie->iovad;
>   	struct scatterlist *s, *prev = NULL;
>   	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
> +	struct dev_pagemap *pgmap = NULL;
> +	enum pci_p2pdma_map_type map_type;
>   	dma_addr_t iova;
>   	size_t iova_len = 0;
>   	unsigned long mask = dma_get_seg_boundary(dev);
> @@ -1193,6 +1206,35 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
>   		s_length = iova_align(iovad, s_length + s_iova_off);
>   		s->length = s_length;
>   
> +		if (is_pci_p2pdma_page(sg_page(s))) {
> +			if (sg_page(s)->pgmap != pgmap) {
> +				pgmap = sg_page(s)->pgmap;
> +				map_type = pci_p2pdma_map_type(pgmap, dev);
> +			}

There's a definite code smell here, but per above and below I think we 
*should* actually call the new helper instead of copy-pasting half of it.

> +
> +			switch (map_type) {
> +			case PCI_P2PDMA_MAP_BUS_ADDR:
> +				/*
> +				 * A zero length will be ignored by
> +				 * iommu_map_sg() and then can be detected

If that is required behaviour then it needs an explicit check in 
iommu_map_sg() to guarantee (and document) it. It's only by chance that 
__iommu_map() happens to return success for size == 0 *if* all the other 
arguments still line up, which is a far cry from a safe no-op.

However, rather than play yet more silly tricks, I think it would make 
even more sense to make iommu_map_sg() properly aware and able to skip 
direct p2p segments on its own. Once it becomes normal to pass mixed 
scatterlists around, it's only a matter of time until one ends up being 
handed to a driver which manages its own IOMMU domain, and then what?

> +				 * in __finalise_sg() to actually map the
> +				 * bus address.
> +				 */
> +				s->length = 0;
> +				continue;
> +			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
> +				/*
> +				 * Mapping through host bridge should be
> +				 * mapped with regular IOVAs, thus we
> +				 * do nothing here and continue below.
> +				 */
> +				break;
> +			default:
> +				ret = -EREMOTEIO;
> +				goto out_restore_sg;
> +			}
> +		}
> +
>   		/*
>   		 * Due to the alignment of our single IOVA allocation, we can
>   		 * depend on these assumptions about the segment boundary mask:
> @@ -1215,6 +1257,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
>   		prev = s;
>   	}
>   
> +	if (!iova_len)
> +		return __finalise_sg(dev, sg, nents, 0);
> +
>   	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
>   	if (!iova) {
>   		ret = -ENOMEM;
> @@ -1236,7 +1281,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
>   out_restore_sg:
>   	__invalidate_sg(sg, nents);
>   out:
> -	if (ret != -ENOMEM)
> +	if (ret != -ENOMEM && ret != -EREMOTEIO)
>   		return -EINVAL;
>   	return ret;
>   }
> @@ -1244,7 +1289,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
>   static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
>   		int nents, enum dma_data_direction dir, unsigned long attrs)
>   {
> -	dma_addr_t start, end;
> +	dma_addr_t end, start = DMA_MAPPING_ERROR;

There are several things I don't like about this logic, I'd rather have 
"end = 0" here...

>   	struct scatterlist *tmp;
>   	int i;
>   
> @@ -1260,14 +1305,22 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
>   	 * The scatterlist segments are mapped into a single
>   	 * contiguous IOVA allocation, so this is incredibly easy.
>   	 */

[ This comment rather stops being true :( ]

> -	start = sg_dma_address(sg);
> -	for_each_sg(sg_next(sg), tmp, nents - 1, i) {

...then generalise the first-element special case here into a dedicated 
"walk to the first non-p2p element" loop...

> +	for_each_sg(sg, tmp, nents, i) {
> +		if (sg_is_dma_bus_address(tmp)) {
> +			sg_dma_unmark_bus_address(tmp);

[ Again I question what this actually achieves ]

> +			continue;
> +		}
>   		if (sg_dma_len(tmp) == 0)
>   			break;
> -		sg = tmp;
> +
> +		if (start == DMA_MAPPING_ERROR)
> +			start = sg_dma_address(tmp);
> +
> +		end = sg_dma_address(tmp) + sg_dma_len(tmp);
>   	}
> -	end = sg_dma_address(sg) + sg_dma_len(sg);
> -	__iommu_dma_unmap(dev, start, end - start);
> +
> +	if (start != DMA_MAPPING_ERROR)

...then "if (end)" here.

Thanks,
Robin.

> +		__iommu_dma_unmap(dev, start, end - start);
>   }
>   
>   static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
> @@ -1460,6 +1513,7 @@ static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
>   }
>   
>   static const struct dma_map_ops iommu_dma_ops = {
> +	.flags			= DMA_F_PCI_P2PDMA_SUPPORTED,
>   	.alloc			= iommu_dma_alloc,
>   	.free			= iommu_dma_free,
>   	.alloc_pages		= dma_common_alloc_pages,
Logan Gunthorpe June 29, 2022, 3:57 p.m. UTC | #2
On 2022-06-29 06:07, Robin Murphy wrote:
> On 2022-06-15 17:12, Logan Gunthorpe wrote:
>> When a PCI P2PDMA page is seen, set the IOVA length of the segment
>> to zero so that it is not mapped into the IOVA. Then, in finalise_sg(),
>> apply the appropriate bus address to the segment. The IOVA is not
>> created if the scatterlist only consists of P2PDMA pages.
>>
>> A P2PDMA page may have three possible outcomes when being mapped:
>>    1) If the data path between the two devices doesn't go through
>>       the root port, then it should be mapped with a PCI bus address
>>    2) If the data path goes through the host bridge, it should be mapped
>>       normally with an IOMMU IOVA.
>>    3) It is not possible for the two devices to communicate and thus
>>       the mapping operation should fail (and it will return -EREMOTEIO).
>>
>> Similar to dma-direct, the sg_dma_mark_pci_p2pdma() flag is used to
>> indicate bus address segments. On unmap, P2PDMA segments are skipped
>> over when determining the start and end IOVA addresses.
>>
>> With this change, the flags variable in the dma_map_ops is set to
>> DMA_F_PCI_P2PDMA_SUPPORTED to indicate support for P2PDMA pages.
>>
>> Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
>> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
>> ---
>>   drivers/iommu/dma-iommu.c | 68 +++++++++++++++++++++++++++++++++++----
>>   1 file changed, 61 insertions(+), 7 deletions(-)
>>
>> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
>> index f90251572a5d..b01ca0c6a7ab 100644
>> --- a/drivers/iommu/dma-iommu.c
>> +++ b/drivers/iommu/dma-iommu.c
>> @@ -21,6 +21,7 @@
>>   #include <linux/iova.h>
>>   #include <linux/irq.h>
>>   #include <linux/list_sort.h>
>> +#include <linux/memremap.h>
>>   #include <linux/mm.h>
>>   #include <linux/mutex.h>
>>   #include <linux/pci.h>
>> @@ -1062,6 +1063,16 @@ static int __finalise_sg(struct device *dev,
>> struct scatterlist *sg, int nents,
>>           sg_dma_address(s) = DMA_MAPPING_ERROR;
>>           sg_dma_len(s) = 0;
>>   +        if (is_pci_p2pdma_page(sg_page(s)) && !s_iova_len) {
> 
> Logically, should we not be able to use sg_is_dma_bus_address() here? I
> think it should be feasible, and simpler, to prepare the p2p segments
> up-front, such that at this point all we need to do is restore the
> original length (if even that, see below).

Per my previous email, no, because sg_is_dma_bus_address() is not set
yet and not meant to tell you something about the page. That flag will
be set below by pci_p2pdma_map_bus_segment() and then checked in
iommu_dma_unmap_sg() to determine if the dma_address in the segment
needs to be unmapped.

> 
>> +            if (i > 0)
>> +                cur = sg_next(cur);
>> +
>> +            pci_p2pdma_map_bus_segment(s, cur);
>> +            count++;
>> +            cur_len = 0;
>> +            continue;
>> +        }
>> +
>>           /*
>>            * Now fill in the real DMA data. If...
>>            * - there is a valid output segment to append to
>> @@ -1158,6 +1169,8 @@ static int iommu_dma_map_sg(struct device *dev,
>> struct scatterlist *sg,
>>       struct iova_domain *iovad = &cookie->iovad;
>>       struct scatterlist *s, *prev = NULL;
>>       int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
>> +    struct dev_pagemap *pgmap = NULL;
>> +    enum pci_p2pdma_map_type map_type;
>>       dma_addr_t iova;
>>       size_t iova_len = 0;
>>       unsigned long mask = dma_get_seg_boundary(dev);
>> @@ -1193,6 +1206,35 @@ static int iommu_dma_map_sg(struct device *dev,
>> struct scatterlist *sg,
>>           s_length = iova_align(iovad, s_length + s_iova_off);
>>           s->length = s_length;
>>   +        if (is_pci_p2pdma_page(sg_page(s))) {
>> +            if (sg_page(s)->pgmap != pgmap) {
>> +                pgmap = sg_page(s)->pgmap;
>> +                map_type = pci_p2pdma_map_type(pgmap, dev);
>> +            }
> 
> There's a definite code smell here, but per above and below I think we
> *should* actually call the new helper instead of copy-pasting half of it.


> 
>> +
>> +            switch (map_type) {
>> +            case PCI_P2PDMA_MAP_BUS_ADDR:
>> +                /*
>> +                 * A zero length will be ignored by
>> +                 * iommu_map_sg() and then can be detected
> 
> If that is required behaviour then it needs an explicit check in
> iommu_map_sg() to guarantee (and document) it. It's only by chance that
> __iommu_map() happens to return success for size == 0 *if* all the other
> arguments still line up, which is a far cry from a safe no-op.

What should such a check look like? I could certainly add some comments
to iommu_map_sg(), but I don't see what the code would need to check for...

> However, rather than play yet more silly tricks, I think it would make
> even more sense to make iommu_map_sg() properly aware and able to skip
> direct p2p segments on its own. Once it becomes normal to pass mixed
> scatterlists around, it's only a matter of time until one ends up being
> handed to a driver which manages its own IOMMU domain, and then what?

I suppose we can add another call to is_pci_p2pdma_page() inside
iommu_map_sg() if you think that is cleaner. Seems like more work on the
fast path to me, but I'm not opposed to it.

>> +                 * in __finalise_sg() to actually map the
>> +                 * bus address.
>> +                 */
>> +                s->length = 0;
>> +                continue;
>> +            case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
>> +                /*
>> +                 * Mapping through host bridge should be
>> +                 * mapped with regular IOVAs, thus we
>> +                 * do nothing here and continue below.
>> +                 */
>> +                break;
>> +            default:
>> +                ret = -EREMOTEIO;
>> +                goto out_restore_sg;
>> +            }
>> +        }
>> +
>>           /*
>>            * Due to the alignment of our single IOVA allocation, we can
>>            * depend on these assumptions about the segment boundary mask:
>> @@ -1215,6 +1257,9 @@ static int iommu_dma_map_sg(struct device *dev,
>> struct scatterlist *sg,
>>           prev = s;
>>       }
>>   +    if (!iova_len)
>> +        return __finalise_sg(dev, sg, nents, 0);
>> +
>>       iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev),
>> dev);
>>       if (!iova) {
>>           ret = -ENOMEM;
>> @@ -1236,7 +1281,7 @@ static int iommu_dma_map_sg(struct device *dev,
>> struct scatterlist *sg,
>>   out_restore_sg:
>>       __invalidate_sg(sg, nents);
>>   out:
>> -    if (ret != -ENOMEM)
>> +    if (ret != -ENOMEM && ret != -EREMOTEIO)
>>           return -EINVAL;
>>       return ret;
>>   }
>> @@ -1244,7 +1289,7 @@ static int iommu_dma_map_sg(struct device *dev,
>> struct scatterlist *sg,
>>   static void iommu_dma_unmap_sg(struct device *dev, struct
>> scatterlist *sg,
>>           int nents, enum dma_data_direction dir, unsigned long attrs)
>>   {
>> -    dma_addr_t start, end;
>> +    dma_addr_t end, start = DMA_MAPPING_ERROR;
> 
> There are several things I don't like about this logic, I'd rather have
> "end = 0" here...

Ok, I think that should work.

>>       struct scatterlist *tmp;
>>       int i;
>>   @@ -1260,14 +1305,22 @@ static void iommu_dma_unmap_sg(struct device
>> *dev, struct scatterlist *sg,
>>        * The scatterlist segments are mapped into a single
>>        * contiguous IOVA allocation, so this is incredibly easy.
>>        */
> 
> [ This comment rather stops being true :( ]

Not exactly. Sure there are some segments in the SGL that have bus
addresses, but all the regular IOVAs still have a single contiguous
allocation and only require one call to  __iommu_dma_unmap(). The only
tricky issue is finding the first and last actual IOVA SG to get the range.

> 
>> -    start = sg_dma_address(sg);
>> -    for_each_sg(sg_next(sg), tmp, nents - 1, i) {
> 
> ...then generalise the first-element special case here into a dedicated
> "walk to the first non-p2p element" loop...

Ok, I'll see what I can do for that.

Logan
Robin Murphy June 29, 2022, 7:15 p.m. UTC | #3
On 2022-06-29 16:57, Logan Gunthorpe wrote:
> 
> 
> 
> On 2022-06-29 06:07, Robin Murphy wrote:
>> On 2022-06-15 17:12, Logan Gunthorpe wrote:
>>> When a PCI P2PDMA page is seen, set the IOVA length of the segment
>>> to zero so that it is not mapped into the IOVA. Then, in finalise_sg(),
>>> apply the appropriate bus address to the segment. The IOVA is not
>>> created if the scatterlist only consists of P2PDMA pages.
>>>
>>> A P2PDMA page may have three possible outcomes when being mapped:
>>>     1) If the data path between the two devices doesn't go through
>>>        the root port, then it should be mapped with a PCI bus address
>>>     2) If the data path goes through the host bridge, it should be mapped
>>>        normally with an IOMMU IOVA.
>>>     3) It is not possible for the two devices to communicate and thus
>>>        the mapping operation should fail (and it will return -EREMOTEIO).
>>>
>>> Similar to dma-direct, the sg_dma_mark_pci_p2pdma() flag is used to
>>> indicate bus address segments. On unmap, P2PDMA segments are skipped
>>> over when determining the start and end IOVA addresses.
>>>
>>> With this change, the flags variable in the dma_map_ops is set to
>>> DMA_F_PCI_P2PDMA_SUPPORTED to indicate support for P2PDMA pages.
>>>
>>> Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
>>> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
>>> ---
>>>    drivers/iommu/dma-iommu.c | 68 +++++++++++++++++++++++++++++++++++----
>>>    1 file changed, 61 insertions(+), 7 deletions(-)
>>>
>>> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
>>> index f90251572a5d..b01ca0c6a7ab 100644
>>> --- a/drivers/iommu/dma-iommu.c
>>> +++ b/drivers/iommu/dma-iommu.c
>>> @@ -21,6 +21,7 @@
>>>    #include <linux/iova.h>
>>>    #include <linux/irq.h>
>>>    #include <linux/list_sort.h>
>>> +#include <linux/memremap.h>
>>>    #include <linux/mm.h>
>>>    #include <linux/mutex.h>
>>>    #include <linux/pci.h>
>>> @@ -1062,6 +1063,16 @@ static int __finalise_sg(struct device *dev,
>>> struct scatterlist *sg, int nents,
>>>            sg_dma_address(s) = DMA_MAPPING_ERROR;
>>>            sg_dma_len(s) = 0;
>>>    +        if (is_pci_p2pdma_page(sg_page(s)) && !s_iova_len) {
>>
>> Logically, should we not be able to use sg_is_dma_bus_address() here? I
>> think it should be feasible, and simpler, to prepare the p2p segments
>> up-front, such that at this point all we need to do is restore the
>> original length (if even that, see below).
> 
> Per my previous email, no, because sg_is_dma_bus_address() is not set
> yet and not meant to tell you something about the page. That flag will
> be set below by pci_p2pdma_map_bus_segment() and then checkd in
> iommu_dma_unmap_sg() to determine if the dma_address in the segment
> needs to be unmapped.

I know it's not set yet as-is; I'm suggesting things should be 
restructured so that it *would be*. In the logical design of this code, 
the DMA addresses are effectively determined in iommu_dma_map_sg(), and 
__finalise_sg() merely converts them from a relative to an absolute form 
(along with undoing the other trickery). Thus the call to 
pci_p2pdma_map_bus_segment() absolutely belongs in the main 
iommu_map_sg() loop.

>>> +            if (i > 0)
>>> +                cur = sg_next(cur);
>>> +
>>> +            pci_p2pdma_map_bus_segment(s, cur);
>>> +            count++;
>>> +            cur_len = 0;
>>> +            continue;
>>> +        }
>>> +
>>>            /*
>>>             * Now fill in the real DMA data. If...
>>>             * - there is a valid output segment to append to
>>> @@ -1158,6 +1169,8 @@ static int iommu_dma_map_sg(struct device *dev,
>>> struct scatterlist *sg,
>>>        struct iova_domain *iovad = &cookie->iovad;
>>>        struct scatterlist *s, *prev = NULL;
>>>        int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
>>> +    struct dev_pagemap *pgmap = NULL;
>>> +    enum pci_p2pdma_map_type map_type;
>>>        dma_addr_t iova;
>>>        size_t iova_len = 0;
>>>        unsigned long mask = dma_get_seg_boundary(dev);
>>> @@ -1193,6 +1206,35 @@ static int iommu_dma_map_sg(struct device *dev,
>>> struct scatterlist *sg,
>>>            s_length = iova_align(iovad, s_length + s_iova_off);
>>>            s->length = s_length;
>>>    +        if (is_pci_p2pdma_page(sg_page(s))) {
>>> +            if (sg_page(s)->pgmap != pgmap) {
>>> +                pgmap = sg_page(s)->pgmap;
>>> +                map_type = pci_p2pdma_map_type(pgmap, dev);
>>> +            }
>>
>> There's a definite code smell here, but per above and below I think we
>> *should* actually call the new helper instead of copy-pasting half of it.
> 
> 
>>
>>> +
>>> +            switch (map_type) {
>>> +            case PCI_P2PDMA_MAP_BUS_ADDR:
>>> +                /*
>>> +                 * A zero length will be ignored by
>>> +                 * iommu_map_sg() and then can be detected
>>
>> If that is required behaviour then it needs an explicit check in
>> iommu_map_sg() to guarantee (and document) it. It's only by chance that
>> __iommu_map() happens to return success for size == 0 *if* all the other
>> arguments still line up, which is a far cry from a safe no-op.
> 
> What should such a check look like? I could certainly add some comments
> to iommu_map_sg(), but I don't see what the code would need to check for...

I'd say a check for zero-length segments would look like "if (sg->length 
== 0)", most likely with a "continue;" on the following line.

>> However, rather than play yet more silly tricks, I think it would make
>> even more sense to make iommu_map_sg() properly aware and able to skip
>> direct p2p segments on its own. Once it becomes normal to pass mixed
>> scatterlists around, it's only a matter of time until one ends up being
>> handed to a driver which manages its own IOMMU domain, and then what?
> 
> I suppose we can add another call to is_pci_p2pdma_page() inside
> iommu_map_sg() if you think that is cleaner. Seems like more work on the
> fast path to me, but I'm not opposed to it.

I was thinking more of sg_is_dma_bus_address() but admittedly under my 
initial misapprehension of that. I suppose there's still a tenuous 
argument that even though we're not actually consuming sg_dma_address() 
at that point, if a segment *has* been earmarked for direct p2p, then 
skipping it rather than mapping it at the root complex TA that's 
probably never going to see those transactions might seem the more 
logical choice.

However it's all a bit hypothetical, and not significantly cleaner than 
a zero-size special case, so I'm not particularly tied to the idea either.

>>> +                 * in __finalise_sg() to actually map the
>>> +                 * bus address.
>>> +                 */
>>> +                s->length = 0;
>>> +                continue;
>>> +            case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
>>> +                /*
>>> +                 * Mapping through host bridge should be
>>> +                 * mapped with regular IOVAs, thus we
>>> +                 * do nothing here and continue below.
>>> +                 */
>>> +                break;
>>> +            default:
>>> +                ret = -EREMOTEIO;
>>> +                goto out_restore_sg;
>>> +            }
>>> +        }
>>> +
>>>            /*
>>>             * Due to the alignment of our single IOVA allocation, we can
>>>             * depend on these assumptions about the segment boundary mask:
>>> @@ -1215,6 +1257,9 @@ static int iommu_dma_map_sg(struct device *dev,
>>> struct scatterlist *sg,
>>>            prev = s;
>>>        }
>>>    +    if (!iova_len)
>>> +        return __finalise_sg(dev, sg, nents, 0);
>>> +
>>>        iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev),
>>> dev);
>>>        if (!iova) {
>>>            ret = -ENOMEM;
>>> @@ -1236,7 +1281,7 @@ static int iommu_dma_map_sg(struct device *dev,
>>> struct scatterlist *sg,
>>>    out_restore_sg:
>>>        __invalidate_sg(sg, nents);
>>>    out:
>>> -    if (ret != -ENOMEM)
>>> +    if (ret != -ENOMEM && ret != -EREMOTEIO)
>>>            return -EINVAL;
>>>        return ret;
>>>    }
>>> @@ -1244,7 +1289,7 @@ static int iommu_dma_map_sg(struct device *dev,
>>> struct scatterlist *sg,
>>>    static void iommu_dma_unmap_sg(struct device *dev, struct
>>> scatterlist *sg,
>>>            int nents, enum dma_data_direction dir, unsigned long attrs)
>>>    {
>>> -    dma_addr_t start, end;
>>> +    dma_addr_t end, start = DMA_MAPPING_ERROR;
>>
>> There are several things I don't like about this logic, I'd rather have
>> "end = 0" here...
> 
> Ok, I think that should work.
> 
>>>        struct scatterlist *tmp;
>>>        int i;
>>>    @@ -1260,14 +1305,22 @@ static void iommu_dma_unmap_sg(struct device
>>> *dev, struct scatterlist *sg,
>>>         * The scatterlist segments are mapped into a single
>>>         * contiguous IOVA allocation, so this is incredibly easy.
>>>         */
>>
>> [ This comment rather stops being true :( ]
> 
> Not exactly. Sure there are some segments in the SGL that have bus
> addresses, but all the regular IOVAs still have a single contiguous
> allocation and only require one call to  __iommu_dma_unmap(). The only
> tricky issue is finding the first and last actual IOVA SG to get the range.

"The scatterlist segments" means all of them, including any p2p ones 
which are demonstrably not mapped into anything. The extra logic 
required to skip non-mapped p2p segments goes beyond "incredibly easy". 
If there's one thing I *do* definitely understand, it's my own comments ;)

Thanks,
Robin.

> 
>>
>>> -    start = sg_dma_address(sg);
>>> -    for_each_sg(sg_next(sg), tmp, nents - 1, i) {
>>
>> ...then generalise the first-element special case here into a dedicated
>> "walk to the first non-p2p element" loop...
> 
> Ok, I'll see what I can do for that.
> 
> Logan
Logan Gunthorpe June 29, 2022, 10:41 p.m. UTC | #4
On 2022-06-29 13:15, Robin Murphy wrote:
> On 2022-06-29 16:57, Logan Gunthorpe wrote:
>>
>>
>>
>> On 2022-06-29 06:07, Robin Murphy wrote:
>>> On 2022-06-15 17:12, Logan Gunthorpe wrote:
>>>> When a PCI P2PDMA page is seen, set the IOVA length of the segment
>>>> to zero so that it is not mapped into the IOVA. Then, in finalise_sg(),
>>>> apply the appropriate bus address to the segment. The IOVA is not
>>>> created if the scatterlist only consists of P2PDMA pages.
>>>>
>>>> A P2PDMA page may have three possible outcomes when being mapped:
>>>>     1) If the data path between the two devices doesn't go through
>>>>        the root port, then it should be mapped with a PCI bus address
>>>>     2) If the data path goes through the host bridge, it should be
>>>> mapped
>>>>        normally with an IOMMU IOVA.
>>>>     3) It is not possible for the two devices to communicate and thus
>>>>        the mapping operation should fail (and it will return
>>>> -EREMOTEIO).
>>>>
>>>> Similar to dma-direct, the sg_dma_mark_pci_p2pdma() flag is used to
>>>> indicate bus address segments. On unmap, P2PDMA segments are skipped
>>>> over when determining the start and end IOVA addresses.
>>>>
>>>> With this change, the flags variable in the dma_map_ops is set to
>>>> DMA_F_PCI_P2PDMA_SUPPORTED to indicate support for P2PDMA pages.
>>>>
>>>> Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
>>>> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
>>>> ---
>>>>    drivers/iommu/dma-iommu.c | 68
>>>> +++++++++++++++++++++++++++++++++++----
>>>>    1 file changed, 61 insertions(+), 7 deletions(-)
>>>>
>>>> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
>>>> index f90251572a5d..b01ca0c6a7ab 100644
>>>> --- a/drivers/iommu/dma-iommu.c
>>>> +++ b/drivers/iommu/dma-iommu.c
>>>> @@ -21,6 +21,7 @@
>>>>    #include <linux/iova.h>
>>>>    #include <linux/irq.h>
>>>>    #include <linux/list_sort.h>
>>>> +#include <linux/memremap.h>
>>>>    #include <linux/mm.h>
>>>>    #include <linux/mutex.h>
>>>>    #include <linux/pci.h>
>>>> @@ -1062,6 +1063,16 @@ static int __finalise_sg(struct device *dev,
>>>> struct scatterlist *sg, int nents,
>>>>            sg_dma_address(s) = DMA_MAPPING_ERROR;
>>>>            sg_dma_len(s) = 0;
>>>>    +        if (is_pci_p2pdma_page(sg_page(s)) && !s_iova_len) {
>>>
>>> Logically, should we not be able to use sg_is_dma_bus_address() here? I
>>> think it should be feasible, and simpler, to prepare the p2p segments
>>> up-front, such that at this point all we need to do is restore the
>>> original length (if even that, see below).
>>
>> Per my previous email, no, because sg_is_dma_bus_address() is not set
>> yet and not meant to tell you something about the page. That flag will
>> be set below by pci_p2pdma_map_bus_segment() and then checked in
>> iommu_dma_unmap_sg() to determine if the dma_address in the segment
>> needs to be unmapped.
> 
> I know it's not set yet as-is; I'm suggesting things should be
> restructured so that it *would be*. In the logical design of this code,
> the DMA addresses are effectively determined in iommu_dma_map_sg(), and
> __finalise_sg() merely converts them from a relative to an absolute form
> (along with undoing the other trickery). Thus the call to
> pci_p2pdma_map_bus_segment() absolutely belongs in the main
> iommu_map_sg() loop.

I don't see how that can work: __finalise_sg() does more than convert
them from relative to absolute, it also figures out which SG entry will
contain which dma_address segment. Which segment a P2PDMA address needs
to be programmed into depends on how 'cur' is calculated, which in
turn depends on things like seg_mask and max_len. This calculation is
not done in iommu_dma_map_sg() so I don't see how there's any hope of
assigning the bus address for the P2P segments in that function.

If there's a way to restructure things so that's possible that I'm not
seeing, I'm open to it but it's certainly not immediately obvious.

>>>> +
>>>> +            switch (map_type) {
>>>> +            case PCI_P2PDMA_MAP_BUS_ADDR:
>>>> +                /*
>>>> +                 * A zero length will be ignored by
>>>> +                 * iommu_map_sg() and then can be detected
>>>
>>> If that is required behaviour then it needs an explicit check in
>>> iommu_map_sg() to guarantee (and document) it. It's only by chance that
>>> __iommu_map() happens to return success for size == 0 *if* all the other
>>> arguments still line up, which is a far cry from a safe no-op.
>>
>> What should such a check look like? I could certainly add some comments
>> to iommu_map_sg(), but I don't see what the code would need to check
>> for...
> 
> I'd say a check for zero-length segments would look like "if (sg->length
> == 0)", most likely with a "continue;" on the following line.

Oh, that's pretty simple to add. Will change.

>>> However, rather than play yet more silly tricks, I think it would make
>>> even more sense to make iommu_map_sg() properly aware and able to skip
>>> direct p2p segments on its own. Once it becomes normal to pass mixed
>>> scatterlists around, it's only a matter of time until one ends up being
>>> handed to a driver which manages its own IOMMU domain, and then what?
>>
>> I suppose we can add another call to is_pci_p2pdma_page() inside
>> iommu_map_sg() if you think that is cleaner. Seems like more work on the
>> fast path to me, but I'm not opposed to it.
> 
> I was thinking more of sg_is_dma_bus_address() but admittedly under my
> initial misapprehension of that. I suppose there's still a tenuous
> argument that even though we're not actually consuming sg_dma_address()
> at that point, if a segment *has* been earmarked for direct p2p, then
> skipping it rather than mapping it at the root complex TA that's
> probably never going to see those transactions might seem the more
> logical choice.
> 
> However it's all a bit hypothetical, and not significantly cleaner than
> a zero-size special case, so I'm not particularly tied to the idea either.

Yeah, looking at it closer, I can't see how to get rid of the zero size
special case without doing the whole pci_p2pdma_map_type() calculation
twice which we really want to avoid.

Logan
Robin Murphy June 30, 2022, 2:56 p.m. UTC | #5
On 2022-06-29 23:41, Logan Gunthorpe wrote:
> 
> 
> On 2022-06-29 13:15, Robin Murphy wrote:
>> On 2022-06-29 16:57, Logan Gunthorpe wrote:
>>>
>>>
>>>
>>> On 2022-06-29 06:07, Robin Murphy wrote:
>>>> On 2022-06-15 17:12, Logan Gunthorpe wrote:
>>>>> When a PCI P2PDMA page is seen, set the IOVA length of the segment
>>>>> to zero so that it is not mapped into the IOVA. Then, in finalise_sg(),
>>>>> apply the appropriate bus address to the segment. The IOVA is not
>>>>> created if the scatterlist only consists of P2PDMA pages.
>>>>>
>>>>> A P2PDMA page may have three possible outcomes when being mapped:
>>>>>      1) If the data path between the two devices doesn't go through
>>>>>         the root port, then it should be mapped with a PCI bus address
>>>>>      2) If the data path goes through the host bridge, it should be
>>>>> mapped
>>>>>         normally with an IOMMU IOVA.
>>>>>      3) It is not possible for the two devices to communicate and thus
>>>>>         the mapping operation should fail (and it will return
>>>>> -EREMOTEIO).
>>>>>
>>>>> Similar to dma-direct, the sg_dma_mark_pci_p2pdma() flag is used to
>>>>> indicate bus address segments. On unmap, P2PDMA segments are skipped
>>>>> over when determining the start and end IOVA addresses.
>>>>>
>>>>> With this change, the flags variable in the dma_map_ops is set to
>>>>> DMA_F_PCI_P2PDMA_SUPPORTED to indicate support for P2PDMA pages.
>>>>>
>>>>> Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
>>>>> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
>>>>> ---
>>>>>     drivers/iommu/dma-iommu.c | 68
>>>>> +++++++++++++++++++++++++++++++++++----
>>>>>     1 file changed, 61 insertions(+), 7 deletions(-)
>>>>>
>>>>> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
>>>>> index f90251572a5d..b01ca0c6a7ab 100644
>>>>> --- a/drivers/iommu/dma-iommu.c
>>>>> +++ b/drivers/iommu/dma-iommu.c
>>>>> @@ -21,6 +21,7 @@
>>>>>     #include <linux/iova.h>
>>>>>     #include <linux/irq.h>
>>>>>     #include <linux/list_sort.h>
>>>>> +#include <linux/memremap.h>
>>>>>     #include <linux/mm.h>
>>>>>     #include <linux/mutex.h>
>>>>>     #include <linux/pci.h>
>>>>> @@ -1062,6 +1063,16 @@ static int __finalise_sg(struct device *dev,
>>>>> struct scatterlist *sg, int nents,
>>>>>             sg_dma_address(s) = DMA_MAPPING_ERROR;
>>>>>             sg_dma_len(s) = 0;
>>>>>     +        if (is_pci_p2pdma_page(sg_page(s)) && !s_iova_len) {
>>>>
>>>> Logically, should we not be able to use sg_is_dma_bus_address() here? I
>>>> think it should be feasible, and simpler, to prepare the p2p segments
>>>> up-front, such that at this point all we need to do is restore the
>>>> original length (if even that, see below).
>>>
>>> Per my previous email, no, because sg_is_dma_bus_address() is not set
>>> yet and not meant to tell you something about the page. That flag will
>>> be set below by pci_p2pdma_map_bus_segment() and then checked in
>>> iommu_dma_unmap_sg() to determine if the dma_address in the segment
>>> needs to be unmapped.
>>
>> I know it's not set yet as-is; I'm suggesting things should be
>> restructured so that it *would be*. In the logical design of this code,
>> the DMA addresses are effectively determined in iommu_dma_map_sg(), and
>> __finalise_sg() merely converts them from a relative to an absolute form
>> (along with undoing the other trickery). Thus the call to
>> pci_p2pdma_map_bus_segment() absolutely belongs in the main
>> iommu_map_sg() loop.
> 
> I don't see how that can work: __finalise_sg() does more than convert
> them from relative to absolute, it also figures out which SG entry will
> contain which dma_address segment. Which segment a P2PDMA address needs
>> to be programmed into depends on how 'cur' is calculated, which in
> turn depends on things like seg_mask and max_len. This calculation is
> not done in iommu_dma_map_sg() so I don't see how there's any hope of
> assigning the bus address for the P2P segments in that function.
> 
> If there's a way to restructure things so that's possible that I'm not
> seeing, I'm open to it but it's certainly not immediately obvious.

Huh? It's still virtually the same thing; iommu_dma_map_sg() calls 
pci_p2pdma_map_bus_segment(s) and sets s->length to 0 if 
PCI_P2PDMA_MAP_BUS_ADDR, then __finalise_sg() can use 
sg_is_dma_bus_address(s) in place of is_pci_p2pdma_page(sg_page(s)), and 
just propagate the DMA address and original length from s to cur.

Here you've written a patch which looks to correctly interrupt any 
ongoing concatenation state and convey some data from the given input 
segment to the appropriate output segment, so I'm baffled by why you'd 
think you couldn't do what you've already done.

Thanks,
Robin.
Logan Gunthorpe June 30, 2022, 9:21 p.m. UTC | #6
On 2022-06-30 08:56, Robin Murphy wrote:
> On 2022-06-29 23:41, Logan Gunthorpe wrote:
>>
>>
>> On 2022-06-29 13:15, Robin Murphy wrote:
>>> On 2022-06-29 16:57, Logan Gunthorpe wrote:
>>>>
>>>>
>>>>
>>>> On 2022-06-29 06:07, Robin Murphy wrote:
>>>>> On 2022-06-15 17:12, Logan Gunthorpe wrote:
>>>>>> When a PCI P2PDMA page is seen, set the IOVA length of the segment
>>>>>> to zero so that it is not mapped into the IOVA. Then, in
>>>>>> finalise_sg(),
>>>>>> apply the appropriate bus address to the segment. The IOVA is not
>>>>>> created if the scatterlist only consists of P2PDMA pages.
>>>>>>
>>>>>> A P2PDMA page may have three possible outcomes when being mapped:
>>>>>>      1) If the data path between the two devices doesn't go through
>>>>>>         the root port, then it should be mapped with a PCI bus
>>>>>> address
>>>>>>      2) If the data path goes through the host bridge, it should be
>>>>>> mapped
>>>>>>         normally with an IOMMU IOVA.
>>>>>>      3) It is not possible for the two devices to communicate and
>>>>>> thus
>>>>>>         the mapping operation should fail (and it will return
>>>>>> -EREMOTEIO).
>>>>>>
>>>>>> Similar to dma-direct, the sg_dma_mark_pci_p2pdma() flag is used to
>>>>>> indicate bus address segments. On unmap, P2PDMA segments are skipped
>>>>>> over when determining the start and end IOVA addresses.
>>>>>>
>>>>>> With this change, the flags variable in the dma_map_ops is set to
>>>>>> DMA_F_PCI_P2PDMA_SUPPORTED to indicate support for P2PDMA pages.
>>>>>>
>>>>>> Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
>>>>>> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
>>>>>> ---
>>>>>>     drivers/iommu/dma-iommu.c | 68
>>>>>> +++++++++++++++++++++++++++++++++++----
>>>>>>     1 file changed, 61 insertions(+), 7 deletions(-)
>>>>>>
>>>>>> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
>>>>>> index f90251572a5d..b01ca0c6a7ab 100644
>>>>>> --- a/drivers/iommu/dma-iommu.c
>>>>>> +++ b/drivers/iommu/dma-iommu.c
>>>>>> @@ -21,6 +21,7 @@
>>>>>>     #include <linux/iova.h>
>>>>>>     #include <linux/irq.h>
>>>>>>     #include <linux/list_sort.h>
>>>>>> +#include <linux/memremap.h>
>>>>>>     #include <linux/mm.h>
>>>>>>     #include <linux/mutex.h>
>>>>>>     #include <linux/pci.h>
>>>>>> @@ -1062,6 +1063,16 @@ static int __finalise_sg(struct device *dev,
>>>>>> struct scatterlist *sg, int nents,
>>>>>>             sg_dma_address(s) = DMA_MAPPING_ERROR;
>>>>>>             sg_dma_len(s) = 0;
>>>>>>     +        if (is_pci_p2pdma_page(sg_page(s)) && !s_iova_len) {
>>>>>
>>>>> Logically, should we not be able to use sg_is_dma_bus_address()
>>>>> here? I
>>>>> think it should be feasible, and simpler, to prepare the p2p segments
>>>>> up-front, such that at this point all we need to do is restore the
>>>>> original length (if even that, see below).
>>>>
>>>> Per my previous email, no, because sg_is_dma_bus_address() is not set
>>>> yet and not meant to tell you something about the page. That flag will
>>>> be set below by pci_p2pdma_map_bus_segment() and then checkd in
>>>> iommu_dma_unmap_sg() to determine if the dma_address in the segment
>>>> iommu_dma_unmap_sg() to determine if the dma_address in the segment
>>>> needs to be unmapped.
>>>
>>> I know it's not set yet as-is; I'm suggesting things should be
>>> restructured so that it *would be*. In the logical design of this code,
>>> the DMA addresses are effectively determined in iommu_dma_map_sg(), and
>>> __finalise_sg() merely converts them from a relative to an absolute form
>>> (along with undoing the other trickery). Thus the call to
>>> pci_p2pdma_map_bus_segment() absolutely belongs in the main
>>> iommu_map_sg() loop.
>>
>> I don't see how that can work: __finalise_sg() does more than convert
>> them from relative to absolute, it also figures out which SG entry will
>> contain which dma_address segment. Which segment a P2PDMA address needs
>> to be programmed into depends on how 'cur' is calculated, which in
>> turn depends on things like seg_mask and max_len. This calculation is
>> not done in iommu_dma_map_sg() so I don't see how there's any hope of
>> assigning the bus address for the P2P segments in that function.
>>
>> If there's a way to restructure things so that's possible that I'm not
>> seeing, I'm open to it but it's certainly not immediately obvious.
> 
> Huh? It's still virtually the same thing; iommu_dma_map_sg() calls
> pci_p2pdma_map_bus_segment(s) and sets s->length to 0 if
> PCI_P2PDMA_MAP_BUS_ADDR, then __finalise_sg() can use
> sg_is_dma_bus_address(s) in place of is_pci_p2pdma_page(sg_page(s)), and
> just propagate the DMA address and original length from s to cur.
> 
> Here you've written a patch which looks to correctly interrupt any
> ongoing concatenation state and convey some data from the given input
> segment to the appropriate output segment, so I'm baffled by why you'd
> think you couldn't do what you've already done.

Ah, I understand now, thanks for the patience. It took me a couple of
read throughs before I got it, but I figured it out and now have a
working implementation that looks really nice. It's a big improvement
not needing the two different P2PDMA helpers.

I'll send a v8 of just the first 13 patches next week after a bit more
testing. I've put a draft git branch here if you want to look at it
before that:

https://github.com/sbates130272/linux-p2pmem  p2pdma_map_v8

Thanks!

Logan
diff mbox series

Patch

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f90251572a5d..b01ca0c6a7ab 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -21,6 +21,7 @@ 
 #include <linux/iova.h>
 #include <linux/irq.h>
 #include <linux/list_sort.h>
+#include <linux/memremap.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/pci.h>
@@ -1062,6 +1063,16 @@  static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 		sg_dma_address(s) = DMA_MAPPING_ERROR;
 		sg_dma_len(s) = 0;
 
+		if (is_pci_p2pdma_page(sg_page(s)) && !s_iova_len) {
+			if (i > 0)
+				cur = sg_next(cur);
+
+			pci_p2pdma_map_bus_segment(s, cur);
+			count++;
+			cur_len = 0;
+			continue;
+		}
+
 		/*
 		 * Now fill in the real DMA data. If...
 		 * - there is a valid output segment to append to
@@ -1158,6 +1169,8 @@  static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	struct iova_domain *iovad = &cookie->iovad;
 	struct scatterlist *s, *prev = NULL;
 	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
+	struct dev_pagemap *pgmap = NULL;
+	enum pci_p2pdma_map_type map_type;
 	dma_addr_t iova;
 	size_t iova_len = 0;
 	unsigned long mask = dma_get_seg_boundary(dev);
@@ -1193,6 +1206,35 @@  static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		s_length = iova_align(iovad, s_length + s_iova_off);
 		s->length = s_length;
 
+		if (is_pci_p2pdma_page(sg_page(s))) {
+			if (sg_page(s)->pgmap != pgmap) {
+				pgmap = sg_page(s)->pgmap;
+				map_type = pci_p2pdma_map_type(pgmap, dev);
+			}
+
+			switch (map_type) {
+			case PCI_P2PDMA_MAP_BUS_ADDR:
+				/*
+				 * A zero length will be ignored by
+				 * iommu_map_sg() and then can be detected
+				 * in __finalise_sg() to actually map the
+				 * bus address.
+				 */
+				s->length = 0;
+				continue;
+			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+				/*
+				 * Mapping through host bridge should be
+				 * mapped with regular IOVAs, thus we
+				 * do nothing here and continue below.
+				 */
+				break;
+			default:
+				ret = -EREMOTEIO;
+				goto out_restore_sg;
+			}
+		}
+
 		/*
 		 * Due to the alignment of our single IOVA allocation, we can
 		 * depend on these assumptions about the segment boundary mask:
@@ -1215,6 +1257,9 @@  static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		prev = s;
 	}
 
+	if (!iova_len)
+		return __finalise_sg(dev, sg, nents, 0);
+
 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
 	if (!iova) {
 		ret = -ENOMEM;
@@ -1236,7 +1281,7 @@  static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 out_restore_sg:
 	__invalidate_sg(sg, nents);
 out:
-	if (ret != -ENOMEM)
+	if (ret != -ENOMEM && ret != -EREMOTEIO)
 		return -EINVAL;
 	return ret;
 }
@@ -1244,7 +1289,7 @@  static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
-	dma_addr_t start, end;
+	dma_addr_t end, start = DMA_MAPPING_ERROR;
 	struct scatterlist *tmp;
 	int i;
 
@@ -1260,14 +1305,22 @@  static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	 * The scatterlist segments are mapped into a single
 	 * contiguous IOVA allocation, so this is incredibly easy.
 	 */
-	start = sg_dma_address(sg);
-	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
+	for_each_sg(sg, tmp, nents, i) {
+		if (sg_is_dma_bus_address(tmp)) {
+			sg_dma_unmark_bus_address(tmp);
+			continue;
+		}
 		if (sg_dma_len(tmp) == 0)
 			break;
-		sg = tmp;
+
+		if (start == DMA_MAPPING_ERROR)
+			start = sg_dma_address(tmp);
+
+		end = sg_dma_address(tmp) + sg_dma_len(tmp);
 	}
-	end = sg_dma_address(sg) + sg_dma_len(sg);
-	__iommu_dma_unmap(dev, start, end - start);
+
+	if (start != DMA_MAPPING_ERROR)
+		__iommu_dma_unmap(dev, start, end - start);
 }
 
 static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
@@ -1460,6 +1513,7 @@  static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
 }
 
 static const struct dma_map_ops iommu_dma_ops = {
+	.flags			= DMA_F_PCI_P2PDMA_SUPPORTED,
 	.alloc			= iommu_dma_alloc,
 	.free			= iommu_dma_free,
 	.alloc_pages		= dma_common_alloc_pages,