
[RFC,v2,08/11] iommu/dma: Support PCI P2PDMA pages in dma-iommu map_sg

Message ID 20210311233142.7900-9-logang@deltatee.com (mailing list archive)
State Not Applicable
Series Add support to dma_map_sg for P2PDMA

Commit Message

Logan Gunthorpe March 11, 2021, 11:31 p.m. UTC
When a PCI P2PDMA page is seen, set the IOVA length of the segment
to zero so that it is not mapped into the IOVA. Then, in finalise_sg(),
apply the appropriate bus address to the segment. The IOVA is not
created if the scatterlist only consists of P2PDMA pages.

Similar to dma-direct, the sg_mark_pci_p2pdma() flag is used to
indicate bus address segments. On unmap, P2PDMA segments are skipped
over when determining the start and end IOVA addresses.

With this change, the flags variable in the dma_map_ops is
set to DMA_F_PCI_P2PDMA_SUPPORTED to indicate support for
P2PDMA pages.

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
---
 drivers/iommu/dma-iommu.c | 63 ++++++++++++++++++++++++++++++++-------
 1 file changed, 53 insertions(+), 10 deletions(-)
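
For drivers that want to use this: the intent is that a submitter checks
the new flag before building an SGL containing P2PDMA pages. A minimal
sketch of that check (my_dev_supports_p2pdma() is just an illustrative
name; treating NULL ops as dma-direct, which earlier patches in this
series also teach to handle P2PDMA pages, is an assumption about how the
series fits together):

    #include <linux/dma-map-ops.h>

    static bool my_dev_supports_p2pdma(struct device *dev)
    {
            const struct dma_map_ops *ops = get_dma_ops(dev);

            /* NULL ops means dma-direct, handled earlier in this series */
            if (!ops)
                    return true;

            return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED;
    }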

Comments

Robin Murphy March 12, 2021, 3:52 p.m. UTC | #1
On 2021-03-11 23:31, Logan Gunthorpe wrote:
> When a PCI P2PDMA page is seen, set the IOVA length of the segment
> to zero so that it is not mapped into the IOVA. Then, in finalise_sg(),
> apply the appropriate bus address to the segment. The IOVA is not
> created if the scatterlist only consists of P2PDMA pages.

This misled me at first, but I see the implementation does actually 
appear to accommodate the case of working ACS where P2P *would* still 
need to be mapped at the IOMMU.

> Similar to dma-direct, the sg_mark_pci_p2pdma() flag is used to
> indicate bus address segments. On unmap, P2PDMA segments are skipped
> over when determining the start and end IOVA addresses.
> 
> With this change, the flags variable in the dma_map_ops is
> set to DMA_F_PCI_P2PDMA_SUPPORTED to indicate support for
> P2PDMA pages.
> 
> Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
> ---
>   drivers/iommu/dma-iommu.c | 63 ++++++++++++++++++++++++++++++++-------
>   1 file changed, 53 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index af765c813cc8..c0821e9051a9 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -20,6 +20,7 @@
>   #include <linux/mm.h>
>   #include <linux/mutex.h>
>   #include <linux/pci.h>
> +#include <linux/pci-p2pdma.h>
>   #include <linux/swiotlb.h>
>   #include <linux/scatterlist.h>
>   #include <linux/vmalloc.h>
> @@ -846,7 +847,7 @@ static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
>    * segment's start address to avoid concatenating across one.
>    */
>   static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
> -		dma_addr_t dma_addr)
> +		dma_addr_t dma_addr, unsigned long attrs)
>   {
>   	struct scatterlist *s, *cur = sg;
>   	unsigned long seg_mask = dma_get_seg_boundary(dev);
> @@ -864,6 +865,20 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
>   		sg_dma_address(s) = DMA_MAPPING_ERROR;
>   		sg_dma_len(s) = 0;
>   
> +		if (is_pci_p2pdma_page(sg_page(s)) && !s_iova_len) {
> +			if (i > 0)
> +				cur = sg_next(cur);
> +
> +			sg_dma_address(cur) = sg_phys(s) + s->offset -

Are you sure about that? ;)

> +				pci_p2pdma_bus_offset(sg_page(s));

Can the bus offset make P2P addresses overlap with regions of mem space 
that we might use for regular IOVA allocation? That would be very bad...

> +			sg_dma_len(cur) = s->length;
> +			sg_mark_pci_p2pdma(cur);
> +
> +			count++;
> +			cur_len = 0;
> +			continue;
> +		}
> +
>   		/*
>   		 * Now fill in the real DMA data. If...
>   		 * - there is a valid output segment to append to
> @@ -960,11 +975,12 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
>   	struct iommu_dma_cookie *cookie = domain->iova_cookie;
>   	struct iova_domain *iovad = &cookie->iovad;
>   	struct scatterlist *s, *prev = NULL;
> +	struct dev_pagemap *pgmap = NULL;
>   	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
>   	dma_addr_t iova;
>   	size_t iova_len = 0;
>   	unsigned long mask = dma_get_seg_boundary(dev);
> -	int i;
> +	int i, map = -1, ret = 0;
>   
>   	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
>   	    iommu_deferred_attach(dev, domain))
> @@ -993,6 +1009,23 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
>   		s_length = iova_align(iovad, s_length + s_iova_off);
>   		s->length = s_length;
>   
> +		if (is_pci_p2pdma_page(sg_page(s))) {
> +			if (sg_page(s)->pgmap != pgmap) {
> +				pgmap = sg_page(s)->pgmap;
> +				map = pci_p2pdma_dma_map_type(dev, pgmap);
> +			}
> +
> +			if (map < 0) {

It rather feels like it should be the job of whoever creates the list in 
the first place not to put unusable pages in it, especially since the 
p2pdma_map_type looks to be a fairly coarse-grained and static thing. 
The DMA API isn't responsible for validating normal memory pages, so 
what makes P2P special?

> +				ret = -EREMOTEIO;
> +				goto out_restore_sg;
> +			}
> +
> +			if (map) {
> +				s->length = 0;

I'm not really thrilled about the idea of passing zero-length segments 
to iommu_map_sg(). Yes, it happens to trick the concatenation logic in 
the current implementation into doing what you want, but it feels fragile.

> +				continue;
> +			}
> +		}
> +
>   		/*
>   		 * Due to the alignment of our single IOVA allocation, we can
>   		 * depend on these assumptions about the segment boundary mask:
> @@ -1015,6 +1048,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
>   		prev = s;
>   	}
>   
> +	if (!iova_len)
> +		return __finalise_sg(dev, sg, nents, 0, attrs);
> +
>   	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
>   	if (!iova)
>   		goto out_restore_sg;
> @@ -1026,19 +1062,19 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
>   	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
>   		goto out_free_iova;
>   
> -	return __finalise_sg(dev, sg, nents, iova);
> +	return __finalise_sg(dev, sg, nents, iova, attrs);
>   
>   out_free_iova:
>   	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
>   out_restore_sg:
>   	__invalidate_sg(sg, nents);
> -	return 0;
> +	return ret;
>   }
>   
>   static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
>   		int nents, enum dma_data_direction dir, unsigned long attrs)
>   {
> -	dma_addr_t start, end;
> +	dma_addr_t end, start = DMA_MAPPING_ERROR;
>   	struct scatterlist *tmp;
>   	int i;
>   
> @@ -1054,14 +1090,20 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
>   	 * The scatterlist segments are mapped into a single
>   	 * contiguous IOVA allocation, so this is incredibly easy.
>   	 */
> -	start = sg_dma_address(sg);
> -	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
> +	for_each_sg(sg, tmp, nents, i) {
> +		if (sg_is_pci_p2pdma(tmp))

Since the flag is associated with the DMA address which will no longer 
be valid, shouldn't it be cleared? The circumstances in which leaving it 
around could cause a problem are tenuous, but definitely possible.

Robin.

> +			continue;
>   		if (sg_dma_len(tmp) == 0)
>   			break;
> -		sg = tmp;
> +
> +		if (start == DMA_MAPPING_ERROR)
> +			start = sg_dma_address(tmp);
> +
> +		end = sg_dma_address(tmp) + sg_dma_len(tmp);
>   	}
> -	end = sg_dma_address(sg) + sg_dma_len(sg);
> -	__iommu_dma_unmap(dev, start, end - start);
> +
> +	if (start != DMA_MAPPING_ERROR)
> +		__iommu_dma_unmap(dev, start, end - start);
>   }
>   
>   static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
> @@ -1254,6 +1296,7 @@ static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
>   }
>   
>   static const struct dma_map_ops iommu_dma_ops = {
> +	.flags			= DMA_F_PCI_P2PDMA_SUPPORTED,
>   	.alloc			= iommu_dma_alloc,
>   	.free			= iommu_dma_free,
>   	.alloc_pages		= dma_common_alloc_pages,
>
Logan Gunthorpe March 12, 2021, 5:03 p.m. UTC | #2
On 2021-03-12 8:52 a.m., Robin Murphy wrote:
> On 2021-03-11 23:31, Logan Gunthorpe wrote:
>> When a PCI P2PDMA page is seen, set the IOVA length of the segment
>> to zero so that it is not mapped into the IOVA. Then, in finalise_sg(),
>> apply the appropriate bus address to the segment. The IOVA is not
>> created if the scatterlist only consists of P2PDMA pages.
> 
> This misled me at first, but I see the implementation does actually
> appear to accommodate the case of working ACS where P2P *would* still
> need to be mapped at the IOMMU.

Yes, that's correct.

>>   static int __finalise_sg(struct device *dev, struct scatterlist *sg,
>> int nents,
>> -        dma_addr_t dma_addr)
>> +        dma_addr_t dma_addr, unsigned long attrs)
>>   {
>>       struct scatterlist *s, *cur = sg;
>>       unsigned long seg_mask = dma_get_seg_boundary(dev);
>> @@ -864,6 +865,20 @@ static int __finalise_sg(struct device *dev,
>> struct scatterlist *sg, int nents,
>>           sg_dma_address(s) = DMA_MAPPING_ERROR;
>>           sg_dma_len(s) = 0;
>>   +        if (is_pci_p2pdma_page(sg_page(s)) && !s_iova_len) {
>> +            if (i > 0)
>> +                cur = sg_next(cur);
>> +
>> +            sg_dma_address(cur) = sg_phys(s) + s->offset -
> 
> Are you sure about that? ;)

Do you see a bug? I don't follow you...

>> +                pci_p2pdma_bus_offset(sg_page(s));
> 
> Can the bus offset make P2P addresses overlap with regions of mem space
> that we might use for regular IOVA allocation? That would be very bad...

No. IOMMU drivers already disallow all PCI addresses from being used as
IOVA addresses. See, for example, dmar_init_reserved_ranges(). It would
be a huge problem for a whole lot of other reasons if it didn't.
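
In case it's helpful, the shape of that reservation, sketched loosely
after dmar_init_reserved_ranges() (the iova domain setup and error
handling are elided here):

    #include <linux/pci.h>
    #include <linux/iova.h>

    /* Keep every PCI MMIO window out of the IOVA allocator so an
     * allocated IOVA can never alias a peer device's BAR. */
    static void reserve_pci_windows(struct iova_domain *iovad)
    {
            struct pci_dev *pdev = NULL;
            int i;

            for_each_pci_dev(pdev) {
                    for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                            struct resource *r = &pdev->resource[i];

                            if (!r->flags || !(r->flags & IORESOURCE_MEM))
                                    continue;

                            reserve_iova(iovad, r->start >> PAGE_SHIFT,
                                         r->end >> PAGE_SHIFT);
                    }
            }
    }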


>> @@ -960,11 +975,12 @@ static int iommu_dma_map_sg(struct device *dev,
>> struct scatterlist *sg,
>>       struct iommu_dma_cookie *cookie = domain->iova_cookie;
>>       struct iova_domain *iovad = &cookie->iovad;
>>       struct scatterlist *s, *prev = NULL;
>> +    struct dev_pagemap *pgmap = NULL;
>>       int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
>>       dma_addr_t iova;
>>       size_t iova_len = 0;
>>       unsigned long mask = dma_get_seg_boundary(dev);
>> -    int i;
>> +    int i, map = -1, ret = 0;
>>         if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
>>           iommu_deferred_attach(dev, domain))
>> @@ -993,6 +1009,23 @@ static int iommu_dma_map_sg(struct device *dev,
>> struct scatterlist *sg,
>>           s_length = iova_align(iovad, s_length + s_iova_off);
>>           s->length = s_length;
>>   +        if (is_pci_p2pdma_page(sg_page(s))) {
>> +            if (sg_page(s)->pgmap != pgmap) {
>> +                pgmap = sg_page(s)->pgmap;
>> +                map = pci_p2pdma_dma_map_type(dev, pgmap);
>> +            }
>> +
>> +            if (map < 0) {
> 
> It rather feels like it should be the job of whoever creates the list in
> the first place not to put unusable pages in it, especially since the
> p2pdma_map_type looks to be a fairly coarse-grained and static thing.
> The DMA API isn't responsible for validating normal memory pages, so
> what makes P2P special?

Yes, that would be ideal, but there's some difficulties there. For the
driver to check the pages, it would need to loop through the entire SG
one more time on every transaction, regardless of whether there are
P2PDMA pages or not. So that will have a performance impact even when
the feature isn't being used. I don't think that'll be acceptable for
many drivers.

The other possibility is for GUP to do it when it gets the pages from
userspace. But GUP doesn't have all the information to do this at the
moment. We'd have to pass the struct device that will eventually map the
pages down through all the nested GUP functions to do that test at that
time. This might not be a bad option (one I half looked into), but I'm
not sure how acceptable it would be to the GUP developers.

But even if we do verify the pages ahead of time, we still need the same
infrastructure in dma_map_sg(); it could only now be a BUG if the driver
sent invalid pages instead of an error return.
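
To spell out the contract the mapping loop relies on (this is inferred
from how the return value is consumed in this patch; use_bus_address()
and map_via_iova() are placeholders, not real functions):

    int map = pci_p2pdma_dma_map_type(dev, pgmap);

    if (map < 0)            /* P2P between these two devices isn't possible */
            return -EREMOTEIO;
    else if (map > 0)       /* traffic routes directly: bus address, no IOVA */
            use_bus_address(s);
    else                    /* traffic traverses the host bridge: map normally */
            map_via_iova(s);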

>> +                ret = -EREMOTEIO;
>> +                goto out_restore_sg;
>> +            }
>> +
>> +            if (map) {
>> +                s->length = 0;
> 
> I'm not really thrilled about the idea of passing zero-length segments
> to iommu_map_sg(). Yes, it happens to trick the concatenation logic in
> the current implementation into doing what you want, but it feels fragile.

We're not passing zero length segments to iommu_map_sg() (or any
function). This loop is just scanning to calculate the length of the
required IOVA. __finalise_sg() (which is intimately tied to this loop)
then needs a way to determine which segments were P2P segments. The
existing code already overwrites s->length with an aligned length and
stores the original length in sg_dma_len. So we're not relying on
tricking any logic here.


>>   }
>>     static void iommu_dma_unmap_sg(struct device *dev, struct
>> scatterlist *sg,
>>           int nents, enum dma_data_direction dir, unsigned long attrs)
>>   {
>> -    dma_addr_t start, end;
>> +    dma_addr_t end, start = DMA_MAPPING_ERROR;
>>       struct scatterlist *tmp;
>>       int i;
>>   @@ -1054,14 +1090,20 @@ static void iommu_dma_unmap_sg(struct device
>> *dev, struct scatterlist *sg,
>>        * The scatterlist segments are mapped into a single
>>        * contiguous IOVA allocation, so this is incredibly easy.
>>        */
>> -    start = sg_dma_address(sg);
>> -    for_each_sg(sg_next(sg), tmp, nents - 1, i) {
>> +    for_each_sg(sg, tmp, nents, i) {
>> +        if (sg_is_pci_p2pdma(tmp))
> 
> Since the flag is associated with the DMA address which will no longer
> be valid, shouldn't it be cleared? The circumstances in which leaving it
> around could cause a problem are tenuous, but definitely possible.

Yes, that's a good idea.
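
Something like this in the unmap loop, then (sg_unmark_pci_p2pdma() is
the obvious name for a clearing counterpart, assuming one gets added
alongside sg_mark_pci_p2pdma()):

    for_each_sg(sg, tmp, nents, i) {
            if (sg_is_pci_p2pdma(tmp)) {
                    /* the DMA address is about to go stale, so drop
                     * the flag along with it */
                    sg_unmark_pci_p2pdma(tmp);
                    continue;
            }
            /* ... existing start/end bookkeeping ... */
    }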

Thanks for the review!

Logan
Robin Murphy March 12, 2021, 7:47 p.m. UTC | #3
On 2021-03-12 17:03, Logan Gunthorpe wrote:
> 
> 
> On 2021-03-12 8:52 a.m., Robin Murphy wrote:
>> On 2021-03-11 23:31, Logan Gunthorpe wrote:
>>> When a PCI P2PDMA page is seen, set the IOVA length of the segment
>>> to zero so that it is not mapped into the IOVA. Then, in finalise_sg(),
>>> apply the appropriate bus address to the segment. The IOVA is not
>>> created if the scatterlist only consists of P2PDMA pages.
>>
>> This misled me at first, but I see the implementation does actually
>> appear to accommodate the case of working ACS where P2P *would* still
>> need to be mapped at the IOMMU.
> 
> Yes, that's correct.
>
>>>    static int __finalise_sg(struct device *dev, struct scatterlist *sg,
>>> int nents,
>>> -        dma_addr_t dma_addr)
>>> +        dma_addr_t dma_addr, unsigned long attrs)
>>>    {
>>>        struct scatterlist *s, *cur = sg;
>>>        unsigned long seg_mask = dma_get_seg_boundary(dev);
>>> @@ -864,6 +865,20 @@ static int __finalise_sg(struct device *dev,
>>> struct scatterlist *sg, int nents,
>>>            sg_dma_address(s) = DMA_MAPPING_ERROR;
>>>            sg_dma_len(s) = 0;
>>>    +        if (is_pci_p2pdma_page(sg_page(s)) && !s_iova_len) {
>>> +            if (i > 0)
>>> +                cur = sg_next(cur);
>>> +
>>> +            sg_dma_address(cur) = sg_phys(s) + s->offset -
>>
>> Are you sure about that? ;)
> 
> Do you see a bug? I don't follow you...

sg_phys() already accounts for the offset, so you're adding it twice.
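
For reference, from include/linux/scatterlist.h:

    static inline dma_addr_t sg_phys(struct scatterlist *sg)
    {
            return page_to_phys(sg_page(sg)) + sg->offset;
    }

so presumably you want just:

    sg_dma_address(cur) = sg_phys(s) - pci_p2pdma_bus_offset(sg_page(s));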

>>> +                pci_p2pdma_bus_offset(sg_page(s));
>>
>> Can the bus offset make P2P addresses overlap with regions of mem space
>> that we might use for regular IOVA allocation? That would be very bad...
> 
> No. IOMMU drivers already disallow all PCI addresses from being used as
> IOVA addresses. See, for example, dmar_init_reserved_ranges(). It would
> be a huge problem for a whole lot of other reasons if it didn't.

I know we reserve the outbound windows (largely *because* some host 
bridges will consider those addresses as attempts at unsupported P2P and 
prevent them working), I just wanted to confirm that this bus offset is 
always something small that stays within the relevant window, rather 
than something that might make a BAR appear in a completely different 
place for P2P purposes. If so, that's good.

>>> @@ -960,11 +975,12 @@ static int iommu_dma_map_sg(struct device *dev,
>>> struct scatterlist *sg,
>>>        struct iommu_dma_cookie *cookie = domain->iova_cookie;
>>>        struct iova_domain *iovad = &cookie->iovad;
>>>        struct scatterlist *s, *prev = NULL;
>>> +    struct dev_pagemap *pgmap = NULL;
>>>        int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
>>>        dma_addr_t iova;
>>>        size_t iova_len = 0;
>>>        unsigned long mask = dma_get_seg_boundary(dev);
>>> -    int i;
>>> +    int i, map = -1, ret = 0;
>>>          if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
>>>            iommu_deferred_attach(dev, domain))
>>> @@ -993,6 +1009,23 @@ static int iommu_dma_map_sg(struct device *dev,
>>> struct scatterlist *sg,
>>>            s_length = iova_align(iovad, s_length + s_iova_off);
>>>            s->length = s_length;
>>>    +        if (is_pci_p2pdma_page(sg_page(s))) {
>>> +            if (sg_page(s)->pgmap != pgmap) {
>>> +                pgmap = sg_page(s)->pgmap;
>>> +                map = pci_p2pdma_dma_map_type(dev, pgmap);
>>> +            }
>>> +
>>> +            if (map < 0) {
>>
>> It rather feels like it should be the job of whoever creates the list in
>> the first place not to put unusable pages in it, especially since the
>> p2pdma_map_type looks to be a fairly coarse-grained and static thing.
>> The DMA API isn't responsible for validating normal memory pages, so
>> what makes P2P special?
> 
> Yes, that would be ideal, but there's some difficulties there. For the
> driver to check the pages, it would need to loop through the entire SG
> one more time on every transaction, regardless of whether there are
> P2PDMA pages or not. So that will have a performance impact even when
> the feature isn't being used. I don't think that'll be acceptable for
> many drivers.
> 
> The other possibility is for GUP to do it when it gets the pages from
> userspace. But GUP doesn't have all the information to do this at the
> moment. We'd have to pass the struct device that will eventually map the
> pages down through all the nested GUP functions to do that test at that
> time. This might not be a bad option (one I half looked into), but I'm
> not sure how acceptable it would be to the GUP developers.

Urgh, yes, if a page may or may not be valid for p2p depending on which 
device is trying to map it, then it probably is most reasonable to 
figure that out at this point. It's a little unfortunate having to cope 
with failure so late, but oh well.

> But even if we do verify the pages ahead of time, we still need the same
> infrastructure in dma_map_sg(); it could only now be a BUG if the driver
> sent invalid pages instead of an error return.

The hope was that we could save doing even that - e.g. if you pass a 
dodgy page into dma_map_page(), maybe page_to_phys() will crash, maybe 
you'll just end up with a DMA address that won't work, but either way it 
doesn't care in its own right - but it seems that's moot.

>>> +                ret = -EREMOTEIO;
>>> +                goto out_restore_sg;
>>> +            }
>>> +
>>> +            if (map) {
>>> +                s->length = 0;
>>
>> I'm not really thrilled about the idea of passing zero-length segments
>> to iommu_map_sg(). Yes, it happens to trick the concatenation logic in
>> the current implementation into doing what you want, but it feels fragile.
> 
> We're not passing zero length segments to iommu_map_sg() (or any
> function). This loop is just scanning to calculate the length of the
> required IOVA. __finalise_sg() (which is intimately tied to this loop)
> then needs a way to determine which segments were P2P segments. The
> existing code already overwrites s->length with an aligned length and
> stores the original length in sg_dma_len. So we're not relying on
> tricking any logic here.

Yes, we temporarily shuffle in page-aligned quantities to satisfy the 
needs of the iommu_map_sg() call, before unpacking things again in 
__finalise_sg(). It's some disgusting trickery that I'm particularly 
proud of. My point is that if you have a mix of both p2p and normal 
segments - which seems to be a case you want to support - then the 
length of 0 that you set to flag p2p segments here will be seen by 
iommu_map_sg() (as it walks the list to map the other segments) before 
you then use it as a key to override the DMA address in the final step. 
It's not a concern if you have a p2p-only list and short-circuit 
straight to that step (in which case all the shuffling was wasted effort 
anyway), but since it's not entirely clear what a segment with zero 
length would mean in general, it seems like a good idea to avoid passing 
the list across a public boundary in that state, if possible.

Robin.

>>>    }
>>>      static void iommu_dma_unmap_sg(struct device *dev, struct
>>> scatterlist *sg,
>>>            int nents, enum dma_data_direction dir, unsigned long attrs)
>>>    {
>>> -    dma_addr_t start, end;
>>> +    dma_addr_t end, start = DMA_MAPPING_ERROR;
>>>        struct scatterlist *tmp;
>>>        int i;
>>>    @@ -1054,14 +1090,20 @@ static void iommu_dma_unmap_sg(struct device
>>> *dev, struct scatterlist *sg,
>>>         * The scatterlist segments are mapped into a single
>>>         * contiguous IOVA allocation, so this is incredibly easy.
>>>         */
>>> -    start = sg_dma_address(sg);
>>> -    for_each_sg(sg_next(sg), tmp, nents - 1, i) {
>>> +    for_each_sg(sg, tmp, nents, i) {
>>> +        if (sg_is_pci_p2pdma(tmp))
>>
>> Since the flag is associated with the DMA address which will no longer
>> be valid, shouldn't it be cleared? The circumstances in which leaving it
>> around could cause a problem are tenuous, but definitely possible.
> 
> Yes, that's a good idea.
> 
> Thanks for the review!
> 
> Logan
>
Logan Gunthorpe March 12, 2021, 8:06 p.m. UTC | #4
On 2021-03-12 12:47 p.m., Robin Murphy wrote:
>>>>    {
>>>>        struct scatterlist *s, *cur = sg;
>>>>        unsigned long seg_mask = dma_get_seg_boundary(dev);
>>>> @@ -864,6 +865,20 @@ static int __finalise_sg(struct device *dev,
>>>> struct scatterlist *sg, int nents,
>>>>            sg_dma_address(s) = DMA_MAPPING_ERROR;
>>>>            sg_dma_len(s) = 0;
>>>>    +        if (is_pci_p2pdma_page(sg_page(s)) && !s_iova_len) {
>>>> +            if (i > 0)
>>>> +                cur = sg_next(cur);
>>>> +
>>>> +            sg_dma_address(cur) = sg_phys(s) + s->offset -
>>>
>>> Are you sure about that? ;)
>>
>> Do you see a bug? I don't follow you...
> 
> sg_phys() already accounts for the offset, so you're adding it twice.

Ah, oops. Nice catch. I missed that.

> 
>>>> +                pci_p2pdma_bus_offset(sg_page(s));
>>>
>>> Can the bus offset make P2P addresses overlap with regions of mem space
>>> that we might use for regular IOVA allocation? That would be very bad...
>>
>> No. IOMMU drivers already disallow all PCI addresses from being used as
>> IOVA addresses. See, for example, dmar_init_reserved_ranges(). It would
>> be a huge problem for a whole lot of other reasons if it didn't.
> 
> I know we reserve the outbound windows (largely *because* some host 
> bridges will consider those addresses as attempts at unsupported P2P and 
> prevent them working), I just wanted to confirm that this bus offset is 
> always something small that stays within the relevant window, rather 
> than something that might make a BAR appear in a completely different 
> place for P2P purposes. If so, that's good.

Yes, well, if an IOVA overlaps with any PCI bus address there's going to
be some nasty breakage, because when the IOVA is used it might be
directed to a PCI device and not the IOMMU. I fixed a bug like that
once.

>>> I'm not really thrilled about the idea of passing zero-length segments
>>> to iommu_map_sg(). Yes, it happens to trick the concatenation logic in
>>> the current implementation into doing what you want, but it feels 
>>> fragile.
>>
>> We're not passing zero length segments to iommu_map_sg() (or any
>> function). This loop is just scanning to calculate the length of the
>> required IOVA. __finalise_sg() (which is intimately tied to this loop)
>> then needs a way to determine which segments were P2P segments. The
>> existing code already overwrites s->length with an aligned length and
>> stores the original length in sg_dma_len. So we're not relying on
>> tricking any logic here.
> 
> Yes, we temporarily shuffle in page-aligned quantities to satisfy the 
> needs of the iommu_map_sg() call, before unpacking things again in 
> __finalise_sg(). It's some disgusting trickery that I'm particularly 
> proud of. My point is that if you have a mix of both p2p and normal 
> segments - which seems to be a case you want to support - then the 
> length of 0 that you set to flag p2p segments here will be seen by 
> iommu_map_sg() (as it walks the list to map the other segments) before 
> you then use it as a key to override the DMA address in the final step. 
> It's not a concern if you have a p2p-only list and short-circuit 
> straight to that step (in which case all the shuffling was wasted effort 
> anyway), but since it's not entirely clear what a segment with zero 
> length would mean in general, it seems like a good idea to avoid passing 
> the list across a public boundary in that state, if possible.

Ok, well, iommu_map_sg() does the right thing as-is without changing it,
and IMO sg->length set to zero does make sense. Supporting mixed P2P and
normal segments is really the whole point of this series (the current
kernel supports only homogeneous SGLs, via a specialized path -- see
pci_p2pdma_unmap_sg_attrs()). But do you have an alternate solution for
sg->length?
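
(The only alternative shape I can think of, purely as a sketch, would be
to give the scatterlist a flag word that's private to the DMA layer and
teach iommu_map_sg() to skip segments so flagged, roughly:

    struct scatterlist {
            unsigned long   page_link;
            unsigned int    offset;
            unsigned int    length;
            dma_addr_t      dma_address;
    #ifdef CONFIG_NEED_SG_DMA_LENGTH
            unsigned int    dma_length;
    #endif
    #ifdef CONFIG_PCI_P2PDMA
            unsigned int    dma_flags;      /* e.g. a bus-address marker bit */
    #endif
    };

but that grows struct scatterlist for every P2PDMA-enabled kernel, so I'm
not sure it's a win.)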

Logan

Patch

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index af765c813cc8..c0821e9051a9 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -20,6 +20,7 @@ 
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/pci.h>
+#include <linux/pci-p2pdma.h>
 #include <linux/swiotlb.h>
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
@@ -846,7 +847,7 @@  static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
  * segment's start address to avoid concatenating across one.
  */
 static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
-		dma_addr_t dma_addr)
+		dma_addr_t dma_addr, unsigned long attrs)
 {
 	struct scatterlist *s, *cur = sg;
 	unsigned long seg_mask = dma_get_seg_boundary(dev);
@@ -864,6 +865,20 @@  static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 		sg_dma_address(s) = DMA_MAPPING_ERROR;
 		sg_dma_len(s) = 0;
 
+		if (is_pci_p2pdma_page(sg_page(s)) && !s_iova_len) {
+			if (i > 0)
+				cur = sg_next(cur);
+
+			sg_dma_address(cur) = sg_phys(s) + s->offset -
+				pci_p2pdma_bus_offset(sg_page(s));
+			sg_dma_len(cur) = s->length;
+			sg_mark_pci_p2pdma(cur);
+
+			count++;
+			cur_len = 0;
+			continue;
+		}
+
 		/*
 		 * Now fill in the real DMA data. If...
 		 * - there is a valid output segment to append to
@@ -960,11 +975,12 @@  static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
 	struct scatterlist *s, *prev = NULL;
+	struct dev_pagemap *pgmap = NULL;
 	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
 	dma_addr_t iova;
 	size_t iova_len = 0;
 	unsigned long mask = dma_get_seg_boundary(dev);
-	int i;
+	int i, map = -1, ret = 0;
 
 	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
 	    iommu_deferred_attach(dev, domain))
@@ -993,6 +1009,23 @@  static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		s_length = iova_align(iovad, s_length + s_iova_off);
 		s->length = s_length;
 
+		if (is_pci_p2pdma_page(sg_page(s))) {
+			if (sg_page(s)->pgmap != pgmap) {
+				pgmap = sg_page(s)->pgmap;
+				map = pci_p2pdma_dma_map_type(dev, pgmap);
+			}
+
+			if (map < 0) {
+				ret = -EREMOTEIO;
+				goto out_restore_sg;
+			}
+
+			if (map) {
+				s->length = 0;
+				continue;
+			}
+		}
+
 		/*
 		 * Due to the alignment of our single IOVA allocation, we can
 		 * depend on these assumptions about the segment boundary mask:
@@ -1015,6 +1048,9 @@  static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		prev = s;
 	}
 
+	if (!iova_len)
+		return __finalise_sg(dev, sg, nents, 0, attrs);
+
 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
 	if (!iova)
 		goto out_restore_sg;
@@ -1026,19 +1062,19 @@  static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
 		goto out_free_iova;
 
-	return __finalise_sg(dev, sg, nents, iova);
+	return __finalise_sg(dev, sg, nents, iova, attrs);
 
 out_free_iova:
 	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
 out_restore_sg:
 	__invalidate_sg(sg, nents);
-	return 0;
+	return ret;
 }
 
 static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
-	dma_addr_t start, end;
+	dma_addr_t end, start = DMA_MAPPING_ERROR;
 	struct scatterlist *tmp;
 	int i;
 
@@ -1054,14 +1090,20 @@  static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	 * The scatterlist segments are mapped into a single
 	 * contiguous IOVA allocation, so this is incredibly easy.
 	 */
-	start = sg_dma_address(sg);
-	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
+	for_each_sg(sg, tmp, nents, i) {
+		if (sg_is_pci_p2pdma(tmp))
+			continue;
 		if (sg_dma_len(tmp) == 0)
 			break;
-		sg = tmp;
+
+		if (start == DMA_MAPPING_ERROR)
+			start = sg_dma_address(tmp);
+
+		end = sg_dma_address(tmp) + sg_dma_len(tmp);
 	}
-	end = sg_dma_address(sg) + sg_dma_len(sg);
-	__iommu_dma_unmap(dev, start, end - start);
+
+	if (start != DMA_MAPPING_ERROR)
+		__iommu_dma_unmap(dev, start, end - start);
 }
 
 static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
@@ -1254,6 +1296,7 @@  static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
 }
 
 static const struct dma_map_ops iommu_dma_ops = {
+	.flags			= DMA_F_PCI_P2PDMA_SUPPORTED,
 	.alloc			= iommu_dma_alloc,
 	.free			= iommu_dma_free,
 	.alloc_pages		= dma_common_alloc_pages,