[v2,05/18] IOMMU: have iommu_{,un}map() split requests into largest possible chunks

Message ID 134785fb-8ac1-50f0-de75-e0d6fe22f711@suse.com (mailing list archive)
State New, archived
Series IOMMU: superpage support when not sharing pagetables

Commit Message

Jan Beulich Sept. 24, 2021, 9:45 a.m. UTC
Introduce a helper function to determine the largest possible mapping
that allows covering a request (or the next part of it that is left to
be processed).

In order not to add yet more recurring dfn_add() / mfn_add() to the two
callers of the new helper, also introduce local variables holding the
values presently operated on.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
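
To make the helper's behaviour concrete, here is a standalone model of its
logic (illustrative only, not part of the patch): plain unsigned longs stand
in for the typesafe dfn_t/mfn_t, and GCC's __builtin_ctzl() stands in for
Xen's find_first_set_bit().

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Model of mapping_order(): find the largest order such that both
     * frame numbers are suitably aligned and at least that many pages
     * remain in the request. */
    static unsigned int model_order(unsigned long sizes,
                                    unsigned long dfn, unsigned long mfn,
                                    unsigned long nr)
    {
        unsigned long res = dfn | mfn;
        unsigned int bit = __builtin_ctzl(sizes), order = 0;

        assert(bit == PAGE_SHIFT);

        while ( (sizes = (sizes >> bit) & ~1UL) )
        {
            unsigned long mask;

            bit = __builtin_ctzl(sizes);
            mask = (1UL << bit) - 1;
            /* Stop if fewer pages remain than the next size needs, or
             * if dfn/mfn alignment rules that size out. */
            if ( nr <= mask || (res & mask) )
                break;
            order += bit;
            nr >>= bit;
            res >>= bit;
        }

        return order;
    }

    int main(void)
    {
        /* 4k (bit 12) and 2M (bit 21) page sizes supported. */
        unsigned long sizes = (1UL << 12) | (1UL << 21);

        printf("%u\n", model_order(sizes, 0x200, 0x400, 1024)); /* 9 */
        printf("%u\n", model_order(sizes, 0x201, 0x400, 1024)); /* 0 */
        return 0;
    }

With these page sizes, a 1024-page request at dfn 0x200 / mfn 0x400 completes
in two order-9 (2M) mappings instead of 1024 single-page ones, while a
misaligned dfn 0x201 falls back to order 0.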

Comments

Roger Pau Monné Nov. 30, 2021, 3:24 p.m. UTC | #1
On Fri, Sep 24, 2021 at 11:45:57AM +0200, Jan Beulich wrote:
> Introduce a helper function to determine the largest possible mapping
> that allows covering a request (or the next part of it that is left to
> be processed).
> 
> In order not to add yet more recurring dfn_add() / mfn_add() to the two
> callers of the new helper, also introduce local variables holding the
> values presently operated on.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 
> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -260,12 +260,38 @@ void iommu_domain_destroy(struct domain
>      arch_iommu_domain_destroy(d);
>  }
>  
> -int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
> +static unsigned int mapping_order(const struct domain_iommu *hd,
> +                                  dfn_t dfn, mfn_t mfn, unsigned long nr)
> +{
> +    unsigned long res = dfn_x(dfn) | mfn_x(mfn);
> +    unsigned long sizes = hd->platform_ops->page_sizes;
> +    unsigned int bit = find_first_set_bit(sizes), order = 0;
> +
> +    ASSERT(bit == PAGE_SHIFT);
> +
> +    while ( (sizes = (sizes >> bit) & ~1) )
> +    {
> +        unsigned long mask;
> +
> +        bit = find_first_set_bit(sizes);
> +        mask = (1UL << bit) - 1;
> +        if ( nr <= mask || (res & mask) )
> +            break;
> +        order += bit;
> +        nr >>= bit;
> +        res >>= bit;
> +    }
> +
> +    return order;
> +}

This looks like it could be used in other places; I would at least
consider using it in pvh_populate_memory_range, where we also need to
figure out the maximum order given an address and a number of pages.

Do you think you could place it in a more generic file and also use
more generic parameters (i.e. unsigned long gfn and mfn)?

> +
> +int iommu_map(struct domain *d, dfn_t dfn0, mfn_t mfn0,
>                unsigned long page_count, unsigned int flags,
>                unsigned int *flush_flags)
>  {
>      const struct domain_iommu *hd = dom_iommu(d);
>      unsigned long i;
> +    unsigned int order;
>      int rc = 0;
>  
>      if ( !is_iommu_enabled(d) )
> @@ -273,10 +299,16 @@ int iommu_map(struct domain *d, dfn_t df
>  
>      ASSERT(!IOMMUF_order(flags));
>  
> -    for ( i = 0; i < page_count; i++ )
> +    for ( i = 0; i < page_count; i += 1UL << order )
>      {
> -        rc = iommu_call(hd->platform_ops, map_page, d, dfn_add(dfn, i),
> -                        mfn_add(mfn, i), flags, flush_flags);
> +        dfn_t dfn = dfn_add(dfn0, i);
> +        mfn_t mfn = mfn_add(mfn0, i);
> +        unsigned long j;
> +
> +        order = mapping_order(hd, dfn, mfn, page_count - i);
> +
> +        rc = iommu_call(hd->platform_ops, map_page, d, dfn, mfn,
> +                        flags | IOMMUF_order(order), flush_flags);
>  
>          if ( likely(!rc) )
>              continue;
> @@ -284,14 +316,18 @@ int iommu_map(struct domain *d, dfn_t df
>          if ( !d->is_shutting_down && printk_ratelimit() )
>              printk(XENLOG_ERR
>                     "d%d: IOMMU mapping dfn %"PRI_dfn" to mfn %"PRI_mfn" failed: %d\n",
> -                   d->domain_id, dfn_x(dfn_add(dfn, i)),
> -                   mfn_x(mfn_add(mfn, i)), rc);
> +                   d->domain_id, dfn_x(dfn), mfn_x(mfn), rc);
> +
> +        for ( j = 0; j < i; j += 1UL << order )
> +        {
> +            dfn = dfn_add(dfn0, j);
> +            order = mapping_order(hd, dfn, _mfn(0), i - j);
>  
> -        while ( i-- )
>              /* if statement to satisfy __must_check */
> -            if ( iommu_call(hd->platform_ops, unmap_page, d, dfn_add(dfn, i),
> -                            0, flush_flags) )
> +            if ( iommu_call(hd->platform_ops, unmap_page, d, dfn, order,
> +                            flush_flags) )
>                  continue;
> +        }

Why do you need this unmap loop here? Can't you just use iommu_unmap?

Thanks, Roger.
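
(For illustration, a generalized variant along the lines suggested above
might look like the sketch below, taking raw frame numbers and an explicit
page-size mask; as Jan explains in his reply, the series keeps the
IOMMU-specific, typesafe form instead. The name largest_order is
hypothetical.)

    /* Hypothetical generalization of mapping_order(): raw frame numbers
     * plus an explicit page-size mask, so callers outside the IOMMU code
     * could use it. Not part of the series. */
    static unsigned int largest_order(unsigned long page_sizes,
                                      unsigned long frame0,
                                      unsigned long frame1,
                                      unsigned long nr)
    {
        unsigned long res = frame0 | frame1;
        unsigned int bit = find_first_set_bit(page_sizes), order = 0;

        ASSERT(bit == PAGE_SHIFT);

        while ( (page_sizes = (page_sizes >> bit) & ~1) )
        {
            unsigned long mask;

            bit = find_first_set_bit(page_sizes);
            mask = (1UL << bit) - 1;
            if ( nr <= mask || (res & mask) )
                break;
            order += bit;
            nr >>= bit;
            res >>= bit;
        }

        return order;
    }
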
Jan Beulich Dec. 2, 2021, 3:59 p.m. UTC | #2
On 30.11.2021 16:24, Roger Pau Monné wrote:
> On Fri, Sep 24, 2021 at 11:45:57AM +0200, Jan Beulich wrote:
>> --- a/xen/drivers/passthrough/iommu.c
>> +++ b/xen/drivers/passthrough/iommu.c
>> @@ -260,12 +260,38 @@ void iommu_domain_destroy(struct domain
>>      arch_iommu_domain_destroy(d);
>>  }
>>  
>> -int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
>> +static unsigned int mapping_order(const struct domain_iommu *hd,
>> +                                  dfn_t dfn, mfn_t mfn, unsigned long nr)
>> +{
>> +    unsigned long res = dfn_x(dfn) | mfn_x(mfn);
>> +    unsigned long sizes = hd->platform_ops->page_sizes;
>> +    unsigned int bit = find_first_set_bit(sizes), order = 0;
>> +
>> +    ASSERT(bit == PAGE_SHIFT);
>> +
>> +    while ( (sizes = (sizes >> bit) & ~1) )
>> +    {
>> +        unsigned long mask;
>> +
>> +        bit = find_first_set_bit(sizes);
>> +        mask = (1UL << bit) - 1;
>> +        if ( nr <= mask || (res & mask) )
>> +            break;
>> +        order += bit;
>> +        nr >>= bit;
>> +        res >>= bit;
>> +    }
>> +
>> +    return order;
>> +}
> 
> This looks like it could be used in other places; I would at least
> consider using it in pvh_populate_memory_range, where we also need to
> figure out the maximum order given an address and a number of pages.
> 
> Do you think you could place it in a more generic file and also use
> more generic parameters (i.e. unsigned long gfn and mfn)?

The function as is surely isn't reusable, due to its use of IOMMU-specific
data. If and when a second user appears, it'll be far clearer whether it
is worth generalizing, and if so, how much of it. (Among other things,
I'd like to retain the typesafe parameter types here.)

>> @@ -284,14 +316,18 @@ int iommu_map(struct domain *d, dfn_t df
>>          if ( !d->is_shutting_down && printk_ratelimit() )
>>              printk(XENLOG_ERR
>>                     "d%d: IOMMU mapping dfn %"PRI_dfn" to mfn %"PRI_mfn" failed: %d\n",
>> -                   d->domain_id, dfn_x(dfn_add(dfn, i)),
>> -                   mfn_x(mfn_add(mfn, i)), rc);
>> +                   d->domain_id, dfn_x(dfn), mfn_x(mfn), rc);
>> +
>> +        for ( j = 0; j < i; j += 1UL << order )
>> +        {
>> +            dfn = dfn_add(dfn0, j);
>> +            order = mapping_order(hd, dfn, _mfn(0), i - j);
>>  
>> -        while ( i-- )
>>              /* if statement to satisfy __must_check */
>> -            if ( iommu_call(hd->platform_ops, unmap_page, d, dfn_add(dfn, i),
>> -                            0, flush_flags) )
>> +            if ( iommu_call(hd->platform_ops, unmap_page, d, dfn, order,
>> +                            flush_flags) )
>>                  continue;
>> +        }
> 
> Why do you need this unmap loop here? Can't you just use iommu_unmap?

Good question - I merely converted the loop that was already there. It
looks like that could have been changed to a simple call even before
this patch. I'll make that change here, on the assumption that splitting
it out isn't going to be a worthwhile exercise.

Jan
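
(For reference, the rework Jan describes might look roughly like this
sketch, replacing the open-coded rollback loop in iommu_map()'s error
path with a single iommu_unmap() call covering the pages mapped so far;
this illustrates the discussion, not the committed code.)

        /* Roll back the 0..i-1 pages mapped so far in one call; the
         * while statement consumes iommu_unmap()'s __must_check result. */
        while ( iommu_unmap(d, dfn0, i, flush_flags) )
            break;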

Patch

--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -260,12 +260,38 @@  void iommu_domain_destroy(struct domain
     arch_iommu_domain_destroy(d);
 }
 
-int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
+static unsigned int mapping_order(const struct domain_iommu *hd,
+                                  dfn_t dfn, mfn_t mfn, unsigned long nr)
+{
+    unsigned long res = dfn_x(dfn) | mfn_x(mfn);
+    unsigned long sizes = hd->platform_ops->page_sizes;
+    unsigned int bit = find_first_set_bit(sizes), order = 0;
+
+    ASSERT(bit == PAGE_SHIFT);
+
+    while ( (sizes = (sizes >> bit) & ~1) )
+    {
+        unsigned long mask;
+
+        bit = find_first_set_bit(sizes);
+        mask = (1UL << bit) - 1;
+        if ( nr <= mask || (res & mask) )
+            break;
+        order += bit;
+        nr >>= bit;
+        res >>= bit;
+    }
+
+    return order;
+}
+
+int iommu_map(struct domain *d, dfn_t dfn0, mfn_t mfn0,
               unsigned long page_count, unsigned int flags,
               unsigned int *flush_flags)
 {
     const struct domain_iommu *hd = dom_iommu(d);
     unsigned long i;
+    unsigned int order;
     int rc = 0;
 
     if ( !is_iommu_enabled(d) )
@@ -273,10 +299,16 @@  int iommu_map(struct domain *d, dfn_t df
 
     ASSERT(!IOMMUF_order(flags));
 
-    for ( i = 0; i < page_count; i++ )
+    for ( i = 0; i < page_count; i += 1UL << order )
     {
-        rc = iommu_call(hd->platform_ops, map_page, d, dfn_add(dfn, i),
-                        mfn_add(mfn, i), flags, flush_flags);
+        dfn_t dfn = dfn_add(dfn0, i);
+        mfn_t mfn = mfn_add(mfn0, i);
+        unsigned long j;
+
+        order = mapping_order(hd, dfn, mfn, page_count - i);
+
+        rc = iommu_call(hd->platform_ops, map_page, d, dfn, mfn,
+                        flags | IOMMUF_order(order), flush_flags);
 
         if ( likely(!rc) )
             continue;
@@ -284,14 +316,18 @@  int iommu_map(struct domain *d, dfn_t df
         if ( !d->is_shutting_down && printk_ratelimit() )
             printk(XENLOG_ERR
                    "d%d: IOMMU mapping dfn %"PRI_dfn" to mfn %"PRI_mfn" failed: %d\n",
-                   d->domain_id, dfn_x(dfn_add(dfn, i)),
-                   mfn_x(mfn_add(mfn, i)), rc);
+                   d->domain_id, dfn_x(dfn), mfn_x(mfn), rc);
+
+        for ( j = 0; j < i; j += 1UL << order )
+        {
+            dfn = dfn_add(dfn0, j);
+            order = mapping_order(hd, dfn, _mfn(0), i - j);
 
-        while ( i-- )
             /* if statement to satisfy __must_check */
-            if ( iommu_call(hd->platform_ops, unmap_page, d, dfn_add(dfn, i),
-                            0, flush_flags) )
+            if ( iommu_call(hd->platform_ops, unmap_page, d, dfn, order,
+                            flush_flags) )
                 continue;
+        }
 
         if ( !is_hardware_domain(d) )
             domain_crash(d);
@@ -322,20 +358,25 @@  int iommu_legacy_map(struct domain *d, d
     return rc;
 }
 
-int iommu_unmap(struct domain *d, dfn_t dfn, unsigned long page_count,
+int iommu_unmap(struct domain *d, dfn_t dfn0, unsigned long page_count,
                 unsigned int *flush_flags)
 {
     const struct domain_iommu *hd = dom_iommu(d);
     unsigned long i;
+    unsigned int order;
     int rc = 0;
 
     if ( !is_iommu_enabled(d) )
         return 0;
 
-    for ( i = 0; i < page_count; i++ )
+    for ( i = 0; i < page_count; i += 1UL << order )
     {
-        int err = iommu_call(hd->platform_ops, unmap_page, d, dfn_add(dfn, i),
-                             0, flush_flags);
+        dfn_t dfn = dfn_add(dfn0, i);
+        int err;
+
+        order = mapping_order(hd, dfn, _mfn(0), page_count - i);
+        err = iommu_call(hd->platform_ops, unmap_page, d, dfn,
+                         order, flush_flags);
 
         if ( likely(!err) )
             continue;
@@ -343,7 +384,7 @@  int iommu_unmap(struct domain *d, dfn_t
         if ( !d->is_shutting_down && printk_ratelimit() )
             printk(XENLOG_ERR
                    "d%d: IOMMU unmapping dfn %"PRI_dfn" failed: %d\n",
-                   d->domain_id, dfn_x(dfn_add(dfn, i)), err);
+                   d->domain_id, dfn_x(dfn), err);
 
         if ( !rc )
             rc = err;
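
To make the splitting concrete, an illustrative trace (assuming 4k and 2M
page sizes) of a request whose head and tail are unaligned:

    iommu_map(d, dfn 0x1ff, mfn 0x3ff, 1026 pages) issues:
      map_page dfn 0x1ff, mfn 0x3ff, order 0   (unaligned head page)
      map_page dfn 0x200, mfn 0x400, order 9   (2M chunk, 512 pages)
      map_page dfn 0x400, mfn 0x600, order 9   (2M chunk, 512 pages)
      map_page dfn 0x600, mfn 0x800, order 0   (single tail page)

Four iommu_call() invocations instead of 1026, with iommu_unmap() splitting
a teardown of the same region in the same way.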