
[v4,20/21] VT-d: fold iommu_flush_iotlb{,_pages}()

Message ID eec16b83-7f8f-e94d-b1f6-9113ff93fd14@suse.com (mailing list archive)
State Superseded
Series IOMMU: superpage support when not sharing pagetables

Commit Message

Jan Beulich April 25, 2022, 8:44 a.m. UTC
With iommu_flush_iotlb_all() gone, iommu_flush_iotlb_pages() is merely a
wrapper around the not otherwise called iommu_flush_iotlb(). Fold both
functions.

No functional change intended.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v4: New.
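
For illustration only (not part of the submission): a condensed sketch of the folded function's resulting shape, assembled from the diff below. The per-DRHD iteration, error handling, and the __must_check/cf_check annotations are elided, so this is not the literal resulting source.

    static int iommu_flush_iotlb(struct domain *d, dfn_t dfn,
                                 unsigned long page_count,
                                 unsigned int flush_flags)
    {
        int ret = 0;

        /* Argument normalisation previously done by iommu_flush_iotlb_pages(). */
        if ( flush_flags & IOMMU_FLUSHF_all )
        {
            dfn = INVALID_DFN;
            page_count = 0;
        }
        else
        {
            ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
            ASSERT(flush_flags);
        }

        /*
         * Per-DRHD flushing proceeds as before (elided here); the former
         * dma_old_pte_present argument is now derived in place from the
         * flush flags:
         *
         *     iommu_flush_iotlb_psi(iommu, iommu_domid, dfn_to_daddr(dfn),
         *                           get_order_from_pages(page_count),
         *                           !(flush_flags & IOMMU_FLUSHF_modified),
         *                           flush_dev_iotlb);
         */

        return ret;
    }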

Comments

Tian, Kevin April 27, 2022, 4:12 a.m. UTC | #1
> From: Jan Beulich <jbeulich@suse.com>
> Sent: Monday, April 25, 2022 4:45 PM
> 
> With iommu_flush_iotlb_all() gone, iommu_flush_iotlb_pages() is merely a
> wrapper around the not otherwise called iommu_flush_iotlb(). Fold both
> functions.
> 
> No functional change intended.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Kevin Tian <kevin.tian@intel.com>

> ---
> v4: New.
> 
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -728,9 +728,9 @@ static int __must_check iommu_flush_all(
>      return rc;
>  }
> 
> -static int __must_check iommu_flush_iotlb(struct domain *d, dfn_t dfn,
> -                                          bool_t dma_old_pte_present,
> -                                          unsigned long page_count)
> +static int __must_check cf_check iommu_flush_iotlb(struct domain *d, dfn_t dfn,
> +                                                   unsigned long page_count,
> +                                                   unsigned int flush_flags)
>  {
>      struct domain_iommu *hd = dom_iommu(d);
>      struct acpi_drhd_unit *drhd;
> @@ -739,6 +739,17 @@ static int __must_check iommu_flush_iotl
>      int iommu_domid;
>      int ret = 0;
> 
> +    if ( flush_flags & IOMMU_FLUSHF_all )
> +    {
> +        dfn = INVALID_DFN;
> +        page_count = 0;
> +    }
> +    else
> +    {
> +        ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
> +        ASSERT(flush_flags);
> +    }
> +
>      /*
>       * No need pcideves_lock here because we have flush
>       * when assign/deassign device
> @@ -765,7 +776,7 @@ static int __must_check iommu_flush_iotl
>              rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
>                                         dfn_to_daddr(dfn),
>                                         get_order_from_pages(page_count),
> -                                       !dma_old_pte_present,
> +                                       !(flush_flags & IOMMU_FLUSHF_modified),
>                                         flush_dev_iotlb);
> 
>          if ( rc > 0 )
> @@ -777,25 +788,6 @@ static int __must_check iommu_flush_iotl
>      return ret;
>  }
> 
> -static int __must_check cf_check iommu_flush_iotlb_pages(
> -    struct domain *d, dfn_t dfn, unsigned long page_count,
> -    unsigned int flush_flags)
> -{
> -    if ( flush_flags & IOMMU_FLUSHF_all )
> -    {
> -        dfn = INVALID_DFN;
> -        page_count = 0;
> -    }
> -    else
> -    {
> -        ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
> -        ASSERT(flush_flags);
> -    }
> -
> -    return iommu_flush_iotlb(d, dfn, flush_flags & IOMMU_FLUSHF_modified,
> -                             page_count);
> -}
> -
>  static void queue_free_pt(struct domain_iommu *hd, mfn_t mfn, unsigned int level)
>  {
>      if ( level > 1 )
> @@ -3254,7 +3246,7 @@ static const struct iommu_ops __initcons
>      .suspend = vtd_suspend,
>      .resume = vtd_resume,
>      .crash_shutdown = vtd_crash_shutdown,
> -    .iotlb_flush = iommu_flush_iotlb_pages,
> +    .iotlb_flush = iommu_flush_iotlb,
>      .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
>      .dump_page_tables = vtd_dump_page_tables,
>  };

Roger Pau Monné May 11, 2022, 1:50 p.m. UTC | #2
On Mon, Apr 25, 2022 at 10:44:38AM +0200, Jan Beulich wrote:
> With iommu_flush_iotlb_all() gone, iommu_flush_iotlb_pages() is merely a
> wrapper around the not otherwise called iommu_flush_iotlb(). Fold both
> functions.
> 
> No functional change intended.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>

Thanks, Roger.

Patch

--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -728,9 +728,9 @@  static int __must_check iommu_flush_all(
     return rc;
 }
 
-static int __must_check iommu_flush_iotlb(struct domain *d, dfn_t dfn,
-                                          bool_t dma_old_pte_present,
-                                          unsigned long page_count)
+static int __must_check cf_check iommu_flush_iotlb(struct domain *d, dfn_t dfn,
+                                                   unsigned long page_count,
+                                                   unsigned int flush_flags)
 {
     struct domain_iommu *hd = dom_iommu(d);
     struct acpi_drhd_unit *drhd;
@@ -739,6 +739,17 @@  static int __must_check iommu_flush_iotl
     int iommu_domid;
     int ret = 0;
 
+    if ( flush_flags & IOMMU_FLUSHF_all )
+    {
+        dfn = INVALID_DFN;
+        page_count = 0;
+    }
+    else
+    {
+        ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
+        ASSERT(flush_flags);
+    }
+
     /*
      * No need pcideves_lock here because we have flush
      * when assign/deassign device
@@ -765,7 +776,7 @@  static int __must_check iommu_flush_iotl
             rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
                                        dfn_to_daddr(dfn),
                                        get_order_from_pages(page_count),
-                                       !dma_old_pte_present,
+                                       !(flush_flags & IOMMU_FLUSHF_modified),
                                        flush_dev_iotlb);
 
         if ( rc > 0 )
@@ -777,25 +788,6 @@  static int __must_check iommu_flush_iotl
     return ret;
 }
 
-static int __must_check cf_check iommu_flush_iotlb_pages(
-    struct domain *d, dfn_t dfn, unsigned long page_count,
-    unsigned int flush_flags)
-{
-    if ( flush_flags & IOMMU_FLUSHF_all )
-    {
-        dfn = INVALID_DFN;
-        page_count = 0;
-    }
-    else
-    {
-        ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
-        ASSERT(flush_flags);
-    }
-
-    return iommu_flush_iotlb(d, dfn, flush_flags & IOMMU_FLUSHF_modified,
-                             page_count);
-}
-
 static void queue_free_pt(struct domain_iommu *hd, mfn_t mfn, unsigned int level)
 {
     if ( level > 1 )
@@ -3254,7 +3246,7 @@  static const struct iommu_ops __initcons
     .suspend = vtd_suspend,
     .resume = vtd_resume,
     .crash_shutdown = vtd_crash_shutdown,
-    .iotlb_flush = iommu_flush_iotlb_pages,
+    .iotlb_flush = iommu_flush_iotlb,
     .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
     .dump_page_tables = vtd_dump_page_tables,
 };
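
For context only, and as an assumption about the callers rather than anything changed by this patch: the .iotlb_flush hook is reached from common IOMMU code with the same (dfn, page_count, flush_flags) arguments, so the two flag paths handled at the top of the folded function correspond roughly to calls of the following shape.

    /*
     * Hypothetical caller-side sketch; "ops" and "d" are placeholders and the
     * common-code wrapper is not shown in this patch.
     */

    /* Range flush after updating mappings: takes the ASSERT()ed path. */
    rc = ops->iotlb_flush(d, dfn, page_count, IOMMU_FLUSHF_modified);

    /* Full flush: IOMMU_FLUSHF_all overrides dfn/page_count inside the hook. */
    rc = ops->iotlb_flush(d, INVALID_DFN, 0, IOMMU_FLUSHF_all);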