
[v4,06/10] IOMMU/MMU: propagate IOMMU Device-TLB flush error up to iommu_iotlb_flush{,_all} (top level ones).

Message ID 1462524880-67205-7-git-send-email-quan.xu@intel.com (mailing list archive)
State New, archived

Commit Message

Quan Xu May 6, 2016, 8:54 a.m. UTC
Propagate the IOMMU Device-TLB flush error up to iommu_iotlb_flush{,_all}.

This patch fixes the top-level ones (the leaf ones are not fixed here; the
next patch does that).

Signed-off-by: Quan Xu <quan.xu@intel.com>

CC: Stefano Stabellini <sstabellini@kernel.org>
CC: Julien Grall <julien.grall@arm.com>
CC: Jan Beulich <jbeulich@suse.com>
CC: Kevin Tian <kevin.tian@intel.com>
---
 xen/arch/arm/p2m.c                  |  5 ++++-
 xen/common/memory.c                 | 14 ++++++++++++--
 xen/drivers/passthrough/iommu.c     | 13 +++++++++----
 xen/drivers/passthrough/x86/iommu.c |  5 +++--
 xen/include/xen/iommu.h             |  5 +++--
 5 files changed, 31 insertions(+), 11 deletions(-)
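
For orientation, the caller-side pattern this patch introduces (record the flush result, but never let it overwrite an error already held in rc) can be sketched as a standalone C program. This is a simplified illustration, not the Xen code itself: the stub flush hook, the stand-in struct domain, and the error value are invented for the example, and the real callers below additionally preserve positive "continuation" values of rc, which is why they test rc >= 0 rather than !rc.

/* Standalone sketch (not Xen code) of the error-propagation pattern:
 * the flush helpers now return int, and callers keep the first error
 * they see instead of discarding the return value.
 */
#include <stdio.h>

struct domain { int id; };          /* stand-in for the real struct domain */

/* Invented stub for the per-vendor flush hook; pretend it fails. */
static int stub_iotlb_flush(struct domain *d, unsigned long gfn,
                            unsigned int page_count)
{
    (void)d; (void)gfn; (void)page_count;
    return -5;                      /* an -EIO-like error code */
}

static int iommu_iotlb_flush(struct domain *d, unsigned long gfn,
                             unsigned int page_count)
{
    return stub_iotlb_flush(d, gfn, page_count);
}

int main(void)
{
    struct domain dom = { .id = 1 };
    int rc = 0, ret;

    ret = iommu_iotlb_flush(&dom, 0x1000, 8);
    if ( !rc )                      /* keep the first error seen */
        rc = ret;

    ret = iommu_iotlb_flush(&dom, 0x2000, 8);
    if ( !rc )                      /* a later result must not hide it */
        rc = ret;

    printf("rc = %d\n", rc);
    return rc ? 1 : 0;
}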

Comments

Jan Beulich May 10, 2016, 9:04 a.m. UTC | #1
>>> On 06.05.16 at 10:54, <quan.xu@intel.com> wrote:
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -677,9 +677,19 @@ static int xenmem_add_to_physmap(struct domain *d,
>  #ifdef CONFIG_HAS_PASSTHROUGH
>      if ( need_iommu(d) )
>      {
> +        int ret;
> +
>          this_cpu(iommu_dont_flush_iotlb) = 0;
> -        iommu_iotlb_flush(d, xatp->idx - done, done);
> -        iommu_iotlb_flush(d, xatp->gpfn - done, done);
> +
> +        ret = iommu_iotlb_flush(d, xatp->idx - done, done);
> +
> +        if ( rc >= 0 && unlikely(ret) )
> +            rc = ret;
> +
> +        ret = iommu_iotlb_flush(d, xatp->gpfn - done, done);
> +
> +        if ( rc >= 0 && unlikely(ret) )
> +            rc = ret;

In both cases I think it would be preferable to switch the two sides
of the &&.
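
The adjusted check being asked for would then read roughly as follows (a sketch of the requested change, not the committed code):

    /* Operand order switched as suggested: test the (unlikely) flush
     * error first, then whether rc still needs updating.
     */
    if ( unlikely(ret) && rc >= 0 )
        rc = ret;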

> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -312,24 +312,29 @@ static void iommu_free_pagetables(unsigned long unused)
>                              cpumask_cycle(smp_processor_id(), &cpu_online_map));
>  }
>  
> -void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count)
> +int __must_check iommu_iotlb_flush(struct domain *d, unsigned long gfn,
> +                                   unsigned int page_count)

The annotation generally belongs on declarations; the only exceptions
are static functions, which don't have any (i.e. they get defined before
first getting used).

> --- a/xen/include/xen/iommu.h
> +++ b/xen/include/xen/iommu.h
> @@ -200,8 +200,9 @@ int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d,
>  int iommu_do_domctl(struct xen_domctl *, struct domain *d,
>                      XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
>  
> -void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count);
> -void iommu_iotlb_flush_all(struct domain *d);
> +int __must_check iommu_iotlb_flush(struct domain *d, unsigned long gfn,
> +                                   unsigned int page_count);
> +int __must_check iommu_iotlb_flush_all(struct domain *d);

I.e. these changes suffice; repeating the annotations on the
function definitions is redundant with the ones put here.
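
Concretely, the split being asked for looks roughly like the sketch below (illustrative only; the definition's body is reduced to a trivial return):

/* Header (declaration): __must_check lives here, so every caller of the
 * function gets checked by the compiler.
 */
int __must_check iommu_iotlb_flush_all(struct domain *d);

/* Source file (definition): no annotation of its own; it inherits the
 * contract from the declaration above.
 */
int iommu_iotlb_flush_all(struct domain *d)
{
    /* ... actual flush logic ... */
    return 0;
}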

With respective adjustments done
Reviewed-by: Jan Beulich <jbeulich@suse.com>

Jan
Quan Xu May 11, 2016, 5:52 a.m. UTC | #2
On May 10, 2016 5:04 PM, Jan Beulich <JBeulich@suse.com> wrote:
> >>> On 06.05.16 at 10:54, <quan.xu@intel.com> wrote:
> > --- a/xen/common/memory.c
> > +++ b/xen/common/memory.c
> > @@ -677,9 +677,19 @@ static int xenmem_add_to_physmap(struct domain *d,
> >  #ifdef CONFIG_HAS_PASSTHROUGH
> >      if ( need_iommu(d) )
> >      {
> > +        int ret;
> > +
> >          this_cpu(iommu_dont_flush_iotlb) = 0;
> > -        iommu_iotlb_flush(d, xatp->idx - done, done);
> > -        iommu_iotlb_flush(d, xatp->gpfn - done, done);
> > +
> > +        ret = iommu_iotlb_flush(d, xatp->idx - done, done);
> > +
> > +        if ( rc >= 0 && unlikely(ret) )
> > +            rc = ret;
> > +
> > +        ret = iommu_iotlb_flush(d, xatp->gpfn - done, done);
> > +
> > +        if ( rc >= 0 && unlikely(ret) )
> > +            rc = ret;
> 
> In both cases I think it would be preferable to switch the two sides of the &&.
> 

Makes sense. Thanks for your careful check.


> > --- a/xen/drivers/passthrough/iommu.c
> > +++ b/xen/drivers/passthrough/iommu.c
> > @@ -312,24 +312,29 @@ static void iommu_free_pagetables(unsigned long unused)
> >                              cpumask_cycle(smp_processor_id(), &cpu_online_map));
> >  }
> >
> > -void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count)
> > +int __must_check iommu_iotlb_flush(struct domain *d, unsigned long gfn,
> > +                                   unsigned int page_count)
> 
> The annotation generally belongs on declarations; the only exceptions are
> static functions, which don't have any (i.e. they get defined before first
> getting used).
>

You are right. I will drop these __must_check annotations here.
 
> > --- a/xen/include/xen/iommu.h
> > +++ b/xen/include/xen/iommu.h
> > @@ -200,8 +200,9 @@ int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d,
> >  int iommu_do_domctl(struct xen_domctl *, struct domain *d,
> >                      XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
> >
> > -void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count);
> > -void iommu_iotlb_flush_all(struct domain *d);
> > +int __must_check iommu_iotlb_flush(struct domain *d, unsigned long gfn,
> > +                                   unsigned int page_count);
> > +int __must_check iommu_iotlb_flush_all(struct domain *d);
> 
> I.e. these changes suffice; repeating the annotations on the function
> definitions is redundant with the ones put here.
> 

Agreed.

> With respective adjustments done
> Reviewed-by: Jan Beulich <jbeulich@suse.com>

Quan

Patch

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index db21433..fe201d0 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1178,7 +1178,10 @@  out:
     if ( flush )
     {
         flush_tlb_domain(d);
-        iommu_iotlb_flush(d, sgfn, egfn - sgfn);
+        ret = iommu_iotlb_flush(d, sgfn, egfn - sgfn);
+
+        if ( !rc )
+            rc = ret;
     }
 
     while ( (pg = page_list_remove_head(&free_pages)) )
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 644f81a..21b8098 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -677,9 +677,19 @@  static int xenmem_add_to_physmap(struct domain *d,
 #ifdef CONFIG_HAS_PASSTHROUGH
     if ( need_iommu(d) )
     {
+        int ret;
+
         this_cpu(iommu_dont_flush_iotlb) = 0;
-        iommu_iotlb_flush(d, xatp->idx - done, done);
-        iommu_iotlb_flush(d, xatp->gpfn - done, done);
+
+        ret = iommu_iotlb_flush(d, xatp->idx - done, done);
+
+        if ( rc >= 0 && unlikely(ret) )
+            rc = ret;
+
+        ret = iommu_iotlb_flush(d, xatp->gpfn - done, done);
+
+        if ( rc >= 0 && unlikely(ret) )
+            rc = ret;
     }
 #endif
 
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index cca4cf3..7b5fc59 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -312,24 +312,29 @@  static void iommu_free_pagetables(unsigned long unused)
                             cpumask_cycle(smp_processor_id(), &cpu_online_map));
 }
 
-void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count)
+int __must_check iommu_iotlb_flush(struct domain *d, unsigned long gfn,
+                                   unsigned int page_count)
 {
     const struct domain_iommu *hd = dom_iommu(d);
 
     if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
-        return;
+        return 0;
 
     hd->platform_ops->iotlb_flush(d, gfn, page_count);
+
+    return 0;
 }
 
-void iommu_iotlb_flush_all(struct domain *d)
+int __must_check iommu_iotlb_flush_all(struct domain *d)
 {
     const struct domain_iommu *hd = dom_iommu(d);
 
     if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush_all )
-        return;
+        return 0;
 
     hd->platform_ops->iotlb_flush_all(d);
+
+    return 0;
 }
 
 int __init iommu_setup(void)
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index b64b98f..a18a608 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -104,8 +104,9 @@  int arch_iommu_populate_page_table(struct domain *d)
     this_cpu(iommu_dont_flush_iotlb) = 0;
 
     if ( !rc )
-        iommu_iotlb_flush_all(d);
-    else if ( rc != -ERESTART )
+        rc = iommu_iotlb_flush_all(d);
+
+    if ( rc && rc != -ERESTART )
         iommu_teardown(d);
 
     return rc;
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 19ba976..75c17a7 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -200,8 +200,9 @@  int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d,
 int iommu_do_domctl(struct xen_domctl *, struct domain *d,
                     XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
 
-void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count);
-void iommu_iotlb_flush_all(struct domain *d);
+int __must_check iommu_iotlb_flush(struct domain *d, unsigned long gfn,
+                                   unsigned int page_count);
+int __must_check iommu_iotlb_flush_all(struct domain *d);
 
 /*
  * The purpose of the iommu_dont_flush_iotlb optional cpu flag is to