
[v4,04/10] vt-d: propagate IOMMU Device-TLB flush error up to IOMMU unmapping.

Message ID 1462524880-67205-5-git-send-email-quan.xu@intel.com (mailing list archive)
State New, archived

Commit Message

Quan Xu May 6, 2016, 8:54 a.m. UTC
Propagate the IOMMU Device-TLB flush error up to IOMMU unmapping.

Signed-off-by: Quan Xu <quan.xu@intel.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>

CC: Kevin Tian <kevin.tian@intel.com>
CC: Feng Wu <feng.wu@intel.com>
CC: Jan Beulich <jbeulich@suse.com>
---
 xen/drivers/passthrough/vtd/iommu.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
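
As a rough sketch of the intent (illustrative only, not part of this patch; the caller and the error-handling policy shown are assumptions), a failed Device-TLB flush inside dma_pte_clear_one() can now be observed by the unmap path instead of being silently dropped:

    /*
     * Hypothetical caller sketch. In the tree this function is reached via
     * the common iommu_unmap_page() wrapper and the VT-d unmap_page hook;
     * it is called directly here only for illustration.
     */
    int rc = intel_iommu_unmap_page(d, gfn);  /* d: struct domain *, gfn: unsigned long */

    if ( rc )
        printk(XENLOG_ERR "d%d: IOMMU unmap of gfn %#lx failed: %d\n",
               d->domain_id, gfn, rc);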

Comments

Jan Beulich May 10, 2016, 8:50 a.m. UTC | #1
>>> On 06.05.16 at 10:54, <quan.xu@intel.com> wrote:
> Propagate the IOMMU Device-TLB flush error up to IOMMU unmapping.
> 
> Signed-off-by: Quan Xu <quan.xu@intel.com>
> Acked-by: Kevin Tian <kevin.tian@intel.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>

but note ...

> @@ -1766,9 +1769,7 @@ static int intel_iommu_unmap_page(struct domain *d, unsigned long gfn)
>      if ( iommu_passthrough && is_hardware_domain(d) )
>          return 0;
>  
> -    dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
> -
> -    return 0;
> +    return dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
>  }

... how you lose the __must_check here, since
intel_iommu_unmap_page() isn't __must_check (which we said you
may skip as long as the common code wrapper has it, but in the
context here I'm no longer convinced skipping this at any layer is a
good idea, as that makes validation of the call trees more difficult).
(This is just a remark regarding the comment on the earlier patch,
i.e. not something needing any further change here.)

Jan
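
For reference, the effect Jan describes can be sketched as below (illustrative only; the declarations are inferred from the hunks above and are not part of the patch):

    /*
     * With __must_check (warn_unused_result) on every layer, the compiler
     * flags any caller that silently drops the error:
     */
    static int __must_check dma_pte_clear_one(struct domain *domain, u64 addr);

    static int __must_check intel_iommu_unmap_page(struct domain *d,
                                                   unsigned long gfn);

    /*
     * Without __must_check on intel_iommu_unmap_page(), a call such as
     *
     *     intel_iommu_unmap_page(d, gfn);   (return value ignored)
     *
     * compiles without any warning, so the call tree has to be audited by
     * hand, which is the validation difficulty mentioned above.
     */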
Quan Xu May 11, 2016, 3:49 a.m. UTC | #2
On May 10, 2016 4:51 PM, Jan Beulich <JBeulich@suse.com> wrote:
> >>> On 06.05.16 at 10:54, <quan.xu@intel.com> wrote:
> > Propagate the IOMMU Device-TLB flush error up to IOMMU unmapping.
> >
> > Signed-off-by: Quan Xu <quan.xu@intel.com>
> > Acked-by: Kevin Tian <kevin.tian@intel.com>
> 
> Reviewed-by: Jan Beulich <jbeulich@suse.com>
> 
> but note ...
> 
> > @@ -1766,9 +1769,7 @@ static int intel_iommu_unmap_page(struct domain *d, unsigned long gfn)
> >      if ( iommu_passthrough && is_hardware_domain(d) )
> >          return 0;
> >
> > -    dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
> > -
> > -    return 0;
> > +    return dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
> >  }
> 
> ... how you lose the __must_check here, since
> intel_iommu_unmap_page() isn't __must_check (which we said you may skip
> as long as the common code wrapper has it, but in the context here I'm no
> longer convinced skipping this at any layer is a good idea, as that makes
> validation of the call trees more difficult).
> (This is just a remark regarding the comment on the earlier patch, i.e. not
> something needing any further change here.)
> 


I'll be bold and add __must_check.
Quan
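
The follow-up Quan mentions might look roughly like the hunk below (a sketch for illustration; the actual change appeared in a later revision of the series and is not quoted here):

    -static int intel_iommu_unmap_page(struct domain *d, unsigned long gfn)
    +static int __must_check intel_iommu_unmap_page(struct domain *d,
    +                                               unsigned long gfn)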

Patch

diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 64093a9..e0e1126 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -616,11 +616,12 @@  static void iommu_flush_iotlb_all(struct domain *d)
 }
 
 /* clear one page's page table */
-static void dma_pte_clear_one(struct domain *domain, u64 addr)
+static int __must_check dma_pte_clear_one(struct domain *domain, u64 addr)
 {
     struct domain_iommu *hd = dom_iommu(domain);
     struct dma_pte *page = NULL, *pte = NULL;
     u64 pg_maddr;
+    int rc = 0;
 
     spin_lock(&hd->arch.mapping_lock);
     /* get last level pte */
@@ -628,7 +629,7 @@  static void dma_pte_clear_one(struct domain *domain, u64 addr)
     if ( pg_maddr == 0 )
     {
         spin_unlock(&hd->arch.mapping_lock);
-        return;
+        return 0;
     }
 
     page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
@@ -638,7 +639,7 @@  static void dma_pte_clear_one(struct domain *domain, u64 addr)
     {
         spin_unlock(&hd->arch.mapping_lock);
         unmap_vtd_domain_page(page);
-        return;
+        return 0;
     }
 
     dma_clear_pte(*pte);
@@ -646,9 +647,11 @@  static void dma_pte_clear_one(struct domain *domain, u64 addr)
     iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
 
     if ( !this_cpu(iommu_dont_flush_iotlb) )
-        iommu_flush_iotlb(domain, addr >> PAGE_SHIFT_4K, 1, 1);
+        rc = iommu_flush_iotlb(domain, addr >> PAGE_SHIFT_4K, 1, 1);
 
     unmap_vtd_domain_page(page);
+
+    return rc;
 }
 
 static void iommu_free_pagetable(u64 pt_maddr, int level)
@@ -1766,9 +1769,7 @@  static int intel_iommu_unmap_page(struct domain *d, unsigned long gfn)
     if ( iommu_passthrough && is_hardware_domain(d) )
         return 0;
 
-    dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
-
-    return 0;
+    return dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
 }
 
 int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,