[v1,20/22] intel_iommu: propagate PASID-based iotlb invalidation to host

Message ID 1584880579-12178-21-git-send-email-yi.l.liu@intel.com (mailing list archive)
State New, archived
Series intel_iommu: expose Shared Virtual Addressing to VMs

Commit Message

Yi Liu March 22, 2020, 12:36 p.m. UTC
This patch propagates PASID-based iotlb invalidation to the host.

Intel VT-d 3.0 supports nested translation at PASID granularity.
Guest SVA support can be implemented by configuring nested
translation for a specific PASID. This is also known as dual-stage
DMA translation.

Under such a configuration, the guest owns the GVA->GPA translation,
which is configured as the first-level page table on the host side
for a specific PASID, while the host owns the GPA->HPA translation.
Since the guest owns the first-level translation table, piotlb
invalidations must be propagated to the host, because the host IOMMU
caches first-level page table mappings during DMA address
translation.

This patch traps the guest's PASID-based iotlb flush and propagates
it to the host.

Cc: Kevin Tian <kevin.tian@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Yi Sun <yi.y.sun@linux.intel.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
---
 hw/i386/intel_iommu.c          | 139 +++++++++++++++++++++++++++++++++++++++++
 hw/i386/intel_iommu_internal.h |   7 +++
 2 files changed, 146 insertions(+)
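
For illustration only (not part of the patch): the address-granularity flush added
in vtd_piotlb_page_invalidate() below encodes the guest's (addr, am) pair as a
single granule of 1 << (12 + am) bytes. A minimal standalone sketch with made-up
example values; only the shift formula comes from the patch:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t  am = 2;                           /* invalidation covers 2^am 4 KiB pages */
        uint64_t addr = 0x1000;                    /* page-aligned base address (example)  */
        uint64_t granule_size = 1ULL << (12 + am); /* 16 KiB for am = 2                    */
        uint64_t nb_granules = 1;                  /* the patch always sends one granule   */

        printf("flush range: [%#" PRIx64 ", %#" PRIx64 ")\n",
               addr, addr + granule_size * nb_granules);
        return 0;
    }

The PASID-granularity variant (vtd_piotlb_pasid_invalidate()) skips the range and
just names the PASID, as shown in the diff below.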

Comments

Peter Xu March 24, 2020, 6:34 p.m. UTC | #1
On Sun, Mar 22, 2020 at 05:36:17AM -0700, Liu Yi L wrote:
> This patch propagates PASID-based iotlb invalidation to host.
> 
> Intel VT-d 3.0 supports nested translation in PASID granular.
> Guest SVA support could be implemented by configuring nested
> translation on specific PASID. This is also known as dual stage
> DMA translation.
> 
> Under such configuration, guest owns the GVA->GPA translation
> which is configured as first level page table in host side for
> a specific pasid, and host owns GPA->HPA translation. As guest
> owns first level translation table, piotlb invalidation should
> be propagated to host since host IOMMU will cache first level
> page table related mappings during DMA address translation.
> 
> This patch traps the guest PASID-based iotlb flush and propagate
> it to host.
> 
> Cc: Kevin Tian <kevin.tian@intel.com>
> Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
> Cc: Peter Xu <peterx@redhat.com>
> Cc: Yi Sun <yi.y.sun@linux.intel.com>
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> Cc: Richard Henderson <rth@twiddle.net>
> Cc: Eduardo Habkost <ehabkost@redhat.com>
> Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
> ---
>  hw/i386/intel_iommu.c          | 139 +++++++++++++++++++++++++++++++++++++++++
>  hw/i386/intel_iommu_internal.h |   7 +++
>  2 files changed, 146 insertions(+)
> 
> diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
> index b9ac07d..10d314d 100644
> --- a/hw/i386/intel_iommu.c
> +++ b/hw/i386/intel_iommu.c
> @@ -3134,15 +3134,154 @@ static bool vtd_process_pasid_desc(IntelIOMMUState *s,
>      return (ret == 0) ? true : false;
>  }
>  
> +/**
> + * Caller of this function should hold iommu_lock.
> + */
> +static void vtd_invalidate_piotlb(IntelIOMMUState *s,
> +                                  VTDBus *vtd_bus,
> +                                  int devfn,
> +                                  DualIOMMUStage1Cache *stage1_cache)
> +{
> +    VTDHostIOMMUContext *vtd_dev_icx;
> +    HostIOMMUContext *host_icx;
> +
> +    vtd_dev_icx = vtd_bus->dev_icx[devfn];
> +    if (!vtd_dev_icx) {
> +        goto out;
> +    }
> +    host_icx = vtd_dev_icx->host_icx;
> +    if (!host_icx) {
> +        goto out;
> +    }
> +    if (host_iommu_ctx_flush_stage1_cache(host_icx, stage1_cache)) {
> +        error_report("Cache flush failed");

I think this should not easily be triggered by the guest, but just in
case... Let's use error_report_once() to be safe.

> +    }
> +out:
> +    return;
> +}
> +
> +static inline bool vtd_pasid_cache_valid(
> +                          VTDPASIDAddressSpace *vtd_pasid_as)
> +{
> +    return vtd_pasid_as->iommu_state &&

This check can be dropped because always true?

If you agree with both the changes, please add:

Reviewed-by: Peter Xu <peterx@redhat.com>

> +           (vtd_pasid_as->iommu_state->pasid_cache_gen
> +             == vtd_pasid_as->pasid_cache_entry.pasid_cache_gen);
> +}
> +
> +/**
> + * This function is a loop function for the s->vtd_pasid_as
> + * list with VTDPIOTLBInvInfo as execution filter. It propagates
> + * the piotlb invalidation to host. Caller of this function
> + * should hold iommu_lock.
> + */
> +static void vtd_flush_pasid_iotlb(gpointer key, gpointer value,
> +                                  gpointer user_data)
> +{
> +    VTDPIOTLBInvInfo *piotlb_info = user_data;
> +    VTDPASIDAddressSpace *vtd_pasid_as = value;
> +    uint16_t did;
> +
> +    /*
> +     * Needs to check whether the pasid entry cache stored in
> +     * vtd_pasid_as is valid or not. "invalid" means the pasid
> +     * cache has been flushed, thus host should have done piotlb
> +     * invalidation together with a pasid cache invalidation, so
> +     * no need to pass down piotlb invalidation to host for better
> +     * performance. Only when pasid entry cache is "valid", should
> +     * a piotlb invalidation be propagated to host since it means
> +     * guest just modified a mapping in its page table.
> +     */
> +    if (!vtd_pasid_cache_valid(vtd_pasid_as)) {
> +        return;
> +    }
> +
> +    did = vtd_pe_get_domain_id(
> +                &(vtd_pasid_as->pasid_cache_entry.pasid_entry));
> +
> +    if ((piotlb_info->domain_id == did) &&
> +        (piotlb_info->pasid == vtd_pasid_as->pasid)) {
> +        vtd_invalidate_piotlb(vtd_pasid_as->iommu_state,
> +                              vtd_pasid_as->vtd_bus,
> +                              vtd_pasid_as->devfn,
> +                              piotlb_info->stage1_cache);
> +    }
> +
> +    /*
> +     * TODO: needs to add QEMU piotlb flush when QEMU piotlb
> +     * infrastructure is ready. For now, it is enough for passthru
> +     * devices.
> +     */
> +}
> +
>  static void vtd_piotlb_pasid_invalidate(IntelIOMMUState *s,
>                                          uint16_t domain_id,
>                                          uint32_t pasid)
>  {
> +    VTDPIOTLBInvInfo piotlb_info;
> +    DualIOMMUStage1Cache *stage1_cache;
> +    struct iommu_cache_invalidate_info *cache_info;
> +
> +    stage1_cache = g_malloc0(sizeof(*stage1_cache));
> +    stage1_cache->pasid = pasid;
> +
> +    cache_info = &stage1_cache->cache_info;
> +    cache_info->version = IOMMU_UAPI_VERSION;
> +    cache_info->cache = IOMMU_CACHE_INV_TYPE_IOTLB;
> +    cache_info->granularity = IOMMU_INV_GRANU_PASID;
> +    cache_info->pasid_info.pasid = pasid;
> +    cache_info->pasid_info.flags = IOMMU_INV_PASID_FLAGS_PASID;
> +
> +    piotlb_info.domain_id = domain_id;
> +    piotlb_info.pasid = pasid;
> +    piotlb_info.stage1_cache = stage1_cache;
> +
> +    vtd_iommu_lock(s);
> +    /*
> +     * Here loops all the vtd_pasid_as instances in s->vtd_pasid_as
> +     * to find out the affected devices since piotlb invalidation
> +     * should check pasid cache per architecture point of view.
> +     */
> +    g_hash_table_foreach(s->vtd_pasid_as,
> +                         vtd_flush_pasid_iotlb, &piotlb_info);
> +    vtd_iommu_unlock(s);
> +    g_free(stage1_cache);
>  }
>  
>  static void vtd_piotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
>                               uint32_t pasid, hwaddr addr, uint8_t am, bool ih)
>  {
> +    VTDPIOTLBInvInfo piotlb_info;
> +    DualIOMMUStage1Cache *stage1_cache;
> +    struct iommu_cache_invalidate_info *cache_info;
> +
> +    stage1_cache = g_malloc0(sizeof(*stage1_cache));
> +    stage1_cache->pasid = pasid;
> +
> +    cache_info = &stage1_cache->cache_info;
> +    cache_info->version = IOMMU_UAPI_VERSION;
> +    cache_info->cache = IOMMU_CACHE_INV_TYPE_IOTLB;
> +    cache_info->granularity = IOMMU_INV_GRANU_ADDR;
> +    cache_info->addr_info.flags = IOMMU_INV_ADDR_FLAGS_PASID;
> +    cache_info->addr_info.flags |= ih ? IOMMU_INV_ADDR_FLAGS_LEAF : 0;
> +    cache_info->addr_info.pasid = pasid;
> +    cache_info->addr_info.addr = addr;
> +    cache_info->addr_info.granule_size = 1 << (12 + am);
> +    cache_info->addr_info.nb_granules = 1;
> +
> +    piotlb_info.domain_id = domain_id;
> +    piotlb_info.pasid = pasid;
> +    piotlb_info.stage1_cache = stage1_cache;
> +
> +    vtd_iommu_lock(s);
> +    /*
> +     * Here loops all the vtd_pasid_as instances in s->vtd_pasid_as
> +     * to find out the affected devices since piotlb invalidation
> +     * should check pasid cache per architecture point of view.
> +     */
> +    g_hash_table_foreach(s->vtd_pasid_as,
> +                         vtd_flush_pasid_iotlb, &piotlb_info);
> +    vtd_iommu_unlock(s);
> +    g_free(stage1_cache);
>  }
>  
>  static bool vtd_process_piotlb_desc(IntelIOMMUState *s,
> diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
> index 314e2c4..967cc4f 100644
> --- a/hw/i386/intel_iommu_internal.h
> +++ b/hw/i386/intel_iommu_internal.h
> @@ -560,6 +560,13 @@ struct VTDPASIDCacheInfo {
>                                        VTD_PASID_CACHE_DEVSI)
>  typedef struct VTDPASIDCacheInfo VTDPASIDCacheInfo;
>  
> +struct VTDPIOTLBInvInfo {
> +    uint16_t domain_id;
> +    uint32_t pasid;
> +    DualIOMMUStage1Cache *stage1_cache;
> +};
> +typedef struct VTDPIOTLBInvInfo VTDPIOTLBInvInfo;
> +
>  /* PASID Table Related Definitions */
>  #define VTD_PASID_DIR_BASE_ADDR_MASK  (~0xfffULL)
>  #define VTD_PASID_TABLE_BASE_ADDR_MASK (~0xfffULL)
> -- 
> 2.7.4
>
Yi Liu March 25, 2020, 1:21 p.m. UTC | #2
> From: Peter Xu <peterx@redhat.com>
> Sent: Wednesday, March 25, 2020 2:34 AM
> To: Liu, Yi L <yi.l.liu@intel.com>
> Subject: Re: [PATCH v1 20/22] intel_iommu: propagate PASID-based iotlb
> invalidation to host
> 
> On Sun, Mar 22, 2020 at 05:36:17AM -0700, Liu Yi L wrote:
> > This patch propagates PASID-based iotlb invalidation to host.
> >
> > Intel VT-d 3.0 supports nested translation in PASID granular.
> > Guest SVA support could be implemented by configuring nested
> > translation on specific PASID. This is also known as dual stage DMA
> > translation.
> >
> > Under such configuration, guest owns the GVA->GPA translation which is
> > configured as first level page table in host side for a specific
> > pasid, and host owns GPA->HPA translation. As guest owns first level
> > translation table, piotlb invalidation should be propagated to host
> > since host IOMMU will cache first level page table related mappings
> > during DMA address translation.
> >
> > This patch traps the guest PASID-based iotlb flush and propagate it to
> > host.
> >
> > Cc: Kevin Tian <kevin.tian@intel.com>
> > Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
> > Cc: Peter Xu <peterx@redhat.com>
> > Cc: Yi Sun <yi.y.sun@linux.intel.com>
> > Cc: Paolo Bonzini <pbonzini@redhat.com>
> > Cc: Richard Henderson <rth@twiddle.net>
> > Cc: Eduardo Habkost <ehabkost@redhat.com>
> > Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
> > ---
> >  hw/i386/intel_iommu.c          | 139
> +++++++++++++++++++++++++++++++++++++++++
> >  hw/i386/intel_iommu_internal.h |   7 +++
> >  2 files changed, 146 insertions(+)
> >
> > diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index
> > b9ac07d..10d314d 100644
> > --- a/hw/i386/intel_iommu.c
> > +++ b/hw/i386/intel_iommu.c
> > @@ -3134,15 +3134,154 @@ static bool
> vtd_process_pasid_desc(IntelIOMMUState *s,
> >      return (ret == 0) ? true : false;  }
> >
> > +/**
> > + * Caller of this function should hold iommu_lock.
> > + */
> > +static void vtd_invalidate_piotlb(IntelIOMMUState *s,
> > +                                  VTDBus *vtd_bus,
> > +                                  int devfn,
> > +                                  DualIOMMUStage1Cache *stage1_cache)
> > +{
> > +    VTDHostIOMMUContext *vtd_dev_icx;
> > +    HostIOMMUContext *host_icx;
> > +
> > +    vtd_dev_icx = vtd_bus->dev_icx[devfn];
> > +    if (!vtd_dev_icx) {
> > +        goto out;
> > +    }
> > +    host_icx = vtd_dev_icx->host_icx;
> > +    if (!host_icx) {
> > +        goto out;
> > +    }
> > +    if (host_iommu_ctx_flush_stage1_cache(host_icx, stage1_cache)) {
> > +        error_report("Cache flush failed");
> 
> I think this should not easily be triggered by the guest, but just in case... Let's use
> error_report_once() to be safe.

Agreed.

> > +    }
> > +out:
> > +    return;
> > +}
> > +
> > +static inline bool vtd_pasid_cache_valid(
> > +                          VTDPASIDAddressSpace *vtd_pasid_as) {
> > +    return vtd_pasid_as->iommu_state &&
> 
> This check can be dropped because always true?
> 
> If you agree with both the changes, please add:
> 
> Reviewed-by: Peter Xu <peterx@redhat.com>

I think the code should ensure that all the pasid_as entries in the hash
table are valid. And we can, since all the operations are under the
protection of the iommu_lock.

Thanks,
Yi Liu

> > +           (vtd_pasid_as->iommu_state->pasid_cache_gen
> > +             == vtd_pasid_as->pasid_cache_entry.pasid_cache_gen);
> > +}
> > +
> > +/**
> > + * This function is a loop function for the s->vtd_pasid_as
> > + * list with VTDPIOTLBInvInfo as execution filter. It propagates
> > + * the piotlb invalidation to host. Caller of this function
> > + * should hold iommu_lock.
> > + */
> > +static void vtd_flush_pasid_iotlb(gpointer key, gpointer value,
> > +                                  gpointer user_data) {
> > +    VTDPIOTLBInvInfo *piotlb_info = user_data;
> > +    VTDPASIDAddressSpace *vtd_pasid_as = value;
> > +    uint16_t did;
> > +
> > +    /*
> > +     * Needs to check whether the pasid entry cache stored in
> > +     * vtd_pasid_as is valid or not. "invalid" means the pasid
> > +     * cache has been flushed, thus host should have done piotlb
> > +     * invalidation together with a pasid cache invalidation, so
> > +     * no need to pass down piotlb invalidation to host for better
> > +     * performance. Only when pasid entry cache is "valid", should
> > +     * a piotlb invalidation be propagated to host since it means
> > +     * guest just modified a mapping in its page table.
> > +     */
> > +    if (!vtd_pasid_cache_valid(vtd_pasid_as)) {
> > +        return;
> > +    }
> > +
> > +    did = vtd_pe_get_domain_id(
> > +                &(vtd_pasid_as->pasid_cache_entry.pasid_entry));
> > +
> > +    if ((piotlb_info->domain_id == did) &&
> > +        (piotlb_info->pasid == vtd_pasid_as->pasid)) {
> > +        vtd_invalidate_piotlb(vtd_pasid_as->iommu_state,
> > +                              vtd_pasid_as->vtd_bus,
> > +                              vtd_pasid_as->devfn,
> > +                              piotlb_info->stage1_cache);
> > +    }
> > +
> > +    /*
> > +     * TODO: needs to add QEMU piotlb flush when QEMU piotlb
> > +     * infrastructure is ready. For now, it is enough for passthru
> > +     * devices.
> > +     */
> > +}
> > +
> >  static void vtd_piotlb_pasid_invalidate(IntelIOMMUState *s,
> >                                          uint16_t domain_id,
> >                                          uint32_t pasid)  {
> > +    VTDPIOTLBInvInfo piotlb_info;
> > +    DualIOMMUStage1Cache *stage1_cache;
> > +    struct iommu_cache_invalidate_info *cache_info;
> > +
> > +    stage1_cache = g_malloc0(sizeof(*stage1_cache));
> > +    stage1_cache->pasid = pasid;
> > +
> > +    cache_info = &stage1_cache->cache_info;
> > +    cache_info->version = IOMMU_UAPI_VERSION;
> > +    cache_info->cache = IOMMU_CACHE_INV_TYPE_IOTLB;
> > +    cache_info->granularity = IOMMU_INV_GRANU_PASID;
> > +    cache_info->pasid_info.pasid = pasid;
> > +    cache_info->pasid_info.flags = IOMMU_INV_PASID_FLAGS_PASID;
> > +
> > +    piotlb_info.domain_id = domain_id;
> > +    piotlb_info.pasid = pasid;
> > +    piotlb_info.stage1_cache = stage1_cache;
> > +
> > +    vtd_iommu_lock(s);
> > +    /*
> > +     * Here loops all the vtd_pasid_as instances in s->vtd_pasid_as
> > +     * to find out the affected devices since piotlb invalidation
> > +     * should check pasid cache per architecture point of view.
> > +     */
> > +    g_hash_table_foreach(s->vtd_pasid_as,
> > +                         vtd_flush_pasid_iotlb, &piotlb_info);
> > +    vtd_iommu_unlock(s);
> > +    g_free(stage1_cache);
> >  }
> >
> >  static void vtd_piotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
> >                               uint32_t pasid, hwaddr addr, uint8_t am,
> > bool ih)  {
> > +    VTDPIOTLBInvInfo piotlb_info;
> > +    DualIOMMUStage1Cache *stage1_cache;
> > +    struct iommu_cache_invalidate_info *cache_info;
> > +
> > +    stage1_cache = g_malloc0(sizeof(*stage1_cache));
> > +    stage1_cache->pasid = pasid;
> > +
> > +    cache_info = &stage1_cache->cache_info;
> > +    cache_info->version = IOMMU_UAPI_VERSION;
> > +    cache_info->cache = IOMMU_CACHE_INV_TYPE_IOTLB;
> > +    cache_info->granularity = IOMMU_INV_GRANU_ADDR;
> > +    cache_info->addr_info.flags = IOMMU_INV_ADDR_FLAGS_PASID;
> > +    cache_info->addr_info.flags |= ih ? IOMMU_INV_ADDR_FLAGS_LEAF : 0;
> > +    cache_info->addr_info.pasid = pasid;
> > +    cache_info->addr_info.addr = addr;
> > +    cache_info->addr_info.granule_size = 1 << (12 + am);
> > +    cache_info->addr_info.nb_granules = 1;
> > +
> > +    piotlb_info.domain_id = domain_id;
> > +    piotlb_info.pasid = pasid;
> > +    piotlb_info.stage1_cache = stage1_cache;
> > +
> > +    vtd_iommu_lock(s);
> > +    /*
> > +     * Here loops all the vtd_pasid_as instances in s->vtd_pasid_as
> > +     * to find out the affected devices since piotlb invalidation
> > +     * should check pasid cache per architecture point of view.
> > +     */
> > +    g_hash_table_foreach(s->vtd_pasid_as,
> > +                         vtd_flush_pasid_iotlb, &piotlb_info);
> > +    vtd_iommu_unlock(s);
> > +    g_free(stage1_cache);
> >  }
> >
> >  static bool vtd_process_piotlb_desc(IntelIOMMUState *s, diff --git
> > a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
> > index 314e2c4..967cc4f 100644
> > --- a/hw/i386/intel_iommu_internal.h
> > +++ b/hw/i386/intel_iommu_internal.h
> > @@ -560,6 +560,13 @@ struct VTDPASIDCacheInfo {
> >                                        VTD_PASID_CACHE_DEVSI)  typedef
> > struct VTDPASIDCacheInfo VTDPASIDCacheInfo;
> >
> > +struct VTDPIOTLBInvInfo {
> > +    uint16_t domain_id;
> > +    uint32_t pasid;
> > +    DualIOMMUStage1Cache *stage1_cache; }; typedef struct
> > +VTDPIOTLBInvInfo VTDPIOTLBInvInfo;
> > +
> >  /* PASID Table Related Definitions */  #define
> > VTD_PASID_DIR_BASE_ADDR_MASK  (~0xfffULL)  #define
> > VTD_PASID_TABLE_BASE_ADDR_MASK (~0xfffULL)
> > --
> > 2.7.4
> >
> 
> --
> Peter Xu
Yi Liu March 26, 2020, 5:41 a.m. UTC | #3
> From: Liu, Yi L
> Sent: Wednesday, March 25, 2020 9:22 PM
> To: 'Peter Xu' <peterx@redhat.com>
> Subject: RE: [PATCH v1 20/22] intel_iommu: propagate PASID-based iotlb
> invalidation to host
> 
> > From: Peter Xu <peterx@redhat.com>
> > Sent: Wednesday, March 25, 2020 2:34 AM
> > To: Liu, Yi L <yi.l.liu@intel.com>
> > Subject: Re: [PATCH v1 20/22] intel_iommu: propagate PASID-based iotlb
> > invalidation to host
> >
> > On Sun, Mar 22, 2020 at 05:36:17AM -0700, Liu Yi L wrote:
> > > This patch propagates PASID-based iotlb invalidation to host.
> > >
> > > Intel VT-d 3.0 supports nested translation in PASID granular.
> > > Guest SVA support could be implemented by configuring nested
> > > translation on specific PASID. This is also known as dual stage DMA
> > > translation.
> > >
> > > Under such configuration, guest owns the GVA->GPA translation which
> > > is configured as first level page table in host side for a specific
> > > pasid, and host owns GPA->HPA translation. As guest owns first level
> > > translation table, piotlb invalidation should be propagated to host
> > > since host IOMMU will cache first level page table related mappings
> > > during DMA address translation.
> > >
> > > This patch traps the guest PASID-based iotlb flush and propagate it
> > > to host.
> > >
> > > Cc: Kevin Tian <kevin.tian@intel.com>
> > > Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
> > > Cc: Peter Xu <peterx@redhat.com>
> > > Cc: Yi Sun <yi.y.sun@linux.intel.com>
> > > Cc: Paolo Bonzini <pbonzini@redhat.com>
> > > Cc: Richard Henderson <rth@twiddle.net>
> > > Cc: Eduardo Habkost <ehabkost@redhat.com>
> > > Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
> > > ---
> > >  hw/i386/intel_iommu.c          | 139
> > +++++++++++++++++++++++++++++++++++++++++
> > >  hw/i386/intel_iommu_internal.h |   7 +++
> > >  2 files changed, 146 insertions(+)
> > >
> > > diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index
> > > b9ac07d..10d314d 100644
> > > --- a/hw/i386/intel_iommu.c
> > > +++ b/hw/i386/intel_iommu.c
> > > @@ -3134,15 +3134,154 @@ static bool
> > vtd_process_pasid_desc(IntelIOMMUState *s,
> > >      return (ret == 0) ? true : false;  }
> > >
> > > +/**
> > > + * Caller of this function should hold iommu_lock.
> > > + */
> > > +static void vtd_invalidate_piotlb(IntelIOMMUState *s,
> > > +                                  VTDBus *vtd_bus,
> > > +                                  int devfn,
> > > +                                  DualIOMMUStage1Cache
> > > +*stage1_cache) {
> > > +    VTDHostIOMMUContext *vtd_dev_icx;
> > > +    HostIOMMUContext *host_icx;
> > > +
> > > +    vtd_dev_icx = vtd_bus->dev_icx[devfn];
> > > +    if (!vtd_dev_icx) {
> > > +        goto out;
> > > +    }
> > > +    host_icx = vtd_dev_icx->host_icx;
> > > +    if (!host_icx) {
> > > +        goto out;
> > > +    }
> > > +    if (host_iommu_ctx_flush_stage1_cache(host_icx, stage1_cache)) {
> > > +        error_report("Cache flush failed");
> >
> > I think this should not easily be triggered by the guest, but just in
> > case... Let's use
> > error_report_once() to be safe.
> 
> Agreed.
> 
> > > +    }
> > > +out:
> > > +    return;
> > > +}
> > > +
> > > +static inline bool vtd_pasid_cache_valid(
> > > +                          VTDPASIDAddressSpace *vtd_pasid_as) {
> > > +    return vtd_pasid_as->iommu_state &&
> >
> > This check can be dropped because always true?
> >
> > If you agree with both the changes, please add:
> >
> > Reviewed-by: Peter Xu <peterx@redhat.com>
> 
> I think the code should ensure all the pasid_as in hash table is valid. And we can
> since all the operations are under protection of iommu_lock.
> 
Peter,

I think my reply was wrong. A pasid_as in the hash table may be stale,
since its per-pasid_as cache_gen may not be identical to the cache_gen
in iommu_state. e.g. vtd_pasid_cache_reset() only increases the
cache_gen in iommu_state, so there may be a pasid_as in the hash table
which holds a cached pasid entry whose cache_gen is not equal to the
one in iommu_state. Such a pasid_as should be treated as stale, so I
guess vtd_pasid_cache_valid() is still necessary.
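
A minimal standalone sketch of that generation check (simplified stand-in
types; only the gen comparison mirrors the real vtd_pasid_cache_valid()):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for IntelIOMMUState / VTDPASIDAddressSpace. */
    typedef struct {
        uint32_t pasid_cache_gen;      /* bumped on pasid cache invalidation */
    } IOMMUStateSketch;

    typedef struct {
        IOMMUStateSketch *iommu_state;
        uint32_t cached_gen;           /* generation when the pasid entry was cached */
    } PasidASSketch;

    /* A cached pasid entry is valid only if it was filled in the current
     * generation; bumping pasid_cache_gen stale-marks every older entry
     * without walking or dropping the hash table. */
    static bool pasid_cache_valid(PasidASSketch *as)
    {
        return as->cached_gen == as->iommu_state->pasid_cache_gen;
    }

    int main(void)
    {
        IOMMUStateSketch s = { .pasid_cache_gen = 1 };
        PasidASSketch as = { .iommu_state = &s, .cached_gen = 1 };

        printf("valid: %d\n", pasid_cache_valid(&as));   /* 1 */
        s.pasid_cache_gen++;                             /* e.g. a cache reset */
        printf("valid: %d\n", pasid_cache_valid(&as));   /* 0: now stale */
        return 0;
    }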

Regards,
Yi Liu
Peter Xu March 26, 2020, 1:02 p.m. UTC | #4
On Thu, Mar 26, 2020 at 05:41:39AM +0000, Liu, Yi L wrote:
> > From: Liu, Yi L
> > Sent: Wednesday, March 25, 2020 9:22 PM
> > To: 'Peter Xu' <peterx@redhat.com>
> > Subject: RE: [PATCH v1 20/22] intel_iommu: propagate PASID-based iotlb
> > invalidation to host
> > 
> > > From: Peter Xu <peterx@redhat.com>
> > > Sent: Wednesday, March 25, 2020 2:34 AM
> > > To: Liu, Yi L <yi.l.liu@intel.com>
> > > Subject: Re: [PATCH v1 20/22] intel_iommu: propagate PASID-based iotlb
> > > invalidation to host
> > >
> > > On Sun, Mar 22, 2020 at 05:36:17AM -0700, Liu Yi L wrote:
> > > > This patch propagates PASID-based iotlb invalidation to host.
> > > >
> > > > Intel VT-d 3.0 supports nested translation in PASID granular.
> > > > Guest SVA support could be implemented by configuring nested
> > > > translation on specific PASID. This is also known as dual stage DMA
> > > > translation.
> > > >
> > > > Under such configuration, guest owns the GVA->GPA translation which
> > > > is configured as first level page table in host side for a specific
> > > > pasid, and host owns GPA->HPA translation. As guest owns first level
> > > > translation table, piotlb invalidation should be propagated to host
> > > > since host IOMMU will cache first level page table related mappings
> > > > during DMA address translation.
> > > >
> > > > This patch traps the guest PASID-based iotlb flush and propagate it
> > > > to host.
> > > >
> > > > Cc: Kevin Tian <kevin.tian@intel.com>
> > > > Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
> > > > Cc: Peter Xu <peterx@redhat.com>
> > > > Cc: Yi Sun <yi.y.sun@linux.intel.com>
> > > > Cc: Paolo Bonzini <pbonzini@redhat.com>
> > > > Cc: Richard Henderson <rth@twiddle.net>
> > > > Cc: Eduardo Habkost <ehabkost@redhat.com>
> > > > Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
> > > > ---
> > > >  hw/i386/intel_iommu.c          | 139
> > > +++++++++++++++++++++++++++++++++++++++++
> > > >  hw/i386/intel_iommu_internal.h |   7 +++
> > > >  2 files changed, 146 insertions(+)
> > > >
> > > > diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index
> > > > b9ac07d..10d314d 100644
> > > > --- a/hw/i386/intel_iommu.c
> > > > +++ b/hw/i386/intel_iommu.c
> > > > @@ -3134,15 +3134,154 @@ static bool
> > > vtd_process_pasid_desc(IntelIOMMUState *s,
> > > >      return (ret == 0) ? true : false;  }
> > > >
> > > > +/**
> > > > + * Caller of this function should hold iommu_lock.
> > > > + */
> > > > +static void vtd_invalidate_piotlb(IntelIOMMUState *s,
> > > > +                                  VTDBus *vtd_bus,
> > > > +                                  int devfn,
> > > > +                                  DualIOMMUStage1Cache
> > > > +*stage1_cache) {
> > > > +    VTDHostIOMMUContext *vtd_dev_icx;
> > > > +    HostIOMMUContext *host_icx;
> > > > +
> > > > +    vtd_dev_icx = vtd_bus->dev_icx[devfn];
> > > > +    if (!vtd_dev_icx) {
> > > > +        goto out;
> > > > +    }
> > > > +    host_icx = vtd_dev_icx->host_icx;
> > > > +    if (!host_icx) {
> > > > +        goto out;
> > > > +    }
> > > > +    if (host_iommu_ctx_flush_stage1_cache(host_icx, stage1_cache)) {
> > > > +        error_report("Cache flush failed");
> > >
> > > I think this should not easily be triggered by the guest, but just in
> > > case... Let's use
> > > error_report_once() to be safe.
> > 
> > Agreed.
> > 
> > > > +    }
> > > > +out:
> > > > +    return;
> > > > +}
> > > > +
> > > > +static inline bool vtd_pasid_cache_valid(
> > > > +                          VTDPASIDAddressSpace *vtd_pasid_as) {
> > > > +    return vtd_pasid_as->iommu_state &&
                    ^^^^^^^^^^^^^^^^^^^^^^^^^

> > >
> > > This check can be dropped because always true?
> > >
> > > If you agree with both the changes, please add:
> > >
> > > Reviewed-by: Peter Xu <peterx@redhat.com>
> > 
> > I think the code should ensure all the pasid_as in hash table is valid. And we can
> > since all the operations are under protection of iommu_lock.
> > 
> Peter,
> 
> I think my reply was wrong. pasid_as in has table may be stale since
> the per pasid_as cache_gen may be not identical with the cache_gen
> in iommu_state. e.g. vtd_pasid_cache_reset() only increases the
> cache_gen in iommu_state. So there will be pasid_as in hash table
> which has cached pasid entry but its cache_gen is not equal to the
> one in iommu_state. For such pasid_as, we should treat it as stale.
> So I guess the vtd_pasid_cache_valid() is still necessary.

I guess you misread my comment. :)

I was saying the "vtd_pasid_as->iommu_state" check is not needed,
because iommu_state was always set if the address space is created.
vtd_pasid_cache_valid() is needed.

Also, please double-check that vtd_pasid_cache_reset() should drop
all the address spaces (as I think it should), not "only increase the
cache_gen".  IMHO you should increase the cache_gen only in the PSI
hook (vtd_pasid_cache_psi()).

Thanks,
Peter Xu March 26, 2020, 1:22 p.m. UTC | #5
On Thu, Mar 26, 2020 at 09:02:48AM -0400, Peter Xu wrote:

[...]

> > > > > +static inline bool vtd_pasid_cache_valid(
> > > > > +                          VTDPASIDAddressSpace *vtd_pasid_as) {
> > > > > +    return vtd_pasid_as->iommu_state &&
>                     ^^^^^^^^^^^^^^^^^^^^^^^^^
> 
> > > >
> > > > This check can be dropped because always true?
> > > >
> > > > If you agree with both the changes, please add:
> > > >
> > > > Reviewed-by: Peter Xu <peterx@redhat.com>
> > > 
> > > I think the code should ensure all the pasid_as in hash table is valid. And we can
> > > since all the operations are under protection of iommu_lock.
> > > 
> > Peter,
> > 
> > I think my reply was wrong. pasid_as in has table may be stale since
> > the per pasid_as cache_gen may be not identical with the cache_gen
> > in iommu_state. e.g. vtd_pasid_cache_reset() only increases the
> > cache_gen in iommu_state. So there will be pasid_as in hash table
> > which has cached pasid entry but its cache_gen is not equal to the
> > one in iommu_state. For such pasid_as, we should treat it as stale.
> > So I guess the vtd_pasid_cache_valid() is still necessary.
> 
> I guess you misread my comment. :)
> 
> I was saying the "vtd_pasid_as->iommu_state" check is not needed,
> because iommu_state was always set if the address space is created.
> vtd_pasid_cache_valid() is needed.
> 
> Also, please double confirm that vtd_pasid_cache_reset() should drop
> all the address spaces (as I think it should), not "only increase the
> cache_gen".  IMHO you should only increase the cache_gen in the PSI
> hook (vtd_pasid_cache_psi()) only.

Sorry, I mean GSI (vtd_pasid_cache_gsi), not PSI.
Yi Liu March 26, 2020, 1:23 p.m. UTC | #6
> From: Peter Xu <peterx@redhat.com>
> Sent: Thursday, March 26, 2020 9:03 PM
> To: Liu, Yi L <yi.l.liu@intel.com>
> Subject: Re: [PATCH v1 20/22] intel_iommu: propagate PASID-based iotlb
> invalidation to host
> 
> On Thu, Mar 26, 2020 at 05:41:39AM +0000, Liu, Yi L wrote:
> > > From: Liu, Yi L
> > > Sent: Wednesday, March 25, 2020 9:22 PM
> > > To: 'Peter Xu' <peterx@redhat.com>
> > > Subject: RE: [PATCH v1 20/22] intel_iommu: propagate PASID-based
> > > iotlb invalidation to host
> > >
> > > > From: Peter Xu <peterx@redhat.com>
> > > > Sent: Wednesday, March 25, 2020 2:34 AM
> > > > To: Liu, Yi L <yi.l.liu@intel.com>
> > > > Subject: Re: [PATCH v1 20/22] intel_iommu: propagate PASID-based
> > > > iotlb invalidation to host
> > > >
> > > > On Sun, Mar 22, 2020 at 05:36:17AM -0700, Liu Yi L wrote:
> > > > > This patch propagates PASID-based iotlb invalidation to host.
> > > > >
> > > > > Intel VT-d 3.0 supports nested translation in PASID granular.
> > > > > Guest SVA support could be implemented by configuring nested
> > > > > translation on specific PASID. This is also known as dual stage
> > > > > DMA translation.
> > > > >
> > > > > Under such configuration, guest owns the GVA->GPA translation
> > > > > which is configured as first level page table in host side for a
> > > > > specific pasid, and host owns GPA->HPA translation. As guest
> > > > > owns first level translation table, piotlb invalidation should
> > > > > be propagated to host since host IOMMU will cache first level
> > > > > page table related mappings during DMA address translation.
> > > > >
> > > > > This patch traps the guest PASID-based iotlb flush and propagate
> > > > > it to host.
> > > > >
> > > > > Cc: Kevin Tian <kevin.tian@intel.com>
> > > > > Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
> > > > > Cc: Peter Xu <peterx@redhat.com>
> > > > > Cc: Yi Sun <yi.y.sun@linux.intel.com>
> > > > > Cc: Paolo Bonzini <pbonzini@redhat.com>
> > > > > Cc: Richard Henderson <rth@twiddle.net>
> > > > > Cc: Eduardo Habkost <ehabkost@redhat.com>
> > > > > Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
> > > > > ---
> > > > >  hw/i386/intel_iommu.c          | 139
> > > > +++++++++++++++++++++++++++++++++++++++++
> > > > >  hw/i386/intel_iommu_internal.h |   7 +++
> > > > >  2 files changed, 146 insertions(+)
> > > > >
> > > > > diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index
> > > > > b9ac07d..10d314d 100644
> > > > > --- a/hw/i386/intel_iommu.c
> > > > > +++ b/hw/i386/intel_iommu.c
> > > > > @@ -3134,15 +3134,154 @@ static bool
> > > > vtd_process_pasid_desc(IntelIOMMUState *s,
> > > > >      return (ret == 0) ? true : false;  }
> > > > >
> > > > > +/**
> > > > > + * Caller of this function should hold iommu_lock.
> > > > > + */
> > > > > +static void vtd_invalidate_piotlb(IntelIOMMUState *s,
> > > > > +                                  VTDBus *vtd_bus,
> > > > > +                                  int devfn,
> > > > > +                                  DualIOMMUStage1Cache
> > > > > +*stage1_cache) {
> > > > > +    VTDHostIOMMUContext *vtd_dev_icx;
> > > > > +    HostIOMMUContext *host_icx;
> > > > > +
> > > > > +    vtd_dev_icx = vtd_bus->dev_icx[devfn];
> > > > > +    if (!vtd_dev_icx) {
> > > > > +        goto out;
> > > > > +    }
> > > > > +    host_icx = vtd_dev_icx->host_icx;
> > > > > +    if (!host_icx) {
> > > > > +        goto out;
> > > > > +    }
> > > > > +    if (host_iommu_ctx_flush_stage1_cache(host_icx, stage1_cache)) {
> > > > > +        error_report("Cache flush failed");
> > > >
> > > > I think this should not easily be triggered by the guest, but just
> > > > in case... Let's use
> > > > error_report_once() to be safe.
> > >
> > > Agreed.
> > >
> > > > > +    }
> > > > > +out:
> > > > > +    return;
> > > > > +}
> > > > > +
> > > > > +static inline bool vtd_pasid_cache_valid(
> > > > > +                          VTDPASIDAddressSpace *vtd_pasid_as) {
> > > > > +    return vtd_pasid_as->iommu_state &&
>                     ^^^^^^^^^^^^^^^^^^^^^^^^^
> 
> > > >
> > > > This check can be dropped because always true?
> > > >
> > > > If you agree with both the changes, please add:
> > > >
> > > > Reviewed-by: Peter Xu <peterx@redhat.com>
> > >
> > > I think the code should ensure all the pasid_as in hash table is
> > > valid. And we can since all the operations are under protection of iommu_lock.
> > >
> > Peter,
> >
> > I think my reply was wrong. pasid_as in has table may be stale since
> > the per pasid_as cache_gen may be not identical with the cache_gen in
> > iommu_state. e.g. vtd_pasid_cache_reset() only increases the cache_gen
> > in iommu_state. So there will be pasid_as in hash table which has
> > cached pasid entry but its cache_gen is not equal to the one in
> > iommu_state. For such pasid_as, we should treat it as stale.
> > So I guess the vtd_pasid_cache_valid() is still necessary.
> 
> I guess you misread my comment. :)
> 
> I was saying the "vtd_pasid_as->iommu_state" check is not needed, because
> iommu_state was always set if the address space is created.
> vtd_pasid_cache_valid() is needed.

ok, I see.

> Also, please double confirm that vtd_pasid_cache_reset() should drop all the
> address spaces (as I think it should), not "only increase the cache_gen". 

Yes, I'm just evaluating it. vtd_pasid_cache_reset() should drop all the
pasid_as and needs to notify the host to unbind the pasids.

> IMHO you
> should only increase the cache_gen in the PSI hook (vtd_pasid_cache_psi()) only.

I don't quite get this. Why should the cache_gen increase only happen in
the PSI hook? I think cache_gen is used to avoid dropping all the
pasid_as when a pasid cache reset happens.

Regards,
Yi Liu
Yi Liu March 26, 2020, 1:33 p.m. UTC | #7
> From: Peter Xu <peterx@redhat.com>
> Sent: Thursday, March 26, 2020 9:23 PM
> To: Liu, Yi L <yi.l.liu@intel.com>
> Subject: Re: [PATCH v1 20/22] intel_iommu: propagate PASID-based iotlb
> invalidation to host
> 
> On Thu, Mar 26, 2020 at 09:02:48AM -0400, Peter Xu wrote:
> 
> [...]
> 
> > > > > > +static inline bool vtd_pasid_cache_valid(
> > > > > > +                          VTDPASIDAddressSpace *vtd_pasid_as) {
> > > > > > +    return vtd_pasid_as->iommu_state &&
> >                     ^^^^^^^^^^^^^^^^^^^^^^^^^
> >
> > > > >
> > > > > This check can be dropped because always true?
> > > > >
> > > > > If you agree with both the changes, please add:
> > > > >
> > > > > Reviewed-by: Peter Xu <peterx@redhat.com>
> > > >
> > > > I think the code should ensure all the pasid_as in hash table is
> > > > valid. And we can since all the operations are under protection of iommu_lock.
> > > >
> > > Peter,
> > >
> > > I think my reply was wrong. pasid_as in has table may be stale since
> > > the per pasid_as cache_gen may be not identical with the cache_gen
> > > in iommu_state. e.g. vtd_pasid_cache_reset() only increases the
> > > cache_gen in iommu_state. So there will be pasid_as in hash table
> > > which has cached pasid entry but its cache_gen is not equal to the
> > > one in iommu_state. For such pasid_as, we should treat it as stale.
> > > So I guess the vtd_pasid_cache_valid() is still necessary.
> >
> > I guess you misread my comment. :)
> >
> > I was saying the "vtd_pasid_as->iommu_state" check is not needed,
> > because iommu_state was always set if the address space is created.
> > vtd_pasid_cache_valid() is needed.
> >
> > Also, please double confirm that vtd_pasid_cache_reset() should drop
> > all the address spaces (as I think it should), not "only increase the
> > cache_gen".  IMHO you should only increase the cache_gen in the PSI
> > hook (vtd_pasid_cache_psi()) only.
> 
> Sorry, I mean GSI (vtd_pasid_cache_gsi), not PSI.

Got it. That really confused me. :-)

Regards,
Yi Liu

Patch

diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index b9ac07d..10d314d 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -3134,15 +3134,154 @@  static bool vtd_process_pasid_desc(IntelIOMMUState *s,
     return (ret == 0) ? true : false;
 }
 
+/**
+ * Caller of this function should hold iommu_lock.
+ */
+static void vtd_invalidate_piotlb(IntelIOMMUState *s,
+                                  VTDBus *vtd_bus,
+                                  int devfn,
+                                  DualIOMMUStage1Cache *stage1_cache)
+{
+    VTDHostIOMMUContext *vtd_dev_icx;
+    HostIOMMUContext *host_icx;
+
+    vtd_dev_icx = vtd_bus->dev_icx[devfn];
+    if (!vtd_dev_icx) {
+        goto out;
+    }
+    host_icx = vtd_dev_icx->host_icx;
+    if (!host_icx) {
+        goto out;
+    }
+    if (host_iommu_ctx_flush_stage1_cache(host_icx, stage1_cache)) {
+        error_report("Cache flush failed");
+    }
+out:
+    return;
+}
+
+static inline bool vtd_pasid_cache_valid(
+                          VTDPASIDAddressSpace *vtd_pasid_as)
+{
+    return vtd_pasid_as->iommu_state &&
+           (vtd_pasid_as->iommu_state->pasid_cache_gen
+             == vtd_pasid_as->pasid_cache_entry.pasid_cache_gen);
+}
+
+/**
+ * This function is a loop function for the s->vtd_pasid_as
+ * list with VTDPIOTLBInvInfo as execution filter. It propagates
+ * the piotlb invalidation to host. Caller of this function
+ * should hold iommu_lock.
+ */
+static void vtd_flush_pasid_iotlb(gpointer key, gpointer value,
+                                  gpointer user_data)
+{
+    VTDPIOTLBInvInfo *piotlb_info = user_data;
+    VTDPASIDAddressSpace *vtd_pasid_as = value;
+    uint16_t did;
+
+    /*
+     * Needs to check whether the pasid entry cache stored in
+     * vtd_pasid_as is valid or not. "invalid" means the pasid
+     * cache has been flushed, thus host should have done piotlb
+     * invalidation together with a pasid cache invalidation, so
+     * no need to pass down piotlb invalidation to host for better
+     * performance. Only when pasid entry cache is "valid", should
+     * a piotlb invalidation be propagated to host since it means
+     * guest just modified a mapping in its page table.
+     */
+    if (!vtd_pasid_cache_valid(vtd_pasid_as)) {
+        return;
+    }
+
+    did = vtd_pe_get_domain_id(
+                &(vtd_pasid_as->pasid_cache_entry.pasid_entry));
+
+    if ((piotlb_info->domain_id == did) &&
+        (piotlb_info->pasid == vtd_pasid_as->pasid)) {
+        vtd_invalidate_piotlb(vtd_pasid_as->iommu_state,
+                              vtd_pasid_as->vtd_bus,
+                              vtd_pasid_as->devfn,
+                              piotlb_info->stage1_cache);
+    }
+
+    /*
+     * TODO: needs to add QEMU piotlb flush when QEMU piotlb
+     * infrastructure is ready. For now, it is enough for passthru
+     * devices.
+     */
+}
+
 static void vtd_piotlb_pasid_invalidate(IntelIOMMUState *s,
                                         uint16_t domain_id,
                                         uint32_t pasid)
 {
+    VTDPIOTLBInvInfo piotlb_info;
+    DualIOMMUStage1Cache *stage1_cache;
+    struct iommu_cache_invalidate_info *cache_info;
+
+    stage1_cache = g_malloc0(sizeof(*stage1_cache));
+    stage1_cache->pasid = pasid;
+
+    cache_info = &stage1_cache->cache_info;
+    cache_info->version = IOMMU_UAPI_VERSION;
+    cache_info->cache = IOMMU_CACHE_INV_TYPE_IOTLB;
+    cache_info->granularity = IOMMU_INV_GRANU_PASID;
+    cache_info->pasid_info.pasid = pasid;
+    cache_info->pasid_info.flags = IOMMU_INV_PASID_FLAGS_PASID;
+
+    piotlb_info.domain_id = domain_id;
+    piotlb_info.pasid = pasid;
+    piotlb_info.stage1_cache = stage1_cache;
+
+    vtd_iommu_lock(s);
+    /*
+     * Here loops all the vtd_pasid_as instances in s->vtd_pasid_as
+     * to find out the affected devices since piotlb invalidation
+     * should check pasid cache per architecture point of view.
+     */
+    g_hash_table_foreach(s->vtd_pasid_as,
+                         vtd_flush_pasid_iotlb, &piotlb_info);
+    vtd_iommu_unlock(s);
+    g_free(stage1_cache);
 }
 
 static void vtd_piotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
                              uint32_t pasid, hwaddr addr, uint8_t am, bool ih)
 {
+    VTDPIOTLBInvInfo piotlb_info;
+    DualIOMMUStage1Cache *stage1_cache;
+    struct iommu_cache_invalidate_info *cache_info;
+
+    stage1_cache = g_malloc0(sizeof(*stage1_cache));
+    stage1_cache->pasid = pasid;
+
+    cache_info = &stage1_cache->cache_info;
+    cache_info->version = IOMMU_UAPI_VERSION;
+    cache_info->cache = IOMMU_CACHE_INV_TYPE_IOTLB;
+    cache_info->granularity = IOMMU_INV_GRANU_ADDR;
+    cache_info->addr_info.flags = IOMMU_INV_ADDR_FLAGS_PASID;
+    cache_info->addr_info.flags |= ih ? IOMMU_INV_ADDR_FLAGS_LEAF : 0;
+    cache_info->addr_info.pasid = pasid;
+    cache_info->addr_info.addr = addr;
+    cache_info->addr_info.granule_size = 1 << (12 + am);
+    cache_info->addr_info.nb_granules = 1;
+
+    piotlb_info.domain_id = domain_id;
+    piotlb_info.pasid = pasid;
+    piotlb_info.stage1_cache = stage1_cache;
+
+    vtd_iommu_lock(s);
+    /*
+     * Here loops all the vtd_pasid_as instances in s->vtd_pasid_as
+     * to find out the affected devices since piotlb invalidation
+     * should check pasid cache per architecture point of view.
+     */
+    g_hash_table_foreach(s->vtd_pasid_as,
+                         vtd_flush_pasid_iotlb, &piotlb_info);
+    vtd_iommu_unlock(s);
+    g_free(stage1_cache);
 }
 
 static bool vtd_process_piotlb_desc(IntelIOMMUState *s,
diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
index 314e2c4..967cc4f 100644
--- a/hw/i386/intel_iommu_internal.h
+++ b/hw/i386/intel_iommu_internal.h
@@ -560,6 +560,13 @@  struct VTDPASIDCacheInfo {
                                       VTD_PASID_CACHE_DEVSI)
 typedef struct VTDPASIDCacheInfo VTDPASIDCacheInfo;
 
+struct VTDPIOTLBInvInfo {
+    uint16_t domain_id;
+    uint32_t pasid;
+    DualIOMMUStage1Cache *stage1_cache;
+};
+typedef struct VTDPIOTLBInvInfo VTDPIOTLBInvInfo;
+
 /* PASID Table Related Definitions */
 #define VTD_PASID_DIR_BASE_ADDR_MASK  (~0xfffULL)
 #define VTD_PASID_TABLE_BASE_ADDR_MASK (~0xfffULL)