
[V2,23/25] x86/vvtd: Handle interrupt translation faults

Message ID 1502310866-10450-24-git-send-email-tianyu.lan@intel.com (mailing list archive)
State New, archived

Commit Message

Lan Tianyu Aug. 9, 2017, 8:34 p.m. UTC
From: Chao Gao <chao.gao@intel.com>

Interrupt translation faults are non-recoverable faults. When such a
fault is triggered, the fault information needs to be populated into
the Fault Recording Registers and a vIOMMU MSI interrupt injected to
notify the guest IOMMU driver to deal with the fault.

This patch emulates the hardware's handling of interrupt translation
faults (more information about the process can be found in the VT-d
spec, chapter "Translation Faults", section "Non-Recoverable Fault
Reporting" and section "Non-Recoverable Logging").
Specifically, vvtd_record_fault() records the fault information and
vvtd_report_non_recoverable_fault() reports faults to software.
Currently, only Primary Fault Logging is supported and the Number of
Fault-recording Registers is 1.

Signed-off-by: Chao Gao <chao.gao@intel.com>
Signed-off-by: Lan Tianyu <tianyu.lan@intel.com>
---
 xen/drivers/passthrough/vtd/iommu.h |  60 +++++++--
 xen/drivers/passthrough/vtd/vvtd.c  | 238 +++++++++++++++++++++++++++++++++++-
 2 files changed, 286 insertions(+), 12 deletions(-)

Comments

Roger Pau Monné Aug. 23, 2017, 11:51 a.m. UTC | #1
On Wed, Aug 09, 2017 at 04:34:24PM -0400, Lan Tianyu wrote:
> From: Chao Gao <chao.gao@intel.com>
> 
> Interrupt translation faults are non-recoverable fault. When faults
                                                   ^ faults
> are triggered, it needs to populate fault info to Fault Recording
> Registers and inject vIOMMU msi interrupt to notify guest IOMMU driver
> to deal with faults.
> 
> This patch emulates hardware's handling interrupt translation
> faults (more information about the process can be found in VT-d spec,
> chipter "Translation Faults", section "Non-Recoverable Fault
  ^ chapter
> Reporting" and section "Non-Recoverable Logging").
> Specifically, viommu_record_fault() records the fault information and
> viommu_report_non_recoverable_fault() reports faults to software.
> Currently, only Primary Fault Logging is supported and the Number of
> Fault-recording Registers is 1.
> 
> Signed-off-by: Chao Gao <chao.gao@intel.com>
> Signed-off-by: Lan Tianyu <tianyu.lan@intel.com>
> ---
>  xen/drivers/passthrough/vtd/iommu.h |  60 +++++++--
>  xen/drivers/passthrough/vtd/vvtd.c  | 238 +++++++++++++++++++++++++++++++++++-
>  2 files changed, 286 insertions(+), 12 deletions(-)
> 
> diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
> index e323352..a9e905b 100644
> --- a/xen/drivers/passthrough/vtd/iommu.h
> +++ b/xen/drivers/passthrough/vtd/iommu.h
> @@ -226,26 +226,66 @@
>  #define DMA_CCMD_CAIG_MASK(x) (((u64)x) & ((u64) 0x3 << 59))
>  
>  /* FECTL_REG */
> -#define DMA_FECTL_IM (((u64)1) << 31)
> +#define DMA_FECTL_IM_BIT 31

_SHIFT (here and below).
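
E.g. (just renaming the macros above along those lines):

#define DMA_FECTL_IM_SHIFT 31
#define DMA_FECTL_IM (1U << DMA_FECTL_IM_SHIFT)
#define DMA_FECTL_IP_SHIFT 30
#define DMA_FECTL_IP (1U << DMA_FECTL_IP_SHIFT)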

> +#define DMA_FECTL_IM (1U << DMA_FECTL_IM_BIT)
> +#define DMA_FECTL_IP_BIT 30
> +#define DMA_FECTL_IP (1U << DMA_FECTL_IP_BIT)
>  
>  /* FSTS_REG */
> -#define DMA_FSTS_PFO ((u64)1 << 0)
> -#define DMA_FSTS_PPF ((u64)1 << 1)
> -#define DMA_FSTS_AFO ((u64)1 << 2)
> -#define DMA_FSTS_APF ((u64)1 << 3)
> -#define DMA_FSTS_IQE ((u64)1 << 4)
> -#define DMA_FSTS_ICE ((u64)1 << 5)
> -#define DMA_FSTS_ITE ((u64)1 << 6)
> -#define DMA_FSTS_FAULTS    DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_AFO | DMA_FSTS_APF | DMA_FSTS_IQE | DMA_FSTS_ICE | DMA_FSTS_ITE
> +#define DMA_FSTS_PFO_BIT 0
> +#define DMA_FSTS_PFO (1U << DMA_FSTS_PFO_BIT)
> +#define DMA_FSTS_PPF_BIT 1
> +#define DMA_FSTS_PPF (1U << DMA_FSTS_PPF_BIT)
> +#define DMA_FSTS_AFO (1U << 2)
> +#define DMA_FSTS_APF (1U << 3)
> +#define DMA_FSTS_IQE (1U << 4)
> +#define DMA_FSTS_ICE (1U << 5)
> +#define DMA_FSTS_ITE (1U << 6)
> +#define DMA_FSTS_PRO_BIT 7
> +#define DMA_FSTS_PRO (1U << DMA_FSTS_PRO_BIT)
> +#define DMA_FSTS_FAULTS    (DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_AFO | DMA_FSTS_APF | DMA_FSTS_IQE | DMA_FSTS_ICE | DMA_FSTS_ITE | DMA_FSTS_PRO)
> +#define DMA_FSTS_RW1CS     (DMA_FSTS_PFO | DMA_FSTS_AFO | DMA_FSTS_APF | DMA_FSTS_IQE | DMA_FSTS_ICE | DMA_FSTS_ITE | DMA_FSTS_PRO)

Please split into separate lines.
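
I.e. something like (same bits, just wrapped):

#define DMA_FSTS_FAULTS    (DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_AFO | \
                            DMA_FSTS_APF | DMA_FSTS_IQE | DMA_FSTS_ICE | \
                            DMA_FSTS_ITE | DMA_FSTS_PRO)
#define DMA_FSTS_RW1CS     (DMA_FSTS_PFO | DMA_FSTS_AFO | DMA_FSTS_APF | \
                            DMA_FSTS_IQE | DMA_FSTS_ICE | DMA_FSTS_ITE | \
                            DMA_FSTS_PRO)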

>  #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
>  
>  /* FRCD_REG, 32 bits access */
> -#define DMA_FRCD_F (((u64)1) << 31)
> +#define DMA_FRCD_LEN            0x10
> +#define DMA_FRCD0_OFFSET        0x0

0

> +#define DMA_FRCD1_OFFSET        0x4
> +#define DMA_FRCD2_OFFSET        0x8
> +#define DMA_FRCD3_OFFSET        0xc
> +#define DMA_FRCD3_FR_MASK       0xffUL
> +#define DMA_FRCD_F_BIT 31
> +#define DMA_FRCD_F ((u64)1 << DMA_FRCD_F_BIT)
> +#define DMA_FRCD(idx, offset) (DMA_CAP_FRO_OFFSET + DMA_FRCD_LEN * idx + offset)

idx and offset need parentheses.
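
I.e. something along the lines of:

#define DMA_FRCD(idx, offset) \
    (DMA_CAP_FRO_OFFSET + DMA_FRCD_LEN * (idx) + (offset))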

>  #define dma_frcd_type(d) ((d >> 30) & 1)
>  #define dma_frcd_fault_reason(c) (c & 0xff)
>  #define dma_frcd_source_id(c) (c & 0xffff)
>  #define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
>  
> +struct vtd_fault_record_register
> +{
> +    union {
> +        struct {
> +            u64 lo;

uint64_t here and below.

> +            u64 hi;
> +        } bits;

s/bits/raw/? I don't have a strong opinion though.

> +        struct {
> +            u64 rsvd0   :12,
> +                FI      :52; /* Fault Info */
> +            u64 SID     :16, /* Source Identifier */
> +                rsvd1   :9,
> +                PRIV    :1,  /* Privilege Mode Requested */
> +                EXE     :1,  /* Execute Permission Requested */
> +                PP      :1,  /* PASID Present */
> +                FR      :8,  /* Fault Reason */
> +                PV      :20, /* PASID Value */
> +                AT      :2,  /* Address Type */
> +                T       :1,  /* Type. (0) Write (1) Read/AtomicOp */
> +                F       :1;  /* Fault */

I don't think we use capital letters for struct fields. Also some of
them could be more descriptive IMHO, like T -> type, F -> fault, FI ->
fault_info...
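
For example (purely illustrative renaming, keeping the same layout and
field widths as above; the names are only suggestions):

        struct {
            uint64_t rsvd0        :12,
                     fault_info   :52; /* Fault Info */
            uint64_t source_id    :16, /* Source Identifier */
                     rsvd1        :9,
                     priv         :1,  /* Privilege Mode Requested */
                     exe          :1,  /* Execute Permission Requested */
                     pasid_pres   :1,  /* PASID Present */
                     fault_reason :8,  /* Fault Reason */
                     pasid_val    :20, /* PASID Value */
                     addr_type    :2,  /* Address Type */
                     type         :1,  /* Type. (0) Write (1) Read/AtomicOp */
                     fault        :1;  /* Fault */
        } fields;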

> +        } fields;
> +    };
> +};
> +
>  enum VTD_FAULT_TYPE
>  {
>      /* Interrupt remapping transition faults */
> diff --git a/xen/drivers/passthrough/vtd/vvtd.c b/xen/drivers/passthrough/vtd/vvtd.c
> index eae8f11..f1e6d01 100644
> --- a/xen/drivers/passthrough/vtd/vvtd.c
> +++ b/xen/drivers/passthrough/vtd/vvtd.c
> @@ -19,6 +19,7 @@
>   */
>  
>  #include <xen/domain_page.h>
> +#include <xen/lib.h>
>  #include <xen/sched.h>
>  #include <xen/types.h>
>  #include <xen/viommu.h>
> @@ -30,6 +31,7 @@
>  #include <asm/io_apic.h>
>  #include <asm/page.h>
>  #include <asm/p2m.h>
> +#include <asm/system.h>

system.h already includes xen/lib.h IIRC.

>  
>  #include "iommu.h"
>  #include "vtd.h"
> @@ -49,6 +51,8 @@ struct hvm_hw_vvtd_regs {
>  struct vvtd {
>      /* VIOMMU_STATUS_XXX */
>      int status;
> +    /* Fault Recording index */
> +    int frcd_idx;

fault_index?

>      /* Address range of remapping hardware register-set */
>      uint64_t base_addr;
>      uint64_t length;
> @@ -97,6 +101,23 @@ static inline struct vvtd *vcpu_vvtd(struct vcpu *v)
>      return domain_vvtd(v->domain);
>  }
>  
> +static inline int vvtd_test_and_set_bit(struct vvtd *vvtd, uint32_t reg,
> +                                        int nr)

unsigned int for nr, and I'm not really sure about the usefulness of
these helpers. In any case inline should not be used here; instead let
the compiler optimize this.

> +{
> +    return test_and_set_bit(nr, (uint32_t *)&vvtd->regs->data[reg]);
> +}
> +
> +static inline int vvtd_test_and_clear_bit(struct vvtd *vvtd, uint32_t reg,
> +                                          int nr)
> +{
> +    return test_and_clear_bit(nr, (uint32_t *)&vvtd->regs->data[reg]);
> +}
> +
> +static inline int vvtd_test_bit(struct vvtd *vvtd, uint32_t reg, int nr)
> +{
> +    return test_bit(nr, (uint32_t *)&vvtd->regs->data[reg]);
> +}
> +
>  static inline void __vvtd_set_bit(struct vvtd *vvtd, uint32_t reg, int nr)
>  {
>      return __set_bit(nr, (uint32_t *)&vvtd->regs->data[reg]);
> @@ -232,6 +253,24 @@ static int vvtd_delivery(
>      return 0;
>  }
>  
> +void vvtd_generate_interrupt(struct vvtd *vvtd,

const.

> +                             uint32_t addr,
> +                             uint32_t data)
> +{
> +    uint8_t dest, dm, dlm, tm, vector;
> +
> +    VVTD_DEBUG(VVTD_DBG_FAULT, "Sending interrupt %x %x to d%d",
> +               addr, data, vvtd->domain->domain_id);
> +
> +    dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;

MASK_EXTR (here and below).
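
E.g. (sketch, using the existing MASK_EXTR() helper):

    dest = MASK_EXTR(addr, MSI_ADDR_DEST_ID_MASK);
    dlm = MASK_EXTR(data, MSI_DATA_DELIVERY_MODE_MASK);
    tm = MASK_EXTR(data, MSI_DATA_TRIGGER_MASK);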

> +    dm = !!(addr & MSI_ADDR_DESTMODE_MASK);
> +    dlm = (data & MSI_DATA_DELIVERY_MODE_MASK) >> MSI_DATA_DELIVERY_MODE_SHIFT;
> +    tm = (data & MSI_DATA_TRIGGER_MASK) >> MSI_DATA_TRIGGER_SHIFT;
> +    vector = data & MSI_DATA_VECTOR_MASK;
> +
> +    vvtd_delivery(vvtd->domain, vector, dest, dm, dlm, tm);
> +}
> +
>  static uint32_t irq_remapping_request_index(struct irq_remapping_request *irq)
>  {
>      if ( irq->type == VIOMMU_REQUEST_IRQ_MSI )
> @@ -260,11 +299,189 @@ static inline uint32_t irte_dest(struct vvtd *vvtd, uint32_t dest)
>      return DMA_IRTA_EIME(irta) ? dest : MASK_EXTR(dest, IRTE_xAPIC_DEST_MASK);
>  }
>  
> +static void vvtd_report_non_recoverable_fault(struct vvtd *vvtd, int reason)
> +{
> +    uint32_t fsts;
> +
> +    ASSERT(reason & DMA_FSTS_FAULTS);
> +    fsts = vvtd_get_reg(vvtd, DMAR_FSTS_REG);
> +    __vvtd_set_bit(vvtd, DMAR_FSTS_REG, reason);

I don't understand this, is reason a bit position or a mask?

DMA_FSTS_FAULTS seems to be a mask, that should be set into DMAR_FSTS_REG?

> +
> +    /*
> +     * Accoroding to VT-d spec "Non-Recoverable Fault Event" chapter, if
> +     * there are any previously reported interrupt conditions that are yet to
> +     * be sevices by software, the Fault Event interrrupt is not generated.
> +     */
> +    if ( fsts & DMA_FSTS_FAULTS )
> +        return;
> +
> +    __vvtd_set_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IP_BIT);
> +    if ( !vvtd_test_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IM_BIT) )
> +    {
> +        uint32_t fe_data, fe_addr;

Newline.

> +        fe_data = vvtd_get_reg(vvtd, DMAR_FEDATA_REG);
> +        fe_addr = vvtd_get_reg(vvtd, DMAR_FEADDR_REG);
> +        vvtd_generate_interrupt(vvtd, fe_addr, fe_data);
> +        __vvtd_clear_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IP_BIT);
> +    }
> +}
> +
> +static void vvtd_recomputing_ppf(struct vvtd *vvtd)

recompute, or maybe better update?

> +{
> +    int i;

unsigned int.

> +
> +    for ( i = 0; i < DMA_FRCD_REG_NR; i++ )
> +    {
> +        if ( vvtd_test_bit(vvtd, DMA_FRCD(i, DMA_FRCD3_OFFSET),
> +                           DMA_FRCD_F_BIT) )
> +        {
> +            vvtd_report_non_recoverable_fault(vvtd, DMA_FSTS_PPF_BIT);
> +            return;
> +        }
> +    }
> +    /*
> +     * No Primary Fault is in Fault Record Registers, thus clear PPF bit in
> +     * FSTS.
> +     */
> +    __vvtd_clear_bit(vvtd, DMAR_FSTS_REG, DMA_FSTS_PPF_BIT);
> +
> +    /* If no fault is in FSTS, clear pending bit in FECTL. */
> +    if ( !(vvtd_get_reg(vvtd, DMAR_FSTS_REG) & DMA_FSTS_FAULTS) )
> +        __vvtd_clear_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IP_BIT);
> +}
> +
> +/*
> + * Commit a frcd to emulated Fault Record Registers.

frcd is not really helpful here, you should expand this into something
that's readable.
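
E.g. something like (wording only a suggestion):

/*
 * Write a fault record into the emulated Fault Recording Registers and
 * recompute the Primary Pending Fault (PPF) state accordingly.
 */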

> + */
> +static void vvtd_commit_frcd(struct vvtd *vvtd, int idx,
> +                             struct vtd_fault_record_register *frcd)
> +{
> +    vvtd_set_reg_quad(vvtd, DMA_FRCD(idx, DMA_FRCD0_OFFSET), frcd->bits.lo);
> +    vvtd_set_reg_quad(vvtd, DMA_FRCD(idx, DMA_FRCD2_OFFSET), frcd->bits.hi);
> +    vvtd_recomputing_ppf(vvtd);
> +}
> +
> +/*
> + * Allocate a FRCD for the caller. If success, return the FRI. Or, return -1
> + * when failure.
> + */
> +static int vvtd_alloc_frcd(struct vvtd *vvtd)
> +{
> +    int prev;
> +
> +    /* Set the F bit to indicate the FRCD is in use. */
> +    if ( vvtd_test_and_set_bit(vvtd, DMA_FRCD(vvtd->frcd_idx, DMA_FRCD3_OFFSET),
> +                               DMA_FRCD_F_BIT) )

Shouldn't this be !vvtd_test_and_set_bit?

> +    {
> +        prev = vvtd->frcd_idx;
> +        vvtd->frcd_idx = (prev + 1) % DMA_FRCD_REG_NR;
> +        return vvtd->frcd_idx;
> +    }
> +    return -1;

-ENOMEM?

AFAICT this happens when you cannot find a free register?

> +}
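
FWIW, something along these lines is probably what was meant (untested
sketch; it returns the index that was just marked in use, or -ENOMEM):

static int vvtd_alloc_frcd(struct vvtd *vvtd)
{
    unsigned int idx = vvtd->frcd_idx;

    /* Try to claim the current record by setting its F bit. */
    if ( !vvtd_test_and_set_bit(vvtd, DMA_FRCD(idx, DMA_FRCD3_OFFSET),
                                DMA_FRCD_F_BIT) )
    {
        vvtd->frcd_idx = (idx + 1) % DMA_FRCD_REG_NR;
        return idx;
    }

    return -ENOMEM;
}

(The frcd_idx == -1 check in vvtd_record_fault() would then need to
become frcd_idx < 0.)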
> +
> +static void vvtd_free_frcd(struct vvtd *vvtd, int i)
> +{
> +    __vvtd_clear_bit(vvtd, DMA_FRCD(i, DMA_FRCD3_OFFSET), DMA_FRCD_F_BIT);
> +}
> +
>  static int vvtd_record_fault(struct vvtd *vvtd,
> -                             struct irq_remapping_request *irq,
> +                             struct irq_remapping_request *request,
>                               int reason)
>  {
> -    return 0;
> +    struct vtd_fault_record_register frcd;
> +    int frcd_idx;
> +
> +    switch(reason)
> +    {
> +    case VTD_FR_IR_REQ_RSVD:
> +    case VTD_FR_IR_INDEX_OVER:
> +    case VTD_FR_IR_ENTRY_P:
> +    case VTD_FR_IR_ROOT_INVAL:
> +    case VTD_FR_IR_IRTE_RSVD:
> +    case VTD_FR_IR_REQ_COMPAT:
> +    case VTD_FR_IR_SID_ERR:
> +        if ( vvtd_test_bit(vvtd, DMAR_FSTS_REG, DMA_FSTS_PFO_BIT) )
> +            return X86EMUL_OKAY;
> +
> +        /* No available Fault Record means Fault overflowed */
> +        frcd_idx = vvtd_alloc_frcd(vvtd);
> +        if ( frcd_idx == -1 )
> +        {
> +            vvtd_report_non_recoverable_fault(vvtd, DMA_FSTS_PFO_BIT);
> +            return X86EMUL_OKAY;
> +        }
> +        memset(&frcd, 0, sizeof(frcd));
> +        frcd.fields.FR = (u8)reason;
> +        frcd.fields.FI = ((u64)irq_remapping_request_index(request)) << 36;
> +        frcd.fields.SID = (u16)request->source_id;
> +        frcd.fields.F = 1;
> +        vvtd_commit_frcd(vvtd, frcd_idx, &frcd);
> +        return X86EMUL_OKAY;
> +
> +    default:

Other reasons are just ignored? Should this have an ASSERT_UNREACHABLE
maybe?

> +        break;
> +    }
> +
> +    gdprintk(XENLOG_ERR, "Can't handle vVTD Fault (reason 0x%x).", reason);
> +    domain_crash(vvtd->domain);

Oh, I see. Is it expected that such faults with unhandled reasons can
be somehow generated by the domain itself?

> +    return X86EMUL_OKAY;
> +}
> +
> +static int vvtd_write_frcd3(struct vvtd *vvtd, uint32_t val)
> +{
> +    /* Writing a 1 means clear fault */
> +    if ( val & DMA_FRCD_F )
> +    {
> +        vvtd_free_frcd(vvtd, 0);
> +        vvtd_recomputing_ppf(vvtd);
> +    }
> +    return X86EMUL_OKAY;
> +}
> +
> +static int vvtd_write_fectl(struct vvtd *vvtd, uint32_t val)
> +{
> +    /*
> +     * Only DMA_FECTL_IM bit is writable. Generate pending event when unmask.
> +     */
> +    if ( !(val & DMA_FECTL_IM) )
> +    {
> +        /* Clear IM */
> +        __vvtd_clear_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IM_BIT);
> +        if ( vvtd_test_and_clear_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IP_BIT) )
> +        {
> +            uint32_t fe_data, fe_addr;

Newline.

> +            fe_data = vvtd_get_reg(vvtd, DMAR_FEDATA_REG);
> +            fe_addr = vvtd_get_reg(vvtd, DMAR_FEADDR_REG);
> +            vvtd_generate_interrupt(vvtd, fe_addr, fe_data);
> +        }
> +    }
> +    else
> +        __vvtd_set_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IM_BIT);
> +
> +    return X86EMUL_OKAY;
> +}
> +
> +static int vvtd_write_fsts(struct vvtd *vvtd, uint32_t val)
> +{
> +    int i, max_fault_index = DMA_FSTS_PRO_BIT;

unsigned int.

> +    uint64_t bits_to_clear = val & DMA_FSTS_RW1CS;
> +
> +    i = find_first_bit(&bits_to_clear, max_fault_index / 8 + 1);
> +    while ( i <= max_fault_index )

Shouldn't you check whether bits_to_clear is not 0 also? And I don't
remember, but is the return of find_first_bit based on 0 or 1 (ie: is
bit 0 reported as 0 or 1).
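
FWIW, IIRC find_first_bit()/find_next_bit() return a 0-based bit index
(with a value >= the size argument when no bit is set), and the size is
expressed in bits, so the loop could become something like (untested
sketch):

    for ( i = find_first_bit(&bits_to_clear, max_fault_index + 1);
          i <= max_fault_index;
          i = find_next_bit(&bits_to_clear, max_fault_index + 1, i + 1) )
        __vvtd_clear_bit(vvtd, DMAR_FSTS_REG, i);

which also naturally handles bits_to_clear being 0.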

Roger.
Chao Gao Aug. 25, 2017, 7:17 a.m. UTC | #2
On Wed, Aug 23, 2017 at 12:51:27PM +0100, Roger Pau Monné wrote:
>On Wed, Aug 09, 2017 at 04:34:24PM -0400, Lan Tianyu wrote:
>> From: Chao Gao <chao.gao@intel.com>
>> 
>> Interrupt translation faults are non-recoverable fault. When faults
>                                                   ^ faults
>> are triggered, it needs to populate fault info to Fault Recording
>> Registers and inject vIOMMU msi interrupt to notify guest IOMMU driver
>> to deal with faults.
>> 
>> This patch emulates hardware's handling interrupt translation
>> faults (more information about the process can be found in VT-d spec,
>> chipter "Translation Faults", section "Non-Recoverable Fault
>  ^ chapter
>> Reporting" and section "Non-Recoverable Logging").
>> Specifically, viommu_record_fault() records the fault information and
>> viommu_report_non_recoverable_fault() reports faults to software.
>> Currently, only Primary Fault Logging is supported and the Number of
>> Fault-recording Registers is 1.
>> 
>> Signed-off-by: Chao Gao <chao.gao@intel.com>
>> Signed-off-by: Lan Tianyu <tianyu.lan@intel.com>
>> ---
>
>>      /* Address range of remapping hardware register-set */
>>      uint64_t base_addr;
>>      uint64_t length;
>> @@ -97,6 +101,23 @@ static inline struct vvtd *vcpu_vvtd(struct vcpu *v)
>>      return domain_vvtd(v->domain);
>>  }
>>  
>> +static inline int vvtd_test_and_set_bit(struct vvtd *vvtd, uint32_t reg,
>> +                                        int nr)
>
>unsigned int for nr, and I'm not really sure the usefulness of this
>helpers. In any case inline should not be used and instead let the
>compiler optimize this.
>

I think the compiler doesn't know how frequently these functions are
called. Explicitly marking short, frequently used functions inline can
sometimes avoid the compiler deciding not to inline them.

>> +static void vvtd_report_non_recoverable_fault(struct vvtd *vvtd, int reason)
>> +{
>> +    uint32_t fsts;
>> +
>> +    ASSERT(reason & DMA_FSTS_FAULTS);
>> +    fsts = vvtd_get_reg(vvtd, DMAR_FSTS_REG);
>> +    __vvtd_set_bit(vvtd, DMAR_FSTS_REG, reason);
>
>I don't understand this, is reason a bit position or a mask?
>
>DMA_FSTS_FAULTS seems to be a mask, that should be set into DMAR_FSTS_REG?

According to VT-d spec section 10.4.9, each kind of fault is denoted by
one bit in DMAR_FSTS_REG.

>>  static int vvtd_record_fault(struct vvtd *vvtd,
>> -                             struct irq_remapping_request *irq,
>> +                             struct irq_remapping_request *request,
>>                               int reason)
>>  {
>> -    return 0;
>> +    struct vtd_fault_record_register frcd;
>> +    int frcd_idx;
>> +
>> +    switch(reason)
>> +    {
>> +    case VTD_FR_IR_REQ_RSVD:
>> +    case VTD_FR_IR_INDEX_OVER:
>> +    case VTD_FR_IR_ENTRY_P:
>> +    case VTD_FR_IR_ROOT_INVAL:
>> +    case VTD_FR_IR_IRTE_RSVD:
>> +    case VTD_FR_IR_REQ_COMPAT:
>> +    case VTD_FR_IR_SID_ERR:
>> +        if ( vvtd_test_bit(vvtd, DMAR_FSTS_REG, DMA_FSTS_PFO_BIT) )
>> +            return X86EMUL_OKAY;
>> +
>> +        /* No available Fault Record means Fault overflowed */
>> +        frcd_idx = vvtd_alloc_frcd(vvtd);
>> +        if ( frcd_idx == -1 )
>> +        {
>> +            vvtd_report_non_recoverable_fault(vvtd, DMA_FSTS_PFO_BIT);
>> +            return X86EMUL_OKAY;
>> +        }
>> +        memset(&frcd, 0, sizeof(frcd));
>> +        frcd.fields.FR = (u8)reason;
>> +        frcd.fields.FI = ((u64)irq_remapping_request_index(request)) << 36;
>> +        frcd.fields.SID = (u16)request->source_id;
>> +        frcd.fields.F = 1;
>> +        vvtd_commit_frcd(vvtd, frcd_idx, &frcd);
>> +        return X86EMUL_OKAY;
>> +
>> +    default:
>
>Other reasons are just ignored? Should this have an ASSERT_UNREACHABLE
>maybe?

It can, since all the faults here are raised by vvtd itself. When vvtd
generates new kinds of faults, the corresponding handlers should also be
added.

>
>> +        break;
>> +    }
>> +
>> +    gdprintk(XENLOG_ERR, "Can't handle vVTD Fault (reason 0x%x).", reason);
>> +    domain_crash(vvtd->domain);
>
>Oh, I see. Is it expected that such faults with unhandled reasons can
>be somehow generated by the domain itself?
>

No. Faults are generated by vvtd. We only add interrupt translation
faults. Other faults can be added when adding other features (e.g. DMA
remapping).
Roger Pau Monné Aug. 25, 2017, 9:43 a.m. UTC | #3
On Fri, Aug 25, 2017 at 03:17:24PM +0800, Chao Gao wrote:
> On Wed, Aug 23, 2017 at 12:51:27PM +0100, Roger Pau Monné wrote:
> >On Wed, Aug 09, 2017 at 04:34:24PM -0400, Lan Tianyu wrote:
> >> From: Chao Gao <chao.gao@intel.com>
> >> 
> >> Interrupt translation faults are non-recoverable fault. When faults
> >                                                   ^ faults
> >> are triggered, it needs to populate fault info to Fault Recording
> >> Registers and inject vIOMMU msi interrupt to notify guest IOMMU driver
> >> to deal with faults.
> >> 
> >> This patch emulates hardware's handling interrupt translation
> >> faults (more information about the process can be found in VT-d spec,
> >> chipter "Translation Faults", section "Non-Recoverable Fault
> >  ^ chapter
> >> Reporting" and section "Non-Recoverable Logging").
> >> Specifically, viommu_record_fault() records the fault information and
> >> viommu_report_non_recoverable_fault() reports faults to software.
> >> Currently, only Primary Fault Logging is supported and the Number of
> >> Fault-recording Registers is 1.
> >> 
> >> Signed-off-by: Chao Gao <chao.gao@intel.com>
> >> Signed-off-by: Lan Tianyu <tianyu.lan@intel.com>
> >> ---
> >
> >>      /* Address range of remapping hardware register-set */
> >>      uint64_t base_addr;
> >>      uint64_t length;
> >> @@ -97,6 +101,23 @@ static inline struct vvtd *vcpu_vvtd(struct vcpu *v)
> >>      return domain_vvtd(v->domain);
> >>  }
> >>  
> >> +static inline int vvtd_test_and_set_bit(struct vvtd *vvtd, uint32_t reg,
> >> +                                        int nr)
> >
> >unsigned int for nr, and I'm not really sure the usefulness of this
> >helpers. In any case inline should not be used and instead let the
> >compiler optimize this.
> >
> 
> I think compiler doesn't know the frequency of calling these function.
> Explicitly make this function inline sometimes can avoid compiler
> doesn't do this for some short and frequently used functions.

I will leave that to the maintainers. IMHO I wouldn't add inline
unless I see some figures that back up the supposed speed increase.

> >> +static void vvtd_report_non_recoverable_fault(struct vvtd *vvtd, int reason)
> >> +{
> >> +    uint32_t fsts;
> >> +
> >> +    ASSERT(reason & DMA_FSTS_FAULTS);
> >> +    fsts = vvtd_get_reg(vvtd, DMAR_FSTS_REG);
> >> +    __vvtd_set_bit(vvtd, DMAR_FSTS_REG, reason);
> >
> >I don't understand this, is reason a bit position or a mask?
> >
> >DMA_FSTS_FAULTS seems to be a mask, that should be set into DMAR_FSTS_REG?
> 
> According VT-d spec 10.4.9, Each kind of fault is denoted by one bit in
> DMAR_FSTS_REG.

So 'reason' is a bit position in this context? It needs to be unsigned
int then.

> >>  static int vvtd_record_fault(struct vvtd *vvtd,
> >> -                             struct irq_remapping_request *irq,
> >> +                             struct irq_remapping_request *request,
> >>                               int reason)
> >>  {
> >> -    return 0;
> >> +    struct vtd_fault_record_register frcd;
> >> +    int frcd_idx;
> >> +
> >> +    switch(reason)
> >> +    {
> >> +    case VTD_FR_IR_REQ_RSVD:
> >> +    case VTD_FR_IR_INDEX_OVER:
> >> +    case VTD_FR_IR_ENTRY_P:
> >> +    case VTD_FR_IR_ROOT_INVAL:
> >> +    case VTD_FR_IR_IRTE_RSVD:
> >> +    case VTD_FR_IR_REQ_COMPAT:
> >> +    case VTD_FR_IR_SID_ERR:
> >> +        if ( vvtd_test_bit(vvtd, DMAR_FSTS_REG, DMA_FSTS_PFO_BIT) )
> >> +            return X86EMUL_OKAY;
> >> +
> >> +        /* No available Fault Record means Fault overflowed */
> >> +        frcd_idx = vvtd_alloc_frcd(vvtd);
> >> +        if ( frcd_idx == -1 )
> >> +        {
> >> +            vvtd_report_non_recoverable_fault(vvtd, DMA_FSTS_PFO_BIT);
> >> +            return X86EMUL_OKAY;
> >> +        }
> >> +        memset(&frcd, 0, sizeof(frcd));
> >> +        frcd.fields.FR = (u8)reason;
> >> +        frcd.fields.FI = ((u64)irq_remapping_request_index(request)) << 36;
> >> +        frcd.fields.SID = (u16)request->source_id;
> >> +        frcd.fields.F = 1;
> >> +        vvtd_commit_frcd(vvtd, frcd_idx, &frcd);
> >> +        return X86EMUL_OKAY;
> >> +
> >> +    default:
> >
> >Other reasons are just ignored? Should this have an ASSERT_UNREACHABLE
> >maybe?
> 
> It can have for all the faults are raised by vvtd. When vvtd generates a
> new kinds of fault, the corresponding handler also should be added.

Do you mean that with the code above all the possible fault types are
covered?

In which case it seems you want to add an ASSERT_UNREACHABLE to the
default case.

> >
> >> +        break;
> >> +    }
> >> +
> >> +    gdprintk(XENLOG_ERR, "Can't handle vVTD Fault (reason 0x%x).", reason);
> >> +    domain_crash(vvtd->domain);
> >
> >Oh, I see. Is it expected that such faults with unhandled reasons can
> >be somehow generated by the domain itself?
> >
> 
> No. Faults are generated by vvtd. We only add interrupt translation
> faults. Other faults can be added when adding other features (e.g. DMA
> remapping). 

OK, so then it looks like you want an ASSERT above, and I'm not sure
whether you want to kill the domain. After all, if Xen ever gets here
it means there's a bug in the vvtd implementation. I think the proper
solution is to add the ASSERT above and simply return X86EMUL_OKAY
here.
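
I.e. turn the tail of vvtd_record_fault() into something like (sketch):

    default:
        ASSERT_UNREACHABLE();
        break;
    }

    return X86EMUL_OKAY;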

Roger.

Patch

diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
index e323352..a9e905b 100644
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -226,26 +226,66 @@ 
 #define DMA_CCMD_CAIG_MASK(x) (((u64)x) & ((u64) 0x3 << 59))
 
 /* FECTL_REG */
-#define DMA_FECTL_IM (((u64)1) << 31)
+#define DMA_FECTL_IM_BIT 31
+#define DMA_FECTL_IM (1U << DMA_FECTL_IM_BIT)
+#define DMA_FECTL_IP_BIT 30
+#define DMA_FECTL_IP (1U << DMA_FECTL_IP_BIT)
 
 /* FSTS_REG */
-#define DMA_FSTS_PFO ((u64)1 << 0)
-#define DMA_FSTS_PPF ((u64)1 << 1)
-#define DMA_FSTS_AFO ((u64)1 << 2)
-#define DMA_FSTS_APF ((u64)1 << 3)
-#define DMA_FSTS_IQE ((u64)1 << 4)
-#define DMA_FSTS_ICE ((u64)1 << 5)
-#define DMA_FSTS_ITE ((u64)1 << 6)
-#define DMA_FSTS_FAULTS    DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_AFO | DMA_FSTS_APF | DMA_FSTS_IQE | DMA_FSTS_ICE | DMA_FSTS_ITE
+#define DMA_FSTS_PFO_BIT 0
+#define DMA_FSTS_PFO (1U << DMA_FSTS_PFO_BIT)
+#define DMA_FSTS_PPF_BIT 1
+#define DMA_FSTS_PPF (1U << DMA_FSTS_PPF_BIT)
+#define DMA_FSTS_AFO (1U << 2)
+#define DMA_FSTS_APF (1U << 3)
+#define DMA_FSTS_IQE (1U << 4)
+#define DMA_FSTS_ICE (1U << 5)
+#define DMA_FSTS_ITE (1U << 6)
+#define DMA_FSTS_PRO_BIT 7
+#define DMA_FSTS_PRO (1U << DMA_FSTS_PRO_BIT)
+#define DMA_FSTS_FAULTS    (DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_AFO | DMA_FSTS_APF | DMA_FSTS_IQE | DMA_FSTS_ICE | DMA_FSTS_ITE | DMA_FSTS_PRO)
+#define DMA_FSTS_RW1CS     (DMA_FSTS_PFO | DMA_FSTS_AFO | DMA_FSTS_APF | DMA_FSTS_IQE | DMA_FSTS_ICE | DMA_FSTS_ITE | DMA_FSTS_PRO)
 #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
 
 /* FRCD_REG, 32 bits access */
-#define DMA_FRCD_F (((u64)1) << 31)
+#define DMA_FRCD_LEN            0x10
+#define DMA_FRCD0_OFFSET        0x0
+#define DMA_FRCD1_OFFSET        0x4
+#define DMA_FRCD2_OFFSET        0x8
+#define DMA_FRCD3_OFFSET        0xc
+#define DMA_FRCD3_FR_MASK       0xffUL
+#define DMA_FRCD_F_BIT 31
+#define DMA_FRCD_F ((u64)1 << DMA_FRCD_F_BIT)
+#define DMA_FRCD(idx, offset) (DMA_CAP_FRO_OFFSET + DMA_FRCD_LEN * idx + offset)
 #define dma_frcd_type(d) ((d >> 30) & 1)
 #define dma_frcd_fault_reason(c) (c & 0xff)
 #define dma_frcd_source_id(c) (c & 0xffff)
 #define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
 
+struct vtd_fault_record_register
+{
+    union {
+        struct {
+            u64 lo;
+            u64 hi;
+        } bits;
+        struct {
+            u64 rsvd0   :12,
+                FI      :52; /* Fault Info */
+            u64 SID     :16, /* Source Identifier */
+                rsvd1   :9,
+                PRIV    :1,  /* Privilege Mode Requested */
+                EXE     :1,  /* Execute Permission Requested */
+                PP      :1,  /* PASID Present */
+                FR      :8,  /* Fault Reason */
+                PV      :20, /* PASID Value */
+                AT      :2,  /* Address Type */
+                T       :1,  /* Type. (0) Write (1) Read/AtomicOp */
+                F       :1;  /* Fault */
+        } fields;
+    };
+};
+
 enum VTD_FAULT_TYPE
 {
     /* Interrupt remapping transition faults */
diff --git a/xen/drivers/passthrough/vtd/vvtd.c b/xen/drivers/passthrough/vtd/vvtd.c
index eae8f11..f1e6d01 100644
--- a/xen/drivers/passthrough/vtd/vvtd.c
+++ b/xen/drivers/passthrough/vtd/vvtd.c
@@ -19,6 +19,7 @@ 
  */
 
 #include <xen/domain_page.h>
+#include <xen/lib.h>
 #include <xen/sched.h>
 #include <xen/types.h>
 #include <xen/viommu.h>
@@ -30,6 +31,7 @@ 
 #include <asm/io_apic.h>
 #include <asm/page.h>
 #include <asm/p2m.h>
+#include <asm/system.h>
 
 #include "iommu.h"
 #include "vtd.h"
@@ -49,6 +51,8 @@  struct hvm_hw_vvtd_regs {
 struct vvtd {
     /* VIOMMU_STATUS_XXX */
     int status;
+    /* Fault Recording index */
+    int frcd_idx;
     /* Address range of remapping hardware register-set */
     uint64_t base_addr;
     uint64_t length;
@@ -97,6 +101,23 @@  static inline struct vvtd *vcpu_vvtd(struct vcpu *v)
     return domain_vvtd(v->domain);
 }
 
+static inline int vvtd_test_and_set_bit(struct vvtd *vvtd, uint32_t reg,
+                                        int nr)
+{
+    return test_and_set_bit(nr, (uint32_t *)&vvtd->regs->data[reg]);
+}
+
+static inline int vvtd_test_and_clear_bit(struct vvtd *vvtd, uint32_t reg,
+                                          int nr)
+{
+    return test_and_clear_bit(nr, (uint32_t *)&vvtd->regs->data[reg]);
+}
+
+static inline int vvtd_test_bit(struct vvtd *vvtd, uint32_t reg, int nr)
+{
+    return test_bit(nr, (uint32_t *)&vvtd->regs->data[reg]);
+}
+
 static inline void __vvtd_set_bit(struct vvtd *vvtd, uint32_t reg, int nr)
 {
     return __set_bit(nr, (uint32_t *)&vvtd->regs->data[reg]);
@@ -232,6 +253,24 @@  static int vvtd_delivery(
     return 0;
 }
 
+void vvtd_generate_interrupt(struct vvtd *vvtd,
+                             uint32_t addr,
+                             uint32_t data)
+{
+    uint8_t dest, dm, dlm, tm, vector;
+
+    VVTD_DEBUG(VVTD_DBG_FAULT, "Sending interrupt %x %x to d%d",
+               addr, data, vvtd->domain->domain_id);
+
+    dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+    dm = !!(addr & MSI_ADDR_DESTMODE_MASK);
+    dlm = (data & MSI_DATA_DELIVERY_MODE_MASK) >> MSI_DATA_DELIVERY_MODE_SHIFT;
+    tm = (data & MSI_DATA_TRIGGER_MASK) >> MSI_DATA_TRIGGER_SHIFT;
+    vector = data & MSI_DATA_VECTOR_MASK;
+
+    vvtd_delivery(vvtd->domain, vector, dest, dm, dlm, tm);
+}
+
 static uint32_t irq_remapping_request_index(struct irq_remapping_request *irq)
 {
     if ( irq->type == VIOMMU_REQUEST_IRQ_MSI )
@@ -260,11 +299,189 @@  static inline uint32_t irte_dest(struct vvtd *vvtd, uint32_t dest)
     return DMA_IRTA_EIME(irta) ? dest : MASK_EXTR(dest, IRTE_xAPIC_DEST_MASK);
 }
 
+static void vvtd_report_non_recoverable_fault(struct vvtd *vvtd, int reason)
+{
+    uint32_t fsts;
+
+    ASSERT(reason & DMA_FSTS_FAULTS);
+    fsts = vvtd_get_reg(vvtd, DMAR_FSTS_REG);
+    __vvtd_set_bit(vvtd, DMAR_FSTS_REG, reason);
+
+    /*
+     * Accoroding to VT-d spec "Non-Recoverable Fault Event" chapter, if
+     * there are any previously reported interrupt conditions that are yet to
+     * be sevices by software, the Fault Event interrrupt is not generated.
+     */
+    if ( fsts & DMA_FSTS_FAULTS )
+        return;
+
+    __vvtd_set_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IP_BIT);
+    if ( !vvtd_test_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IM_BIT) )
+    {
+        uint32_t fe_data, fe_addr;
+        fe_data = vvtd_get_reg(vvtd, DMAR_FEDATA_REG);
+        fe_addr = vvtd_get_reg(vvtd, DMAR_FEADDR_REG);
+        vvtd_generate_interrupt(vvtd, fe_addr, fe_data);
+        __vvtd_clear_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IP_BIT);
+    }
+}
+
+static void vvtd_recomputing_ppf(struct vvtd *vvtd)
+{
+    int i;
+
+    for ( i = 0; i < DMA_FRCD_REG_NR; i++ )
+    {
+        if ( vvtd_test_bit(vvtd, DMA_FRCD(i, DMA_FRCD3_OFFSET),
+                           DMA_FRCD_F_BIT) )
+        {
+            vvtd_report_non_recoverable_fault(vvtd, DMA_FSTS_PPF_BIT);
+            return;
+        }
+    }
+    /*
+     * No Primary Fault is in Fault Record Registers, thus clear PPF bit in
+     * FSTS.
+     */
+    __vvtd_clear_bit(vvtd, DMAR_FSTS_REG, DMA_FSTS_PPF_BIT);
+
+    /* If no fault is in FSTS, clear pending bit in FECTL. */
+    if ( !(vvtd_get_reg(vvtd, DMAR_FSTS_REG) & DMA_FSTS_FAULTS) )
+        __vvtd_clear_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IP_BIT);
+}
+
+/*
+ * Commit a frcd to emulated Fault Record Registers.
+ */
+static void vvtd_commit_frcd(struct vvtd *vvtd, int idx,
+                             struct vtd_fault_record_register *frcd)
+{
+    vvtd_set_reg_quad(vvtd, DMA_FRCD(idx, DMA_FRCD0_OFFSET), frcd->bits.lo);
+    vvtd_set_reg_quad(vvtd, DMA_FRCD(idx, DMA_FRCD2_OFFSET), frcd->bits.hi);
+    vvtd_recomputing_ppf(vvtd);
+}
+
+/*
+ * Allocate a FRCD for the caller. If success, return the FRI. Or, return -1
+ * when failure.
+ */
+static int vvtd_alloc_frcd(struct vvtd *vvtd)
+{
+    int prev;
+
+    /* Set the F bit to indicate the FRCD is in use. */
+    if ( vvtd_test_and_set_bit(vvtd, DMA_FRCD(vvtd->frcd_idx, DMA_FRCD3_OFFSET),
+                               DMA_FRCD_F_BIT) )
+    {
+        prev = vvtd->frcd_idx;
+        vvtd->frcd_idx = (prev + 1) % DMA_FRCD_REG_NR;
+        return vvtd->frcd_idx;
+    }
+    return -1;
+}
+
+static void vvtd_free_frcd(struct vvtd *vvtd, int i)
+{
+    __vvtd_clear_bit(vvtd, DMA_FRCD(i, DMA_FRCD3_OFFSET), DMA_FRCD_F_BIT);
+}
+
 static int vvtd_record_fault(struct vvtd *vvtd,
-                             struct irq_remapping_request *irq,
+                             struct irq_remapping_request *request,
                              int reason)
 {
-    return 0;
+    struct vtd_fault_record_register frcd;
+    int frcd_idx;
+
+    switch(reason)
+    {
+    case VTD_FR_IR_REQ_RSVD:
+    case VTD_FR_IR_INDEX_OVER:
+    case VTD_FR_IR_ENTRY_P:
+    case VTD_FR_IR_ROOT_INVAL:
+    case VTD_FR_IR_IRTE_RSVD:
+    case VTD_FR_IR_REQ_COMPAT:
+    case VTD_FR_IR_SID_ERR:
+        if ( vvtd_test_bit(vvtd, DMAR_FSTS_REG, DMA_FSTS_PFO_BIT) )
+            return X86EMUL_OKAY;
+
+        /* No available Fault Record means Fault overflowed */
+        frcd_idx = vvtd_alloc_frcd(vvtd);
+        if ( frcd_idx == -1 )
+        {
+            vvtd_report_non_recoverable_fault(vvtd, DMA_FSTS_PFO_BIT);
+            return X86EMUL_OKAY;
+        }
+        memset(&frcd, 0, sizeof(frcd));
+        frcd.fields.FR = (u8)reason;
+        frcd.fields.FI = ((u64)irq_remapping_request_index(request)) << 36;
+        frcd.fields.SID = (u16)request->source_id;
+        frcd.fields.F = 1;
+        vvtd_commit_frcd(vvtd, frcd_idx, &frcd);
+        return X86EMUL_OKAY;
+
+    default:
+        break;
+    }
+
+    gdprintk(XENLOG_ERR, "Can't handle vVTD Fault (reason 0x%x).", reason);
+    domain_crash(vvtd->domain);
+    return X86EMUL_OKAY;
+}
+
+static int vvtd_write_frcd3(struct vvtd *vvtd, uint32_t val)
+{
+    /* Writing a 1 means clear fault */
+    if ( val & DMA_FRCD_F )
+    {
+        vvtd_free_frcd(vvtd, 0);
+        vvtd_recomputing_ppf(vvtd);
+    }
+    return X86EMUL_OKAY;
+}
+
+static int vvtd_write_fectl(struct vvtd *vvtd, uint32_t val)
+{
+    /*
+     * Only DMA_FECTL_IM bit is writable. Generate pending event when unmask.
+     */
+    if ( !(val & DMA_FECTL_IM) )
+    {
+        /* Clear IM */
+        __vvtd_clear_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IM_BIT);
+        if ( vvtd_test_and_clear_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IP_BIT) )
+        {
+            uint32_t fe_data, fe_addr;
+            fe_data = vvtd_get_reg(vvtd, DMAR_FEDATA_REG);
+            fe_addr = vvtd_get_reg(vvtd, DMAR_FEADDR_REG);
+            vvtd_generate_interrupt(vvtd, fe_addr, fe_data);
+        }
+    }
+    else
+        __vvtd_set_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IM_BIT);
+
+    return X86EMUL_OKAY;
+}
+
+static int vvtd_write_fsts(struct vvtd *vvtd, uint32_t val)
+{
+    int i, max_fault_index = DMA_FSTS_PRO_BIT;
+    uint64_t bits_to_clear = val & DMA_FSTS_RW1CS;
+
+    i = find_first_bit(&bits_to_clear, max_fault_index / 8 + 1);
+    while ( i <= max_fault_index )
+    {
+        __vvtd_clear_bit(vvtd, DMAR_FSTS_REG, i);
+        i = find_next_bit(&bits_to_clear, max_fault_index / 8 + 1, i + 1);
+    }
+
+    /*
+     * Clear IP field when all status fields in the Fault Status Register
+     * being clear.
+     */
+    if ( !((vvtd_get_reg(vvtd, DMAR_FSTS_REG) & DMA_FSTS_FAULTS)) )
+        __vvtd_clear_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IP_BIT);
+
+    return X86EMUL_OKAY;
 }
 
 static int vvtd_handle_gcmd_qie(struct vvtd *vvtd, uint32_t val)
@@ -410,6 +627,18 @@  static int vvtd_write(struct vcpu *v, unsigned long addr,
             ret = vvtd_write_gcmd(vvtd, val);
             break;
 
+        case DMAR_FSTS_REG:
+            ret = vvtd_write_fsts(vvtd, val);
+            break;
+
+        case DMAR_FECTL_REG:
+            ret = vvtd_write_fectl(vvtd, val);
+            break;
+
+        case DMA_CAP_FRO_OFFSET + DMA_FRCD3_OFFSET:
+            ret = vvtd_write_frcd3(vvtd, val);
+            break;
+
         case DMAR_IEDATA_REG:
         case DMAR_IEADDR_REG:
         case DMAR_IEUADDR_REG:
@@ -435,6 +664,10 @@  static int vvtd_write(struct vcpu *v, unsigned long addr,
             ret = X86EMUL_OKAY;
             break;
 
+        case DMA_CAP_FRO_OFFSET + DMA_FRCD2_OFFSET:
+            ret = vvtd_write_frcd3(vvtd, val >> 32);
+            break;
+
         default:
             ret = X86EMUL_UNHANDLEABLE;
             break;
@@ -649,6 +882,7 @@  static int vvtd_create(struct domain *d, struct viommu *viommu)
     vvtd->eim = 0;
     vvtd->irt = 0;
     vvtd->irt_max_entry = 0;
+    vvtd->frcd_idx = 0;
     register_mmio_handler(d, &vvtd_mmio_ops);
     return 0;