Message ID | 20240801154334.1009852-11-dbarboza@ventanamicro.com (mailing list archive) |
---|---|
State | New, archived |
Series | riscv: QEMU RISC-V IOMMU Support |
On Fri, Aug 2, 2024 at 1:47 AM Daniel Henrique Barboza
<dbarboza@ventanamicro.com> wrote:
>
> From: Tomasz Jeznach <tjeznach@rivosinc.com>
>
> DBG support adds three additional registers: tr_req_iova, tr_req_ctl and
> tr_response.
>
> The DBG cap is always enabled. No on/off toggle is provided for it.
>
> Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
> Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
> Reviewed-by: Frank Chang <frank.chang@sifive.com>

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>

Alistair

> ---
>  hw/riscv/riscv-iommu-bits.h | 17 +++++++++++
>  hw/riscv/riscv-iommu.c      | 59 +++++++++++++++++++++++++++++++++++++
>  2 files changed, 76 insertions(+)
>
> diff --git a/hw/riscv/riscv-iommu-bits.h b/hw/riscv/riscv-iommu-bits.h
> index da6fb603aa..9d59c5c78d 100644
> --- a/hw/riscv/riscv-iommu-bits.h
> +++ b/hw/riscv/riscv-iommu-bits.h
> @@ -82,6 +82,7 @@ struct riscv_iommu_pq_record {
>  #define RISCV_IOMMU_CAP_ATS         BIT_ULL(25)
>  #define RISCV_IOMMU_CAP_T2GPA       BIT_ULL(26)
>  #define RISCV_IOMMU_CAP_IGS         GENMASK_ULL(29, 28)
> +#define RISCV_IOMMU_CAP_DBG         BIT_ULL(31)
>  #define RISCV_IOMMU_CAP_PAS         GENMASK_ULL(37, 32)
>  #define RISCV_IOMMU_CAP_PD8         BIT_ULL(38)
>  #define RISCV_IOMMU_CAP_PD17        BIT_ULL(39)
> @@ -184,6 +185,22 @@ enum {
>      RISCV_IOMMU_INTR_COUNT
>  };
>
> +/* 5.24 Translation request IOVA (64bits) */
> +#define RISCV_IOMMU_REG_TR_REQ_IOVA     0x0258
> +
> +/* 5.25 Translation request control (64bits) */
> +#define RISCV_IOMMU_REG_TR_REQ_CTL      0x0260
> +#define RISCV_IOMMU_TR_REQ_CTL_GO_BUSY  BIT_ULL(0)
> +#define RISCV_IOMMU_TR_REQ_CTL_NW       BIT_ULL(3)
> +#define RISCV_IOMMU_TR_REQ_CTL_PID      GENMASK_ULL(31, 12)
> +#define RISCV_IOMMU_TR_REQ_CTL_DID      GENMASK_ULL(63, 40)
> +
> +/* 5.26 Translation request response (64bits) */
> +#define RISCV_IOMMU_REG_TR_RESPONSE     0x0268
> +#define RISCV_IOMMU_TR_RESPONSE_FAULT   BIT_ULL(0)
> +#define RISCV_IOMMU_TR_RESPONSE_S       BIT_ULL(9)
> +#define RISCV_IOMMU_TR_RESPONSE_PPN     RISCV_IOMMU_PPN_FIELD
> +
>  /* 5.27 Interrupt cause to vector (64bits) */
>  #define RISCV_IOMMU_REG_ICVEC           0x02F8
>
> diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
> index d127838ef8..bf90cea674 100644
> --- a/hw/riscv/riscv-iommu.c
> +++ b/hw/riscv/riscv-iommu.c
> @@ -1756,6 +1756,50 @@ static void riscv_iommu_process_icvec_update(RISCVIOMMUState *s)
>              riscv_iommu_get_icvec_vector(icvec, RISCV_IOMMU_INTR_PQ));
>  }
>
> +static void riscv_iommu_process_dbg(RISCVIOMMUState *s)
> +{
> +    uint64_t iova = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_TR_REQ_IOVA);
> +    uint64_t ctrl = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_TR_REQ_CTL);
> +    unsigned devid = get_field(ctrl, RISCV_IOMMU_TR_REQ_CTL_DID);
> +    unsigned pid = get_field(ctrl, RISCV_IOMMU_TR_REQ_CTL_PID);
> +    RISCVIOMMUContext *ctx;
> +    void *ref;
> +
> +    if (!(ctrl & RISCV_IOMMU_TR_REQ_CTL_GO_BUSY)) {
> +        return;
> +    }
> +
> +    ctx = riscv_iommu_ctx(s, devid, pid, &ref);
> +    if (ctx == NULL) {
> +        riscv_iommu_reg_set64(s, RISCV_IOMMU_REG_TR_RESPONSE,
> +                              RISCV_IOMMU_TR_RESPONSE_FAULT |
> +                              (RISCV_IOMMU_FQ_CAUSE_DMA_DISABLED << 10));
> +    } else {
> +        IOMMUTLBEntry iotlb = {
> +            .iova = iova,
> +            .perm = ctrl & RISCV_IOMMU_TR_REQ_CTL_NW ? IOMMU_RO : IOMMU_RW,
> +            .addr_mask = ~0,
> +            .target_as = NULL,
> +        };
> +        int fault = riscv_iommu_translate(s, ctx, &iotlb, false);
> +        if (fault) {
> +            iova = RISCV_IOMMU_TR_RESPONSE_FAULT | (((uint64_t) fault) << 10);
> +        } else {
> +            iova = iotlb.translated_addr & ~iotlb.addr_mask;
> +            iova >>= TARGET_PAGE_BITS;
> +            iova &= RISCV_IOMMU_TR_RESPONSE_PPN;
> +
> +            /* We do not support superpages (> 4kbs) for now */
> +            iova &= ~RISCV_IOMMU_TR_RESPONSE_S;
> +        }
> +        riscv_iommu_reg_set64(s, RISCV_IOMMU_REG_TR_RESPONSE, iova);
> +    }
> +
> +    riscv_iommu_reg_mod64(s, RISCV_IOMMU_REG_TR_REQ_CTL, 0,
> +                          RISCV_IOMMU_TR_REQ_CTL_GO_BUSY);
> +    riscv_iommu_ctx_put(s, ref);
> +}
> +
>  typedef void riscv_iommu_process_fn(RISCVIOMMUState *s);
>
>  static void riscv_iommu_update_ipsr(RISCVIOMMUState *s, uint64_t data)
> @@ -1883,6 +1927,12 @@ static MemTxResult riscv_iommu_mmio_write(void *opaque, hwaddr addr,
>
>          return MEMTX_OK;
>
> +    case RISCV_IOMMU_REG_TR_REQ_CTL:
> +        process_fn = riscv_iommu_process_dbg;
> +        regb = RISCV_IOMMU_REG_TR_REQ_CTL;
> +        busy = RISCV_IOMMU_TR_REQ_CTL_GO_BUSY;
> +        break;
> +
>      default:
>          break;
>      }
> @@ -2055,6 +2105,9 @@ static void riscv_iommu_realize(DeviceState *dev, Error **errp)
>          s->cap |= RISCV_IOMMU_CAP_SV32X4 | RISCV_IOMMU_CAP_SV39X4 |
>                    RISCV_IOMMU_CAP_SV48X4 | RISCV_IOMMU_CAP_SV57X4;
>      }
> +    /* Enable translation debug interface */
> +    s->cap |= RISCV_IOMMU_CAP_DBG;
> +
>      /* Report QEMU target physical address space limits */
>      s->cap = set_field(s->cap, RISCV_IOMMU_CAP_PAS,
>                         TARGET_PHYS_ADDR_SPACE_BITS);
> @@ -2111,6 +2164,12 @@ static void riscv_iommu_realize(DeviceState *dev, Error **errp)
>      stl_le_p(&s->regs_wc[RISCV_IOMMU_REG_IPSR], ~0);
>      stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_ICVEC], 0);
>      stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_DDTP], s->ddtp);
> +    /* If debug registers enabled. */
> +    if (s->cap & RISCV_IOMMU_CAP_DBG) {
> +        stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_TR_REQ_IOVA], 0);
> +        stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_TR_REQ_CTL],
> +                 RISCV_IOMMU_TR_REQ_CTL_GO_BUSY);
> +    }
>
>      /* Memory region for downstream access, if specified. */
>      if (s->target_mr) {
> --
> 2.45.2
>
>
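For context, the debug interface added by this patch is driven entirely through the three new registers: software programs tr_req_iova, writes tr_req_ctl with the device/process IDs and the Go/Busy bit, waits for the IOMMU to clear Go/Busy (the MMIO write path schedules riscv_iommu_process_dbg() for that register), and then reads the result from tr_response. The following bare-metal sketch shows that sequence; it is an illustration only, and IOMMU_MMIO_BASE, the iommu_readq()/iommu_writeq() helpers, and the 53:10 placement of the PPN field in tr_response are assumptions not taken from the patch.

#include <stdbool.h>
#include <stdint.h>

#define IOMMU_MMIO_BASE        0x10010000UL  /* hypothetical platform address */

/* Register offsets and field positions mirror riscv-iommu-bits.h above. */
#define REG_TR_REQ_IOVA        0x0258
#define REG_TR_REQ_CTL         0x0260
#define REG_TR_RESPONSE        0x0268

#define TR_REQ_CTL_GO_BUSY     (1ULL << 0)
#define TR_REQ_CTL_NW          (1ULL << 3)   /* probe for a read-only mapping */
#define TR_REQ_CTL_PID_SHIFT   12            /* PID field: bits 31:12 */
#define TR_REQ_CTL_DID_SHIFT   40            /* DID field: bits 63:40 */

#define TR_RESPONSE_FAULT      (1ULL << 0)
#define TR_RESPONSE_PPN_SHIFT  10            /* PPN at bits 53:10, assumed */
#define TR_RESPONSE_PPN_MASK   ((1ULL << 44) - 1)

static inline void iommu_writeq(uint64_t off, uint64_t val)
{
    *(volatile uint64_t *)(IOMMU_MMIO_BASE + off) = val;
}

static inline uint64_t iommu_readq(uint64_t off)
{
    return *(volatile uint64_t *)(IOMMU_MMIO_BASE + off);
}

/*
 * Ask the IOMMU to translate one IOVA on behalf of (devid, pid).
 * Returns true and fills *pa on success; returns false if tr_response
 * reports a fault (the model stores the fault cause in the upper bits).
 */
static bool iommu_debug_translate(uint32_t devid, uint32_t pid,
                                  uint64_t iova, uint64_t *pa)
{
    uint64_t ctl, resp;

    iommu_writeq(REG_TR_REQ_IOVA, iova);

    ctl = TR_REQ_CTL_GO_BUSY |
          ((uint64_t)(pid & 0xfffff) << TR_REQ_CTL_PID_SHIFT) |
          ((uint64_t)(devid & 0xffffff) << TR_REQ_CTL_DID_SHIFT);
    iommu_writeq(REG_TR_REQ_CTL, ctl);

    /* riscv_iommu_process_dbg() clears Go/Busy once the probe is done. */
    while (iommu_readq(REG_TR_REQ_CTL) & TR_REQ_CTL_GO_BUSY) {
        ;
    }

    resp = iommu_readq(REG_TR_RESPONSE);
    if (resp & TR_RESPONSE_FAULT) {
        return false;
    }

    /* 4 KiB pages only: rebuild the PA from the PPN plus the page offset. */
    *pa = ((resp >> TR_RESPONSE_PPN_SHIFT) & TR_RESPONSE_PPN_MASK) << 12;
    *pa |= iova & 0xfff;
    return true;
}

In the QEMU model the Go/Busy handshake is what triggers the work: the MMIO write handler matches RISCV_IOMMU_REG_TR_REQ_CTL, dispatches riscv_iommu_process_dbg(), and that function performs the lookup via riscv_iommu_translate() and writes either the PPN or the FAULT bit plus cause into tr_response before clearing Go/Busy.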