| Message ID | 20211211041917.135345-12-anup.patel@wdc.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | QEMU RISC-V AIA support |
On Sat, Dec 11, 2021 at 2:23 PM Anup Patel <anup.patel@wdc.com> wrote:
>
> The AIA hvictl and hviprioX CSRs allow hypervisor to control
> interrupts visible at VS-level. This patch implements AIA hvictl
> and hviprioX CSRs.
>
> Signed-off-by: Anup Patel <anup.patel@wdc.com>

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>

Alistair
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 8b14fac9a9..c6b9cc0f2a 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -197,6 +197,7 @@ struct CPURISCVState {
     uint64_t htimedelta;
 
     /* Hypervisor controlled virtual interrupt priorities */
+    target_ulong hvictl;
     uint8_t hviprio[64];
 
     /* Virtual CSRs */
@@ -465,6 +466,7 @@ static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
     return env->misa_mxl;
 }
 #endif
+#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))
 
 /*
  * A simplification for VLMAX
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index d9bb5ff649..968a65bbd2 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -229,6 +229,15 @@ static RISCVException pointer_masking(CPURISCVState *env, int csrno)
     return RISCV_EXCP_ILLEGAL_INST;
 }
 
+static int aia_hmode(CPURISCVState *env, int csrno)
+{
+    if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
+        return RISCV_EXCP_ILLEGAL_INST;
+    }
+
+    return hmode(env, csrno);
+}
+
 static int aia_hmode32(CPURISCVState *env, int csrno)
 {
     if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
@@ -1041,6 +1050,9 @@ static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
     uint64_t mask = env->mideleg & S_MODE_INTERRUPTS;
 
     if (riscv_cpu_virt_enabled(env)) {
+        if (env->hvictl & HVICTL_VTI) {
+            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+        }
         ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
     } else {
         ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & mask);
@@ -1239,6 +1251,9 @@ static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
     uint64_t mask = env->mideleg & sip_writable_mask;
 
     if (riscv_cpu_virt_enabled(env)) {
+        if (env->hvictl & HVICTL_VTI) {
+            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+        }
         ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
     } else {
         ret = rmw_mip64(env, csrno, ret_val, new_val, wr_mask & mask);
@@ -1625,6 +1640,110 @@ static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
     return RISCV_EXCP_NONE;
 }
 
+static int read_hvictl(CPURISCVState *env, int csrno, target_ulong *val)
+{
+    *val = env->hvictl;
+    return RISCV_EXCP_NONE;
+}
+
+static int write_hvictl(CPURISCVState *env, int csrno, target_ulong val)
+{
+    env->hvictl = val & HVICTL_VALID_MASK;
+    return RISCV_EXCP_NONE;
+}
+
+static int read_hvipriox(CPURISCVState *env, int first_index,
+                         uint8_t *iprio, target_ulong *val)
+{
+    int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
+
+    /* First index has to be a multiple of the number of irqs per register */
+    if (first_index % num_irqs) {
+        return (riscv_cpu_virt_enabled(env)) ?
+               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
+    }
+
+    /* Fill-up return value */
+    *val = 0;
+    for (i = 0; i < num_irqs; i++) {
+        if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
+            continue;
+        }
+        if (rdzero) {
+            continue;
+        }
+        *val |= ((target_ulong)iprio[irq]) << (i * 8);
+    }
+
+    return RISCV_EXCP_NONE;
+}
+
+static int write_hvipriox(CPURISCVState *env, int first_index,
+                          uint8_t *iprio, target_ulong val)
+{
+    int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
+
+    /* First index has to be a multiple of the number of irqs per register */
+    if (first_index % num_irqs) {
+        return (riscv_cpu_virt_enabled(env)) ?
+               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
+    }
+
+    /* Fill-up priority array */
+    for (i = 0; i < num_irqs; i++) {
+        if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
+            continue;
+        }
+        if (rdzero) {
+            iprio[irq] = 0;
+        } else {
+            iprio[irq] = (val >> (i * 8)) & 0xff;
+        }
+    }
+
+    return RISCV_EXCP_NONE;
+}
+
+static int read_hviprio1(CPURISCVState *env, int csrno, target_ulong *val)
+{
+    return read_hvipriox(env, 0, env->hviprio, val);
+}
+
+static int write_hviprio1(CPURISCVState *env, int csrno, target_ulong val)
+{
+    return write_hvipriox(env, 0, env->hviprio, val);
+}
+
+static int read_hviprio1h(CPURISCVState *env, int csrno, target_ulong *val)
+{
+    return read_hvipriox(env, 4, env->hviprio, val);
+}
+
+static int write_hviprio1h(CPURISCVState *env, int csrno, target_ulong val)
+{
+    return write_hvipriox(env, 4, env->hviprio, val);
+}
+
+static int read_hviprio2(CPURISCVState *env, int csrno, target_ulong *val)
+{
+    return read_hvipriox(env, 8, env->hviprio, val);
+}
+
+static int write_hviprio2(CPURISCVState *env, int csrno, target_ulong val)
+{
+    return write_hvipriox(env, 8, env->hviprio, val);
+}
+
+static int read_hviprio2h(CPURISCVState *env, int csrno, target_ulong *val)
+{
+    return read_hvipriox(env, 12, env->hviprio, val);
+}
+
+static int write_hviprio2h(CPURISCVState *env, int csrno, target_ulong val)
+{
+    return write_hvipriox(env, 12, env->hviprio, val);
+}
+
 /* Virtual CSR Registers */
 static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
                                     target_ulong *val)
@@ -2277,9 +2396,16 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
     [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2 },
     [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst },
 
+    /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
+    [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl, write_hvictl },
+    [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1, write_hviprio1 },
+    [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2, write_hviprio2 },
+
     /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
     [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL, rmw_hidelegh },
     [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph },
+    [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h, write_hviprio1h },
+    [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h, write_hviprio2h },
     [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh },
     [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph },
 
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
index 44dca84ded..f027d5e307 100644
--- a/target/riscv/machine.c
+++ b/target/riscv/machine.c
@@ -92,6 +92,8 @@ static const VMStateDescription vmstate_hyper = {
         VMSTATE_UINTTL(env.hgeie, RISCVCPU),
         VMSTATE_UINTTL(env.hgeip, RISCVCPU),
         VMSTATE_UINT64(env.htimedelta, RISCVCPU),
+
+        VMSTATE_UINTTL(env.hvictl, RISCVCPU),
         VMSTATE_UINT8_ARRAY(env.hviprio, RISCVCPU, 64),
 
         VMSTATE_UINT64(env.vsstatus, RISCVCPU),
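A note on the layout implemented by read_hvipriox()/write_hvipriox() above: each hviprioX register holds one byte-wide priority per entry, so a register covers 4 entries on RV32 and 8 on RV64 (num_irqs = 4 * (XLEN / 32)), which is why the accessors pass first_index values of 0, 4, 8 and 12. The stand-alone sketch below mirrors that packing arithmetic; it is not QEMU code, the function names are invented for illustration, and it ignores the index-to-irq remapping done by riscv_cpu_hviprio_index2irq().

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Stand-alone sketch of the hviprioX byte packing used by
 * read_hvipriox()/write_hvipriox(): each register holds
 * 4 * (xlen / 32) one-byte priority entries (4 on RV32, 8 on RV64).
 * Names are invented for this illustration only.
 */
static uint64_t pack_hviprio(const uint8_t *prio, int first_index, int xlen)
{
    int num_irqs = 4 * (xlen / 32);     /* entries per hviprioX register */
    uint64_t val = 0;

    for (int i = 0; i < num_irqs; i++) {
        val |= (uint64_t)prio[first_index + i] << (i * 8);
    }
    return val;
}

int main(void)
{
    uint8_t prio[16] = { 0 };

    prio[5] = 0x20;     /* priority byte for entry index 5 */

    /* On RV32, entry 5 lands in the second register (first_index = 4)... */
    printf("rv32: 0x%08"  PRIx64 "\n", pack_hviprio(prio, 4, 32));
    /* ...on RV64 it lands in the first register (first_index = 0). */
    printf("rv64: 0x%016" PRIx64 "\n", pack_hviprio(prio, 0, 64));
    return 0;
}
```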
The AIA hvictl and hviprioX CSRs allow a hypervisor to control
interrupts visible at VS-level. This patch implements the AIA hvictl
and hviprioX CSRs.

Signed-off-by: Anup Patel <anup.patel@wdc.com>
---
 target/riscv/cpu.h     |   2 +
 target/riscv/csr.c     | 126 +++++++++++++++++++++++++++++++++++++++++
 target/riscv/machine.c |   2 +
 3 files changed, 130 insertions(+)
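On the usage side, a hypervisor could combine these CSRs to take over VS-level interrupt delivery: once hvictl.VTI is set, the rmw_sie64()/rmw_sip64() hunks above make VS-mode accesses to sie/sip raise a virtual-instruction fault, so the hypervisor gets to emulate them. The snippet below is a hypothetical HS-mode sketch, not part of this patch; the VTI bit position is an assumption taken from the RISC-V AIA draft specification, and using the CSRs by name requires an AIA-aware assembler.

```c
/*
 * Hypothetical HS-mode helpers (not from this patch).
 * Assumptions: hvictl.VTI is bit 30 per the RISC-V AIA draft spec, and the
 * assembler recognises the "hvictl"/"hviprio1" CSR names.
 */
#define HVICTL_VTI (1UL << 30)

/* Force VS-mode sie/sip accesses to trap so the hypervisor can emulate them. */
static inline void hvictl_enable_vti(void)
{
    unsigned long vti = HVICTL_VTI;
    __asm__ volatile("csrs hvictl, %0" : : "r"(vti));
}

/* Program the guest-visible priority bytes held in hviprio1. */
static inline void hviprio1_write(unsigned long val)
{
    __asm__ volatile("csrw hviprio1, %0" : : "r"(val));
}
```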