Message ID: 20190822084131.114764-12-anup.patel@wdc.com (mailing list archive)
State: New, archived
Series: KVM RISC-V Support
On 22.08.19 10:45, Anup Patel wrote:
> We get an illegal instruction trap whenever a Guest/VM executes the
> WFI instruction.
>
> This patch handles the WFI trap by blocking the trapped VCPU using
> the kvm_vcpu_block() API. The blocked VCPU will be automatically
> resumed whenever a VCPU interrupt is injected from user-space or
> from the in-kernel IRQCHIP emulation.
>
> Signed-off-by: Anup Patel <anup.patel@wdc.com>
> Acked-by: Paolo Bonzini <pbonzini@redhat.com>
> Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/riscv/kvm/vcpu_exit.c | 88 ++++++++++++++++++++++++++++++++++++++
>  1 file changed, 88 insertions(+)
>
> [...]
>
> +static illegal_insn_func illegal_insn_table[32] = {

Every time I did experiments on PowerPC with indirect tables like this
over switch() in C, the switch() code won. CPUs are pretty good at
predicting branches. Predicting indirect jumps, however, is something
they are terrible at.

So unless you consider the jump table more readable / maintainable, I
would suggest using a simple switch() statement. It will be faster and
smaller.

Alex

> [...]
On Thu, Aug 22, 2019 at 5:49 PM Alexander Graf <graf@amazon.com> wrote:
>
> On 22.08.19 10:45, Anup Patel wrote:
> > [...]
> > +static illegal_insn_func illegal_insn_table[32] = {
>
> Every time I did experiments on PowerPC with indirect tables like this
> over switch() in C, the switch() code won. CPUs are pretty good at
> predicting branches. Predicting indirect jumps, however, is something
> they are terrible at.
>
> So unless you consider the jump table more readable / maintainable, I
> would suggest using a simple switch() statement. It will be faster and
> smaller.

Yes, readability was the reason we chose a jump table, but I see your
point. Most of the entries in the jump table point to
truly_illegal_insn(), so the switch statement will be quite simple
here.

I will update this in the next revision.

Regards,
Anup

> [...]
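For illustration, here is a minimal sketch of the switch()-based
dispatch Alex suggests, reusing the handlers from the patch below. It
is an assumption about what the reworked illegal_inst_fault() could
look like, not code from any posted revision:

	/* Hypothetical switch()-based rework of illegal_inst_fault();
	 * the handlers and the opcode extraction are from the patch. */
	static int illegal_inst_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
				      unsigned long stval)
	{
		ulong insn = stval;

		/* A full 32-bit instruction has its low two bits set to 0b11. */
		if (unlikely((insn & 3) != 3)) {
			if (insn == 0)
				insn = get_insn(vcpu);
			if ((insn & 3) != 3)
				return truly_illegal_insn(vcpu, run, insn);
		}

		/* Dispatch on opcode bits [6:2]; SYSTEM is opcode 0x73,
		 * i.e. (0x73 & 0x7c) >> 2 == 28. */
		switch ((insn & 0x7c) >> 2) {
		case 28:
			return system_opcode_insn(vcpu, run, insn);
		default:
			return truly_illegal_insn(vcpu, run, insn);
		}
	}

Since SYSTEM is the only opcode with a dedicated handler in the patch,
the 32-entry table collapses to a single case plus a default.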
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index efc06198c259..fbc04fe335ad 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -12,6 +12,9 @@
 #include <linux/kvm_host.h>
 #include <asm/csr.h>
 
+#define INSN_MASK_WFI		0xffffff00
+#define INSN_MATCH_WFI		0x10500000
+
 #define INSN_MATCH_LB		0x3
 #define INSN_MASK_LB		0x707f
 #define INSN_MATCH_LH		0x1003
@@ -179,6 +182,87 @@ static ulong get_insn(struct kvm_vcpu *vcpu)
 	return val;
 }
 
+typedef int (*illegal_insn_func)(struct kvm_vcpu *vcpu,
+				 struct kvm_run *run,
+				 ulong insn);
+
+static int truly_illegal_insn(struct kvm_vcpu *vcpu,
+			      struct kvm_run *run,
+			      ulong insn)
+{
+	/* TODO: Redirect trap to Guest VCPU */
+	return -ENOTSUPP;
+}
+
+static int system_opcode_insn(struct kvm_vcpu *vcpu,
+			      struct kvm_run *run,
+			      ulong insn)
+{
+	if ((insn & INSN_MASK_WFI) == INSN_MATCH_WFI) {
+		vcpu->stat.wfi_exit_stat++;
+		if (!kvm_arch_vcpu_runnable(vcpu)) {
+			srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+			kvm_vcpu_block(vcpu);
+			vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+		}
+		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
+		return 1;
+	}
+
+	return truly_illegal_insn(vcpu, run, insn);
+}
+
+static illegal_insn_func illegal_insn_table[32] = {
+	truly_illegal_insn, /* 0 */
+	truly_illegal_insn, /* 1 */
+	truly_illegal_insn, /* 2 */
+	truly_illegal_insn, /* 3 */
+	truly_illegal_insn, /* 4 */
+	truly_illegal_insn, /* 5 */
+	truly_illegal_insn, /* 6 */
+	truly_illegal_insn, /* 7 */
+	truly_illegal_insn, /* 8 */
+	truly_illegal_insn, /* 9 */
+	truly_illegal_insn, /* 10 */
+	truly_illegal_insn, /* 11 */
+	truly_illegal_insn, /* 12 */
+	truly_illegal_insn, /* 13 */
+	truly_illegal_insn, /* 14 */
+	truly_illegal_insn, /* 15 */
+	truly_illegal_insn, /* 16 */
+	truly_illegal_insn, /* 17 */
+	truly_illegal_insn, /* 18 */
+	truly_illegal_insn, /* 19 */
+	truly_illegal_insn, /* 20 */
+	truly_illegal_insn, /* 21 */
+	truly_illegal_insn, /* 22 */
+	truly_illegal_insn, /* 23 */
+	truly_illegal_insn, /* 24 */
+	truly_illegal_insn, /* 25 */
+	truly_illegal_insn, /* 26 */
+	truly_illegal_insn, /* 27 */
+	system_opcode_insn, /* 28 */
+	truly_illegal_insn, /* 29 */
+	truly_illegal_insn, /* 30 */
+	truly_illegal_insn  /* 31 */
+};
+
+static int illegal_inst_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
+			      unsigned long stval)
+{
+	ulong insn = stval;
+
+	if (unlikely((insn & 3) != 3)) {
+		if (insn == 0)
+			insn = get_insn(vcpu);
+		if ((insn & 3) != 3)
+			return truly_illegal_insn(vcpu, run, insn);
+	}
+
+	return illegal_insn_table[(insn & 0x7c) >> 2](vcpu, run, insn);
+}
+
 static int emulate_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
 			unsigned long fault_addr)
 {
@@ -439,6 +523,10 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	ret = -EFAULT;
 	run->exit_reason = KVM_EXIT_UNKNOWN;
 	switch (scause) {
+	case EXC_INST_ILLEGAL:
+		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
+			ret = illegal_inst_fault(vcpu, run, stval);
+		break;
 	case EXC_INST_PAGE_FAULT:
 	case EXC_LOAD_PAGE_FAULT:
 	case EXC_STORE_PAGE_FAULT:
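A note on the INSN_MASK_WFI/INSN_MATCH_WFI pair: the canonical WFI
encoding is 0x10500073 (funct12 0x105, rs1 = rd = 0, opcode 0x73), so
masking with 0xffffff00 compares everything above the low byte. The
small self-contained sketch below demonstrates the match check and the
length calculation used to advance sepc past the trapped instruction;
the INSN_LEN macro here is an assumption mirroring what the kernel
helper presumably does (RISC-V 16-bit compressed instructions have low
bits != 0b11, full 32-bit instructions have low bits == 0b11):

	#include <stdint.h>
	#include <stdio.h>

	#define INSN_MASK_WFI	0xffffff00
	#define INSN_MATCH_WFI	0x10500000

	/* Assumed equivalent of the kernel's instruction-length helper. */
	#define INSN_LEN(insn)	((((insn) & 0x3) < 0x3) ? 2 : 4)

	int main(void)
	{
		uint32_t wfi  = 0x10500073;	/* canonical WFI encoding */
		uint32_t cnop = 0x0001;		/* C.NOP, a 16-bit compressed insn */

		/* WFI matches and is 4 bytes long; C.NOP matches neither. */
		printf("wfi:   match=%d len=%d\n",
		       (wfi & INSN_MASK_WFI) == INSN_MATCH_WFI, INSN_LEN(wfi));
		printf("c.nop: match=%d len=%d\n",
		       (cnop & INSN_MASK_WFI) == INSN_MATCH_WFI, INSN_LEN(cnop));
		return 0;
	}

On a WFI match, the handler in the patch advances
vcpu->arch.guest_context.sepc by INSN_LEN(insn), so the guest resumes
at the instruction after WFI once the blocked VCPU is woken up.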