Message ID | 148174556351.13485.18084576305677228092.stgit@brijesh-build-machine (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
> +++ b/arch/x86/kvm/x86.c > @@ -4420,6 +4420,21 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, > } > EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); > > +static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva, > + gpa_t gpa, bool write) > +{ > + /* For APIC access vmexit */ > + if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) > + return 1; > + > + if (vcpu_match_mmio_gpa(vcpu, gpa)) { > + trace_vcpu_match_mmio(gva, gpa, write, true); > + return 1; > + } > + > + return 0; > +} I think I'd prefer that in a separate patch. But I don't have any strong feelings about this. > + > static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, > gpa_t *gpa, struct x86_exception *exception, > bool write) > @@ -4446,16 +4461,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, > if (*gpa == UNMAPPED_GVA) > return -1; > > - /* For APIC access vmexit */ > - if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) > - return 1; > - > - if (vcpu_match_mmio_gpa(vcpu, *gpa)) { > - trace_vcpu_match_mmio(gva, *gpa, write, true); > - return 1; > - } > - > - return 0; > + return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write); > } > > int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, > @@ -4552,6 +4558,22 @@ static int emulator_read_write_onepage(unsigned long addr, void *val, > int handled, ret; > bool write = ops->write; > struct kvm_mmio_fragment *frag; > + struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; > + > + /* > + * If the exit was due to a NPF we may already have a GPA. > + * If the GPA is present, use it to avoid the GVA to GPA table walk. > + * Note, this cannot be used on string operations since string > + * operation using rep will only have the initial GPA from the NPF > + * occurred. 
> + */ I was wondering if it would make sense to get rid of gpa_available and rather define a new function: bool exception_gpa_valid(struct kvm_vcpu) { // check if svm // check if exit code is NPF // check ctxt } Then you could move the whole comment into that function. Looks good to me in general.
On 15/12/2016 13:42, David Hildenbrand wrote: > >> +++ b/arch/x86/kvm/x86.c >> @@ -4420,6 +4420,21 @@ int kvm_write_guest_virt_system(struct >> x86_emulate_ctxt *ctxt, >> } >> EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); >> >> +static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva, >> + gpa_t gpa, bool write) >> +{ >> + /* For APIC access vmexit */ >> + if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) >> + return 1; >> + >> + if (vcpu_match_mmio_gpa(vcpu, gpa)) { >> + trace_vcpu_match_mmio(gva, gpa, write, true); >> + return 1; >> + } >> + >> + return 0; >> +} > > I think I'd prefer that in a separate patch. But I don't have any > strong feelings about this. > >> + >> static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long >> gva, >> gpa_t *gpa, struct x86_exception *exception, >> bool write) >> @@ -4446,16 +4461,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu >> *vcpu, unsigned long gva, >> if (*gpa == UNMAPPED_GVA) >> return -1; >> >> - /* For APIC access vmexit */ >> - if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) >> - return 1; >> - >> - if (vcpu_match_mmio_gpa(vcpu, *gpa)) { >> - trace_vcpu_match_mmio(gva, *gpa, write, true); >> - return 1; >> - } >> - >> - return 0; >> + return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write); >> } >> >> int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, >> @@ -4552,6 +4558,22 @@ static int emulator_read_write_onepage(unsigned >> long addr, void *val, >> int handled, ret; >> bool write = ops->write; >> struct kvm_mmio_fragment *frag; >> + struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; >> + >> + /* >> + * If the exit was due to a NPF we may already have a GPA. >> + * If the GPA is present, use it to avoid the GVA to GPA table walk. >> + * Note, this cannot be used on string operations since string >> + * operation using rep will only have the initial GPA from the NPF >> + * occurred. 
>> + */ > > I was wondering if it would make sense to get rid of gpa_available and > rather define a new function: > > bool exception_gpa_valid(struct kvm_vcpu) > { > // check if svm > // check if exit code is NPF > // check ctxt > } No, this would be a layering violation. The emulator ops don't know about svm and exit codes (and in fact it's trivial to implement this optimization for vmx, with a slightly different logic), so we need to have gpa_available. Paolo > Then you could move the whole comment into that function. > > > Looks good to me in general. > -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
>>> + * If the exit was due to a NPF we may already have a GPA. >>> + * If the GPA is present, use it to avoid the GVA to GPA table walk. >>> + * Note, this cannot be used on string operations since string >>> + * operation using rep will only have the initial GPA from the NPF >>> + * occurred. >>> + */ >> >> I was wondering if it would make sense to get rid of gpa_available and >> rather define a new function: >> >> bool exception_gpa_valid(struct kvm_vcpu) >> { >> // check if svm >> // check if exit code is NPF >> // check ctxt >> } > > No, this would be a layering violation. The emulator ops don't know > about svm and exit codes (and in fact it's trivial to implement this > optimization for vmx, with a slightly different logic), so we need to > have gpa_available. I was rather thinking about adding a vmx/svm-independent callback, which would return false for vmx for now. I just saw the variable and was wondering if it is really necessary.
On 15/12/2016 14:09, David Hildenbrand wrote: >>> >>> bool exception_gpa_valid(struct kvm_vcpu) >>> { >>> // check if svm >>> // check if exit code is NPF >>> // check ctxt >>> } >> >> No, this would be a layering violation. The emulator ops don't know >> about svm and exit codes (and in fact it's trivial to implement this >> optimization for vmx, with a slightly different logic), so we need to >> have gpa_available. > > I was rather thinking about adding an vmx/svm independent callback, > which would return false for vmx for now. I just saw the variable > and was wondering if it is really necessary. The variable would probably just move into struct {vmx,svm}_vcpu. Maybe you could access the VMX/SVM exit code directly, but doing that worries me a bit... Paolo -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index e9cd7be..3e8c287 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -441,5 +441,6 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt, int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq); void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt); void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt); +bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt); #endif /* _ASM_X86_KVM_X86_EMULATE_H */ diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 77cb3f9..fd5b1c8 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -668,6 +668,9 @@ struct kvm_vcpu_arch { int pending_ioapic_eoi; int pending_external_vector; + + /* GPA available (AMD only) */ + bool gpa_available; }; struct kvm_lpage_info { diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index a3ce9d2..1b9bb01 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -171,6 +171,7 @@ #define NearBranch ((u64)1 << 52) /* Near branches */ #define No16 ((u64)1 << 53) /* No 16 bit operand */ #define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */ +#define TwoMemOp ((u64)1 << 55) /* Instruction has two memory operand */ #define DstXacc (DstAccLo | SrcAccHi | SrcWrite) @@ -4104,7 +4105,7 @@ static const struct opcode group1[] = { }; static const struct opcode group1A[] = { - I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N, + I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N, }; static const struct opcode group2[] = { @@ -4142,7 +4143,7 @@ static const struct opcode group5[] = { I(SrcMemFAddr | ImplicitOps, em_call_far), I(SrcMem | NearBranch, em_jmp_abs), I(SrcMemFAddr | ImplicitOps, em_jmp_far), - I(SrcMem | Stack, em_push), D(Undefined), + I(SrcMem | Stack | TwoMemOp, em_push), 
D(Undefined), }; static const struct opcode group6[] = { @@ -4360,8 +4361,8 @@ static const struct opcode opcode_table[256] = { /* 0xA0 - 0xA7 */ I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov), - I2bv(SrcSI | DstDI | Mov | String, em_mov), - F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r), + I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov), + F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r), /* 0xA8 - 0xAF */ F2bv(DstAcc | SrcImm | NoWrite, em_test), I2bv(SrcAcc | DstDI | Mov | String, em_mov), @@ -5483,3 +5484,14 @@ void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt) { writeback_registers(ctxt); } + +bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt) +{ + if (ctxt->rep_prefix && (ctxt->d & String)) + return false; + + if (ctxt->d & TwoMemOp) + return false; + + return true; +} diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 5e64e656..e9b3555 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -4188,6 +4188,8 @@ static int handle_exit(struct kvm_vcpu *vcpu) trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM); + vcpu->arch.gpa_available = (exit_code == SVM_EXIT_NPF); + if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE)) vcpu->arch.cr0 = svm->vmcb->save.cr0; if (npt_enabled) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c30f62dc..26c8b93 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4420,6 +4420,21 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, } EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); +static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva, + gpa_t gpa, bool write) +{ + /* For APIC access vmexit */ + if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) + return 1; + + if (vcpu_match_mmio_gpa(vcpu, gpa)) { + trace_vcpu_match_mmio(gva, gpa, write, true); + return 1; + } + + return 0; +} + static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, gpa_t *gpa, struct x86_exception 
*exception, bool write) @@ -4446,16 +4461,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, if (*gpa == UNMAPPED_GVA) return -1; - /* For APIC access vmexit */ - if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) - return 1; - - if (vcpu_match_mmio_gpa(vcpu, *gpa)) { - trace_vcpu_match_mmio(gva, *gpa, write, true); - return 1; - } - - return 0; + return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write); } int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, @@ -4552,6 +4558,22 @@ static int emulator_read_write_onepage(unsigned long addr, void *val, int handled, ret; bool write = ops->write; struct kvm_mmio_fragment *frag; + struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; + + /* + * If the exit was due to a NPF we may already have a GPA. + * If the GPA is present, use it to avoid the GVA to GPA table walk. + * Note, this cannot be used on string operations since string + * operation using rep will only have the initial GPA from the NPF + * occurred. + */ + if (vcpu->arch.gpa_available && + emulator_can_use_gpa(ctxt) && + vcpu_is_mmio_gpa(vcpu, addr, exception->address, write) && + (addr & ~PAGE_MASK) == (exception->address & ~PAGE_MASK)) { + gpa = exception->address; + goto mmio; + } ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); @@ -5563,6 +5585,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, } restart: + /* Save the faulting GPA (cr2) in the address field */ + ctxt->exception.address = cr2; + r = x86_emulate_insn(ctxt); if (r == EMULATION_INTERCEPTED)