Message ID | 20190719204110.18306-4-sean.j.christopherson@intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | KVM: VMX: Optimize VMX instrs error/fault handling | expand |
On Fri, Jul 19, 2019 at 01:41:08PM -0700, Sean Christopherson wrote: > @@ -68,8 +67,22 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field) > { > unsigned long value; > > - asm volatile (__ex_clear("vmread %1, %0", "%k0") > - : "=r"(value) : "r"(field)); > + asm volatile("1: vmread %2, %1\n\t" > + ".byte 0x3e\n\t" /* branch taken hint */ > + "ja 3f\n\t" > + "mov %2, %%" _ASM_ARG1 "\n\t" > + "xor %%" _ASM_ARG2 ", %%" _ASM_ARG2 "\n\t" > + "2: call vmread_error\n\t" > + "xor %k1, %k1\n\t" > + "3:\n\t" > + > + ".pushsection .fixup, \"ax\"\n\t" > + "4: mov %2, %%" _ASM_ARG1 "\n\t" > + "mov $1, %%" _ASM_ARG2 "\n\t" > + "jmp 2b\n\t" > + ".popsection\n\t" > + _ASM_EXTABLE(1b, 4b) > + : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc"); Was there a reason you didn't do the asm goto thing here like you did for the previous patch? That seemed cleaner, and needs less asm. I think the branch hints aren't needed -- they're ignored on modern processors. Ditto for the previous patch. Also please use named asm operands wherever you can, like "%[field]" instead of "%2". It helps a lot with readability.
On 28/07/19 21:36, Josh Poimboeuf wrote: > On Fri, Jul 19, 2019 at 01:41:08PM -0700, Sean Christopherson wrote: >> @@ -68,8 +67,22 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field) >> { >> unsigned long value; >> >> - asm volatile (__ex_clear("vmread %1, %0", "%k0") >> - : "=r"(value) : "r"(field)); >> + asm volatile("1: vmread %2, %1\n\t" >> + ".byte 0x3e\n\t" /* branch taken hint */ >> + "ja 3f\n\t" >> + "mov %2, %%" _ASM_ARG1 "\n\t" >> + "xor %%" _ASM_ARG2 ", %%" _ASM_ARG2 "\n\t" >> + "2: call vmread_error\n\t" >> + "xor %k1, %k1\n\t" >> + "3:\n\t" >> + >> + ".pushsection .fixup, \"ax\"\n\t" >> + "4: mov %2, %%" _ASM_ARG1 "\n\t" >> + "mov $1, %%" _ASM_ARG2 "\n\t" >> + "jmp 2b\n\t" >> + ".popsection\n\t" >> + _ASM_EXTABLE(1b, 4b) >> + : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc"); > > Was there a reason you didn't do the asm goto thing here like you did > for the previous patch? That seemed cleaner, and needs less asm. It's because asm goto doesn't support outputs. Paolo > I think the branch hints aren't needed -- they're ignored on modern > processors. Ditto for the previous patch. > > Also please use named asm operands wherever you can, like "%[field]" > instead of "%2". It helps a lot with readability. >
diff --git a/arch/x86/kvm/vmx/ops.h b/arch/x86/kvm/vmx/ops.h index 79e25d49d4d9..45eaedee2ac0 100644 --- a/arch/x86/kvm/vmx/ops.h +++ b/arch/x86/kvm/vmx/ops.h @@ -11,9 +11,8 @@ #include "vmcs.h" #define __ex(x) __kvm_handle_fault_on_reboot(x) -#define __ex_clear(x, reg) \ - ____kvm_handle_fault_on_reboot(x, "xor " reg ", " reg) +asmlinkage void vmread_error(unsigned long field, bool fault); void vmwrite_error(unsigned long field, unsigned long value); void vmclear_error(struct vmcs *vmcs, u64 phys_addr); void vmptrld_error(struct vmcs *vmcs, u64 phys_addr); @@ -68,8 +67,22 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field) { unsigned long value; - asm volatile (__ex_clear("vmread %1, %0", "%k0") - : "=r"(value) : "r"(field)); + asm volatile("1: vmread %2, %1\n\t" + ".byte 0x3e\n\t" /* branch taken hint */ + "ja 3f\n\t" + "mov %2, %%" _ASM_ARG1 "\n\t" + "xor %%" _ASM_ARG2 ", %%" _ASM_ARG2 "\n\t" + "2: call vmread_error\n\t" + "xor %k1, %k1\n\t" + "3:\n\t" + + ".pushsection .fixup, \"ax\"\n\t" + "4: mov %2, %%" _ASM_ARG1 "\n\t" + "mov $1, %%" _ASM_ARG2 "\n\t" + "jmp 2b\n\t" + ".popsection\n\t" + _ASM_EXTABLE(1b, 4b) + : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc"); return value; } diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 46689019ebf7..9acf3d6395d3 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -349,6 +349,14 @@ do { \ pr_warn_ratelimited(fmt); \ } while (0) +asmlinkage void vmread_error(unsigned long field, bool fault) +{ + if (fault) + kvm_spurious_fault(); + else + vmx_insn_failed("kvm: vmread failed: field=%lx\n", field); +} + noinline void vmwrite_error(unsigned long field, unsigned long value) { vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%d\n",