[v2,3/5] KVM: VMX: Add error handling to VMREAD helper

Message ID: 20190719204110.18306-4-sean.j.christopherson@intel.com (mailing list archive)
State: New, archived
Series: KVM: VMX: Optimize VMX instrs error/fault handling

Commit Message

Sean Christopherson July 19, 2019, 8:41 p.m. UTC
Now that VMREAD flows require a taken branch, courtesy of commit

  3901336ed9887 ("x86/kvm: Don't call kvm_spurious_fault() from .fixup")

bite the bullet and add full error handling to VMREAD, i.e. replace the
JMP added by __ex()/____kvm_handle_fault_on_reboot() with a hinted Jcc.

To minimize the code footprint, add a helper function, vmread_error(),
to handle both faults and failures so that the inline flow has a single
CALL.

Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx/ops.h | 21 +++++++++++++++++----
 arch/x86/kvm/vmx/vmx.c |  8 ++++++++
 2 files changed, 25 insertions(+), 4 deletions(-)

Comments

Josh Poimboeuf July 28, 2019, 7:36 p.m. UTC | #1
On Fri, Jul 19, 2019 at 01:41:08PM -0700, Sean Christopherson wrote:
> @@ -68,8 +67,22 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
>  {
>  	unsigned long value;
>  
> -	asm volatile (__ex_clear("vmread %1, %0", "%k0")
> -		      : "=r"(value) : "r"(field));
> +	asm volatile("1: vmread %2, %1\n\t"
> +		     ".byte 0x3e\n\t" /* branch taken hint */
> +		     "ja 3f\n\t"
> +		     "mov %2, %%" _ASM_ARG1 "\n\t"
> +		     "xor %%" _ASM_ARG2 ", %%" _ASM_ARG2 "\n\t"
> +		     "2: call vmread_error\n\t"
> +		     "xor %k1, %k1\n\t"
> +		     "3:\n\t"
> +
> +		     ".pushsection .fixup, \"ax\"\n\t"
> +		     "4: mov %2, %%" _ASM_ARG1 "\n\t"
> +		     "mov $1, %%" _ASM_ARG2 "\n\t"
> +		     "jmp 2b\n\t"
> +		     ".popsection\n\t"
> +		     _ASM_EXTABLE(1b, 4b)
> +		     : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");

Was there a reason you didn't do the asm goto thing here like you did
for the previous patch?  That seemed cleaner and needs less asm.

I think the branch hints aren't needed -- they're ignored on modern
processors.  Ditto for the previous patch.
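
For reference, the hint in question is the ".byte 0x3e" emitted ahead of
the "ja" in the patch: 0x3e is the DS segment-override prefix, which the
Pentium 4 interpreted as a static "branch taken" hint on a conditional
jump (0x2e, the CS prefix, meant "not taken").  Later CPUs decode but
ignore the prefix, so the hinted sequence behaves exactly like a bare "ja":

	.byte 0x3e	/* DS prefix; historically a "taken" hint */
	ja 3f		/* VMREAD succeeded: ZF and CF both clear */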

Also please use named asm operands wherever you can, like "%[field]"
instead of "%2".  It helps a lot with readability.
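
For illustration, a named-operand version of the VMREAD line might look
roughly like this (a sketch of the suggestion only, not the code that was
merged):

	asm volatile("vmread %[field], %[output]"
		     : [output] "=r" (value)
		     : [field] "r" (field)
		     : "cc");

Each "%[name]" in the template refers to the operand declared with the
matching "[name]" constraint, so the template no longer depends on the
positional operand numbering.
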
Paolo Bonzini July 29, 2019, 9:20 a.m. UTC | #2
On 28/07/19 21:36, Josh Poimboeuf wrote:
> On Fri, Jul 19, 2019 at 01:41:08PM -0700, Sean Christopherson wrote:
>> [...]
> 
> Was there a reason you didn't do the asm goto thing here like you did
> for the previous patch?  That seemed cleaner, and needs less asm.  

It's because asm goto doesn't support outputs.

Paolo
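
For illustration, the asm goto shape used for VMWRITE and friends would
need an output operand for VMREAD's result, which is exactly what GCC
rejected at the time (asm goto with outputs only arrived later, in GCC 11).
A hypothetical sketch of the form that would not compile; the label names
here are illustrative:

	static __always_inline unsigned long __vmcs_readl(unsigned long field)
	{
		unsigned long value;

		asm_volatile_goto("1: vmread %[field], %[output]\n\t"
				  "jna %l[do_fail]\n\t"
				  _ASM_EXTABLE(1b, %l[do_exception])
				  : [output] "=r" (value) /* rejected: asm goto forbade outputs */
				  : [field] "r" (field)
				  : "cc"
				  : do_fail, do_exception);
		return value;

	do_fail:
		vmread_error(field, false);	/* VMfail: CF or ZF set */
		return 0;

	do_exception:
		vmread_error(field, true);	/* faulted, treat as spurious */
		return 0;
	}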


Patch

diff --git a/arch/x86/kvm/vmx/ops.h b/arch/x86/kvm/vmx/ops.h
index 79e25d49d4d9..45eaedee2ac0 100644
--- a/arch/x86/kvm/vmx/ops.h
+++ b/arch/x86/kvm/vmx/ops.h
@@ -11,9 +11,8 @@ 
 #include "vmcs.h"
 
 #define __ex(x) __kvm_handle_fault_on_reboot(x)
-#define __ex_clear(x, reg) \
-	____kvm_handle_fault_on_reboot(x, "xor " reg ", " reg)
 
+asmlinkage void vmread_error(unsigned long field, bool fault);
 void vmwrite_error(unsigned long field, unsigned long value);
 void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
 void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
@@ -68,8 +67,22 @@  static __always_inline unsigned long __vmcs_readl(unsigned long field)
 {
 	unsigned long value;
 
-	asm volatile (__ex_clear("vmread %1, %0", "%k0")
-		      : "=r"(value) : "r"(field));
+	asm volatile("1: vmread %2, %1\n\t"
+		     ".byte 0x3e\n\t" /* branch taken hint */
+		     "ja 3f\n\t"
+		     "mov %2, %%" _ASM_ARG1 "\n\t"
+		     "xor %%" _ASM_ARG2 ", %%" _ASM_ARG2 "\n\t"
+		     "2: call vmread_error\n\t"
+		     "xor %k1, %k1\n\t"
+		     "3:\n\t"
+
+		     ".pushsection .fixup, \"ax\"\n\t"
+		     "4: mov %2, %%" _ASM_ARG1 "\n\t"
+		     "mov $1, %%" _ASM_ARG2 "\n\t"
+		     "jmp 2b\n\t"
+		     ".popsection\n\t"
+		     _ASM_EXTABLE(1b, 4b)
+		     : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
 	return value;
 }
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 46689019ebf7..9acf3d6395d3 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -349,6 +349,14 @@  do {					\
 	pr_warn_ratelimited(fmt);	\
 } while (0)
 
+asmlinkage void vmread_error(unsigned long field, bool fault)
+{
+	if (fault)
+		kvm_spurious_fault();
+	else
+		vmx_insn_failed("kvm: vmread failed: field=%lx\n", field);
+}
+
 noinline void vmwrite_error(unsigned long field, unsigned long value)
 {
 	vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%d\n",