
[resend] x86,kvm: Add a kernel parameter to disable PV spinlock

Message ID 20170905090306.gz52hlo7s2ybogoy@hirez.programming.kicks-ass.net (mailing list archive)
State New, archived

Commit Message

Peter Zijlstra Sept. 5, 2017, 9:03 a.m. UTC
On Tue, Sep 05, 2017 at 10:52:06AM +0200, Juergen Gross wrote:
> > Hmm, that might work. Could we somehow nop out that call when
> > !X86_FEATURE_HYPERVISOR? That would save native from having to do the
> > call and would be a win for everyone.
> 
> So in fact we want an "always false" shortcut for bare metal and for any
> virtualization environment selecting bare-metal behavior.
> 
> I'll give it a try.

Right, so for native I think you can do this:

Comments

Jürgen Groß Sept. 5, 2017, 9:11 a.m. UTC | #1
On 05/09/17 11:03, Peter Zijlstra wrote:
> On Tue, Sep 05, 2017 at 10:52:06AM +0200, Juergen Gross wrote:
>>> Hmm, that might work. Could we somehow nop out that call when
>>> !X86_FEATURE_HYPERVISOR? That would save native from having to do the
>>> call and would be a win for everyone.
>>
>> So in fact we want an "always false" shortcut for bare metal and for any
>> virtualization environment selecting bare-metal behavior.
>>
>> I'll give it a try.
> 
> Right, so for native I think you can do this:
> 
> 
> diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
> index 11aaf1eaa0e4..4e9839001291 100644
> --- a/arch/x86/kernel/paravirt_patch_64.c
> +++ b/arch/x86/kernel/paravirt_patch_64.c
> @@ -21,6 +21,7 @@ DEF_NATIVE(, mov64, "mov %rdi, %rax");
>  #if defined(CONFIG_PARAVIRT_SPINLOCKS)
>  DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
>  DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
> +DEF_NATIVE(pv_lock_ops, virt_spin_lock, "xor %rax, %rax");
>  #endif
>  
>  unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
> @@ -77,6 +78,13 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
>  				goto patch_site;
>  			}
>  			goto patch_default;
> +		case PARAVIRT_PATCH(pv_lock_ops.virt_spin_lock):
> +			if (!this_cpu_has(X86_FEATURE_HYPERVISOR)) {
> +				start = start_pv_lock_ops_virt_spin_lock;
> +				end   = end_pv_lock_ops_virt_spin_lock;
> +				goto patch_site;
> +			}
> +			goto patch_default;

I'd rather add a test to paravirt_patch_default(), similar to the
_paravirt_ident_* cases, so that the Xen (and possibly KVM) cases are
caught, too.

I just need to add _paravirt_zero_[32|64] functions I can use for bare
metal and the nopvspin cases.


Juergen
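
A minimal sketch of what Juergen describes, modeled on the existing
_paravirt_ident_* handling; apart from the _paravirt_zero_[32|64] names
taken from his mail, the template names and the paravirt_patch_default()
hunk below are illustrative, not code from this thread:

/* Zero-returning counterparts to _paravirt_ident_32/64, shared by
 * bare metal and the nopvspin case (sketch only). */
u32 _paravirt_zero_32(u32 x)
{
	return 0;
}

u64 _paravirt_zero_64(u64 x)
{
	return 0;
}

/* Native templates, next to the existing mov32/mov64 ones in
 * arch/x86/kernel/paravirt_patch_{32,64}.c: */
DEF_NATIVE(, zero32, "xor %eax, %eax");
DEF_NATIVE(, zero64, "xor %rax, %rax");

/* In paravirt_patch_default(), alongside the _paravirt_ident_* tests,
 * patch such call sites down to the inline xor: */
else if (opfunc == _paravirt_zero_32)
	ret = paravirt_patch_insns(insnbuf, len,
				   start__zero32, end__zero32);
else if (opfunc == _paravirt_zero_64)
	ret = paravirt_patch_insns(insnbuf, len,
				   start__zero64, end__zero64);

With something like that in place, native_patch() needs no
virt_spin_lock special case at all: bare metal and any guest wanting
bare-metal behavior simply set the op to _paravirt_zero_*, and the
default patching path shrinks the call to a single xor.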

Patch

diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 11aaf1eaa0e4..4e9839001291 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -21,6 +21,7 @@ DEF_NATIVE(, mov64, "mov %rdi, %rax");
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
 DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
 DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
+DEF_NATIVE(pv_lock_ops, virt_spin_lock, "xor %rax, %rax");
 #endif
 
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
@@ -77,6 +78,13 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 				goto patch_site;
 			}
 			goto patch_default;
+		case PARAVIRT_PATCH(pv_lock_ops.virt_spin_lock):
+			if (!this_cpu_has(X86_FEATURE_HYPERVISOR)) {
+				start = start_pv_lock_ops_virt_spin_lock;
+				end   = end_pv_lock_ops_virt_spin_lock;
+				goto patch_site;
+			}
+			goto patch_default;
 #endif
 
 	default:
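
For reference, DEF_NATIVE() in that file is (roughly, as of the
4.13-era arch/x86/kernel/paravirt_patch_64.c) a macro that emits the
native instruction sequence between start_/end_ marker symbols, which
native_patch() then copies over the paravirt call site:

#define DEF_NATIVE(ops, name, code)					\
	extern const char start_##ops##_##name[], end_##ops##_##name[]; \
	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")

Patching a virt_spin_lock call site to "xor %rax, %rax" thus makes it
return false inline, so bare metal falls through to the native
queued-spinlock path without paying for an indirect call.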