
[03/13] x86/paravirt: Convert native patch assembly code strings to macros

Message ID e4cea2b8aa8ca23122d9c807784ca62ee6cbbff8.1507128293.git.jpoimboe@redhat.com (mailing list archive)
State New, archived

Commit Message

Josh Poimboeuf Oct. 4, 2017, 3:58 p.m. UTC
Convert the hard-coded native patch assembly code strings to macros to
facilitate sharing common code between 32-bit and 64-bit.

These macros will also be used by a future patch which requires the GCC
extended asm syntax of two '%' characters instead of one when specifying
a register name.

Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
---
 arch/x86/include/asm/special_insns.h | 24 ++++++++++++++++++++++++
 arch/x86/kernel/paravirt_patch_32.c  | 21 +++++++++++----------
 arch/x86/kernel/paravirt_patch_64.c  | 29 +++++++++++++++--------------
 3 files changed, 50 insertions(+), 24 deletions(-)
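
For readers unsure why the '%' count matters, here is a minimal sketch (illustration only, not part of the patch; the function and label names are made up): the DEF_NATIVE() strings are emitted as basic asm, which the assembler sees verbatim, so a single '%' names a register. GCC extended asm, which the future patch reportedly needs, treats '%' as the start of an operand reference such as %0, so a literal register name has to be doubled to '%%'.

/*
 * Basic asm: top-level asm statements are always basic asm and the
 * template is passed through verbatim, so "%rax" works as-is (64-bit).
 * This is roughly how DEF_NATIVE() emits the strings today.
 */
asm("sketch_save_fl_start:\n"
    "\tpushf; pop %rax\n"
    "sketch_save_fl_end:");

/*
 * Extended asm: '%' introduces an operand (%0 below), so a literal
 * register such as %%cr2 needs two '%' characters.
 */
static inline unsigned long read_cr2_sketch(void)
{
	unsigned long val;

	asm volatile("mov %%cr2, %0" : "=r" (val));
	return val;
}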

Comments

Jürgen Groß Oct. 25, 2017, 9:46 a.m. UTC | #1
On 04/10/17 17:58, Josh Poimboeuf wrote:
> Convert the hard-coded native patch assembly code strings to macros to
> facilitate sharing common code between 32-bit and 64-bit.
> 
> These macros will also be used by a future patch which requires the GCC
> extended asm syntax of two '%' characters instead of one when specifying
> a register name.
> 
> Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>

Reviewed-by: Juergen Gross <jgross@suse.com>

Mind adding another patch to merge the now nearly identical
paravirt_patch_32.c and paravirt_patch_64.c either into paravirt.c
or paravirt_patch.c? This would require only a few #ifdefs.


Juergen
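
A rough sketch of how that merge might look, assembled only from the DEF_NATIVE() lines in this patch (hypothetical arch/x86/kernel/paravirt_patch.c, not a submitted change): the NATIVE_* macros already hide the 32/64-bit register differences, so only the entries that exist on one architecture still need an #ifdef.

#include <asm/paravirt.h>
#include <asm/asm-offsets.h>
#include <asm/special_insns.h>
#include <linux/stringify.h>

/* Common to 32-bit and 64-bit once the NATIVE_* macros are used. */
DEF_NATIVE(pv_irq_ops,	save_fl,		NATIVE_SAVE_FL);
DEF_NATIVE(pv_irq_ops,	restore_fl,		NATIVE_RESTORE_FL);
DEF_NATIVE(pv_irq_ops,	irq_disable,		NATIVE_IRQ_DISABLE);
DEF_NATIVE(pv_irq_ops,	irq_enable,		NATIVE_IRQ_ENABLE);
DEF_NATIVE(pv_mmu_ops,	read_cr2,		NATIVE_READ_CR2);
DEF_NATIVE(pv_mmu_ops,	read_cr3,		NATIVE_READ_CR3);
DEF_NATIVE(pv_mmu_ops,	write_cr3,		NATIVE_WRITE_CR3);

#ifdef CONFIG_X86_64
/* 64-bit-only entries from paravirt_patch_64.c. */
DEF_NATIVE(pv_mmu_ops,	flush_tlb_single,	NATIVE_FLUSH_TLB_SINGLE);
DEF_NATIVE(pv_cpu_ops,	usergs_sysret64,	NATIVE_USERGS_SYSRET64);
DEF_NATIVE(pv_cpu_ops,	swapgs,			NATIVE_SWAPGS);
DEF_NATIVE(,		mov32,			NATIVE_IDENTITY_32);
DEF_NATIVE(,		mov64,			NATIVE_IDENTITY);
#else
/* 32-bit-only entry from paravirt_patch_32.c. */
DEF_NATIVE(pv_cpu_ops,	iret,			NATIVE_IRET);
#endif

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(pv_lock_ops,	queued_spin_unlock,	NATIVE_QUEUED_SPIN_UNLOCK);
DEF_NATIVE(pv_lock_ops,	vcpu_is_preempted,	NATIVE_ZERO);
#endif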

> ---
>  arch/x86/include/asm/special_insns.h | 24 ++++++++++++++++++++++++
>  arch/x86/kernel/paravirt_patch_32.c  | 21 +++++++++++----------
>  arch/x86/kernel/paravirt_patch_64.c  | 29 +++++++++++++++--------------
>  3 files changed, 50 insertions(+), 24 deletions(-)
> 
> diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
> index ac402c6fc24b..0549c5f2c1b3 100644
> --- a/arch/x86/include/asm/special_insns.h
> +++ b/arch/x86/include/asm/special_insns.h
> @@ -6,6 +6,30 @@
>  
>  #include <asm/nops.h>
>  
> +#ifdef CONFIG_X86_64
> +# define _REG_ARG1			"%rdi"
> +# define NATIVE_IDENTITY_32		"mov %edi, %eax"
> +# define NATIVE_USERGS_SYSRET64		"swapgs; sysretq"
> +#else
> +# define _REG_ARG1			"%eax"
> +#endif
> +
> +#define _REG_RET			"%" _ASM_AX
> +
> +#define NATIVE_ZERO			"xor " _REG_ARG1 ", " _REG_ARG1
> +#define NATIVE_IDENTITY			"mov " _REG_ARG1 ", " _REG_RET
> +#define NATIVE_SAVE_FL			"pushf; pop " _REG_RET
> +#define NATIVE_RESTORE_FL		"push " _REG_ARG1 "; popf"
> +#define NATIVE_IRQ_DISABLE		"cli"
> +#define NATIVE_IRQ_ENABLE		"sti"
> +#define NATIVE_READ_CR2			"mov %cr2, " _REG_RET
> +#define NATIVE_READ_CR3			"mov %cr3, " _REG_RET
> +#define NATIVE_WRITE_CR3		"mov " _REG_ARG1 ", %cr3"
> +#define NATIVE_FLUSH_TLB_SINGLE		"invlpg (" _REG_ARG1 ")"
> +#define NATIVE_SWAPGS			"swapgs"
> +#define NATIVE_IRET			"iret"
> +#define NATIVE_QUEUED_SPIN_UNLOCK	"movb $0, (" _REG_ARG1 ")"
> +
>  /*
>   * Volatile isn't enough to prevent the compiler from reordering the
>   * read/write functions for the control registers and messing everything up.
> diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
> index 553acbbb4d32..c9c6106ae714 100644
> --- a/arch/x86/kernel/paravirt_patch_32.c
> +++ b/arch/x86/kernel/paravirt_patch_32.c
> @@ -1,17 +1,18 @@
>  #include <asm/paravirt.h>
> +#include <asm/special_insns.h>
>  
> -DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
> -DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
> -DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
> -DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
> -DEF_NATIVE(pv_cpu_ops, iret, "iret");
> -DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
> -DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
> -DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
> +DEF_NATIVE(pv_irq_ops,	save_fl,		NATIVE_SAVE_FL);
> +DEF_NATIVE(pv_irq_ops,	restore_fl,		NATIVE_RESTORE_FL);
> +DEF_NATIVE(pv_irq_ops,	irq_disable,		NATIVE_IRQ_DISABLE);
> +DEF_NATIVE(pv_irq_ops,	irq_enable,		NATIVE_IRQ_ENABLE);
> +DEF_NATIVE(pv_cpu_ops,	iret,			NATIVE_IRET);
> +DEF_NATIVE(pv_mmu_ops,	read_cr2,		NATIVE_READ_CR2);
> +DEF_NATIVE(pv_mmu_ops,	read_cr3,		NATIVE_READ_CR3);
> +DEF_NATIVE(pv_mmu_ops,	write_cr3,		NATIVE_WRITE_CR3);
>  
>  #if defined(CONFIG_PARAVIRT_SPINLOCKS)
> -DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
> -DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax");
> +DEF_NATIVE(pv_lock_ops,	queued_spin_unlock,	NATIVE_QUEUED_SPIN_UNLOCK);
> +DEF_NATIVE(pv_lock_ops,	vcpu_is_preempted,	NATIVE_ZERO);
>  #endif
>  
>  unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
> diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
> index 0a1ba3f80cbf..0aa232edd670 100644
> --- a/arch/x86/kernel/paravirt_patch_64.c
> +++ b/arch/x86/kernel/paravirt_patch_64.c
> @@ -1,25 +1,26 @@
>  #include <asm/paravirt.h>
>  #include <asm/asm-offsets.h>
> +#include <asm/special_insns.h>
>  #include <linux/stringify.h>
>  
> -DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
> -DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
> -DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
> -DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
> -DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
> -DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
> -DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
> -DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
> +DEF_NATIVE(pv_irq_ops,	save_fl,		NATIVE_SAVE_FL);
> +DEF_NATIVE(pv_irq_ops,	restore_fl,		NATIVE_RESTORE_FL);
> +DEF_NATIVE(pv_irq_ops,	irq_disable,		NATIVE_IRQ_DISABLE);
> +DEF_NATIVE(pv_irq_ops,	irq_enable,		NATIVE_IRQ_ENABLE);
> +DEF_NATIVE(pv_mmu_ops,	read_cr2,		NATIVE_READ_CR2);
> +DEF_NATIVE(pv_mmu_ops,	read_cr3,		NATIVE_READ_CR3);
> +DEF_NATIVE(pv_mmu_ops,	write_cr3,		NATIVE_WRITE_CR3);
> +DEF_NATIVE(pv_mmu_ops,	flush_tlb_single,	NATIVE_FLUSH_TLB_SINGLE);
>  
> -DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
> -DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
> +DEF_NATIVE(pv_cpu_ops,	usergs_sysret64,	NATIVE_USERGS_SYSRET64);
> +DEF_NATIVE(pv_cpu_ops,	swapgs,			NATIVE_SWAPGS);
>  
> -DEF_NATIVE(, mov32, "mov %edi, %eax");
> -DEF_NATIVE(, mov64, "mov %rdi, %rax");
> +DEF_NATIVE(,		mov32,			NATIVE_IDENTITY_32);
> +DEF_NATIVE(,		mov64,			NATIVE_IDENTITY);
>  
>  #if defined(CONFIG_PARAVIRT_SPINLOCKS)
> -DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
> -DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
> +DEF_NATIVE(pv_lock_ops,	queued_spin_unlock,	NATIVE_QUEUED_SPIN_UNLOCK);
> +DEF_NATIVE(pv_lock_ops,	vcpu_is_preempted,	NATIVE_ZERO);
>  #endif
>  
>  unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
>

Josh Poimboeuf Nov. 16, 2017, 9:04 p.m. UTC | #2
On Wed, Oct 25, 2017 at 11:46:18AM +0200, Juergen Gross wrote:
> On 04/10/17 17:58, Josh Poimboeuf wrote:
> > Convert the hard-coded native patch assembly code strings to macros to
> > facilitate sharing common code between 32-bit and 64-bit.
> > 
> > These macros will also be used by a future patch which requires the GCC
> > extended asm syntax of two '%' characters instead of one when specifying
> > a register name.
> > 
> > Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
> 
> Reviewed-by: Juergen Gross <jgross@suse.com>
> 
> Mind adding another patch to merge the now nearly identical
> paravirt_patch_32.c and paravirt_patch_64.c either into paravirt.c
> or paravirt_patch.c? This would require only a few #ifdefs.

Good idea, will do.

Borislav Petkov Nov. 17, 2017, 6:07 p.m. UTC | #3
On Wed, Oct 04, 2017 at 10:58:24AM -0500, Josh Poimboeuf wrote:
> Convert the hard-coded native patch assembly code strings to macros to
> facilitate sharing common code between 32-bit and 64-bit.
> 
> These macros will also be used by a future patch which requires the GCC
> extended asm syntax of two '%' characters instead of one when specifying
> a register name.
> 
> Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
> ---
>  arch/x86/include/asm/special_insns.h | 24 ++++++++++++++++++++++++
>  arch/x86/kernel/paravirt_patch_32.c  | 21 +++++++++++----------
>  arch/x86/kernel/paravirt_patch_64.c  | 29 +++++++++++++++--------------
>  3 files changed, 50 insertions(+), 24 deletions(-)
> 
> diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
> index ac402c6fc24b..0549c5f2c1b3 100644
> --- a/arch/x86/include/asm/special_insns.h
> +++ b/arch/x86/include/asm/special_insns.h
> @@ -6,6 +6,30 @@
>  
>  #include <asm/nops.h>
>  
> +#ifdef CONFIG_X86_64
> +# define _REG_ARG1			"%rdi"
> +# define NATIVE_IDENTITY_32		"mov %edi, %eax"

Yeah, that "identity" looks strange. How about NATIVE_NOOP and
NATIVE_NOOP_32 ?

> +# define NATIVE_USERGS_SYSRET64		"swapgs; sysretq"
> +#else
> +# define _REG_ARG1			"%eax"
> +#endif
> +
> +#define _REG_RET			"%" _ASM_AX
> +
> +#define NATIVE_ZERO			"xor " _REG_ARG1 ", " _REG_ARG1

NATIVE_ZERO_OUT

I guess. NATIVE_ZERO reads like the native representation of 0 :-)

...

Jürgen Groß Nov. 17, 2017, 7:10 p.m. UTC | #4
On 17/11/17 19:07, Borislav Petkov wrote:
> On Wed, Oct 04, 2017 at 10:58:24AM -0500, Josh Poimboeuf wrote:
>> Convert the hard-coded native patch assembly code strings to macros to
>> facilitate sharing common code between 32-bit and 64-bit.
>>
>> These macros will also be used by a future patch which requires the GCC
>> extended asm syntax of two '%' characters instead of one when specifying
>> a register name.
>>
>> Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
>> ---
>>  arch/x86/include/asm/special_insns.h | 24 ++++++++++++++++++++++++
>>  arch/x86/kernel/paravirt_patch_32.c  | 21 +++++++++++----------
>>  arch/x86/kernel/paravirt_patch_64.c  | 29 +++++++++++++++--------------
>>  3 files changed, 50 insertions(+), 24 deletions(-)
>>
>> diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
>> index ac402c6fc24b..0549c5f2c1b3 100644
>> --- a/arch/x86/include/asm/special_insns.h
>> +++ b/arch/x86/include/asm/special_insns.h
>> @@ -6,6 +6,30 @@
>>  
>>  #include <asm/nops.h>
>>  
>> +#ifdef CONFIG_X86_64
>> +# define _REG_ARG1			"%rdi"
>> +# define NATIVE_IDENTITY_32		"mov %edi, %eax"
> 
> Yeah, that "identity" looks strange. How about NATIVE_NOOP and
> NATIVE_NOOP_32 ?

Those are not NOPs. They return the identical value which was passed to
them. So identity isn't a bad name after all.

> 
>> +# define NATIVE_USERGS_SYSRET64		"swapgs; sysretq"
>> +#else
>> +# define _REG_ARG1			"%eax"
>> +#endif
>> +
>> +#define _REG_RET			"%" _ASM_AX
>> +
>> +#define NATIVE_ZERO			"xor " _REG_ARG1 ", " _REG_ARG1
> 
> NATIVE_ZERO_OUT
> 
> I guess. NATIVE_ZERO reads like the native representation of 0 :-)

NATIVE_ZERO_ARG1?


Juergen
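
To make the naming argument concrete, the C-level readings under discussion look roughly like this (illustration only; the function names are invented):

/*
 * What "mov %edi, %eax" / NATIVE_IDENTITY_32 implements: hand the first
 * argument back unchanged as the return value -- the identity function.
 */
static unsigned int identity_sketch(unsigned int x)
{
	return x;
}

/*
 * A genuine no-op, by contrast, computes nothing and leaves the return
 * register alone, which is not what the patched-in code does.
 */
static void noop_sketch(void)
{
}
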
Josh Poimboeuf Nov. 17, 2017, 7:42 p.m. UTC | #5
On Fri, Nov 17, 2017 at 08:10:13PM +0100, Juergen Gross wrote:
> On 17/11/17 19:07, Borislav Petkov wrote:
> > On Wed, Oct 04, 2017 at 10:58:24AM -0500, Josh Poimboeuf wrote:
> >> Convert the hard-coded native patch assembly code strings to macros to
> >> facilitate sharing common code between 32-bit and 64-bit.
> >>
> >> These macros will also be used by a future patch which requires the GCC
> >> extended asm syntax of two '%' characters instead of one when specifying
> >> a register name.
> >>
> >> Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
> >> ---
> >>  arch/x86/include/asm/special_insns.h | 24 ++++++++++++++++++++++++
> >>  arch/x86/kernel/paravirt_patch_32.c  | 21 +++++++++++----------
> >>  arch/x86/kernel/paravirt_patch_64.c  | 29 +++++++++++++++--------------
> >>  3 files changed, 50 insertions(+), 24 deletions(-)
> >>
> >> diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
> >> index ac402c6fc24b..0549c5f2c1b3 100644
> >> --- a/arch/x86/include/asm/special_insns.h
> >> +++ b/arch/x86/include/asm/special_insns.h
> >> @@ -6,6 +6,30 @@
> >>  
> >>  #include <asm/nops.h>
> >>  
> >> +#ifdef CONFIG_X86_64
> >> +# define _REG_ARG1			"%rdi"
> >> +# define NATIVE_IDENTITY_32		"mov %edi, %eax"
> > 
> > Yeah, that "identity" looks strange. How about NATIVE_NOOP and
> > NATIVE_NOOP_32 ?
> 
> Those are not NOPs. They return the identical value which was passed to
> them. So identity isn't a bad name after all.

Right, like the math identity function:

  https://en.wikipedia.org/wiki/Identity_function

> >> +# define NATIVE_USERGS_SYSRET64		"swapgs; sysretq"
> >> +#else
> >> +# define _REG_ARG1			"%eax"
> >> +#endif
> >> +
> >> +#define _REG_RET			"%" _ASM_AX
> >> +
> >> +#define NATIVE_ZERO			"xor " _REG_ARG1 ", " _REG_ARG1
> > 
> > NATIVE_ZERO_OUT
> > 
> > I guess. NATIVE_ZERO reads like the native representation of 0 :-)
> 
> NATIVE_ZERO_ARG1?

On a slight tangent, does anybody know why it zeros the arg?

The only place it's used is here:

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(pv_lock_ops,	queued_spin_unlock,	NATIVE_QUEUED_SPIN_UNLOCK);
DEF_NATIVE(pv_lock_ops,	vcpu_is_preempted,	NATIVE_ZERO);
#endif

Isn't that a bug?  Seems like it should _return_ zero.  Zeroing the arg
shouldn't have any effect.

If I'm right, we could call it NATIVE_FALSE.

Jürgen Groß Nov. 18, 2017, 10:20 a.m. UTC | #6
On 17/11/17 20:42, Josh Poimboeuf wrote:
> On Fri, Nov 17, 2017 at 08:10:13PM +0100, Juergen Gross wrote:
>> On 17/11/17 19:07, Borislav Petkov wrote:
>>> On Wed, Oct 04, 2017 at 10:58:24AM -0500, Josh Poimboeuf wrote:
>>>> Convert the hard-coded native patch assembly code strings to macros to
>>>> facilitate sharing common code between 32-bit and 64-bit.
>>>>
>>>> These macros will also be used by a future patch which requires the GCC
>>>> extended asm syntax of two '%' characters instead of one when specifying
>>>> a register name.
>>>>
>>>> Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
>>>> ---
>>>>  arch/x86/include/asm/special_insns.h | 24 ++++++++++++++++++++++++
>>>>  arch/x86/kernel/paravirt_patch_32.c  | 21 +++++++++++----------
>>>>  arch/x86/kernel/paravirt_patch_64.c  | 29 +++++++++++++++--------------
>>>>  3 files changed, 50 insertions(+), 24 deletions(-)
>>>>
>>>> diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
>>>> index ac402c6fc24b..0549c5f2c1b3 100644
>>>> --- a/arch/x86/include/asm/special_insns.h
>>>> +++ b/arch/x86/include/asm/special_insns.h
>>>> @@ -6,6 +6,30 @@
>>>>  
>>>>  #include <asm/nops.h>
>>>>  
>>>> +#ifdef CONFIG_X86_64
>>>> +# define _REG_ARG1			"%rdi"
>>>> +# define NATIVE_IDENTITY_32		"mov %edi, %eax"
>>>
>>> Yeah, that "identity" looks strange. How about NATIVE_NOOP and
>>> NATIVE_NOOP_32 ?
>>
>> Those are not NOPs. They return the identical value which was passed to
>> them. So identity isn't a bad name after all.
> 
> Right, like the math identity function:
> 
>   https://en.wikipedia.org/wiki/Identity_function
> 
>>>> +# define NATIVE_USERGS_SYSRET64		"swapgs; sysretq"
>>>> +#else
>>>> +# define _REG_ARG1			"%eax"
>>>> +#endif
>>>> +
>>>> +#define _REG_RET			"%" _ASM_AX
>>>> +
>>>> +#define NATIVE_ZERO			"xor " _REG_ARG1 ", " _REG_ARG1
>>>
>>> NATIVE_ZERO_OUT
>>>
>>> I guess. NATIVE_ZERO reads like the native representation of 0 :-)
>>
>> NATIVE_ZERO_ARG1?
> 
> On a slight tangent, does anybody know why it zeros the arg?

Why are _you_ asking? You've introduced it.

> The only place it's used is here:
> 
> #if defined(CONFIG_PARAVIRT_SPINLOCKS)
> DEF_NATIVE(pv_lock_ops,	queued_spin_unlock,	NATIVE_QUEUED_SPIN_UNLOCK);
> DEF_NATIVE(pv_lock_ops,	vcpu_is_preempted,	NATIVE_ZERO);
> #endif
> 
> Isn't that a bug?  Seems like it should _return_ zero.  Zeroing the arg
> shouldn't have any effect.

Right. Before that patch it _did_ return zero instead of zeroing arg1.

> If I'm right, we could call it NATIVE_FALSE.

I'd prefer NATIVE_ZERO, as it will be usable for non-boolean cases, too.


Juergen
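
Putting the two replies together, the fix implied here would presumably keep the name but zero the return register instead of the first argument register -- a sketch only, not a submitted patch:

/*
 * vcpu_is_preempted() should return 0 (false) on bare metal, so zero the
 * return register.  On 64-bit, _REG_RET (%rax) and _REG_ARG1 (%rdi)
 * differ, which is why zeroing arg1 is wrong there; on 32-bit both
 * happen to be %eax, so the mistake is masked.
 */
#define NATIVE_ZERO	"xor " _REG_RET ", " _REG_RET
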
Josh Poimboeuf Nov. 18, 2017, 1:17 p.m. UTC | #7
On Sat, Nov 18, 2017 at 11:20:06AM +0100, Juergen Gross wrote:
> >>>> +#define NATIVE_ZERO			"xor " _REG_ARG1 ", " _REG_ARG1
> >>>
> >>> NATIVE_ZERO_OUT
> >>>
> >>> I guess. NATIVE_ZERO reads like the native representation of 0 :-)
> >>
> >> NATIVE_ZERO_ARG1?
> > 
> > On a slight tangent, does anybody know why it zeros the arg?
> 
> Why are _you_ asking? You've introduced it.

So I did.  Touché!

> > The only place it's used is here:
> > 
> > #if defined(CONFIG_PARAVIRT_SPINLOCKS)
> > DEF_NATIVE(pv_lock_ops,	queued_spin_unlock,	NATIVE_QUEUED_SPIN_UNLOCK);
> > DEF_NATIVE(pv_lock_ops,	vcpu_is_preempted,	NATIVE_ZERO);
> > #endif
> > 
> > Isn't that a bug?  Seems like it should _return_ zero.  Zeroing the arg
> > shouldn't have any effect.
> 
> Right. Before that patch it _did_ return zero instead of zeroing arg1.

Oops!

> > If I'm right, we could call it NATIVE_FALSE.
> 
> I'd prefer NATIVE_ZERO, as it will be usable for non-boolean cases, too.

NATIVE_ZERO works for me.

Patch

diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index ac402c6fc24b..0549c5f2c1b3 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -6,6 +6,30 @@ 
 
 #include <asm/nops.h>
 
+#ifdef CONFIG_X86_64
+# define _REG_ARG1			"%rdi"
+# define NATIVE_IDENTITY_32		"mov %edi, %eax"
+# define NATIVE_USERGS_SYSRET64		"swapgs; sysretq"
+#else
+# define _REG_ARG1			"%eax"
+#endif
+
+#define _REG_RET			"%" _ASM_AX
+
+#define NATIVE_ZERO			"xor " _REG_ARG1 ", " _REG_ARG1
+#define NATIVE_IDENTITY			"mov " _REG_ARG1 ", " _REG_RET
+#define NATIVE_SAVE_FL			"pushf; pop " _REG_RET
+#define NATIVE_RESTORE_FL		"push " _REG_ARG1 "; popf"
+#define NATIVE_IRQ_DISABLE		"cli"
+#define NATIVE_IRQ_ENABLE		"sti"
+#define NATIVE_READ_CR2			"mov %cr2, " _REG_RET
+#define NATIVE_READ_CR3			"mov %cr3, " _REG_RET
+#define NATIVE_WRITE_CR3		"mov " _REG_ARG1 ", %cr3"
+#define NATIVE_FLUSH_TLB_SINGLE		"invlpg (" _REG_ARG1 ")"
+#define NATIVE_SWAPGS			"swapgs"
+#define NATIVE_IRET			"iret"
+#define NATIVE_QUEUED_SPIN_UNLOCK	"movb $0, (" _REG_ARG1 ")"
+
 /*
  * Volatile isn't enough to prevent the compiler from reordering the
  * read/write functions for the control registers and messing everything up.
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index 553acbbb4d32..c9c6106ae714 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -1,17 +1,18 @@ 
 #include <asm/paravirt.h>
+#include <asm/special_insns.h>
 
-DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
-DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
-DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
-DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
-DEF_NATIVE(pv_cpu_ops, iret, "iret");
-DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
-DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
-DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
+DEF_NATIVE(pv_irq_ops,	save_fl,		NATIVE_SAVE_FL);
+DEF_NATIVE(pv_irq_ops,	restore_fl,		NATIVE_RESTORE_FL);
+DEF_NATIVE(pv_irq_ops,	irq_disable,		NATIVE_IRQ_DISABLE);
+DEF_NATIVE(pv_irq_ops,	irq_enable,		NATIVE_IRQ_ENABLE);
+DEF_NATIVE(pv_cpu_ops,	iret,			NATIVE_IRET);
+DEF_NATIVE(pv_mmu_ops,	read_cr2,		NATIVE_READ_CR2);
+DEF_NATIVE(pv_mmu_ops,	read_cr3,		NATIVE_READ_CR3);
+DEF_NATIVE(pv_mmu_ops,	write_cr3,		NATIVE_WRITE_CR3);
 
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
-DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
-DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax");
+DEF_NATIVE(pv_lock_ops,	queued_spin_unlock,	NATIVE_QUEUED_SPIN_UNLOCK);
+DEF_NATIVE(pv_lock_ops,	vcpu_is_preempted,	NATIVE_ZERO);
 #endif
 
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 0a1ba3f80cbf..0aa232edd670 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -1,25 +1,26 @@ 
 #include <asm/paravirt.h>
 #include <asm/asm-offsets.h>
+#include <asm/special_insns.h>
 #include <linux/stringify.h>
 
-DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
-DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
-DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
-DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
-DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
-DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
-DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
-DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
+DEF_NATIVE(pv_irq_ops,	save_fl,		NATIVE_SAVE_FL);
+DEF_NATIVE(pv_irq_ops,	restore_fl,		NATIVE_RESTORE_FL);
+DEF_NATIVE(pv_irq_ops,	irq_disable,		NATIVE_IRQ_DISABLE);
+DEF_NATIVE(pv_irq_ops,	irq_enable,		NATIVE_IRQ_ENABLE);
+DEF_NATIVE(pv_mmu_ops,	read_cr2,		NATIVE_READ_CR2);
+DEF_NATIVE(pv_mmu_ops,	read_cr3,		NATIVE_READ_CR3);
+DEF_NATIVE(pv_mmu_ops,	write_cr3,		NATIVE_WRITE_CR3);
+DEF_NATIVE(pv_mmu_ops,	flush_tlb_single,	NATIVE_FLUSH_TLB_SINGLE);
 
-DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
-DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
+DEF_NATIVE(pv_cpu_ops,	usergs_sysret64,	NATIVE_USERGS_SYSRET64);
+DEF_NATIVE(pv_cpu_ops,	swapgs,			NATIVE_SWAPGS);
 
-DEF_NATIVE(, mov32, "mov %edi, %eax");
-DEF_NATIVE(, mov64, "mov %rdi, %rax");
+DEF_NATIVE(,		mov32,			NATIVE_IDENTITY_32);
+DEF_NATIVE(,		mov64,			NATIVE_IDENTITY);
 
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
-DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
-DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
+DEF_NATIVE(pv_lock_ops,	queued_spin_unlock,	NATIVE_QUEUED_SPIN_UNLOCK);
+DEF_NATIVE(pv_lock_ops,	vcpu_is_preempted,	NATIVE_ZERO);
 #endif
 
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)