@@ -1380,9 +1380,11 @@ asmlinkage void kvm_spurious_fault(void);
".pushsection .fixup, \"ax\" \n" \
"667: \n\t" \
cleanup_insn "\n\t" \
- "cmpb $0, kvm_rebooting \n\t" \
+ "cmpb $0, kvm_rebooting" __ASM_SEL(,(%%rip)) " \n\t" \
"jne 668b \n\t" \
- __ASM_SIZE(push) " $666b \n\t" \
+ __ASM_SIZE(push) "%%" _ASM_AX " \n\t" \
+ _ASM_MOVABS " $666b, %%" _ASM_AX "\n\t" \
+ "xchg %%" _ASM_AX ", (%%" _ASM_SP ") \n\t" \
"call kvm_spurious_fault \n\t" \
".popsection \n\t" \
_ASM_EXTABLE(666b, 667b)
@@ -711,8 +711,10 @@ asm(
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
-"movq __per_cpu_offset(,%rdi,8), %rax;"
-"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
+"leaq __per_cpu_offset(%rip), %rax;"
+"movq (%rax,%rdi,8), %rax;"
+"addq " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rip), %rax;"
+"cmpb $0, (%rax);"
"setne %al;"
"ret;"
".popsection");
@@ -626,12 +626,12 @@ static u32 svm_msrpm_offset(u32 msr)

static inline void clgi(void)
{
- asm volatile (__ex(SVM_CLGI));
+ asm volatile (__ex(SVM_CLGI) : :);
}

static inline void stgi(void)
{
- asm volatile (__ex(SVM_STGI));
+ asm volatile (__ex(SVM_STGI) : :);
}

static inline void invlpga(unsigned long addr, u32 asid)
Change the assembly code to use only relative references of symbols for the
kernel to be PIE compatible.

The new __ASM_MOVABS macro is used to get the address of a symbol on both
32 and 64-bit with PIE support.

Position Independent Executable (PIE) support will allow extending the KASLR
randomization range below the -2G memory limit.

Signed-off-by: Thomas Garnier <thgarnie@google.com>
---
 arch/x86/include/asm/kvm_host.h | 6 ++++--
 arch/x86/kernel/kvm.c           | 6 ++++--
 arch/x86/kvm/svm.c              | 4 ++--
 3 files changed, 10 insertions(+), 6 deletions(-)
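For context, here is a minimal sketch of what the _ASM_MOVABS helper (referred
to as __ASM_MOVABS in the changelog above) could look like in
arch/x86/include/asm/asm.h, assuming the existing __ASM_SEL() convention that
picks the 32-bit or 64-bit spelling of a mnemonic. The real definition is
introduced by a separate patch in this series and is not part of the hunks
shown here:

/*
 * Sketch only, not part of this patch: __ASM_SEL(a, b) expands to the
 * quoted string " a " on 32-bit and " b " on 64-bit, so _ASM_MOVABS can
 * be pasted straight into an asm() string, e.g.:
 *
 *	_ASM_MOVABS " $666b, %%" _ASM_AX "\n\t"
 *
 * On 32-bit a plain "movl" immediate already reaches the whole address
 * space.  On 64-bit "movabsq" carries a full 64-bit immediate, so the
 * symbol is no longer limited to the sign-extended 32-bit range (the
 * top 2G of the address space) that a "push $sym" immediate relies on.
 */
#define _ASM_MOVABS	__ASM_SEL(movl, movabsq)

With such a helper, the kvm_host.h fixup above pushes a scratch register,
loads the full address of label 666 into it, and xchg's it into the saved
slot on the stack, instead of pushing a 32-bit sign-extended immediate that
only works for a kernel linked in the top 2G.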