[v10,1/8] x86/vmware: Introduce VMware hypercall API

Message ID: 20240523191446.54695-2-alexey.makhalov@broadcom.com
State: New, archived
Series: VMware hypercalls enhancements

Commit Message

Alexey Makhalov May 23, 2024, 7:14 p.m. UTC
Introduce the vmware_hypercall family of functions, a common
implementation to be used by VMware guest code and virtual device
drivers in an architecture-independent manner.

The API consists of the vmware_hypercallX and vmware_hypercall_hb_{out,in}
sets of functions, by analogy with the KVM hypercall API. The
architecture-specific implementation is hidden inside.
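
For illustration, a low-bandwidth caller could look like this (a sketch
against the signatures introduced by this patch; the GETVERSION-based
detection check is shown only as a sample, not code from this series):

	u32 magic, dummy;
	unsigned long version;

	/* out0 returns in %eax; out1 (%ebx) and out2 (%ecx) via pointers */
	version = vmware_hypercall3(VMWARE_CMD_GETVERSION, 0, &magic, &dummy);
	if (magic != VMWARE_HYPERVISOR_MAGIC)
		return;		/* not running on a VMware hypervisor */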

It will simplify future enhancements to VMware hypercalls, such as
SEV-ES and TDX related changes, without the need to modify callers in
device driver code.

The current implementation extends an idea from commit bac7b4e84323
("x86/vmware: Update platform detection code for VMCALL/VMMCALL
hypercalls") to have a slow but safe path in VMWARE_HYPERCALL early
during boot, when alternatives are not yet applied. The code inherits
the VMWARE_CMD logic from the commit mentioned above.

Move common macros from vmware.c to vmware.h.

Make vmware_hypercall_mode a global variable.

Signed-off-by: Alexey Makhalov <alexey.makhalov@broadcom.com>
---
 arch/x86/include/asm/vmware.h | 274 ++++++++++++++++++++++++++++++++--
 arch/x86/kernel/cpu/vmware.c  |  11 +-
 2 files changed, 262 insertions(+), 23 deletions(-)

Comments

Borislav Petkov May 27, 2024, 5:07 p.m. UTC | #1
On Thu, May 23, 2024 at 12:14:39PM -0700, Alexey Makhalov wrote:
> +#define VMWARE_HYPERCALL						\
> +	ALTERNATIVE_3("",						\
> +		      "jmp .Lport_call%=", X86_FEATURE_HYPERVISOR,	\
> +		      "jmp .Lvmcall%=", X86_FEATURE_VMCALL,		\
> +		      "vmmcall\n\t"					\
> +		      "jmp .Lend%=", X86_FEATURE_VMW_VMMCALL)		\
> +		      "cmpb $"						\
> +			__stringify(CPUID_VMWARE_FEATURES_ECX_VMMCALL)	\
> +			", %[mode]\n\t"					\
> +		      "jg .Lvmcall%=\n\t"				\
> +		      "je .Lvmmcall%=\n\t"				\
> +		      ".Lport_call%=: movw %[port], %%dx\n\t"		\
> +		      "inl (%%dx), %%eax\n\t"				\
> +		      "jmp .Lend%=\n\t"					\
> +		      ".Lvmmcall%=: vmmcall\n\t"			\
> +		      "jmp .Lend%=\n\t"					\
> +		      ".Lvmcall%=: vmcall\n\t"				\
> +		      ".Lend%=:"

So applied (and with minor fixups for the proper indentation, see end of
this mail) this looks like this:

.pushsection .altinstructions,"a"
 .long 661b - .
 .long 6641f - .
 .4byte ( 4*32+31)
 .byte 663b-661b
 .byte 6651f-6641f
 .long 661b - .
 .long 6642f - .
 .4byte ( 8*32+18)
 .byte 663b-661b
 .byte 6652f-6642f
 .long 661b - .
 .long 6643f - .
 .4byte ( 8*32+19)
 .byte 663b-661b
 .byte 6653f-6643f
.popsection
.pushsection .altinstr_replacement, "ax"
# ALT: replacement 1
6641:
        jmp .Lport_call72
6651:
# ALT: replacement 2
6642:
        jmp .Lvmcall72
6652:
# ALT: replacement 3
6643:
        vmmcall
        jmp .Lend72
6653:
.popsection
        cmpb $((((1UL))) << (0)), vmware_hypercall_mode(%rip)   # vmware_hypercall_mode
        jg .Lvmcall72
        je .Lvmmcall72
.Lport_call72:
        movw $22104, %dx        #
        inl (%dx), %eax
        jmp .Lend72
.Lvmmcall72:
        vmmcall
        jmp .Lend72 
.Lvmcall72:
        vmcall
.Lend72:

---

so AFAICT, you want three things:

1. X86_FEATURE_HYPERVISOR - that is always set when running as a guest.
   For that it should do:

        movw $22104, %dx        #
        inl (%dx), %eax

2. X86_FEATURE_VMCALL:

	vmcall

3. X86_FEATURE_VMW_VMMCALL:

	vmmcall

So why don't you simply do that?

vmware_set_capabilities() sets vmware_hypercall_mode *and* those feature
flags at the same time.

And you either support VMCALL or VMMCALL so the first thing should be the
fallback for some ancient crap.

IOW, your hypercall alternative should simply be:

	ALTERNATIVE_2("vmcall", "vmmcall", X86_FEATURE_VMW_VMMCALL, "movw %[port], %%dx; "inl (%%dx), %%eax", X86_FEATURE_HYPERVISOR);

without any more silly dance?

Hmmm?

---

Fixup indentation for proper .s output:

diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h
index 5114f4c75c54..8be877d8bb7c 100644
--- a/arch/x86/include/asm/vmware.h
+++ b/arch/x86/include/asm/vmware.h
@@ -70,17 +70,18 @@ extern u8 vmware_hypercall_mode;
 		      "jmp .Lvmcall%=", X86_FEATURE_VMCALL,		\
 		      "vmmcall\n\t"					\
 		      "jmp .Lend%=", X86_FEATURE_VMW_VMMCALL)		\
-		      "cmpb $"						\
-			__stringify(CPUID_VMWARE_FEATURES_ECX_VMMCALL)	\
-			", %[mode]\n\t"					\
+		      "\tcmpb $" __stringify(CPUID_VMWARE_FEATURES_ECX_VMMCALL) ", %[mode]\n\t" \
 		      "jg .Lvmcall%=\n\t"				\
-		      "je .Lvmmcall%=\n\t"				\
-		      ".Lport_call%=: movw %[port], %%dx\n\t"		\
+		      "je .Lvmmcall%=\n"				\
+		      ".Lport_call%=:\n\t"				\
+		      "movw %[port], %%dx\n\t"				\
 		      "inl (%%dx), %%eax\n\t"				\
-		      "jmp .Lend%=\n\t"					\
-		      ".Lvmmcall%=: vmmcall\n\t"			\
-		      "jmp .Lend%=\n\t"					\
-		      ".Lvmcall%=: vmcall\n\t"				\
+		      "jmp .Lend%=\n"					\
+		      ".Lvmmcall%=:\n\t"				\
+		      "vmmcall\n\t"					\
+		      "jmp .Lend%=\n"					\
+		      ".Lvmcall%=:\n\t"					\
+		      "vmcall\n"					\
 		      ".Lend%=:"
 
 static inline
Alexey Makhalov May 30, 2024, 12:44 a.m. UTC | #2
On 5/27/24 10:07 AM, Borislav Petkov wrote:
> On Thu, May 23, 2024 at 12:14:39PM -0700, Alexey Makhalov wrote:
>> +#define VMWARE_HYPERCALL						\
>> +	ALTERNATIVE_3("",						\
>> +		      "jmp .Lport_call%=", X86_FEATURE_HYPERVISOR,	\
>> +		      "jmp .Lvmcall%=", X86_FEATURE_VMCALL,		\
>> +		      "vmmcall\n\t"					\
>> +		      "jmp .Lend%=", X86_FEATURE_VMW_VMMCALL)		\
>> +		      "cmpb $"						\
>> +			__stringify(CPUID_VMWARE_FEATURES_ECX_VMMCALL)	\
>> +			", %[mode]\n\t"					\
>> +		      "jg .Lvmcall%=\n\t"				\
>> +		      "je .Lvmmcall%=\n\t"				\
>> +		      ".Lport_call%=: movw %[port], %%dx\n\t"		\
>> +		      "inl (%%dx), %%eax\n\t"				\
>> +		      "jmp .Lend%=\n\t"					\
>> +		      ".Lvmmcall%=: vmmcall\n\t"			\
>> +		      "jmp .Lend%=\n\t"					\
>> +		      ".Lvmcall%=: vmcall\n\t"				\
>> +		      ".Lend%=:"
> 
> So applied (and with minor fixups for the proper indentation, see end of
> this mail) this looks like this:
> 
> .pushsection .altinstructions,"a"
>   .long 661b - .
>   .long 6641f - .
>   .4byte ( 4*32+31)
>   .byte 663b-661b
>   .byte 6651f-6641f
>   .long 661b - .
>   .long 6642f - .
>   .4byte ( 8*32+18)
>   .byte 663b-661b
>   .byte 6652f-6642f
>   .long 661b - .
>   .long 6643f - .
>   .4byte ( 8*32+19)
>   .byte 663b-661b
>   .byte 6653f-6643f
> .popsection
> .pushsection .altinstr_replacement, "ax"
> # ALT: replacement 1
> 6641:
>          jmp .Lport_call72
> 6651:
> # ALT: replacement 2
> 6642:
>          jmp .Lvmcall72
> 6652:
> # ALT: replacement 3
> 6643:
>          vmmcall
>          jmp .Lend72
> 6653:
> .popsection
>          cmpb $((((1UL))) << (0)), vmware_hypercall_mode(%rip)   # vmware_hypercall_mode
>          jg .Lvmcall72
>          je .Lvmmcall72
> .Lport_call72:
>          movw $22104, %dx        #
>          inl (%dx), %eax
>          jmp .Lend72
> .Lvmmcall72:
>          vmmcall
>          jmp .Lend72
> .Lvmcall72:
>          vmcall
> .Lend72:
> 
> ---
> 
> so AFAICT, you want three things:
> 
> 1. X86_FEATURE_HYPERVISOR - that is always set when running as a guest.
>     For that it should do:
> 
>          movw $22104, %dx        #
>          inl (%dx), %eax
> 
> 2. X86_FEATURE_VMCALL:
> 
> 	vmcall
> 
> 3. X86_FEATURE_VMW_VMMCALL:
> 
> 	vmmcall
> 
> So why don't you simply do that?
> 
> vmware_set_capabilities() sets vmware_hypercall_mode *and* those feature
> flags at the same time.
> 
> And you either support VMCALL or VMMCALL so the first thing should be the
> fallback for some ancient crap.
> 
> IOW, your hypercall alternative should simply be:
> 
> 	ALTERNATIVE_2("vmcall", "vmmcall", X86_FEATURE_VMW_VMMCALL, "movw %[port], %%dx; inl (%%dx), %%eax", X86_FEATURE_HYPERVISOR);
> 
> without any more silly dance?
While most of the vmware_hypercall callers are executed after
alternatives patching is applied, a small number of hypercalls run
before that.
Only for them do we have the logic of analyzing vmware_hypercall_mode
as the default alternative code. And there are two constraints:
1. vmcall/vmmcall are not supported by old ESXi/Workstation/Fusion. We
have to use in/out instructions. After the end of support of old
hypervisors the alternative can be simplified as follows:
ALTERNATIVE("vmcall", "vmmcall", X86_FEATURE_VMW_VMMCALL);
2. SEV-ES enabled VMs must use _only_ vmcall/vmmcall, as in/out
instructions cause faults.

Another approach that we discussed internally was to use
ALTERNATIVE_2("movw %[port], %%dx; inl (%%dx), %%eax", "vmcall",
X86_FEATURE_VMCALL, "vmmcall", X86_FEATURE_VMW_VMMCALL) for the
vmware_hypercallX family of functions, _and_ to have a separate API,
vmware_sev_hypercallX, with the silly dance without an alternative
inside, to be used only by early boot code, before alternatives are
applied. But it is error prone when it comes to boot-time code
movements or rearrangements, as it puts an additional requirement of
SEV-ES understanding/testing on VMware guests.

So we picked a safe solution until the deprecation of port-based
hypercalls mentioned above.

See also commit bac7b4e843232 ("x86/vmware: Update platform detection
code for VMCALL/VMMCALL hypercalls") where the silly dance was
introduced with the VMWARE_CMD macro.
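
In C terms, that dance is roughly the following dispatch (an
illustrative sketch only, not code from this series; the macro encodes
it in asm so that alternatives patching can later remove it entirely):

static unsigned long vmware_hypercall1_slow(unsigned long cmd,
					    unsigned long in1)
{
	unsigned long out0;

	if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMCALL)
		asm volatile ("vmcall"
			: "=a" (out0)
			: "a" (VMWARE_HYPERVISOR_MAGIC), "b" (in1),
			  "c" (cmd), "d" (0)
			: "cc", "memory");
	else if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMMCALL)
		asm volatile ("vmmcall"
			: "=a" (out0)
			: "a" (VMWARE_HYPERVISOR_MAGIC), "b" (in1),
			  "c" (cmd), "d" (0)
			: "cc", "memory");
	else
		/* mode 0: legacy I/O port based hypercall */
		asm volatile ("inl (%%dx), %%eax"
			: "=a" (out0)
			: "a" (VMWARE_HYPERVISOR_MAGIC), "b" (in1),
			  "c" (cmd), "d" (VMWARE_HYPERVISOR_PORT)
			: "cc", "memory");
	return out0;
}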

> 
> Hmmm?
> 
> ---
> 
> Fixup indentation for proper .s output:
> 
> diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h
> index 5114f4c75c54..8be877d8bb7c 100644
> --- a/arch/x86/include/asm/vmware.h
> +++ b/arch/x86/include/asm/vmware.h
> @@ -70,17 +70,18 @@ extern u8 vmware_hypercall_mode;
>   		      "jmp .Lvmcall%=", X86_FEATURE_VMCALL,		\
>   		      "vmmcall\n\t"					\
>   		      "jmp .Lend%=", X86_FEATURE_VMW_VMMCALL)		\
> -		      "cmpb $"						\
> -			__stringify(CPUID_VMWARE_FEATURES_ECX_VMMCALL)	\
> -			", %[mode]\n\t"					\
> +		      "\tcmpb $" __stringify(CPUID_VMWARE_FEATURES_ECX_VMMCALL) ", %[mode]\n\t" \
Noted the \t prefix before cmpb, but I will keep the original three
lines to fit in the 80-column limit.

>   		      "jg .Lvmcall%=\n\t"				\
> -		      "je .Lvmmcall%=\n\t"				\
> -		      ".Lport_call%=: movw %[port], %%dx\n\t"		\
> +		      "je .Lvmmcall%=\n"				\
> +		      ".Lport_call%=:\n\t"				\
> +		      "movw %[port], %%dx\n\t"				\
Noted, will put labels on separate lines.
>   		      "inl (%%dx), %%eax\n\t"				\
> -		      "jmp .Lend%=\n\t"					\
> -		      ".Lvmmcall%=: vmmcall\n\t"			\
> -		      "jmp .Lend%=\n\t"					\
> -		      ".Lvmcall%=: vmcall\n\t"				\
> +		      "jmp .Lend%=\n"					\
> +		      ".Lvmmcall%=:\n\t"				\
> +		      "vmmcall\n\t"					\
> +		      "jmp .Lend%=\n"					\
> +		      ".Lvmcall%=:\n\t"					\
> +		      "vmcall\n"					\
>   		      ".Lend%=:"
>   
>   static inline
> 
> 
Best regards,
--Alexey
Borislav Petkov June 3, 2024, 5:58 p.m. UTC | #3
On Wed, May 29, 2024 at 05:44:32PM -0700, Alexey Makhalov wrote:
> While most of the vmware_hypercall callers are executed after
> alternatives patching is applied, a small number of hypercalls run
> before that.
> Only for them do we have the logic of analyzing vmware_hypercall_mode
> as the default alternative code. And there are two constraints:
> 1. vmcall/vmmcall are not supported by old ESXi/Workstation/Fusion. We
> have to use in/out instructions. After the end of support of old
> hypervisors the alternative can be simplified as follows:
> ALTERNATIVE("vmcall", "vmmcall", X86_FEATURE_VMW_VMMCALL);
> 2. SEV-ES enabled VMs must use _only_ vmcall/vmmcall, as in/out
> instructions cause faults.
>
> Another approach that we discussed internally was to use
> ALTERNATIVE_2("movw %[port], %%dx; inl (%%dx), %%eax", "vmcall",
> X86_FEATURE_VMCALL, "vmmcall", X86_FEATURE_VMW_VMMCALL) for the
> vmware_hypercallX family of functions, _and_ to have a separate API,
> vmware_sev_hypercallX, with the silly dance without an alternative
> inside, to be used only by early boot code, before alternatives are
> applied. But it is error prone when it comes to boot-time code
> movements or rearrangements, as it puts an additional requirement of
> SEV-ES understanding/testing on VMware guests.

Right, so since we're exporting that alternatives_patched thing already,
you might also try to do:

	if (unlikely(!alternatives_patched))
		return slow_hypercall_X_in_c();

	asm_inline volatile(VMWARE_HYPERCALL...

where that slow_hypercall_X_in_c()* set of APIs does the checks in C.

And the VMWARE_HYPERCALL thing is a lot simpler then.

All in all, you'll have a lot less unreadable asm to pay attention to
and those APIs should be all easy and readable.

But at the end of the day, your call.

Thanks for explaining the situation.
Alexey Makhalov June 6, 2024, 11:18 p.m. UTC | #4
On 6/3/24 10:58 AM, Borislav Petkov wrote:
> On Wed, May 29, 2024 at 05:44:32PM -0700, Alexey Makhalov wrote:
>> While most of the vmware_hypercall callers are executed after
>> alternatives patching is applied, a small number of hypercalls run
>> before that.
>> Only for them do we have the logic of analyzing vmware_hypercall_mode
>> as the default alternative code. And there are two constraints:
>> 1. vmcall/vmmcall are not supported by old ESXi/Workstation/Fusion. We
>> have to use in/out instructions. After the end of support of old
>> hypervisors the alternative can be simplified as follows:
>> ALTERNATIVE("vmcall", "vmmcall", X86_FEATURE_VMW_VMMCALL);
>> 2. SEV-ES enabled VMs must use _only_ vmcall/vmmcall, as in/out
>> instructions cause faults.
>>
>> Another approach that we discussed internally was to use
>> ALTERNATIVE_2("movw %[port], %%dx; inl (%%dx), %%eax", "vmcall",
>> X86_FEATURE_VMCALL, "vmmcall", X86_FEATURE_VMW_VMMCALL) for the
>> vmware_hypercallX family of functions, _and_ to have a separate API,
>> vmware_sev_hypercallX, with the silly dance without an alternative
>> inside, to be used only by early boot code, before alternatives are
>> applied. But it is error prone when it comes to boot-time code
>> movements or rearrangements, as it puts an additional requirement of
>> SEV-ES understanding/testing on VMware guests.
> 
> Right, so since we're exporting that alternatives_patched thing already,
> you might also try to do:
> 
> 	if (unlikely(!alternatives_patched))
> 		return slow_hypercall_X_in_c();
> 
> 	asm_inline volatile(VMWARE_HYPERCALL...
> 
> where that slow_hypercall_X_in_c()* set of APIs does the checks in C.
> 
> And the VMWARE_HYPERCALL thing is a lot simpler then.
> 
> All in all, you'll have a lot less unreadable asm to pay attention to
> and those APIs should be all easy and readable.
> 

Thanks for the idea.

I improved the condition to eliminate the slow path for modules such as
vmmouse and vmwgfx:

        if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
                return vmware_hypercall_slow(...);

It also drops the need to export the vmware_hypercall_mode or
vmware_hypercall_slow symbols.
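
A sketch of how a wrapper could then look (assuming the simplified
VMWARE_HYPERCALL alternative and a vmware_hypercall_slow() helper that
does the mode checks in C; the helper's signature here is hypothetical,
not the final v11 code):

static inline
unsigned long vmware_hypercall1(unsigned long cmd, unsigned long in1)
{
	unsigned long out0;

	/* Built-in callers may run before alternatives are applied;
	 * modules (vmmouse, vmwgfx) load long after patching, so the
	 * compile-time !__is_defined(MODULE) part drops the slow path
	 * from them entirely. */
	if (unlikely(!alternatives_patched) && !__is_defined(MODULE))
		return vmware_hypercall_slow(cmd, in1);

	asm_inline volatile (VMWARE_HYPERCALL
		: "=a" (out0)
		: "a" (VMWARE_HYPERVISOR_MAGIC),
		  "b" (in1),
		  "c" (cmd),
		  "d" (0)
		: "cc", "memory");
	return out0;
}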

Will post just Patch0001 for review here before sending v11 out.

Patch

diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h
index ac9fc51e2b18..5114f4c75c54 100644
--- a/arch/x86/include/asm/vmware.h
+++ b/arch/x86/include/asm/vmware.h
@@ -7,26 +7,272 @@ 
 #include <linux/stringify.h>
 
 /*
- * The hypercall definitions differ in the low word of the %edx argument
- * in the following way: the old port base interface uses the port
- * number to distinguish between high- and low bandwidth versions.
+ * VMware hypercall ABI.
+ *
+ * - Low bandwidth (LB) hypercalls (I/O port based, vmcall and vmmcall)
+ * have up to 6 input and 6 output arguments passed and returned using
+ * registers: %eax (arg0), %ebx (arg1), %ecx (arg2), %edx (arg3),
+ * %esi (arg4), %edi (arg5).
+ * The following input arguments must be initialized by the caller:
+ * arg0 - VMWARE_HYPERVISOR_MAGIC
+ * arg2 - Hypercall command
+ * arg3 bits [15:0] - Port number, LB and direction flags
+ *
+ * - High bandwidth (HB) hypercalls are I/O port based only. They have
+ * up to 7 input and 7 output arguments passed and returned using
+ * registers: %eax (arg0), %ebx (arg1), %ecx (arg2), %edx (arg3),
+ * %esi (arg4), %edi (arg5), %ebp (arg6).
+ * The following input arguments must be initialized by the caller:
+ * arg0 - VMWARE_HYPERVISOR_MAGIC
+ * arg1 - Hypercall command
+ * arg3 bits [15:0] - Port number, HB and direction flags
+ *
+ * For compatibility purposes, x86_64 systems use only lower 32 bits
+ * for input and output arguments.
+ *
+ * The hypercall definitions differ in the low word of the %edx (arg3)
+ * in the following way: the old I/O port based interface uses the port
+ * number to distinguish between high- and low bandwidth versions, and
+ * uses IN/OUT instructions to define transfer direction.
  *
  * The new vmcall interface instead uses a set of flags to select
  * bandwidth mode and transfer direction. The flags should be loaded
- * into %dx by any user and are automatically replaced by the port
- * number if the VMWARE_HYPERVISOR_PORT method is used.
- *
- * In short, new driver code should strictly use the new definition of
- * %dx content.
+ * into arg3 by any user and are automatically replaced by the port
+ * number if the I/O port method is used.
+ */
+
+#define VMWARE_HYPERVISOR_HB		BIT(0)
+#define VMWARE_HYPERVISOR_OUT		BIT(1)
+
+#define VMWARE_HYPERVISOR_PORT		0x5658
+#define VMWARE_HYPERVISOR_PORT_HB	(VMWARE_HYPERVISOR_PORT | \
+					 VMWARE_HYPERVISOR_HB)
+
+#define VMWARE_HYPERVISOR_MAGIC		0x564d5868U
+
+#define VMWARE_CMD_GETVERSION		10
+#define VMWARE_CMD_GETHZ		45
+#define VMWARE_CMD_GETVCPU_INFO		68
+#define VMWARE_CMD_STEALCLOCK		91
+
+#define CPUID_VMWARE_FEATURES_ECX_VMMCALL	BIT(0)
+#define CPUID_VMWARE_FEATURES_ECX_VMCALL	BIT(1)
+
+extern u8 vmware_hypercall_mode;
+
+/*
+ * The low bandwidth call. The low word of %edx is presumed to have OUT bit
+ * set. The high word of %edx may contain input data from the caller.
+ */
+#define VMWARE_HYPERCALL						\
+	ALTERNATIVE_3("",						\
+		      "jmp .Lport_call%=", X86_FEATURE_HYPERVISOR,	\
+		      "jmp .Lvmcall%=", X86_FEATURE_VMCALL,		\
+		      "vmmcall\n\t"					\
+		      "jmp .Lend%=", X86_FEATURE_VMW_VMMCALL)		\
+		      "cmpb $"						\
+			__stringify(CPUID_VMWARE_FEATURES_ECX_VMMCALL)	\
+			", %[mode]\n\t"					\
+		      "jg .Lvmcall%=\n\t"				\
+		      "je .Lvmmcall%=\n\t"				\
+		      ".Lport_call%=: movw %[port], %%dx\n\t"		\
+		      "inl (%%dx), %%eax\n\t"				\
+		      "jmp .Lend%=\n\t"					\
+		      ".Lvmmcall%=: vmmcall\n\t"			\
+		      "jmp .Lend%=\n\t"					\
+		      ".Lvmcall%=: vmcall\n\t"				\
+		      ".Lend%=:"
+
+static inline
+unsigned long vmware_hypercall1(unsigned long cmd, unsigned long in1)
+{
+	unsigned long out0;
+
+	asm_inline volatile (VMWARE_HYPERCALL
+		: "=a" (out0)
+		: [port] "i" (VMWARE_HYPERVISOR_PORT),
+		  [mode] "m" (vmware_hypercall_mode),
+		  "a" (VMWARE_HYPERVISOR_MAGIC),
+		  "b" (in1),
+		  "c" (cmd),
+		  "d" (0)
+		: "cc", "memory");
+	return out0;
+}
+
+static inline
+unsigned long vmware_hypercall3(unsigned long cmd, unsigned long in1,
+				u32 *out1, u32 *out2)
+{
+	unsigned long out0;
+
+	asm_inline volatile (VMWARE_HYPERCALL
+		: "=a" (out0), "=b" (*out1), "=c" (*out2)
+		: [port] "i" (VMWARE_HYPERVISOR_PORT),
+		  [mode] "m" (vmware_hypercall_mode),
+		  "a" (VMWARE_HYPERVISOR_MAGIC),
+		  "b" (in1),
+		  "c" (cmd),
+		  "d" (0)
+		: "cc", "memory");
+	return out0;
+}
+
+static inline
+unsigned long vmware_hypercall4(unsigned long cmd, unsigned long in1,
+				u32 *out1, u32 *out2, u32 *out3)
+{
+	unsigned long out0;
+
+	asm_inline volatile (VMWARE_HYPERCALL
+		: "=a" (out0), "=b" (*out1), "=c" (*out2), "=d" (*out3)
+		: [port] "i" (VMWARE_HYPERVISOR_PORT),
+		  [mode] "m" (vmware_hypercall_mode),
+		  "a" (VMWARE_HYPERVISOR_MAGIC),
+		  "b" (in1),
+		  "c" (cmd),
+		  "d" (0)
+		: "cc", "memory");
+	return out0;
+}
+
+static inline
+unsigned long vmware_hypercall5(unsigned long cmd, unsigned long in1,
+				unsigned long in3, unsigned long in4,
+				unsigned long in5, u32 *out2)
+{
+	unsigned long out0;
+
+	asm_inline volatile (VMWARE_HYPERCALL
+		: "=a" (out0), "=c" (*out2)
+		: [port] "i" (VMWARE_HYPERVISOR_PORT),
+		  [mode] "m" (vmware_hypercall_mode),
+		  "a" (VMWARE_HYPERVISOR_MAGIC),
+		  "b" (in1),
+		  "c" (cmd),
+		  "d" (in3),
+		  "S" (in4),
+		  "D" (in5)
+		: "cc", "memory");
+	return out0;
+}
+
+static inline
+unsigned long vmware_hypercall6(unsigned long cmd, unsigned long in1,
+				unsigned long in3, u32 *out2,
+				u32 *out3, u32 *out4, u32 *out5)
+{
+	unsigned long out0;
+
+	asm_inline volatile (VMWARE_HYPERCALL
+		: "=a" (out0), "=c" (*out2), "=d" (*out3), "=S" (*out4),
+		  "=D" (*out5)
+		: [port] "i" (VMWARE_HYPERVISOR_PORT),
+		  [mode] "m" (vmware_hypercall_mode),
+		  "a" (VMWARE_HYPERVISOR_MAGIC),
+		  "b" (in1),
+		  "c" (cmd),
+		  "d" (in3)
+		: "cc", "memory");
+	return out0;
+}
+
+static inline
+unsigned long vmware_hypercall7(unsigned long cmd, unsigned long in1,
+				unsigned long in3, unsigned long in4,
+				unsigned long in5, u32 *out1,
+				u32 *out2, u32 *out3)
+{
+	unsigned long out0;
+
+	asm_inline volatile (VMWARE_HYPERCALL
+		: "=a" (out0), "=b" (*out1), "=c" (*out2), "=d" (*out3)
+		: [port] "i" (VMWARE_HYPERVISOR_PORT),
+		  [mode] "m" (vmware_hypercall_mode),
+		  "a" (VMWARE_HYPERVISOR_MAGIC),
+		  "b" (in1),
+		  "c" (cmd),
+		  "d" (in3),
+		  "S" (in4),
+		  "D" (in5)
+		: "cc", "memory");
+	return out0;
+}
+
+
+#ifdef CONFIG_X86_64
+#define VMW_BP_REG "%%rbp"
+#define VMW_BP_CONSTRAINT "r"
+#else
+#define VMW_BP_REG "%%ebp"
+#define VMW_BP_CONSTRAINT "m"
+#endif
+
+/*
+ * High bandwidth calls are not supported on encrypted memory guests.
+ * The caller should check cc_platform_has(CC_ATTR_MEM_ENCRYPT) and use
+ * low bandwidth hypercall if memory encryption is set.
+ * This assumption simplifies HB hypercall implementation to just I/O port
+ * based approach without alternative patching.
  */
+static inline
+unsigned long vmware_hypercall_hb_out(unsigned long cmd, unsigned long in2,
+				      unsigned long in3, unsigned long in4,
+				      unsigned long in5, unsigned long in6,
+				      u32 *out1)
+{
+	unsigned long out0;
+
+	asm_inline volatile (
+		UNWIND_HINT_SAVE
+		"push " VMW_BP_REG "\n\t"
+		UNWIND_HINT_UNDEFINED
+		"mov %[in6], " VMW_BP_REG "\n\t"
+		"rep outsb\n\t"
+		"pop " VMW_BP_REG "\n\t"
+		UNWIND_HINT_RESTORE
+		: "=a" (out0), "=b" (*out1)
+		: "a" (VMWARE_HYPERVISOR_MAGIC),
+		  "b" (cmd),
+		  "c" (in2),
+		  "d" (in3 | VMWARE_HYPERVISOR_PORT_HB),
+		  "S" (in4),
+		  "D" (in5),
+		  [in6] VMW_BP_CONSTRAINT (in6)
+		: "cc", "memory");
+	return out0;
+}
 
-/* Old port-based version */
-#define VMWARE_HYPERVISOR_PORT    0x5658
-#define VMWARE_HYPERVISOR_PORT_HB 0x5659
+static inline
+unsigned long vmware_hypercall_hb_in(unsigned long cmd, unsigned long in2,
+				     unsigned long in3, unsigned long in4,
+				     unsigned long in5, unsigned long in6,
+				     u32 *out1)
+{
+	unsigned long out0;
 
-/* Current vmcall / vmmcall version */
-#define VMWARE_HYPERVISOR_HB   BIT(0)
-#define VMWARE_HYPERVISOR_OUT  BIT(1)
+	asm_inline volatile (
+		UNWIND_HINT_SAVE
+		"push " VMW_BP_REG "\n\t"
+		UNWIND_HINT_UNDEFINED
+		"mov %[in6], " VMW_BP_REG "\n\t"
+		"rep insb\n\t"
+		"pop " VMW_BP_REG "\n\t"
+		UNWIND_HINT_RESTORE
+		: "=a" (out0), "=b" (*out1)
+		: "a" (VMWARE_HYPERVISOR_MAGIC),
+		  "b" (cmd),
+		  "c" (in2),
+		  "d" (in3 | VMWARE_HYPERVISOR_PORT_HB),
+		  "S" (in4),
+		  "D" (in5),
+		  [in6] VMW_BP_CONSTRAINT (in6)
+		: "cc", "memory");
+	return out0;
+}
+#undef VMW_BP_REG
+#undef VMW_BP_CONSTRAINT
+#undef VMWARE_HYPERCALL
 
 /* The low bandwidth call. The low word of edx is presumed clear. */
 #define VMWARE_HYPERCALL						\
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 11f83d07925e..07b7b5b773a0 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -41,17 +41,9 @@ 
 
 #define CPUID_VMWARE_INFO_LEAF               0x40000000
 #define CPUID_VMWARE_FEATURES_LEAF           0x40000010
-#define CPUID_VMWARE_FEATURES_ECX_VMMCALL    BIT(0)
-#define CPUID_VMWARE_FEATURES_ECX_VMCALL     BIT(1)
 
-#define VMWARE_HYPERVISOR_MAGIC	0x564D5868
-
-#define VMWARE_CMD_GETVERSION    10
-#define VMWARE_CMD_GETHZ         45
-#define VMWARE_CMD_GETVCPU_INFO  68
 #define VMWARE_CMD_LEGACY_X2APIC  3
 #define VMWARE_CMD_VCPU_RESERVED 31
-#define VMWARE_CMD_STEALCLOCK    91
 
 #define STEALCLOCK_NOT_AVAILABLE (-1)
 #define STEALCLOCK_DISABLED        0
@@ -108,7 +100,8 @@  struct vmware_steal_time {
 };
 
 static unsigned long vmware_tsc_khz __ro_after_init;
-static u8 vmware_hypercall_mode     __ro_after_init;
+u8 vmware_hypercall_mode __ro_after_init;
+EXPORT_SYMBOL_GPL(vmware_hypercall_mode);
 
 static inline int __vmware_platform(void)
 {