@@ -18,6 +18,90 @@
#include "desc.h"
#include "asm/page.h"
#include "efi.h"
+#include "processor.h"
+#include "insn/insn.h"
+#include "svm.h"
+
+#define GHCB_SHARED_BUF_SIZE 2032
+
+struct ghcb_save_area {
+ u8 reserved_0x0[203];
+ u8 cpl;
+ u8 reserved_0xcc[116];
+ u64 xss;
+ u8 reserved_0x148[24];
+ u64 dr7;
+ u8 reserved_0x168[16];
+ u64 rip;
+ u8 reserved_0x180[88];
+ u64 rsp;
+ u8 reserved_0x1e0[24];
+ u64 rax;
+ u8 reserved_0x200[264];
+ u64 rcx;
+ u64 rdx;
+ u64 rbx;
+ u8 reserved_0x320[8];
+ u64 rbp;
+ u64 rsi;
+ u64 rdi;
+ u64 r8;
+ u64 r9;
+ u64 r10;
+ u64 r11;
+ u64 r12;
+ u64 r13;
+ u64 r14;
+ u64 r15;
+ u8 reserved_0x380[16];
+ u64 sw_exit_code;
+ u64 sw_exit_info_1;
+ u64 sw_exit_info_2;
+ u64 sw_scratch;
+ u8 reserved_0x3b0[56];
+ u64 xcr0;
+ u8 valid_bitmap[16];
+ u64 x87_state_gpa;
+} __packed;
+
+struct ghcb {
+ struct ghcb_save_area save;
+ u8 reserved_save[2048 - sizeof(struct ghcb_save_area)];
+
+ u8 shared_buffer[GHCB_SHARED_BUF_SIZE];
+
+ u8 reserved_0xff0[10];
+ u16 protocol_version; /* negotiated SEV-ES/GHCB protocol version */
+ u32 ghcb_usage;
+} __packed;
+
+#define GHCB_PROTO_OUR 0x0001UL
+#define GHCB_PROTOCOL_MAX 1ULL
+#define GHCB_DEFAULT_USAGE 0ULL
+
+#define VMGEXIT() { asm volatile("rep; vmmcall\n\r"); }
+
+enum es_result {
+ ES_OK, /* All good */
+ ES_UNSUPPORTED, /* Requested operation not supported */
+ ES_VMM_ERROR, /* Unexpected state from the VMM */
+ ES_DECODE_FAILED, /* Instruction decoding failed */
+ ES_EXCEPTION, /* Instruction caused exception */
+ ES_RETRY, /* Retry instruction emulation */
+};
+
+struct es_fault_info {
+ unsigned long vector;
+ unsigned long error_code;
+ unsigned long cr2;
+};
+
+/* ES instruction emulation context */
+struct es_em_ctxt {
+ struct ex_regs *regs;
+ struct insn insn;
+ struct es_fault_info fi;
+};
/*
* AMD Programmer's Manual Volume 3
@@ -59,6 +143,58 @@ void handle_sev_es_vc(struct ex_regs *regs);
unsigned long long get_amd_sev_c_bit_mask(void);
unsigned long long get_amd_sev_addr_upperbound(void);
+/* GHCB Accessor functions from Linux's arch/x86/include/asm/svm.h */
+#define GHCB_BITMAP_IDX(field) \
+ (offsetof(struct ghcb_save_area, field) / sizeof(u64))
+
+#define DEFINE_GHCB_ACCESSORS(field) \
+ static __always_inline bool ghcb_##field##_is_valid(const struct ghcb *ghcb) \
+ { \
+ return test_bit(GHCB_BITMAP_IDX(field), \
+ (unsigned long *)&ghcb->save.valid_bitmap); \
+ } \
+ \
+ static __always_inline u64 ghcb_get_##field(struct ghcb *ghcb) \
+ { \
+ return ghcb->save.field; \
+ } \
+ \
+ static __always_inline u64 ghcb_get_##field##_if_valid(struct ghcb *ghcb) \
+ { \
+ return ghcb_##field##_is_valid(ghcb) ? ghcb->save.field : 0; \
+ } \
+ \
+ static __always_inline void ghcb_set_##field(struct ghcb *ghcb, u64 value) \
+ { \
+ set_bit(GHCB_BITMAP_IDX(field), \
+ (u8 *)&ghcb->save.valid_bitmap); \
+ ghcb->save.field = value; \
+ }
+
+DEFINE_GHCB_ACCESSORS(cpl)
+DEFINE_GHCB_ACCESSORS(rip)
+DEFINE_GHCB_ACCESSORS(rsp)
+DEFINE_GHCB_ACCESSORS(rax)
+DEFINE_GHCB_ACCESSORS(rcx)
+DEFINE_GHCB_ACCESSORS(rdx)
+DEFINE_GHCB_ACCESSORS(rbx)
+DEFINE_GHCB_ACCESSORS(rbp)
+DEFINE_GHCB_ACCESSORS(rsi)
+DEFINE_GHCB_ACCESSORS(rdi)
+DEFINE_GHCB_ACCESSORS(r8)
+DEFINE_GHCB_ACCESSORS(r9)
+DEFINE_GHCB_ACCESSORS(r10)
+DEFINE_GHCB_ACCESSORS(r11)
+DEFINE_GHCB_ACCESSORS(r12)
+DEFINE_GHCB_ACCESSORS(r13)
+DEFINE_GHCB_ACCESSORS(r14)
+DEFINE_GHCB_ACCESSORS(r15)
+DEFINE_GHCB_ACCESSORS(sw_exit_code)
+DEFINE_GHCB_ACCESSORS(sw_exit_info_1)
+DEFINE_GHCB_ACCESSORS(sw_exit_info_2)
+DEFINE_GHCB_ACCESSORS(sw_scratch)
+DEFINE_GHCB_ACCESSORS(xcr0)
+
#endif /* CONFIG_EFI */
#endif /* _X86_AMD_SEV_H_ */
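
This is everything a #VC handler needs to talk to the hypervisor: fill the GHCB through the generated accessors, request an exit with VMGEXIT(), and check which result registers the VMM marked valid. Below is a minimal sketch (not part of the patch) of handling a CPUID intercept, assuming the caller has already located the GHCB and captured the guest registers in an es_em_ctxt; the helper name is hypothetical, error reporting is trimmed, and leaf 0xd would additionally need XCR0/XSS.

```c
static enum es_result vc_handle_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct ex_regs *regs = ctxt->regs;

	/* Hand the requested leaf/subleaf to the hypervisor. */
	ghcb_set_rax(ghcb, regs->rax);
	ghcb_set_rcx(ghcb, regs->rcx);

	/* Ask for CPUID emulation and exit to the VMM. */
	ghcb_set_sw_exit_code(ghcb, SVM_EXIT_CPUID);
	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);
	VMGEXIT();

	/* The VMM must hand back all four result registers. */
	if (!ghcb_rax_is_valid(ghcb) || !ghcb_rbx_is_valid(ghcb) ||
	    !ghcb_rcx_is_valid(ghcb) || !ghcb_rdx_is_valid(ghcb))
		return ES_VMM_ERROR;

	regs->rax = ghcb_get_rax(ghcb);
	regs->rbx = ghcb_get_rbx(ghcb);
	regs->rcx = ghcb_get_rcx(ghcb);
	regs->rdx = ghcb_get_rdx(ghcb);
	return ES_OK;
}
```
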
@@ -151,6 +151,7 @@
#define MSR_AMD64_IBSDCLINAD 0xc0011038
#define MSR_AMD64_IBSDCPHYSAD 0xc0011039
#define MSR_AMD64_IBSCTL 0xc001103a
+#define MSR_AMD64_SEV_ES_GHCB 0xc0010130
/* Fam 10h MSRs */
#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
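
MSR 0xC0010130 is how the guest finds the GHCB: once a GHCB page has been registered, reading the MSR yields its guest physical address. A minimal sketch of turning that into a pointer, assuming the identity mapping the UEFI environment here provides (the helper name is hypothetical):

```c
static struct ghcb *get_ghcb(void)
{
	/* With a registered GHCB, the MSR holds its guest physical address. */
	u64 ghcb_gpa = rdmsr(MSR_AMD64_SEV_ES_GHCB);

	/* Assumes an identity-mapped, already-shared (unencrypted) page. */
	return (struct ghcb *)(unsigned long)ghcb_gpa;
}
```
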
@@ -837,6 +837,14 @@ static inline void set_bit(int bit, u8 *addr)
: "+m" (*addr) : "Ir" (bit) : "cc", "memory");
}
+static inline int test_bit(int nr, const volatile unsigned long *addr)
+{
+ const volatile unsigned long *word = addr + BIT_WORD(nr);
+ unsigned long mask = BIT_MASK(nr);
+
+ return (*word & mask) != 0;
+}
+
static inline void flush_tlb(void)
{
ulong cr4;
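
test_bit() is the read-side counterpart of the existing set_bit() and is what the ghcb_*_is_valid() accessors expand to. As a quick illustration (not part of the patch): rax sits at save-area offset 0x1f8, so GHCB_BITMAP_IDX(rax) is 0x1f8 / 8 = 63, and the set/test pair works on the 16-byte valid_bitmap.

```c
static void mark_rax_valid(struct ghcb *ghcb)
{
	/* Same pattern the generated ghcb_set_rax()/ghcb_rax_is_valid() use. */
	set_bit(GHCB_BITMAP_IDX(rax), (u8 *)&ghcb->save.valid_bitmap);
	assert(test_bit(GHCB_BITMAP_IDX(rax),
			(unsigned long *)&ghcb->save.valid_bitmap));
}
```
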
@@ -175,11 +175,13 @@ struct __attribute__ ((__packed__)) vmcb_save_area {
struct vmcb_seg ldtr;
struct vmcb_seg idtr;
struct vmcb_seg tr;
- u8 reserved_1[43];
+ /* Reserved fields are named after their offset within the struct */
+ u8 reserved_0xa0[42];
+ u8 vmpl;
u8 cpl;
- u8 reserved_2[4];
+ u8 reserved_0xcc[4];
u64 efer;
- u8 reserved_3[112];
+ u8 reserved_0xd8[112];
u64 cr4;
u64 cr3;
u64 cr0;
@@ -187,9 +189,11 @@ struct __attribute__ ((__packed__)) vmcb_save_area {
u64 dr6;
u64 rflags;
u64 rip;
- u8 reserved_4[88];
+ u8 reserved_0x180[88];
u64 rsp;
- u8 reserved_5[24];
+ u64 s_cet;
+ u64 ssp;
+ u64 isst_addr;
u64 rax;
u64 star;
u64 lstar;
@@ -200,13 +204,15 @@ struct __attribute__ ((__packed__)) vmcb_save_area {
u64 sysenter_esp;
u64 sysenter_eip;
u64 cr2;
- u8 reserved_6[32];
+ u8 reserved_0x248[32];
u64 g_pat;
u64 dbgctl;
u64 br_from;
u64 br_to;
u64 last_excp_from;
u64 last_excp_to;
+ u8 reserved_0x298[72];
+ u32 spec_ctrl; /* Guest version of SPEC_CTRL at 0x2E0 */
};
struct __attribute__ ((__packed__)) vmcb {
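
With the reserved fields named after their offsets, layout regressions are easy to pin down at build time. A hedged sketch of such checks (not in the patch; they would have to live in a .c file):

```c
/* Spot-check the offsets the new field names promise. */
_Static_assert(offsetof(struct vmcb_save_area, vmpl) == 0x0ca,
	       "vmpl belongs at VMCB save area offset 0xca");
_Static_assert(offsetof(struct vmcb_save_area, cpl) == 0x0cb,
	       "cpl belongs at VMCB save area offset 0xcb");
_Static_assert(offsetof(struct vmcb_save_area, spec_ctrl) == 0x2e0,
	       "spec_ctrl belongs at VMCB save area offset 0x2e0");
```
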
@@ -307,6 +313,7 @@ struct __attribute__ ((__packed__)) vmcb {
#define SVM_EXIT_WRITE_DR6 0x036
#define SVM_EXIT_WRITE_DR7 0x037
#define SVM_EXIT_EXCP_BASE 0x040
+#define SVM_EXIT_LAST_EXCP 0x05f
#define SVM_EXIT_INTR 0x060
#define SVM_EXIT_NMI 0x061
#define SVM_EXIT_SMI 0x062
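
SVM_EXIT_LAST_EXCP gives the exception-intercept range an explicit upper bound, so dispatch code can range-check exit codes instead of hard-coding 0x5f. A sketch of the intended use (hypothetical helper):

```c
static inline bool is_excp_exit(u32 exit_code)
{
	/* Exception intercepts occupy 0x040..0x05f (vectors 0..31). */
	return exit_code >= SVM_EXIT_EXCP_BASE &&
	       exit_code <= SVM_EXIT_LAST_EXCP;
}
```
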
@@ -19,6 +19,7 @@ endif
fcf_protection_full := $(call cc-option, -fcf-protection=full,)
COMMON_CFLAGS += -mno-red-zone -mno-sse -mno-sse2 $(fcf_protection_full)
+COMMON_CFLAGS += -Wno-address-of-packed-member
cflatobjs += lib/x86/setjmp64.o
cflatobjs += lib/x86/intel-iommu.o
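
The new warning suppression lines up with the GHCB accessors above: they take the address of valid_bitmap, a member of a __packed struct, and cast it to a wider pointer type, which GCC 9+ flags as potentially unaligned. An illustration of the pattern being silenced; valid_bitmap sits at offset 0x3f0, so the access is in fact 8-byte aligned.

```c
static inline unsigned long *ghcb_bitmap_ptr(struct ghcb *ghcb)
{
	/* Without the flag, GCC 9+ emits -Waddress-of-packed-member here. */
	return (unsigned long *)&ghcb->save.valid_bitmap;
}
```
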