@@ -1372,6 +1372,7 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
static const uint32_t msrs_to_send[] = {
MSR_SPEC_CTRL,
MSR_INTEL_MISC_FEATURES_ENABLES,
+ MSR_PKRS,
MSR_IA32_BNDCFGS,
MSR_IA32_XSS,
MSR_AMD64_DR0_ADDRESS_MASK,
@@ -632,6 +632,11 @@ static void vmx_cpuid_policy_changed(struct vcpu *v)
vmx_clear_msr_intercept(v, MSR_FLUSH_CMD, VMX_MSR_RW);
else
vmx_set_msr_intercept(v, MSR_FLUSH_CMD, VMX_MSR_RW);
+
+ if ( cp->feat.pks )
+ vmx_clear_msr_intercept(v, MSR_PKRS, VMX_MSR_RW);
+ else
+ vmx_set_msr_intercept(v, MSR_PKRS, VMX_MSR_RW);
}

int vmx_guest_x86_mode(struct vcpu *v)
@@ -28,6 +28,7 @@
#include <asm/hvm/nestedhvm.h>
#include <asm/hvm/viridian.h>
#include <asm/msr.h>
+#include <asm/prot-key.h>
#include <asm/setup.h>
#include <public/hvm/params.h>
@@ -315,6 +316,13 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
*val = 0;
break;
+ case MSR_PKRS:
+ if ( !cp->feat.pks )
+ goto gp_fault;
+
+ *val = (v == curr) ? rdpkrs() : msrs->pkrs;
+ break;
+
case MSR_X2APIC_FIRST ... MSR_X2APIC_LAST:
if ( !is_hvm_domain(d) || v != curr )
goto gp_fault;
@@ -581,6 +589,15 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
break;
goto gp_fault;
+ case MSR_PKRS:
+ if ( !cp->feat.pks || val != (uint32_t)val )
+ goto gp_fault;
+
+ msrs->pkrs = val;
+ if ( v == curr )
+ wrmsr(MSR_PKRS, val, 0);
+ break;
+
case MSR_X2APIC_FIRST ... MSR_X2APIC_LAST:
if ( !is_hvm_domain(d) || v != curr )
goto gp_fault;
Have guest_{rd,wr}msr() access either the live register, or stashed state,
depending on context.  Include MSR_PKRS for migration, and let the guest have
full access.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Wei Liu <wl@xen.org>
CC: Kevin Tian <kevin.tian@intel.com>
---
 xen/arch/x86/hvm/hvm.c     |  1 +
 xen/arch/x86/hvm/vmx/vmx.c |  5 +++++
 xen/arch/x86/msr.c         | 17 +++++++++++++++
 3 files changed, 23 insertions(+)