Message ID | 97f610c7fcf0410985a3ff4cd6d4013f83fe59e6.1600114548.git.thomas.lendacky@amd.com (mailing list archive)
---|---
State | New, archived
Series | SEV-ES hypervisor support
On Mon, Sep 14, 2020 at 03:15:37PM -0500, Tom Lendacky wrote:
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 6f5988c305e1..5e5f1e8fed3a 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1033,6 +1033,26 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
>  }
>  EXPORT_SYMBOL_GPL(kvm_set_cr4);
>  
> +int kvm_track_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
> +{
> +	unsigned long old_cr4 = kvm_read_cr4(vcpu);
> +	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
> +				   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
> +
> +	if (kvm_x86_ops.set_cr4(vcpu, cr4))
> +		return 1;

Pretty much all the same comments as EFER and CR0, e.g. call svm_set_cr4()
directly instead of bouncing through kvm_x86_ops. And with that, this can
be called __kvm_set_cr4() to be consistent with __kvm_set_cr0().

> +
> +	if (((cr4 ^ old_cr4) & pdptr_bits) ||
> +	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
> +		kvm_mmu_reset_context(vcpu);
> +
> +	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
> +		kvm_update_cpuid_runtime(vcpu);
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(kvm_track_cr4);
> +
>  int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
>  {
>  	bool skip_tlb_flush = false;
> -- 
> 2.28.0
> 
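For reference, this is the kind of split being suggested: the common helper keeps only the post-write bookkeeping, while the vendor code (here SVM) writes the new CR4 value itself via svm_set_cr4() before calling it. The sketch below is illustrative only; the exact signature (old_cr4 supplied by the caller) and the void return are assumptions, not quotes from the series.

```c
/*
 * Sketch of the suggested shape, as it might appear in arch/x86/kvm/x86.c.
 * Names and signature are assumptions: the caller is expected to have
 * captured old_cr4 and written the new value via svm_set_cr4() already,
 * so no kvm_x86_ops bounce (and no failure path) is needed here.
 */
void __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4,
                   unsigned long cr4)
{
        unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
                                   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;

        /* A change to any PDPTR-affecting bit, or clearing PCIDE, requires
         * rebuilding the MMU context. */
        if (((cr4 ^ old_cr4) & pdptr_bits) ||
            (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
                kvm_mmu_reset_context(vcpu);

        /* Toggling OSXSAVE or PKE changes guest-visible CPUID bits. */
        if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
                kvm_update_cpuid_runtime(vcpu);
}
```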
On 15/09/20 00:16, Sean Christopherson wrote:
>> +int kvm_track_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
>> +{
>> +	unsigned long old_cr4 = kvm_read_cr4(vcpu);
>> +	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
>> +				   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
>> +
>> +	if (kvm_x86_ops.set_cr4(vcpu, cr4))
>> +		return 1;
> Pretty much all the same comments as EFER and CR0, e.g. call svm_set_cr4()
> directly instead of bouncing through kvm_x86_ops. And with that, this can
> be called __kvm_set_cr4() to be consistent with __kvm_set_cr0().

I agree with calling svm_set_cr4 directly, but then this should be
kvm_post_set_cr4.

Paolo

>> +
>> +	if (((cr4 ^ old_cr4) & pdptr_bits) ||
>> +	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
>> +		kvm_mmu_reset_context(vcpu);
>> +
>> +	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
>> +		kvm_update_cpuid_runtime(vcpu);
>> +
>> +	return 0;
>> +}
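Paolo's objection is to the name only: the helper sketched above would simply be called kvm_post_set_cr4(). A rough sketch of how the SVM side of the SEV-ES CR4 write trap might then look, assuming that name and assuming svm_set_cr4() can be applied as-is to the already-completed write (the function name svm_handle_cr4_write_trap() is purely illustrative):

```c
/*
 * Illustrative only, not the final patch: handle a CR4 write trap by
 * writing the architectural state directly via svm_set_cr4() and then
 * doing the common bookkeeping, per the review feedback.
 * kvm_post_set_cr4() is the name Paolo suggests; it does not exist in
 * this tree yet.
 */
static void svm_handle_cr4_write_trap(struct vcpu_svm *svm)
{
        unsigned long old_cr4 = kvm_read_cr4(&svm->vcpu);
        unsigned long new_cr4 = svm->vmcb->control.exit_info_1;

        /* For a trapped write the guest's CR4 has already changed; the
         * value is only being propagated into KVM's view of the vCPU. */
        svm_set_cr4(&svm->vcpu, new_cr4);
        kvm_post_set_cr4(&svm->vcpu, old_cr4, new_cr4);
}
```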
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9cc9b65bea7e..e4fd2600ecf6 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1433,6 +1433,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
 int kvm_track_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+int kvm_track_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index cc45d7996e9c..ea88789d71f2 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -202,6 +202,7 @@
 	{ SVM_EXIT_XSETBV,	"xsetbv" }, \
 	{ SVM_EXIT_EFER_WRITE_TRAP,	"write_efer_trap" }, \
 	{ SVM_EXIT_CR0_WRITE_TRAP,	"write_cr0_trap" }, \
+	{ SVM_EXIT_CR4_WRITE_TRAP,	"write_cr4_trap" }, \
 	{ SVM_EXIT_NPF,		"npf" }, \
 	{ SVM_EXIT_AVIC_INCOMPLETE_IPI,	"avic_incomplete_ipi" }, \
 	{ SVM_EXIT_AVIC_UNACCELERATED_ACCESS,	"avic_unaccelerated_access" }, \
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 506656988559..ec5efa1d4344 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2423,6 +2423,9 @@ static int cr_trap(struct vcpu_svm *svm)
 	case 0:
 		kvm_track_cr0(&svm->vcpu, svm->vmcb->control.exit_info_1);
 		break;
+	case 4:
+		kvm_track_cr4(&svm->vcpu, svm->vmcb->control.exit_info_1);
+		break;
 	default:
 		WARN(1, "unhandled CR%d write trap", cr);
 		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
@@ -2976,6 +2979,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_RDPRU]			= rdpru_interception,
 	[SVM_EXIT_EFER_WRITE_TRAP]		= efer_trap,
 	[SVM_EXIT_CR0_WRITE_TRAP]		= cr_trap,
+	[SVM_EXIT_CR4_WRITE_TRAP]		= cr_trap,
 	[SVM_EXIT_NPF]				= npf_interception,
 	[SVM_EXIT_RSM]				= rsm_interception,
 	[SVM_EXIT_AVIC_INCOMPLETE_IPI]		= avic_incomplete_ipi_interception,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6f5988c305e1..5e5f1e8fed3a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1033,6 +1033,26 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr4);
 
+int kvm_track_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+	unsigned long old_cr4 = kvm_read_cr4(vcpu);
+	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
+				   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
+
+	if (kvm_x86_ops.set_cr4(vcpu, cr4))
+		return 1;
+
+	if (((cr4 ^ old_cr4) & pdptr_bits) ||
+	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
+		kvm_mmu_reset_context(vcpu);
+
+	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
+		kvm_update_cpuid_runtime(vcpu);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_track_cr4);
+
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	bool skip_tlb_flush = false;
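Note that the svm.c hunk above only adds the new case: the cr variable and the rest of cr_trap() come from the preceding CR0 write-trap patch in this series, which is not quoted here. A rough reconstruction of the whole handler with this patch applied is sketched below; the derivation of cr from the exit code and the instruction-completion call are assumptions based on the contiguous SVM_EXIT_CRx_WRITE_TRAP exit codes, not quoted text.

```c
/*
 * Rough reconstruction of cr_trap() for context; the parts outside the
 * quoted hunk (deriving 'cr' from the exit code, the completion call)
 * are assumptions based on the rest of the series, not quoted text.
 */
static int cr_trap(struct vcpu_svm *svm)
{
        unsigned int cr;

        /* The CR0..CR15 write-trap exit codes are contiguous, so the exit
         * code identifies which control register was written; the trapped
         * (already-written) value is delivered in exit_info_1. */
        cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP;

        switch (cr) {
        case 0:
                kvm_track_cr0(&svm->vcpu, svm->vmcb->control.exit_info_1);
                break;
        case 4:
                kvm_track_cr4(&svm->vcpu, svm->vmcb->control.exit_info_1);
                break;
        default:
                WARN(1, "unhandled CR%d write trap", cr);
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
                return 1;
        }

        return kvm_complete_insn_gp(&svm->vcpu, 0);
}
```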