Message ID | 159597948045.12744.16141020747986494741.stgit@bmoger-ubuntu (mailing list archive) |
---|---|
State | New, archived |
Series | SVM cleanup and INVPCID support for the AMD guests |
On Tue, Jul 28, 2020 at 4:38 PM Babu Moger <babu.moger@amd.com> wrote:
>
> Change intercept_cr to generic intercepts in vmcb_control_area.
> Use the new __set_intercept, __clr_intercept and __is_intercept
> where applicable.
>
> Signed-off-by: Babu Moger <babu.moger@amd.com>
> ---
>  arch/x86/include/asm/svm.h |   42 ++++++++++++++++++++++++++++++++----------
>  arch/x86/kvm/svm/nested.c  |   26 +++++++++++++++++---------
>  arch/x86/kvm/svm/svm.c     |    4 ++--
>  arch/x86/kvm/svm/svm.h     |    6 +++---
>  4 files changed, 54 insertions(+), 24 deletions(-)
>
> [...]
>
>         if (g->int_ctl & V_INTR_MASKING_MASK) {
>                 /* We only want the cr8 intercept bits of L1 */
> -               c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
> -               c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);
> +               __clr_intercept(&c->intercepts, INTERCEPT_CR8_READ);
> +               __clr_intercept(&c->intercepts, INTERCEPT_CR8_WRITE);

Why the direct calls to the __clr_intercept worker function? Can't these
be calls to clr_cr_intercept()?
Likewise throughout.
> -----Original Message-----
> From: Jim Mattson <jmattson@google.com>
> Sent: Tuesday, July 28, 2020 6:56 PM
> To: Moger, Babu <Babu.Moger@amd.com>
> Cc: Paolo Bonzini <pbonzini@redhat.com>; Vitaly Kuznetsov <vkuznets@redhat.com>;
>     Wanpeng Li <wanpengli@tencent.com>; Sean Christopherson
>     <sean.j.christopherson@intel.com>; kvm list <kvm@vger.kernel.org>;
>     Joerg Roedel <joro@8bytes.org>; the arch/x86 maintainers <x86@kernel.org>;
>     LKML <linux-kernel@vger.kernel.org>; Ingo Molnar <mingo@redhat.com>;
>     Borislav Petkov <bp@alien8.de>; H. Peter Anvin <hpa@zytor.com>;
>     Thomas Gleixner <tglx@linutronix.de>
> Subject: Re: [PATCH v3 02/11] KVM: SVM: Change intercept_cr to generic
> intercepts
>
> On Tue, Jul 28, 2020 at 4:38 PM Babu Moger <babu.moger@amd.com> wrote:
> >
> > Change intercept_cr to generic intercepts in vmcb_control_area.
> > Use the new __set_intercept, __clr_intercept and __is_intercept where
> > applicable.
> >
> > Signed-off-by: Babu Moger <babu.moger@amd.com>
> >
> > [...]
> >
> >         if (g->int_ctl & V_INTR_MASKING_MASK) {
> >                 /* We only want the cr8 intercept bits of L1 */
> > -               c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
> > -               c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);
> > +               __clr_intercept(&c->intercepts, INTERCEPT_CR8_READ);
> > +               __clr_intercept(&c->intercepts, INTERCEPT_CR8_WRITE);
>
> Why the direct calls to the __clr_intercept worker function? Can't these be calls
> to clr_cr_intercept()?
> Likewise throughout.

This code uses the address to clear the bits. So called __clr_intercept
directly. The call clr_cr_intercept() passes the structure vcpu_svm and
then uses get_host_vmcb to get the address.
On 29/07/20 18:08, Babu Moger wrote:
>>>
>>>         if (g->int_ctl & V_INTR_MASKING_MASK) {
>>>                 /* We only want the cr8 intercept bits of L1 */
>>> -               c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
>>> -               c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);
>>> +               __clr_intercept(&c->intercepts, INTERCEPT_CR8_READ);
>>> +               __clr_intercept(&c->intercepts, INTERCEPT_CR8_WRITE);
>> Why the direct calls to the __clr_intercept worker function? Can't these be calls
>> to clr_cr_intercept()?
>> Likewise throughout.
> This code uses the address to clear the bits. So called __clr_intercept
> directly. The call clr_cr_intercept() passes the structure vcpu_svm and
> then uses get_host_vmcb to get the address.

Yes, this is correct.

Paolo
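A minimal sketch of the two call shapes under discussion may help here. The clr_cr_intercept() wrapper below mirrors the svm.h hunk in this patch; the __clr_intercept() body is only an assumption (its real definition is added by an earlier patch in this series and is not shown on this page), modeled as a plain bit operation that treats intercepts[] as one flat bitmap.

```c
/* Worker: operates directly on a pointer to the intercepts[] bitmap.
 * Body assumed for illustration; the real helper comes from an earlier
 * patch in the series. */
static inline void __clr_intercept(void *addr, int bit)
{
	u32 *intercepts = addr;

	intercepts[bit / 32] &= ~(1u << (bit % 32));
}

/* Wrapper (as converted by this patch): resolves the host VMCB from the
 * vCPU, clears the bit there, then recalculates the active intercepts. */
static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	__clr_intercept(&vmcb->control.intercepts, bit);

	recalc_intercepts(svm);
}
```

Inside recalc_intercepts() the code already holds the vmcb_control_area pointers it is merging, and the wrapper both targets the host VMCB and ends by calling recalc_intercepts() itself, so the address-based worker is the natural choice there.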
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 8a1f5382a4ea..d4739f4eae63 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -4,6 +4,37 @@
 
 #include <uapi/asm/svm.h>
 
+/*
+ * VMCB Control Area intercept bits starting
+ * at Byte offset 000h (Vector 0).
+ */
+
+enum vector_offset {
+        CR_VECTOR = 0,
+        MAX_VECTORS,
+};
+
+enum {
+        /* Byte offset 000h (Vector 0) */
+        INTERCEPT_CR0_READ = 0,
+        INTERCEPT_CR1_READ,
+        INTERCEPT_CR2_READ,
+        INTERCEPT_CR3_READ,
+        INTERCEPT_CR4_READ,
+        INTERCEPT_CR5_READ,
+        INTERCEPT_CR6_READ,
+        INTERCEPT_CR7_READ,
+        INTERCEPT_CR8_READ,
+        INTERCEPT_CR0_WRITE = 16,
+        INTERCEPT_CR1_WRITE,
+        INTERCEPT_CR2_WRITE,
+        INTERCEPT_CR3_WRITE,
+        INTERCEPT_CR4_WRITE,
+        INTERCEPT_CR5_WRITE,
+        INTERCEPT_CR6_WRITE,
+        INTERCEPT_CR7_WRITE,
+        INTERCEPT_CR8_WRITE,
+};
 
 enum {
         INTERCEPT_INTR,
@@ -57,7 +88,7 @@ enum {
 
 
 struct __attribute__ ((__packed__)) vmcb_control_area {
-        u32 intercept_cr;
+        u32 intercepts[MAX_VECTORS];
         u32 intercept_dr;
         u32 intercept_exceptions;
         u64 intercept;
@@ -240,15 +271,6 @@ struct __attribute__ ((__packed__)) vmcb {
 #define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
 #define SVM_SELECTOR_CODE_MASK (1 << 3)
 
-#define INTERCEPT_CR0_READ      0
-#define INTERCEPT_CR3_READ      3
-#define INTERCEPT_CR4_READ      4
-#define INTERCEPT_CR8_READ      8
-#define INTERCEPT_CR0_WRITE     (16 + 0)
-#define INTERCEPT_CR3_WRITE     (16 + 3)
-#define INTERCEPT_CR4_WRITE     (16 + 4)
-#define INTERCEPT_CR8_WRITE     (16 + 8)
-
 #define INTERCEPT_DR0_READ      0
 #define INTERCEPT_DR1_READ      1
 #define INTERCEPT_DR2_READ      2
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 6bceafb19108..46f5c82d9b45 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -105,6 +105,7 @@ static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
 void recalc_intercepts(struct vcpu_svm *svm)
 {
         struct vmcb_control_area *c, *h, *g;
+        unsigned int i;
 
         mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 
@@ -117,15 +118,17 @@ void recalc_intercepts(struct vcpu_svm *svm)
 
         svm->nested.host_intercept_exceptions = h->intercept_exceptions;
 
-        c->intercept_cr = h->intercept_cr;
+        for (i = 0; i < MAX_VECTORS; i++)
+                c->intercepts[i] = h->intercepts[i];
+
         c->intercept_dr = h->intercept_dr;
         c->intercept_exceptions = h->intercept_exceptions;
         c->intercept = h->intercept;
 
         if (g->int_ctl & V_INTR_MASKING_MASK) {
                 /* We only want the cr8 intercept bits of L1 */
-                c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
-                c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);
+                __clr_intercept(&c->intercepts, INTERCEPT_CR8_READ);
+                __clr_intercept(&c->intercepts, INTERCEPT_CR8_WRITE);
 
                 /*
                  * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
@@ -138,7 +141,9 @@ void recalc_intercepts(struct vcpu_svm *svm)
         /* We don't want to see VMMCALLs from a nested guest */
         c->intercept &= ~(1ULL << INTERCEPT_VMMCALL);
 
-        c->intercept_cr |= g->intercept_cr;
+        for (i = 0; i < MAX_VECTORS; i++)
+                c->intercepts[i] |= g->intercepts[i];
+
         c->intercept_dr |= g->intercept_dr;
         c->intercept_exceptions |= g->intercept_exceptions;
         c->intercept |= g->intercept;
@@ -147,7 +152,11 @@ void recalc_intercepts(struct vcpu_svm *svm)
 static void copy_vmcb_control_area(struct vmcb_control_area *dst,
                                    struct vmcb_control_area *from)
 {
-        dst->intercept_cr = from->intercept_cr;
+        unsigned int i;
+
+        for (i = 0; i < MAX_VECTORS; i++)
+                dst->intercepts[i] = from->intercepts[i];
+
         dst->intercept_dr = from->intercept_dr;
         dst->intercept_exceptions = from->intercept_exceptions;
         dst->intercept = from->intercept;
@@ -430,8 +439,8 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
                                 nested_vmcb->control.event_inj,
                                 nested_vmcb->control.nested_ctl);
 
-        trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
-                                    nested_vmcb->control.intercept_cr >> 16,
+        trace_kvm_nested_intercepts(nested_vmcb->control.intercepts[CR_VECTOR] & 0xffff,
+                                    nested_vmcb->control.intercepts[CR_VECTOR] >> 16,
                                     nested_vmcb->control.intercept_exceptions,
                                     nested_vmcb->control.intercept);
 
@@ -703,8 +712,7 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
                 vmexit = nested_svm_intercept_ioio(svm);
                 break;
         case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
-                u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
-                if (svm->nested.ctl.intercept_cr & bit)
+                if (__is_intercept(&svm->nested.ctl.intercepts, exit_code))
                         vmexit = NESTED_EXIT_DONE;
                 break;
         }
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c0da4dd78ac5..334bda8b31c1 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2797,8 +2797,8 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
         }
 
         pr_err("VMCB Control Area:\n");
-        pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
-        pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
+        pr_err("%-20s%04x\n", "cr_read:", control->intercepts[CR_VECTOR] & 0xffff);
+        pr_err("%-20s%04x\n", "cr_write:", control->intercepts[CR_VECTOR] >> 16);
         pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
         pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
         pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 3b669718190a..89d1d91d5bc6 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -236,7 +236,7 @@ static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
 {
         struct vmcb *vmcb = get_host_vmcb(svm);
 
-        vmcb->control.intercept_cr |= (1U << bit);
+        __set_intercept(&vmcb->control.intercepts, bit);
 
         recalc_intercepts(svm);
 }
@@ -245,7 +245,7 @@ static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
 {
         struct vmcb *vmcb = get_host_vmcb(svm);
 
-        vmcb->control.intercept_cr &= ~(1U << bit);
+        __clr_intercept(&vmcb->control.intercepts, bit);
 
         recalc_intercepts(svm);
 }
@@ -254,7 +254,7 @@ static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
 {
         struct vmcb *vmcb = get_host_vmcb(svm);
 
-        return vmcb->control.intercept_cr & (1U << bit);
+        return __is_intercept(&vmcb->control.intercepts, bit);
 }
 
 static inline void set_dr_intercepts(struct vcpu_svm *svm)
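As a quick standalone sanity check of the conversion (a userspace model, not kernel code; the helper bodies below are assumptions, since the real __set_intercept()/__is_intercept() definitions come from an earlier patch in the series), the following program shows that setting CR intercepts through the new intercepts[] array yields the same word as the old intercept_cr masks, and why nested_svm_intercept() can pass exit_code straight to __is_intercept(): SVM_EXIT_READ_CR0 is 0x000, so the CR read/write exit codes coincide with the vector-0 bit positions.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Values copied from this patch and from uapi/asm/svm.h. */
enum vector_offset { CR_VECTOR = 0, MAX_VECTORS };

enum {
	INTERCEPT_CR0_READ  = 0,
	INTERCEPT_CR8_READ  = 8,
	INTERCEPT_CR0_WRITE = 16,
	INTERCEPT_CR8_WRITE = 24,
};

#define SVM_EXIT_READ_CR0  0x000
#define SVM_EXIT_WRITE_CR8 0x018

/* Assumed helper bodies: the array is treated as one flat bitmap. */
static void __set_intercept(uint32_t *intercepts, int bit)
{
	intercepts[bit / 32] |= 1u << (bit % 32);
}

static int __is_intercept(const uint32_t *intercepts, int bit)
{
	return (intercepts[bit / 32] >> (bit % 32)) & 1;
}

int main(void)
{
	uint32_t intercepts[MAX_VECTORS] = { 0 };
	uint32_t old_intercept_cr = 0;

	/* New style: set CR8 read/write intercepts through the array. */
	__set_intercept(intercepts, INTERCEPT_CR8_READ);
	__set_intercept(intercepts, INTERCEPT_CR8_WRITE);

	/* Old style: the open-coded masks on intercept_cr. */
	old_intercept_cr |= 1u << 8;        /* INTERCEPT_CR8_READ  */
	old_intercept_cr |= 1u << (16 + 8); /* INTERCEPT_CR8_WRITE */

	assert(intercepts[CR_VECTOR] == old_intercept_cr);

	/* CR exit codes start at 0, so they index the bitmap directly,
	 * which is why nested_svm_intercept() can pass exit_code as-is. */
	assert(__is_intercept(intercepts, SVM_EXIT_WRITE_CR8));
	assert(!__is_intercept(intercepts, SVM_EXIT_READ_CR0));

	printf("cr_read: %04x cr_write: %04x\n",
	       (unsigned)(intercepts[CR_VECTOR] & 0xffff),
	       (unsigned)(intercepts[CR_VECTOR] >> 16));
	return 0;
}
```

Built with a plain C compiler, the asserts pass, and the printout uses the same low-16/high-16 split of the CR vector word that dump_vmcb() prints as cr_read/cr_write.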
Change intercept_cr to generic intercepts in vmcb_control_area.
Use the new __set_intercept, __clr_intercept and __is_intercept
where applicable.

Signed-off-by: Babu Moger <babu.moger@amd.com>
---
 arch/x86/include/asm/svm.h |   42 ++++++++++++++++++++++++++++++++----------
 arch/x86/kvm/svm/nested.c  |   26 +++++++++++++++++---------
 arch/x86/kvm/svm/svm.c     |    4 ++--
 arch/x86/kvm/svm/svm.h     |    6 +++---
 4 files changed, 54 insertions(+), 24 deletions(-)
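One detail worth noting about the conversion (my reading of the patch, not something the commit message states): with only CR_VECTOR defined, MAX_VECTORS is 1, so the new array occupies the same four bytes at byte offset 000h of the packed vmcb_control_area as the old intercept_cr field, and presumably later patches in the series grow the array as the remaining intercept words are converted. A hedged layout sketch, with illustrative struct names, fields beyond the first two words elided:

```c
#include <stdint.h>

enum vector_offset { CR_VECTOR = 0, MAX_VECTORS };  /* MAX_VECTORS == 1 at this point */

/* Old layout. */
struct __attribute__((__packed__)) vmcb_control_area_old {
	uint32_t intercept_cr;            /* byte offset 000h */
	uint32_t intercept_dr;            /* byte offset 004h */
};

/* New layout from this patch: same bytes, now behind an array. */
struct __attribute__((__packed__)) vmcb_control_area_new {
	uint32_t intercepts[MAX_VECTORS]; /* byte offset 000h, still 4 bytes */
	uint32_t intercept_dr;            /* byte offset 004h */
};

_Static_assert(sizeof(struct vmcb_control_area_old) ==
	       sizeof(struct vmcb_control_area_new),
	       "the conversion does not change the VMCB control area layout");
```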