
[1/3] KVM: emulate: #GP when emulating rdpmc if CR0.PE is 1

Message ID 1633687054-18865-1-git-send-email-wanpengli@tencent.com (mailing list archive)
State New, archived

Commit Message

Wanpeng Li Oct. 8, 2021, 9:57 a.m. UTC
From: Wanpeng Li <wanpengli@tencent.com>

The SDM describes the RDPMC operation as:

  IF (((CR4.PCE = 1) or (CPL = 0) or (CR0.PE = 0)) and (ECX indicates a supported counter)) 
      THEN
          EAX := counter[31:0];
          EDX := ZeroExtend(counter[MSCB:32]);
      ELSE (* ECX is not valid or CR4.PCE is 0 and CPL is 1, 2, or 3 and CR0.PE is 1 *)
          #GP(0); 
  FI;

Let's add the CR0.PE=1 check to RDPMC emulation.
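
In emulator terms, the condition above boils down to something like the
following (an illustrative sketch only; rdpmc_allowed() is a hypothetical
helper invented for this changelog, not the actual KVM code, which lives in
check_rdpmc() in arch/x86/kvm/emulate.c):

  /*
   * Sketch of the SDM pseudocode above; the helper name and signature
   * are hypothetical.
   */
  static bool rdpmc_allowed(u64 cr0, u64 cr4, int cpl, bool counter_ok)
  {
          /* CR4.PCE=1, CPL=0, or real mode (CR0.PE=0) permits RDPMC ... */
          bool priv_ok = (cr4 & X86_CR4_PCE) || cpl == 0 || !(cr0 & X86_CR0_PE);

          /* ... provided ECX selects a supported counter; otherwise #GP(0). */
          return priv_ok && counter_ok;
  }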

Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
---
 arch/x86/kvm/emulate.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

Comments

Sean Christopherson Oct. 8, 2021, 3:20 p.m. UTC | #1
The shortlog makes it sound like "inject a #GP if CR0.PE=1", i.e. unconditionally
inject #GP for RDPMC in protected mode.  Maybe "Don't inject #GP when emulating
RDPMC if CR0.PE=0"?

On Fri, Oct 08, 2021, Wanpeng Li wrote:
> From: Wanpeng Li <wanpengli@tencent.com>
> 
> The SDM describes the RDPMC operation as:
> 
>   IF (((CR4.PCE = 1) or (CPL = 0) or (CR0.PE = 0)) and (ECX indicates a supported counter)) 
>       THEN
>           EAX := counter[31:0];
>           EDX := ZeroExtend(counter[MSCB:32]);
>       ELSE (* ECX is not valid or CR4.PCE is 0 and CPL is 1, 2, or 3 and CR0.PE is 1 *)
>           #GP(0); 
>   FI;
> 
> Let's add the CR0.PE=1 check to RDPMC emulation.
> 
> Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
> ---
>  arch/x86/kvm/emulate.c | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> index 9a144ca8e146..ab7ec569e8c9 100644
> --- a/arch/x86/kvm/emulate.c
> +++ b/arch/x86/kvm/emulate.c
> @@ -4213,6 +4213,7 @@ static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
>  static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
>  {
>  	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
> +	u64 cr0 = ctxt->ops->get_cr(ctxt, 0);
>  	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
>  
>  	/*
> @@ -4222,7 +4223,7 @@ static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
>  	if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
>  		return X86EMUL_CONTINUE;
>  
> -	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
> +	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt) && (cr0 & X86_CR0_PE)) ||

I don't think it's possible for CPL to be >0 if CR0.PE=0, e.g. we could probably
WARN in the #GP path.  Realistically it doesn't add value though, so maybe just
add a blurb in the changelog saying this isn't strictly necessary?
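
Something along these lines, purely as a sketch of the idea (not a concrete
proposal, and untested):

	/*
	 * Sketch only: with CPL > 0, CR0.PE should always be 1, so warn on
	 * the impossible combination instead of gating the #GP on CR0.PE.
	 */
	if (!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) {
		WARN_ON_ONCE(!(cr0 & X86_CR0_PE));
		return emulate_gp(ctxt, 0);
	}
	if (ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);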

>  	    ctxt->ops->check_pmc(ctxt, rcx))
>  		return emulate_gp(ctxt, 0);
>  
> -- 
> 2.25.1
>
Wanpeng Li Oct. 9, 2021, 9:09 a.m. UTC | #2
On Fri, 8 Oct 2021 at 23:20, Sean Christopherson <seanjc@google.com> wrote:
>
> The shortlog makes it sound like "inject a #GP if CR0.PE=1", i.e. unconditionally
> inject #GP for RDPMC in protected mode.  Maybe "Don't inject #GP when emulating
> RDPMC if CR0.PE=0"?
>

Agreed.

> On Fri, Oct 08, 2021, Wanpeng Li wrote:
> > From: Wanpeng Li <wanpengli@tencent.com>
> >
> > The SDM describes the RDPMC operation as:
> >
> >   IF (((CR4.PCE = 1) or (CPL = 0) or (CR0.PE = 0)) and (ECX indicates a supported counter))
> >       THEN
> >           EAX := counter[31:0];
> >           EDX := ZeroExtend(counter[MSCB:32]);
> >       ELSE (* ECX is not valid or CR4.PCE is 0 and CPL is 1, 2, or 3 and CR0.PE is 1 *)
> >           #GP(0);
> >   FI;
> >
> > Let's add the CR0.PE=1 check to RDPMC emulation.
> >
> > Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
> > ---
> >  arch/x86/kvm/emulate.c | 3 ++-
> >  1 file changed, 2 insertions(+), 1 deletion(-)
> >
> > diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> > index 9a144ca8e146..ab7ec569e8c9 100644
> > --- a/arch/x86/kvm/emulate.c
> > +++ b/arch/x86/kvm/emulate.c
> > @@ -4213,6 +4213,7 @@ static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
> >  static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
> >  {
> >       u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
> > +     u64 cr0 = ctxt->ops->get_cr(ctxt, 0);
> >       u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
> >
> >       /*
> > @@ -4222,7 +4223,7 @@ static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
> >       if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
> >               return X86EMUL_CONTINUE;
> >
> > -     if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
> > +     if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt) && (cr0 & X86_CR0_PE)) ||
>
> I don't think it's possible for CPL to be >0 if CR0.PE=0, e.g. we could probably
> WARN in the #GP path.  Realistically it doesn't add value though, so maybe just
> add a blurb in the changelog saying this isn't strictly necessary?

Will do in v2.

    Wanpeng

Patch

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 9a144ca8e146..ab7ec569e8c9 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -4213,6 +4213,7 @@ static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
 {
 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
+	u64 cr0 = ctxt->ops->get_cr(ctxt, 0);
 	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
 
 	/*
@@ -4222,7 +4223,7 @@ static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
 	if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
 		return X86EMUL_CONTINUE;
 
-	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
+	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt) && (cr0 & X86_CR0_PE)) ||
 	    ctxt->ops->check_pmc(ctxt, rcx))
 		return emulate_gp(ctxt, 0);