[v2,3/3] KVM: x86: Move #PF retry tracking variables into emulation context

Message ID 20200218230310.29410-4-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series KVM: x86: Minor emulator cleanup

Commit Message

Sean Christopherson Feb. 18, 2020, 11:03 p.m. UTC
Move last_retry_eip and last_retry_addr into the emulation context as
they are specific to retrying an instruction after emulation failure.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/include/asm/kvm_emulate.h |  4 ++++
 arch/x86/include/asm/kvm_host.h    |  3 ---
 arch/x86/kvm/x86.c                 | 11 ++++++-----
 3 files changed, 10 insertions(+), 8 deletions(-)

Comments

Paolo Bonzini Feb. 19, 2020, 8:13 a.m. UTC | #1
On 19/02/20 00:03, Sean Christopherson wrote:
> Move last_retry_eip and last_retry_addr into the emulation context as
> they are specific to retrying an instruction after emulation failure.
> 
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>

I'm not sure about this, since it's not used by emulate.c.  The other
two patches are good.

Paolo

Sean Christopherson Feb. 19, 2020, 3:16 p.m. UTC | #2
On Wed, Feb 19, 2020 at 09:13:00AM +0100, Paolo Bonzini wrote:
> On 19/02/20 00:03, Sean Christopherson wrote:
> > Move last_retry_eip and last_retry_addr into the emulation context as
> > they are specific to retrying an instruction after emulation failure.
> > 
> > Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> 
> I'm not sure about this, since it's not used by emulate.c.  The other
> two patches are good.

The easy solution to that is to move retry_instruction() into emulate.c.
That would also allow making x86_page_table_writing_insn() static.  All
other functions invoked from retry_instruction() are exposed via kvm_host.h.

Moving last_retry_* into the emulator context hopefully makes it more clear
that this code is the only user of the variables, e.g. last_retry_eip can't
be set by some other non-emulator flow.

	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
		return false;

	vcpu->arch.last_retry_eip = ctxt->eip;
	vcpu->arch.last_retry_addr = cr2;

Paolo Bonzini Feb. 21, 2020, 5:14 p.m. UTC | #3
On 19/02/20 16:16, Sean Christopherson wrote:
> The easy solution to that is to move retry_instruction() into emulate.c.
> That would also allow making x86_page_table_writing_insn() static.  All
> other functions invoked from retry_instruction() are exposed via kvm_host.h.

emulate.c is supposed to invoke no (or almost no) function outside the
ctxt->ops struct.  In particular, retry_instruction() invokes
kvm_mmu_gva_to_gpa_write and kvm_mmu_unprotect_page.

Paolo
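
For reference, a minimal sketch of the boundary Paolo describes: anything the emulator needs from KVM proper is reached through callbacks in struct x86_emulate_ops, so an emulate.c-resident retry helper would need hooks along the lines of the two below.  Both hook names and signatures are hypothetical; neither exists in the real ops table.

	/*
	 * Hypothetical hooks standing in for kvm_mmu_gva_to_gpa_write() and
	 * kvm_mmu_unprotect_page(), which emulate.c is not supposed to call
	 * directly.
	 */
	struct x86_emulate_ops {
		/* ... existing callbacks (read_std, write_emulated, get_cr, ...) ... */

		/* hypothetical: translate a GVA for a write access */
		gpa_t (*gva_to_gpa_write)(struct x86_emulate_ctxt *ctxt,
					  unsigned long gva);
		/* hypothetical: unprotect the shadow pages mapping this gfn */
		int (*unprotect_page)(struct x86_emulate_ctxt *ctxt, gfn_t gfn);
	};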
Sean Christopherson Feb. 21, 2020, 6:28 p.m. UTC | #4
On Fri, Feb 21, 2020 at 06:14:03PM +0100, Paolo Bonzini wrote:
> On 19/02/20 16:16, Sean Christopherson wrote:
> > The easy solution to that is to move retry_instruction() into emulate.c.
> > That would also allow making x86_page_table_writing_insn() static.  All
> > other functions invoked from retry_instruction() are exposed via kvm_host.h.
> 
> emulate.c is supposed to invoke no (or almost no) function outside the
> ctxt->ops struct.  In particular, retry_instruction() invokes
> kvm_mmu_gva_to_gpa_write and kvm_mmu_unprotect_page.

Ah, right.  We could split the logic, e.g.

	if (x86_retry_pf_instruction(ctxt, cr2_or_gpa, emulation_type)) {
		gpa_t gpa = cr2_or_gpa;

		if (!vcpu->arch.mmu->direct_map)
			gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);

		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
		return 1;
	}

but that's probably a net negative in terms of clarity.  And there's also
vcpu->arch.write_fault_to_shadow_pgtable, which is consumed only by
reexecute_instruction(), and I 100% agree that that variable should stay
in vcpu->arch.  Moving one flag used to retry #PF instructions and not the
other would be weird.

That was a long-winded way of saying I agree we should drop this patch :-)
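
To make the shape of that split concrete, a rough sketch of the emulate.c half, using the name from Sean's snippet above.  It covers only the checks visible in the quoted hunks (the real retry_instruction() has additional conditions elided from this diff), and the MMU work stays with the x86.c caller as shown in the snippet.

	static bool x86_retry_pf_instruction(struct x86_emulate_ctxt *ctxt,
					     gpa_t cr2_or_gpa, int emulation_type)
	{
		unsigned long last_retry_eip = ctxt->last_retry_eip;
		unsigned long last_retry_addr = ctxt->last_retry_addr;

		/* Clear the tracking state; it is re-armed below only if a retry is allowed. */
		ctxt->last_retry_eip = ctxt->last_retry_addr = 0;

		if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
			return false;

		/* Faulting on the same EIP and address twice means we're stuck in a loop. */
		if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa)
			return false;

		ctxt->last_retry_eip = ctxt->eip;
		ctxt->last_retry_addr = cr2_or_gpa;
		return true;
	}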

Patch

diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index a4ef19a6e612..a26c8de414e8 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -315,6 +315,10 @@  struct x86_emulate_ctxt {
 	bool gpa_available;
 	gpa_t gpa_val;
 
+	/* Track EIP and CR2/GPA when retrying faulting instruction on #PF. */
+	unsigned long last_retry_eip;
+	unsigned long last_retry_addr;
+
 	/*
 	 * decode cache
 	 */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9c79c41eb5f6..6312ea32bb41 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -752,9 +752,6 @@  struct kvm_vcpu_arch {
 
 	cpumask_var_t wbinvd_dirty_mask;
 
-	unsigned long last_retry_eip;
-	unsigned long last_retry_addr;
-
 	struct {
 		bool halted;
 		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f88b72932c35..d19eb776f297 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6407,6 +6407,7 @@  static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
 
 	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 
+	/* last_retry_{eip,addr} are persistent and must not be init'd here. */
 	ctxt->gpa_available = false;
 	ctxt->eflags = kvm_get_rflags(vcpu);
 	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
@@ -6557,8 +6558,8 @@  static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa;
 
-	last_retry_eip = vcpu->arch.last_retry_eip;
-	last_retry_addr = vcpu->arch.last_retry_addr;
+	last_retry_eip = ctxt->last_retry_eip;
+	last_retry_addr = ctxt->last_retry_addr;
 
 	/*
 	 * If the emulation is caused by #PF and it is non-page_table
@@ -6573,7 +6574,7 @@  static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 	 * and the address again, we can break out of the potential infinite
 	 * loop.
 	 */
-	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
+	ctxt->last_retry_eip = ctxt->last_retry_addr = 0;
 
 	if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
 		return false;
@@ -6588,8 +6589,8 @@  static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa)
 		return false;
 
-	vcpu->arch.last_retry_eip = ctxt->eip;
-	vcpu->arch.last_retry_addr = cr2_or_gpa;
+	ctxt->last_retry_eip = ctxt->eip;
+	ctxt->last_retry_addr = cr2_or_gpa;
 
 	if (!vcpu->arch.mmu->direct_map)
 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);