[RFC,v6,69/92] kvm: x86: keep the page protected if tracked by the introspection tool

Message ID 20190809160047.8319-70-alazar@bitdefender.com (mailing list archive)
State New, archived
Series VM introspection

Commit Message

Adalbert Lazăr Aug. 9, 2019, 4 p.m. UTC
Keep the page write-protected if its gfn is tracked by the introspection tool:
skip kvm_mmu_unprotect_page() in the re-execute/retry emulation paths, so that
an emulation failure does not silently drop the protection the tool relies on.

This patch might be obsolete thanks to single-stepping.

Signed-off-by: Adalbert Lazăr <alazar@bitdefender.com>
---
 arch/x86/kvm/x86.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

Comments

Konrad Rzeszutek Wilk Sept. 10, 2019, 2:26 p.m. UTC | #1
On Fri, Aug 09, 2019 at 07:00:24PM +0300, Adalbert Lazăr wrote:
> This patch might be obsolete thanks to single-stepping.

So, should it be skipped from this large patchset to ease review?

> 
> Signed-off-by: Adalbert Lazăr <alazar@bitdefender.com>
> ---
>  arch/x86/kvm/x86.c | 9 +++++++--
>  1 file changed, 7 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 2c06de73a784..06f44ce8ed07 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -6311,7 +6311,8 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
>  		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
>  		spin_unlock(&vcpu->kvm->mmu_lock);
>  
> -		if (indirect_shadow_pages)
> +		if (indirect_shadow_pages
> +		    && !kvmi_tracked_gfn(vcpu, gpa_to_gfn(gpa)))
>  			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
>  
>  		return true;
> @@ -6322,7 +6323,8 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
>  	 * and it failed try to unshadow page and re-enter the
>  	 * guest to let CPU execute the instruction.
>  	 */
> -	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
> +	if (!kvmi_tracked_gfn(vcpu, gpa_to_gfn(gpa)))
> +		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
>  
>  	/*
>  	 * If the access faults on its page table, it can not
> @@ -6374,6 +6376,9 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
>  	if (!vcpu->arch.mmu->direct_map)
>  		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
>  
> +	if (kvmi_tracked_gfn(vcpu, gpa_to_gfn(gpa)))
> +		return false;
> +
>  	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
>  
>  	return true;
Adalbert Lazăr Sept. 10, 2019, 4:28 p.m. UTC | #2
On Tue, 10 Sep 2019 10:26:42 -0400, Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> wrote:
> On Fri, Aug 09, 2019 at 07:00:24PM +0300, Adalbert Lazăr wrote:
> > This patch might be obsolete thanks to single-stepping.
> 
> So, should it be skipped from this large patchset to ease review?

I'll add a couple of warning messages to check whether this patch is still
needed, so that it can be dropped from the next submission (which will be
smaller :) ).

However, on AMD, single-stepping is not an option.

Thanks,
Adalbert
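
To make that check concrete: a hypothetical diagnostic helper (not part of the
posted patch; the name and warning text below are illustrative only) could emit
a one-time warning whenever the introspection check is what keeps a page
write-protected, so the next submission can tell whether these paths are still
reached once single-stepping handles failed emulations:

/*
 * Illustrative sketch only, in the context of arch/x86/kvm/x86.c.
 * kvmi_tracked_gfn() is the predicate added by this series; the helper
 * itself and its warning are a hypothetical diagnostic.
 */
static bool kvmi_warn_if_tracked(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	gfn_t gfn = gpa_to_gfn(gpa);

	if (!kvmi_tracked_gfn(vcpu, gfn))
		return false;

	pr_warn_once("kvmi: left gfn 0x%llx write-protected instead of unprotecting it\n",
		     (unsigned long long)gfn);
	return true;
}

Each of the three call sites in the patch below could then test such a helper
instead of calling kvmi_tracked_gfn() directly, and the warning would fire
exactly when this fallback still matters.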


Patch

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2c06de73a784..06f44ce8ed07 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6311,7 +6311,8 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
 		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
 		spin_unlock(&vcpu->kvm->mmu_lock);
 
-		if (indirect_shadow_pages)
+		if (indirect_shadow_pages
+		    && !kvmi_tracked_gfn(vcpu, gpa_to_gfn(gpa)))
 			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
 
 		return true;
@@ -6322,7 +6323,8 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
 	 * and it failed try to unshadow page and re-enter the
 	 * guest to let CPU execute the instruction.
 	 */
-	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
+	if (!kvmi_tracked_gfn(vcpu, gpa_to_gfn(gpa)))
+		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
 
 	/*
 	 * If the access faults on its page table, it can not
@@ -6374,6 +6376,9 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 	if (!vcpu->arch.mmu->direct_map)
 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
 
+	if (kvmi_tracked_gfn(vcpu, gpa_to_gfn(gpa)))
+		return false;
+
 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
 
 	return true;
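
kvmi_tracked_gfn() itself is introduced by other patches in the series and is
not shown on this page. Purely as an illustration of the kind of lookup the
hot paths above depend on, a minimal predicate over a set of protected gfns
might look like the sketch below; every structure and name in it is an
assumption made for the example, not the KVMI implementation (which keeps its
state per VM rather than in a global table):

#include <linux/hashtable.h>
#include <linux/spinlock.h>
#include <linux/kvm_host.h>

/* Hypothetical record for one gfn the introspection tool protects. */
struct example_tracked_gfn {
	gfn_t gfn;
	struct hlist_node link;
};

/* A single global table here, only to keep the example self-contained. */
static DEFINE_HASHTABLE(example_tracked_gfns, 8);
static DEFINE_SPINLOCK(example_tracked_lock);

/* Register a gfn as tracked; the caller owns the record's lifetime. */
static void example_track_gfn(struct example_tracked_gfn *e, gfn_t gfn)
{
	e->gfn = gfn;
	spin_lock(&example_tracked_lock);
	hash_add(example_tracked_gfns, &e->link, e->gfn);
	spin_unlock(&example_tracked_lock);
}

/* The shape of the predicate the patch consults before unprotecting. */
static bool example_gfn_is_tracked(gfn_t gfn)
{
	struct example_tracked_gfn *t;
	bool found = false;

	spin_lock(&example_tracked_lock);
	hash_for_each_possible(example_tracked_gfns, t, link, gfn) {
		if (t->gfn == gfn) {
			found = true;
			break;
		}
	}
	spin_unlock(&example_tracked_lock);

	return found;
}

Whatever its real shape, the point of the patch is only that this predicate is
consulted before kvm_mmu_unprotect_page(), so a tracked page stays
write-protected even when emulation of a write to it fails.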