
[4/6] kvm/x86/mmu: handle invlpg on large pages

Message ID: 1236255153-4432-5-git-send-email-joerg.roedel@amd.com (mailing list archive)
State: Not Applicable

Commit Message

Joerg Roedel March 5, 2009, 12:12 p.m. UTC
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
 arch/x86/kvm/paging_tmpl.h |   12 +++++++++---
 1 files changed, 9 insertions(+), 3 deletions(-)

Comments

Marcelo Tosatti March 5, 2009, 9:11 p.m. UTC | #1
On Thu, Mar 05, 2009 at 01:12:31PM +0100, Joerg Roedel wrote:
> Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
> ---
>  arch/x86/kvm/paging_tmpl.h |   12 +++++++++---
>  1 files changed, 9 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index 79668ba..aa79396 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -441,6 +441,7 @@ out_unlock:
>  static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
>  {
>  	struct kvm_shadow_walk_iterator iterator;
> +	struct kvm_mmu_page *sp;
>  	pt_element_t gpte;
>  	gpa_t pte_gpa = -1;
>  	int level;
> @@ -451,12 +452,17 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
>  	for_each_shadow_entry(vcpu, gva, iterator) {
>  		level = iterator.level;
>  		sptep = iterator.sptep;
> +		sp = page_header(__pa(sptep));
> +
> +		if (sp->role.direct) {
> +			/* mapped from a guest's large_pte */
> +			kvm_mmu_zap_page(vcpu->kvm, sp);
> +			kvm_flush_remote_tlbs(vcpu->kvm);
> +			return;
> +		}

If the guest has 32-bit pte's there might be:

- two large shadow entries to cover 4MB
- one large shadow entry and one shadow page with 512 4k entries
- two shadow pages with 512 4k entries each

So we need to cover all these cases.
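
To make these combinations concrete, here is a purely hypothetical helper (not part of this patch or of KVM, illustration only) that classifies what the shadow walk can land on for a gva covered by a guest 4MB pde; it uses only helpers the patch itself already relies on:

/*
 * Hypothetical illustration only. A guest using 32-bit non-PAE page
 * tables maps 4MB per large pde, but the shadow page tables always use
 * the 64-bit pte format, where a large pde maps only 2MB. One guest
 * 4MB pde is therefore shadowed by two independent 2MB halves, and each
 * half can be either a large shadow pte or a direct shadow page holding
 * 512 4k sptes -- hence the three combinations listed above.
 */
static int FNAME(large_mapping_kind)(u64 *sptep, int level)
{
	struct kvm_mmu_page *sp = page_header(__pa(sptep));

	if (level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))
		return 1;	/* this half is a single large shadow entry */

	if (level == PT_PAGE_TABLE_LEVEL && sp->role.direct)
		return 2;	/* this half is a direct page of 512 4k sptes */

	return 0;		/* gva is not backed by a guest large page */
}

Since the walk for one gva only visits the half that actually contains gva, handling the direct-sp case alone and returning leaves whatever shadows the other half of the same guest pde untouched.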

Joerg Roedel March 6, 2009, 1:06 p.m. UTC | #2
On Thu, Mar 05, 2009 at 06:11:22PM -0300, Marcelo Tosatti wrote:
> On Thu, Mar 05, 2009 at 01:12:31PM +0100, Joerg Roedel wrote:
> > Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
> > ---
> >  arch/x86/kvm/paging_tmpl.h |   12 +++++++++---
> >  1 files changed, 9 insertions(+), 3 deletions(-)
> > 
> > diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> > index 79668ba..aa79396 100644
> > --- a/arch/x86/kvm/paging_tmpl.h
> > +++ b/arch/x86/kvm/paging_tmpl.h
> > @@ -441,6 +441,7 @@ out_unlock:
> >  static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
> >  {
> >  	struct kvm_shadow_walk_iterator iterator;
> > +	struct kvm_mmu_page *sp;
> >  	pt_element_t gpte;
> >  	gpa_t pte_gpa = -1;
> >  	int level;
> > @@ -451,12 +452,17 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
> >  	for_each_shadow_entry(vcpu, gva, iterator) {
> >  		level = iterator.level;
> >  		sptep = iterator.sptep;
> > +		sp = page_header(__pa(sptep));
> > +
> > +		if (sp->role.direct) {
> > +			/* mapped from a guest's large_pte */
> > +			kvm_mmu_zap_page(vcpu->kvm, sp);
> > +			kvm_flush_remote_tlbs(vcpu->kvm);
> > +			return;
> > +		}
> 
> If the guest has 32-bit pte's there might be:
> 
> - two large shadow entries to cover 4MB
> - one large shadow entry and one shadow page with 512 4k entries
> - two shadow pages with 512 4k entries each
> 
> So we need to cover all these cases.

Right. Thanks for pointing this out. I will post an updated version of
this patch.

	Joerg
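
For context only, here is a rough sketch (not the actual follow-up patch, which is not shown here) of one direction such an update could take: instead of zapping the whole direct shadow page and returning early, clear just the shadow entry the walk lands on, and compute the guest pte address only when the containing shadow page is indirect. The helpers used (is_shadow_present_pte(), rmap_remove(), set_shadow_pte(), shadow_trap_nonpresent_pte, kvm_flush_remote_tlbs()) exist in mmu.c, which includes paging_tmpl.h:

static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	gpa_t pte_gpa = -1;
	int level;
	u64 *sptep;
	int need_flush = 0;

	spin_lock(&vcpu->kvm->mmu_lock);

	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		if (level == PT_PAGE_TABLE_LEVEL ||
		    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
			struct kvm_mmu_page *sp = page_header(__pa(sptep));

			/*
			 * A guest pte address only exists if this shadow
			 * page shadows a guest page table, i.e. is not a
			 * direct mapping of a guest large page.
			 */
			if (!sp->role.direct) {
				pte_gpa = (sp->gfn << PAGE_SHIFT);
				pte_gpa += (sptep - sp->spt) *
					   sizeof(pt_element_t);
			}

			if (is_shadow_present_pte(*sptep)) {
				rmap_remove(vcpu->kvm, sptep);
				need_flush = 1;
			}
			set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
			break;
		}

		if (!is_shadow_present_pte(*sptep))
			break;
	}

	if (need_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);
	spin_unlock(&vcpu->kvm->mmu_lock);

	/*
	 * Re-read the guest pte through pte_gpa and prefetch it, as in
	 * the existing code following this point.
	 */
}

This is only a sketch; by itself it still does not invalidate the second 2MB half of a 32-bit guest's 4MB mapping, which is the case Marcelo raises above.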

Patch

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 79668ba..aa79396 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -441,6 +441,7 @@  out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
+	struct kvm_mmu_page *sp;
 	pt_element_t gpte;
 	gpa_t pte_gpa = -1;
 	int level;
@@ -451,12 +452,17 @@  static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	for_each_shadow_entry(vcpu, gva, iterator) {
 		level = iterator.level;
 		sptep = iterator.sptep;
+		sp = page_header(__pa(sptep));
+
+		if (sp->role.direct) {
+			/* mapped from a guest's large_pte */
+			kvm_mmu_zap_page(vcpu->kvm, sp);
+			kvm_flush_remote_tlbs(vcpu->kvm);
+			return;
+		}
 
-		/* FIXME: properly handle invlpg on large guest pages */
 		if (level == PT_PAGE_TABLE_LEVEL ||
 		    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
-			struct kvm_mmu_page *sp = page_header(__pa(sptep));
-
 			pte_gpa = (sp->gfn << PAGE_SHIFT);
 			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);