
[6/6] Revert "KVM: MMU: split kvm_mmu_free_page"

Message ID 1359557105-30821-7-git-send-email-gleb@redhat.com (mailing list archive)
State New, archived

Commit Message

Gleb Natapov Jan. 30, 2013, 2:45 p.m. UTC
This reverts commit bd4c86eaa6ff10abc4e00d0f45d2a28b10b09df4.

There is no user for kvm_mmu_isolate_page() anymore.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
 arch/x86/kvm/mmu.c |   21 +++------------------
 1 file changed, 3 insertions(+), 18 deletions(-)
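
For readers skimming the archive: the commit being reverted had split the
teardown into kvm_mmu_isolate_page() (unhash the page, which must happen
under mmu_lock) and kvm_mmu_free_page() (release the memory, allowed outside
the lock); this revert folds both steps back into kvm_mmu_free_page(), as the
diff at the bottom shows. Below is a rough, purely illustrative user-space
sketch of the two shapes -- shadow_page, isolate_page, free_page_split and
free_page_folded are made-up stand-ins, and a pthread mutex stands in for
kvm->mmu_lock.

/* Hypothetical model only -- not kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;

struct shadow_page {
	int hashed;	/* still reachable through the (modeled) hash list? */
	void *spt;	/* the shadow page table page itself */
};

/* Pre-revert step 1: drop the page from the hash list, under mmu_lock. */
static void isolate_page(struct shadow_page *sp)
{
	sp->hashed = 0;
}

/* Pre-revert step 2: release the memory, allowed outside mmu_lock. */
static void free_page_split(struct shadow_page *sp)
{
	free(sp->spt);
	free(sp);
}

/* Post-revert: one helper does both, called while mmu_lock is held. */
static void free_page_folded(struct shadow_page *sp)
{
	sp->hashed = 0;
	free(sp->spt);
	free(sp);
}

int main(void)
{
	struct shadow_page *a = calloc(1, sizeof(*a));
	struct shadow_page *b = calloc(1, sizeof(*b));

	a->hashed = b->hashed = 1;
	a->spt = malloc(4096);
	b->spt = malloc(4096);

	/* Pre-revert shape: unhash under the lock, free outside it. */
	pthread_mutex_lock(&mmu_lock);
	isolate_page(a);
	pthread_mutex_unlock(&mmu_lock);
	free_page_split(a);

	/* Post-revert shape: a single call, done while the lock is held. */
	pthread_mutex_lock(&mmu_lock);
	free_page_folded(b);
	pthread_mutex_unlock(&mmu_lock);

	printf("freed two modeled shadow pages\n");
	return 0;
}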

Comments

Marcelo Tosatti Feb. 5, 2013, 1:24 a.m. UTC | #1
On Wed, Jan 30, 2013 at 04:45:05PM +0200, Gleb Natapov wrote:
> This reverts commit bd4c86eaa6ff10abc4e00d0f45d2a28b10b09df4.
> 
> There is no user for kvm_mmu_isolate_page() anymore.
> 
> Signed-off-by: Gleb Natapov <gleb@redhat.com>
> ---
>  arch/x86/kvm/mmu.c |   21 +++------------------

Applied all except this - Takuya's using it.

Takuya Yoshikawa Feb. 5, 2013, 1:47 a.m. UTC | #2
On Mon, 4 Feb 2013 23:24:01 -0200
Marcelo Tosatti <mtosatti@redhat.com> wrote:

> On Wed, Jan 30, 2013 at 04:45:05PM +0200, Gleb Natapov wrote:
> > This reverts commit bd4c86eaa6ff10abc4e00d0f45d2a28b10b09df4.
> > 
> > There is no user for kvm_mmu_isolate_page() anymore.
> > 
> > Signed-off-by: Gleb Natapov <gleb@redhat.com>
> > ---
> >  arch/x86/kvm/mmu.c |   21 +++------------------
> 
> Applied all except this - Takuya's using it.

My patch just folds it into kvm_mmu_free_page(), so this patch
helps me a bit.

If you want to apply this, I can rebase my patch appropriately.

Thanks,
	Takuya
Gleb Natapov Feb. 5, 2013, 8:42 a.m. UTC | #3
On Mon, Feb 04, 2013 at 11:24:01PM -0200, Marcelo Tosatti wrote:
> On Wed, Jan 30, 2013 at 04:45:05PM +0200, Gleb Natapov wrote:
> > This reverts commit bd4c86eaa6ff10abc4e00d0f45d2a28b10b09df4.
> > 
> > There is no user for kvm_mmu_isolate_page() anymore.
> > 
> > Signed-off-by: Gleb Natapov <gleb@redhat.com>
> > ---
> >  arch/x86/kvm/mmu.c |   21 +++------------------
> 
> Applied all except this - Takuya's using it.
He actually reverts this commit, but in a non-obvious way. Reverting it
explicitly makes for a clearer commit history.

--
			Gleb.

Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 42ba85c..0242a8a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1461,28 +1461,14 @@  static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
 	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
 }
 
-/*
- * Remove the sp from shadow page cache, after call it,
- * we can not find this sp from the cache, and the shadow
- * page table is still valid.
- * It should be under the protection of mmu lock.
- */
-static void kvm_mmu_isolate_page(struct kvm_mmu_page *sp)
+static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
 {
 	ASSERT(is_empty_shadow_page(sp->spt));
 	hlist_del(&sp->hash_link);
-	if (!sp->role.direct)
-		free_page((unsigned long)sp->gfns);
-}
-
-/*
- * Free the shadow page table and the sp, we can do it
- * out of the protection of mmu lock.
- */
-static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
-{
 	list_del(&sp->link);
 	free_page((unsigned long)sp->spt);
+	if (!sp->role.direct)
+		free_page((unsigned long)sp->gfns);
 	kmem_cache_free(mmu_page_header_cache, sp);
 }
 
@@ -2126,7 +2112,6 @@  static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 	do {
 		sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
 		WARN_ON(!sp->role.invalid || sp->root_count);
-		kvm_mmu_isolate_page(sp);
 		kvm_mmu_free_page(sp);
 	} while (!list_empty(invalid_list));
 }