[v2] KVM: x86: remove check on nr_mmu_pages in kvm_arch_commit_memory_region()

Message ID 20180927003126.32359-1-richard.weiyang@gmail.com (mailing list archive)
State New, archived
Series [v2] KVM: x86: remove check on nr_mmu_pages in kvm_arch_commit_memory_region()

Commit Message

Wei Yang Sept. 27, 2018, 12:31 a.m. UTC
* nr_mmu_pages would be non-zero only if kvm->arch.n_requested_mmu_pages is
  zero.

* nr_mmu_pages is always non-zero, since kvm_mmu_calculate_mmu_pages()
  never returns zero.

For these two reasons, we can merge the two *if* clauses and use the
return value from kvm_mmu_calculate_mmu_pages() directly. This simplifies
the code and also eliminates the possibility for a reader to believe
nr_mmu_pages could be zero.

Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>

---
v2: refactor changelog and rename kvm_mmu_calculate_mmu_pages() based on
Sean Christopherson's comments.
---
 arch/x86/include/asm/kvm_host.h | 2 +-
 arch/x86/kvm/mmu.c              | 2 +-
 arch/x86/kvm/x86.c              | 8 ++------
 3 files changed, 4 insertions(+), 8 deletions(-)
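
The premise behind dropping the old if (nr_mmu_pages) guard is that
kvm_mmu_calculate_default_mmu_pages() can never return zero. The sketch
below models its final clamp; calc_default_mmu_pages() is a hypothetical
stand-in written for illustration, not the kernel function itself, and the
two constants mirror KVM_PERMILLE_MMU_PAGES (20) and
KVM_MIN_ALLOC_MMU_PAGES (64) as defined in arch/x86/include/asm/kvm_host.h
around this time.

#include <stdio.h>

/* Mirrors of the kernel constants (values from kvm_host.h of this era). */
#define KVM_PERMILLE_MMU_PAGES	20
#define KVM_MIN_ALLOC_MMU_PAGES	64

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/*
 * Hypothetical stand-in for kvm_mmu_calculate_default_mmu_pages():
 * scale the total guest page count to 2% and clamp it to a non-zero
 * floor, so the result is always at least KVM_MIN_ALLOC_MMU_PAGES.
 */
static unsigned int calc_default_mmu_pages(unsigned int nr_pages)
{
	unsigned int nr_mmu_pages;

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	return max_u(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
}

int main(void)
{
	/* Even with zero memslot pages, the floor keeps the result non-zero. */
	printf("%u\n", calc_default_mmu_pages(0));       /* 64 */
	printf("%u\n", calc_default_mmu_pages(1 << 20)); /* 20971 */
	return 0;
}

Because of that floor, the old if (nr_mmu_pages) check could only ever be
reached with a non-zero value, which is exactly the dead branch this patch
removes.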

Comments

Wei Yang Oct. 25, 2018, 9:50 a.m. UTC | #1
Paolo,

Do you have time to take a look at this one?

On Thu, Sep 27, 2018 at 08:31:26AM +0800, Wei Yang wrote:
>* nr_mmu_pages would be non-zero only if kvm->arch.n_requested_mmu_pages is
>  zero.
>
>* nr_mmu_pages is always non-zero, since kvm_mmu_calculate_mmu_pages()
>  never returns zero.
>
>For these two reasons, we can merge the two *if* clauses and use the
>return value from kvm_mmu_calculate_mmu_pages() directly. This simplifies
>the code and also eliminates the possibility for a reader to believe
>nr_mmu_pages could be zero.
>
>Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
>Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
>
>[...]
Wei Yang Dec. 30, 2018, 8:22 a.m. UTC | #2
On Thu, Sep 27, 2018 at 08:31:26AM +0800, Wei Yang wrote:
>* nr_mmu_pages would be non-zero only if kvm->arch.n_requested_mmu_pages is
>  zero.
>
>* nr_mmu_pages is always non-zero, since kvm_mmu_calculate_mmu_pages()
>  never returns zero.
>
>For these two reasons, we can merge the two *if* clauses and use the
>return value from kvm_mmu_calculate_mmu_pages() directly. This simplifies
>the code and also eliminates the possibility for a reader to believe
>nr_mmu_pages could be zero.
>
>Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
>Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>

Paolo,

Do you like this one?

>
>[...]
Wei Yang Feb. 5, 2019, 12:29 p.m. UTC | #3
On Thu, Sep 27, 2018 at 08:31:26AM +0800, Wei Yang wrote:
>* nr_mmu_pages would be non-zero only if kvm->arch.n_requested_mmu_pages is
>  zero.
>
>* nr_mmu_pages is always non-zero, since kvm_mmu_calculate_mmu_pages()
>  never returns zero.
>
>For these two reasons, we can merge the two *if* clauses and use the
>return value from kvm_mmu_calculate_mmu_pages() directly. This simplifies
>the code and also eliminates the possibility for a reader to believe
>nr_mmu_pages could be zero.
>
>Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
>Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
>

Ping on this old patch :-)

>[...]
Paolo Bonzini March 15, 2019, 5:40 p.m. UTC | #4
On 25/10/18 11:50, Wei Yang wrote:
> Paolo,
> 
> Do you have time to take a look at this one?
> 
> On Thu, Sep 27, 2018 at 08:31:26AM +0800, Wei Yang wrote:
>> * nr_mmu_pages would be non-zero only if kvm->arch.n_requested_mmu_pages is
>>  zero.
>>
>> * nr_mmu_pages is always non-zero, since kvm_mmu_calculate_mmu_pages()
>>  never returns zero.
>>
>> For these two reasons, we can merge the two *if* clauses and use the
>> return value from kvm_mmu_calculate_mmu_pages() directly. This simplifies
>> the code and also eliminates the possibility for a reader to believe
>> nr_mmu_pages could be zero.
>>
>> Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
>> Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
>>
>> [...]
> 

Queued, thanks.

Paolo
Wei Yang March 16, 2019, 1:39 a.m. UTC | #5
On Fri, Mar 15, 2019 at 06:40:20PM +0100, Paolo Bonzini wrote:
>On 25/10/18 11:50, Wei Yang wrote:
>> Paolo,
>> 
>> Do you have time to take a look at this one?
>> 
>> On Thu, Sep 27, 2018 at 08:31:26AM +0800, Wei Yang wrote:
>>> * nr_mmu_pages would be non-zero only if kvm->arch.n_requested_mmu_pages is
>>>  zero.
>>>
>>> * nr_mmu_pages is always non-zero, since kvm_mmu_calculate_mmu_pages()
>>>  never returns zero.
>>>
>>> For these two reasons, we can merge the two *if* clauses and use the
>>> return value from kvm_mmu_calculate_mmu_pages() directly. This simplifies
>>> the code and also eliminates the possibility for a reader to believe
>>> nr_mmu_pages could be zero.
>>>
>>> Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
>>> Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
>>>
>>> [...]
>> 
>
>Queued, thanks.
>

Thanks :-)

>Paolo

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c5e7bb811b1e..036dede47274 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1190,7 +1190,7 @@  void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 				   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
+unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9fa77aa24fc7..80629f5377c3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5893,7 +5893,7 @@  int kvm_mmu_module_init(void)
 /*
  * Calculate mmu pages needed for kvm.
  */
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
+unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
 {
 	unsigned int nr_mmu_pages;
 	unsigned int  nr_pages = 0;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8614199d733b..bed566fd4ee5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9131,13 +9131,9 @@  void kvm_arch_commit_memory_region(struct kvm *kvm,
 				const struct kvm_memory_slot *new,
 				enum kvm_mr_change change)
 {
-	int nr_mmu_pages = 0;
-
 	if (!kvm->arch.n_requested_mmu_pages)
-		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
-
-	if (nr_mmu_pages)
-		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
+		kvm_mmu_change_mmu_pages(kvm,
+				kvm_mmu_calculate_default_mmu_pages(kvm));
 
 	/*
 	 * Dirty logging tracks sptes in 4k granularity, meaning that large
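
For reference, this is how the start of kvm_arch_commit_memory_region()
reads with the patch applied. It is reconstructed from the hunk above
rather than copied from the tree; parameters that sit above the hunk
context are elided, and the comments are added here for explanation.

void kvm_arch_commit_memory_region(struct kvm *kvm,
				/* ... parameters elided above the hunk context ... */
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	/*
	 * Only recompute the default MMU page limit when userspace has not
	 * pinned one (n_requested_mmu_pages is set by KVM_SET_NR_MMU_PAGES).
	 * The helper never returns zero, so its result is passed straight
	 * to kvm_mmu_change_mmu_pages() with no intermediate check.
	 */
	if (!kvm->arch.n_requested_mmu_pages)
		kvm_mmu_change_mmu_pages(kvm,
				kvm_mmu_calculate_default_mmu_pages(kvm));

	/*
	 * Dirty logging tracks sptes in 4k granularity, meaning that large
	 * ... (remainder of the function unchanged by this patch)
	 */
}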