diff mbox

[09/11] KVM: MMU: simplify mmu_need_write_protect

Message ID 1448907973-36066-10-git-send-email-guangrong.xiao@linux.intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

Xiao Guangrong Nov. 30, 2015, 6:26 p.m. UTC
Now that all non-leaf shadow pages are page tracked, if a gfn is not
tracked then no non-leaf shadow page for that gfn exists, so we can
directly mark the shadow pages of the gfn as unsync

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
---
 arch/x86/kvm/mmu.c | 26 ++++++++------------------
 1 file changed, 8 insertions(+), 18 deletions(-)

Comments

Kai Huang Dec. 15, 2015, 8:43 a.m. UTC | #1
On 12/01/2015 02:26 AM, Xiao Guangrong wrote:
> Now, all non-leaf shadow page are page tracked, if gfn is not tracked
> there is no non-leaf shadow page of gfn is existed, we can directly
> make the shadow page of gfn to unsync
>
> Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
> ---
>   arch/x86/kvm/mmu.c | 26 ++++++++------------------
>   1 file changed, 8 insertions(+), 18 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 5a2ca73..f89e77f 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -2461,41 +2461,31 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
>   	kvm_mmu_mark_parents_unsync(sp);
>   }
>   
> -static void kvm_unsync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
> +static bool kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
> +			     bool can_unsync)
>   {
>   	struct kvm_mmu_page *s;
>   
>   	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
> +		if (!can_unsync)
> +			return true;
How about moving this right before for_each_gfn_indirect_valid_sp? As 
can_unsync is passed as parameter, so there's no point checking it 
several times.

A further thinking is can we move it to mmu_need_write_protect? Passing 
can_unsync as parameter to kvm_unsync_pages sounds a little bit odd.

> +
>   		if (s->unsync)
>   			continue;
>   		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
How about large page mapping? Such as if guest uses 2M mapping and its 
shadow is indirect, does above WARN_ON still meet? As you removed the PT 
level check in mmu_need_write_protect.

Thanks,
-Kai
>   		__kvm_unsync_page(vcpu, s);
>   	}
> +
> +	return false;
>   }

>   
>   static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
>   				   bool can_unsync)
>   {
> -	struct kvm_mmu_page *s;
> -	bool need_unsync = false;
> -
>   	if (kvm_page_track_check_mode(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
>   		return true;
>   
> -	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
> -		if (!can_unsync)
> -			return true;
> -
> -		if (s->role.level != PT_PAGE_TABLE_LEVEL)
> -			return true;
> -
> -		if (!s->unsync)
> -			need_unsync = true;
> -	}
> -	if (need_unsync)
> -		kvm_unsync_pages(vcpu, gfn);
> -
> -	return false;
> +	return kvm_unsync_pages(vcpu, gfn, can_unsync);
>   }
>   
>   static bool kvm_is_mmio_pfn(pfn_t pfn)

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Kai Huang Dec. 15, 2015, 8:47 a.m. UTC | #2
On 12/15/2015 04:43 PM, Kai Huang wrote:
>
>
> On 12/01/2015 02:26 AM, Xiao Guangrong wrote:
>> Now, all non-leaf shadow page are page tracked, if gfn is not tracked
>> there is no non-leaf shadow page of gfn is existed, we can directly
>> make the shadow page of gfn to unsync
>>
>> Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
>> ---
>>   arch/x86/kvm/mmu.c | 26 ++++++++------------------
>>   1 file changed, 8 insertions(+), 18 deletions(-)
>>
>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>> index 5a2ca73..f89e77f 100644
>> --- a/arch/x86/kvm/mmu.c
>> +++ b/arch/x86/kvm/mmu.c
>> @@ -2461,41 +2461,31 @@ static void __kvm_unsync_page(struct kvm_vcpu 
>> *vcpu, struct kvm_mmu_page *sp)
>>       kvm_mmu_mark_parents_unsync(sp);
>>   }
>>   -static void kvm_unsync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
>> +static bool kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
>> +                 bool can_unsync)
>>   {
>>       struct kvm_mmu_page *s;
>>         for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
>> +        if (!can_unsync)
>> +            return true;
> How about moving this right before for_each_gfn_indirect_valid_sp? As 
> can_unsync is passed as parameter, so there's no point checking it 
> several times.
>
> A further thinking is can we move it to mmu_need_write_protect? 
> Passing can_unsync as parameter to kvm_unsync_pages sounds a little 
> bit odd.
>
>> +
>>           if (s->unsync)
>>               continue;
>>           WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
> How about large page mapping? Such as if guest uses 2M mapping and its 
> shadow is indirect, does above WARN_ON still meet? As you removed the 
> PT level check in mmu_need_write_protect.
>
> Thanks,
> -Kai
Btw I also think this patch can be merged with patch 6.

Thanks,
-Kai
>>           __kvm_unsync_page(vcpu, s);
>>       }
>> +
>> +    return false;
>>   }
>
>>     static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
>>                      bool can_unsync)
>>   {
>> -    struct kvm_mmu_page *s;
>> -    bool need_unsync = false;
>> -
>>       if (kvm_page_track_check_mode(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
>>           return true;
>>   -    for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
>> -        if (!can_unsync)
>> -            return true;
>> -
>> -        if (s->role.level != PT_PAGE_TABLE_LEVEL)
>> -            return true;
>> -
>> -        if (!s->unsync)
>> -            need_unsync = true;
>> -    }
>> -    if (need_unsync)
>> -        kvm_unsync_pages(vcpu, gfn);
>> -
>> -    return false;
>> +    return kvm_unsync_pages(vcpu, gfn, can_unsync);
>>   }
>>     static bool kvm_is_mmio_pfn(pfn_t pfn)
>
> -- 
> To unsubscribe from this list: send the line "unsubscribe kvm" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
>

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Xiao Guangrong Dec. 15, 2015, 9:25 a.m. UTC | #3
On 12/15/2015 04:43 PM, Kai Huang wrote:
>
>
> On 12/01/2015 02:26 AM, Xiao Guangrong wrote:
>> Now, all non-leaf shadow page are page tracked, if gfn is not tracked
>> there is no non-leaf shadow page of gfn is existed, we can directly
>> make the shadow page of gfn to unsync
>>
>> Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
>> ---
>>   arch/x86/kvm/mmu.c | 26 ++++++++------------------
>>   1 file changed, 8 insertions(+), 18 deletions(-)
>>
>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>> index 5a2ca73..f89e77f 100644
>> --- a/arch/x86/kvm/mmu.c
>> +++ b/arch/x86/kvm/mmu.c
>> @@ -2461,41 +2461,31 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
>>       kvm_mmu_mark_parents_unsync(sp);
>>   }
>> -static void kvm_unsync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
>> +static bool kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
>> +                 bool can_unsync)
>>   {
>>       struct kvm_mmu_page *s;
>>       for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
>> +        if (!can_unsync)
>> +            return true;
> How about moving this right before for_each_gfn_indirect_valid_sp? As can_unsync is passed as
> parameter, so there's no point checking it several times.
>

We can not do this. What we are doing here is checking if we have shadow page mapping
for 'gfn':
a) if no, it can be writable.
b) if yes, check 'can_unsync' to see if these shadow pages can make to be 'unsync'.

Your suggestion can break the point a).

> A further thinking is can we move it to mmu_need_write_protect? Passing can_unsync as parameter to
> kvm_unsync_pages sounds a little bit odd.
>
>> +
>>           if (s->unsync)
>>               continue;
>>           WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
> How about large page mapping? Such as if guest uses 2M mapping and its shadow is indirect, does
> above WARN_ON still meet? As you removed the PT level check in mmu_need_write_protect.

The larger mappings are on the non-leaf shadow pages, which can be detected by
kvm_page_track_check_mode() before we call this function.

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Xiao Guangrong Dec. 15, 2015, 9:26 a.m. UTC | #4
On 12/15/2015 04:47 PM, Kai Huang wrote:

>> A further thinking is can we move it to mmu_need_write_protect? Passing can_unsync as parameter to
>> kvm_unsync_pages sounds a little bit odd.
>>
>>> +
>>>           if (s->unsync)
>>>               continue;
>>>           WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
>> How about large page mapping? Such as if guest uses 2M mapping and its shadow is indirect, does
>> above WARN_ON still meet? As you removed the PT level check in mmu_need_write_protect.
>>
>> Thanks,
>> -Kai
> Btw I also think this patch can be merged with patch 6.

We can not as it depends on patch 8. ;)

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Kai Huang Dec. 16, 2015, 8:05 a.m. UTC | #5
On 12/15/2015 05:25 PM, Xiao Guangrong wrote:
>
>
> On 12/15/2015 04:43 PM, Kai Huang wrote:
>>
>>
>> On 12/01/2015 02:26 AM, Xiao Guangrong wrote:
>>> Now, all non-leaf shadow page are page tracked, if gfn is not tracked
>>> there is no non-leaf shadow page of gfn is existed, we can directly
>>> make the shadow page of gfn to unsync
>>>
>>> Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
>>> ---
>>>   arch/x86/kvm/mmu.c | 26 ++++++++------------------
>>>   1 file changed, 8 insertions(+), 18 deletions(-)
>>>
>>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>>> index 5a2ca73..f89e77f 100644
>>> --- a/arch/x86/kvm/mmu.c
>>> +++ b/arch/x86/kvm/mmu.c
>>> @@ -2461,41 +2461,31 @@ static void __kvm_unsync_page(struct 
>>> kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
>>>       kvm_mmu_mark_parents_unsync(sp);
>>>   }
>>> -static void kvm_unsync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
>>> +static bool kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
>>> +                 bool can_unsync)
>>>   {
>>>       struct kvm_mmu_page *s;
>>>       for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
>>> +        if (!can_unsync)
>>> +            return true;
>> How about moving this right before for_each_gfn_indirect_valid_sp? As 
>> can_unsync is passed as
>> parameter, so there's no point checking it several times.
>>
>
> We can not do this. What we are doing here is checking if we have 
> shadow page mapping
> for 'gfn':
> a) if no, it can be writable.
I think in this case you should also check whether the GFN is being 
write protection tracked. Ex, if the spte never exists when you add the 
GFN to write protection tracking, and in this case I think 
mmu_need_write_protect should also report true. Right?

> b) if yes, check 'can_unsync' to see if these shadow pages can make to 
> be 'unsync'.
>
> Your suggestion can break the point a).
>
>> A further thinking is can we move it to mmu_need_write_protect? 
>> Passing can_unsync as parameter to
>> kvm_unsync_pages sounds a little bit odd.
>>
>>> +
>>>           if (s->unsync)
>>>               continue;
>>>           WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
>> How about large page mapping? Such as if guest uses 2M mapping and 
>> its shadow is indirect, does
>> above WARN_ON still meet? As you removed the PT level check in 
>> mmu_need_write_protect.
>
> The lager mapping are on the non-leaf shadow pages which can be 
> figured out by
> kvm_page_track_check_mode() before we call this function.
Actually I am not quite understanding how large page mapping is 
implemented. I see in kvm_mmu_get_page, when sp is allocated, it is 
large page mapping disabled, but I think we do support large shadow 
mapping, right? I mean theoretically if guest uses  2M mapping and 
shadow mapping can certainly use 2M mapping as well, and the 2M shadow 
mapping can also be 'unsynced' (as a leaf mapping table). But in your 
series I see if we write protect some  GFN, the shadow large page 
mapping is always disabled.

Am I wrong?

Thanks,
-Kai
>
>

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Xiao Guangrong Dec. 16, 2015, 8:48 a.m. UTC | #6
On 12/16/2015 04:05 PM, Kai Huang wrote:
>
>
> On 12/15/2015 05:25 PM, Xiao Guangrong wrote:
>>
>>
>> On 12/15/2015 04:43 PM, Kai Huang wrote:
>>>
>>>
>>> On 12/01/2015 02:26 AM, Xiao Guangrong wrote:
>>>> Now, all non-leaf shadow page are page tracked, if gfn is not tracked
>>>> there is no non-leaf shadow page of gfn is existed, we can directly
>>>> make the shadow page of gfn to unsync
>>>>
>>>> Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
>>>> ---
>>>>   arch/x86/kvm/mmu.c | 26 ++++++++------------------
>>>>   1 file changed, 8 insertions(+), 18 deletions(-)
>>>>
>>>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>>>> index 5a2ca73..f89e77f 100644
>>>> --- a/arch/x86/kvm/mmu.c
>>>> +++ b/arch/x86/kvm/mmu.c
>>>> @@ -2461,41 +2461,31 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page
>>>> *sp)
>>>>       kvm_mmu_mark_parents_unsync(sp);
>>>>   }
>>>> -static void kvm_unsync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
>>>> +static bool kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
>>>> +                 bool can_unsync)
>>>>   {
>>>>       struct kvm_mmu_page *s;
>>>>       for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
>>>> +        if (!can_unsync)
>>>> +            return true;
>>> How about moving this right before for_each_gfn_indirect_valid_sp? As can_unsync is passed as
>>> parameter, so there's no point checking it several times.
>>>
>>
>> We can not do this. What we are doing here is checking if we have shadow page mapping
>> for 'gfn':
>> a) if no, it can be writable.
> I think in this case you should also check whether the GFN is being write protection tracked. Ex, if
> the spte never exists when you add the GFN to write protection tracking, and in this case I think
> mmu_need_write_protect should also report true. Right?

We have already checked it:

static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
                                    bool can_unsync)
{
         if (kvm_page_track_check_mode(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
                 return true;

         return kvm_unsync_pages(vcpu, gfn, can_unsync);
}


>
>> b) if yes, check 'can_unsync' to see if these shadow pages can make to be 'unsync'.
>>
>> Your suggestion can break the point a).
>>
>>> A further thinking is can we move it to mmu_need_write_protect? Passing can_unsync as parameter to
>>> kvm_unsync_pages sounds a little bit odd.
>>>
>>>> +
>>>>           if (s->unsync)
>>>>               continue;
>>>>           WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
>>> How about large page mapping? Such as if guest uses 2M mapping and its shadow is indirect, does
>>> above WARN_ON still meet? As you removed the PT level check in mmu_need_write_protect.
>>
>> The lager mapping are on the non-leaf shadow pages which can be figured out by
>> kvm_page_track_check_mode() before we call this function.
> Actually I am not quite understanding how large page mapping is implemented. I see in
> kvm_mmu_get_page, when sp is allocated, it is large page mapping disabled, but I think we do support
> large shadow mapping, right? I mean theoretically if guest uses  2M mapping and shadow mapping can
> certainly use 2M mapping as well, and the 2M shadow mapping can also be 'unsynced' (as a leaf
> mapping table). But in your series I see if we write protect some  GFN, the shadow large page
> mapping is always disabled.
>
> Am I wrong?

If the large page contains a page which is used as a page table, kvm does not map a large page for
it; the reason is that we track the 4k page instead of the whole large page to reduce write emulation.

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Kai Huang Dec. 17, 2015, 2:51 a.m. UTC | #7
On 12/16/2015 04:48 PM, Xiao Guangrong wrote:
>
>
> On 12/16/2015 04:05 PM, Kai Huang wrote:
>>
>>
>> On 12/15/2015 05:25 PM, Xiao Guangrong wrote:
>>>
>>>
>>> On 12/15/2015 04:43 PM, Kai Huang wrote:
>>>>
>>>>
>>>> On 12/01/2015 02:26 AM, Xiao Guangrong wrote:
>>>>> Now, all non-leaf shadow page are page tracked, if gfn is not tracked
>>>>> there is no non-leaf shadow page of gfn is existed, we can directly
>>>>> make the shadow page of gfn to unsync
>>>>>
>>>>> Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
>>>>> ---
>>>>>   arch/x86/kvm/mmu.c | 26 ++++++++------------------
>>>>>   1 file changed, 8 insertions(+), 18 deletions(-)
>>>>>
>>>>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>>>>> index 5a2ca73..f89e77f 100644
>>>>> --- a/arch/x86/kvm/mmu.c
>>>>> +++ b/arch/x86/kvm/mmu.c
>>>>> @@ -2461,41 +2461,31 @@ static void __kvm_unsync_page(struct 
>>>>> kvm_vcpu *vcpu, struct kvm_mmu_page
>>>>> *sp)
>>>>>       kvm_mmu_mark_parents_unsync(sp);
>>>>>   }
>>>>> -static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
>>>>> +static bool kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
>>>>> +                 bool can_unsync)
>>>>>   {
>>>>>       struct kvm_mmu_page *s;
>>>>>       for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
>>>>> +        if (!can_unsync)
>>>>> +            return true;
>>>> How about moving this right before for_each_gfn_indirect_valid_sp? 
>>>> As can_unsync is passed as
>>>> parameter, so there's no point checking it several times.
>>>>
>>>
>>> We can not do this. What we are doing here is checking if we have 
>>> shadow page mapping
>>> for 'gfn':
>>> a) if no, it can be writable.
>> I think in this case you should also check whether the GFN is being 
>> write protection tracked. Ex, if
>> the spte never exists when you add the GFN to write protection 
>> tracking, and in this case I think
>> mmu_need_write_protect should also report true. Right?
>
> We have already checked it:
>
> static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
>                                    bool can_unsync)
> {
>         if (kvm_page_track_check_mode(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
>                 return true;
>
>         return kvm_unsync_pages(vcpu, gfn, can_unsync);
> }
Oh sorry I missed this :)

>
>
>>
>>> b) if yes, check 'can_unsync' to see if these shadow pages can make 
>>> to be 'unsync'.
>>>
>>> Your suggestion can break the point a).
>>>
>>>> A further thinking is can we move it to mmu_need_write_protect? 
>>>> Passing can_unsync as parameter to
>>>> kvm_unsync_pages sounds a little bit odd.
>>>>
>>>>> +
>>>>>           if (s->unsync)
>>>>>               continue;
>>>>>           WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
>>>> How about large page mapping? Such as if guest uses 2M mapping and 
>>>> its shadow is indirect, does
>>>> above WARN_ON still meet? As you removed the PT level check in 
>>>> mmu_need_write_protect.
>>>
>>> The lager mapping are on the non-leaf shadow pages which can be 
>>> figured out by
>>> kvm_page_track_check_mode() before we call this function.
>> Actually I am not quite understanding how large page mapping is 
>> implemented. I see in
>> kvm_mmu_get_page, when sp is allocated, it is large page mapping 
>> disabled, but I think we do support
>> large shadow mapping, right? I mean theoretically if guest uses 2M 
>> mapping and shadow mapping can
>> certainly use 2M mapping as well, and the 2M shadow mapping can also 
>> be 'unsynced' (as a leaf
>> mapping table). But in your series I see if we write protect some  
>> GFN, the shadow large page
>> mapping is always disabled.
>>
>> Am I wrong?
>
> If the large page contains the page which is used as page table, kvm 
> does not map large page for
> it, the reason is we track the 4k page instead of the whole large page 
> to reduce write emulation.
I don't know why breaking large page to 4K mapping can reduce write 
emulation, but this explanation works for me. I guess KVM-GT doesn't 
care about it neither. :)

Thanks,
-Kai
>
> -- 
> To unsubscribe from this list: send the line "unsubscribe kvm" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
>

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5a2ca73..f89e77f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2461,41 +2461,31 @@  static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	kvm_mmu_mark_parents_unsync(sp);
 }
 
-static void kvm_unsync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
+static bool kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
+			     bool can_unsync)
 {
 	struct kvm_mmu_page *s;
 
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
+		if (!can_unsync)
+			return true;
+
 		if (s->unsync)
 			continue;
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
 		__kvm_unsync_page(vcpu, s);
 	}
+
+	return false;
 }
 
 static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 				   bool can_unsync)
 {
-	struct kvm_mmu_page *s;
-	bool need_unsync = false;
-
 	if (kvm_page_track_check_mode(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
 		return true;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
-		if (!can_unsync)
-			return true;
-
-		if (s->role.level != PT_PAGE_TABLE_LEVEL)
-			return true;
-
-		if (!s->unsync)
-			need_unsync = true;
-	}
-	if (need_unsync)
-		kvm_unsync_pages(vcpu, gfn);
-
-	return false;
+	return kvm_unsync_pages(vcpu, gfn, can_unsync);
 }
 
 static bool kvm_is_mmio_pfn(pfn_t pfn)