
[3/3] kvm-s390: streamline memslot handling - rebased

Message ID 1243952771-32428-4-git-send-email-ehrhardt@linux.vnet.ibm.com (mailing list archive)
State New, archived

Commit Message

ehrhardt@linux.vnet.ibm.com June 2, 2009, 2:26 p.m. UTC
From: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>

As requested, this is a rebased patch on top of the already applied v3
of the patch series.

*updates to applied version*
- ensure the wait_on_bit waiter is notified
- ensure all vcpu requests are dropped while freeing a vcpu
- kick out only scheduled vcpus (it is superfluous otherwise, and the wait
  might hang forever on vcpus that are not running)
- kvm_arch_set_memory_region waits until the bit is consumed by the vcpu

This patch relocates the variables kvm-s390 uses to track guest mem addr/size.
As discussed, dropping the variables at struct kvm_arch level allows us to use
the common vcpu->requests based mechanism to reload guest memory whenever it
changes, e.g. via set_memory_region.
The kick mechanism introduced in this series is used to ensure that running
vcpus leave guest state to pick up the update.


Signed-off-by: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
---

[diffstat]
 arch/s390/kvm/kvm-s390.c |   27 ++++++++++++++++++++-------
 arch/s390/kvm/kvm-s390.h |    7 +++++++
 virt/kvm/kvm_main.c      |    4 ++++
 3 files changed, 31 insertions(+), 7 deletions(-)
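
For orientation, the request/consume handshake this patch implements,
condensed into a sketch from the hunks quoted below (not the literal
diff; the skip conditions for absent or not-running vcpus are omitted):

/* Requester side (kvm_arch_set_memory_region): flag the request, kick
 * the vcpu out of guest context, then block until the bit is consumed. */
set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
kvm_s390_inject_sigp_stop(vcpu, ACTION_VCPUREQUEST_ON_STOP);
wait_on_bit(&vcpu->requests, KVM_REQ_MMU_RELOAD,
	    wait_bit_schedule, TASK_UNINTERRUPTIBLE);

/* Consumer side (kvm_s390_handle_requests, running on the vcpu itself):
 * clear the bit, wake the waiter, then pick up the new memory setup. */
if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) {
	wake_up_bit(&vcpu->requests, KVM_REQ_MMU_RELOAD);
	kvm_s390_vcpu_set_mem(vcpu);
}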


Comments

Marcelo Tosatti June 5, 2009, 8:53 p.m. UTC | #1
On Tue, Jun 02, 2009 at 04:26:11PM +0200, ehrhardt@linux.vnet.ibm.com wrote:
> From: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
> 
> As requested, this is a rebased patch on top of the already applied v3
> of the patch series.
> 
> *updates to applied version*
> - ensure the wait_on_bit waiter is notified
> - ensure all vcpu requests are dropped while freeing a vcpu
> - kick out only scheduled vcpus (it is superfluous otherwise, and the wait
>   might hang forever on vcpus that are not running)
> - kvm_arch_set_memory_region waits until the bit is consumed by the vcpu
> 
> This patch relocates the variables kvm-s390 uses to track guest mem addr/size.
> As discussed, dropping the variables at struct kvm_arch level allows us to use
> the common vcpu->requests based mechanism to reload guest memory whenever it
> changes, e.g. via set_memory_region.
> The kick mechanism introduced in this series is used to ensure that running
> vcpus leave guest state to pick up the update.
> 
> 
> Signed-off-by: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
> ---
> 
> [diffstat]
>  arch/s390/kvm/kvm-s390.c |   27 ++++++++++++++++++++-------
>  arch/s390/kvm/kvm-s390.h |    7 +++++++
>  virt/kvm/kvm_main.c      |    4 ++++
>  3 files changed, 31 insertions(+), 7 deletions(-)
> 
> Index: kvm/arch/s390/kvm/kvm-s390.c
> ===================================================================
> --- kvm.orig/arch/s390/kvm/kvm-s390.c
> +++ kvm/arch/s390/kvm/kvm-s390.c
> @@ -674,6 +674,12 @@ long kvm_arch_vcpu_ioctl(struct file *fi
>  	return -EINVAL;
>  }
>  
> +static int wait_bit_schedule(void *word)
> +{
> +	schedule();
> +	return 0;
> +}
> +
>  /* Section: memory related */
>  int kvm_arch_set_memory_region(struct kvm *kvm,
>  				struct kvm_userspace_memory_region *mem,
> @@ -681,6 +687,7 @@ int kvm_arch_set_memory_region(struct kv
>  				int user_alloc)
>  {
>  	int i;
> +	struct kvm_vcpu *vcpu;
>  
>  	/* A few sanity checks. We can have exactly one memory slot which has
>  	   to start at guest virtual zero and which has to be located at a
> @@ -706,13 +713,19 @@ int kvm_arch_set_memory_region(struct kv
>  
>  	/* request update of sie control block for all available vcpus */
>  	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
> -		if (kvm->vcpus[i]) {
> -			if (test_and_set_bit(KVM_REQ_MMU_RELOAD,
> -						&kvm->vcpus[i]->requests))
> -				continue;
> -			kvm_s390_inject_sigp_stop(kvm->vcpus[i],
> -						  ACTION_VCPUREQUEST_ON_STOP);
> -		}
> +		vcpu = kvm->vcpus[i];
> +		if (!vcpu)
> +			continue;
> +
> +		if (!test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
> +			continue;
> +
> +		if (vcpu->cpu == -1)
> +			continue;

What happens if the check for cpu == -1 races with kvm_arch_vcpu_put?
This context will wait until the vcpu_put context is scheduled back in
to clear the bit? Is that OK?

> +
> +		kvm_s390_inject_sigp_stop(vcpu, ACTION_VCPUREQUEST_ON_STOP);
> +		wait_on_bit(&vcpu->requests, KVM_REQ_MMU_RELOAD,
> +			    wait_bit_schedule, TASK_UNINTERRUPTIBLE);
>  	}

 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+       vcpu->cpu = -1;
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);

>  
>  	return 0;
> Index: kvm/arch/s390/kvm/kvm-s390.h
> ===================================================================
> --- kvm.orig/arch/s390/kvm/kvm-s390.h
> +++ kvm/arch/s390/kvm/kvm-s390.h
> @@ -92,6 +92,13 @@ static inline unsigned long kvm_s390_han
>  	if (!vcpu->requests)
>  		return 0;
>  
> +	/* requests that can be handled at all levels */
> +	if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) {
> +		smp_mb__after_clear_bit();

Really need that smp_mb__after_clear_bit ? AFAIK test_and_clear_bit
implies a barrier?

> +		wake_up_bit(&vcpu->requests, KVM_REQ_MMU_RELOAD);
> +		kvm_s390_vcpu_set_mem(vcpu);
> +	}
> +
>  	return vcpu->requests;
>  }
>  
> Index: kvm/virt/kvm/kvm_main.c
> ===================================================================
> --- kvm.orig/virt/kvm/kvm_main.c
> +++ kvm/virt/kvm/kvm_main.c
> @@ -1682,6 +1682,10 @@ static int kvm_vcpu_release(struct inode
>  {
>  	struct kvm_vcpu *vcpu = filp->private_data;
>  
> +	clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
> +	smp_mb__after_clear_bit();
> +	wake_up_bit(&vcpu->requests, KVM_REQ_MMU_RELOAD);
> +

And this should be generic? Say if other architectures want to make use 
of a similar wait infrastructure. Talk is cheap.

Anyway, yeah, the set request / wait mechanism you implement here is
quite similar to the idea mentioned earlier that could be used for x86.

Just get rid of this explicit KVM_REQ_MMU_RELOAD knowledge in
arch-independent code please (if you want to see this merged).

Later it can all be lifted off to arch independent code.

>  	kvm_put_kvm(vcpu->kvm);
>  	return 0;
>  }
ehrhardt@linux.vnet.ibm.com June 8, 2009, 10:51 a.m. UTC | #2
Marcelo Tosatti wrote:
> On Tue, Jun 02, 2009 at 04:26:11PM +0200, ehrhardt@linux.vnet.ibm.com wrote:
>   
>> From: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
>>     
[...]
>> @@ -706,13 +713,19 @@ int kvm_arch_set_memory_region(struct kv
>>  
>>  	/* request update of sie control block for all available vcpus */
>>  	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
>> -		if (kvm->vcpus[i]) {
>> -			if (test_and_set_bit(KVM_REQ_MMU_RELOAD,
>> -						&kvm->vcpus[i]->requests))
>> -				continue;
>> -			kvm_s390_inject_sigp_stop(kvm->vcpus[i],
>> -						  ACTION_VCPUREQUEST_ON_STOP);
>> -		}
>> +		vcpu = kvm->vcpus[i];
>> +		if (!vcpu)
>> +			continue;
>> +
>> +		if (!test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
>> +			continue;
>> +
>> +		if (vcpu->cpu == -1)
>> +			continue;
>>     
>
> What happens if the check for cpu == -1 races with kvm_arch_vcpu_put?
> This context will wait until the vcpu_put context is scheduled back in
> to clear the bit? Is that OK?
>   
It either comes back to clear the bit, or the bit is consumed on deletion
of the vcpu. Both ways are ok. The question we have to answer is whether
it might stall the mem update ioctl for too long.
After all, the check for vcpu->cpu == -1 is just an optimization; if we
removed it completely we would face the same question: could it stall the
set mem operation too much? That means the "race" is not a correctness
issue, it might just be sub-optimal, but the chance of a long stall could
become a problem. Unfortunately I have no better approach to that (yet);
until then I like this implementation more than what we would have
without all the corner case fixes in this patch series.

>> +
>> +		kvm_s390_inject_sigp_stop(vcpu, ACTION_VCPUREQUEST_ON_STOP);
>> +		wait_on_bit(&vcpu->requests, KVM_REQ_MMU_RELOAD,
>> +			    wait_bit_schedule, TASK_UNINTERRUPTIBLE);
>>  	}
>>     
>
>  void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
>  {
> +       vcpu->cpu = -1;
>         save_fp_regs(&vcpu->arch.guest_fpregs);
>   
[...]
>> +++ kvm/arch/s390/kvm/kvm-s390.h
>> @@ -92,6 +92,13 @@ static inline unsigned long kvm_s390_han
>>  	if (!vcpu->requests)
>>  		return 0;
>>  
>> +	/* requests that can be handled at all levels */
>> +	if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) {
>> +		smp_mb__after_clear_bit();
>>     
>
> Really need that smp_mb__after_clear_bit ? AFAIK test_and_clear_bit
> implies a barrier?
>   

Well, I agree that in practice test_and_clear_bit has a barrier on s390,
but as far as I read Documentation/atomic_ops.txt, lines 339-360, the
interface does not imply one, so I wanted to add it explicitly.
I would be happy if someone who really knows the in-depth details here
corrects me :-)

>> +		wake_up_bit(&vcpu->requests, KVM_REQ_MMU_RELOAD);
>> +		kvm_s390_vcpu_set_mem(vcpu);
>> +	}
>> +
>>  	return vcpu->requests;
>>  }
>>  
>> Index: kvm/virt/kvm/kvm_main.c
>> ===================================================================
>> --- kvm.orig/virt/kvm/kvm_main.c
>> +++ kvm/virt/kvm/kvm_main.c
>> @@ -1682,6 +1682,10 @@ static int kvm_vcpu_release(struct inode
>>  {
>>  	struct kvm_vcpu *vcpu = filp->private_data;
>>  
>> +	clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
>> +	smp_mb__after_clear_bit();
>> +	wake_up_bit(&vcpu->requests, KVM_REQ_MMU_RELOAD);
>> +
>>     
>
> And this should be generic? Say if other architectures want to make use 
> of a similar wait infrastructure. Talk is cheap.
>   
Clearing the bit and waking up on release doesn't hurt any architecture,
and it is a good place for those using the mechanism to ensure outstanding
things are cleaned up when closing a vcpu fd.
I thought it's not worth adding a kvm_ARCH_vcpu_release hook for it, while
I could do so if we want it separated.
(continued below)
> Anyway, yeah, the set request / wait mechanism you implement here is
> quite similar to the idea mentioned earlier that could be used for x86.
>
> Just get rid of this explicit KVM_REQ_MMU_RELOAD knowledge in
> arch-independent code please (if you want to see this merged).
>   
I agree to lift the wait part to other archs later if needed, but as
mentioned above I could move this to arch code at the cost of one more
arch hook. But as also mentioned, it doesn't really hurt. I agree that it
does not need to be KVM_REQ_MMU_RELOAD specific; we could just
walk/clear/wake all bits on that vcpu->requests variable.
Would that be generic enough in your opinion?
> Later it can all be lifted off to arch independent code.
>   
True for the wait part which can evolve in our arch code until it is 
ripe to get cross arch merged.
Avi Kivity June 8, 2009, 11:10 a.m. UTC | #3
Christian Ehrhardt wrote:   
>>
>> Really need that smp_mb__after_clear_bit ? AFAIK test_and_clear_bit
>> implies a barrier?
>>   
>
> Well, I agree that in practice test_and_clear_bit has a barrier on
> s390, but as far as I read Documentation/atomic_ops.txt, lines
> 339-360, the interface does not imply one, so I wanted to add it
> explicitly. I would be happy if someone who really knows the in-depth
> details here corrects me :-)

IIUC rmw bitops are full memory barriers.  The non-rmw (from the 
caller's perspective), clear_bit() and set_bit(), are not.
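
In code, the distinction plays out like this (a minimal sketch, not from
the patch; the variable word and both functions are made-up examples):

#include <linux/bitops.h>
#include <linux/wait.h>

static unsigned long word;

static void rmw_side(void)
{
	/* Value-returning RMW bitops such as test_and_clear_bit() act as
	 * full memory barriers on both sides of the operation, so no
	 * extra barrier is needed before the wakeup. */
	if (test_and_clear_bit(0, &word))
		wake_up_bit(&word, 0);
}

static void non_rmw_side(void)
{
	/* clear_bit() is not a barrier by itself; the explicit
	 * smp_mb__after_clear_bit() is needed before the wakeup. */
	clear_bit(0, &word);
	smp_mb__after_clear_bit();
	wake_up_bit(&word, 0);
}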
ehrhardt@linux.vnet.ibm.com June 8, 2009, 12:05 p.m. UTC | #4
Avi Kivity wrote:
> Christian Ehrhardt wrote:  
>>>
>>> Really need that smp_mb__after_clear_bit ? AFAIK test_and_clear_bit
>>> implies a barrier?
>>>   
>>
>> Well, I agree that in practice test_and_clear_bit has a barrier on
>> s390, but as far as I read Documentation/atomic_ops.txt, lines
>> 339-360, the interface does not imply one, so I wanted to add it
>> explicitly. I would be happy if someone who really knows the in-depth
>> details here corrects me :-)
>
> IIUC rmw bitops are full memory barriers.  The non-rmw (from the 
> caller's perspective), clear_bit() and set_bit(), are not.
>
>
Ok, as the real implementation has one, memory-barriers.txt describes it
with a barrier, and include/asm-generic/bitops/atomic.h describes it that
way too, I think I can drop the explicit smp_mb from my patch in the next
update (I'll wait a bit to give the discussion about the wait/bits some
more time).

Hmm ... would that be worth a clarifying patch to atomic_ops.txt, which
confused me in the first place?
Avi Kivity June 8, 2009, 12:09 p.m. UTC | #5
Christian Ehrhardt wrote:
> Hmm ... would that be worth a clarifying patch to atomic_ops.txt, which
> confused me in the first place?

If it confused you, it probably confuses others.
Marcelo Tosatti June 9, 2009, 12:56 a.m. UTC | #6
On Mon, Jun 08, 2009 at 12:51:26PM +0200, Christian Ehrhardt wrote:
>>>  Index: kvm/virt/kvm/kvm_main.c
>>> ===================================================================
>>> --- kvm.orig/virt/kvm/kvm_main.c
>>> +++ kvm/virt/kvm/kvm_main.c
>>> @@ -1682,6 +1682,10 @@ static int kvm_vcpu_release(struct inode
>>>  {
>>>  	struct kvm_vcpu *vcpu = filp->private_data;
>>>  +	clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
>>> +	smp_mb__after_clear_bit();
>>> +	wake_up_bit(&vcpu->requests, KVM_REQ_MMU_RELOAD);
>>> +
>>>     
>>
>> And this should be generic? Say if other architectures want to make use 
>> of a similar wait infrastructure. Talk is cheap.
>>   
> Clearing the bit and waking up on release doesn't hurt any architecture,
> and it is a good place for those using the mechanism to ensure
> outstanding things are cleaned up when closing a vcpu fd.
> I thought it's not worth adding a kvm_ARCH_vcpu_release hook for it,
> while I could do so if we want it separated.

Yeah, I was frustrated by the lack of more useful comments, so I decided
to nitpick on something.

> (continued below)
>> Anyway, yeah, the set request / wait mechanism you implement here is
>> quite similar to the idea mentioned earlier that could be used for x86.
>>
>> Just get rid of this explicit KVM_REQ_MMU_RELOAD knowledge in
>> arch-independent code please (if you want to see this merged).
>>   
> I agree to lift the wait part to other archs later if needed, but as
> mentioned above I could move this to arch code at the cost of one more
> arch hook. But as also mentioned, it doesn't really hurt. I agree that
> it does not need to be KVM_REQ_MMU_RELOAD specific; we could just
> walk/clear/wake all bits on that vcpu->requests variable.
> Would that be generic enough in your opinion?

Don't know.

Avi?

>> Later it can all be lifted off to arch independent code.
>>   
> True for the wait part which can evolve in our arch code until it is 
> ripe to get cross arch merged.
>
> -- 
>
> Grüsse / regards, Christian Ehrhardt
> IBM Linux Technology Center, Open Virtualization 
Avi Kivity June 14, 2009, 12:04 p.m. UTC | #7
Marcelo Tosatti wrote:
>   
>> (continued below)
>>     
>>> Anyway, yeah, the set request / wait mechanism you implement here is
>>> quite similar to the idea mentioned earlier that could be used for x86.
>>>
>>> Just get rid of this explicit KVM_REQ_MMU_RELOAD knowledge in
>>> arch-independent code please (if you want to see this merged).
>>>   
>>>       
>> I agree to lift the wait part to other archs later if needed, but as
>> mentioned above I could move this to arch code at the cost of one more
>> arch hook. But as also mentioned, it doesn't really hurt. I agree that
>> it does not need to be KVM_REQ_MMU_RELOAD specific; we could just
>> walk/clear/wake all bits on that vcpu->requests variable.
>> Would that be generic enough in your opinion?
>>     
>
> Don't know.
>
> Avi?
>   

I think I lost the thread here, but I'll try.  Isn't the wake part 
make_all_vcpus_request() in kvm_main.c?  The wait part could be moved to 
a similar generic function.
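
Such a generic wait counterpart might look roughly like this (a sketch
modeled on the s390 loop in this series; the function name
wait_all_vcpus_request is made up):

static int wait_bit_schedule(void *word)
{
	schedule();
	return 0;
}

/* Block until every existing vcpu has consumed (cleared) request req. */
static void wait_all_vcpus_request(struct kvm *kvm, int req)
{
	int i;
	struct kvm_vcpu *vcpu;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		wait_on_bit(&vcpu->requests, req,
			    wait_bit_schedule, TASK_UNINTERRUPTIBLE);
	}
}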
ehrhardt@linux.vnet.ibm.com June 15, 2009, 1:47 p.m. UTC | #8
Avi Kivity wrote:
> Marcelo Tosatti wrote:
>>  
>>> (continued below)
>>>    
>>>> Anyway, yeah, the set request / wait mechanism you implement here is
>>>> quite similar to the idea mentioned earlier that could be used for 
>>>> x86.
>>>>
>>>> Just get rid of this explicit KVM_REQ_MMU_RELOAD knowledge in
>>>> arch-independent code please (if you want to see this merged).
>>>>         
>>> I agree to lift the wait part to other archs later if needed, but as
>>> mentioned above I could move this to arch code at the cost of one
>>> more arch hook. But as also mentioned, it doesn't really hurt. I
>>> agree that it does not need to be KVM_REQ_MMU_RELOAD specific; we
>>> could just walk/clear/wake all bits on that vcpu->requests variable.
>>> Would that be generic enough in your opinion?
>>>     
>>
>> Don't know.
>>
>> Avi?
>>   
>
> I think I lost the thread here, but I'll try.  Isn't the wake part 
> make_all_vcpus_request() in kvm_main.c?  The wait part could be moved 
> to a similar generic function.
>
I'll try to summarize my current thoughts a bit:
The rebased patch series brings several fixes plus the wait/wakeup
mechanism that is under discussion here.
As explained before, this keeps the new wait implementation in s390 arch
code, which allows us to experiment with it. Later, if we are happy with
it, we might (or might not) continue the merge and bring this mechanism
to make_all_vcpus_request (as x86 doesn't have the issues I try to fix
here, there is no hurry bringing that into generic code).

Now to the wait/wakeup under discussion here, in detail:
The s390 arch code can kick a guest, but we don't know implicitly (as
x86 does) that the kick succeeded; it might take effect sooner or later.
Therefore the code uses wait_on_bit to wait until the vcpu->requests bit
is consumed.
To ensure cleanup of these waiting threads in some special cases, the
clear & wake up is also needed at places other than the real bit
consumption. One of them is the vcpu release code, where we should
clear & wake up all waiters (Marcelo correctly pointed out that we should
not be bit specific there, so I just wake up all of them in the updated code).

That was the discussion here: whether it would be ok to clear & wake up
all. As wake_up_bit doesn't hurt if there is no waiter, it looks like the
best solution to do that in the generic part of vcpu_release. If someone
else ever waits for this or another bit in vcpu->requests, the code
ensures all of them are woken on vcpu release.
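
For illustration, such a non-bit-specific clear & wake on release could
look roughly like this (a sketch; the helper name and iterating over all
BITS_PER_LONG bits are my assumptions, not the posted code):

static void vcpu_release_wake_all(struct kvm_vcpu *vcpu)
{
	int bit;

	for (bit = 0; bit < BITS_PER_LONG; ++bit) {
		clear_bit(bit, &vcpu->requests);
		smp_mb__after_clear_bit();
		/* harmless if nobody waits on this bit */
		wake_up_bit(&vcpu->requests, bit);
	}
}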

I'll send an updated version of the rebased series in a few minutes,
containing updates related to what Marcelo pointed out.

P.S.: in case you think we need much more discussion, we might try to
catch up on IRC to save this thread a few cycles :-)

Patch

Index: kvm/arch/s390/kvm/kvm-s390.c
===================================================================
--- kvm.orig/arch/s390/kvm/kvm-s390.c
+++ kvm/arch/s390/kvm/kvm-s390.c
@@ -674,6 +674,12 @@  long kvm_arch_vcpu_ioctl(struct file *fi
 	return -EINVAL;
 }
 
+static int wait_bit_schedule(void *word)
+{
+	schedule();
+	return 0;
+}
+
 /* Section: memory related */
 int kvm_arch_set_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
@@ -681,6 +687,7 @@  int kvm_arch_set_memory_region(struct kv
 				int user_alloc)
 {
 	int i;
+	struct kvm_vcpu *vcpu;
 
 	/* A few sanity checks. We can have exactly one memory slot which has
 	   to start at guest virtual zero and which has to be located at a
@@ -706,13 +713,19 @@  int kvm_arch_set_memory_region(struct kv
 
 	/* request update of sie control block for all available vcpus */
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		if (kvm->vcpus[i]) {
-			if (test_and_set_bit(KVM_REQ_MMU_RELOAD,
-						&kvm->vcpus[i]->requests))
-				continue;
-			kvm_s390_inject_sigp_stop(kvm->vcpus[i],
-						  ACTION_VCPUREQUEST_ON_STOP);
-		}
+		vcpu = kvm->vcpus[i];
+		if (!vcpu)
+			continue;
+
+		if (!test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+			continue;
+
+		if (vcpu->cpu == -1)
+			continue;
+
+		kvm_s390_inject_sigp_stop(vcpu, ACTION_VCPUREQUEST_ON_STOP);
+		wait_on_bit(&vcpu->requests, KVM_REQ_MMU_RELOAD,
+			    wait_bit_schedule, TASK_UNINTERRUPTIBLE);
 	}
 
 	return 0;
Index: kvm/arch/s390/kvm/kvm-s390.h
===================================================================
--- kvm.orig/arch/s390/kvm/kvm-s390.h
+++ kvm/arch/s390/kvm/kvm-s390.h
@@ -92,6 +92,13 @@  static inline unsigned long kvm_s390_han
 	if (!vcpu->requests)
 		return 0;
 
+	/* requests that can be handled at all levels */
+	if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) {
+		smp_mb__after_clear_bit();
+		wake_up_bit(&vcpu->requests, KVM_REQ_MMU_RELOAD);
+		kvm_s390_vcpu_set_mem(vcpu);
+	}
+
 	return vcpu->requests;
 }
 
Index: kvm/virt/kvm/kvm_main.c
===================================================================
--- kvm.orig/virt/kvm/kvm_main.c
+++ kvm/virt/kvm/kvm_main.c
@@ -1682,6 +1682,10 @@  static int kvm_vcpu_release(struct inode
 {
 	struct kvm_vcpu *vcpu = filp->private_data;
 
+	clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
+	smp_mb__after_clear_bit();
+	wake_up_bit(&vcpu->requests, KVM_REQ_MMU_RELOAD);
+
 	kvm_put_kvm(vcpu->kvm);
 	return 0;
 }