
[RFC] powerpc/kvm: Fix spinlock member access for PREEMPT_RT

Message ID ZwgYXsCDDwsOBZ4a@linux.ibm.com (mailing list archive)
State New
Series [RFC] powerpc/kvm: Fix spinlock member access for PREEMPT_RT

Commit Message

Vishal Chourasia Oct. 10, 2024, 6:09 p.m. UTC
Hi,

While building the kernel with CONFIG_PREEMPT_RT, I encountered several
compilation errors in the PowerPC KVM code. The issues appear in
book3s_hv_rm_mmu.c where it tries to access the 'rlock' member of struct
spinlock, which doesn't exist in the RT configuration.
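
For reference, here is a simplified mirror of the two spinlock_t layouts
from include/linux/spinlock_types.h (lockdep/debug fields omitted, and the
non-RT version actually uses a union), showing why the 'rlock' member only
exists on !PREEMPT_RT:

#ifndef CONFIG_PREEMPT_RT
typedef struct spinlock {
	struct raw_spinlock rlock;	/* contains arch_spinlock_t raw_lock */
} spinlock_t;
/* => arch_spin_lock(&lock->rlock.raw_lock) compiles ... */
#else
typedef struct spinlock {
	struct rt_mutex_base lock;	/* sleeping, rtmutex-based lock */
} spinlock_t;
/* => ... but on PREEMPT_RT there is no 'rlock' member at all. */
#endif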

arch/powerpc/kvm/book3s_hv_rm_mmu.c:248:32: error: no member named 'rlock' in 'struct spinlock'
  248 |         arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
      |                         ~~~~~~~~~~~~~ ^
./arch/powerpc/include/asm/qspinlock.h:164:45: note: expanded from macro 'arch_spin_lock'
  164 | #define arch_spin_lock(l)               queued_spin_lock(l)
      |                                                          ^
arch/powerpc/kvm/book3s_hv_rm_mmu.c:263:36: error: no member named 'rlock' in 'struct spinlock'
  263 |                         arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
      |                                           ~~~~~~~~~~~~~ ^
./arch/powerpc/include/asm/qspinlock.h:166:49: note: expanded from macro 'arch_spin_unlock'
  166 | #define arch_spin_unlock(l)             queued_spin_unlock(l)
      |                                                            ^
arch/powerpc/kvm/book3s_hv_rm_mmu.c:277:34: error: no member named 'rlock' in 'struct spinlock'
  277 |         arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
      |                           ~~~~~~~~~~~~~ ^
./arch/powerpc/include/asm/qspinlock.h:166:49: note: expanded from macro 'arch_spin_unlock'
  166 | #define arch_spin_unlock(l)             queued_spin_unlock(l)
      |                                                            ^
arch/powerpc/kvm/book3s_hv_rm_mmu.c:938:32: error: no member named 'rlock' in 'struct spinlock'
  938 |         arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
      |                         ~~~~~~~~~~~~~ ^
./arch/powerpc/include/asm/qspinlock.h:164:45: note: expanded from macro 'arch_spin_lock'
  164 | #define arch_spin_lock(l)               queued_spin_lock(l)
      |                                                          ^
arch/powerpc/kvm/book3s_hv_rm_mmu.c:950:34: error: no member named 'rlock' in 'struct spinlock'
  950 |         arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
      |                           ~~~~~~~~~~~~~ ^
./arch/powerpc/include/asm/qspinlock.h:166:49: note: expanded from macro 'arch_spin_unlock'
  166 | #define arch_spin_unlock(l)             queued_spin_unlock(l)
      |                                                            ^
arch/powerpc/kvm/book3s_hv_rm_mmu.c:966:32: error: no member named 'rlock' in 'struct spinlock'
  966 |         arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
      |                         ~~~~~~~~~~~~~ ^
./arch/powerpc/include/asm/qspinlock.h:164:45: note: expanded from macro 'arch_spin_lock'
  164 | #define arch_spin_lock(l)               queued_spin_lock(l)
      |                                                          ^
arch/powerpc/kvm/book3s_hv_rm_mmu.c:981:34: error: no member named 'rlock' in 'struct spinlock'
  981 |         arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
      |                           ~~~~~~~~~~~~~ ^
./arch/powerpc/include/asm/qspinlock.h:166:49: note: expanded from macro 'arch_spin_unlock'
  166 | #define arch_spin_unlock(l)             queued_spin_unlock(l)
      |                                                            ^
7 errors generated.
make[4]: *** [scripts/Makefile.build:229: arch/powerpc/kvm/book3s_hv_rm_mmu.o] Error 1
make[3]: *** [scripts/Makefile.build:478: arch/powerpc/kvm] Error 2
make[3]: *** Waiting for unfinished jobs....
make[2]: *** [scripts/Makefile.build:478: arch/powerpc] Error 2
make[2]: *** Waiting for unfinished jobs....

Replace arch_spin_lock/unlock on kvm->mmu_lock.rlock.raw_lock with
simple spin_lock/unlock on kvm->mmu_lock to fix build errors with
CONFIG_PREEMPT_RT. The RT configuration changes the spinlock structure,
removing the rlock member.

Signed-off-by: Vishal Chourasia <vishalc@linux.ibm.com>
---
 arch/powerpc/kvm/book3s_hv_rm_mmu.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

--
2.46.2

Comments

Paolo Bonzini Oct. 10, 2024, 9:23 p.m. UTC | #1
On 10/10/24 20:09, Vishal Chourasia wrote:
> Hi,
> 
> While building the kernel with CONFIG_PREEMPT_RT, I encountered several
> compilation errors in the PowerPC KVM code. The issues appear in
> book3s_hv_rm_mmu.c where it tries to access the 'rlock' member of struct
> spinlock, which doesn't exist in the RT configuration.

How was this tested? I suspect that putting to sleep a task that is 
running in real mode is a huge no-no.  The actual solution would have to 
be to split mmu_lock into a spin_lock and a raw_spin_lock, but that's a 
huge amount of work probably.  I'd just add a "depends on !PPC || 
!KVM_BOOK3S_64_HV" or something like that, to prevent enabling KVM-HV on 
PREEMPT_RT kernels.
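
To make the split-lock idea a bit more concrete, here is a very rough
sketch with hypothetical names (kvm_rm_lock, kvmppc_rm_example), not a
proposal: data shared with real mode would sit under a raw_spinlock_t,
which keeps its raw_lock member (and stays a spinning lock) even on
PREEMPT_RT, while kvm->mmu_lock remains an ordinary spinlock_t for the
virtual-mode paths, which take the raw lock nested inside it where they
touch the same data.

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(kvm_rm_lock);	/* hypothetical */

static void kvmppc_rm_example(void)		/* hypothetical real-mode path */
{
	/* raw_spinlock_t always exposes ->raw_lock, so this builds on RT
	 * too; using arch_spin_lock() directly also avoids the lockdep,
	 * debug and tracing hooks that are unsafe in real mode. */
	arch_spin_lock(&kvm_rm_lock.raw_lock);
	/* ... HPTE / host PTE work shared with real mode ... */
	arch_spin_unlock(&kvm_rm_lock.raw_lock);
}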

Thanks,

Paolo

> arch/powerpc/kvm/book3s_hv_rm_mmu.c:248:32: error: no member named 'rlock' in 'struct spinlock'
>    248 |         arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
>        |                         ~~~~~~~~~~~~~ ^
> ./arch/powerpc/include/asm/qspinlock.h:164:45: note: expanded from macro 'arch_spin_lock'
>    164 | #define arch_spin_lock(l)               queued_spin_lock(l)
>        |                                                          ^
> arch/powerpc/kvm/book3s_hv_rm_mmu.c:263:36: error: no member named 'rlock' in 'struct spinlock'
>    263 |                         arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
>        |                                           ~~~~~~~~~~~~~ ^
> ./arch/powerpc/include/asm/qspinlock.h:166:49: note: expanded from macro 'arch_spin_unlock'
>    166 | #define arch_spin_unlock(l)             queued_spin_unlock(l)
>        |                                                            ^
> arch/powerpc/kvm/book3s_hv_rm_mmu.c:277:34: error: no member named 'rlock' in 'struct spinlock'
>    277 |         arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
>        |                           ~~~~~~~~~~~~~ ^
> ./arch/powerpc/include/asm/qspinlock.h:166:49: note: expanded from macro 'arch_spin_unlock'
>    166 | #define arch_spin_unlock(l)             queued_spin_unlock(l)
>        |                                                            ^
> arch/powerpc/kvm/book3s_hv_rm_mmu.c:938:32: error: no member named 'rlock' in 'struct spinlock'
>    938 |         arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
>        |                         ~~~~~~~~~~~~~ ^
> ./arch/powerpc/include/asm/qspinlock.h:164:45: note: expanded from macro 'arch_spin_lock'
>    164 | #define arch_spin_lock(l)               queued_spin_lock(l)
>        |                                                          ^
> arch/powerpc/kvm/book3s_hv_rm_mmu.c:950:34: error: no member named 'rlock' in 'struct spinlock'
>    950 |         arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
>        |                           ~~~~~~~~~~~~~ ^
> ./arch/powerpc/include/asm/qspinlock.h:166:49: note: expanded from macro 'arch_spin_unlock'
>    166 | #define arch_spin_unlock(l)             queued_spin_unlock(l)
>        |                                                            ^
> arch/powerpc/kvm/book3s_hv_rm_mmu.c:966:32: error: no member named 'rlock' in 'struct spinlock'
>    966 |         arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
>        |                         ~~~~~~~~~~~~~ ^
> ./arch/powerpc/include/asm/qspinlock.h:164:45: note: expanded from macro 'arch_spin_lock'
>    164 | #define arch_spin_lock(l)               queued_spin_lock(l)
>        |                                                          ^
> arch/powerpc/kvm/book3s_hv_rm_mmu.c:981:34: error: no member named 'rlock' in 'struct spinlock'
>    981 |         arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
>        |                           ~~~~~~~~~~~~~ ^
> ./arch/powerpc/include/asm/qspinlock.h:166:49: note: expanded from macro 'arch_spin_unlock'
>    166 | #define arch_spin_unlock(l)             queued_spin_unlock(l)
>        |                                                            ^
> 7 errors generated.
> make[4]: *** [scripts/Makefile.build:229: arch/powerpc/kvm/book3s_hv_rm_mmu.o] Error 1
> make[3]: *** [scripts/Makefile.build:478: arch/powerpc/kvm] Error 2
> make[3]: *** Waiting for unfinished jobs....
> make[2]: *** [scripts/Makefile.build:478: arch/powerpc] Error 2
> make[2]: *** Waiting for unfinished jobs....
> 
> Replace arch_spin_lock/unlock on kvm->mmu_lock.rlock.raw_lock with
> simple spin_lock/unlock on kvm->mmu_lock to fix build errors with
> CONFIG_PREEMPT_RT. The RT configuration changes the spinlock structure,
> removing the rlock member.
> 
> Signed-off-by: Vishal Chourasia <vishalc@linux.ibm.com>
> ---
>   arch/powerpc/kvm/book3s_hv_rm_mmu.c | 14 +++++++-------
>   1 file changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
> index 17cb75a127b04..abf1e6de85644 100644
> --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
> +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
> @@ -245,7 +245,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
>          /* Translate to host virtual address */
>          hva = __gfn_to_hva_memslot(memslot, gfn);
> 
> -       arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
> +       spin_lock(&kvm->mmu_lock);
>          ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &hpage_shift);
>          if (ptep) {
>                  pte_t pte;
> @@ -260,7 +260,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
>                   * to <= host page size, if host is using hugepage
>                   */
>                  if (host_pte_size < psize) {
> -                       arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
> +                       spin_unlock(&kvm->mmu_lock);
>                          return H_PARAMETER;
>                  }
>                  pte = kvmppc_read_update_linux_pte(ptep, writing);
> @@ -274,7 +274,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
>                          pa |= gpa & ~PAGE_MASK;
>                  }
>          }
> -       arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
> +       spin_unlock(&kvm->mmu_lock);
> 
>          ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1);
>          ptel |= pa;
> @@ -935,7 +935,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
>          mmu_seq = kvm->mmu_invalidate_seq;
>          smp_rmb();
> 
> -       arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
> +       spin_lock(&kvm->mmu_lock);
> 
>          ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &pa, &memslot);
>          if (ret != H_SUCCESS)
> @@ -947,7 +947,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
>          kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
> 
>   out_unlock:
> -       arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
> +       spin_unlock(&kvm->mmu_lock);
>          return ret;
>   }
> 
> @@ -963,7 +963,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
>          mmu_seq = kvm->mmu_invalidate_seq;
>          smp_rmb();
> 
> -       arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
> +       spin_lock(&kvm->mmu_lock);
>          ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &dest_pa, &dest_memslot);
>          if (ret != H_SUCCESS)
>                  goto out_unlock;
> @@ -978,7 +978,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
>          kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
> 
>   out_unlock:
> -       arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
> +       spin_unlock(&kvm->mmu_lock);
>          return ret;
>   }
> 
> --
> 2.46.2
> 
>
Michael Ellerman Oct. 11, 2024, 2:57 a.m. UTC | #2
Paolo Bonzini <pbonzini@redhat.com> writes:
> On 10/10/24 20:09, Vishal Chourasia wrote:
>> Hi,
>> 
>> While building the kernel with CONFIG_PREEMPT_RT, I encountered several
>> compilation errors in the PowerPC KVM code. The issues appear in
>> book3s_hv_rm_mmu.c where it tries to access the 'rlock' member of struct
>> spinlock, which doesn't exist in the RT configuration.
>
> How was this tested? I suspect that putting to sleep a task that is 
> running in real mode is a huge no-no.

Yeah.

Even without preempt, spin_lock() can end up in debug/tracing code that
will blow up in real mode.

Vishal, if you look at the history of that file you'll see eg:

  87013f9c602c ("powerpc/kvm/book3s: switch from raw_spin_*lock to arch_spin_lock.")
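
Roughly, the layering looks like this (simplified; details vary with the
debug options), and each outer layer can call into code that is unsafe
with the MMU off:

  spin_lock()          - highest level, a sleeping lock on PREEMPT_RT
    raw_spin_lock()    - lockdep annotations, debug checks, tracing
      arch_spin_lock() - bare hardware spinlock, no hooks

which is why the real-mode code takes the innermost layer directly, e.g.
(illustrative only, and it only builds on !PREEMPT_RT where spinlock_t
still wraps a raw_spinlock):

#include <linux/kvm_host.h>	/* struct kvm, mmu_lock */

static void rm_lock_sketch(struct kvm *kvm)
{
	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);	/* no lockdep/tracing */
	/* ... real-mode HPTE work ... */
	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
}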

> The actual solution would have to 
> be to split mmu_lock into a spin_lock and a raw_spin_lock, but that's a 
> huge amount of work probably.  I'd just add a "depends on !PPC || 
> !KVM_BOOK3S_64_HV" or something like that, to prevent enabling KVM-HV on 
> PREEMPT_RT kernels.

Yeah that should work to get something building.

The bulk (or all?) of that file is not used for Radix guests, only for
hash page table MMU guests.

So I think it should be possible to hide that code behind a new CONFIG
option that controls support for HPT guests. And then that option could
be incompatible with PREEMPT_RT. But that will require unstitching some
of the connections between that code and the other ppc KVM code.

cheers
Vishal Chourasia Oct. 11, 2024, 10:03 a.m. UTC | #3
On Thu, Oct 10, 2024 at 11:23:55PM +0200, Paolo Bonzini wrote:
> On 10/10/24 20:09, Vishal Chourasia wrote:
> > Hi,
> > 
> > While building the kernel with CONFIG_PREEMPT_RT, I encountered several
> > compilation errors in the PowerPC KVM code. The issues appear in
> > book3s_hv_rm_mmu.c where it tries to access the 'rlock' member of struct
> > spinlock, which doesn't exist in the RT configuration.
> 
> How was this tested? I suspect that putting to sleep a task that is running
> in real mode is a huge no-no.  The actual solution would have to be to split
> mmu_lock into a spin_lock and a raw_spin_lock, but that's a huge amount of
> work probably.  I'd just add a "depends on !PPC || !KVM_BOOK3S_64_HV" or
> something like that, to prevent enabling KVM-HV on PREEMPT_RT kernels.
Hi Paolo,

This is a build-time error; I didn't boot the kernel.

I used pseries_le_defconfig with some other configs enabled. I was trying
to see if the kernel would compile with ARCH_SUPPORTS_RT and
CONFIG_PREEMPT_RT enabled.

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 8094a01974cca..568dc856f0dfa 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -168,6 +168,7 @@ config PPC
        select ARCH_STACKWALK
        select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_SUPPORTS_DEBUG_PAGEALLOC    if PPC_BOOK3S || PPC_8xx
+       select ARCH_SUPPORTS_RT                 if !PPC || !KVM_BOOK3S_64_HV
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_CMPXCHG_LOCKREF         if PPC64
        select ARCH_USE_MEMTEST
I tried rebuilding with the above diff as per your suggestion. It works
when KVM_BOOK3S_64_HV is set to N, but in pseries_le_defconfig it defaults
to M, which then requires setting it to N explicitly.


Would something like the below be a better solution? This will set
KVM_BOOK3S_64_HV to N if ARCH_SUPPORTS_RT is set.

diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index dbfdc126bf144..33e0d50b08b14 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -80,7 +80,7 @@ config KVM_BOOK3S_64

 config KVM_BOOK3S_64_HV
        tristate "KVM for POWER7 and later using hypervisor mode in host"
-       depends on KVM_BOOK3S_64 && PPC_POWERNV
+       depends on KVM_BOOK3S_64 && PPC_POWERNV && !ARCH_SUPPORTS_RT
        select KVM_BOOK3S_HV_POSSIBLE
        select KVM_GENERIC_MMU_NOTIFIER
        select CMA

Thanks
Paolo Bonzini Oct. 11, 2024, 10:08 a.m. UTC | #4
On Fri, Oct 11, 2024 at 12:04 PM Vishal Chourasia <vishalc@linux.ibm.com> wrote:
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 8094a01974cca..568dc856f0dfa 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -168,6 +168,7 @@ config PPC
>         select ARCH_STACKWALK
>         select ARCH_SUPPORTS_ATOMIC_RMW
>         select ARCH_SUPPORTS_DEBUG_PAGEALLOC    if PPC_BOOK3S || PPC_8xx
> +       select ARCH_SUPPORTS_RT                 if !PPC || !KVM_BOOK3S_64_HV
>         select ARCH_USE_BUILTIN_BSWAP
>         select ARCH_USE_CMPXCHG_LOCKREF         if PPC64
>         select ARCH_USE_MEMTEST
> I tried rebuilding with the above diff as per your suggestion. It works
> when KVM_BOOK3S_64_HV is set to N, but in pseries_le_defconfig it defaults
> to M, which then requires setting it to N explicitly.

Yes, that was intentional (the "!PPC ||" part is not necessary since
you placed this in "config PPC"). I understand however that it's hard
to discover that you need KVM_BOOK3S_64_HV=n in order to build an RT
kernel.

> Would something like the below be a better solution? This will set
> KVM_BOOK3S_64_HV to N if ARCH_SUPPORTS_RT is set.
>
> diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
> index dbfdc126bf144..33e0d50b08b14 100644
> --- a/arch/powerpc/kvm/Kconfig
> +++ b/arch/powerpc/kvm/Kconfig
> @@ -80,7 +80,7 @@ config KVM_BOOK3S_64
>
>  config KVM_BOOK3S_64_HV
>         tristate "KVM for POWER7 and later using hypervisor mode in host"
> -       depends on KVM_BOOK3S_64 && PPC_POWERNV
> +       depends on KVM_BOOK3S_64 && PPC_POWERNV && !ARCH_SUPPORTS_RT
>         select KVM_BOOK3S_HV_POSSIBLE
>         select KVM_GENERIC_MMU_NOTIFIER
>         select CMA

No, that would make it completely impossible to build with KVM enabled.

Paolo

Patch

diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 17cb75a127b04..abf1e6de85644 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -245,7 +245,7 @@  long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
        /* Translate to host virtual address */
        hva = __gfn_to_hva_memslot(memslot, gfn);

-       arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+       spin_lock(&kvm->mmu_lock);
        ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &hpage_shift);
        if (ptep) {
                pte_t pte;
@@ -260,7 +260,7 @@  long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                 * to <= host page size, if host is using hugepage
                 */
                if (host_pte_size < psize) {
-                       arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+                       spin_unlock(&kvm->mmu_lock);
                        return H_PARAMETER;
                }
                pte = kvmppc_read_update_linux_pte(ptep, writing);
@@ -274,7 +274,7 @@  long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                        pa |= gpa & ~PAGE_MASK;
                }
        }
-       arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+       spin_unlock(&kvm->mmu_lock);

        ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1);
        ptel |= pa;
@@ -935,7 +935,7 @@  static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
        mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();

-       arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+       spin_lock(&kvm->mmu_lock);

        ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &pa, &memslot);
        if (ret != H_SUCCESS)
@@ -947,7 +947,7 @@  static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
        kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);

 out_unlock:
-       arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+       spin_unlock(&kvm->mmu_lock);
        return ret;
 }

@@ -963,7 +963,7 @@  static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
        mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();

-       arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+       spin_lock(&kvm->mmu_lock);
        ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &dest_pa, &dest_memslot);
        if (ret != H_SUCCESS)
                goto out_unlock;
@@ -978,7 +978,7 @@  static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
        kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);

 out_unlock:
-       arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+       spin_unlock(&kvm->mmu_lock);
        return ret;
 }