
[v2,07/11] KVM: x86/MMU: Factor out updating NX hugepages state for a VM

Message ID: 20220321234844.1543161-8-bgardon@google.com (mailing list archive)
State: New, archived
Series: KVM: x86: Add a cap to disable NX hugepages on a VM

Commit Message

Ben Gardon March 21, 2022, 11:48 p.m. UTC
Factor out the code to update the NX hugepages state for an individual
VM. This will be expanded in future commits to allow per-VM control of
NX hugepages.

No functional change intended.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

Comments

David Matlack March 28, 2022, 8:18 p.m. UTC | #1
On Mon, Mar 21, 2022 at 04:48:40PM -0700, Ben Gardon wrote:
> Factor out the code to update the NX hugepages state for an individual
> VM. This will be expanded in future commits to allow per-VM control of
> NX hugepages.
> 
> No functional change intended.
> 
> Signed-off-by: Ben Gardon <bgardon@google.com>
> ---
>  arch/x86/kvm/mmu/mmu.c | 18 +++++++++++-------
>  1 file changed, 11 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 3b8da8b0745e..1b59b56642f1 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -6195,6 +6195,15 @@ static void __set_nx_huge_pages(bool val)
>  	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
>  }
>  
> +static int kvm_update_nx_huge_pages(struct kvm *kvm)
> +{
> +	mutex_lock(&kvm->slots_lock);
> +	kvm_mmu_zap_all_fast(kvm);
> +	mutex_unlock(&kvm->slots_lock);
> +
> +	wake_up_process(kvm->arch.nx_lpage_recovery_thread);
> +}
> +
>  static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
>  {
>  	bool old_val = nx_huge_pages;
> @@ -6217,13 +6226,8 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
>  
>  		mutex_lock(&kvm_lock);
>  

nit: This blank line is asymmetrical with mutex_unlock().

> -		list_for_each_entry(kvm, &vm_list, vm_list) {
> -			mutex_lock(&kvm->slots_lock);
> -			kvm_mmu_zap_all_fast(kvm);
> -			mutex_unlock(&kvm->slots_lock);
> -
> -			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
> -		}
> +		list_for_each_entry(kvm, &vm_list, vm_list)
> +			kvm_set_nx_huge_pages(kvm);

This should be kvm_update_nx_huge_pages() right?

>  		mutex_unlock(&kvm_lock);
>  	}
>  
> -- 
> 2.35.1.894.gb6a874cedc-goog
>
Ben Gardon March 28, 2022, 10:41 p.m. UTC | #2
On Mon, Mar 28, 2022 at 1:18 PM David Matlack <dmatlack@google.com> wrote:
>
> On Mon, Mar 21, 2022 at 04:48:40PM -0700, Ben Gardon wrote:
> > Factor out the code to update the NX hugepages state for an individual
> > VM. This will be expanded in future commits to allow per-VM control of
> > NX hugepages.
> >
> > No functional change intended.
> >
> > Signed-off-by: Ben Gardon <bgardon@google.com>
> > ---
> >  arch/x86/kvm/mmu/mmu.c | 18 +++++++++++-------
> >  1 file changed, 11 insertions(+), 7 deletions(-)
> >
> > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > index 3b8da8b0745e..1b59b56642f1 100644
> > --- a/arch/x86/kvm/mmu/mmu.c
> > +++ b/arch/x86/kvm/mmu/mmu.c
> > @@ -6195,6 +6195,15 @@ static void __set_nx_huge_pages(bool val)
> >       nx_huge_pages = itlb_multihit_kvm_mitigation = val;
> >  }
> >
> > +static int kvm_update_nx_huge_pages(struct kvm *kvm)
> > +{
> > +     mutex_lock(&kvm->slots_lock);
> > +     kvm_mmu_zap_all_fast(kvm);
> > +     mutex_unlock(&kvm->slots_lock);
> > +
> > +     wake_up_process(kvm->arch.nx_lpage_recovery_thread);
> > +}
> > +
> >  static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
> >  {
> >       bool old_val = nx_huge_pages;
> > @@ -6217,13 +6226,8 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
> >
> >               mutex_lock(&kvm_lock);
> >
>
> nit: This blank line is asymmetrical with mutex_unlock().
>
> > -             list_for_each_entry(kvm, &vm_list, vm_list) {
> > -                     mutex_lock(&kvm->slots_lock);
> > -                     kvm_mmu_zap_all_fast(kvm);
> > -                     mutex_unlock(&kvm->slots_lock);
> > -
> > -                     wake_up_process(kvm->arch.nx_lpage_recovery_thread);
> > -             }
> > +             list_for_each_entry(kvm, &vm_list, vm_list)
> > +                     kvm_set_nx_huge_pages(kvm);
>
> This should be kvm_update_nx_huge_pages() right?

Oh woops, duh. Apparently I did not compile-test this patch individually.

>
> >               mutex_unlock(&kvm_lock);
> >       }
> >
> > --
> > 2.35.1.894.gb6a874cedc-goog
> >
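For reference, here is a sketch of how the helper would presumably look once the issues raised above are addressed: the call site renamed to kvm_update_nx_huge_pages() per David's comment, and the return type changed to void, since the function returns nothing. Both changes are assumptions about a future revision, not code from this thread:

static void kvm_update_nx_huge_pages(struct kvm *kvm)
{
	/*
	 * Zap all pages so that existing mappings are rebuilt with the
	 * new NX hugepages setting applied.
	 */
	mutex_lock(&kvm->slots_lock);
	kvm_mmu_zap_all_fast(kvm);
	mutex_unlock(&kvm->slots_lock);

	/* Kick the recovery thread so it re-evaluates the new state. */
	wake_up_process(kvm->arch.nx_lpage_recovery_thread);
}

...and at the call site:

	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_update_nx_huge_pages(kvm);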

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 3b8da8b0745e..1b59b56642f1 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6195,6 +6195,15 @@ static void __set_nx_huge_pages(bool val)
 	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
 }
 
+static int kvm_update_nx_huge_pages(struct kvm *kvm)
+{
+	mutex_lock(&kvm->slots_lock);
+	kvm_mmu_zap_all_fast(kvm);
+	mutex_unlock(&kvm->slots_lock);
+
+	wake_up_process(kvm->arch.nx_lpage_recovery_thread);
+}
+
 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 {
 	bool old_val = nx_huge_pages;
@@ -6217,13 +6226,8 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 
 		mutex_lock(&kvm_lock);
 
-		list_for_each_entry(kvm, &vm_list, vm_list) {
-			mutex_lock(&kvm->slots_lock);
-			kvm_mmu_zap_all_fast(kvm);
-			mutex_unlock(&kvm->slots_lock);
-
-			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
-		}
+		list_for_each_entry(kvm, &vm_list, vm_list)
+			kvm_set_nx_huge_pages(kvm);
 		mutex_unlock(&kvm_lock);
 	}
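
As the commit message notes, this helper is meant to be reused for per-VM control later in the series ("Add a cap to disable NX hugepages on a VM"). A rough, hypothetical sketch of such a caller, where the per-VM flag kvm->arch.disable_nx_huge_pages and the handler name are illustrative assumptions, not taken from this patch:

/* Hypothetical per-VM cap handler from a later patch in this series. */
static int kvm_vm_disable_nx_huge_pages(struct kvm *kvm)
{
	/* Record the per-VM override of the module-wide setting... */
	kvm->arch.disable_nx_huge_pages = true;

	/* ...then zap and rebuild mappings so the override takes effect. */
	kvm_update_nx_huge_pages(kvm);
	return 0;
}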