[v2] KVM: x86/mmu: Make is_nx_huge_page_enabled an inline function

Message ID 1622102271-63107-1-git-send-email-zhangshaokun@hisilicon.com (mailing list archive)
State New, archived
Series [v2] KVM: x86/mmu: Make is_nx_huge_page_enabled an inline function

Commit Message

Shaokun Zhang May 27, 2021, 7:57 a.m. UTC
Function 'is_nx_huge_page_enabled' is called only by kvm/mmu, so make
it a static inline function and remove the unnecessary declaration.

Cc: Ben Gardon <bgardon@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Shaokun Zhang <zhangshaokun@hisilicon.com>
---
ChangeLog:
v1-->v2:
    1. Address Sean's comment: make is_nx_huge_page_enabled a static
inline function and remove the unnecessary declaration.

 arch/x86/kvm/mmu/mmu.c          | 7 +------
 arch/x86/kvm/mmu/mmu_internal.h | 9 ++++++---
 2 files changed, 7 insertions(+), 9 deletions(-)

Comments

Paolo Bonzini May 27, 2021, noon UTC | #1
On 27/05/21 09:57, Shaokun Zhang wrote:
> Function 'is_nx_huge_page_enabled' is called only by kvm/mmu, so make
> it a static inline function and remove the unnecessary declaration.
> 
> Cc: Ben Gardon <bgardon@google.com>
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> Cc: Sean Christopherson <seanjc@google.com>
> Suggested-by: Sean Christopherson <seanjc@google.com>
> Signed-off-by: Shaokun Zhang <zhangshaokun@hisilicon.com>
> ---
> ChangeLog:
> v1-->v2:
>      1. Address Sean's comment: make is_nx_huge_page_enabled a static
> inline function and remove the unnecessary declaration.
> 
>   arch/x86/kvm/mmu/mmu.c          | 7 +------
>   arch/x86/kvm/mmu/mmu_internal.h | 9 ++++++---
>   2 files changed, 7 insertions(+), 9 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 0144c40d09c7..d1e89e7ded17 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -55,7 +55,7 @@
>   
>   extern bool itlb_multihit_kvm_mitigation;
>   
> -static int __read_mostly nx_huge_pages = -1;
> +int __read_mostly nx_huge_pages = -1;
>   #ifdef CONFIG_PREEMPT_RT
>   /* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
>   static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
> @@ -208,11 +208,6 @@ void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
>   	kvm_flush_remote_tlbs_with_range(kvm, &range);
>   }
>   
> -bool is_nx_huge_page_enabled(void)
> -{
> -	return READ_ONCE(nx_huge_pages);
> -}
> -
>   static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
>   			   unsigned int access)
>   {
> diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
> index d64ccb417c60..ff4c6256f3f9 100644
> --- a/arch/x86/kvm/mmu/mmu_internal.h
> +++ b/arch/x86/kvm/mmu/mmu_internal.h
> @@ -116,7 +116,12 @@ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
>   	       kvm_x86_ops.cpu_dirty_log_size;
>   }
>   
> -bool is_nx_huge_page_enabled(void);
> +extern int nx_huge_pages;
> +static inline bool is_nx_huge_page_enabled(void)
> +{
> +	return READ_ONCE(nx_huge_pages);
> +}
> +
>   bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
>   			    bool can_unsync);
>   
> @@ -158,8 +163,6 @@ int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
>   void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
>   				kvm_pfn_t *pfnp, int *goal_levelp);
>   
> -bool is_nx_huge_page_enabled(void);
> -
>   void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
>   
>   void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
> 

Queued, thanks.

Paolo

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0144c40d09c7..d1e89e7ded17 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -55,7 +55,7 @@
 
 extern bool itlb_multihit_kvm_mitigation;
 
-static int __read_mostly nx_huge_pages = -1;
+int __read_mostly nx_huge_pages = -1;
 #ifdef CONFIG_PREEMPT_RT
 /* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
 static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
@@ -208,11 +208,6 @@ void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 	kvm_flush_remote_tlbs_with_range(kvm, &range);
 }
 
-bool is_nx_huge_page_enabled(void)
-{
-	return READ_ONCE(nx_huge_pages);
-}
-
 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
 			   unsigned int access)
 {
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index d64ccb417c60..ff4c6256f3f9 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -116,7 +116,12 @@ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
 	       kvm_x86_ops.cpu_dirty_log_size;
 }
 
-bool is_nx_huge_page_enabled(void);
+extern int nx_huge_pages;
+static inline bool is_nx_huge_page_enabled(void)
+{
+	return READ_ONCE(nx_huge_pages);
+}
+
 bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 			    bool can_unsync);
 
@@ -158,8 +163,6 @@ int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
 void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
 				kvm_pfn_t *pfnp, int *goal_levelp);
 
-bool is_nx_huge_page_enabled(void);
-
 void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
 
 void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
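For context on the net effect of the change: mmu_internal.h now carries both
the extern declaration of the backing variable and the inline helper, so any
kvm/mmu translation unit that includes the header can inline the check rather
than paying for a cross-file call. Below is a minimal sketch of the resulting
pattern; is_nx_huge_page_enabled and nx_huge_pages are taken from the patch,
while the includes and the caller at the end are hypothetical illustration,
not code from the series.

	/* sketch of arch/x86/kvm/mmu/mmu_internal.h after the patch */
	#include <linux/compiler.h>	/* READ_ONCE() */
	#include <linux/types.h>	/* bool */

	extern int nx_huge_pages;	/* defined in arch/x86/kvm/mmu/mmu.c */

	static inline bool is_nx_huge_page_enabled(void)
	{
		/*
		 * nx_huge_pages is a runtime-writable module parameter,
		 * hence READ_ONCE(); the implicit int-to-bool conversion
		 * makes any non-zero value read as "enabled".
		 */
		return READ_ONCE(nx_huge_pages);
	}

	/* hypothetical caller, for illustration only */
	static inline bool nx_mitigation_active(void)
	{
		return is_nx_huge_page_enabled();
	}

The trade-off is visible in the diffstat: making the helper inlinable requires
dropping the static qualifier from nx_huge_pages and exposing it via extern,
widening the variable's linkage in exchange for removing a function call from
the MMU paths that consult it.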