
KVM: x86/mmu: Grab nx_lpage_splits as an unsigned long before division

Message ID 20210615162905.2132937-1-seanjc@google.com
State New, archived
Series KVM: x86/mmu: Grab nx_lpage_splits as an unsigned long before division

Commit Message

Sean Christopherson June 15, 2021, 4:29 p.m. UTC
Snapshot kvm->stat.nx_lpage_splits into a local unsigned long to avoid
64-bit division on 32-bit kernels.  Casting to an unsigned long is safe
because the maximum number of shadow pages, n_max_mmu_pages, is also an
unsigned long, i.e. KVM will start recycling shadow pages before the
number of splits can exceed a 32-bit value.

  ERROR: modpost: "__udivdi3" [arch/x86/kvm/kvm.ko] undefined!

Fixes: 7ee093d4f3f5 ("KVM: switch per-VM stats to u64")
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
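
The modpost error above is a link failure: on 32-bit x86, GCC lowers an
unsigned 64-bit division to libgcc's __udivdi3(), and the kernel does not
provide that helper.  A minimal userspace sketch (illustrative names, not
the kernel code) of the before/after:

  #include <stdint.h>

  uint64_t nx_lpage_splits_stat;  /* stands in for the u64 kvm->stat.nx_lpage_splits */

  /* Before: 64-bit dividend, so GCC on i386 emits a call to
   * __udivdi3() instead of a native DIV instruction. */
  unsigned long to_zap_u64(unsigned int ratio)
  {
          return (nx_lpage_splits_stat + ratio - 1) / ratio;
  }

  /* After: snapshot into an unsigned long (32 bits on i386) first;
   * the division is then word-sized and compiles to a plain DIV. */
  unsigned long to_zap_ulong(unsigned int ratio)
  {
          unsigned long splits = nx_lpage_splits_stat;

          return (splits + ratio - 1) / ratio;
  }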

Comments

Maxim Levitsky June 16, 2021, 7:01 p.m. UTC | #1
On Tue, 2021-06-15 at 09:29 -0700, Sean Christopherson wrote:
> Snapshot kvm->stat.nx_lpage_splits into a local unsigned long to avoid
> 64-bit division on 32-bit kernels.  Casting to an unsigned long is safe
> because the maximum number of shadow pages, n_max_mmu_pages, is also an
> unsigned long, i.e. KVM will start recycling shadow pages before the
> number of splits can exceed a 32-bit value.
> 
>   ERROR: modpost: "__udivdi3" [arch/x86/kvm/kvm.ko] undefined!
> 
> Fixes: 7ee093d4f3f5 ("KVM: switch per-VM stats to u64")
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>  arch/x86/kvm/mmu/mmu.c | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 720ceb0a1f5c..7d3e57678d34 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -6043,6 +6043,7 @@ static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel
>  
>  static void kvm_recover_nx_lpages(struct kvm *kvm)
>  {
> +	unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
>  	int rcu_idx;
>  	struct kvm_mmu_page *sp;
>  	unsigned int ratio;
> @@ -6054,7 +6055,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
>  	write_lock(&kvm->mmu_lock);
>  
>  	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
> -	to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
> +	to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
>  	for ( ; to_zap; --to_zap) {
>  		if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
>  			break;
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>

Best regards,
	Maxim Levitsky
Paolo Bonzini June 17, 2021, 5:10 p.m. UTC | #2
On 15/06/21 18:29, Sean Christopherson wrote:
> Snapshot kvm->stat.nx_lpage_splits into a local unsigned long to avoid
> 64-bit division on 32-bit kernels.  Casting to an unsigned long is safe
> because the maximum number of shadow pages, n_max_mmu_pages, is also an
> unsigned long, i.e. KVM will start recycling shadow pages before the
> number of splits can exceed a 32-bit value.
> 
>    ERROR: modpost: "__udivdi3" [arch/x86/kvm/kvm.ko] undefined!
> 
> Fixes: 7ee093d4f3f5 ("KVM: switch per-VM stats to u64")
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>   arch/x86/kvm/mmu/mmu.c | 3 ++-
>   1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 720ceb0a1f5c..7d3e57678d34 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -6043,6 +6043,7 @@ static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel
>   
>   static void kvm_recover_nx_lpages(struct kvm *kvm)
>   {
> +	unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
>   	int rcu_idx;
>   	struct kvm_mmu_page *sp;
>   	unsigned int ratio;
> @@ -6054,7 +6055,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
>   	write_lock(&kvm->mmu_lock);
>   
>   	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
> -	to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
> +	to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
>   	for ( ; to_zap; --to_zap) {
>   		if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
>   			break;
> 

Queued, thanks.

Paolo

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 720ceb0a1f5c..7d3e57678d34 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6043,6 +6043,7 @@ static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel
 
 static void kvm_recover_nx_lpages(struct kvm *kvm)
 {
+	unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
 	int rcu_idx;
 	struct kvm_mmu_page *sp;
 	unsigned int ratio;
@@ -6054,7 +6055,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
 	write_lock(&kvm->mmu_lock);
 
 	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
-	to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
+	to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
 	for ( ; to_zap; --to_zap) {
 		if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
 			break;
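
For reference, DIV_ROUND_UP() is a plain open-coded division, so the type
of its first argument decides whether the compiler can emit a native
divide.  In recent trees DIV_ROUND_UP is an alias for the uapi macro
below, shown here only to make that point:

  #define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

With n being the u64 kvm->stat.nx_lpage_splits, the division is 64-bit
even though the result easily fits in 32 bits; snapshotting into an
unsigned long first makes the dividend, and thus the division, word-sized
on both 32-bit and 64-bit kernels.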