diff mbox series

KVM: x86/mmu: Handle "default" period when selectively waking kthread

Message ID 20211120015706.3830341-1-seanjc@google.com (mailing list archive)
State New, archived
Headers show
Series KVM: x86/mmu: Handle "default" period when selectively waking kthread | expand

Commit Message

Sean Christopherson Nov. 20, 2021, 1:57 a.m. UTC
Account for the '0' being a default, "let KVM choose" period, when
determining whether or not the recovery worker needs to be awakened in
response to userspace reducing the period.  Failure to do so results in
the worker not being awakened properly, e.g. when changing the period
from '0' to any small-ish value.

Fixes: 4dfe4f40d845 ("kvm: x86: mmu: Make NX huge page recovery period configurable")
Cc: stable@vger.kernel.org
Cc: Junaid Shahid <junaids@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 48 +++++++++++++++++++++++++++++-------------
 1 file changed, 33 insertions(+), 15 deletions(-)

Comments

Junaid Shahid Nov. 23, 2021, 10:04 p.m. UTC | #1
On 11/19/21 5:57 PM, Sean Christopherson wrote:
> Account for the '0' being a default, "let KVM choose" period, when
> determining whether or not the recovery worker needs to be awakened in
> response to userspace reducing the period.  Failure to do so results in
> the worker not being awakened properly, e.g. when changing the period
> from '0' to any small-ish value.
> 
> Fixes: 4dfe4f40d845 ("kvm: x86: mmu: Make NX huge page recovery period configurable")
> Cc: stable@vger.kernel.org
> Cc: Junaid Shahid <junaids@google.com>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>   arch/x86/kvm/mmu/mmu.c | 48 +++++++++++++++++++++++++++++-------------
>   1 file changed, 33 insertions(+), 15 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 8f0035517450..db7e1ad4d046 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -6165,23 +6165,46 @@ void kvm_mmu_module_exit(void)
>   	mmu_audit_disable();
>   }
>   
> +/*
> + * Calculate the effective recovery period, accounting for '0' meaning "let KVM
> + * select a period of ~1 hour per page".  Returns true if recovery is enabled.
> + */
> +static bool calc_nx_huge_pages_recovery_period(uint *period)
> +{
> +	/*
> +	 * Use READ_ONCE to get the params, this may be called outside of the
> +	 * param setters, e.g. by the kthread to compute its next timeout.
> +	 */
> +	bool enabled = READ_ONCE(nx_huge_pages);
> +	uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
> +
> +	if (!enabled || !ratio)
> +		return false;
> +
> +	*period = READ_ONCE(nx_huge_pages_recovery_period_ms);
> +	if (!*period) {
> +		/* Make sure the period is not less than one second.  */
> +		ratio = min(ratio, 3600u);
> +		*period = 60 * 60 * 1000 / ratio;
> +	}
> +	return true;
> +}
> +
>   static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
>   {
>   	bool was_recovery_enabled, is_recovery_enabled;
>   	uint old_period, new_period;
>   	int err;
>   
> -	was_recovery_enabled = nx_huge_pages_recovery_ratio;
> -	old_period = nx_huge_pages_recovery_period_ms;
> +	was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
>   
>   	err = param_set_uint(val, kp);
>   	if (err)
>   		return err;
>   
> -	is_recovery_enabled = nx_huge_pages_recovery_ratio;
> -	new_period = nx_huge_pages_recovery_period_ms;
> +	is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period);
>   
> -	if (READ_ONCE(nx_huge_pages) && is_recovery_enabled &&
> +	if (is_recovery_enabled &&
>   	    (!was_recovery_enabled || old_period > new_period)) {
>   		struct kvm *kvm;
>   
> @@ -6245,18 +6268,13 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
>   
>   static long get_nx_lpage_recovery_timeout(u64 start_time)
>   {
> -	uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
> -	uint period = READ_ONCE(nx_huge_pages_recovery_period_ms);
> +	bool enabled;
> +	uint period;
>   
> -	if (!period && ratio) {
> -		/* Make sure the period is not less than one second.  */
> -		ratio = min(ratio, 3600u);
> -		period = 60 * 60 * 1000 / ratio;
> -	}
> +	enabled = calc_nx_huge_pages_recovery_period(&period);
>   
> -	return READ_ONCE(nx_huge_pages) && ratio
> -		? start_time + msecs_to_jiffies(period) - get_jiffies_64()
> -		: MAX_SCHEDULE_TIMEOUT;
> +	return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
> +		       : MAX_SCHEDULE_TIMEOUT;
>   }
>   
>   static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
> 

Reviewed-by: Junaid Shahid <junaids@google.com>
Paolo Bonzini Nov. 26, 2021, 1:10 p.m. UTC | #2
On 11/20/21 02:57, Sean Christopherson wrote:
> +/*
> + * Calculate the effective recovery period, accounting for '0' meaning "let KVM
> + * select a period of ~1 hour per page".  Returns true if recovery is enabled.
> + */

Slightly better: "let KVM select a halving time of ~1 hour".  Queued 
with this change, thanks.

Paolo
diff mbox series

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 8f0035517450..db7e1ad4d046 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6165,23 +6165,46 @@  void kvm_mmu_module_exit(void)
 	mmu_audit_disable();
 }
 
+/*
+ * Calculate the effective recovery period, accounting for '0' meaning "let KVM
+ * select a halving time of ~1 hour".  Returns true if recovery is enabled.
+ */
+static bool calc_nx_huge_pages_recovery_period(uint *period)
+{
+	/*
+	 * Use READ_ONCE to get the params, this may be called outside of the
+	 * param setters, e.g. by the kthread to compute its next timeout.
+	 */
+	bool enabled = READ_ONCE(nx_huge_pages);
+	uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
+
+	if (!enabled || !ratio)
+		return false;
+
+	*period = READ_ONCE(nx_huge_pages_recovery_period_ms);
+	if (!*period) {
+		/* Make sure the period is not less than one second.  */
+		ratio = min(ratio, 3600u);
+		*period = 60 * 60 * 1000 / ratio;
+	}
+	return true;
+}
+
 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
 {
 	bool was_recovery_enabled, is_recovery_enabled;
 	uint old_period, new_period;
 	int err;
 
-	was_recovery_enabled = nx_huge_pages_recovery_ratio;
-	old_period = nx_huge_pages_recovery_period_ms;
+	was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
 
 	err = param_set_uint(val, kp);
 	if (err)
 		return err;
 
-	is_recovery_enabled = nx_huge_pages_recovery_ratio;
-	new_period = nx_huge_pages_recovery_period_ms;
+	is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period);
 
-	if (READ_ONCE(nx_huge_pages) && is_recovery_enabled &&
+	if (is_recovery_enabled &&
 	    (!was_recovery_enabled || old_period > new_period)) {
 		struct kvm *kvm;
 
@@ -6245,18 +6268,13 @@  static void kvm_recover_nx_lpages(struct kvm *kvm)
 
 static long get_nx_lpage_recovery_timeout(u64 start_time)
 {
-	uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
-	uint period = READ_ONCE(nx_huge_pages_recovery_period_ms);
+	bool enabled;
+	uint period;
 
-	if (!period && ratio) {
-		/* Make sure the period is not less than one second.  */
-		ratio = min(ratio, 3600u);
-		period = 60 * 60 * 1000 / ratio;
-	}
+	enabled = calc_nx_huge_pages_recovery_period(&period);
 
-	return READ_ONCE(nx_huge_pages) && ratio
-		? start_time + msecs_to_jiffies(period) - get_jiffies_64()
-		: MAX_SCHEDULE_TIMEOUT;
+	return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
+		       : MAX_SCHEDULE_TIMEOUT;
 }
 
 static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)