
Untested fix for attributes vs. hugepage race

Message ID Z__AAB_EFxGFEjDR@google.com (mailing list archive)
State New
Series Untested fix for attributes vs. hugepage race

Commit Message

Sean Christopherson April 16, 2025, 2:34 p.m. UTC
Mike, can you give this a shot and see if it fixes the race where KVM installs a
hugepage mapping when the memory attributes of a subset of the hugepage are
changing?

Compile tested only.

--
From: Sean Christopherson <seanjc@google.com>
Date: Wed, 16 Apr 2025 07:18:19 -0700
Subject: [PATCH] KVM: x86/mmu: Prevent installing hugepages when mem
 attributes are changing

When changing memory attributes on a subset of a potential hugepage, add
the hugepage to the invalidation range tracking to prevent installing a
hugepage until the attributes are fully updated.  Like the actual hugepage
tracking updates in kvm_arch_post_set_memory_attributes(), process only
the head and tail pages, as any potential hugepages that are entirely
covered by the range will already be tracked.

Note, only hugepage chunks whose current attributes are NOT mixed need to
be added to the invalidation set, as mixed attributes already prevent
installing a hugepage, and it's perfectly safe to install a smaller
mapping for a gfn whose attributes aren't changing.

Reported-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 63 +++++++++++++++++++++++++++++++-----------
 1 file changed, 47 insertions(+), 16 deletions(-)


base-commit: fd02aa45bda6d2f2fedcab70e828867332ef7e1c
--

Comments

Michael Roth April 18, 2025, 12:12 a.m. UTC | #1
On Wed, Apr 16, 2025 at 02:34:40PM +0000, Sean Christopherson wrote:
> Mike, can you give this a shot and see if it fixes the race where KVM installs a
> hugepage mapping when the memory attributes of a subset of the hugepage are
> changing?
> 
> Compile tested only.

Hi Sean,

Thanks for sending the patch. I'm still working on verifying this fix on
the real setup, but some hacks to artificially trigger the original case
and verify the behavior seem to indicate that this patch does the trick.
I did have some comments below, though.

> 
> --
> From: Sean Christopherson <seanjc@google.com>
> Date: Wed, 16 Apr 2025 07:18:19 -0700
> Subject: [PATCH] KVM: x86/mmu: Prevent installing hugepages when mem
>  attributes are changing
> 
> When changing memory attributes on a subset of a potential hugepage, add
> the hugepage to the invalidation range tracking to prevent installing a
> hugepage until the attributes are fully updated.  Like the actual hugepage
> tracking updates in kvm_arch_post_set_memory_attributes(), process only
> the head and tail pages, as any potential hugepages that are entirely
> covered by the range will already be tracked.
> 
> Note, only hugepage chunks whose current attributes are NOT mixed need to
> be added to the invalidation set, as mixed attributes already prevent
> installing a hugepage, and it's perfectly safe to install a smaller
> mapping for a gfn whose attributes aren't changing.
> 
> Reported-by: Michael Roth <michael.roth@amd.com>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>  arch/x86/kvm/mmu/mmu.c | 63 +++++++++++++++++++++++++++++++-----------
>  1 file changed, 47 insertions(+), 16 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index a284dce227a0..b324991a0f99 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -7670,9 +7670,30 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
>  }
>  
>  #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
> +static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
> +				int level)
> +{
> +	return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
> +}
> +
> +static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
> +				 int level)
> +{
> +	lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
> +}
> +
> +static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
> +			       int level)
> +{
> +	lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
> +}
> +
>  bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
>  					struct kvm_gfn_range *range)
>  {
> +	struct kvm_memory_slot *slot = range->slot;
> +	int level;
> +
>  	/*
>  	 * Zap SPTEs even if the slot can't be mapped PRIVATE.  KVM x86 only
>  	 * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
> @@ -7687,6 +7708,32 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
>  	if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
>  		return false;
>  
> +	/*
> +	 * If the head and tail pages of the range currently allow a hugepage,
> +	 * i.e. reside fully in the slot and don't have mixed attributes, then
> +	 * add each corresponding hugepage range to the ongoing invalidation,
> +	 * e.g. to prevent KVM from creating a hugepage in response to a fault
> +	 * for a gfn whose attributes aren't changing.  Note, only the range
> +	 * of gfns whose attributes are being modified needs to be explicitly
> +	 * unmapped, as that will unmap any existing hugepages.
> +	 */
> +	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
> +		gfn_t start = gfn_round_for_level(range->start, level);
> +		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
> +		gfn_t end = gfn_round_for_level(range->end, level);
> +
> +		if ((start != range->start || start + nr_pages > range->end) &&
> +		    start >= slot->base_gfn &&
> +		    start + nr_pages <= slot->base_gfn + slot->npages &&
> +		    !hugepage_test_mixed(slot, start, level))
> +			kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);

For the 'start + nr_pages > range->end' case, that seems to correspond
to when the 'start' hugepage covers the end of the range that's being
invalidated. But in that case, 'start' and 'end' hugepages are one and
the same, so the below would also trigger, and we end up updating the range
twice with the same start/end GFN of the same hugepage.
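
For illustration, with hypothetical numbers at the 2M level (nr_pages ==
0x200 gfns), take range->start = 0x200 and range->end = 0x300:

    start = gfn_round_for_level(0x200, PG_LEVEL_2M) = 0x200, and start + nr_pages = 0x400 > range->end
    end   = gfn_round_for_level(0x300, PG_LEVEL_2M) = 0x200, and end < range->end

so both checks pass their range tests and [0x200, 0x400) gets added twice
(assuming the hugepage is in-slot and not mixed).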

Not a big deal, but maybe we should adjust the above logic to only cover
the case where range->start needs to be extended. Then, if 'start' and
'end' hugepages are the same, we are done with that level:

    if (start < range->start &&
        start >= slot->base_gfn &&
        start + nr_pages <= slot->base_gfn + slot->npages &&
        !hugepage_test_mixed(slot, start, level))
            kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);

    if (start == end)
        continue;

Then what remains to be determined below is whether or not range->end needs
to be additionally extended by 'end' separate hugepage.

> +
> +		if (end < range->end &&

This seems a little weird since end is almost by definition going to be
less than or equal to range->end, so it's basically skipping the equal-to
case. To avoid needing to filter that case, maybe we should change this:

  gfn_t end = gfn_round_for_level(range->end, level);

to

  gfn_t end = gfn_round_for_level(range->end - 1, level);

since range->end is non-inclusive and we only care about hugepages that
begin before the end of the range. If we do that, then the 'end < range->end'
check will always be true and the below:

> +		if (end < range->end &&
> +		    (end + nr_pages) <= (slot->base_gfn + slot->npages) &&
> +		    !hugepage_test_mixed(slot, end, level))
> +			kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);

can be simplified to:

    if (end + nr_pages <= slot->base_gfn + slot->npages &&
        !hugepage_test_mixed(slot, end, level))
            kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
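
As a hypothetical sanity check at the 2M level: with range->end = 0x400,
the old computation gives end = 0x400, which 'end < range->end' filtered
out (there is no partial tail hugepage to track); the new computation
gives end = gfn_round_for_level(0x3ff, PG_LEVEL_2M) = 0x200, i.e. the
hugepage containing the last gfn of the range, so no extra comparison is
needed.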

Thanks,

Mike

> +	}
> +
>  	/* Unmap the old attribute page. */
>  	if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
>  		range->attr_filter = KVM_FILTER_SHARED;
> @@ -7696,23 +7743,7 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
>  	return kvm_unmap_gfn_range(kvm, range);
>  }
>  
> -static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
> -				int level)
> -{
> -	return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
> -}
>  
> -static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
> -				 int level)
> -{
> -	lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
> -}
> -
> -static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
> -			       int level)
> -{
> -	lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
> -}
>  
>  static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
>  			       gfn_t gfn, int level, unsigned long attrs)
> 
> base-commit: fd02aa45bda6d2f2fedcab70e828867332ef7e1c
> -- 
>
Sean Christopherson April 18, 2025, 3:13 p.m. UTC | #2
On Thu, Apr 17, 2025, Michael Roth wrote:
> > +	/*
> > +	 * If the head and tail pages of the range currently allow a hugepage,
> > +	 * i.e. reside fully in the slot and don't have mixed attributes, then
> > +	 * add each corresponding hugepage range to the ongoing invalidation,
> > +	 * e.g. to prevent KVM from creating a hugepage in response to a fault
> > +	 * for a gfn whose attributes aren't changing.  Note, only the range
> > +	 * of gfns whose attributes are being modified needs to be explicitly
> > +	 * unmapped, as that will unmap any existing hugepages.
> > +	 */
> > +	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
> > +		gfn_t start = gfn_round_for_level(range->start, level);
> > +		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
> > +		gfn_t end = gfn_round_for_level(range->end, level);
> > +
> > +		if ((start != range->start || start + nr_pages > range->end) &&
> > +		    start >= slot->base_gfn &&
> > +		    start + nr_pages <= slot->base_gfn + slot->npages &&
> > +		    !hugepage_test_mixed(slot, start, level))
> > +			kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);
> 
> For the 'start + nr_pages > range->end' case, that seems to correspond
> to when the 'start' hugepage covers the end of the range that's being
> invalidated.

It covers the case where range->start is hugepage aligned, but the size of the
range is less than a hugepage.
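
(Hypothetically, at the 2M level with nr_pages == 0x200: range->start = 0x400
and range->end = 0x500 gives start == range->start, but
start + nr_pages = 0x600 > range->end.)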

> But in that case, 'start' and 'end' hugepages are one and the same,

Yes.

> so the below would also trigger,

Gah, that's a goof in the computation of "end".

> and we end up updating the range twice with the same start/end GFN of the
> same hugepage.
>
> Not a big deal, but maybe we should adjust the above logic to only cover
> the case where range->start needs to be extended. Then, if 'start' and
> 'end' hugepages are the same, we are done with that level:

FWIW, this is what I was trying to do.

> 
>     if (start < range->start &&
>         start >= slot->base_gfn &&
>         start + nr_pages <= slot->base_gfn + slot->npages &&
>         !hugepage_test_mixed(slot, start, level))
>             kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);
> 
>     if (start == end)
>         continue;
> 
> Then what remains to be determined below is whether or not range->end needs
> to be additionally extended by 'end' separate hugepage.
> 
> > +
> > +		if (end < range->end &&
> 
> This seems a little weird since end is almost by definition going to be

Not almost, it is by definition.  But as above, I botched the computation of end.

> less than or equal to range->end, so it's basically skipping the equal-to
> case. To avoid needing to filter that case, maybe we should change this:
> 
>   gfn_t end = gfn_round_for_level(range->end, level);
> 
> to
> 
>   gfn_t end = gfn_round_for_level(range->end - 1, level);
> 
> since range->end is non-inclusive and we only care about hugepages that
> begin before the end of the range. If we do that, then the 'end < range->end'
> check will always be true and the below:
> 
> > +		if (end < range->end &&
> > +		    (end + nr_pages) <= (slot->base_gfn + slot->npages) &&
> > +		    !hugepage_test_mixed(slot, end, level))
> > +			kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
> 
> can be simplified to:
> 
>     if (end + nr_pages <= slot->base_gfn + slot->npages &&
>         !hugepage_test_mixed(slot, end, level))
>             kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);

That all looks good to me.  And to ensure we don't go off the rails due to bad
inputs (which are supposed to be fully validated by common KVM), we could add a
WARN to detect a non-exclusive range->end.

So this?

	if (WARN_ON_ONCE(range->end <= range->start))
		return false;

	/*
	 * If the head and tail pages of the range currently allow a hugepage,
	 * i.e. reside fully in the slot and don't have mixed attributes, then
	 * add each corresponding hugepage range to the ongoing invalidation,
	 * e.g. to prevent KVM from creating a hugepage in response to a fault
	 * for a gfn whose attributes aren't changing.  Note, only the range
	 * of gfns whose attributes are being modified needs to be explicitly
	 * unmapped, as that will unmap any existing hugepages.
	 */
	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
		gfn_t start = gfn_round_for_level(range->start, level);
		gfn_t end = gfn_round_for_level(range->end - 1, level);
		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);

		if ((start != range->start || start + nr_pages > range->end) &&
		    start >= slot->base_gfn &&
		    start + nr_pages <= slot->base_gfn + slot->npages &&
		    !hugepage_test_mixed(slot, start, level))
			kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);

		if (end == start)
			continue;

		if ((end + nr_pages) <= (slot->base_gfn + slot->npages) &&
		    !hugepage_test_mixed(slot, end, level))
			kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
	}
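
For reference, a minimal standalone sketch of the head/tail selection above,
using hypothetical slot/range values and a stubbed rounding helper (the
mixed-attributes and WARN checks are omitted); this is illustration only,
not the KVM code itself:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* 2M and 1G page sizes, expressed in 4KiB gfns (x86 convention). */
static const gfn_t pages_per_hpage[] = { 0x200, 0x40000 };

static gfn_t gfn_round(gfn_t gfn, gfn_t nr_pages)
{
	return gfn & ~(nr_pages - 1);
}

static void add_range(gfn_t start, gfn_t end)
{
	printf("  invalidate [0x%llx, 0x%llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
}

int main(void)
{
	/* Hypothetical slot and attribute-change range, in gfns. */
	const gfn_t base_gfn = 0, npages = 0x100000;
	const gfn_t r_start = 0x300, r_end = 0x500;

	printf("attribute change over [0x%llx, 0x%llx):\n",
	       (unsigned long long)r_start, (unsigned long long)r_end);

	for (size_t i = 0; i < sizeof(pages_per_hpage) / sizeof(pages_per_hpage[0]); i++) {
		gfn_t nr_pages = pages_per_hpage[i];
		gfn_t start = gfn_round(r_start, nr_pages);
		gfn_t end = gfn_round(r_end - 1, nr_pages);

		/* Head hugepage: track it only if the range doesn't already cover it. */
		if ((start != r_start || start + nr_pages > r_end) &&
		    start >= base_gfn && start + nr_pages <= base_gfn + npages)
			add_range(start, start + nr_pages);

		/* Head and tail hugepages coincide; nothing more at this level. */
		if (end == start)
			continue;

		/* Tail hugepage, if it lies entirely within the slot. */
		if (end + nr_pages <= base_gfn + npages)
			add_range(end, end + nr_pages);
	}

	return 0;
}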

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a284dce227a0..b324991a0f99 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -7670,9 +7670,30 @@  void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
 }
 
 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+				int level)
+{
+	return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
+}
+
+static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+				 int level)
+{
+	lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
+}
+
+static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+			       int level)
+{
+	lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
+}
+
 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
 					struct kvm_gfn_range *range)
 {
+	struct kvm_memory_slot *slot = range->slot;
+	int level;
+
 	/*
 	 * Zap SPTEs even if the slot can't be mapped PRIVATE.  KVM x86 only
 	 * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
@@ -7687,6 +7708,32 @@  bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
 	if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
 		return false;
 
+	/*
+	 * If the head and tail pages of the range currently allow a hugepage,
+	 * i.e. reside fully in the slot and don't have mixed attributes, then
+	 * add each corresponding hugepage range to the ongoing invalidation,
+	 * e.g. to prevent KVM from creating a hugepage in response to a fault
+	 * for a gfn whose attributes aren't changing.  Note, only the range
+	 * of gfns whose attributes are being modified needs to be explicitly
+	 * unmapped, as that will unmap any existing hugepages.
+	 */
+	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
+		gfn_t start = gfn_round_for_level(range->start, level);
+		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
+		gfn_t end = gfn_round_for_level(range->end, level);
+
+		if ((start != range->start || start + nr_pages > range->end) &&
+		    start >= slot->base_gfn &&
+		    start + nr_pages <= slot->base_gfn + slot->npages &&
+		    !hugepage_test_mixed(slot, start, level))
+			kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);
+
+		if (end < range->end &&
+		    (end + nr_pages) <= (slot->base_gfn + slot->npages) &&
+		    !hugepage_test_mixed(slot, end, level))
+			kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
+	}
+
 	/* Unmap the old attribute page. */
 	if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
 		range->attr_filter = KVM_FILTER_SHARED;
@@ -7696,23 +7743,7 @@  bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
 	return kvm_unmap_gfn_range(kvm, range);
 }
 
-static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
-				int level)
-{
-	return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
-}
 
-static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
-				 int level)
-{
-	lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
-}
-
-static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
-			       int level)
-{
-	lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
-}
 
 static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
 			       gfn_t gfn, int level, unsigned long attrs)