
[2/4] KVM: arm64: Simplify the sanitise_mte_tags() logic

Message ID 20220705142619.4135905-3-catalin.marinas@arm.com (mailing list archive)
State New, archived
Series arm64: mte: Fix racing on MTE tag initialisation

Commit Message

Catalin Marinas July 5, 2022, 2:26 p.m. UTC
Currently sanitise_mte_tags() checks if it's an online page before
attempting to sanitise the tags. Such detection should be done in the
caller via the VM_MTE_ALLOWED vma flag. Since kvm_set_spte_gfn() does
not have the vma, leave the page unmapped if not already tagged. Tag
initialisation will be done on a subsequent access fault in
user_mem_abort().

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Steven Price <steven.price@arm.com>
Cc: Peter Collingbourne <pcc@google.com>
---
 arch/arm64/kvm/mmu.c | 38 ++++++++++++++------------------------
 1 file changed, 14 insertions(+), 24 deletions(-)

Comments

Peter Collingbourne July 8, 2022, 11 p.m. UTC | #1
On Tue, Jul 5, 2022 at 7:26 AM Catalin Marinas <catalin.marinas@arm.com> wrote:
>
> Currently sanitise_mte_tags() checks if it's an online page before
> attempting to sanitise the tags. Such detection should be done in the
> caller via the VM_MTE_ALLOWED vma flag. Since kvm_set_spte_gfn() does
> not have the vma, leave the page unmapped if not already tagged. Tag
> initialisation will be done on a subsequent access fault in
> user_mem_abort().
>
> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
> Cc: Will Deacon <will@kernel.org>
> Cc: Marc Zyngier <maz@kernel.org>
> Cc: Steven Price <steven.price@arm.com>
> Cc: Peter Collingbourne <pcc@google.com>
> ---
>  arch/arm64/kvm/mmu.c | 38 ++++++++++++++------------------------
>  1 file changed, 14 insertions(+), 24 deletions(-)
>
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 9cfa516452e1..35850f17ae08 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -1056,23 +1056,14 @@ static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
>   * - mmap_lock protects between a VM faulting a page in and the VMM performing
>   *   an mprotect() to add VM_MTE
>   */
> -static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
> -                            unsigned long size)
> +static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
> +                             unsigned long size)
>  {
>         unsigned long i, nr_pages = size >> PAGE_SHIFT;
>         struct page *page;

Did you intend to change this to "struct page *page =
pfn_to_page(pfn);"? As things are, I get a kernel panic if I try to
start a VM with MTE enabled. The VM boots after making my suggested
change though.
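
In case it's useful, here is a minimal sketch of how the start of
sanitise_mte_tags() would look with that one-line change applied (the
rest of the function as in the patch; the loop body is only summarised
here):

static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
                              unsigned long size)
{
        unsigned long i, nr_pages = size >> PAGE_SHIFT;
        /* initialise from the pfn directly now that pfn_to_online_page() is gone */
        struct page *page = pfn_to_page(pfn);

        if (!kvm_has_mte(kvm))
                return;

        for (i = 0; i < nr_pages; i++, page++) {
                if (!page_mte_tagged(page)) {
                        /* clear/initialise the tags as in the existing loop body */
                        set_page_mte_tagged(page);
                }
        }
}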

Peter

>
>         if (!kvm_has_mte(kvm))
> -               return 0;
> -
> -       /*
> -        * pfn_to_online_page() is used to reject ZONE_DEVICE pages
> -        * that may not support tags.
> -        */
> -       page = pfn_to_online_page(pfn);
> -
> -       if (!page)
> -               return -EFAULT;
> +               return;
>
>         for (i = 0; i < nr_pages; i++, page++) {
>                 if (!page_mte_tagged(page)) {
> @@ -1080,8 +1071,6 @@ static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
>                         set_page_mte_tagged(page);
>                 }
>         }
> -
> -       return 0;
>  }
>
>  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
> @@ -1092,7 +1081,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>         bool write_fault, writable, force_pte = false;
>         bool exec_fault;
>         bool device = false;
> -       bool shared;
>         unsigned long mmu_seq;
>         struct kvm *kvm = vcpu->kvm;
>         struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
> @@ -1142,8 +1130,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>                 vma_shift = get_vma_page_shift(vma, hva);
>         }
>
> -       shared = (vma->vm_flags & VM_SHARED);
> -
>         switch (vma_shift) {
>  #ifndef __PAGETABLE_PMD_FOLDED
>         case PUD_SHIFT:
> @@ -1264,12 +1250,13 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>
>         if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
>                 /* Check the VMM hasn't introduced a new VM_SHARED VMA */
> -               if (!shared)
> -                       ret = sanitise_mte_tags(kvm, pfn, vma_pagesize);
> -               else
> +               if ((vma->vm_flags & VM_MTE_ALLOWED) &&
> +                   !(vma->vm_flags & VM_SHARED)) {
> +                       sanitise_mte_tags(kvm, pfn, vma_pagesize);
> +               } else {
>                         ret = -EFAULT;
> -               if (ret)
>                         goto out_unlock;
> +               }
>         }
>
>         if (writable)
> @@ -1491,15 +1478,18 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
>  bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
>  {
>         kvm_pfn_t pfn = pte_pfn(range->pte);
> -       int ret;
>
>         if (!kvm->arch.mmu.pgt)
>                 return false;
>
>         WARN_ON(range->end - range->start != 1);
>
> -       ret = sanitise_mte_tags(kvm, pfn, PAGE_SIZE);
> -       if (ret)
> +       /*
> +        * If the page isn't tagged, defer to user_mem_abort() for sanitising
> +        * the MTE tags. The S2 pte should have been unmapped by
> +        * mmu_notifier_invalidate_range_end().
> +        */
> +       if (kvm_has_mte(kvm) && !page_mte_tagged(pfn_to_page(pfn)))
>                 return false;
>
>         /*
Catalin Marinas Sept. 1, 2022, 10:42 a.m. UTC | #2
On Fri, Jul 08, 2022 at 04:00:01PM -0700, Peter Collingbourne wrote:
> On Tue, Jul 5, 2022 at 7:26 AM Catalin Marinas <catalin.marinas@arm.com> wrote:
> > diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> > index 9cfa516452e1..35850f17ae08 100644
> > --- a/arch/arm64/kvm/mmu.c
> > +++ b/arch/arm64/kvm/mmu.c
> > @@ -1056,23 +1056,14 @@ static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
> >   * - mmap_lock protects between a VM faulting a page in and the VMM performing
> >   *   an mprotect() to add VM_MTE
> >   */
> > -static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
> > -                            unsigned long size)
> > +static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
> > +                             unsigned long size)
> >  {
> >         unsigned long i, nr_pages = size >> PAGE_SHIFT;
> >         struct page *page;
> 
> Did you intend to change this to "struct page *page =
> pfn_to_page(pfn);"? As things are, I get a kernel panic if I try to
> start a VM with MTE enabled. The VM boots after making my suggested
> change though.

Yes, indeed. I think you fixed it when reposting together with the other
patches.

Sorry for the delay, too much holiday this summer ;).

Patch

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 9cfa516452e1..35850f17ae08 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1056,23 +1056,14 @@  static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
  * - mmap_lock protects between a VM faulting a page in and the VMM performing
  *   an mprotect() to add VM_MTE
  */
-static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
-			     unsigned long size)
+static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
+			      unsigned long size)
 {
 	unsigned long i, nr_pages = size >> PAGE_SHIFT;
 	struct page *page;
 
 	if (!kvm_has_mte(kvm))
-		return 0;
-
-	/*
-	 * pfn_to_online_page() is used to reject ZONE_DEVICE pages
-	 * that may not support tags.
-	 */
-	page = pfn_to_online_page(pfn);
-
-	if (!page)
-		return -EFAULT;
+		return;
 
 	for (i = 0; i < nr_pages; i++, page++) {
 		if (!page_mte_tagged(page)) {
@@ -1080,8 +1071,6 @@  static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
 			set_page_mte_tagged(page);
 		}
 	}
-
-	return 0;
 }
 
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
@@ -1092,7 +1081,6 @@  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	bool write_fault, writable, force_pte = false;
 	bool exec_fault;
 	bool device = false;
-	bool shared;
 	unsigned long mmu_seq;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
@@ -1142,8 +1130,6 @@  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		vma_shift = get_vma_page_shift(vma, hva);
 	}
 
-	shared = (vma->vm_flags & VM_SHARED);
-
 	switch (vma_shift) {
 #ifndef __PAGETABLE_PMD_FOLDED
 	case PUD_SHIFT:
@@ -1264,12 +1250,13 @@  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
 	if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
 		/* Check the VMM hasn't introduced a new VM_SHARED VMA */
-		if (!shared)
-			ret = sanitise_mte_tags(kvm, pfn, vma_pagesize);
-		else
+		if ((vma->vm_flags & VM_MTE_ALLOWED) &&
+		    !(vma->vm_flags & VM_SHARED)) {
+			sanitise_mte_tags(kvm, pfn, vma_pagesize);
+		} else {
 			ret = -EFAULT;
-		if (ret)
 			goto out_unlock;
+		}
 	}
 
 	if (writable)
@@ -1491,15 +1478,18 @@  bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
 	kvm_pfn_t pfn = pte_pfn(range->pte);
-	int ret;
 
 	if (!kvm->arch.mmu.pgt)
 		return false;
 
 	WARN_ON(range->end - range->start != 1);
 
-	ret = sanitise_mte_tags(kvm, pfn, PAGE_SIZE);
-	if (ret)
+	/*
+	 * If the page isn't tagged, defer to user_mem_abort() for sanitising
+	 * the MTE tags. The S2 pte should have been unmapped by
+	 * mmu_notifier_invalidate_range_end().
+	 */
+	if (kvm_has_mte(kvm) && !page_mte_tagged(pfn_to_page(pfn)))
 		return false;
 
 	/*