[RFC,v3,09/16] KVM: arm64: Use separate function for the mapping size in user_mem_abort()

Message ID 20201027172705.15181-10-alexandru.elisei@arm.com (mailing list archive)
State New, archived
Headers show
Series KVM: arm64: Add Statistical Profiling Extension (SPE) support | expand

Commit Message

Alexandru Elisei Oct. 27, 2020, 5:26 p.m. UTC
user_mem_abort() is already a long and complex function; let's make it
slightly easier to understand by abstracting the algorithm for choosing the
stage 2 IPA entry size into its own function.

This also makes it possible to reuse the code when guest SPE support will
be added.

Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
---
 arch/arm64/kvm/mmu.c | 55 ++++++++++++++++++++++++++------------------
 1 file changed, 33 insertions(+), 22 deletions(-)
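
The helper's fallback order (PUD, then PMD, then PTE) can be exercised
outside the kernel. Below is a minimal, self-contained sketch of the same
downgrade logic; the shift values assume a 4K-page configuration, and
supports_huge_mapping() is a hypothetical stand-in for
fault_supports_stage2_huge_mapping(), here pretending the memslot can back
a PMD-sized mapping but not a PUD-sized one.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative shift values for a 4K-page kernel configuration. */
#define PAGE_SHIFT	12	/* 4 KiB */
#define PMD_SHIFT	21	/* 2 MiB */
#define PUD_SHIFT	30	/* 1 GiB */

/*
 * Stand-in for fault_supports_stage2_huge_mapping(): pretend the
 * memslot is aligned well enough for PMD but not PUD mappings.
 */
static bool supports_huge_mapping(unsigned long size)
{
	return size <= (1UL << PMD_SHIFT);
}

/* Same downgrade order as stage2_max_pageshift(): PUD -> PMD -> PTE. */
static short max_pageshift(short vma_shift, bool must_use_pte, bool *force_pte)
{
	short shift = vma_shift;

	*force_pte = false;

	/* Logging or VM_PFNMAP VMAs are always mapped with PTEs. */
	if (must_use_pte) {
		*force_pte = true;
		shift = PAGE_SHIFT;
	}

	if (shift == PUD_SHIFT && !supports_huge_mapping(1UL << PUD_SHIFT))
		shift = PMD_SHIFT;

	if (shift == PMD_SHIFT && !supports_huge_mapping(1UL << PMD_SHIFT)) {
		*force_pte = true;
		shift = PAGE_SHIFT;
	}

	return shift;
}

int main(void)
{
	bool force_pte;
	short shift = max_pageshift(PUD_SHIFT, false, &force_pte);

	/* The PUD request falls back to PMD: prints "shift=21 force_pte=0". */
	printf("shift=%d force_pte=%d\n", shift, force_pte);
	return 0;
}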

Comments

Haibo Xu Nov. 5, 2020, 10:01 a.m. UTC | #1
On Wed, 28 Oct 2020 at 01:26, Alexandru Elisei <alexandru.elisei@arm.com> wrote:
>
> user_mem_abort() is already a long and complex function; let's make it
> slightly easier to understand by abstracting the algorithm for choosing the
> stage 2 IPA entry size into its own function.
>
> This also makes it possible to reuse the code when guest SPE support will
> be added.
>

It would be better to mention that there is "No functional change"!

Alexandru Elisei Dec. 2, 2020, 4:29 p.m. UTC | #2
Hi Haibo,

On 11/5/20 10:01 AM, Haibo Xu wrote:
> On Wed, 28 Oct 2020 at 01:26, Alexandru Elisei <alexandru.elisei@arm.com> wrote:
>> user_mem_abort() is already a long and complex function; let's make it
>> slightly easier to understand by abstracting the algorithm for choosing the
>> stage 2 IPA entry size into its own function.
>>
>> This also makes it possible to reuse the code when guest SPE support will
>> be added.
>>
> Better to mention that there is "No functional change"!

That's a good point, I'll add it.

Thanks,

Alex


Patch

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 19aacc7d64de..c3c43555490d 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -738,12 +738,43 @@  transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
 	return PAGE_SIZE;
 }
 
+static short stage2_max_pageshift(struct kvm_memory_slot *memslot,
+				  struct vm_area_struct *vma, hva_t hva,
+				  bool *force_pte)
+{
+	short pageshift;
+
+	*force_pte = false;
+
+	if (is_vm_hugetlb_page(vma))
+		pageshift = huge_page_shift(hstate_vma(vma));
+	else
+		pageshift = PAGE_SHIFT;
+
+	if (memslot_is_logging(memslot) || (vma->vm_flags & VM_PFNMAP)) {
+		*force_pte = true;
+		pageshift = PAGE_SHIFT;
+	}
+
+	if (pageshift == PUD_SHIFT &&
+	    !fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
+		pageshift = PMD_SHIFT;
+
+	if (pageshift == PMD_SHIFT &&
+	    !fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
+		*force_pte = true;
+		pageshift = PAGE_SHIFT;
+	}
+
+	return pageshift;
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  unsigned long fault_status)
 {
 	int ret = 0;
-	bool write_fault, writable, force_pte = false;
+	bool write_fault, writable, force_pte;
 	bool exec_fault;
 	bool device = false;
 	unsigned long mmu_seq;
@@ -776,27 +807,7 @@  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		return -EFAULT;
 	}
 
-	if (is_vm_hugetlb_page(vma))
-		vma_shift = huge_page_shift(hstate_vma(vma));
-	else
-		vma_shift = PAGE_SHIFT;
-
-	if (logging_active ||
-	    (vma->vm_flags & VM_PFNMAP)) {
-		force_pte = true;
-		vma_shift = PAGE_SHIFT;
-	}
-
-	if (vma_shift == PUD_SHIFT &&
-	    !fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
-	       vma_shift = PMD_SHIFT;
-
-	if (vma_shift == PMD_SHIFT &&
-	    !fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
-		force_pte = true;
-		vma_shift = PAGE_SHIFT;
-	}
-
+	vma_shift = stage2_max_pageshift(memslot, vma, hva, &force_pte);
 	vma_pagesize = 1UL << vma_shift;
 	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
 		fault_ipa &= ~(vma_pagesize - 1);
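
For reference, the unchanged masking at the end of the hunk rounds
fault_ipa down to the start of the block that will map it. A tiny
user-space sketch of that arithmetic, assuming a PMD-sized (2 MiB) block
on a 4K-page configuration and an arbitrary example IPA:

#include <stdio.h>

int main(void)
{
	unsigned long vma_pagesize = 1UL << 21;	/* PMD_SIZE: 2 MiB */
	unsigned long fault_ipa = 0x40123456UL;	/* example IPA */

	/* Same masking as user_mem_abort(): clear the in-block offset. */
	fault_ipa &= ~(vma_pagesize - 1);

	printf("0x%lx\n", fault_ipa);	/* prints 0x40000000 */
	return 0;
}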