
[v7,3/4] KVM: arm64: Tweak parameters of guest cache maintenance functions

Message ID 20210617105824.31752-4-wangyanan55@huawei.com (mailing list archive)
State New, archived
Series KVM: arm64: Improve efficiency of stage2 page table

Commit Message

Yanan Wang June 17, 2021, 10:58 a.m. UTC
Adjust the parameter "kvm_pfn_t pfn" of __clean_dcache_guest_page
and __invalidate_icache_guest_page to "void *va", which paves the
way for converting these two guest CMO functions into callbacks in
structure kvm_pgtable_mm_ops. No functional change.

Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
---
 arch/arm64/include/asm/kvm_mmu.h |  9 ++-------
 arch/arm64/kvm/mmu.c             | 28 +++++++++++++++-------------
 2 files changed, 17 insertions(+), 20 deletions(-)
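
For context, a minimal sketch of how the re-typed helpers could later be
plugged into kvm_pgtable_mm_ops as callbacks; the member names below are
illustrative assumptions, not necessarily the ones used in patch 4/4 of
this series:

	/*
	 * Sketch only: assumes kvm_pgtable_mm_ops grows two callbacks with
	 * the (void *va, size_t size) prototype that this patch gives the
	 * guest CMO helpers.  Member names are illustrative.
	 */
	static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
		/* ... existing allocation/translation hooks ... */
		.dcache_clean_inval_poc	= clean_dcache_guest_page,
		.icache_inval_pou	= invalidate_icache_guest_page,
	};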

Comments

Fuad Tabba June 18, 2021, 9:29 a.m. UTC | #1
Hi Yanan,

On Thu, Jun 17, 2021 at 11:58 AM Yanan Wang <wangyanan55@huawei.com> wrote:
>
> Adjust the parameter "kvm_pfn_t pfn" of __clean_dcache_guest_page
> and __invalidate_icache_guest_page to "void *va", which paves the
> way for converting these two guest CMO functions into callbacks in
> structure kvm_pgtable_mm_ops. No functional change.
>
> Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
> ---
>  arch/arm64/include/asm/kvm_mmu.h |  9 ++-------
>  arch/arm64/kvm/mmu.c             | 28 +++++++++++++++-------------
>  2 files changed, 17 insertions(+), 20 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
> index 25ed956f9af1..6844a7550392 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h
> @@ -187,10 +187,8 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
>         return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
>  }
>
> -static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
> +static inline void __clean_dcache_guest_page(void *va, size_t size)
>  {
> -       void *va = page_address(pfn_to_page(pfn));
> -
>         /*
>          * With FWB, we ensure that the guest always accesses memory using
>          * cacheable attributes, and we don't have to clean to PoC when
> @@ -203,16 +201,13 @@ static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
>         kvm_flush_dcache_to_poc(va, size);
>  }
>
> -static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
> -                                                 unsigned long size)
> +static inline void __invalidate_icache_guest_page(void *va, size_t size)
>  {
>         if (icache_is_aliasing()) {
>                 /* any kind of VIPT cache */
>                 __flush_icache_all();
>         } else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
>                 /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
> -               void *va = page_address(pfn_to_page(pfn));
> -
>                 invalidate_icache_range((unsigned long)va,
>                                         (unsigned long)va + size);
>         }
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 5742ba765ff9..b980f8a47cbb 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -126,6 +126,16 @@ static void *kvm_host_va(phys_addr_t phys)
>         return __va(phys);
>  }
>
> +static void clean_dcache_guest_page(void *va, size_t size)
> +{
> +       __clean_dcache_guest_page(va, size);
> +}
> +
> +static void invalidate_icache_guest_page(void *va, size_t size)
> +{
> +       __invalidate_icache_guest_page(va, size);
> +}
> +
>  /*
>   * Unmapping vs dcache management:
>   *
> @@ -693,16 +703,6 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
>         kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
>  }
>
> -static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
> -{
> -       __clean_dcache_guest_page(pfn, size);
> -}
> -
> -static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
> -{
> -       __invalidate_icache_guest_page(pfn, size);
> -}
> -
>  static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
>  {
>         send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
> @@ -1013,11 +1013,13 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>                 prot |= KVM_PGTABLE_PROT_W;
>
>         if (fault_status != FSC_PERM && !device)
> -               clean_dcache_guest_page(pfn, vma_pagesize);
> +               clean_dcache_guest_page(page_address(pfn_to_page(pfn)),
> +                                       vma_pagesize);
>
>         if (exec_fault) {
>                 prot |= KVM_PGTABLE_PROT_X;
> -               invalidate_icache_guest_page(pfn, vma_pagesize);
> +               invalidate_icache_guest_page(page_address(pfn_to_page(pfn)),
> +                                            vma_pagesize);
>         }
>
>         if (device)
> @@ -1219,7 +1221,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
>          * We've moved a page around, probably through CoW, so let's treat it
>          * just like a translation fault and clean the cache to the PoC.
>          */
> -       clean_dcache_guest_page(pfn, PAGE_SIZE);
> +       clean_dcache_guest_page(page_address(pfn_to_page(pfn), PAGE_SIZE);
>
>         /*
>          * The MMU notifiers will have unmapped a huge PMD before calling
> --
> 2.23.0


Reviewed-by: Fuad Tabba <tabba@google.com>

Thanks,
/fuad

Marc Zyngier June 18, 2021, 11:30 a.m. UTC | #2
On Thu, 17 Jun 2021 11:58:23 +0100,
Yanan Wang <wangyanan55@huawei.com> wrote:
> 
> Adjust the parameter "kvm_pfn_t pfn" of __clean_dcache_guest_page
> and __invalidate_icache_guest_page to "void *va", which paves the
> way for converting these two guest CMO functions into callbacks in
> structure kvm_pgtable_mm_ops. No functional change.
> 
> Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
> ---
>  arch/arm64/include/asm/kvm_mmu.h |  9 ++-------
>  arch/arm64/kvm/mmu.c             | 28 +++++++++++++++-------------
>  2 files changed, 17 insertions(+), 20 deletions(-)
>

[...]

> @@ -1219,7 +1221,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
>  	 * We've moved a page around, probably through CoW, so let's treat it
>  	 * just like a translation fault and clean the cache to the PoC.
>  	 */
> -	clean_dcache_guest_page(pfn, PAGE_SIZE);
> +	clean_dcache_guest_page(page_address(pfn_to_page(pfn), PAGE_SIZE);

This obviously doesn't compile. I have fixed it locally, but in the
future please make sure that patch series can be bisected correctly.
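
(The compiling form of that call, with the parenthesis dropped after
pfn_to_page(pfn) restored, is presumably:

	clean_dcache_guest_page(page_address(pfn_to_page(pfn)), PAGE_SIZE);

i.e. the same pattern already used in the user_mem_abort() hunk above.)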

Thanks,

	M.
Yanan Wang June 18, 2021, 1:14 p.m. UTC | #3
On 2021/6/18 19:30, Marc Zyngier wrote:
> On Thu, 17 Jun 2021 11:58:23 +0100,
> Yanan Wang <wangyanan55@huawei.com> wrote:
>> Adjust the parameter "kvm_pfn_t pfn" of __clean_dcache_guest_page
>> and __invalidate_icache_guest_page to "void *va", which paves the
>> way for converting these two guest CMO functions into callbacks in
>> structure kvm_pgtable_mm_ops. No functional change.
>>
>> Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
>> ---
>>   arch/arm64/include/asm/kvm_mmu.h |  9 ++-------
>>   arch/arm64/kvm/mmu.c             | 28 +++++++++++++++-------------
>>   2 files changed, 17 insertions(+), 20 deletions(-)
>>
> [...]
>
>> @@ -1219,7 +1221,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
>>   	 * We've moved a page around, probably through CoW, so let's treat it
>>   	 * just like a translation fault and clean the cache to the PoC.
>>   	 */
>> -	clean_dcache_guest_page(pfn, PAGE_SIZE);
>> +	clean_dcache_guest_page(page_address(pfn_to_page(pfn), PAGE_SIZE);
> This obviously doesn't compile. I have fixed it locally, but in the
> future please make sure that patch series can be bisected correctly.
Ah, yes, I see now what I missed, and this should never have
happened... Many thanks for fixing it locally for this series.
Thanks also to Fuad for the naming reference and the review.

Regards,
Yanan
> Thanks,
>
> 	M.
>

Patch

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 25ed956f9af1..6844a7550392 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -187,10 +187,8 @@  static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
 }
 
-static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
+static inline void __clean_dcache_guest_page(void *va, size_t size)
 {
-	void *va = page_address(pfn_to_page(pfn));
-
 	/*
 	 * With FWB, we ensure that the guest always accesses memory using
 	 * cacheable attributes, and we don't have to clean to PoC when
@@ -203,16 +201,13 @@  static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
 	kvm_flush_dcache_to_poc(va, size);
 }
 
-static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
-						  unsigned long size)
+static inline void __invalidate_icache_guest_page(void *va, size_t size)
 {
 	if (icache_is_aliasing()) {
 		/* any kind of VIPT cache */
 		__flush_icache_all();
 	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
 		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
-		void *va = page_address(pfn_to_page(pfn));
-
 		invalidate_icache_range((unsigned long)va,
 					(unsigned long)va + size);
 	}
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 5742ba765ff9..b980f8a47cbb 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -126,6 +126,16 @@  static void *kvm_host_va(phys_addr_t phys)
 	return __va(phys);
 }
 
+static void clean_dcache_guest_page(void *va, size_t size)
+{
+	__clean_dcache_guest_page(va, size);
+}
+
+static void invalidate_icache_guest_page(void *va, size_t size)
+{
+	__invalidate_icache_guest_page(va, size);
+}
+
 /*
  * Unmapping vs dcache management:
  *
@@ -693,16 +703,6 @@  void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
-static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
-{
-	__clean_dcache_guest_page(pfn, size);
-}
-
-static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
-{
-	__invalidate_icache_guest_page(pfn, size);
-}
-
 static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
 {
 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
@@ -1013,11 +1013,13 @@  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		prot |= KVM_PGTABLE_PROT_W;
 
 	if (fault_status != FSC_PERM && !device)
-		clean_dcache_guest_page(pfn, vma_pagesize);
+		clean_dcache_guest_page(page_address(pfn_to_page(pfn)),
+					vma_pagesize);
 
 	if (exec_fault) {
 		prot |= KVM_PGTABLE_PROT_X;
-		invalidate_icache_guest_page(pfn, vma_pagesize);
+		invalidate_icache_guest_page(page_address(pfn_to_page(pfn)),
+					     vma_pagesize);
 	}
 
 	if (device)
@@ -1219,7 +1221,7 @@  bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 	 * We've moved a page around, probably through CoW, so let's treat it
 	 * just like a translation fault and clean the cache to the PoC.
 	 */
-	clean_dcache_guest_page(pfn, PAGE_SIZE);
+	clean_dcache_guest_page(page_address(pfn_to_page(pfn), PAGE_SIZE);
 
 	/*
 	 * The MMU notifiers will have unmapped a huge PMD before calling