
[v7,10/12] KVM: arm64: Open-code kvm_mmu_write_protect_pt_masked()

Message ID 20230409063000.3559991-12-ricarkol@google.com (mailing list archive)
State New, archived
Series Implement Eager Page Splitting for ARM

Commit Message

Ricardo Koller April 9, 2023, 6:29 a.m. UTC
Move the functionality of kvm_mmu_write_protect_pt_masked() into its
caller, kvm_arch_mmu_enable_log_dirty_pt_masked().  This will allow a
subsequent commit to share some of the code in
kvm_arch_mmu_enable_log_dirty_pt_masked().

Signed-off-by: Ricardo Koller <ricarkol@google.com>
---
 arch/arm64/kvm/mmu.c | 42 +++++++++++++++---------------------------
 1 file changed, 15 insertions(+), 27 deletions(-)
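
[Editor's note] For readers following the arithmetic being moved: the helper
derives the guest-physical range to write-protect from a single dirty-bitmap
word. __ffs(mask) gives the lowest set bit (the first dirty page relative to
gfn_offset) and __fls(mask) the highest, so the resulting [start, end) range
covers the span from the lowest to the highest page marked dirty. Below is a
minimal userspace sketch of the same computation; the __builtin-based
stand-ins for the kernel's __ffs()/__fls(), the 4K PAGE_SHIFT, and a 64-bit
unsigned long are assumptions for illustration, not kernel code:

	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assumed 4K pages */

	/* Userspace stand-ins for the kernel's __ffs()/__fls();
	 * assumes a 64-bit unsigned long. */
	static unsigned long my_ffs(unsigned long x) { return __builtin_ctzl(x); }
	static unsigned long my_fls(unsigned long x) { return 63 - __builtin_clzl(x); }

	int main(void)
	{
		unsigned long base_gfn = 0x1000, mask = 0x3c;	/* bits 2..5 set */
		unsigned long start = (base_gfn + my_ffs(mask)) << PAGE_SHIFT;
		unsigned long end = (base_gfn + my_fls(mask) + 1) << PAGE_SHIFT;

		/* Covers gfns 0x1002..0x1005: prints 0x1002000 - 0x1006000 */
		printf("wp range: 0x%lx - 0x%lx\n", start, end);
		return 0;
	}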

Comments

Gavin Shan April 17, 2023, 7:14 a.m. UTC | #1
On 4/9/23 2:29 PM, Ricardo Koller wrote:
> Move the functionality of kvm_mmu_write_protect_pt_masked() into its
> caller, kvm_arch_mmu_enable_log_dirty_pt_masked().  This will allow a
> subsequent commit to share some of the code in
> kvm_arch_mmu_enable_log_dirty_pt_masked().
> 
> Signed-off-by: Ricardo Koller <ricarkol@google.com>
> ---
>   arch/arm64/kvm/mmu.c | 42 +++++++++++++++---------------------------
>   1 file changed, 15 insertions(+), 27 deletions(-)
> 

Reviewed-by: Gavin Shan <gshan@redhat.com>


Patch

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index aaefabd8de89d..16fa24f761152 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1058,28 +1058,6 @@ static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
 	kvm_flush_remote_tlbs(kvm);
 }
 
-/**
- * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
- * @kvm:	The KVM pointer
- * @slot:	The memory slot associated with mask
- * @gfn_offset:	The gfn offset in memory slot
- * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
- *		slot to be write protected
- *
- * Walks bits set in mask write protects the associated pte's. Caller must
- * acquire kvm_mmu_lock.
- */
-static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
-		struct kvm_memory_slot *slot,
-		gfn_t gfn_offset, unsigned long mask)
-{
-	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
-	phys_addr_t start = (base_gfn +  __ffs(mask)) << PAGE_SHIFT;
-	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
-
-	stage2_wp_range(&kvm->arch.mmu, start, end);
-}
-
 /**
  * kvm_mmu_split_memory_region() - split the stage 2 blocks into PAGE_SIZE
  *				   pages for memory slot
@@ -1109,17 +1087,27 @@ static void kvm_mmu_split_memory_region(struct kvm *kvm, int slot)
 }
 
 /*
- * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
- * dirty pages.
+ * kvm_arch_mmu_enable_log_dirty_pt_masked() - enable dirty logging for selected pages.
+ * @kvm:	The KVM pointer
+ * @slot:	The memory slot associated with mask
+ * @gfn_offset:	The gfn offset in memory slot
+ * @mask:	The mask of pages at offset 'gfn_offset' in this memory
+ *		slot to enable dirty logging on
  *
- * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
- * enable dirty logging for them.
+ * Write-protects selected pages to enable dirty logging for them. Caller must
+ * acquire kvm->mmu_lock.
  */
 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 		struct kvm_memory_slot *slot,
 		gfn_t gfn_offset, unsigned long mask)
 {
-	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
+	phys_addr_t start = (base_gfn +  __ffs(mask)) << PAGE_SHIFT;
+	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
+
+	lockdep_assert_held_write(&kvm->mmu_lock);
+
+	stage2_wp_range(&kvm->arch.mmu, start, end);
 }
 
 static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
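
[Editor's note] The new lockdep_assert_held_write() turns the comment's
locking requirement into a runtime-checked one: on arm64, kvm->mmu_lock is
an rwlock, and this hook must run under its write side. A hedged sketch of
the expected calling pattern follows; the wrapper name and surrounding logic
are illustrative, not the actual generic KVM code:

	/*
	 * Illustrative caller: take kvm->mmu_lock for write around the
	 * arch hook, as the new lockdep assertion requires.
	 */
	static void example_enable_dirty_logging(struct kvm *kvm,
						 struct kvm_memory_slot *slot,
						 gfn_t gfn_offset,
						 unsigned long mask)
	{
		write_lock(&kvm->mmu_lock);
		if (mask)
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, slot,
								gfn_offset, mask);
		write_unlock(&kvm->mmu_lock);
	}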