[v2] KVM: x86/mmu: Clean up function comments for dirty logging APIs

Message ID 20240802202006.340854-1-seanjc@google.com (mailing list archive)
State New, archived
Series [v2] KVM: x86/mmu: Clean up function comments for dirty logging APIs

Commit Message

Sean Christopherson Aug. 2, 2024, 8:20 p.m. UTC
Rework the function comment for kvm_arch_mmu_enable_log_dirty_pt_masked()
into the body of the function, as it has gotten a bit stale, is harder to
read without the code context, and is the last source of warnings for W=1
builds in KVM x86 due to using a kernel-doc comment without documenting
all parameters.

Opportunistically subsume the function comments for
kvm_mmu_write_protect_pt_masked() and kvm_mmu_clear_dirty_pt_masked(), as
there is no value in regurgitating similar information at a higher level,
and capturing the differences between write-protection and PML-based dirty
logging is best done in a common location.

No functional change intended.

Cc: David Matlack <dmatlack@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---

v2: Put the comments in the function body. [David]

v1: https://lore.kernel.org/all/20240611215805.340664-1-seanjc@google.com

 arch/x86/kvm/mmu/mmu.c | 48 +++++++++++++-----------------------------
 1 file changed, 15 insertions(+), 33 deletions(-)


base-commit: 332d2c1d713e232e163386c35a3ba0c1b90df83f
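
For context on the W=1 warnings: a comment opened with "/**" is parsed as
kernel-doc, and W=1 builds warn when any parameter is left undocumented,
whereas a plain "/*" comment is ignored by kernel-doc entirely. A minimal
illustration (example_fn() is a made-up function, not from this patch):

        /**
         * example_fn - kernel-doc comment; W=1 warns that @bar is not described
         * @foo: the first parameter
         */
        static int example_fn(int foo, int bar)
        {
                return foo + bar;
        }

        /*
         * Plain block comment: same information, but no kernel-doc
         * obligations.  This is the form the patch converts to.
         */
        static int example_fn2(int foo, int bar)
        {
                return foo + bar;
        }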

Comments

Huang, Kai Aug. 7, 2024, 5:39 a.m. UTC | #1
On Fri, 2024-08-02 at 13:20 -0700, Sean Christopherson wrote:
> Rework the function comment for kvm_arch_mmu_enable_log_dirty_pt_masked()
> into the body of the function, as it has gotten a bit stale, is harder to
> read without the code context, and is the last source of warnings for W=1
> builds in KVM x86 due to using a kernel-doc comment without documenting
> all parameters.
> 
> Opportunistically subsume the function comments for
> kvm_mmu_write_protect_pt_masked() and kvm_mmu_clear_dirty_pt_masked(), as
> there is no value in regurgitating similar information at a higher level,
> and capturing the differences between write-protection and PML-based dirty
> logging is best done in a common location.
> 
> No functional change intended.
> 
> Cc: David Matlack <dmatlack@google.com>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> 

Reviewed-by: Kai Huang <kai.huang@intel.com>

[...]

>  
> -	/* Now handle 4K PTEs.  */
> +	/*
> +	 * (Re)Enable dirty logging for all 4KiB SPTEs that map the GFNs in
> +	 * mask.  If PML is enabled and the and the GFN doesn't need to be
				            ^
					double "and the"
Gupta, Pankaj Aug. 7, 2024, 7:25 a.m. UTC | #2
> Rework the function comment for kvm_arch_mmu_enable_log_dirty_pt_masked()
> into the body of the function, as it has gotten a bit stale, is harder to
> read without the code context, and is the last source of warnings for W=1
> builds in KVM x86 due to using a kernel-doc comment without documenting
> all parameters.
> 
> Opportunistically subsume the function comments for
> kvm_mmu_write_protect_pt_masked() and kvm_mmu_clear_dirty_pt_masked(), as
> there is no value in regurgitating similar information at a higher level,
> and capturing the differences between write-protection and PML-based dirty
> logging is best done in a common location.
> 
> No functional change intended.
> 
> Cc: David Matlack <dmatlack@google.com>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
> 
> v2: Put the comments in the function body. [David]
> 
> v1: https://lore.kernel.org/all/20240611215805.340664-1-seanjc@google.com
> 
>   arch/x86/kvm/mmu/mmu.c | 48 +++++++++++++-----------------------------
>   1 file changed, 15 insertions(+), 33 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 901be9e420a4..45e7e9bd5e76 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -1307,15 +1307,6 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
>   	return flush;
>   }
>   
> -/**
> - * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
> - * @kvm: kvm instance
> - * @slot: slot to protect
> - * @gfn_offset: start of the BITS_PER_LONG pages we care about
> - * @mask: indicates which pages we should protect
> - *
> - * Used when we do not need to care about huge page mappings.
> - */
>   static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
>   				     struct kvm_memory_slot *slot,
>   				     gfn_t gfn_offset, unsigned long mask)
> @@ -1339,16 +1330,6 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
>   	}
>   }
>   
> -/**
> - * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
> - * protect the page if the D-bit isn't supported.
> - * @kvm: kvm instance
> - * @slot: slot to clear D-bit
> - * @gfn_offset: start of the BITS_PER_LONG pages we care about
> - * @mask: indicates which pages we should clear D-bit
> - *
> - * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
> - */
>   static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
>   					 struct kvm_memory_slot *slot,
>   					 gfn_t gfn_offset, unsigned long mask)
> @@ -1372,24 +1353,16 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
>   	}
>   }
>   
> -/**
> - * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
> - * PT level pages.
> - *
> - * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
> - * enable dirty logging for them.
> - *
> - * We need to care about huge page mappings: e.g. during dirty logging we may
> - * have such mappings.
> - */
>   void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
>   				struct kvm_memory_slot *slot,
>   				gfn_t gfn_offset, unsigned long mask)
>   {
>   	/*
> -	 * Huge pages are NOT write protected when we start dirty logging in
> -	 * initially-all-set mode; must write protect them here so that they
> -	 * are split to 4K on the first write.
> +	 * If the slot was assumed to be "initially all dirty", write-protect
> +	 * huge pages to ensure they are split to 4KiB on the first write (KVM
> +	 * dirty logs at 4KiB granularity). If eager page splitting is enabled,
> +	 * immediately try to split huge pages, e.g. so that vCPUs don't get
> +	 * saddled with the cost of splitting.
>   	 *
>   	 * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
>   	 * of memslot has no such restriction, so the range can cross two large
> @@ -1411,7 +1384,16 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
>   						       PG_LEVEL_2M);
>   	}
>   
> -	/* Now handle 4K PTEs.  */
> +	/*
> +	 * (Re)Enable dirty logging for all 4KiB SPTEs that map the GFNs in
> +	 * mask.  If PML is enabled and the and the GFN doesn't need to be
> +	 * write-protected for other reasons, e.g. shadow paging, clear the
> +	 * Dirty bit.  Otherwise clear the Writable bit.
> +	 *
> +	 * Note that kvm_mmu_clear_dirty_pt_masked() is called whenever PML is
> +	 * enabled but it chooses between clearing the Dirty bit and Writeable
> +	 * bit based on the context.
> +	 */
>   	if (kvm_x86_ops.cpu_dirty_log_size)
>   		kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
>   	else

Thanks for fixing this comment. Indeed, it previously took a bit of digging
in the code to work out that the condition means PML is enabled; I ran into
this as well.
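
A self-describing helper would make that check read at a glance. A
hypothetical sketch (not something this patch adds; kvm_dirty_log_uses_pml()
is an invented name):

        /*
         * Hypothetical wrapper: a non-zero cpu_dirty_log_size in kvm_x86_ops
         * is how KVM x86 indicates that hardware-assisted (PML) dirty logging
         * is in use.
         */
        static inline bool kvm_dirty_log_uses_pml(void)
        {
                return !!kvm_x86_ops.cpu_dirty_log_size;
        }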

Reviewed-by: Pankaj Gupta <pankaj.gupta@amd.com>


> 
> base-commit: 332d2c1d713e232e163386c35a3ba0c1b90df83f
Sean Christopherson Aug. 23, 2024, 11:47 p.m. UTC | #3
On Fri, 02 Aug 2024 13:20:06 -0700, Sean Christopherson wrote:
> Rework the function comment for kvm_arch_mmu_enable_log_dirty_pt_masked()
> into the body of the function, as it has gotten a bit stale, is harder to
> read without the code context, and is the last source of warnings for W=1
> builds in KVM x86 due to using a kernel-doc comment without documenting
> all parameters.
> 
> Opportunistically subsume the function comments for
> kvm_mmu_write_protect_pt_masked() and kvm_mmu_clear_dirty_pt_masked(), as
> there is no value in regurgitating similar information at a higher level,
> and capturing the differences between write-protection and PML-based dirty
> logging is best done in a common location.
> 
> [...]

Applied to kvm-x86 mmu, thanks!

[1/1] KVM: x86/mmu: Clean up function comments for dirty logging APIs
      https://github.com/kvm-x86/linux/commit/acf2923271ef

--
https://github.com/kvm-x86/linux/tree/next

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 901be9e420a4..45e7e9bd5e76 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1307,15 +1307,6 @@  static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 	return flush;
 }
 
-/**
- * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
- * @kvm: kvm instance
- * @slot: slot to protect
- * @gfn_offset: start of the BITS_PER_LONG pages we care about
- * @mask: indicates which pages we should protect
- *
- * Used when we do not need to care about huge page mappings.
- */
 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 				     struct kvm_memory_slot *slot,
 				     gfn_t gfn_offset, unsigned long mask)
@@ -1339,16 +1330,6 @@  static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 	}
 }
 
-/**
- * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
- * protect the page if the D-bit isn't supported.
- * @kvm: kvm instance
- * @slot: slot to clear D-bit
- * @gfn_offset: start of the BITS_PER_LONG pages we care about
- * @mask: indicates which pages we should clear D-bit
- *
- * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
- */
 static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 					 struct kvm_memory_slot *slot,
 					 gfn_t gfn_offset, unsigned long mask)
@@ -1372,24 +1353,16 @@  static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 	}
 }
 
-/**
- * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
- * PT level pages.
- *
- * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
- * enable dirty logging for them.
- *
- * We need to care about huge page mappings: e.g. during dirty logging we may
- * have such mappings.
- */
 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 				struct kvm_memory_slot *slot,
 				gfn_t gfn_offset, unsigned long mask)
 {
 	/*
-	 * Huge pages are NOT write protected when we start dirty logging in
-	 * initially-all-set mode; must write protect them here so that they
-	 * are split to 4K on the first write.
+	 * If the slot was assumed to be "initially all dirty", write-protect
+	 * huge pages to ensure they are split to 4KiB on the first write (KVM
+	 * dirty logs at 4KiB granularity). If eager page splitting is enabled,
+	 * immediately try to split huge pages, e.g. so that vCPUs don't get
+	 * saddled with the cost of splitting.
 	 *
 	 * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
 	 * of memslot has no such restriction, so the range can cross two large
@@ -1411,7 +1384,16 @@  void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 						       PG_LEVEL_2M);
 	}
 
-	/* Now handle 4K PTEs.  */
+	/*
+	 * (Re)Enable dirty logging for all 4KiB SPTEs that map the GFNs in
+	 * mask.  If PML is enabled and the and the GFN doesn't need to be
+	 * write-protected for other reasons, e.g. shadow paging, clear the
+	 * Dirty bit.  Otherwise clear the Writable bit.
+	 *
+	 * Note that kvm_mmu_clear_dirty_pt_masked() is called whenever PML is
+	 * enabled but it chooses between clearing the Dirty bit and Writeable
+	 * bit based on the context.
+	 */
 	if (kvm_x86_ops.cpu_dirty_log_size)
 		kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
 	else
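
For reference, the (gfn_offset, mask) calling convention used by all three
functions covers a 64-GFN window: gfn_offset is 64-aligned relative to the
slot, and bit N of mask selects GFN slot->base_gfn + gfn_offset + N. A
condensed sketch of the decode loop the masked helpers share in mmu.c, with
the per-GFN work elided:

        while (mask) {
                /* __ffs() returns the index of the lowest set bit in mask. */
                gfn_t gfn = slot->base_gfn + gfn_offset + __ffs(mask);

                /*
                 * ... look up the 4KiB rmap/SPTE for @gfn and either
                 * write-protect it or clear its Dirty bit ...
                 */

                /* Clear the lowest set bit and advance to the next GFN. */
                mask &= mask - 1;
        }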