KVM: selftests: Don't assert on exact number of 4KiB in dirty log split test

Message ID 20240131222728.4100079-1-seanjc@google.com (mailing list archive)
State New, archived
Series KVM: selftests: Don't assert on exact number of 4KiB in dirty log split test

Commit Message

Sean Christopherson Jan. 31, 2024, 10:27 p.m. UTC
Drop dirty_log_page_splitting_test's assertion that the number of 4KiB
pages remains the same across dirty logging being enabled and disabled, as
the test doesn't guarantee that mappings outside of the memslots being
dirty logged are stable, e.g. KVM's mappings for code and pages in
memslot0 can be zapped by things like NUMA balancing.

To preserve the spirit of the check, assert that (a) the number of 4KiB
pages after splitting is _at least_ the number of 4KiB pages across all
memslots under test, and (b) the number of hugepages before splitting adds
up to the number of pages across all memslots under test.  (b) is a little
tenuous as it relies on memslot0 being incompatible with transparent
hugepages, but that holds true for now as selftests explicitly madvise()
MADV_NOHUGEPAGE for memslot0 (__vm_create() unconditionally specifies the
backing type as VM_MEM_SRC_ANONYMOUS).
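
To illustrate the arithmetic behind (a) and (b) outside of the selftest itself,
here is a standalone sketch with made-up numbers (not code from the test): a
2MiB hugepage covers 512 4KiB pages and a 1GiB hugepage covers 512 * 512 of
them, so the hugepage counts scaled to 4KiB units must add up to the number of
pages in the memslots under test, and after splitting at least that many 4KiB
mappings must exist.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pages_4k_per_2m = 512;
	uint64_t pages_4k_per_1g = 512 * 512;

	/* Hypothetical setup: two 512MiB slots, everything 2MiB-backed. */
	uint64_t host_num_pages = 2 * (512ull << 20) / 4096;
	uint64_t populated_2m = host_num_pages / pages_4k_per_2m;
	uint64_t populated_1g = 0;

	/* (b) hugepages before splitting cover exactly the test memslots. */
	assert(populated_2m * pages_4k_per_2m +
	       populated_1g * pages_4k_per_1g == host_num_pages);

	/* (a) after splitting, at least that many 4KiB mappings must exist;
	 * extra 4KiB mappings (e.g. for memslot0) are fine. */
	uint64_t pages_4k_after_split = host_num_pages + 42;
	assert(pages_4k_after_split >= host_num_pages);

	printf("checks hold for %lu pages\n", host_num_pages);
	return 0;
}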

Reported-by: Yi Lai <yi1.lai@intel.com>
Reported-by: Tao Su <tao1.su@linux.intel.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 .../x86_64/dirty_log_page_splitting_test.c    | 21 +++++++++++--------
 1 file changed, 12 insertions(+), 9 deletions(-)


base-commit: f0f3b810edda57f317d79f452056786257089667

Comments

Tao Su Feb. 6, 2024, 5:11 a.m. UTC | #1
On Wed, Jan 31, 2024 at 02:27:28PM -0800, Sean Christopherson wrote:
> Drop dirty_log_page_splitting_test's assertion that the number of 4KiB
> pages remains the same across dirty logging being enabled and disabled, as
> the test doesn't guarantee that mappings outside of the memslots being
> dirty logged are stable, e.g. KVM's mappings for code and pages in
> memslot0 can be zapped by things like NUMA balancing.
> 
> To preserve the spirit of the check, assert that (a) the number of 4KiB
> pages after splitting is _at least_ the number of 4KiB pages across all
> memslots under test, and (b) the number of hugepages before splitting adds
> up to the number of pages across all memslots under test.  (b) is a little
> tenuous as it relies on memslot0 being incompatible with transparent
> hugepages, but that holds true for now as selftests explicitly madvise()
> MADV_NOHUGEPAGE for memslot0 (__vm_create() unconditionally specifies the
> backing type as VM_MEM_SRC_ANONYMOUS).
> 
> Reported-by: Yi Lai <yi1.lai@intel.com>
> Reported-by: Tao Su <tao1.su@linux.intel.com>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>  .../x86_64/dirty_log_page_splitting_test.c    | 21 +++++++++++--------
>  1 file changed, 12 insertions(+), 9 deletions(-)
> 
> diff --git a/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c b/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c
> index 634c6bfcd572..4864cf3fae57 100644
> --- a/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c
> @@ -92,7 +92,6 @@ static void run_test(enum vm_guest_mode mode, void *unused)
>  	uint64_t host_num_pages;
>  	uint64_t pages_per_slot;
>  	int i;
> -	uint64_t total_4k_pages;
>  	struct kvm_page_stats stats_populated;
>  	struct kvm_page_stats stats_dirty_logging_enabled;
>  	struct kvm_page_stats stats_dirty_pass[ITERATIONS];
> @@ -107,6 +106,9 @@ static void run_test(enum vm_guest_mode mode, void *unused)
>  	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
>  	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
>  	pages_per_slot = host_num_pages / SLOTS;
> +	TEST_ASSERT_EQ(host_num_pages, pages_per_slot * SLOTS);
> +	TEST_ASSERT(!(host_num_pages % 512),
> +		    "Number of pages, '%lu' not a multiple of 2MiB", host_num_pages);
>  
>  	bitmaps = memstress_alloc_bitmaps(SLOTS, pages_per_slot);
>  
> @@ -165,10 +167,8 @@ static void run_test(enum vm_guest_mode mode, void *unused)
>  	memstress_free_bitmaps(bitmaps, SLOTS);
>  	memstress_destroy_vm(vm);
>  
> -	/* Make assertions about the page counts. */
> -	total_4k_pages = stats_populated.pages_4k;
> -	total_4k_pages += stats_populated.pages_2m * 512;
> -	total_4k_pages += stats_populated.pages_1g * 512 * 512;
> +	TEST_ASSERT_EQ((stats_populated.pages_2m * 512 +
> +			stats_populated.pages_1g * 512 * 512), host_num_pages);
>  
>  	/*
>  	 * Check that all huge pages were split. Since large pages can only
> @@ -180,19 +180,22 @@ static void run_test(enum vm_guest_mode mode, void *unused)
>  	 */
>  	if (dirty_log_manual_caps) {
>  		TEST_ASSERT_EQ(stats_clear_pass[0].hugepages, 0);
> -		TEST_ASSERT_EQ(stats_clear_pass[0].pages_4k, total_4k_pages);
> +		TEST_ASSERT(stats_clear_pass[0].pages_4k >= host_num_pages,
> +			    "Expected at least '%lu' 4KiB pages, found only '%lu'",
> +			    host_num_pages, stats_clear_pass[0].pages_4k);
>  		TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, stats_populated.hugepages);
>  	} else {
>  		TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, 0);
> -		TEST_ASSERT_EQ(stats_dirty_logging_enabled.pages_4k, total_4k_pages);
> +		TEST_ASSERT(stats_dirty_logging_enabled.pages_4k >= host_num_pages,
> +			    "Expected at least '%lu' 4KiB pages, found only '%lu'",
> +			    host_num_pages, stats_clear_pass[0].pages_4k);

This should print stats_dirty_logging_enabled.pages_4k, not stats_clear_pass[0].pages_4k.

Everything else looks great.

Reviewed-by: Tao Su <tao1.su@linux.intel.com>

>  	}
>  
>  	/*
>  	 * Once dirty logging is disabled and the vCPUs have touched all their
> -	 * memory again, the page counts should be the same as they were
> +	 * memory again, the hugepage counts should be the same as they were
>  	 * right after initial population of memory.
>  	 */
> -	TEST_ASSERT_EQ(stats_populated.pages_4k, stats_repopulated.pages_4k);
>  	TEST_ASSERT_EQ(stats_populated.pages_2m, stats_repopulated.pages_2m);
>  	TEST_ASSERT_EQ(stats_populated.pages_1g, stats_repopulated.pages_1g);
>  }
> 
> base-commit: f0f3b810edda57f317d79f452056786257089667
> -- 
> 2.43.0.429.g432eaa2c6b-goog
>
Sean Christopherson Feb. 6, 2024, 9:36 p.m. UTC | #2
On Wed, 31 Jan 2024 14:27:28 -0800, Sean Christopherson wrote:
> Drop dirty_log_page_splitting_test's assertion that the number of 4KiB
> pages remains the same across dirty logging being enabled and disabled, as
> the test doesn't guarantee that mappings outside of the memslots being
> dirty logged are stable, e.g. KVM's mappings for code and pages in
> memslot0 can be zapped by things like NUMA balancing.
> 
> To preserve the spirit of the check, assert that (a) the number of 4KiB
> pages after splitting is _at least_ the number of 4KiB pages across all
> memslots under test, and (b) the number of hugepages before splitting adds
> up to the number of pages across all memslots under test.  (b) is a little
> tenuous as it relies on memslot0 being incompatible with transparent
> hugepages, but that holds true for now as selftests explicitly madvise()
> MADV_NOHUGEPAGE for memslot0 (__vm_create() unconditionally specifies the
> backing type as VM_MEM_SRC_ANONYMOUS).
> 
> [...]

Applied to kvm-x86 selftests, with the assert print goof fixed.  Thanks Tao!
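
For completeness, the fixed else branch presumably reports the value that is
actually being checked, along these lines (a reconstruction, not a quote of
the applied commit):

	} else {
		TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, 0);
		/* Report the count under test, not stats_clear_pass[0]. */
		TEST_ASSERT(stats_dirty_logging_enabled.pages_4k >= host_num_pages,
			    "Expected at least '%lu' 4KiB pages, found only '%lu'",
			    host_num_pages, stats_dirty_logging_enabled.pages_4k);
	}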

[1/1] KVM: selftests: Don't assert on exact number of 4KiB in dirty log split test
      https://github.com/kvm-x86/linux/commit/6fd78beed021

--
https://github.com/kvm-x86/linux/tree/next

Patch

diff --git a/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c b/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c
index 634c6bfcd572..4864cf3fae57 100644
--- a/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c
+++ b/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c
@@ -92,7 +92,6 @@  static void run_test(enum vm_guest_mode mode, void *unused)
 	uint64_t host_num_pages;
 	uint64_t pages_per_slot;
 	int i;
-	uint64_t total_4k_pages;
 	struct kvm_page_stats stats_populated;
 	struct kvm_page_stats stats_dirty_logging_enabled;
 	struct kvm_page_stats stats_dirty_pass[ITERATIONS];
@@ -107,6 +106,9 @@  static void run_test(enum vm_guest_mode mode, void *unused)
 	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
 	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
 	pages_per_slot = host_num_pages / SLOTS;
+	TEST_ASSERT_EQ(host_num_pages, pages_per_slot * SLOTS);
+	TEST_ASSERT(!(host_num_pages % 512),
+		    "Number of pages, '%lu' not a multiple of 2MiB", host_num_pages);
 
 	bitmaps = memstress_alloc_bitmaps(SLOTS, pages_per_slot);
 
@@ -165,10 +167,8 @@  static void run_test(enum vm_guest_mode mode, void *unused)
 	memstress_free_bitmaps(bitmaps, SLOTS);
 	memstress_destroy_vm(vm);
 
-	/* Make assertions about the page counts. */
-	total_4k_pages = stats_populated.pages_4k;
-	total_4k_pages += stats_populated.pages_2m * 512;
-	total_4k_pages += stats_populated.pages_1g * 512 * 512;
+	TEST_ASSERT_EQ((stats_populated.pages_2m * 512 +
+			stats_populated.pages_1g * 512 * 512), host_num_pages);
 
 	/*
 	 * Check that all huge pages were split. Since large pages can only
@@ -180,19 +180,22 @@  static void run_test(enum vm_guest_mode mode, void *unused)
 	 */
 	if (dirty_log_manual_caps) {
 		TEST_ASSERT_EQ(stats_clear_pass[0].hugepages, 0);
-		TEST_ASSERT_EQ(stats_clear_pass[0].pages_4k, total_4k_pages);
+		TEST_ASSERT(stats_clear_pass[0].pages_4k >= host_num_pages,
+			    "Expected at least '%lu' 4KiB pages, found only '%lu'",
+			    host_num_pages, stats_clear_pass[0].pages_4k);
 		TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, stats_populated.hugepages);
 	} else {
 		TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, 0);
-		TEST_ASSERT_EQ(stats_dirty_logging_enabled.pages_4k, total_4k_pages);
+		TEST_ASSERT(stats_dirty_logging_enabled.pages_4k >= host_num_pages,
+			    "Expected at least '%lu' 4KiB pages, found only '%lu'",
+			    host_num_pages, stats_clear_pass[0].pages_4k);
 	}
 
 	/*
 	 * Once dirty logging is disabled and the vCPUs have touched all their
-	 * memory again, the page counts should be the same as they were
+	 * memory again, the hugepage counts should be the same as they were
 	 * right after initial population of memory.
 	 */
-	TEST_ASSERT_EQ(stats_populated.pages_4k, stats_repopulated.pages_4k);
 	TEST_ASSERT_EQ(stats_populated.pages_2m, stats_repopulated.pages_2m);
 	TEST_ASSERT_EQ(stats_populated.pages_1g, stats_repopulated.pages_1g);
 }
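
As a side note on the MADV_NOHUGEPAGE reliance called out in the changelog,
the snippet below is a standalone sketch (not the selftest library code) of
how anonymous memory can be opted out of transparent hugepages, which is why
memslot0 never contributes to the hugepage counts:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64 << 20;	/* 64MiB of anonymous memory */
	void *mem = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (mem == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Opt this range out of THP, as the selftests do for memslot0. */
	if (madvise(mem, len, MADV_NOHUGEPAGE)) {
		perror("madvise");
		return 1;
	}

	/* Touch the memory; faults are satisfied with 4KiB pages only. */
	for (size_t i = 0; i < len; i += 4096)
		((char *)mem)[i] = 1;

	printf("mapped and touched %zu bytes without THP\n", len);
	munmap(mem, len);
	return 0;
}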