diff mbox series

[4/4] selftests: KVM: use dirty logging to check if page stats work correctly

Message ID 20220321002638.379672-5-mizhang@google.com (mailing list archive)
State New, archived
Headers show
Series Verify dirty logging works properly with page stats | expand

Commit Message

Mingwei Zhang March 21, 2022, 12:26 a.m. UTC
When dirty logging is enabled, KVM splits all the hugepage mappings in
NPT/EPT into the smallest 4K size. This property could be used to check if
the page stats metrics work properly in KVM mmu. At the same time, this
logic might be used the other way around: using page stats to verify if
dirty logging really splits all huge pages. Moreover, when dirty logging is
disabled, KVM zaps corresponding SPTEs and we could check whether the large
pages come back when guest touches the pages again.

So add page stats checking in dirty logging performance selftest. In
particular, add checks in four locations:
 - just after vm is created;
 - after populating memory into vm but before enabling dirty logging;
 - just after turning on dirty logging;
 - after one final iteration after turning off dirty logging.

Tested using commands:
 - ./dirty_log_perf_test -s anonymous_hugetlb_1gb
 - ./dirty_log_perf_test -s anonymous_thp

Cc: Sean Christopherson <seanjc@google.com>
Cc: David Matlack <dmatlack@google.com>
Cc: Jing Zhang <jingzhangos@google.com>
Cc: Peter Xu <peterx@redhat.com>

Suggested-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Mingwei Zhang <mizhang@google.com>
---
 .../selftests/kvm/dirty_log_perf_test.c       | 52 +++++++++++++++++++
 1 file changed, 52 insertions(+)

Comments

Ben Gardon March 21, 2022, 5:55 p.m. UTC | #1
On Sun, Mar 20, 2022 at 5:26 PM Mingwei Zhang <mizhang@google.com> wrote:
>
> When dirty logging is enabled, KVM splits the all hugepage mapping in
> NPT/EPT into the smallest 4K size. This property could be used to check if

Note this is only true if eager page splitting is enabled. It would be
more accurate to say:
"While dirty logging is enabled, KVM will re-map any accessed page in
NPT/EPT at 4K."

> the page stats metrics work properly in KVM mmu. At the same time, this
> logic might be used the other way around: using page stats to verify if
> dirty logging really splits all huge pages. Moreover, when dirty logging is

It might be worth having a follow up commit which checks if eager
splitting is enabled and changes the assertions accordingly.

> disabled, KVM zaps corresponding SPTEs and we could check whether the large
> pages come back when guest touches the pages again.
>
> So add page stats checking in dirty logging performance selftest. In
> particular, add checks in three locations:
>  - just after vm is created;
>  - after populating memory into vm but before enabling dirty logging;
>  - just after turning on dirty logging.

Note a key stage here is after dirty logging is enabled, and then the
VM touches all the memory in the data region.
I believe that's the point at which you're making the assertion that
all mappings are 4k currently, which is the right place if eager
splitting is not enabled.

>  - after one final iteration after turning off dirty logging.
>
> Tested using commands:
>  - ./dirty_log_perf_test -s anonymous_hugetlb_1gb
>  - ./dirty_log_perf_test -s anonymous_thp
>
> Cc: Sean Christopherson <seanjc@google.com>
> Cc: David Matlack <dmatlack@google.com>
> Cc: Jing Zhang <jingzhangos@google.com>
> Cc: Peter Xu <peterx@redhat.com>
>
> Suggested-by: Ben Gardon <bgorden@google.com>
> Signed-off-by: Mingwei Zhang <mizhang@google.com>
> ---
>  .../selftests/kvm/dirty_log_perf_test.c       | 52 +++++++++++++++++++
>  1 file changed, 52 insertions(+)
>
> diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
> index 1954b964d1cf..ab0457d91658 100644
> --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
> +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
> @@ -19,6 +19,10 @@
>  #include "perf_test_util.h"
>  #include "guest_modes.h"
>
> +#ifdef __x86_64__
> +#include "processor.h"
> +#endif
> +
>  /* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
>  #define TEST_HOST_LOOP_N               2UL
>
> @@ -185,6 +189,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>                                  p->slots, p->backing_src,
>                                  p->partition_vcpu_memory_access);
>
> +#ifdef __x86_64__
> +       TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") == 0,
> +                   "4K page is non zero");
> +       TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
> +                   "2M page is non zero");
> +       TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
> +                   "1G page is non zero");
> +#endif
>         perf_test_set_wr_fract(vm, p->wr_fract);
>
>         guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
> @@ -222,6 +234,16 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>         pr_info("Populate memory time: %ld.%.9lds\n",
>                 ts_diff.tv_sec, ts_diff.tv_nsec);
>
> +#ifdef __x86_64__
> +       TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
> +                   "4K page is zero");
> +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_THP)

This should also handle 2M hugetlb memory.
I think there might be a library function to translate backing src
type to page size too, which could make this check cleaner.

> +               TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") != 0,
> +                           "2M page is zero");
> +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB)
> +               TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") != 0,
> +                           "1G page is zero");
> +#endif
>         /* Enable dirty logging */
>         clock_gettime(CLOCK_MONOTONIC, &start);
>         enable_dirty_logging(vm, p->slots);
> @@ -267,6 +289,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>                                 iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
>                 }
>         }
> +#ifdef __x86_64__
> +       TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
> +                   "4K page is zero after dirty logging");
> +       TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
> +                   "2M page is non-zero after dirty logging");
> +       TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
> +                   "1G page is non-zero after dirty logging");
> +#endif

Note this is after dirty logging has been enabled, AND all pages in
the data region have been written by the guest.

>
>         /* Disable dirty logging */
>         clock_gettime(CLOCK_MONOTONIC, &start);
> @@ -275,6 +305,28 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>         pr_info("Disabling dirty logging time: %ld.%.9lds\n",
>                 ts_diff.tv_sec, ts_diff.tv_nsec);
>
> +#ifdef __x86_64__
> +       /*
> +        * Increment iteration to run the vcpus again to verify if huge pages
> +        * come back.
> +        */
> +       iteration++;
> +       pr_info("Starting the final iteration to verify page stats\n");
> +
> +       for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
> +               while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id])
> +                      != iteration)
> +                       ;
> +       }

We might as well do this on all archs. Even without the stats, it at
least validates that disabling dirty logging doesn't break the VM.

> +
> +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_THP)
> +               TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") != 0,
> +                           "2M page is zero");
> +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB)
> +               TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") != 0,
> +                           "1G page is zero");
> +#endif
> +
>         /* Tell the vcpu thread to quit */
>         host_quit = true;
>         perf_test_join_vcpu_threads(nr_vcpus);
> --
> 2.35.1.894.gb6a874cedc-goog
>
Ben Gardon March 21, 2022, 6:08 p.m. UTC | #2
On Sun, Mar 20, 2022 at 5:26 PM Mingwei Zhang <mizhang@google.com> wrote:
>
> When dirty logging is enabled, KVM splits the all hugepage mapping in
> NPT/EPT into the smallest 4K size. This property could be used to check if
> the page stats metrics work properly in KVM mmu. At the same time, this
> logic might be used the other way around: using page stats to verify if
> dirty logging really splits all huge pages. Moreover, when dirty logging is
> disabled, KVM zaps corresponding SPTEs and we could check whether the large
> pages come back when guest touches the pages again.
>
> So add page stats checking in dirty logging performance selftest. In
> particular, add checks in three locations:
>  - just after vm is created;
>  - after populating memory into vm but before enabling dirty logging;
>  - just after turning on dirty logging.
>  - after one final iteration after turning off dirty logging.
>
> Tested using commands:
>  - ./dirty_log_perf_test -s anonymous_hugetlb_1gb
>  - ./dirty_log_perf_test -s anonymous_thp
>
> Cc: Sean Christopherson <seanjc@google.com>
> Cc: David Matlack <dmatlack@google.com>
> Cc: Jing Zhang <jingzhangos@google.com>
> Cc: Peter Xu <peterx@redhat.com>
>
> Suggested-by: Ben Gardon <bgorden@google.com>

Woops, got a mail bounce from this. Should be:
Suggested-by: Ben Gardon <bgardon@google.com>

> Signed-off-by: Mingwei Zhang <mizhang@google.com>
> ---
>  .../selftests/kvm/dirty_log_perf_test.c       | 52 +++++++++++++++++++
>  1 file changed, 52 insertions(+)
>
> diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
> index 1954b964d1cf..ab0457d91658 100644
> --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
> +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
> @@ -19,6 +19,10 @@
>  #include "perf_test_util.h"
>  #include "guest_modes.h"
>
> +#ifdef __x86_64__
> +#include "processor.h"
> +#endif
> +
>  /* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
>  #define TEST_HOST_LOOP_N               2UL
>
> @@ -185,6 +189,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>                                  p->slots, p->backing_src,
>                                  p->partition_vcpu_memory_access);
>
> +#ifdef __x86_64__
> +       TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") == 0,
> +                   "4K page is non zero");
> +       TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
> +                   "2M page is non zero");
> +       TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
> +                   "1G page is non zero");
> +#endif
>         perf_test_set_wr_fract(vm, p->wr_fract);
>
>         guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
> @@ -222,6 +234,16 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>         pr_info("Populate memory time: %ld.%.9lds\n",
>                 ts_diff.tv_sec, ts_diff.tv_nsec);
>
> +#ifdef __x86_64__
> +       TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
> +                   "4K page is zero");
> +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_THP)
> +               TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") != 0,
> +                           "2M page is zero");
> +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB)
> +               TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") != 0,
> +                           "1G page is zero");
> +#endif
>         /* Enable dirty logging */
>         clock_gettime(CLOCK_MONOTONIC, &start);
>         enable_dirty_logging(vm, p->slots);
> @@ -267,6 +289,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>                                 iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
>                 }
>         }
> +#ifdef __x86_64__
> +       TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
> +                   "4K page is zero after dirty logging");
> +       TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
> +                   "2M page is non-zero after dirty logging");
> +       TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
> +                   "1G page is non-zero after dirty logging");
> +#endif
>
>         /* Disable dirty logging */
>         clock_gettime(CLOCK_MONOTONIC, &start);
> @@ -275,6 +305,28 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>         pr_info("Disabling dirty logging time: %ld.%.9lds\n",
>                 ts_diff.tv_sec, ts_diff.tv_nsec);
>
> +#ifdef __x86_64__
> +       /*
> +        * Increment iteration to run the vcpus again to verify if huge pages
> +        * come back.
> +        */
> +       iteration++;
> +       pr_info("Starting the final iteration to verify page stats\n");
> +
> +       for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
> +               while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id])
> +                      != iteration)
> +                       ;
> +       }
> +
> +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_THP)
> +               TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") != 0,
> +                           "2M page is zero");
> +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB)
> +               TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") != 0,
> +                           "1G page is zero");
> +#endif
> +
>         /* Tell the vcpu thread to quit */
>         host_quit = true;
>         perf_test_join_vcpu_threads(nr_vcpus);
> --
> 2.35.1.894.gb6a874cedc-goog
>
Mingwei Zhang March 22, 2022, 5:01 a.m. UTC | #3
On Mon, Mar 21, 2022, Ben Gardon wrote:
> On Sun, Mar 20, 2022 at 5:26 PM Mingwei Zhang <mizhang@google.com> wrote:
> >
> > When dirty logging is enabled, KVM splits the all hugepage mapping in
> > NPT/EPT into the smallest 4K size. This property could be used to check if
> 
> Note this is only true if eager page splitting is enabled. It would be
> more accurate to say:
> "While dirty logging is enabled, KVM will re-map any accessed page in
> NPT/EPT at 4K."
> 
> > the page stats metrics work properly in KVM mmu. At the same time, this
> > logic might be used the other way around: using page stats to verify if
> > dirty logging really splits all huge pages. Moreover, when dirty logging is
> 
> It might be worth having a follow up commit which checks if eager
> splitting is enabled and changes the assertions accordingly.

So eager splitting is still pending for review, right? But yes, I can
add one after the feature get merged.

> 
> > disabled, KVM zaps corresponding SPTEs and we could check whether the large
> > pages come back when guest touches the pages again.
> >
> > So add page stats checking in dirty logging performance selftest. In
> > particular, add checks in three locations:
> >  - just after vm is created;
> >  - after populating memory into vm but before enabling dirty logging;
> >  - just after turning on dirty logging.
> 
> Note a key stage here is after dirty logging is enabled, and then the
> VM touches all the memory in the data region.
> I believe that's the point at which you're making the assertion that
> all mappings are 4k currently, which is the right place if eager
> splitting is not enabled.

Oh, sorry. This one should be after dirty logging is done, not 'just
after turning on dirty logging'. Will update it.

> 
> >  - after one final iteration after turning off dirty logging.
> >
> > Tested using commands:
> >  - ./dirty_log_perf_test -s anonymous_hugetlb_1gb
> >  - ./dirty_log_perf_test -s anonymous_thp
> >
> > Cc: Sean Christopherson <seanjc@google.com>
> > Cc: David Matlack <dmatlack@google.com>
> > Cc: Jing Zhang <jingzhangos@google.com>
> > Cc: Peter Xu <peterx@redhat.com>
> >
> > Suggested-by: Ben Gardon <bgorden@google.com>
> > Signed-off-by: Mingwei Zhang <mizhang@google.com>
> > ---
> >  .../selftests/kvm/dirty_log_perf_test.c       | 52 +++++++++++++++++++
> >  1 file changed, 52 insertions(+)
> >
> > diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
> > index 1954b964d1cf..ab0457d91658 100644
> > --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
> > +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
> > @@ -19,6 +19,10 @@
> >  #include "perf_test_util.h"
> >  #include "guest_modes.h"
> >
> > +#ifdef __x86_64__
> > +#include "processor.h"
> > +#endif
> > +
> >  /* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
> >  #define TEST_HOST_LOOP_N               2UL
> >
> > @@ -185,6 +189,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
> >                                  p->slots, p->backing_src,
> >                                  p->partition_vcpu_memory_access);
> >
> > +#ifdef __x86_64__
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") == 0,
> > +                   "4K page is non zero");
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
> > +                   "2M page is non zero");
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
> > +                   "1G page is non zero");
> > +#endif
> >         perf_test_set_wr_fract(vm, p->wr_fract);
> >
> >         guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
> > @@ -222,6 +234,16 @@ static void run_test(enum vm_guest_mode mode, void *arg)
> >         pr_info("Populate memory time: %ld.%.9lds\n",
> >                 ts_diff.tv_sec, ts_diff.tv_nsec);
> >
> > +#ifdef __x86_64__
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
> > +                   "4K page is zero");
> > +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_THP)
> 
> This should also handle 2M hugetlb memory.
> I think there might be a library function to translate backing src
> type to page size too, which could make this check cleaner.

Ack.
> 
> > +               TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") != 0,
> > +                           "2M page is zero");
> > +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB)
> > +               TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") != 0,
> > +                           "1G page is zero");
> > +#endif
> >         /* Enable dirty logging */
> >         clock_gettime(CLOCK_MONOTONIC, &start);
> >         enable_dirty_logging(vm, p->slots);
> > @@ -267,6 +289,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
> >                                 iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
> >                 }
> >         }
> > +#ifdef __x86_64__
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
> > +                   "4K page is zero after dirty logging");
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
> > +                   "2M page is non-zero after dirty logging");
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
> > +                   "1G page is non-zero after dirty logging");
> > +#endif
> 
> Note this is after dirty logging has been enabled, AND all pages in
> the data region have been written by the guest.
> 
> >
> >         /* Disable dirty logging */
> >         clock_gettime(CLOCK_MONOTONIC, &start);
> > @@ -275,6 +305,28 @@ static void run_test(enum vm_guest_mode mode, void *arg)
> >         pr_info("Disabling dirty logging time: %ld.%.9lds\n",
> >                 ts_diff.tv_sec, ts_diff.tv_nsec);
> >
> > +#ifdef __x86_64__
> > +       /*
> > +        * Increment iteration to run the vcpus again to verify if huge pages
> > +        * come back.
> > +        */
> > +       iteration++;
> > +       pr_info("Starting the final iteration to verify page stats\n");
> > +
> > +       for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
> > +               while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id])
> > +                      != iteration)
> > +                       ;
> > +       }
> 
> We might as well do this on all archs. Even without the stats, it at
> least validates that disabling dirty logging doesn't break the VM.
> 
Ack.
> > +
> > +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_THP)
> > +               TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") != 0,
> > +                           "2M page is zero");
> > +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB)
> > +               TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") != 0,
> > +                           "1G page is zero");
> > +#endif
> > +
> >         /* Tell the vcpu thread to quit */
> >         host_quit = true;
> >         perf_test_join_vcpu_threads(nr_vcpus);
> > --
> > 2.35.1.894.gb6a874cedc-goog
> >
Mingwei Zhang March 22, 2022, 5:09 a.m. UTC | #4
On Mon, Mar 21, 2022, Ben Gardon wrote:
> On Sun, Mar 20, 2022 at 5:26 PM Mingwei Zhang <mizhang@google.com> wrote:
> >
> > When dirty logging is enabled, KVM splits the all hugepage mapping in
> > NPT/EPT into the smallest 4K size. This property could be used to check if
> > the page stats metrics work properly in KVM mmu. At the same time, this
> > logic might be used the other way around: using page stats to verify if
> > dirty logging really splits all huge pages. Moreover, when dirty logging is
> > disabled, KVM zaps corresponding SPTEs and we could check whether the large
> > pages come back when guest touches the pages again.
> >
> > So add page stats checking in dirty logging performance selftest. In
> > particular, add checks in three locations:
> >  - just after vm is created;
> >  - after populating memory into vm but before enabling dirty logging;
> >  - just after turning on dirty logging.
> >  - after one final iteration after turning off dirty logging.
> >
> > Tested using commands:
> >  - ./dirty_log_perf_test -s anonymous_hugetlb_1gb
> >  - ./dirty_log_perf_test -s anonymous_thp
> >
> > Cc: Sean Christopherson <seanjc@google.com>
> > Cc: David Matlack <dmatlack@google.com>
> > Cc: Jing Zhang <jingzhangos@google.com>
> > Cc: Peter Xu <peterx@redhat.com>
> >
> > Suggested-by: Ben Gardon <bgorden@google.com>
> 
> Woops, got a mail bounce from this. Should be:
> Suggested-by: Ben Gardon <bgardon@google.com>
> 

Oh... sorry about that. Will discuss with you offline. Really want to
avoid this in the future.

> > Signed-off-by: Mingwei Zhang <mizhang@google.com>
> > ---
> >  .../selftests/kvm/dirty_log_perf_test.c       | 52 +++++++++++++++++++
> >  1 file changed, 52 insertions(+)
> >
> > diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
> > index 1954b964d1cf..ab0457d91658 100644
> > --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
> > +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
> > @@ -19,6 +19,10 @@
> >  #include "perf_test_util.h"
> >  #include "guest_modes.h"
> >
> > +#ifdef __x86_64__
> > +#include "processor.h"
> > +#endif
> > +
> >  /* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
> >  #define TEST_HOST_LOOP_N               2UL
> >
> > @@ -185,6 +189,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
> >                                  p->slots, p->backing_src,
> >                                  p->partition_vcpu_memory_access);
> >
> > +#ifdef __x86_64__
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") == 0,
> > +                   "4K page is non zero");
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
> > +                   "2M page is non zero");
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
> > +                   "1G page is non zero");
> > +#endif
> >         perf_test_set_wr_fract(vm, p->wr_fract);
> >
> >         guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
> > @@ -222,6 +234,16 @@ static void run_test(enum vm_guest_mode mode, void *arg)
> >         pr_info("Populate memory time: %ld.%.9lds\n",
> >                 ts_diff.tv_sec, ts_diff.tv_nsec);
> >
> > +#ifdef __x86_64__
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
> > +                   "4K page is zero");
> > +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_THP)
> > +               TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") != 0,
> > +                           "2M page is zero");
> > +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB)
> > +               TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") != 0,
> > +                           "1G page is zero");
> > +#endif
> >         /* Enable dirty logging */
> >         clock_gettime(CLOCK_MONOTONIC, &start);
> >         enable_dirty_logging(vm, p->slots);
> > @@ -267,6 +289,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
> >                                 iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
> >                 }
> >         }
> > +#ifdef __x86_64__
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
> > +                   "4K page is zero after dirty logging");
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
> > +                   "2M page is non-zero after dirty logging");
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
> > +                   "1G page is non-zero after dirty logging");
> > +#endif
> >
> >         /* Disable dirty logging */
> >         clock_gettime(CLOCK_MONOTONIC, &start);
> > @@ -275,6 +305,28 @@ static void run_test(enum vm_guest_mode mode, void *arg)
> >         pr_info("Disabling dirty logging time: %ld.%.9lds\n",
> >                 ts_diff.tv_sec, ts_diff.tv_nsec);
> >
> > +#ifdef __x86_64__
> > +       /*
> > +        * Increment iteration to run the vcpus again to verify if huge pages
> > +        * come back.
> > +        */
> > +       iteration++;
> > +       pr_info("Starting the final iteration to verify page stats\n");
> > +
> > +       for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
> > +               while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id])
> > +                      != iteration)
> > +                       ;
> > +       }
> > +
> > +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_THP)
> > +               TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") != 0,
> > +                           "2M page is zero");
> > +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB)
> > +               TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") != 0,
> > +                           "1G page is zero");
> > +#endif
> > +
> >         /* Tell the vcpu thread to quit */
> >         host_quit = true;
> >         perf_test_join_vcpu_threads(nr_vcpus);
> > --
> > 2.35.1.894.gb6a874cedc-goog
> >
Mingwei Zhang March 23, 2022, 6:21 p.m. UTC | #5
On Mon, Mar 21, 2022, Ben Gardon wrote:
> On Sun, Mar 20, 2022 at 5:26 PM Mingwei Zhang <mizhang@google.com> wrote:
> >
> > When dirty logging is enabled, KVM splits the all hugepage mapping in
> > NPT/EPT into the smallest 4K size. This property could be used to check if
> 
> Note this is only true if eager page splitting is enabled. It would be
> more accurate to say:
> "While dirty logging is enabled, KVM will re-map any accessed page in
> NPT/EPT at 4K."
> 
> > the page stats metrics work properly in KVM mmu. At the same time, this
> > logic might be used the other way around: using page stats to verify if
> > dirty logging really splits all huge pages. Moreover, when dirty logging is
> 
> It might be worth having a follow up commit which checks if eager
> splitting is enabled and changes the assertions accordingly.
> 
> > disabled, KVM zaps corresponding SPTEs and we could check whether the large
> > pages come back when guest touches the pages again.
> >
> > So add page stats checking in dirty logging performance selftest. In
> > particular, add checks in three locations:
> >  - just after vm is created;
> >  - after populating memory into vm but before enabling dirty logging;
> >  - just after turning on dirty logging.
> 
> Note a key stage here is after dirty logging is enabled, and then the
> VM touches all the memory in the data region.
> I believe that's the point at which you're making the assertion that
> all mappings are 4k currently, which is the right place if eager
> splitting is not enabled.
> 
> >  - after one final iteration after turning off dirty logging.
> >
> > Tested using commands:
> >  - ./dirty_log_perf_test -s anonymous_hugetlb_1gb
> >  - ./dirty_log_perf_test -s anonymous_thp
> >
> > Cc: Sean Christopherson <seanjc@google.com>
> > Cc: David Matlack <dmatlack@google.com>
> > Cc: Jing Zhang <jingzhangos@google.com>
> > Cc: Peter Xu <peterx@redhat.com>
> >
> > Suggested-by: Ben Gardon <bgorden@google.com>
> > Signed-off-by: Mingwei Zhang <mizhang@google.com>
> > ---
> >  .../selftests/kvm/dirty_log_perf_test.c       | 52 +++++++++++++++++++
> >  1 file changed, 52 insertions(+)
> >
> > diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
> > index 1954b964d1cf..ab0457d91658 100644
> > --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
> > +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
> > @@ -19,6 +19,10 @@
> >  #include "perf_test_util.h"
> >  #include "guest_modes.h"
> >
> > +#ifdef __x86_64__
> > +#include "processor.h"
> > +#endif
> > +
> >  /* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
> >  #define TEST_HOST_LOOP_N               2UL
> >
> > @@ -185,6 +189,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
> >                                  p->slots, p->backing_src,
> >                                  p->partition_vcpu_memory_access);
> >
> > +#ifdef __x86_64__
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") == 0,
> > +                   "4K page is non zero");
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
> > +                   "2M page is non zero");
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
> > +                   "1G page is non zero");
> > +#endif
> >         perf_test_set_wr_fract(vm, p->wr_fract);
> >
> >         guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
> > @@ -222,6 +234,16 @@ static void run_test(enum vm_guest_mode mode, void *arg)
> >         pr_info("Populate memory time: %ld.%.9lds\n",
> >                 ts_diff.tv_sec, ts_diff.tv_nsec);
> >
> > +#ifdef __x86_64__
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
> > +                   "4K page is zero");
> > +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_THP)
> 
> This should also handle 2M hugetlb memory.
> I think there might be a library function to translate backing src
> type to page size too, which could make this check cleaner.
> 
Just went through the selftest code again, it seems this logic a quite
x86 and there were no similar checks in other places. So I think I'll
just add another condition here for now.

> > +               TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") != 0,
> > +                           "2M page is zero");
> > +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB)
> > +               TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") != 0,
> > +                           "1G page is zero");
> > +#endif
> >         /* Enable dirty logging */
> >         clock_gettime(CLOCK_MONOTONIC, &start);
> >         enable_dirty_logging(vm, p->slots);
> > @@ -267,6 +289,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
> >                                 iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
> >                 }
> >         }
> > +#ifdef __x86_64__
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
> > +                   "4K page is zero after dirty logging");
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
> > +                   "2M page is non-zero after dirty logging");
> > +       TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
> > +                   "1G page is non-zero after dirty logging");
> > +#endif
> 
> Note this is after dirty logging has been enabled, AND all pages in
> the data region have been written by the guest.
> 
> >
> >         /* Disable dirty logging */
> >         clock_gettime(CLOCK_MONOTONIC, &start);
> > @@ -275,6 +305,28 @@ static void run_test(enum vm_guest_mode mode, void *arg)
> >         pr_info("Disabling dirty logging time: %ld.%.9lds\n",
> >                 ts_diff.tv_sec, ts_diff.tv_nsec);
> >
> > +#ifdef __x86_64__
> > +       /*
> > +        * Increment iteration to run the vcpus again to verify if huge pages
> > +        * come back.
> > +        */
> > +       iteration++;
> > +       pr_info("Starting the final iteration to verify page stats\n");
> > +
> > +       for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
> > +               while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id])
> > +                      != iteration)
> > +                       ;
> > +       }
> 
> We might as well do this on all archs. Even without the stats, it at
> least validates that disabling dirty logging doesn't break the VM.
> 
> > +
> > +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_THP)
> > +               TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") != 0,
> > +                           "2M page is zero");
> > +       if (p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB)
> > +               TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") != 0,
> > +                           "1G page is zero");
> > +#endif
> > +
> >         /* Tell the vcpu thread to quit */
> >         host_quit = true;
> >         perf_test_join_vcpu_threads(nr_vcpus);
> > --
> > 2.35.1.894.gb6a874cedc-goog
> >
diff mbox series

Patch

diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index 1954b964d1cf..ab0457d91658 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -19,6 +19,10 @@ 
 #include "perf_test_util.h"
 #include "guest_modes.h"
 
+#ifdef __x86_64__
+#include "processor.h"
+#endif
+
 /* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
 #define TEST_HOST_LOOP_N		2UL
 
@@ -185,6 +189,14 @@  static void run_test(enum vm_guest_mode mode, void *arg)
 				 p->slots, p->backing_src,
 				 p->partition_vcpu_memory_access);
 
+#ifdef __x86_64__
+	TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") == 0,
+		    "4K page is non zero");
+	TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
+		    "2M page is non zero");
+	TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
+		    "1G page is non zero");
+#endif
 	perf_test_set_wr_fract(vm, p->wr_fract);
 
 	guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
@@ -222,6 +234,16 @@  static void run_test(enum vm_guest_mode mode, void *arg)
 	pr_info("Populate memory time: %ld.%.9lds\n",
 		ts_diff.tv_sec, ts_diff.tv_nsec);
 
+#ifdef __x86_64__
+	TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
+		    "4K page is zero");
+	if (p->backing_src == VM_MEM_SRC_ANONYMOUS_THP)
+		TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") != 0,
+			    "2M page is zero");
+	if (p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB)
+		TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") != 0,
+			    "1G page is zero");
+#endif
 	/* Enable dirty logging */
 	clock_gettime(CLOCK_MONOTONIC, &start);
 	enable_dirty_logging(vm, p->slots);
@@ -267,6 +289,14 @@  static void run_test(enum vm_guest_mode mode, void *arg)
 				iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
 		}
 	}
+#ifdef __x86_64__
+	TEST_ASSERT(vm_get_single_stat(vm, "pages_4k") != 0,
+		    "4K page is zero after dirty logging");
+	TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") == 0,
+		    "2M page is non-zero after dirty logging");
+	TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") == 0,
+		    "1G page is non-zero after dirty logging");
+#endif
 
 	/* Disable dirty logging */
 	clock_gettime(CLOCK_MONOTONIC, &start);
@@ -275,6 +305,28 @@  static void run_test(enum vm_guest_mode mode, void *arg)
 	pr_info("Disabling dirty logging time: %ld.%.9lds\n",
 		ts_diff.tv_sec, ts_diff.tv_nsec);
 
+#ifdef __x86_64__
+	/*
+	 * Increment iteration to run the vcpus again to verify if huge pages
+	 * come back.
+	 */
+	iteration++;
+	pr_info("Starting the final iteration to verify page stats\n");
+
+	for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+		while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id])
+		       != iteration)
+			;
+	}
+
+	if (p->backing_src == VM_MEM_SRC_ANONYMOUS_THP)
+		TEST_ASSERT(vm_get_single_stat(vm, "pages_2m") != 0,
+			    "2M page is zero");
+	if (p->backing_src == VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB)
+		TEST_ASSERT(vm_get_single_stat(vm, "pages_1g") != 0,
+			    "1G page is zero");
+#endif
+
 	/* Tell the vcpu thread to quit */
 	host_quit = true;
 	perf_test_join_vcpu_threads(nr_vcpus);