
[RFC,01/16] KVM: selftests: move vm_phy_pages_alloc() earlier in file

Message ID 20211005234459.430873-2-michael.roth@amd.com (mailing list archive)
State New, archived
Series KVM: selftests: Add tests for SEV, SEV-ES, and SEV-SNP guests

Commit Message

Michael Roth Oct. 5, 2021, 11:44 p.m. UTC
Subsequent patches will break some of this code out into file-local
helper functions, which will be used by functions like vm_vaddr_alloc().
Those callers are currently defined earlier in the file, so forward
declarations would otherwise be needed.

Instead, move vm_phy_pages_alloc() and its related helpers earlier in
the file, just above vm_vaddr_alloc() and friends, which are their main
users.

Signed-off-by: Michael Roth <michael.roth@amd.com>
---
 tools/testing/selftests/kvm/lib/kvm_util.c | 146 ++++++++++-----------
 1 file changed, 73 insertions(+), 73 deletions(-)

Comments

Mingwei Zhang Oct. 18, 2021, 3 p.m. UTC | #1
On Tue, Oct 5, 2021 at 4:46 PM Michael Roth <michael.roth@amd.com> wrote:
>
> Subsequent patches will break some of this code out into file-local
> helper functions, which will be used by functions like vm_vaddr_alloc(),
> which currently are defined earlier in the file, so a forward
> declaration would be needed.
>
> Instead, move it earlier in the file, just above vm_vaddr_alloc() and
> and friends, which are the main users.
>
> Signed-off-by: Michael Roth <michael.roth@amd.com>
> [...]

Why move the function implementation? Maybe just adding a declaration
at the top of kvm_util.c should suffice.
Michael Roth Oct. 21, 2021, 3:45 a.m. UTC | #2
On Mon, Oct 18, 2021 at 08:00:00AM -0700, Mingwei Zhang wrote:
> [...]
> Why move the function implementation? Maybe just adding a declaration
> at the top of kvm_util.c should suffice.

At least from working on other projects I'd gotten the impression that
forward function declarations should be avoided if they can be solved by
moving the function above the caller. Certainly don't mind taking your
suggestion and dropping this patch if that's not the case here though.
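
For context, here is a minimal sketch of the two alternatives under
discussion, using made-up file-local helper names rather than the real
kvm_util.c symbols:

#include <stddef.h>

/* Option 1: forward-declare the file-local helper near the top of the file. */
static size_t pages_needed(size_t bytes);	/* forward declaration */

static void caller(void)
{
	size_t n = pages_needed(8192);		/* caller defined first */

	(void)n;
}

static size_t pages_needed(size_t bytes)	/* definition further down */
{
	return (bytes + 4095) / 4096;
}

/*
 * Option 2 (what this patch does instead): place the pages_needed()
 * definition above caller(), which removes the need for the forward
 * declaration entirely.
 */
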
Paolo Bonzini Oct. 21, 2021, 3:20 p.m. UTC | #3
On 21/10/21 05:45, Michael Roth wrote:
>> Why move the function implementation? Maybe just adding a declaration
>> at the top of kvm_util.c should suffice.
> At least from working on other projects I'd gotten the impression that
> forward function declarations should be avoided if they can be solved by
> moving the function above the caller. Certainly don't mind taking your
> suggestion and dropping this patch if that's not the case here though.

I don't mind moving the code, so the patch is fine.

Paolo
Mingwei Zhang Oct. 26, 2021, 3:52 p.m. UTC | #4
On Wed, Oct 20, 2021 at 8:47 PM Michael Roth <michael.roth@amd.com> wrote:
>
> On Mon, Oct 18, 2021 at 08:00:00AM -0700, Mingwei Zhang wrote:
> > [...]
> >
> > Why move the function implementation? Maybe just adding a declaration
> > at the top of kvm_util.c should suffice.
>
> At least from working on other projects I'd gotten the impression that
> forward function declarations should be avoided if they can be solved by
> moving the function above the caller. Certainly don't mind taking your
> suggestion and dropping this patch if that's not the case here though.

Understood. Yes, I think it would be better to follow your experience
then. I was thinking that if you move the code, then git blame on that
function might point to you :)

Thanks.
-Mingwei
Mingwei Zhang Nov. 1, 2021, 5:43 p.m. UTC | #5
On Tue, Oct 26, 2021 at 8:52 AM Mingwei Zhang <mizhang@google.com> wrote:
>
> On Wed, Oct 20, 2021 at 8:47 PM Michael Roth <michael.roth@amd.com> wrote:
> >
> > On Mon, Oct 18, 2021 at 08:00:00AM -0700, Mingwei Zhang wrote:
> > > [...]
> > >
> > > Why move the function implementation? Maybe just adding a declaration
> > > at the top of kvm_util.c should suffice.
> >
> > At least from working on other projects I'd gotten the impression that
> > forward function declarations should be avoided if they can be solved by
> > moving the function above the caller. Certainly don't mind taking your
> > suggestion and dropping this patch if that's not the case here though.
>
> Understood. Yes, I think it would be better to follow your experience
> then. I was thinking that if you move the code and then potentially
> git blame on that function might point to you :)
>
> Thanks.
> -Mingwei


Reviewed-by: Mingwei Zhang <mizhang@google.com>

Thanks.
-Mingwei

Patch

diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 10a8ed691c66..92f59adddebe 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1145,6 +1145,79 @@  void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid)
 	list_add(&vcpu->list, &vm->vcpus);
 }
 
+/*
+ * Physical Contiguous Page Allocator
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   num - number of pages
+ *   paddr_min - Physical address minimum
+ *   memslot - Memory region to allocate page from
+ *
+ * Output Args: None
+ *
+ * Return:
+ *   Starting physical address
+ *
+ * Within the VM specified by vm, locates a range of available physical
+ * pages at or above paddr_min. If found, the pages are marked as in use
+ * and their base address is returned. A TEST_ASSERT failure occurs if
+ * not enough pages are available at or above paddr_min.
+ */
+vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+			      vm_paddr_t paddr_min, uint32_t memslot)
+{
+	struct userspace_mem_region *region;
+	sparsebit_idx_t pg, base;
+
+	TEST_ASSERT(num > 0, "Must allocate at least one page");
+
+	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
+		"not divisible by page size.\n"
+		"  paddr_min: 0x%lx page_size: 0x%x",
+		paddr_min, vm->page_size);
+
+	region = memslot2region(vm, memslot);
+	base = pg = paddr_min >> vm->page_shift;
+
+	do {
+		for (; pg < base + num; ++pg) {
+			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
+				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
+				break;
+			}
+		}
+	} while (pg && pg != base + num);
+
+	if (pg == 0) {
+		fprintf(stderr, "No guest physical page available, "
+			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
+			paddr_min, vm->page_size, memslot);
+		fputs("---- vm dump ----\n", stderr);
+		vm_dump(stderr, vm, 2);
+		abort();
+	}
+
+	for (pg = base; pg < base + num; ++pg)
+		sparsebit_clear(region->unused_phy_pages, pg);
+
+	return base * vm->page_size;
+}
+
+vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
+			     uint32_t memslot)
+{
+	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
+}
+
+/* Arbitrary minimum physical address used for virtual translation tables. */
+#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
+
+vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
+{
+	return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
+}
+
 /*
  * VM Virtual Address Unused Gap
  *
@@ -2149,79 +2222,6 @@  const char *exit_reason_str(unsigned int exit_reason)
 	return "Unknown";
 }
 
-/*
- * Physical Contiguous Page Allocator
- *
- * Input Args:
- *   vm - Virtual Machine
- *   num - number of pages
- *   paddr_min - Physical address minimum
- *   memslot - Memory region to allocate page from
- *
- * Output Args: None
- *
- * Return:
- *   Starting physical address
- *
- * Within the VM specified by vm, locates a range of available physical
- * pages at or above paddr_min. If found, the pages are marked as in use
- * and their base address is returned. A TEST_ASSERT failure occurs if
- * not enough pages are available at or above paddr_min.
- */
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-			      vm_paddr_t paddr_min, uint32_t memslot)
-{
-	struct userspace_mem_region *region;
-	sparsebit_idx_t pg, base;
-
-	TEST_ASSERT(num > 0, "Must allocate at least one page");
-
-	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
-		"not divisible by page size.\n"
-		"  paddr_min: 0x%lx page_size: 0x%x",
-		paddr_min, vm->page_size);
-
-	region = memslot2region(vm, memslot);
-	base = pg = paddr_min >> vm->page_shift;
-
-	do {
-		for (; pg < base + num; ++pg) {
-			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
-				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
-				break;
-			}
-		}
-	} while (pg && pg != base + num);
-
-	if (pg == 0) {
-		fprintf(stderr, "No guest physical page available, "
-			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
-			paddr_min, vm->page_size, memslot);
-		fputs("---- vm dump ----\n", stderr);
-		vm_dump(stderr, vm, 2);
-		abort();
-	}
-
-	for (pg = base; pg < base + num; ++pg)
-		sparsebit_clear(region->unused_phy_pages, pg);
-
-	return base * vm->page_size;
-}
-
-vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
-			     uint32_t memslot)
-{
-	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
-}
-
-/* Arbitrary minimum physical address used for virtual translation tables. */
-#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
-
-vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
-{
-	return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
-}
-
 /*
  * Address Guest Virtual to Host Virtual
  *
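
For context on how the relocated allocators are typically called, a short
usage sketch follows; the helper below and its name are illustrative only
and are not part of the selftests code:

#include <stdio.h>

#include "kvm_util.h"

/*
 * Hypothetical example: given an already-created VM, grab four contiguous
 * guest-physical pages from memslot 0 plus one page-table page.
 */
static void example_phy_alloc(struct kvm_vm *vm)
{
	/* Four contiguous pages at or above 1 MiB (page-size aligned). */
	vm_paddr_t data_gpa = vm_phy_pages_alloc(vm, 4, 0x100000, 0);

	/* One page for guest page tables, at the library's default minimum. */
	vm_paddr_t pt_gpa = vm_alloc_page_table(vm);

	/*
	 * Both helpers assert or abort() internally when no pages are
	 * available, so the returned addresses can be used directly.
	 */
	printf("data gpa: 0x%lx, page-table gpa: 0x%lx\n", data_gpa, pt_gpa);
}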