[RFC,v5,01/29] KVM: selftests: Add function to allow one-to-one GVA to GPA mappings

Message ID 20231212204647.2170650-2-sagis@google.com (mailing list archive)
State New
Series TDX KVM selftests

Commit Message

Sagi Shahar Dec. 12, 2023, 8:46 p.m. UTC
From: Ackerley Tng <ackerleytng@google.com>

One-to-one GVA to GPA mappings can be used in the guest to set up boot
sequences during which paging is enabled, hence requiring a transition
from using physical to virtual addresses in consecutive instructions.
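
For illustration, a minimal usage sketch (the vaddr_min of 0x100000 and
memslot 0 below are placeholder values, not taken from this series):

	/* Allocate a boot-stub page whose GVA equals its GPA. */
	vm_vaddr_t stub = vm_vaddr_alloc_1to1(vm, vm->page_size, 0x100000, 0);

	/*
	 * Code placed at 'stub' can be entered with paging disabled and
	 * keep executing after paging is enabled, since the virtual
	 * address of the next instruction resolves to the same physical
	 * address.
	 */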

Signed-off-by: Ackerley Tng <ackerleytng@google.com>
Signed-off-by: Ryan Afranji <afranji@google.com>
Signed-off-by: Sagi Shahar <sagis@google.com>
---
 .../selftests/kvm/include/kvm_util_base.h     |  2 +
 tools/testing/selftests/kvm/lib/kvm_util.c    | 63 ++++++++++++++++---
 2 files changed, 55 insertions(+), 10 deletions(-)

Comments

Binbin Wu Feb. 21, 2024, 1:43 a.m. UTC | #1
On 12/13/2023 4:46 AM, Sagi Shahar wrote:
> From: Ackerley Tng <ackerleytng@google.com>
>
> One-to-one GVA to GPA mappings can be used in the guest to set up boot
> sequences during which paging is enabled, hence requiring a transition
> from using physical to virtual addresses in consecutive instructions.
>
> Signed-off-by: Ackerley Tng <ackerleytng@google.com>
> Signed-off-by: Ryan Afranji <afranji@google.com>
> Signed-off-by: Sagi Shahar <sagis@google.com>
[...]
> +vm_vaddr_t vm_vaddr_alloc_1to1(struct kvm_vm *vm, size_t sz,
> +			       vm_vaddr_t vaddr_min, uint32_t data_memslot)
> +{
> +	vm_vaddr_t gva = ____vm_vaddr_alloc(vm, sz, vaddr_min,
> +					    (vm_paddr_t)vaddr_min, data_memslot,
> +					    vm->protected);
> +	TEST_ASSERT_EQ(gva, addr_gva2gpa(vm, gva));

How can this be guaranteed?
For ____vm_vaddr_alloc(), there is in general no enforcement that the
virtual and physical addresses are identical.

> +
> +	return gva;
> +}
Zhang, Dongsheng X March 21, 2024, 10:29 p.m. UTC | #2
On 12/12/2023 12:46 PM, Sagi Shahar wrote:
> From: Ackerley Tng <ackerleytng@google.com>
> 
> One-to-one GVA to GPA mappings can be used in the guest to set up boot
> sequences during which paging is enabled, hence requiring a transition
> from using physical to virtual addresses in consecutive instructions.
> 
> Signed-off-by: Ackerley Tng <ackerleytng@google.com>
> Signed-off-by: Ryan Afranji <afranji@google.com>
> Signed-off-by: Sagi Shahar <sagis@google.com>
[...]
> +vm_vaddr_t vm_vaddr_alloc_1to1(struct kvm_vm *vm, size_t sz,
> +			       vm_vaddr_t vaddr_min, uint32_t data_memslot)
> +{
> +	vm_vaddr_t gva = ____vm_vaddr_alloc(vm, sz, vaddr_min,
> +					    (vm_paddr_t)vaddr_min, data_memslot,
> +					    vm->protected);
> +	TEST_ASSERT_EQ(gva, addr_gva2gpa(vm, gva));

By 1to1, do you mean virtual address = physical address? The community tends to call this identity mapping.
Examples (function name):
create_identity_mapping_pagetables()
hellcreek_setup_tc_identity_mapping()
identity_mapping_add()

> +
> +	return gva;
> +}
Sagi Shahar July 23, 2024, 7:55 p.m. UTC | #3
On Tue, Feb 20, 2024 at 7:43 PM Binbin Wu <binbin.wu@linux.intel.com> wrote:
>
> On 12/13/2023 4:46 AM, Sagi Shahar wrote:
> [...]
> > +     vm_vaddr_t gva = ____vm_vaddr_alloc(vm, sz, vaddr_min,
> > +                                         (vm_paddr_t)vaddr_min, data_memslot,
> > +                                         vm->protected);
> > +     TEST_ASSERT_EQ(gva, addr_gva2gpa(vm, gva));
>
> How can this be guaranteed?
> For ____vm_vaddr_alloc(), there is in general no enforcement that the
> virtual and physical addresses are identical.

The problem is that if the allocation isn't 1-to-1, the tests won't
work, so we figured it's better to fail early.
In practice these mappings are created at an early stage, before other
allocations can claim the physical range, which generally guarantees
that the mapping can be 1-to-1.
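
To make that concrete, here is a hypothetical failure sequence (the
0x100000 address and memslot 0 are illustrative, not from this series):

	/*
	 * If earlier allocations already consumed the GPA range at
	 * 0x100000, _vm_phy_pages_alloc() hands back the next free
	 * physical range instead...
	 */
	vm_vaddr_t gva = vm_vaddr_alloc_1to1(vm, vm->page_size, 0x100000, 0);
	/*
	 * ...so gva != addr_gva2gpa(vm, gva) and the TEST_ASSERT_EQ()
	 * fails instead of silently returning a non-identity mapping.
	 */
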
Sagi Shahar July 23, 2024, 7:56 p.m. UTC | #4
On Thu, Mar 21, 2024 at 5:29 PM Zhang, Dongsheng X
<dongsheng.x.zhang@intel.com> wrote:
>
> On 12/12/2023 12:46 PM, Sagi Shahar wrote:
> [...]
> > +     TEST_ASSERT_EQ(gva, addr_gva2gpa(vm, gva));
>
> By 1to1, do you mean virtual address = physical address? The community tends to call this identity mapping.
> Examples (function name):
> create_identity_mapping_pagetables()
> hellcreek_setup_tc_identity_mapping()
> identity_mapping_add()

Thanks for the input. Will switch to vm_vaddr_identity_alloc().
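
For reference, the renamed declaration would keep the same signature
(sketch only, using the name suggested above):

	vm_vaddr_t vm_vaddr_identity_alloc(struct kvm_vm *vm, size_t sz,
					   vm_vaddr_t vaddr_min,
					   uint32_t data_memslot);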

Patch

diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index 1426e88ebdc7..c2e5c5f25dfc 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -564,6 +564,8 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
 			    enum kvm_mem_region_type type);
 vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
+vm_vaddr_t vm_vaddr_alloc_1to1(struct kvm_vm *vm, size_t sz,
+			       vm_vaddr_t vaddr_min, uint32_t data_memslot);
 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
 vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
 				 enum kvm_mem_region_type type);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index febc63d7a46b..4f1ae0f1eef0 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1388,17 +1388,37 @@ vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
 	return pgidx_start * vm->page_size;
 }
 
+/*
+ * VM Virtual Address Allocate Shared/Encrypted
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   sz - Size in bytes
+ *   vaddr_min - Minimum starting virtual address
+ *   paddr_min - Minimum starting physical address
+ *   data_memslot - memslot number to allocate in
+ *   encrypt - Whether the region should be handled as encrypted
+ *
+ * Output Args: None
+ *
+ * Return:
+ *   Starting guest virtual address
+ *
+ * Allocates at least sz bytes within the virtual address space of the vm
+ * given by vm.  The allocated bytes are mapped to a virtual address >=
+ * the address given by vaddr_min.  Note that each allocation uses
+ * a unique set of pages, with the minimum real allocation being at least
+ * a page.
+ */
 static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz,
-				     vm_vaddr_t vaddr_min,
-				     enum kvm_mem_region_type type,
-				     bool encrypt)
+				     vm_vaddr_t vaddr_min, vm_paddr_t paddr_min,
+				     uint32_t data_memslot, bool encrypt)
 {
 	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
 
 	virt_pgd_alloc(vm);
-	vm_paddr_t paddr = _vm_phy_pages_alloc(vm, pages,
-					      KVM_UTIL_MIN_PFN * vm->page_size,
-					      vm->memslots[type], encrypt);
+	vm_paddr_t paddr = _vm_phy_pages_alloc(vm, pages, paddr_min,
+					       data_memslot, encrypt);
 
 	/*
 	 * Find an unused range of virtual page addresses of at least
@@ -1408,8 +1428,7 @@ static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz,
 
 	/* Map the virtual pages. */
 	for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
-		pages--, vaddr += vm->page_size, paddr += vm->page_size) {
-
+	     pages--, vaddr += vm->page_size, paddr += vm->page_size) {
 		virt_pg_map(vm, vaddr, paddr);
 
 		sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
@@ -1421,12 +1440,16 @@ static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz,
 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
 			    enum kvm_mem_region_type type)
 {
-	return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, vm->protected);
+	return ____vm_vaddr_alloc(vm, sz, vaddr_min,
+				  KVM_UTIL_MIN_PFN * vm->page_size,
+				  vm->memslots[type], vm->protected);
 }
 
 vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
 {
-	return ____vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA, false);
+	return ____vm_vaddr_alloc(vm, sz, vaddr_min,
+				  KVM_UTIL_MIN_PFN * vm->page_size,
+				  vm->memslots[MEM_REGION_TEST_DATA], false);
 }
 
 /*
@@ -1453,6 +1476,26 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
 	return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA);
 }
 
+/**
+ * Allocate memory in @vm of size @sz in memslot with id @data_memslot,
+ * beginning with the desired address of @vaddr_min.
+ *
+ * If there isn't enough memory at @vaddr_min, find the next possible address
+ * that can meet the requested size in the given memslot.
+ *
+ * Return the address where the memory is allocated.
+ */
+vm_vaddr_t vm_vaddr_alloc_1to1(struct kvm_vm *vm, size_t sz,
+			       vm_vaddr_t vaddr_min, uint32_t data_memslot)
+{
+	vm_vaddr_t gva = ____vm_vaddr_alloc(vm, sz, vaddr_min,
+					    (vm_paddr_t)vaddr_min, data_memslot,
+					    vm->protected);
+	TEST_ASSERT_EQ(gva, addr_gva2gpa(vm, gva));
+
+	return gva;
+}
+
 /*
  * VM Virtual Address Allocate Pages
  *