
[v4,09/18] KVM: arm64: selftests: Add guest support to get the vcpuid

Message ID 20210909013818.1191270-10-rananta@google.com (mailing list archive)
State New, archived
Series KVM: arm64: selftests: Introduce arch_timer selftest

Commit Message

Raghavendra Rao Ananta Sept. 9, 2021, 1:38 a.m. UTC
At times, such as when in the interrupt handler, the guest wants
to get the vcpuid that it's running on. As a result, introduce
get_vcpuid() that returns the vcpuid of the calling vcpu. At its
backend, the VMM prepares a map of vcpuid and mpidr during VM
initialization and exports the map to the guest for it to read.

Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
---
 .../selftests/kvm/include/aarch64/processor.h |  3 ++
 .../selftests/kvm/lib/aarch64/processor.c     | 46 +++++++++++++++++++
 2 files changed, 49 insertions(+)

Comments

Oliver Upton Sept. 9, 2021, 5:09 a.m. UTC | #1
On Thu, Sep 09, 2021 at 01:38:09AM +0000, Raghavendra Rao Ananta wrote:
> At times, such as when in the interrupt handler, the guest wants
> to get the vcpuid that it's running on. As a result, introduce
> get_vcpuid() that returns the vcpuid of the calling vcpu. At its
> backend, the VMM prepares a map of vcpuid and mpidr during VM
> initialization and exports the map to the guest for it to read.
> 
> Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
> ---
>  .../selftests/kvm/include/aarch64/processor.h |  3 ++
>  .../selftests/kvm/lib/aarch64/processor.c     | 46 +++++++++++++++++++
>  2 files changed, 49 insertions(+)
> 
> diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h
> index b6088c3c67a3..150f63101f4c 100644
> --- a/tools/testing/selftests/kvm/include/aarch64/processor.h
> +++ b/tools/testing/selftests/kvm/include/aarch64/processor.h
> @@ -133,6 +133,7 @@ void vm_install_exception_handler(struct kvm_vm *vm,
>  		int vector, handler_fn handler);
>  void vm_install_sync_handler(struct kvm_vm *vm,
>  		int vector, int ec, handler_fn handler);
> +void vm_vcpuid_map_init(struct kvm_vm *vm);
>  
>  static inline void cpu_relax(void)
>  {
> @@ -194,4 +195,6 @@ static inline void local_irq_disable(void)
>  	asm volatile("msr daifset, #3" : : : "memory");
>  }
>  
> +int get_vcpuid(void);
> +

I believe both of these functions could use some documentation. The
former has implicit ordering requirements (can only be called after all
vCPUs are created) and the latter can only be used within a guest.
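
For example, something along these lines (the wording here is only a suggestion, not from the series):

/*
 * vm_vcpuid_map_init() - build the vcpuid/MPIDR map and export it to the
 * guest. Host-only; must be called after all vCPUs have been added to the
 * VM and before any guest code calls get_vcpuid().
 */
void vm_vcpuid_map_init(struct kvm_vm *vm);

/*
 * get_vcpuid() - return the vcpuid of the calling vCPU. Guest-only; valid
 * only after the host has called vm_vcpuid_map_init().
 */
int get_vcpuid(void);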

>  #endif /* SELFTEST_KVM_PROCESSOR_H */
> diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
> index 632b74d6b3ca..9844b62227b1 100644
> --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
> +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
> @@ -13,9 +13,17 @@
>  #include "processor.h"
>  
>  #define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN	0xac0000
> +#define VM_VCPUID_MAP_INVAL			-1
>  
>  static vm_vaddr_t exception_handlers;
>  
> +struct vm_vcpuid_map {
> +	uint64_t mpidr;
> +	int vcpuid;
> +};
> +
> +static struct vm_vcpuid_map vcpuid_map[KVM_MAX_VCPUS];
> +

Hmm.

I'm not too big of a fan that the KVM_MAX_VCPUS macro is defined in the
KVM selftests. Really, userspace should discover the limit from the
kernel. Especially when we want to write tests that test behavior at
KVM's limit.

That being said, there are more instances of these static allocations in
the selftests code, so you aren't to be blamed.

Related: commit 074c82c8f7cf ("kvm: x86: Increase MAX_VCPUS to 1024")
has raised this limit.

>  static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
>  {
>  	return (v + vm->page_size) & ~(vm->page_size - 1);
> @@ -426,3 +434,41 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
>  	assert(vector < VECTOR_NUM);
>  	handlers->exception_handlers[vector][0] = handler;
>  }
> +
> +void vm_vcpuid_map_init(struct kvm_vm *vm)
> +{
> +	int i = 0;
> +	struct vcpu *vcpu;
> +	struct vm_vcpuid_map *map;
> +
> +	list_for_each_entry(vcpu, &vm->vcpus, list) {
> +		map = &vcpuid_map[i++];
> +		map->vcpuid = vcpu->id;
> +		get_reg(vm, vcpu->id,
> +			ARM64_SYS_KVM_REG(SYS_MPIDR_EL1), &map->mpidr);
> +		map->mpidr &= MPIDR_HWID_BITMASK;
> +	}
> +
> +	if (i < KVM_MAX_VCPUS)
> +		vcpuid_map[i].vcpuid = VM_VCPUID_MAP_INVAL;
> +
> +	sync_global_to_guest(vm, vcpuid_map);
> +}
> +
> +int get_vcpuid(void)

nit: guest_get_vcpuid()

> +{
> +	int i, vcpuid;
> +	uint64_t mpidr = read_sysreg(mpidr_el1) & MPIDR_HWID_BITMASK;
> +
> +	for (i = 0; i < KVM_MAX_VCPUS; i++) {
> +		vcpuid = vcpuid_map[i].vcpuid;
> +		GUEST_ASSERT_1(vcpuid != VM_VCPUID_MAP_INVAL, mpidr);
> +
> +		if (mpidr == vcpuid_map[i].mpidr)
> +			return vcpuid;
> +	}
> +
> +	/* We should not be reaching here */
> +	GUEST_ASSERT_1(0, mpidr);
> +	return -1;
> +}
> -- 
> 2.33.0.153.gba50c8fa24-goog
>
Andrew Jones Sept. 9, 2021, 7:56 a.m. UTC | #2
On Thu, Sep 09, 2021 at 01:38:09AM +0000, Raghavendra Rao Ananta wrote:
> At times, such as when in the interrupt handler, the guest wants
> to get the vcpuid that it's running on. As a result, introduce
> get_vcpuid() that returns the vcpuid of the calling vcpu. At its
> backend, the VMM prepares a map of vcpuid and mpidr during VM
> initialization and exports the map to the guest for it to read.
> 
> Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
> ---
>  .../selftests/kvm/include/aarch64/processor.h |  3 ++
>  .../selftests/kvm/lib/aarch64/processor.c     | 46 +++++++++++++++++++
>  2 files changed, 49 insertions(+)
> 
> diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h
> index b6088c3c67a3..150f63101f4c 100644
> --- a/tools/testing/selftests/kvm/include/aarch64/processor.h
> +++ b/tools/testing/selftests/kvm/include/aarch64/processor.h
> @@ -133,6 +133,7 @@ void vm_install_exception_handler(struct kvm_vm *vm,
>  		int vector, handler_fn handler);
>  void vm_install_sync_handler(struct kvm_vm *vm,
>  		int vector, int ec, handler_fn handler);
> +void vm_vcpuid_map_init(struct kvm_vm *vm);
>  
>  static inline void cpu_relax(void)
>  {
> @@ -194,4 +195,6 @@ static inline void local_irq_disable(void)
>  	asm volatile("msr daifset, #3" : : : "memory");
>  }
>  
> +int get_vcpuid(void);
> +
>  #endif /* SELFTEST_KVM_PROCESSOR_H */
> diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
> index 632b74d6b3ca..9844b62227b1 100644
> --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
> +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
> @@ -13,9 +13,17 @@
>  #include "processor.h"
>  
>  #define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN	0xac0000
> +#define VM_VCPUID_MAP_INVAL			-1
>  
>  static vm_vaddr_t exception_handlers;
>  
> +struct vm_vcpuid_map {
> +	uint64_t mpidr;
> +	int vcpuid;
> +};

I'd prefer we create an arch neutral map structure that has arch specific
vm_vcpuid_map_add() functions to populate them. So, instead of calling the
'mpidr' member mpidr, we should call it 'cpuid'. On x86, for example,
cpuid would be the APIC ID.
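
Roughly something like this (a sketch only; the member and function names are illustrative):

/* kvm_util.h: arch-neutral entry; 'cpuid' is the MPIDR on arm64, APIC ID on x86 */
struct vm_vcpuid_map {
	uint64_t cpuid;
	int vcpuid;
};

/* implemented per arch; fills in the map entry for 'vcpuid' */
void vm_vcpuid_map_add(struct kvm_vm *vm, uint32_t vcpuid);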

> +
> +static struct vm_vcpuid_map vcpuid_map[KVM_MAX_VCPUS];
> +
>  static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
>  {
>  	return (v + vm->page_size) & ~(vm->page_size - 1);
> @@ -426,3 +434,41 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
>  	assert(vector < VECTOR_NUM);
>  	handlers->exception_handlers[vector][0] = handler;
>  }
> +
> +void vm_vcpuid_map_init(struct kvm_vm *vm)
> +{
> +	int i = 0;
> +	struct vcpu *vcpu;
> +	struct vm_vcpuid_map *map;
> +
> +	list_for_each_entry(vcpu, &vm->vcpus, list) {
> +		map = &vcpuid_map[i++];
> +		map->vcpuid = vcpu->id;
> +		get_reg(vm, vcpu->id,
> +			ARM64_SYS_KVM_REG(SYS_MPIDR_EL1), &map->mpidr);
> +		map->mpidr &= MPIDR_HWID_BITMASK;
> +	}

Here we should assert that i is no longer zero. If it is, then we should
complain that vcpus need to be added before this call is made.

But, rather than providing an init function that inits the whole map
after all vcpus are created, I think we should add each vcpu's map entry
as we add vcpus to the vm. So we need to call the arch-specific
vm_vcpuid_map_add() from vm_vcpu_add(). We can just create stubs
for x86 and s390 for now. Also, in vm_vcpu_rm() we should find the
corresponding entry in the vcpuid map and set it to VM_VCPUID_MAP_INVAL
in order to remove it.
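
As a rough sketch of the aarch64 side (hypothetical helper names; this assumes the map is pre-initialized to VM_VCPUID_MAP_INVAL and reuses the get_reg() call from the patch):

/* aarch64: called from vm_vcpu_add() */
void vm_vcpuid_map_add(struct kvm_vm *vm, uint32_t vcpuid)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; i++) {
		if (vcpuid_map[i].vcpuid == VM_VCPUID_MAP_INVAL) {
			vcpuid_map[i].vcpuid = vcpuid;
			get_reg(vm, vcpuid, ARM64_SYS_KVM_REG(SYS_MPIDR_EL1),
				&vcpuid_map[i].cpuid);
			vcpuid_map[i].cpuid &= MPIDR_HWID_BITMASK;
			return;
		}
	}

	TEST_FAIL("vcpuid_map is full");
}

/* called from vm_vcpu_rm(): just invalidate the entry, leaving a hole */
void vm_vcpuid_map_rm(struct kvm_vm *vm, uint32_t vcpuid)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; i++) {
		if (vcpuid_map[i].vcpuid == vcpuid) {
			vcpuid_map[i].vcpuid = VM_VCPUID_MAP_INVAL;
			return;
		}
	}
}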

> +
> +	if (i < KVM_MAX_VCPUS)
> +		vcpuid_map[i].vcpuid = VM_VCPUID_MAP_INVAL;
> +
> +	sync_global_to_guest(vm, vcpuid_map);

We can't do this synch part for the test code at vcpu add time since we
don't know if the guest page tables are ready. I think it's OK to require
the test code to do this when the guest code needs it though. We should
document that requirement above the vm_vcpuid_map struct declaration,
which will be in kvm_util.h.

> +}
> +
> +int get_vcpuid(void)
> +{
> +	int i, vcpuid;
> +	uint64_t mpidr = read_sysreg(mpidr_el1) & MPIDR_HWID_BITMASK;
> +
> +	for (i = 0; i < KVM_MAX_VCPUS; i++) {
> +		vcpuid = vcpuid_map[i].vcpuid;
> +		GUEST_ASSERT_1(vcpuid != VM_VCPUID_MAP_INVAL, mpidr);

We don't want this assert if it's possible to have sparse maps, which
it probably isn't ever going to be, but...

> +
> +		if (mpidr == vcpuid_map[i].mpidr)
> +			return vcpuid;
> +	}
> +
> +	/* We should not be reaching here */
> +	GUEST_ASSERT_1(0, mpidr);

...this assert should be good enough to sanity check the map by itself
anyway.

Also, the only arch-specific aspect of get_vcpuid() is the looking up
the cpuid. So we should make get_vcpuid arch-neutral and call an arch-
specific get_cpuid() from it.
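
i.e. something like this (a sketch; get_cpuid() is the hypothetical arch-specific hook):

/* arch-neutral, e.g. lib/kvm_util.c */
int get_vcpuid(void)
{
	uint64_t cpuid = get_cpuid();	/* arch-specific hw cpuid of the calling vCPU */
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; i++) {
		if (vcpuid_map[i].vcpuid != VM_VCPUID_MAP_INVAL &&
		    vcpuid_map[i].cpuid == cpuid)
			return vcpuid_map[i].vcpuid;
	}

	/* no entry matched the cpuid we're running on */
	GUEST_ASSERT_1(0, cpuid);
	return -1;
}

/* lib/aarch64/processor.c */
uint64_t get_cpuid(void)
{
	return read_sysreg(mpidr_el1) & MPIDR_HWID_BITMASK;
}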

> +	return -1;
> +}
> -- 
> 2.33.0.153.gba50c8fa24-goog
>

Thanks,
drew
Raghavendra Rao Ananta Sept. 9, 2021, 4:59 p.m. UTC | #3
On Wed, Sep 8, 2021 at 10:09 PM Oliver Upton <oupton@google.com> wrote:
>
> On Thu, Sep 09, 2021 at 01:38:09AM +0000, Raghavendra Rao Ananta wrote:
> > At times, such as when in the interrupt handler, the guest wants
> > to get the vcpuid that it's running on. As a result, introduce
> > get_vcpuid() that returns the vcpuid of the calling vcpu. At its
> > backend, the VMM prepares a map of vcpuid and mpidr during VM
> > initialization and exports the map to the guest for it to read.
> >
> > Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
> > ---
> >  .../selftests/kvm/include/aarch64/processor.h |  3 ++
> >  .../selftests/kvm/lib/aarch64/processor.c     | 46 +++++++++++++++++++
> >  2 files changed, 49 insertions(+)
> >
> > diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h
> > index b6088c3c67a3..150f63101f4c 100644
> > --- a/tools/testing/selftests/kvm/include/aarch64/processor.h
> > +++ b/tools/testing/selftests/kvm/include/aarch64/processor.h
> > @@ -133,6 +133,7 @@ void vm_install_exception_handler(struct kvm_vm *vm,
> >               int vector, handler_fn handler);
> >  void vm_install_sync_handler(struct kvm_vm *vm,
> >               int vector, int ec, handler_fn handler);
> > +void vm_vcpuid_map_init(struct kvm_vm *vm);
> >
> >  static inline void cpu_relax(void)
> >  {
> > @@ -194,4 +195,6 @@ static inline void local_irq_disable(void)
> >       asm volatile("msr daifset, #3" : : : "memory");
> >  }
> >
> > +int get_vcpuid(void);
> > +
>
> I believe both of these functions could use some documentation. The
> former has implicit ordering requirements (can only be called after all
> vCPUs are created) and the latter can only be used within a guest.
>
> >  #endif /* SELFTEST_KVM_PROCESSOR_H */
> > diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
> > index 632b74d6b3ca..9844b62227b1 100644
> > --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
> > +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
> > @@ -13,9 +13,17 @@
> >  #include "processor.h"
> >
> >  #define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN  0xac0000
> > +#define VM_VCPUID_MAP_INVAL                  -1
> >
> >  static vm_vaddr_t exception_handlers;
> >
> > +struct vm_vcpuid_map {
> > +     uint64_t mpidr;
> > +     int vcpuid;
> > +};
> > +
> > +static struct vm_vcpuid_map vcpuid_map[KVM_MAX_VCPUS];
> > +
>
> Hmm.
>
> I'm not too big of a fan that the KVM_MAX_VCPUS macro is defined in the
> KVM selftests. Really, userspace should discover the limit from the
> kernel. Especially when we want to write tests that test behavior at
> KVM's limit.
>
> That being said, there are more instances of these static allocations in
> the selftests code, so you aren't to be blamed.
>
> Related: commit 074c82c8f7cf ("kvm: x86: Increase MAX_VCPUS to 1024")
> has raised this limit.
>
I'm not a fan of static allocations either, but the fact that
sync_global_to_guest() doesn't have a size argument (yet), makes me
want to take a shorter route. Anyway, if you want I can allocate it
dynamically and copy it to the guest's memory by hand, or come up with
a utility wrapper while I'm at it.
(Just wanted to make sure we are not over-engineering our needs here).
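
For the by-hand route, a rough (untested) sketch using the existing kvm_util.h helpers might look like:

	vm_vaddr_t map_gva = vm_vaddr_alloc(vm, map_size, KVM_UTIL_MIN_VADDR);

	memcpy(addr_gva2hva(vm, map_gva), vcpuid_map, map_size);

with map_gva then handed to the guest, e.g. via vcpu_args_set().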

> >  static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
> >  {
> >       return (v + vm->page_size) & ~(vm->page_size - 1);
> > @@ -426,3 +434,41 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
> >       assert(vector < VECTOR_NUM);
> >       handlers->exception_handlers[vector][0] = handler;
> >  }
> > +
> > +void vm_vcpuid_map_init(struct kvm_vm *vm)
> > +{
> > +     int i = 0;
> > +     struct vcpu *vcpu;
> > +     struct vm_vcpuid_map *map;
> > +
> > +     list_for_each_entry(vcpu, &vm->vcpus, list) {
> > +             map = &vcpuid_map[i++];
> > +             map->vcpuid = vcpu->id;
> > +             get_reg(vm, vcpu->id,
> > +                     ARM64_SYS_KVM_REG(SYS_MPIDR_EL1), &map->mpidr);
> > +             map->mpidr &= MPIDR_HWID_BITMASK;
> > +     }
> > +
> > +     if (i < KVM_MAX_VCPUS)
> > +             vcpuid_map[i].vcpuid = VM_VCPUID_MAP_INVAL;
> > +
> > +     sync_global_to_guest(vm, vcpuid_map);
> > +}
> > +
> > +int get_vcpuid(void)
>
> nit: guest_get_vcpuid()
>
Sounds nice. Since we have a lot of guest utility functions now, I'm
fancying a world where we prefix guest_ with all of them to avoid
confusion.

Regards,
Raghavendra
> > +{
> > +     int i, vcpuid;
> > +     uint64_t mpidr = read_sysreg(mpidr_el1) & MPIDR_HWID_BITMASK;
> > +
> > +     for (i = 0; i < KVM_MAX_VCPUS; i++) {
> > +             vcpuid = vcpuid_map[i].vcpuid;
> > +             GUEST_ASSERT_1(vcpuid != VM_VCPUID_MAP_INVAL, mpidr);
> > +
> > +             if (mpidr == vcpuid_map[i].mpidr)
> > +                     return vcpuid;
> > +     }
> > +
> > +     /* We should not be reaching here */
> > +     GUEST_ASSERT_1(0, mpidr);
> > +     return -1;
> > +}
> > --
> > 2.33.0.153.gba50c8fa24-goog
> >
Oliver Upton Sept. 9, 2021, 5:04 p.m. UTC | #4
On Thu, Sep 9, 2021 at 12:59 PM Raghavendra Rao Ananta
<rananta@google.com> wrote:
>
> On Wed, Sep 8, 2021 at 10:09 PM Oliver Upton <oupton@google.com> wrote:
> >
> > On Thu, Sep 09, 2021 at 01:38:09AM +0000, Raghavendra Rao Ananta wrote:
> > > At times, such as when in the interrupt handler, the guest wants
> > > to get the vcpuid that it's running on. As a result, introduce
> > > get_vcpuid() that returns the vcpuid of the calling vcpu. At its
> > > backend, the VMM prepares a map of vcpuid and mpidr during VM
> > > initialization and exports the map to the guest for it to read.
> > >
> > > Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
> > > ---
> > >  .../selftests/kvm/include/aarch64/processor.h |  3 ++
> > >  .../selftests/kvm/lib/aarch64/processor.c     | 46 +++++++++++++++++++
> > >  2 files changed, 49 insertions(+)
> > >
> > > diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h
> > > index b6088c3c67a3..150f63101f4c 100644
> > > --- a/tools/testing/selftests/kvm/include/aarch64/processor.h
> > > +++ b/tools/testing/selftests/kvm/include/aarch64/processor.h
> > > @@ -133,6 +133,7 @@ void vm_install_exception_handler(struct kvm_vm *vm,
> > >               int vector, handler_fn handler);
> > >  void vm_install_sync_handler(struct kvm_vm *vm,
> > >               int vector, int ec, handler_fn handler);
> > > +void vm_vcpuid_map_init(struct kvm_vm *vm);
> > >
> > >  static inline void cpu_relax(void)
> > >  {
> > > @@ -194,4 +195,6 @@ static inline void local_irq_disable(void)
> > >       asm volatile("msr daifset, #3" : : : "memory");
> > >  }
> > >
> > > +int get_vcpuid(void);
> > > +
> >
> > I believe both of these functions could use some documentation. The
> > former has implicit ordering requirements (can only be called after all
> > vCPUs are created) and the latter can only be used within a guest.
> >
> > >  #endif /* SELFTEST_KVM_PROCESSOR_H */
> > > diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
> > > index 632b74d6b3ca..9844b62227b1 100644
> > > --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
> > > +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
> > > @@ -13,9 +13,17 @@
> > >  #include "processor.h"
> > >
> > >  #define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN  0xac0000
> > > +#define VM_VCPUID_MAP_INVAL                  -1
> > >
> > >  static vm_vaddr_t exception_handlers;
> > >
> > > +struct vm_vcpuid_map {
> > > +     uint64_t mpidr;
> > > +     int vcpuid;
> > > +};
> > > +
> > > +static struct vm_vcpuid_map vcpuid_map[KVM_MAX_VCPUS];
> > > +
> >
> > Hmm.
> >
> > I'm not too big of a fan that the KVM_MAX_VCPUS macro is defined in the
> > KVM selftests. Really, userspace should discover the limit from the
> > kernel. Especially when we want to write tests that test behavior at
> > KVM's limit.
> >
> > That being said, there are more instances of these static allocations in
> > the selftests code, so you aren't to be blamed.
> >
> > Related: commit 074c82c8f7cf ("kvm: x86: Increase MAX_VCPUS to 1024")
> > has raised this limit.
> >
> I'm not a fan of static allocations either, but the fact that
> sync_global_to_guest() doesn't have a size argument (yet), makes me
> want to take a shorter route. Anyway, if you want I can allocate it
> dynamically and copy it to the guest's memory by hand, or come up with
> a utility wrapper while I'm at it.
> (Just wanted to make sure we are not over-engineering our needs here).

No, please don't worry about it in your series. I'm just openly
whining is all :-)

> > >  static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
> > >  {
> > >       return (v + vm->page_size) & ~(vm->page_size - 1);
> > > @@ -426,3 +434,41 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
> > >       assert(vector < VECTOR_NUM);
> > >       handlers->exception_handlers[vector][0] = handler;
> > >  }
> > > +
> > > +void vm_vcpuid_map_init(struct kvm_vm *vm)
> > > +{
> > > +     int i = 0;
> > > +     struct vcpu *vcpu;
> > > +     struct vm_vcpuid_map *map;
> > > +
> > > +     list_for_each_entry(vcpu, &vm->vcpus, list) {
> > > +             map = &vcpuid_map[i++];
> > > +             map->vcpuid = vcpu->id;
> > > +             get_reg(vm, vcpu->id,
> > > +                     ARM64_SYS_KVM_REG(SYS_MPIDR_EL1), &map->mpidr);
> > > +             map->mpidr &= MPIDR_HWID_BITMASK;
> > > +     }
> > > +
> > > +     if (i < KVM_MAX_VCPUS)
> > > +             vcpuid_map[i].vcpuid = VM_VCPUID_MAP_INVAL;
> > > +
> > > +     sync_global_to_guest(vm, vcpuid_map);
> > > +}
> > > +
> > > +int get_vcpuid(void)
> >
> > nit: guest_get_vcpuid()
> >
> Sounds nice. Since we have a lot of guest utility functions now, I'm
> fancying a world where we prefix guest_ with all of them to avoid
> confusion.
>

Sounds good to me!

--
Thanks,
Oliver
Raghavendra Rao Ananta Sept. 9, 2021, 5:10 p.m. UTC | #5
On Thu, Sep 9, 2021 at 12:56 AM Andrew Jones <drjones@redhat.com> wrote:
>
> On Thu, Sep 09, 2021 at 01:38:09AM +0000, Raghavendra Rao Ananta wrote:
> > At times, such as when in the interrupt handler, the guest wants
> > to get the vcpuid that it's running on. As a result, introduce
> > get_vcpuid() that returns the vcpuid of the calling vcpu. At its
> > backend, the VMM prepares a map of vcpuid and mpidr during VM
> > initialization and exports the map to the guest for it to read.
> >
> > Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
> > ---
> >  .../selftests/kvm/include/aarch64/processor.h |  3 ++
> >  .../selftests/kvm/lib/aarch64/processor.c     | 46 +++++++++++++++++++
> >  2 files changed, 49 insertions(+)
> >
> > diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h
> > index b6088c3c67a3..150f63101f4c 100644
> > --- a/tools/testing/selftests/kvm/include/aarch64/processor.h
> > +++ b/tools/testing/selftests/kvm/include/aarch64/processor.h
> > @@ -133,6 +133,7 @@ void vm_install_exception_handler(struct kvm_vm *vm,
> >               int vector, handler_fn handler);
> >  void vm_install_sync_handler(struct kvm_vm *vm,
> >               int vector, int ec, handler_fn handler);
> > +void vm_vcpuid_map_init(struct kvm_vm *vm);
> >
> >  static inline void cpu_relax(void)
> >  {
> > @@ -194,4 +195,6 @@ static inline void local_irq_disable(void)
> >       asm volatile("msr daifset, #3" : : : "memory");
> >  }
> >
> > +int get_vcpuid(void);
> > +
> >  #endif /* SELFTEST_KVM_PROCESSOR_H */
> > diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
> > index 632b74d6b3ca..9844b62227b1 100644
> > --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
> > +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
> > @@ -13,9 +13,17 @@
> >  #include "processor.h"
> >
> >  #define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN  0xac0000
> > +#define VM_VCPUID_MAP_INVAL                  -1
> >
> >  static vm_vaddr_t exception_handlers;
> >
> > +struct vm_vcpuid_map {
> > +     uint64_t mpidr;
> > +     int vcpuid;
> > +};
>
> I'd prefer we create an arch neutral map structure that has arch specific
> vm_vcpuid_map_add() functions to populate them. So, instead of calling the
> 'mpidr' member mpidr, we should call it 'cpuid'. On x86, for example,
> cpuid would be the APIC ID.
>
Great idea. Let me think about it.

> > +
> > +static struct vm_vcpuid_map vcpuid_map[KVM_MAX_VCPUS];
> > +
> >  static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
> >  {
> >       return (v + vm->page_size) & ~(vm->page_size - 1);
> > @@ -426,3 +434,41 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
> >       assert(vector < VECTOR_NUM);
> >       handlers->exception_handlers[vector][0] = handler;
> >  }
> > +
> > +void vm_vcpuid_map_init(struct kvm_vm *vm)
> > +{
> > +     int i = 0;
> > +     struct vcpu *vcpu;
> > +     struct vm_vcpuid_map *map;
> > +
> > +     list_for_each_entry(vcpu, &vm->vcpus, list) {
> > +             map = &vcpuid_map[i++];
> > +             map->vcpuid = vcpu->id;
> > +             get_reg(vm, vcpu->id,
> > +                     ARM64_SYS_KVM_REG(SYS_MPIDR_EL1), &map->mpidr);
> > +             map->mpidr &= MPIDR_HWID_BITMASK;
> > +     }
>
> Here we should assert that i is no longer zero. If it is, then we should
> complain that vcpus need to be added before this call is made.
>
Makes sense, I'll add an ASSERT to be safe.
> But, rather than providing an init function that inits the whole map
> after all vcpus are created, I think we should add each vcpu's map entry
> as we add vcpus to the vm. So we need to call the arch-specific
> vm_vcpuid_map_add() from vm_vcpu_add(). We can just create stubs
> for x86 and s390 for now. Also, in vm_vcpu_rm() we should find the
> corresponding entry in the vcpuid map and set it to VM_VCPUID_MAP_INVAL
> in order to remove it.
>
> > +
> > +     if (i < KVM_MAX_VCPUS)
> > +             vcpuid_map[i].vcpuid = VM_VCPUID_MAP_INVAL;
> > +
> > +     sync_global_to_guest(vm, vcpuid_map);
>
> We can't do this synch part for the test code at vcpu add time since we
> don't know if the guest page tables are ready. I think it's OK to require
> the test code to do this when the guest code needs it though. We should
> document that requirement above the vm_vcpuid_map struct declaration,
> which will be in kvm_util.h.
>
Sure, I'll add a comment.
> > +}
> > +
> > +int get_vcpuid(void)
> > +{
> > +     int i, vcpuid;
> > +     uint64_t mpidr = read_sysreg(mpidr_el1) & MPIDR_HWID_BITMASK;
> > +
> > +     for (i = 0; i < KVM_MAX_VCPUS; i++) {
> > +             vcpuid = vcpuid_map[i].vcpuid;
> > +             GUEST_ASSERT_1(vcpuid != VM_VCPUID_MAP_INVAL, mpidr);
>
> We don't want this assert if it's possible to have sparse maps, which
> it probably isn't ever going to be, but...
>
If you look at the way the array is arranged, the element with
VM_VCPUID_MAP_INVAL acts as a sentinel for us and all the proper
elements would lie before this. So, I don't think we'd have a sparse
array here.

Regards,
Raghavendra
> > +
> > +             if (mpidr == vcpuid_map[i].mpidr)
> > +                     return vcpuid;
> > +     }
> > +
> > +     /* We should not be reaching here */
> > +     GUEST_ASSERT_1(0, mpidr);
>
> ...this assert should be good enough to sanity check the map by itself
> anyway.
>
> Also, the only arch-specific aspect of get_vcpuid() is the looking up
> the cpuid. So we should make get_vcpuid arch-neutral and call an arch-
> specific get_cpuid() from it.
>
> > +     return -1;
> > +}
> > --
> > 2.33.0.153.gba50c8fa24-goog
> >
>
> Thanks,
> drew
>
Andrew Jones Sept. 10, 2021, 8:10 a.m. UTC | #6
On Thu, Sep 09, 2021 at 10:10:56AM -0700, Raghavendra Rao Ananta wrote:
> On Thu, Sep 9, 2021 at 12:56 AM Andrew Jones <drjones@redhat.com> wrote:
> >
> > On Thu, Sep 09, 2021 at 01:38:09AM +0000, Raghavendra Rao Ananta wrote:
...
> > > +     for (i = 0; i < KVM_MAX_VCPUS; i++) {
> > > +             vcpuid = vcpuid_map[i].vcpuid;
> > > +             GUEST_ASSERT_1(vcpuid != VM_VCPUID_MAP_INVAL, mpidr);
> >
> > We don't want this assert if it's possible to have sparse maps, which
> > it probably isn't ever going to be, but...
> >
> If you look at the way the array is arranged, the element with
> VM_VCPUID_MAP_INVAL acts as a sentinel for us and all the proper
> elements would lie before this. So, I don't think we'd have a sparse
> array here.

If we switch to my suggestion of adding map entries at vcpu-add time and
removing them at vcpu-rm time, then the array may become sparse depending
on the order of removals.

Thanks,
drew
Raghavendra Rao Ananta Sept. 10, 2021, 6:03 p.m. UTC | #7
On Fri, Sep 10, 2021 at 1:10 AM Andrew Jones <drjones@redhat.com> wrote:
>
> On Thu, Sep 09, 2021 at 10:10:56AM -0700, Raghavendra Rao Ananta wrote:
> > On Thu, Sep 9, 2021 at 12:56 AM Andrew Jones <drjones@redhat.com> wrote:
> > >
> > > On Thu, Sep 09, 2021 at 01:38:09AM +0000, Raghavendra Rao Ananta wrote:
> ...
> > > > +     for (i = 0; i < KVM_MAX_VCPUS; i++) {
> > > > +             vcpuid = vcpuid_map[i].vcpuid;
> > > > +             GUEST_ASSERT_1(vcpuid != VM_VCPUID_MAP_INVAL, mpidr);
> > >
> > > We don't want this assert if it's possible to have sparse maps, which
> > > it probably isn't ever going to be, but...
> > >
> > If you look at the way the array is arranged, the element with
> > VM_VCPUID_MAP_INVAL acts as a sentinel for us and all the proper
> > elements would lie before this. So, I don't think we'd have a sparse
> > array here.
>
> If we switch to my suggestion of adding map entries at vcpu-add time and
> removing them at vcpu-rm time, then the array may become sparse depending
> on the order of removals.
>
Oh, I get it now. But like you mentioned, we add entries to the map
while the vCPUs are getting added and then sync_global_to_guest()
later. This seems like a lot of maintenance, unless I'm interpreting
it wrong or not seeing an advantage.
I like your idea of coming up with an arch-independent interface, however.
So I modified it to be similar to the familiar ucall interface that we have,
and it does everything in one shot to avoid any confusion:

diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 010b59b13917..0e87cb0c980b 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -400,4 +400,24 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
 int vm_get_stats_fd(struct kvm_vm *vm);
 int vcpu_get_stats_fd(struct kvm_vm *vm, uint32_t vcpuid);

+#define VM_CPUID_MAP_INVAL -1
+
+struct vm_cpuid_map {
+       uint64_t hw_cpuid;
+       int vcpuid;
+};
+
+/*
+ * Create a vcpuid:hw_cpuid map and export it to the guest
+ *
+ * Input Args:
+ *   vm - KVM VM.
+ *
+ * Output Args: None
+ *
+ * Must be called after all the vCPUs are added to the VM
+ */
+void vm_cpuid_map_init(struct kvm_vm *vm);
+int guest_get_vcpuid(void);
+
 #endif /* SELFTEST_KVM_UTIL_H */
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index db64ee206064..e796bb3984a6 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -16,6 +16,8 @@

 static vm_vaddr_t exception_handlers;

+static struct vm_cpuid_map cpuid_map[KVM_MAX_VCPUS];
+
 static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
 {
        return (v + vm->page_size) & ~(vm->page_size - 1);
@@ -426,3 +428,42 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
        assert(vector < VECTOR_NUM);
        handlers->exception_handlers[vector][0] = handler;
 }
+
+void vm_cpuid_map_init(struct kvm_vm *vm)
+{
+       int i = 0;
+       struct vcpu *vcpu;
+       struct vm_cpuid_map *map;
+
+       TEST_ASSERT(!list_empty(&vm->vcpus), "vCPUs must have been created\n");
+
+       list_for_each_entry(vcpu, &vm->vcpus, list) {
+               map = &cpuid_map[i++];
+               map->vcpuid = vcpu->id;
+               get_reg(vm, vcpu->id, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &map->hw_cpuid);
+               map->hw_cpuid &= MPIDR_HWID_BITMASK;
+       }
+
+       if (i < KVM_MAX_VCPUS)
+               cpuid_map[i].vcpuid = VM_CPUID_MAP_INVAL;
+
+       sync_global_to_guest(vm, cpuid_map);
+}
+
+int guest_get_vcpuid(void)
+{
+       int i, vcpuid;
+       uint64_t mpidr = read_sysreg(mpidr_el1) & MPIDR_HWID_BITMASK;
+
+       for (i = 0; i < KVM_MAX_VCPUS; i++) {
+               vcpuid = cpuid_map[i].vcpuid;
+
+               /* Was this vCPU added to the VM after the map was initialized? */
+               GUEST_ASSERT_1(vcpuid != VM_CPUID_MAP_INVAL, mpidr);
+
+               if (mpidr == cpuid_map[i].hw_cpuid)
+                       return vcpuid;
+       }
+
+       /* We should not be reaching here */
+       GUEST_ASSERT_1(0, mpidr);
+       return -1;
+}

This would ensure that we don't have a sparse array and can use the
last non-vCPU element as a sentinel node.
If you still feel preparing the map as and when the vCPUs are created
makes more sense, I can go for it.

Regards,
Raghavendra
> Thanks,
> drew
>
Reiji Watanabe Sept. 12, 2021, 7:05 a.m. UTC | #8
Hi Raghu and all,

On Wed, Sep 8, 2021 at 6:38 PM Raghavendra Rao Ananta
<rananta@google.com> wrote:
>
> At times, such as when in the interrupt handler, the guest wants
> to get the vcpuid that it's running on. As a result, introduce
> get_vcpuid() that returns the vcpuid of the calling vcpu. At its
> backend, the VMM prepares a map of vcpuid and mpidr during VM
> initialization and exports the map to the guest for it to read.

How about using TPIDR_EL1 to hold the vcpuid ?
i.e. have aarch64_vcpu_setup() set the register to vcpuid and
guest_get_vcpuid() simply return a value of the register.
This would be a simpler solution to implement.
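
Roughly (a sketch of the idea; the exact register macro and setup hook would need checking against the tree):

/* host side, e.g. in aarch64_vcpu_setup(): stash the vcpuid in TPIDR_EL1 */
set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpuid);

/* guest side */
int guest_get_vcpuid(void)
{
	return read_sysreg(tpidr_el1);
}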

Thanks,
Reiji
Andrew Jones Sept. 13, 2021, 7:25 a.m. UTC | #9
On Fri, Sep 10, 2021 at 11:03:58AM -0700, Raghavendra Rao Ananta wrote:
> On Fri, Sep 10, 2021 at 1:10 AM Andrew Jones <drjones@redhat.com> wrote:
> >
> > On Thu, Sep 09, 2021 at 10:10:56AM -0700, Raghavendra Rao Ananta wrote:
> > > On Thu, Sep 9, 2021 at 12:56 AM Andrew Jones <drjones@redhat.com> wrote:
> > > >
> > > > On Thu, Sep 09, 2021 at 01:38:09AM +0000, Raghavendra Rao Ananta wrote:
> > ...
> > > > > +     for (i = 0; i < KVM_MAX_VCPUS; i++) {
> > > > > +             vcpuid = vcpuid_map[i].vcpuid;
> > > > > +             GUEST_ASSERT_1(vcpuid != VM_VCPUID_MAP_INVAL, mpidr);
> > > >
> > > > We don't want this assert if it's possible to have sparse maps, which
> > > > it probably isn't ever going to be, but...
> > > >
> > > If you look at the way the array is arranged, the element with
> > > VM_VCPUID_MAP_INVAL acts as a sentinel for us and all the proper
> > > elements would lie before this. So, I don't think we'd have a sparse
> > > array here.
> >
> > If we switch to my suggestion of adding map entries at vcpu-add time and
> > removing them at vcpu-rm time, then the array may become sparse depending
> > on the order of removals.
> >
> Oh, I get it now. But like you mentioned, we add entries to the map
> while the vCPUs are getting added and then sync_global_to_guest()
> later. This seems like a lot of maintenance, unless I'm interpreting
> it wrong or not seeing an advantage.

The advantage is that you don't need to create all vcpus before calling
the map init function. While it's true that we'll still require a call
after adding all vcpus if we want to export the map to the guest, i.e.
sync_global_to_guest, we'll never have to worry about the map being
out of synch wrt vcpus on the host side, and there's no need to call
sync_global_to_guest at all when the test needs the map, but the guest
doesn't need to access it.

> I like your idea of coming up with an arch-independent interface, however.
> So I modified it to be similar to the familiar ucall interface that we have,
> and it does everything in one shot to avoid any confusion:

Right, ucall_init does call sync_global_to_guest, but it's the only
lib function so far. Everything else exported to the guest must be
done explicitly.

> 
> diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
> index 010b59b13917..0e87cb0c980b 100644
> --- a/tools/testing/selftests/kvm/include/kvm_util.h
> +++ b/tools/testing/selftests/kvm/include/kvm_util.h
> @@ -400,4 +400,24 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
>  int vm_get_stats_fd(struct kvm_vm *vm);
>  int vcpu_get_stats_fd(struct kvm_vm *vm, uint32_t vcpuid);
> 
> +#define VM_CPUID_MAP_INVAL -1
> +
> +struct vm_cpuid_map {
> +       uint64_t hw_cpuid;
> +       int vcpuid;
> +};
> +
> +/*
> + * Create a vcpuid:hw_cpuid map and export it to the guest
> + *
> + * Input Args:
> + *   vm - KVM VM.
> + *
> + * Output Args: None
> + *
> + * Must be called after all the vCPUs are added to the VM
> + */
> +void vm_cpuid_map_init(struct kvm_vm *vm);
> +int guest_get_vcpuid(void);
> +
>  #endif /* SELFTEST_KVM_UTIL_H */
> diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
> index db64ee206064..e796bb3984a6 100644
> --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
> +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
> @@ -16,6 +16,8 @@
> 
>  static vm_vaddr_t exception_handlers;
> 
> +static struct vm_cpuid_map cpuid_map[KVM_MAX_VCPUS];
> +
>  static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
>  {
>         return (v + vm->page_size) & ~(vm->page_size - 1);
> @@ -426,3 +428,42 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
>         assert(vector < VECTOR_NUM);
>         handlers->exception_handlers[vector][0] = handler;
>  }
> +
> +void vm_cpuid_map_init(struct kvm_vm *vm)
> +{
> +       int i = 0;
> +       struct vcpu *vcpu;
> +       struct vm_cpuid_map *map;
> +
> +       TEST_ASSERT(!list_empty(&vm->vcpus), "vCPUs must have been created\n");
> +
> +       list_for_each_entry(vcpu, &vm->vcpus, list) {
> +               map = &cpuid_map[i++];
> +               map->vcpuid = vcpu->id;
> +               get_reg(vm, vcpu->id, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &map->hw_cpuid);
> +               map->hw_cpuid &= MPIDR_HWID_BITMASK;
> +       }
> +
> +       if (i < KVM_MAX_VCPUS)
> +               cpuid_map[i].vcpuid = VM_CPUID_MAP_INVAL;
> +
> +       sync_global_to_guest(vm, cpuid_map);
> +}
> +
> +int guest_get_vcpuid(void)
> +{
> +       int i, vcpuid;
> +       uint64_t mpidr = read_sysreg(mpidr_el1) & MPIDR_HWID_BITMASK;
> +
> +       for (i = 0; i < KVM_MAX_VCPUS; i++) {
> +               vcpuid = cpuid_map[i].vcpuid;
> +
> +               /* Was this vCPU added to the VM after the map was initialized? */
> +               GUEST_ASSERT_1(vcpuid != VM_CPUID_MAP_INVAL, mpidr);
> +
> +               if (mpidr == cpuid_map[i].hw_cpuid)
> +                       return vcpuid;
> +       }
> +
> +       /* We should not be reaching here */
> +       GUEST_ASSERT_1(0, mpidr);
> +       return -1;
> +}
> 
> This would ensure that we don't have a sparse array and can use the
> last non-vCPU element as a sentinel node.
> If you still feel preparing the map as and when the vCPUs are created
> makes more sense, I can go for it.

Yup, I think that's still my preference. We don't really need a
sentinel node for such a small array. We can just do

static struct vm_cpuid_map cpuid_map[KVM_MAX_VCPUS] = { [0 ... KVM_MAX_VCPUS - 1] = { .vcpuid = VM_CPUID_MAP_INVAL } };

to ensure every unused entry is marked invalid. After a full loop,
if we didn't find a valid entry, we assert, which easily
supports a sparse array.

Also, please don't forget that guest_get_vcpuid() can be common for all
architectures. We just need an arch-specific call for get_hw_cpuid().

Thanks,
drew

> 
> Regards,
> Raghavendra
> > Thanks,
> > drew
> >
>
Andrew Jones Sept. 13, 2021, 7:35 a.m. UTC | #10
On Sun, Sep 12, 2021 at 12:05:22AM -0700, Reiji Watanabe wrote:
> Hi Raghu and all,
> 
> On Wed, Sep 8, 2021 at 6:38 PM Raghavendra Rao Ananta
> <rananta@google.com> wrote:
> >
> > At times, such as when in the interrupt handler, the guest wants
> > to get the vcpuid that it's running on. As a result, introduce
> > get_vcpuid() that returns the vcpuid of the calling vcpu. At its
> > backend, the VMM prepares a map of vcpuid and mpidr during VM
> > initialization and exports the map to the guest for it to read.
> 
> How about using TPIDR_EL1 to hold the vcpuid ?
> i.e. have aarch64_vcpu_setup() set the register to vcpuid and
> guest_get_vcpuid() simply return a value of the register.
> This would be a simpler solution to implement.

That is a great suggestion. It's arch-specific, but maybe the
other architectures can mimic it with their own capabilities.
And, in the unlikely event a unit test wants that register for
itself, then it can build its own mpidr-vcpuid map if necessary.
Ship it :-)

Thanks,
drew
Raghavendra Rao Ananta Sept. 13, 2021, 4:51 p.m. UTC | #11
On Mon, Sep 13, 2021 at 12:35 AM Andrew Jones <drjones@redhat.com> wrote:
>
> On Sun, Sep 12, 2021 at 12:05:22AM -0700, Reiji Watanabe wrote:
> > Hi Raghu and all,
> >
> > On Wed, Sep 8, 2021 at 6:38 PM Raghavendra Rao Ananta
> > <rananta@google.com> wrote:
> > >
> > > At times, such as when in the interrupt handler, the guest wants
> > > to get the vcpuid that it's running on. As a result, introduce
> > > get_vcpuid() that returns the vcpuid of the calling vcpu. At its
> > > backend, the VMM prepares a map of vcpuid and mpidr during VM
> > > initialization and exports the map to the guest for it to read.
> >
> > How about using TPIDR_EL1 to hold the vcpuid ?
> > i.e. have aarch64_vcpu_setup() set the register to vcpuid and
> > guest_get_vcpuid() simply return a value of the register.
> > This would be a simpler solution to implement.
>
> That is a great suggestion. It's arch-specific, but maybe the
> other architectures can mimic it with their own capabilities.
> And, in the unlikely event a unit test wants that register for
> itself, then it can build its own mpidr-vcpuid map if necessary.
> Ship it :-)
>
Thanks for the suggestion, Reiji. I'll send out a patch soon for this.

Regards,
Raghavendra
> Thanks,
> drew
>

Patch

diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h
index b6088c3c67a3..150f63101f4c 100644
--- a/tools/testing/selftests/kvm/include/aarch64/processor.h
+++ b/tools/testing/selftests/kvm/include/aarch64/processor.h
@@ -133,6 +133,7 @@  void vm_install_exception_handler(struct kvm_vm *vm,
 		int vector, handler_fn handler);
 void vm_install_sync_handler(struct kvm_vm *vm,
 		int vector, int ec, handler_fn handler);
+void vm_vcpuid_map_init(struct kvm_vm *vm);
 
 static inline void cpu_relax(void)
 {
@@ -194,4 +195,6 @@  static inline void local_irq_disable(void)
 	asm volatile("msr daifset, #3" : : : "memory");
 }
 
+int get_vcpuid(void);
+
 #endif /* SELFTEST_KVM_PROCESSOR_H */
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index 632b74d6b3ca..9844b62227b1 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -13,9 +13,17 @@ 
 #include "processor.h"
 
 #define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN	0xac0000
+#define VM_VCPUID_MAP_INVAL			-1
 
 static vm_vaddr_t exception_handlers;
 
+struct vm_vcpuid_map {
+	uint64_t mpidr;
+	int vcpuid;
+};
+
+static struct vm_vcpuid_map vcpuid_map[KVM_MAX_VCPUS];
+
 static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
 {
 	return (v + vm->page_size) & ~(vm->page_size - 1);
@@ -426,3 +434,41 @@  void vm_install_exception_handler(struct kvm_vm *vm, int vector,
 	assert(vector < VECTOR_NUM);
 	handlers->exception_handlers[vector][0] = handler;
 }
+
+void vm_vcpuid_map_init(struct kvm_vm *vm)
+{
+	int i = 0;
+	struct vcpu *vcpu;
+	struct vm_vcpuid_map *map;
+
+	list_for_each_entry(vcpu, &vm->vcpus, list) {
+		map = &vcpuid_map[i++];
+		map->vcpuid = vcpu->id;
+		get_reg(vm, vcpu->id,
+			ARM64_SYS_KVM_REG(SYS_MPIDR_EL1), &map->mpidr);
+		map->mpidr &= MPIDR_HWID_BITMASK;
+	}
+
+	if (i < KVM_MAX_VCPUS)
+		vcpuid_map[i].vcpuid = VM_VCPUID_MAP_INVAL;
+
+	sync_global_to_guest(vm, vcpuid_map);
+}
+
+int get_vcpuid(void)
+{
+	int i, vcpuid;
+	uint64_t mpidr = read_sysreg(mpidr_el1) & MPIDR_HWID_BITMASK;
+
+	for (i = 0; i < KVM_MAX_VCPUS; i++) {
+		vcpuid = vcpuid_map[i].vcpuid;
+		GUEST_ASSERT_1(vcpuid != VM_VCPUID_MAP_INVAL, mpidr);
+
+		if (mpidr == vcpuid_map[i].mpidr)
+			return vcpuid;
+	}
+
+	/* We should not be reaching here */
+	GUEST_ASSERT_1(0, mpidr);
+	return -1;
+}