@@ -15,24 +15,25 @@
 /*
- * Add a vCPU, run KVM_ARM_VCPU_INIT with @init1, and then
- * add another vCPU, and run KVM_ARM_VCPU_INIT with @init2.
+ * Add a vCPU, run KVM_ARM_VCPU_INIT with @init0, and then
+ * add another vCPU, and run KVM_ARM_VCPU_INIT with @init1.
  */
-static int add_init_2vcpus(struct kvm_vcpu_init *init1,
-                           struct kvm_vcpu_init *init2)
+static int add_init_2vcpus(struct kvm_vcpu_init *init0,
+                           struct kvm_vcpu_init *init1)
 {
+        struct kvm_vcpu *vcpu0, *vcpu1;
         struct kvm_vm *vm;
         int ret;

         vm = vm_create_barebones();

-        __vm_vcpu_add(vm, 0);
-        ret = __vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
+        vcpu0 = __vm_vcpu_add(vm, 0);
+        ret = __vcpu_ioctl(vm, vcpu0->id, KVM_ARM_VCPU_INIT, init0);
         if (ret)
                 goto free_exit;

-        __vm_vcpu_add(vm, 1);
-        ret = __vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
+        vcpu1 = __vm_vcpu_add(vm, 1);
+        ret = __vcpu_ioctl(vm, vcpu1->id, KVM_ARM_VCPU_INIT, init1);

 free_exit:
         kvm_vm_free(vm);
@@ -40,25 +41,26 @@ static int add_init_2vcpus(struct kvm_vcpu_init *init1,
 }

 /*
- * Add two vCPUs, then run KVM_ARM_VCPU_INIT for one vCPU with @init1,
- * and run KVM_ARM_VCPU_INIT for another vCPU with @init2.
+ * Add two vCPUs, then run KVM_ARM_VCPU_INIT for one vCPU with @init0,
+ * and run KVM_ARM_VCPU_INIT for another vCPU with @init1.
  */
-static int add_2vcpus_init_2vcpus(struct kvm_vcpu_init *init1,
-                                  struct kvm_vcpu_init *init2)
+static int add_2vcpus_init_2vcpus(struct kvm_vcpu_init *init0,
+                                  struct kvm_vcpu_init *init1)
 {
+        struct kvm_vcpu *vcpu0, *vcpu1;
         struct kvm_vm *vm;
         int ret;

         vm = vm_create_barebones();

-        __vm_vcpu_add(vm, 0);
-        __vm_vcpu_add(vm, 1);
+        vcpu0 = __vm_vcpu_add(vm, 0);
+        vcpu1 = __vm_vcpu_add(vm, 1);

-        ret = __vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
+        ret = __vcpu_ioctl(vm, vcpu0->id, KVM_ARM_VCPU_INIT, init0);
         if (ret)
                 goto free_exit;

-        ret = __vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
+        ret = __vcpu_ioctl(vm, vcpu1->id, KVM_ARM_VCPU_INIT, init1);

 free_exit:
         kvm_vm_free(vm);
@@ -76,7 +78,7 @@ static int add_2vcpus_init_2vcpus(struct kvm_vcpu_init *init1,
  */
 int main(void)
 {
-        struct kvm_vcpu_init init1, init2;
+        struct kvm_vcpu_init init0, init1;
         struct kvm_vm *vm;
         int ret;

@@ -85,36 +87,36 @@ int main(void)
                 exit(KSFT_SKIP);
         }

-        /* Get the preferred target type and copy that to init2 for later use */
+        /* Get the preferred target type and copy that to init1 for later use */
         vm = vm_create_barebones();
-        vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init1);
+        vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init0);
         kvm_vm_free(vm);
-        init2 = init1;
+        init1 = init0;

         /* Test with 64bit vCPUs */
-        ret = add_init_2vcpus(&init1, &init1);
+        ret = add_init_2vcpus(&init0, &init0);
         TEST_ASSERT(ret == 0,
                     "Configuring 64bit EL1 vCPUs failed unexpectedly");
-        ret = add_2vcpus_init_2vcpus(&init1, &init1);
+        ret = add_2vcpus_init_2vcpus(&init0, &init0);
         TEST_ASSERT(ret == 0,
                     "Configuring 64bit EL1 vCPUs failed unexpectedly");

         /* Test with 32bit vCPUs */
-        init1.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
-        ret = add_init_2vcpus(&init1, &init1);
+        init0.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
+        ret = add_init_2vcpus(&init0, &init0);
         TEST_ASSERT(ret == 0,
                     "Configuring 32bit EL1 vCPUs failed unexpectedly");
-        ret = add_2vcpus_init_2vcpus(&init1, &init1);
+        ret = add_2vcpus_init_2vcpus(&init0, &init0);
         TEST_ASSERT(ret == 0,
                     "Configuring 32bit EL1 vCPUs failed unexpectedly");

         /* Test with mixed-width vCPUs */
-        init1.features[0] = 0;
-        init2.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
-        ret = add_init_2vcpus(&init1, &init2);
+        init0.features[0] = 0;
+        init1.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
+        ret = add_init_2vcpus(&init0, &init1);
         TEST_ASSERT(ret != 0,
                     "Configuring mixed-width vCPUs worked unexpectedly");
-        ret = add_2vcpus_init_2vcpus(&init1, &init2);
+        ret = add_2vcpus_init_2vcpus(&init0, &init1);
         TEST_ASSERT(ret != 0,
                     "Configuring mixed-width vCPUs worked unexpectedly");

In preparation for taking a vCPU pointer in vCPU-scoped functions, grab
the vCPU(s) created by __vm_vcpu_add() and use the ID from the vCPU
object instead of hardcoding the ID in ioctl() invocations.

Rename init1/init2 => init0/init1 to avoid having odd/confusing code
where vcpu0 consumes init1 and vcpu1 consumes init2.

Note, this change could easily be done when the functions are converted
in the future, and/or the vcpu{0,1} vs. init{1,2} discrepancy could be
ignored, but then there would be no opportunity to poke fun at the
1-based counting scheme.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 .../selftests/kvm/aarch64/vcpu_width_config.c | 60 ++++++++++---------
 1 file changed, 31 insertions(+), 29 deletions(-)
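For anyone skimming the diff without the file handy, here is a minimal
sketch of the pattern being adopted. It reuses only the selftest helpers
visible in the hunks above (vm_create_barebones(), __vm_vcpu_add(),
__vcpu_ioctl(), kvm_vm_free(), and the vcpu->id field); the
init_one_vcpu() wrapper itself is hypothetical and exists purely for
illustration, it is not something this patch adds.

/*
 * Illustration only: hold on to the vCPU object that __vm_vcpu_add()
 * returns and take the ID from it, rather than hardcoding the ID a
 * second time in the ioctl() invocation.
 */
#include "kvm_util.h"   /* KVM selftests framework header, as used by the test */

static int init_one_vcpu(struct kvm_vcpu_init *init)
{
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;
        int ret;

        vm = vm_create_barebones();

        /* The returned object carries the ID that was passed in here. */
        vcpu = __vm_vcpu_add(vm, 0);

        /*
         * vcpu->id replaces the literal '0', so the call stays correct
         * even if vCPU IDs are renumbered or handed out dynamically.
         */
        ret = __vcpu_ioctl(vm, vcpu->id, KVM_ARM_VCPU_INIT, init);

        kvm_vm_free(vm);
        return ret;
}

Holding the object rather than the raw ID is also what lets
__vcpu_ioctl() and friends take the vCPU pointer directly once the
conversion mentioned in the changelog lands.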