
[PART1,RFC,v3,02/12] KVM: x86: Introducing kvm_x86_ops VM init/uninit hooks

Message ID 1458281388-14452-3-git-send-email-Suravee.Suthikulpanit@amd.com (mailing list archive)
State New, archived

Commit Message

Suthikulpanit, Suravee March 18, 2016, 6:09 a.m. UTC
Add function pointers to struct kvm_x86_ops so that the processor-specific
layer can provide hooks for when KVM initializes and un-initializes a VM.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
 arch/x86/include/asm/kvm_host.h |  3 +++
 arch/x86/kvm/x86.c              | 10 +++++++++-
 virt/kvm/kvm_main.c             |  8 ++++----
 3 files changed, 16 insertions(+), 5 deletions(-)
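
For context, a minimal sketch of how a vendor module (svm.c, for example)
might implement and register these hooks. This is purely illustrative and not
part of the patch; svm_vm_init()/svm_vm_uninit() and the per-VM setup they
stand for are placeholder names:

        static int svm_vm_init(struct kvm *kvm)
        {
                /* Allocate and set up vendor-specific per-VM state here. */
                return 0;
        }

        static void svm_vm_uninit(struct kvm *kvm)
        {
                /* Tear down whatever svm_vm_init() allocated. */
        }

        static struct kvm_x86_ops svm_x86_ops = {
                /* ... existing callbacks ... */
                .vm_init = svm_vm_init,
                .vm_uninit = svm_vm_uninit,
                /* ... */
        };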

Comments

Paolo Bonzini March 18, 2016, 10:11 a.m. UTC | #1
On 18/03/2016 07:09, Suravee Suthikulpanit wrote:
> Adding function pointers in struct kvm_x86_ops for processor-specific
> layer to provide hooks for when KVM initialize and un-initialize VM.

This is not the only thing your patch is doing, and the "other" change
definitely needs a lot more explanation about why you did it and how you
audited the code to ensure that it is safe.

Paolo

> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
> ---
>  arch/x86/include/asm/kvm_host.h |  3 +++
>  arch/x86/kvm/x86.c              | 10 +++++++++-
>  virt/kvm/kvm_main.c             |  8 ++++----
>  3 files changed, 16 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 44adbb8..4b0dd0f 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -828,6 +828,9 @@ struct kvm_x86_ops {
>  	bool (*cpu_has_high_real_mode_segbase)(void);
>  	void (*cpuid_update)(struct kvm_vcpu *vcpu);
>  
> +	int (*vm_init)(struct kvm *kvm);
> +	void (*vm_uninit)(struct kvm *kvm);
> +
>  	/* Create, but do not attach this VCPU */
>  	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
>  	void (*vcpu_free)(struct kvm_vcpu *vcpu);
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 429c3f5..4d2961d 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -7700,6 +7700,8 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
>  
>  int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
>  {
> +	int ret = 0;
> +
>  	if (type)
>  		return -EINVAL;
>  
> @@ -7724,7 +7726,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
>  	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
>  	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
>  
> -	return 0;
> +	if (kvm_x86_ops->vm_init)
> +		ret = kvm_x86_ops->vm_init(kvm);
> +
> +	return ret;
>  }
>  
>  static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
> @@ -7751,6 +7756,9 @@ static void kvm_free_vcpus(struct kvm *kvm)
>  	kvm_for_each_vcpu(i, vcpu, kvm)
>  		kvm_arch_vcpu_free(vcpu);
>  
> +	if (kvm_x86_ops->vm_uninit)
> +		kvm_x86_ops->vm_uninit(kvm);
> +
>  	mutex_lock(&kvm->lock);
>  	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
>  		kvm->vcpus[i] = NULL;
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 1ca0258..5460325 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -536,10 +536,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
>  	if (!kvm)
>  		return ERR_PTR(-ENOMEM);
>  
> -	r = kvm_arch_init_vm(kvm, type);
> -	if (r)
> -		goto out_err_no_disable;
> -
>  	r = hardware_enable_all();
>  	if (r)
>  		goto out_err_no_disable;
> @@ -578,6 +574,10 @@ static struct kvm *kvm_create_vm(unsigned long type)
>  	atomic_set(&kvm->users_count, 1);
>  	INIT_LIST_HEAD(&kvm->devices);
>  
> +	r = kvm_arch_init_vm(kvm, type);
> +	if (r)
> +		goto out_err;
> +
>  	r = kvm_init_mmu_notifier(kvm);
>  	if (r)
>  		goto out_err;
> 
Suthikulpanit, Suravee March 29, 2016, 5:27 a.m. UTC | #2
Hi Paolo,

On 3/18/16 17:11, Paolo Bonzini wrote:
>
> On 18/03/2016 07:09, Suravee Suthikulpanit wrote:
>> >Adding function pointers in struct kvm_x86_ops for processor-specific
>> >layer to provide hooks for when KVM initialize and un-initialize VM.
> This is not the only thing your patch is doing, and the "other" change
> definitely needs a lot more explanation about why you did it and how you
> audited the code to ensure that it is safe.
>
> Paolo
>

Sorry for not mentioning this earlier. I am moving the
kvm_arch_init_vm() call mainly so that it runs after
mutex_init(&kvm->slots_lock), since I am calling x86_set_memory_region()
(which takes slots_lock) in the vm_init() hook (for AVIC initialization).

Lemme re-check if this would be safe for other code.

Thanks,
Suravee
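
For reference, a rough sketch of the kind of vm_init() hook being described
here — illustrative only, not from this series; avic_vm_init() is a
placeholder name and the x86_set_memory_region() arguments shown (memslot id,
base, size) are an assumption about how the APIC access page would be
installed:

        static int avic_vm_init(struct kvm *kvm)
        {
                int ret;

                /*
                 * x86_set_memory_region() takes kvm->slots_lock internally, so
                 * kvm_arch_init_vm() -> vm_init() must not run before
                 * mutex_init(&kvm->slots_lock) in kvm_create_vm().
                 */
                ret = x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
                                            APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
                if (ret)
                        return ret;

                /* Allocate the AVIC physical/logical APIC ID tables here. */
                return 0;
        }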

Paolo Bonzini March 29, 2016, 10:21 a.m. UTC | #3
On 29/03/2016 07:27, Suravee Suthikulpanit wrote:
>>
>>> >Adding function pointers in struct kvm_x86_ops for processor-specific
>>> >layer to provide hooks for when KVM initialize and un-initialize VM.
>> This is not the only thing your patch is doing, and the "other" change
>> definitely needs a lot more explanation about why you did it and how you
>> audited the code to ensure that it is safe.
>>
>> Paolo
>>
> 
> Sorry, for not mentioning this earlier. I am moving the
> kvm_arch_init_vm() call mainly to go after mutex_init(&kvm->slots_lock)
> since I am calling the x86_set_memory_region() (which uses slots_lock)
> in the vm_init() hooks (for AVIC initialization).
> 
> Lemme re-check if this would be safe for other code.

No problem.  In the meanwhile a patch got in ("KVM: fix spin_lock_init
order on x86") that should help you.

Thanks,

Paolo
Suthikulpanit, Suravee March 29, 2016, 11:47 a.m. UTC | #4
Hi,

On 03/29/2016 05:21 PM, Paolo Bonzini wrote:
>
>
> On 29/03/2016 07:27, Suravee Suthikulpanit wrote:
>>>
>>>>> Adding function pointers in struct kvm_x86_ops for processor-specific
>>>>> layer to provide hooks for when KVM initialize and un-initialize VM.
>>> This is not the only thing your patch is doing, and the "other" change
>>> definitely needs a lot more explanation about why you did it and how you
>>> audited the code to ensure that it is safe.
>>>
>>> Paolo
>>>
>>
>> Sorry, for not mentioning this earlier. I am moving the
>> kvm_arch_init_vm() call mainly to go after mutex_init(&kvm->slots_lock)
>> since I am calling the x86_set_memory_region() (which uses slots_lock)
>> in the vm_init() hooks (for AVIC initialization).
>>
>> Lemme re-check if this would be safe for other code.
>
> No problem.  In the meanwhile a patch got in ("KVM: fix spin_lock_init
> order on x86") that should help you.
>
> Thanks,
>
> Paolo
>

Right.... that's just what I need :) I'll re-base to the latest tip then.

Thanks,
Suravee
Suthikulpanit, Suravee March 30, 2016, 10 a.m. UTC | #5
Hi Paolo,

On 3/29/16 18:47, Suravee Suthikulpanit wrote:
> Hi,
>
> On 03/29/2016 05:21 PM, Paolo Bonzini wrote:
>>
>>
>> On 29/03/2016 07:27, Suravee Suthikulpanit wrote:
>>>>
>>>>>> Adding function pointers in struct kvm_x86_ops for processor-specific
>>>>>> layer to provide hooks for when KVM initialize and un-initialize VM.
>>>> This is not the only thing your patch is doing, and the "other" change
>>>> definitely needs a lot more explanation about why you did it and how
>>>> you
>>>> audited the code to ensure that it is safe.
>>>>
>>>> Paolo
>>>>
>>>
>>> Sorry, for not mentioning this earlier. I am moving the
>>> kvm_arch_init_vm() call mainly to go after mutex_init(&kvm->slots_lock)
>>> since I am calling the x86_set_memory_region() (which uses slots_lock)
>>> in the vm_init() hooks (for AVIC initialization).
>>>
>>> Lemme re-check if this would be safe for other code.
>>
>> No problem.  In the meanwhile a patch got in ("KVM: fix spin_lock_init
>> order on x86") that should help you.
>>
>> Thanks,
>>
>> Paolo
>>
>
> Right.... that's just what I need :) I'll re-base to the latest tip then.

Actually, in virt/kvm/kvm_main.c, I still need to move the
kvm_arch_init_vm() call to somewhere after the call to kvm_alloc_memslots(),
since I am calling x86_set_memory_region() in the vm_init hook.

         r = -ENOMEM;
         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
                 kvm->memslots[i] = kvm_alloc_memslots();
                 if (!kvm->memslots[i])
                         goto out_err_no_srcu;
         }

         if (init_srcu_struct(&kvm->srcu))
                 goto out_err_no_srcu;
         if (init_srcu_struct(&kvm->irq_srcu))
                 goto out_err_no_irq_srcu;
         for (i = 0; i < KVM_NR_BUSES; i++) {
                 kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
                                         GFP_KERNEL);
                 if (!kvm->buses[i])
                         goto out_err;
         }
//HERE
         r = kvm_arch_init_vm(kvm, type);
         if (r)
                 goto out_err;

Do you think that would be a problem?

Thanks,
Suravee

Paolo Bonzini March 30, 2016, 12:07 p.m. UTC | #6
On 30/03/2016 12:00, Suravee Suthikulpanit wrote:
> Hi Paolo,
> 
> On 3/29/16 18:47, Suravee Suthikulpanit wrote:
>> Hi,
>>
>> On 03/29/2016 05:21 PM, Paolo Bonzini wrote:
>>>
>>>
>>> On 29/03/2016 07:27, Suravee Suthikulpanit wrote:
>>>>>
>>>>>>> Adding function pointers in struct kvm_x86_ops for
>>>>>>> processor-specific
>>>>>>> layer to provide hooks for when KVM initialize and un-initialize VM.
>>>>> This is not the only thing your patch is doing, and the "other" change
>>>>> definitely needs a lot more explanation about why you did it and how
>>>>> you
>>>>> audited the code to ensure that it is safe.
>>>>>
>>>>> Paolo
>>>>>
>>>>
>>>> Sorry, for not mentioning this earlier. I am moving the
>>>> kvm_arch_init_vm() call mainly to go after mutex_init(&kvm->slots_lock)
>>>> since I am calling the x86_set_memory_region() (which uses slots_lock)
>>>> in the vm_init() hooks (for AVIC initialization).
>>>>
>>>> Lemme re-check if this would be safe for other code.
>>>
>>> No problem.  In the meanwhile a patch got in ("KVM: fix spin_lock_init
>>> order on x86") that should help you.
>>>
>>> Thanks,
>>>
>>> Paolo
>>>
>>
>> Right.... that's just what I need :) I'll re-base to the latest tip then.
> 
> Actually, in the file virt/kvm/kvm_main.c, I still need to move the
> kvm_arch_init_vm() to some place after the call to kvm_alloc_memslots()
> since I am calling x86_set_memory_region() in the vm_init hook.
> 
>         r = -ENOMEM;
>         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
>                 kvm->memslots[i] = kvm_alloc_memslots();
>                 if (!kvm->memslots[i])
>                         goto out_err_no_srcu;
>         }
> 
>         if (init_srcu_struct(&kvm->srcu))
>                 goto out_err_no_srcu;
>         if (init_srcu_struct(&kvm->irq_srcu))
>                 goto out_err_no_irq_srcu;
>         for (i = 0; i < KVM_NR_BUSES; i++) {
>                 kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
>                                         GFP_KERNEL);
>                 if (!kvm->buses[i])
>                         goto out_err;
>         }
> //HERE
>         r = kvm_arch_init_vm(kvm, type);
>         if (r)
>                 goto out_err;
> 
> Do you think that would be a problem?

Can you delay that to after the creation of the first VCPU?

Allocating AVIC data structures is not required if userspace has not
called KVM_CREATE_IRQCHIP or enabled KVM_CAP_SPLIT_IRQCHIP.

Paolo
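
In other words (an illustrative sketch, not from this series), the AVIC setup
could be deferred to VCPU creation and gated on an in-kernel irqchip.
avic_init_access_page() is a hypothetical helper, and the
apic_access_page_done flag is borrowed from the VMX side for illustration:

        static int avic_vcpu_setup(struct kvm_vcpu *vcpu)
        {
                struct kvm *kvm = vcpu->kvm;

                /* No in-kernel LAPIC, so AVIC cannot be used at all. */
                if (!irqchip_in_kernel(kvm))
                        return 0;

                /* Only the first VCPU needs to install the APIC access page. */
                if (kvm->arch.apic_access_page_done)
                        return 0;

                return avic_init_access_page(vcpu);     /* hypothetical helper */
        }
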
Suthikulpanit, Suravee March 30, 2016, 12:18 p.m. UTC | #7
Hi Paolo,

On 3/30/16 19:07, Paolo Bonzini wrote:
>
>
> On 30/03/2016 12:00, Suravee Suthikulpanit wrote:
>> Hi Paolo,
>>
>> On 3/29/16 18:47, Suravee Suthikulpanit wrote:
>>> Hi,
>>>
>>> On 03/29/2016 05:21 PM, Paolo Bonzini wrote:
>>>>
>>>>
>>>> On 29/03/2016 07:27, Suravee Suthikulpanit wrote:
>>>>>>
>>>>>>>> Adding function pointers in struct kvm_x86_ops for
>>>>>>>> processor-specific
>>>>>>>> layer to provide hooks for when KVM initialize and un-initialize VM.
>>>>>> This is not the only thing your patch is doing, and the "other" change
>>>>>> definitely needs a lot more explanation about why you did it and how
>>>>>> you
>>>>>> audited the code to ensure that it is safe.
>>>>>>
>>>>>> Paolo
>>>>>>
>>>>>
>>>>> Sorry, for not mentioning this earlier. I am moving the
>>>>> kvm_arch_init_vm() call mainly to go after mutex_init(&kvm->slots_lock)
>>>>> since I am calling the x86_set_memory_region() (which uses slots_lock)
>>>>> in the vm_init() hooks (for AVIC initialization).
>>>>>
>>>>> Lemme re-check if this would be safe for other code.
>>>>
>>>> No problem.  In the meanwhile a patch got in ("KVM: fix spin_lock_init
>>>> order on x86") that should help you.
>>>>
>>>> Thanks,
>>>>
>>>> Paolo
>>>>
>>>
>>> Right.... that's just what I need :) I'll re-base to the latest tip then.
>>
>> Actually, in the file virt/kvm/kvm_main.c, I still need to move the
>> kvm_arch_init_vm() to some place after the call to kvm_alloc_memslots()
>> since I am calling x86_set_memory_region() in the vm_init hook.
>>
>>          r = -ENOMEM;
>>          for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
>>                  kvm->memslots[i] = kvm_alloc_memslots();
>>                  if (!kvm->memslots[i])
>>                          goto out_err_no_srcu;
>>          }
>>
>>          if (init_srcu_struct(&kvm->srcu))
>>                  goto out_err_no_srcu;
>>          if (init_srcu_struct(&kvm->irq_srcu))
>>                  goto out_err_no_irq_srcu;
>>          for (i = 0; i < KVM_NR_BUSES; i++) {
>>                  kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
>>                                          GFP_KERNEL);
>>                  if (!kvm->buses[i])
>>                          goto out_err;
>>          }
>> //HERE
>>          r = kvm_arch_init_vm(kvm, type);
>>          if (r)
>>                  goto out_err;
>>
>> Do you think that would be a problem?
>
> Can you delay that to after the creation of the first VCPU?

Sure, I can. That's what I was doing originally, before we introduced the 
vm_init hook. I just thought that this would be a nice place for it.

> Allocating AVIC data structures is not required if userspace has not
> called KVM_CREATE_IRQCHIP or enabled KVM_CAP_SPLIT_IRQCHIP.
>
> Paolo
>

Okay.

Thanks,
Suravee

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 44adbb8..4b0dd0f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -828,6 +828,9 @@  struct kvm_x86_ops {
 	bool (*cpu_has_high_real_mode_segbase)(void);
 	void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
+	int (*vm_init)(struct kvm *kvm);
+	void (*vm_uninit)(struct kvm *kvm);
+
 	/* Create, but do not attach this VCPU */
 	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 429c3f5..4d2961d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7700,6 +7700,8 @@  void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
+	int ret = 0;
+
 	if (type)
 		return -EINVAL;
 
@@ -7724,7 +7726,10 @@  int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
 
-	return 0;
+	if (kvm_x86_ops->vm_init)
+		ret = kvm_x86_ops->vm_init(kvm);
+
+	return ret;
 }
 
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
@@ -7751,6 +7756,9 @@  static void kvm_free_vcpus(struct kvm *kvm)
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_arch_vcpu_free(vcpu);
 
+	if (kvm_x86_ops->vm_uninit)
+		kvm_x86_ops->vm_uninit(kvm);
+
 	mutex_lock(&kvm->lock);
 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
 		kvm->vcpus[i] = NULL;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1ca0258..5460325 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -536,10 +536,6 @@  static struct kvm *kvm_create_vm(unsigned long type)
 	if (!kvm)
 		return ERR_PTR(-ENOMEM);
 
-	r = kvm_arch_init_vm(kvm, type);
-	if (r)
-		goto out_err_no_disable;
-
 	r = hardware_enable_all();
 	if (r)
 		goto out_err_no_disable;
@@ -578,6 +574,10 @@  static struct kvm *kvm_create_vm(unsigned long type)
 	atomic_set(&kvm->users_count, 1);
 	INIT_LIST_HEAD(&kvm->devices);
 
+	r = kvm_arch_init_vm(kvm, type);
+	if (r)
+		goto out_err;
+
 	r = kvm_init_mmu_notifier(kvm);
 	if (r)
 		goto out_err;