[3/5] KVM: VMX: add struct kvm_vmx to hold VMX specific KVM vars
diff mbox

Message ID 1521573442-12206-4-git-send-email-sean.j.christopherson@intel.com
State New
Headers show

Commit Message

Sean Christopherson March 20, 2018, 7:17 p.m. UTC
Add struct kvm_vmx, which wraps struct kvm, and a helper to_kvm_vmx()
that retrieves 'struct kvm_vmx *' from 'struct kvm *'.  Move the VMX
specific variables out of kvm_arch and into kvm_vmx.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/include/asm/kvm_host.h |  4 ----
 arch/x86/kvm/vmx.c              | 46 +++++++++++++++++++++++++++--------------
 2 files changed, 31 insertions(+), 19 deletions(-)

Comments

Jim Mattson April 11, 2018, 10:20 p.m. UTC | #1
On Tue, Mar 20, 2018 at 12:17 PM, Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
> Add struct kvm_vmx, which wraps struct kvm, and a helper to_kvm_vmx()
> that retrieves 'struct kvm_vmx *' from 'struct kvm *'.  Move the VMX
> specific variables out of kvm_arch and into kvm_vmx.

>  static struct kvm *vmx_vm_alloc(void)
>  {
> -       return kzalloc(sizeof(struct kvm), GFP_KERNEL);
> +       struct kvm_vmx *kvm_vmx = kzalloc(sizeof(struct kvm_vmx), GFP_KERNEL);
> +       return &kvm_vmx->kvm;
>  }

struct kvm_vmx is getting quite large:
(gdb) p sizeof(struct kvm_vmx)
$1 = 42216

We sometimes find that kmalloc cannot satisfy order 4 allocation
requests due to fragmentation. Is vmalloc an option?
Paolo Bonzini April 12, 2018, 8:59 a.m. UTC | #2
On 12/04/2018 00:20, Jim Mattson wrote:
> On Tue, Mar 20, 2018 at 12:17 PM, Sean Christopherson
> <sean.j.christopherson@intel.com> wrote:
>> Add struct kvm_vmx, which wraps struct kvm, and a helper to_kvm_vmx()
>> that retrieves 'struct kvm_vmx *' from 'struct kvm *'.  Move the VMX
>> specific variables out of kvm_arch and into kvm_vmx.
> 
>>  static struct kvm *vmx_vm_alloc(void)
>>  {
>> -       return kzalloc(sizeof(struct kvm), GFP_KERNEL);
>> +       struct kvm_vmx *kvm_vmx = kzalloc(sizeof(struct kvm_vmx), GFP_KERNEL);
>> +       return &kvm_vmx->kvm;
>>  }
> 
> struct kvm_vmx is getting quite large:
> (gdb) p sizeof(struct kvm_vmx)
> $1 = 42216
> 
> We sometimes find that kmalloc cannot satisfy order 4 allocation
> requests due to fragmentation. Is vmalloc an option?

cpuid info could definitely be moved to a separate vmalloc-ed block, as
a start.

Paolo
Konrad Rzeszutek Wilk April 12, 2018, 2:05 p.m. UTC | #3
On Thu, Apr 12, 2018 at 10:59:51AM +0200, Paolo Bonzini wrote:
> On 12/04/2018 00:20, Jim Mattson wrote:
> > On Tue, Mar 20, 2018 at 12:17 PM, Sean Christopherson
> > <sean.j.christopherson@intel.com> wrote:
> >> Add struct kvm_vmx, which wraps struct kvm, and a helper to_kvm_vmx()
> >> that retrieves 'struct kvm_vmx *' from 'struct kvm *'.  Move the VMX
> >> specific variables out of kvm_arch and into kvm_vmx.
> > 
> >>  static struct kvm *vmx_vm_alloc(void)
> >>  {
> >> -       return kzalloc(sizeof(struct kvm), GFP_KERNEL);
> >> +       struct kvm_vmx *kvm_vmx = kzalloc(sizeof(struct kvm_vmx), GFP_KERNEL);
> >> +       return &kvm_vmx->kvm;
> >>  }
> > 
> > struct kvm_vmx is getting quite large:
> > (gdb) p sizeof(struct kvm_vmx)
> > $1 = 42216
> > 
> > We sometimes find that kmalloc cannot satisfy order 4 allocation
> > requests due to fragmentation. Is vmalloc an option?
> 
> cpuid info could definitely be moved to a separate vmalloc-ed block, as
> a start.

Didn't Karim post some patches to use vmalloc? Could this work piggyback on that?

> 
> Paolo
>
Jim Mattson April 12, 2018, 4:53 p.m. UTC | #4
On Thu, Apr 12, 2018 at 1:59 AM, Paolo Bonzini <pbonzini@redhat.com> wrote:
> On 12/04/2018 00:20, Jim Mattson wrote:
>> On Tue, Mar 20, 2018 at 12:17 PM, Sean Christopherson
>> <sean.j.christopherson@intel.com> wrote:
>>> Add struct kvm_vmx, which wraps struct kvm, and a helper to_kvm_vmx()
>>> that retrieves 'struct kvm_vmx *' from 'struct kvm *'.  Move the VMX
>>> specific variables out of kvm_arch and into kvm_vmx.
>>
>>>  static struct kvm *vmx_vm_alloc(void)
>>>  {
>>> -       return kzalloc(sizeof(struct kvm), GFP_KERNEL);
>>> +       struct kvm_vmx *kvm_vmx = kzalloc(sizeof(struct kvm_vmx), GFP_KERNEL);
>>> +       return &kvm_vmx->kvm;
>>>  }
>>
>> struct kvm_vmx is getting quite large:
>> (gdb) p sizeof(struct kvm_vmx)
>> $1 = 42216
>>
>> We sometimes find that kmalloc cannot satisfy order 4 allocation
>> requests due to fragmentation. Is vmalloc an option?
>
> cpuid info could definitely be moved to a separate vmalloc-ed block, as
> a start.
>
> Paolo

It's a small start, but it won't get us down to order 3...

(gdb) p sizeof(((struct kvm_vcpu_arch *)0)->cpuid_entries)
$1 = 3200

What is the objection to a single vmalloc for the entire structure? Is
there anything in there that needs physically contiguous pages?
Paolo Bonzini April 12, 2018, 5 p.m. UTC | #5
On 12/04/2018 18:53, Jim Mattson wrote:
>> cpuid info could definitely be moved to a separate vmalloc-ed block, as
>> a start.
>>
>> Paolo
> It's a small start, but it won't get us down to order 3...
> 
> (gdb) p sizeof(((struct kvm_vcpu_arch *)0)->cpuid_entries)
> $1 = 3200
> 
> What is the objection to a single vmalloc for the entire structure? Is
> there anything in there that needs physically contiguous pages?

No objection, just that I didn't know offhand.  If it works, let's go
for it.

Paolo

Patch
diff mbox

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 55240cd..a98e89c3 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -806,14 +806,10 @@  struct kvm_arch {
 	struct mutex apic_map_lock;
 	struct kvm_apic_map *apic_map;
 
-	unsigned int tss_addr;
 	bool apic_access_page_done;
 
 	gpa_t wall_clock;
 
-	bool ept_identity_pagetable_done;
-	gpa_t ept_identity_map_addr;
-
 	unsigned long irq_sources_bitmap;
 	s64 kvmclock_offset;
 	raw_spinlock_t tsc_write_lock;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 01de5f9..ccebad3 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -196,6 +196,14 @@  module_param(ple_window_max, int, S_IRUGO);
 
 extern const ulong vmx_return;
 
+struct kvm_vmx {
+	struct kvm kvm;
+
+	unsigned int tss_addr;
+	bool ept_identity_pagetable_done;
+	gpa_t ept_identity_map_addr;
+};
+
 #define NR_AUTOLOAD_MSRS 8
 
 struct vmcs {
@@ -698,6 +706,11 @@  enum segment_cache_field {
 	SEG_FIELD_NR = 4
 };
 
+static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
+{
+	return container_of(kvm, struct kvm_vmx, kvm);
+}
+
 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
 {
 	return container_of(vcpu, struct vcpu_vmx, vcpu);
@@ -4198,6 +4211,7 @@  static void enter_rmode(struct kvm_vcpu *vcpu)
 {
 	unsigned long flags;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
 
 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
@@ -4213,13 +4227,13 @@  static void enter_rmode(struct kvm_vcpu *vcpu)
 	 * Very old userspace does not call KVM_SET_TSS_ADDR before entering
 	 * vcpu. Warn the user that an update is overdue.
 	 */
-	if (!vcpu->kvm->arch.tss_addr)
+	if (!kvm_vmx->tss_addr)
 		printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR need to be "
 			     "called before entering vcpu\n");
 
 	vmx_segment_cache_clear(vmx);
 
-	vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr);
+	vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr);
 	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
 	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
 
@@ -4509,7 +4523,7 @@  static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 		    is_guest_mode(vcpu))
 			guest_cr3 = kvm_read_cr3(vcpu);
 		else
-			guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr;
+			guest_cr3 = to_kvm_vmx(vcpu->kvm)->ept_identity_map_addr;
 		ept_load_pdptrs(vcpu);
 	}
 
@@ -4950,7 +4964,7 @@  static int init_rmode_tss(struct kvm *kvm)
 	int idx, r;
 
 	idx = srcu_read_lock(&kvm->srcu);
-	fn = kvm->arch.tss_addr >> PAGE_SHIFT;
+	fn = to_kvm_vmx(kvm)->tss_addr >> PAGE_SHIFT;
 	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
 	if (r < 0)
 		goto out;
@@ -4976,22 +4990,23 @@  static int init_rmode_tss(struct kvm *kvm)
 
 static int init_rmode_identity_map(struct kvm *kvm)
 {
+	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
 	int i, idx, r = 0;
 	kvm_pfn_t identity_map_pfn;
 	u32 tmp;
 
-	/* Protect kvm->arch.ept_identity_pagetable_done. */
+	/* Protect kvm_vmx->ept_identity_pagetable_done. */
 	mutex_lock(&kvm->slots_lock);
 
-	if (likely(kvm->arch.ept_identity_pagetable_done))
+	if (likely(kvm_vmx->ept_identity_pagetable_done))
 		goto out2;
 
-	if (!kvm->arch.ept_identity_map_addr)
-		kvm->arch.ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
-	identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
+	if (!kvm_vmx->ept_identity_map_addr)
+		kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
+	identity_map_pfn = kvm_vmx->ept_identity_map_addr >> PAGE_SHIFT;
 
 	r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
-				    kvm->arch.ept_identity_map_addr, PAGE_SIZE);
+				    kvm_vmx->ept_identity_map_addr, PAGE_SIZE);
 	if (r < 0)
 		goto out2;
 
@@ -5008,7 +5023,7 @@  static int init_rmode_identity_map(struct kvm *kvm)
 		if (r < 0)
 			goto out;
 	}
-	kvm->arch.ept_identity_pagetable_done = true;
+	kvm_vmx->ept_identity_pagetable_done = true;
 
 out:
 	srcu_read_unlock(&kvm->srcu, idx);
@@ -6074,13 +6089,13 @@  static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 				    PAGE_SIZE * 3);
 	if (ret)
 		return ret;
-	kvm->arch.tss_addr = addr;
+	to_kvm_vmx(kvm)->tss_addr = addr;
 	return init_rmode_tss(kvm);
 }
 
 static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
 {
-	kvm->arch.ept_identity_map_addr = ident_addr;
+	to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr;
 	return 0;
 }
 
@@ -9739,12 +9754,13 @@  STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
 
 static struct kvm *vmx_vm_alloc(void)
 {
-	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+	struct kvm_vmx *kvm_vmx = kzalloc(sizeof(struct kvm_vmx), GFP_KERNEL);
+	return &kvm_vmx->kvm;
 }
 
 static void vmx_vm_free(struct kvm *kvm)
 {
-	kfree(kvm);
+	kfree(to_kvm_vmx(kvm));
 }
 
 static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)