diff mbox

[4/4] kvm, mem-hotplug: Update apic access page when it is migrated.

Message ID 1404291637-15048-5-git-send-email-tangchen@cn.fujitsu.com (mailing list archive)
State New, archived
Headers show

Commit Message

tangchen July 2, 2014, 9 a.m. UTC
apic access page is pinned in memory, and as a result it cannot be
migrated/hot-removed.

Actually it doesn't need to be pinned in memory.

This patch introduces a new vcpu request: KVM_REQ_MIGRATE_EPT. This request
is made when kvm_mmu_notifier_invalidate_page() is called, as the page is
unmapped from the qemu user space, to reset the APIC_ACCESS_ADDR pointer in
each online vcpu to 0. It is also made when an ept violation happens, to
reset APIC_ACCESS_ADDR to the new page's phys_addr (host phys_addr).
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/mmu.c              | 15 +++++++++++++++
 arch/x86/kvm/vmx.c              |  9 ++++++++-
 arch/x86/kvm/x86.c              | 20 ++++++++++++++++++++
 include/linux/kvm_host.h        |  1 +
 virt/kvm/kvm_main.c             | 15 +++++++++++++++
 6 files changed, 61 insertions(+), 1 deletion(-)

Comments

Gleb Natapov July 3, 2014, 1:55 p.m. UTC | #1
On Wed, Jul 02, 2014 at 05:00:37PM +0800, Tang Chen wrote:
> apic access page is pinned in memory, and as a result it cannot be
> migrated/hot-removed.
> 
> Actually it doesn't need to be pinned in memory.
> 
> This patch introduces a new vcpu request: KVM_REQ_MIGRATE_EPT. This requet
> will be made when kvm_mmu_notifier_invalidate_page() is called when the page
> is unmapped from the qemu user space to reset APIC_ACCESS_ADDR pointer in
> each online vcpu to 0. And will also be made when ept violation happens to
> reset APIC_ACCESS_ADDR to the new page phys_addr (host phys_addr).
> ---
>  arch/x86/include/asm/kvm_host.h |  2 ++
>  arch/x86/kvm/mmu.c              | 15 +++++++++++++++
>  arch/x86/kvm/vmx.c              |  9 ++++++++-
>  arch/x86/kvm/x86.c              | 20 ++++++++++++++++++++
>  include/linux/kvm_host.h        |  1 +
>  virt/kvm/kvm_main.c             | 15 +++++++++++++++
>  6 files changed, 61 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 8771c0f..f104b87 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -575,6 +575,7 @@ struct kvm_arch {
>  
>  	unsigned int tss_addr;
>  	struct page *apic_access_page;
> +	bool apic_access_page_migrated;
Better have two requests KVM_REQ_APIC_PAGE_MAP, KVM_REQ_APIC_PAGE_UNMAP IMO.

>  
>  	gpa_t wall_clock;
>  
> @@ -739,6 +740,7 @@ struct kvm_x86_ops {
>  	void (*hwapic_isr_update)(struct kvm *kvm, int isr);
>  	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
>  	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
> +	void (*set_apic_access_page_addr)(struct kvm *kvm, hpa_t hpa);
>  	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
>  	void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
>  	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index c0d72f6..a655444 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -3436,6 +3436,21 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
>  		kvm_make_request(KVM_REQ_MIGRATE_EPT, vcpu);
>  	}
>  
> +	if (gpa == VMX_APIC_ACCESS_PAGE_ADDR &&
> +	    vcpu->kvm->arch.apic_access_page_migrated) {
Why check arch.apic_access_page_migrated here? Isn't it enough that the fault is on the apic
address?

> +		int i;
> +
> +		vcpu->kvm->arch.apic_access_page_migrated = false;
> +
> +		/*
> +		 * We need update APIC_ACCESS_ADDR pointer in each VMCS of
> +		 * all the online vcpus.
> +		 */
> +		for (i = 0; i < atomic_read(&vcpu->kvm->online_vcpus); i++)
> +			kvm_make_request(KVM_REQ_MIGRATE_APIC,
> +					 vcpu->kvm->vcpus[i]);
make_all_cpus_request(). You need to kick all vcpus out of guest mode.
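Roughly, with both of the points above applied, the hunk would collapse to
something like this sketch (note that make_all_cpus_request() is static to
virt/kvm/kvm_main.c at this point, so calling it from mmu.c would need it
exported or wrapped):

	if (gpa == VMX_APIC_ACCESS_PAGE_ADDR) {
		/*
		 * Kick every vcpu out of guest mode so each one reloads
		 * APIC_ACCESS_ADDR before its next guest entry.
		 */
		make_all_cpus_request(vcpu->kvm, KVM_REQ_MIGRATE_APIC);
	}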

> +	}
> +
>  	spin_unlock(&vcpu->kvm->mmu_lock);
>  
>  	return r;
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index c336cb3..abc152f 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -3988,7 +3988,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
>  	if (r)
>  		goto out;
>  
> -	page = gfn_to_page(kvm, VMX_APIC_ACCESS_PAGE_ADDR >> PAGE_SHIFT);
> +	page = gfn_to_page_no_pin(kvm, VMX_APIC_ACCESS_PAGE_ADDR >> PAGE_SHIFT);
>  	if (is_error_page(page)) {
>  		r = -EFAULT;
>  		goto out;
> @@ -7075,6 +7075,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
>  	vmx_set_msr_bitmap(vcpu);
>  }
>  
> +static void vmx_set_apic_access_page_addr(struct kvm *kvm, hpa_t hpa)
> +{
> +	if (vm_need_virtualize_apic_accesses(kvm))
This shouldn't even be called if the apic access page is not supported. Neither
the mmu_notifier path nor the tdp_page_fault path should ever see the 0xfee00000
address. BUG() is more appropriate here.
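A minimal sketch of the same function with the check turned into an
assertion, as suggested:

static void vmx_set_apic_access_page_addr(struct kvm *kvm, hpa_t hpa)
{
	/* Nobody should reach this unless APIC access page
	 * virtualization is actually in use. */
	BUG_ON(!vm_need_virtualize_apic_accesses(kvm));
	vmcs_write64(APIC_ACCESS_ADDR, hpa);
}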


> +		vmcs_write64(APIC_ACCESS_ADDR, hpa);
> +}
> +
>  static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
>  {
>  	u16 status;
> @@ -8846,6 +8852,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
>  	.enable_irq_window = enable_irq_window,
>  	.update_cr8_intercept = update_cr8_intercept,
>  	.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
> +	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
svm needs that too.
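Alternatively, if svm turns out to have nothing useful to do here, the
callback could be left NULL for svm and treated as optional on the x86.c
side; that is just one option, sketched here, not what this series does:

	/* in x86.c, before using the hook: */
	if (kvm_x86_ops->set_apic_access_page_addr)
		kvm_x86_ops->set_apic_access_page_addr(kvm, hpa);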

>  	.vm_has_apicv = vmx_vm_has_apicv,
>  	.load_eoi_exitmap = vmx_load_eoi_exitmap,
>  	.hwapic_irr_update = vmx_hwapic_irr_update,
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index a26524f..14e7174 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -5943,6 +5943,24 @@ static void vcpu_migrated_page_update_ept(struct kvm_vcpu *vcpu)
>  	}
>  }
>  
> +static void vcpu_migrated_page_update_apic(struct kvm_vcpu *vcpu)
> +{
> +	struct kvm *kvm = vcpu->kvm;
> +
> +	if (kvm->arch.apic_access_page_migrated) {
> +		if (kvm->arch.apic_access_page)
> +			kvm->arch.apic_access_page = pfn_to_page(0);
All vcpus will access apic_access_page without locking here. Maybe
set kvm->arch.apic_access_page to zero in the mmu_notifier and here call
 kvm_x86_ops->set_apic_access_page_addr(kvm, kvm->arch.apic_access_page);


> +		kvm_x86_ops->set_apic_access_page_addr(kvm, 0x0ull);
> +	} else {
> +		struct page *page;
> +		page = gfn_to_page_no_pin(kvm,
> +				VMX_APIC_ACCESS_PAGE_ADDR >> PAGE_SHIFT);
> +		kvm->arch.apic_access_page = page;
Same, set it during tdp fault when page is mapped.

> +		kvm_x86_ops->set_apic_access_page_addr(kvm,
> +						       page_to_phys(page));
> +	}
> +}
> +
>  /*
>   * Returns 1 to let __vcpu_run() continue the guest execution loop without
>   * exiting to the userspace.  Otherwise, the value will be returned to the
> @@ -6005,6 +6023,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>  			vcpu_scan_ioapic(vcpu);
>  		if (kvm_check_request(KVM_REQ_MIGRATE_EPT, vcpu))
>  			vcpu_migrated_page_update_ept(vcpu);
> +		if (kvm_check_request(KVM_REQ_MIGRATE_APIC, vcpu))
> +			vcpu_migrated_page_update_apic(vcpu);
>  	}
>  
>  	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 4b7e51a..e2ad65e 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -137,6 +137,7 @@ static inline bool is_error_page(struct page *page)
>  #define KVM_REQ_ENABLE_IBS        23
>  #define KVM_REQ_DISABLE_IBS       24
>  #define KVM_REQ_MIGRATE_EPT       25
> +#define KVM_REQ_MIGRATE_APIC      26
>  
>  #define KVM_USERSPACE_IRQ_SOURCE_ID		0
>  #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index d271e89..f06438c 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -54,6 +54,7 @@
>  #include <asm/io.h>
>  #include <asm/uaccess.h>
>  #include <asm/pgtable.h>
> +#include <asm/vmx.h>
>  
>  #include "coalesced_mmio.h"
>  #include "async_pf.h"
> @@ -300,6 +301,20 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
>  		kvm_make_request(KVM_REQ_MIGRATE_EPT, kvm->vcpus[0]);
>  	}
>  
> +	if (address ==
> +	    gfn_to_hva(kvm, VMX_APIC_ACCESS_PAGE_ADDR >> PAGE_SHIFT)) {
> +		int i;
> +
> +		kvm->arch.apic_access_page_migrated = true;
> +
> +		/*
> +		 * We need update APIC_ACCESS_ADDR pointer in each VMCS of
> +		 * all the online vcpus.
> +		 */
> +		for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
> +			kvm_make_request(KVM_REQ_MIGRATE_APIC, kvm->vcpus[i]);
> +	}
make_all_cpus_request()

Also you need to drop put_page(kvm->arch.apic_access_page); from x86.c
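Presumably that is the put_page() in kvm_arch_destroy_vm(); with the page no
longer pinned, these lines would simply be dropped (sketch, assuming the
cleanup still lives there):

	if (kvm->arch.apic_access_page)
		put_page(kvm->arch.apic_access_page);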
> +
>  	spin_unlock(&kvm->mmu_lock);
>  	srcu_read_unlock(&kvm->srcu, idx);
>  }
> -- 
> 1.8.3.1
> 

--
			Gleb.
tangchen July 4, 2014, 2:18 a.m. UTC | #2
Hi Gleb,

Thanks for the advices. Please see below.

On 07/03/2014 09:55 PM, Gleb Natapov wrote:
......
>> @@ -575,6 +575,7 @@ struct kvm_arch {
>>
>>   	unsigned int tss_addr;
>>   	struct page *apic_access_page;
>> +	bool apic_access_page_migrated;
> Better have two requests KVM_REQ_APIC_PAGE_MAP, KVM_REQ_APIC_PAGE_UNMAP IMO.
>

vcpu->requests is an unsigned long, so we can only have 64 requests. Isn't
adding two requests for the apic page, plus another similar two for the ept
page, too many? Not sure.

>>
>>   	gpa_t wall_clock;
>>
>> @@ -739,6 +740,7 @@ struct kvm_x86_ops {
>>   	void (*hwapic_isr_update)(struct kvm *kvm, int isr);
>>   	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
>>   	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
>> +	void (*set_apic_access_page_addr)(struct kvm *kvm, hpa_t hpa);
>>   	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
>>   	void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
>>   	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>> index c0d72f6..a655444 100644
>> --- a/arch/x86/kvm/mmu.c
>> +++ b/arch/x86/kvm/mmu.c
>> @@ -3436,6 +3436,21 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
>>   		kvm_make_request(KVM_REQ_MIGRATE_EPT, vcpu);
>>   	}
>>
>> +	if (gpa == VMX_APIC_ACCESS_PAGE_ADDR&&
>> +	    vcpu->kvm->arch.apic_access_page_migrated) {
> Why check arch.apic_access_page_migrated here? Isn't it enough that the fault is on apic
> address.
>

True. It's enough. Followed.

>> +		int i;
>> +
>> +		vcpu->kvm->arch.apic_access_page_migrated = false;
>> +
>> +		/*
>> +		 * We need update APIC_ACCESS_ADDR pointer in each VMCS of
>> +		 * all the online vcpus.
>> +		 */
>> +		for (i = 0; i<  atomic_read(&vcpu->kvm->online_vcpus); i++)
>> +			kvm_make_request(KVM_REQ_MIGRATE_APIC,
>> +					 vcpu->kvm->vcpus[i]);
> make_all_cpus_request(). You need to kick all vcpus from a guest mode.
>

OK, followed. But would you please explain more about this? :)
Why do we need to kick all vcpus out of guest mode when making a request to
all vcpus?

>> +	}
>> +
>>   	spin_unlock(&vcpu->kvm->mmu_lock);
>>
>>   	return r;
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index c336cb3..abc152f 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -3988,7 +3988,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
>>   	if (r)
>>   		goto out;
>>
>> -	page = gfn_to_page(kvm, VMX_APIC_ACCESS_PAGE_ADDR>>  PAGE_SHIFT);
>> +	page = gfn_to_page_no_pin(kvm, VMX_APIC_ACCESS_PAGE_ADDR>>  PAGE_SHIFT);
>>   	if (is_error_page(page)) {
>>   		r = -EFAULT;
>>   		goto out;
>> @@ -7075,6 +7075,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
>>   	vmx_set_msr_bitmap(vcpu);
>>   }
>>
>> +static void vmx_set_apic_access_page_addr(struct kvm *kvm, hpa_t hpa)
>> +{
>> +	if (vm_need_virtualize_apic_accesses(kvm))
> This shouldn't even been called if apic access page is not supported. Nor
> mmu_notifier path neither tdp_page_fault path should ever see 0xfee00000
> address. BUG() is more appropriate here.
>

I don't quite understand. Why would calling this function here lead to a bug?
(Sorry, I don't quite understand the internals of KVM. Please help.)

>
>> +		vmcs_write64(APIC_ACCESS_ADDR, hpa);
>> +}
>> +
>>   static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
>>   {
>>   	u16 status;
>> @@ -8846,6 +8852,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
>>   	.enable_irq_window = enable_irq_window,
>>   	.update_cr8_intercept = update_cr8_intercept,
>>   	.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
>> +	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
> svm needs that too.
>

OK, will add one for svm.

>>   	.vm_has_apicv = vmx_vm_has_apicv,
>>   	.load_eoi_exitmap = vmx_load_eoi_exitmap,
>>   	.hwapic_irr_update = vmx_hwapic_irr_update,
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> index a26524f..14e7174 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -5943,6 +5943,24 @@ static void vcpu_migrated_page_update_ept(struct kvm_vcpu *vcpu)
>>   	}
>>   }
>>
>> +static void vcpu_migrated_page_update_apic(struct kvm_vcpu *vcpu)
>> +{
>> +	struct kvm *kvm = vcpu->kvm;
>> +
>> +	if (kvm->arch.apic_access_page_migrated) {
>> +		if (kvm->arch.apic_access_page)
>> +			kvm->arch.apic_access_page = pfn_to_page(0);
> All vcpus will access apic_access_page without locking here. May be
> set kvm->arch.apic_access_page to zero in mmu_notifier and here call
>   kvm_x86_ops->set_apic_access_page_addr(kvm, kvm->arch.apic_access_page);
>

I'm a little confused. The apic access page's phys_addr is stored in the vmcs,
and I think it will be used by the vcpu directly to access the physical page.
Setting kvm->arch.apic_access_page to zero will not stop it, right?

I'm wondering what happens when the apic page is migrated, but the vmcs is
still holding its old phys_addr before the vcpu request is handled.

>
>> +		kvm_x86_ops->set_apic_access_page_addr(kvm, 0x0ull);
>> +	} else {
>> +		struct page *page;
>> +		page = gfn_to_page_no_pin(kvm,
>> +				VMX_APIC_ACCESS_PAGE_ADDR>>  PAGE_SHIFT);
>> +		kvm->arch.apic_access_page = page;
> Same, set it during tdp fault when page is mapped.
>
>> +		kvm_x86_ops->set_apic_access_page_addr(kvm,
>> +						       page_to_phys(page));
>> +	}
>> +}
>> +
>>   /*
>>    * Returns 1 to let __vcpu_run() continue the guest execution loop without
>>    * exiting to the userspace.  Otherwise, the value will be returned to the
>> @@ -6005,6 +6023,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>>   			vcpu_scan_ioapic(vcpu);
>>   		if (kvm_check_request(KVM_REQ_MIGRATE_EPT, vcpu))
>>   			vcpu_migrated_page_update_ept(vcpu);
>> +		if (kvm_check_request(KVM_REQ_MIGRATE_APIC, vcpu))
>> +			vcpu_migrated_page_update_apic(vcpu);
>>   	}
>>
>>   	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
>> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
>> index 4b7e51a..e2ad65e 100644
>> --- a/include/linux/kvm_host.h
>> +++ b/include/linux/kvm_host.h
>> @@ -137,6 +137,7 @@ static inline bool is_error_page(struct page *page)
>>   #define KVM_REQ_ENABLE_IBS        23
>>   #define KVM_REQ_DISABLE_IBS       24
>>   #define KVM_REQ_MIGRATE_EPT       25
>> +#define KVM_REQ_MIGRATE_APIC      26
>>
>>   #define KVM_USERSPACE_IRQ_SOURCE_ID		0
>>   #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
>> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
>> index d271e89..f06438c 100644
>> --- a/virt/kvm/kvm_main.c
>> +++ b/virt/kvm/kvm_main.c
>> @@ -54,6 +54,7 @@
>>   #include<asm/io.h>
>>   #include<asm/uaccess.h>
>>   #include<asm/pgtable.h>
>> +#include<asm/vmx.h>
>>
>>   #include "coalesced_mmio.h"
>>   #include "async_pf.h"
>> @@ -300,6 +301,20 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
>>   		kvm_make_request(KVM_REQ_MIGRATE_EPT, kvm->vcpus[0]);
>>   	}
>>
>> +	if (address ==
>> +	    gfn_to_hva(kvm, VMX_APIC_ACCESS_PAGE_ADDR>>  PAGE_SHIFT)) {
>> +		int i;
>> +
>> +		kvm->arch.apic_access_page_migrated = true;
>> +
>> +		/*
>> +		 * We need update APIC_ACCESS_ADDR pointer in each VMCS of
>> +		 * all the online vcpus.
>> +		 */
>> +		for (i = 0; i<  atomic_read(&kvm->online_vcpus); i++)
>> +			kvm_make_request(KVM_REQ_MIGRATE_APIC, kvm->vcpus[i]);
>> +	}
> make_all_cpus_request()
>
> Also you need to drop put_page(kvm->arch.apic_access_page); from x86.c

Yes. Forgot it.

Thanks. :)

>> +
>>   	spin_unlock(&kvm->mmu_lock);
>>   	srcu_read_unlock(&kvm->srcu, idx);
>>   }
>> --
>> 1.8.3.1
>>
>
> --
> 			Gleb.
> .
>
Gleb Natapov July 4, 2014, 10:13 a.m. UTC | #4
On Fri, Jul 04, 2014 at 10:18:25AM +0800, Tang Chen wrote:
> Hi Gleb,
> 
> Thanks for the advices. Please see below.
> 
> On 07/03/2014 09:55 PM, Gleb Natapov wrote:
> ......
> >>@@ -575,6 +575,7 @@ struct kvm_arch {
> >>
> >>  	unsigned int tss_addr;
> >>  	struct page *apic_access_page;
> >>+	bool apic_access_page_migrated;
> >Better have two requests KVM_REQ_APIC_PAGE_MAP, KVM_REQ_APIC_PAGE_UNMAP IMO.
> >
> 
> vcpu->requests is an unsigned long, and we can only has 64 requests. Isn't
> adding two requests for apic page and another similar two for ept page too
> many ?  Not sure.
> 
Let's not worry about that for now. Maybe it is enough to have only one
KVM_REQ_APIC_PAGE_RELOAD request: set apic_access_page to a new value
before sending the request and reload whatever is in apic_access_page
during KVM_REQ_APIC_PAGE_RELOAD processing. Or we can even reload
apic_access_page as part of mmu reload and reuse KVM_REQ_MMU_RELOAD.
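Loosely sketched, that single-request flow could look like the following,
where KVM_REQ_APIC_PAGE_RELOAD and new_page are placeholders rather than
names from this series:

	/* whoever learns the new location updates the shadow value first ... */
	kvm->arch.apic_access_page = new_page;
	/* ... then forces every vcpu to reload it before its next guest entry */
	make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);

	/* and in vcpu_enter_guest(): */
	if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
		kvm_x86_ops->set_apic_access_page_addr(vcpu->kvm,
			page_to_phys(vcpu->kvm->arch.apic_access_page));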

> >>
> >>  	gpa_t wall_clock;
> >>
> >>@@ -739,6 +740,7 @@ struct kvm_x86_ops {
> >>  	void (*hwapic_isr_update)(struct kvm *kvm, int isr);
> >>  	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
> >>  	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
> >>+	void (*set_apic_access_page_addr)(struct kvm *kvm, hpa_t hpa);
> >>  	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
> >>  	void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
> >>  	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
> >>diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> >>index c0d72f6..a655444 100644
> >>--- a/arch/x86/kvm/mmu.c
> >>+++ b/arch/x86/kvm/mmu.c
> >>@@ -3436,6 +3436,21 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
> >>  		kvm_make_request(KVM_REQ_MIGRATE_EPT, vcpu);
> >>  	}
> >>
> >>+	if (gpa == VMX_APIC_ACCESS_PAGE_ADDR&&
> >>+	    vcpu->kvm->arch.apic_access_page_migrated) {
> >Why check arch.apic_access_page_migrated here? Isn't it enough that the fault is on apic
> >address.
> >
> 
> True. It's enough. Followed.
> 
> >>+		int i;
> >>+
> >>+		vcpu->kvm->arch.apic_access_page_migrated = false;
> >>+
> >>+		/*
> >>+		 * We need update APIC_ACCESS_ADDR pointer in each VMCS of
> >>+		 * all the online vcpus.
> >>+		 */
> >>+		for (i = 0; i<  atomic_read(&vcpu->kvm->online_vcpus); i++)
> >>+			kvm_make_request(KVM_REQ_MIGRATE_APIC,
> >>+					 vcpu->kvm->vcpus[i]);
> >make_all_cpus_request(). You need to kick all vcpus from a guest mode.
> >
> 
> OK, followed. But would you please explain more about this. :)
> Why need to kick all vcpus from guest mode when making request to all vcpus
> ?
Because if you do not force the other vcpus out of guest mode they will not reload
the apic_access_page value until the next vmexit, but since the EPT page table now
has a mapping for 0xfee00000, an access to this address will not cause an EPT
violation and will not cause an apic exit either.
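For reference, a rough sketch (simplified, not the exact kvm_main.c code) of
what make_all_cpus_request() does beyond the plain loop:

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_make_request(KVM_REQ_MIGRATE_APIC, vcpu);
		smp_mb();	/* order the request bit against the mode read */
		if (vcpu->mode == IN_GUEST_MODE)
			kvm_vcpu_kick(vcpu);	/* IPI forces a vmexit now */
	}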

> 
> >>+	}
> >>+
> >>  	spin_unlock(&vcpu->kvm->mmu_lock);
> >>
> >>  	return r;
> >>diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> >>index c336cb3..abc152f 100644
> >>--- a/arch/x86/kvm/vmx.c
> >>+++ b/arch/x86/kvm/vmx.c
> >>@@ -3988,7 +3988,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
> >>  	if (r)
> >>  		goto out;
> >>
> >>-	page = gfn_to_page(kvm, VMX_APIC_ACCESS_PAGE_ADDR>>  PAGE_SHIFT);
> >>+	page = gfn_to_page_no_pin(kvm, VMX_APIC_ACCESS_PAGE_ADDR>>  PAGE_SHIFT);
> >>  	if (is_error_page(page)) {
> >>  		r = -EFAULT;
> >>  		goto out;
> >>@@ -7075,6 +7075,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
> >>  	vmx_set_msr_bitmap(vcpu);
> >>  }
> >>
> >>+static void vmx_set_apic_access_page_addr(struct kvm *kvm, hpa_t hpa)
> >>+{
> >>+	if (vm_need_virtualize_apic_accesses(kvm))
> >This shouldn't even been called if apic access page is not supported. Nor
> >mmu_notifier path neither tdp_page_fault path should ever see 0xfee00000
> >address. BUG() is more appropriate here.
> >
> 
> I don't quite understand. Why calling this function here will leed to bug ?
> (Sorry, I'm not quite understand the internal of KVM. Please help.)
I didn't say that calling this function here will lead to a bug. I am saying that
if vm_need_virtualize_apic_accesses() is false this function should not be called
at all, so this check is redundant.

> 
> >
> >>+		vmcs_write64(APIC_ACCESS_ADDR, hpa);
> >>+}
> >>+
> >>  static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
> >>  {
> >>  	u16 status;
> >>@@ -8846,6 +8852,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
> >>  	.enable_irq_window = enable_irq_window,
> >>  	.update_cr8_intercept = update_cr8_intercept,
> >>  	.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
> >>+	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
> >svm needs that too.
> >
> 
> OK, will add one for svm.
> 
> >>  	.vm_has_apicv = vmx_vm_has_apicv,
> >>  	.load_eoi_exitmap = vmx_load_eoi_exitmap,
> >>  	.hwapic_irr_update = vmx_hwapic_irr_update,
> >>diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> >>index a26524f..14e7174 100644
> >>--- a/arch/x86/kvm/x86.c
> >>+++ b/arch/x86/kvm/x86.c
> >>@@ -5943,6 +5943,24 @@ static void vcpu_migrated_page_update_ept(struct kvm_vcpu *vcpu)
> >>  	}
> >>  }
> >>
> >>+static void vcpu_migrated_page_update_apic(struct kvm_vcpu *vcpu)
> >>+{
> >>+	struct kvm *kvm = vcpu->kvm;
> >>+
> >>+	if (kvm->arch.apic_access_page_migrated) {
> >>+		if (kvm->arch.apic_access_page)
> >>+			kvm->arch.apic_access_page = pfn_to_page(0);
> >All vcpus will access apic_access_page without locking here. May be
> >set kvm->arch.apic_access_page to zero in mmu_notifier and here call
> >  kvm_x86_ops->set_apic_access_page_addr(kvm, kvm->arch.apic_access_page);
> >
> 
> I'm a little confused. apic access page's phys_addr is stored in vmcs, and
> I think it will be used by vcpu directly to access the physical page.
> Setting kvm->arch.apic_access_page to zero will not stop it, right ?
> 
Right, kvm->arch.apic_access_page is just a shadow value for whatever is written
in the vmcs. After setting it, all vcpus need to update their vmcs values.

> I'm wondering what happens when apic page is migrated, but the vmcs is still
> holding its old phys_addr before the vcpu request is handled.
> 
The apic page should not be migrated until all vcpus are forced out of guest mode
and instructed to reload the new value on the next guest entry. That's what we are
trying to achieve here.

--
			Gleb.
tangchen July 7, 2014, 6:17 a.m. UTC | #5
Hi Gleb,

Thanks for all the advices. Please see below.

On 07/04/2014 06:13 PM, Gleb Natapov wrote:
......
>>>> +static void vmx_set_apic_access_page_addr(struct kvm *kvm, hpa_t hpa)
>>>> +{
>>>> +	if (vm_need_virtualize_apic_accesses(kvm))
>>> This shouldn't even been called if apic access page is not supported. Nor
>>> mmu_notifier path neither tdp_page_fault path should ever see 0xfee00000
>>> address. BUG() is more appropriate here.
>>>
>>
>> I don't quite understand. Why calling this function here will leed to bug ?
>> (Sorry, I'm not quite understand the internal of KVM. Please help.)
> I didn't say that calling this function here will lead to a bug. I am saying that
> if vm_need_virtualize_apic_accesses() is false this function should not be called
> at all, so this check is redundant.
>

Do you mean that when vm_need_virtualize_apic_accesses() is false, this function
should not be called at all? That it has to be true here?

......
>>>> +	if (kvm->arch.apic_access_page_migrated) {
>>>> +		if (kvm->arch.apic_access_page)
>>>> +			kvm->arch.apic_access_page = pfn_to_page(0);
>>> All vcpus will access apic_access_page without locking here. May be
>>> set kvm->arch.apic_access_page to zero in mmu_notifier and here call
>>>   kvm_x86_ops->set_apic_access_page_addr(kvm, kvm->arch.apic_access_page);
>>>
>>
>> I'm a little confused. apic access page's phys_addr is stored in vmcs, and
>> I think it will be used by vcpu directly to access the physical page.
>> Setting kvm->arch.apic_access_page to zero will not stop it, right ?
>>
> Right, kvm->arch.apic_access_page is just a shadow value for whatever is written
> in vmcs. After setting it all vcpus need to update their vmcs values.
>
>> I'm wondering what happens when apic page is migrated, but the vmcs is still
>> holding its old phys_addr before the vcpu request is handled.
>>
> apic page should not be migrated untill all vpus are forced out of a guest mode and
> instructed to reload new value on a next guest entry. That's what we are trying to
> achieve here.
>

So, setting the VMCS APIC_ACCESS_ADDR pointer to zero will not stop the vcpu
from accessing the apic access page, right?

If so, all the vcpus have to stop until the apic page finishes its migration
and the new value is set in each vcpu, which means we should stop the guest,
right?

Thanks.



tangchen July 7, 2014, 9:52 a.m. UTC | #6
Hi Gleb,

The guest hang problem has been solved.

When the mmu_notifier is called, I set the VMCS APIC_ACCESS_ADDR to the new
value instead of setting it to 0, and only update kvm->arch.apic_access_page
at the next ept violation.

The guest is running well now.

I'll post the new patches tomorrow. ;)

Thanks.


On 07/04/2014 06:13 PM, Gleb Natapov wrote:
> On Fri, Jul 04, 2014 at 10:18:25AM +0800, Tang Chen wrote:
>> Hi Gleb,
>>
>> Thanks for the advices. Please see below.
>>
>> On 07/03/2014 09:55 PM, Gleb Natapov wrote:
>> ......
>>>> @@ -575,6 +575,7 @@ struct kvm_arch {
>>>>
>>>>   	unsigned int tss_addr;
>>>>   	struct page *apic_access_page;
>>>> +	bool apic_access_page_migrated;
>>> Better have two requests KVM_REQ_APIC_PAGE_MAP, KVM_REQ_APIC_PAGE_UNMAP IMO.
>>>
>>
>> vcpu->requests is an unsigned long, and we can only has 64 requests. Isn't
>> adding two requests for apic page and another similar two for ept page too
>> many ?  Not sure.
>>
> Lets not worry about that for now. May be it is enough to have only one
> KVM_REQ_APIC_PAGE_RELOAD request set apic_access_page to a new value
> before sending the request and reload whatever is in apic_access_page
> during KVM_REQ_APIC_PAGE_RELOAD processing. Or we can even reload
> apic_access_page as part of mmu reload and reuse KVM_REQ_MMU_RELOAD.
>
>>>>
>>>>   	gpa_t wall_clock;
>>>>
>>>> @@ -739,6 +740,7 @@ struct kvm_x86_ops {
>>>>   	void (*hwapic_isr_update)(struct kvm *kvm, int isr);
>>>>   	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
>>>>   	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
>>>> +	void (*set_apic_access_page_addr)(struct kvm *kvm, hpa_t hpa);
>>>>   	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
>>>>   	void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
>>>>   	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
>>>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>>>> index c0d72f6..a655444 100644
>>>> --- a/arch/x86/kvm/mmu.c
>>>> +++ b/arch/x86/kvm/mmu.c
>>>> @@ -3436,6 +3436,21 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
>>>>   		kvm_make_request(KVM_REQ_MIGRATE_EPT, vcpu);
>>>>   	}
>>>>
>>>> +	if (gpa == VMX_APIC_ACCESS_PAGE_ADDR&&
>>>> +	    vcpu->kvm->arch.apic_access_page_migrated) {
>>> Why check arch.apic_access_page_migrated here? Isn't it enough that the fault is on apic
>>> address.
>>>
>>
>> True. It's enough. Followed.
>>
>>>> +		int i;
>>>> +
>>>> +		vcpu->kvm->arch.apic_access_page_migrated = false;
>>>> +
>>>> +		/*
>>>> +		 * We need update APIC_ACCESS_ADDR pointer in each VMCS of
>>>> +		 * all the online vcpus.
>>>> +		 */
>>>> +		for (i = 0; i<   atomic_read(&vcpu->kvm->online_vcpus); i++)
>>>> +			kvm_make_request(KVM_REQ_MIGRATE_APIC,
>>>> +					 vcpu->kvm->vcpus[i]);
>>> make_all_cpus_request(). You need to kick all vcpus from a guest mode.
>>>
>>
>> OK, followed. But would you please explain more about this. :)
>> Why need to kick all vcpus from guest mode when making request to all vcpus
>> ?
> Because if you do not force other vcpus from a guest mode they will not reload
> apic_access_page value till next vmexit, but since EPT page table now has a mapping
> for 0xfee00000 access to this address will not cause EPT violation and will not cause
> apic exit either.
>
>>
>>>> +	}
>>>> +
>>>>   	spin_unlock(&vcpu->kvm->mmu_lock);
>>>>
>>>>   	return r;
>>>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>>>> index c336cb3..abc152f 100644
>>>> --- a/arch/x86/kvm/vmx.c
>>>> +++ b/arch/x86/kvm/vmx.c
>>>> @@ -3988,7 +3988,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
>>>>   	if (r)
>>>>   		goto out;
>>>>
>>>> -	page = gfn_to_page(kvm, VMX_APIC_ACCESS_PAGE_ADDR>>   PAGE_SHIFT);
>>>> +	page = gfn_to_page_no_pin(kvm, VMX_APIC_ACCESS_PAGE_ADDR>>   PAGE_SHIFT);
>>>>   	if (is_error_page(page)) {
>>>>   		r = -EFAULT;
>>>>   		goto out;
>>>> @@ -7075,6 +7075,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
>>>>   	vmx_set_msr_bitmap(vcpu);
>>>>   }
>>>>
>>>> +static void vmx_set_apic_access_page_addr(struct kvm *kvm, hpa_t hpa)
>>>> +{
>>>> +	if (vm_need_virtualize_apic_accesses(kvm))
>>> This shouldn't even been called if apic access page is not supported. Nor
>>> mmu_notifier path neither tdp_page_fault path should ever see 0xfee00000
>>> address. BUG() is more appropriate here.
>>>
>>
>> I don't quite understand. Why calling this function here will leed to bug ?
>> (Sorry, I'm not quite understand the internal of KVM. Please help.)
> I didn't say that calling this function here will lead to a bug. I am saying that
> if vm_need_virtualize_apic_accesses() is false this function should not be called
> at all, so this check is redundant.
>
>>
>>>
>>>> +		vmcs_write64(APIC_ACCESS_ADDR, hpa);
>>>> +}
>>>> +
>>>>   static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
>>>>   {
>>>>   	u16 status;
>>>> @@ -8846,6 +8852,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
>>>>   	.enable_irq_window = enable_irq_window,
>>>>   	.update_cr8_intercept = update_cr8_intercept,
>>>>   	.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
>>>> +	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
>>> svm needs that too.
>>>
>>
>> OK, will add one for svm.
>>
>>>>   	.vm_has_apicv = vmx_vm_has_apicv,
>>>>   	.load_eoi_exitmap = vmx_load_eoi_exitmap,
>>>>   	.hwapic_irr_update = vmx_hwapic_irr_update,
>>>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>>>> index a26524f..14e7174 100644
>>>> --- a/arch/x86/kvm/x86.c
>>>> +++ b/arch/x86/kvm/x86.c
>>>> @@ -5943,6 +5943,24 @@ static void vcpu_migrated_page_update_ept(struct kvm_vcpu *vcpu)
>>>>   	}
>>>>   }
>>>>
>>>> +static void vcpu_migrated_page_update_apic(struct kvm_vcpu *vcpu)
>>>> +{
>>>> +	struct kvm *kvm = vcpu->kvm;
>>>> +
>>>> +	if (kvm->arch.apic_access_page_migrated) {
>>>> +		if (kvm->arch.apic_access_page)
>>>> +			kvm->arch.apic_access_page = pfn_to_page(0);
>>> All vcpus will access apic_access_page without locking here. May be
>>> set kvm->arch.apic_access_page to zero in mmu_notifier and here call
>>>   kvm_x86_ops->set_apic_access_page_addr(kvm, kvm->arch.apic_access_page);
>>>
>>
>> I'm a little confused. apic access page's phys_addr is stored in vmcs, and
>> I think it will be used by vcpu directly to access the physical page.
>> Setting kvm->arch.apic_access_page to zero will not stop it, right ?
>>
> Right, kvm->arch.apic_access_page is just a shadow value for whatever is written
> in vmcs. After setting it all vcpus need to update their vmcs values.
>
>> I'm wondering what happens when apic page is migrated, but the vmcs is still
>> holding its old phys_addr before the vcpu request is handled.
>>
> apic page should not be migrated untill all vpus are forced out of a guest mode and
> instructed to reload new value on a next guest entry. That's what we are trying to
> achieve here.
>
> --
> 			Gleb.
> .
>
Wanpeng Li July 7, 2014, 10:35 a.m. UTC | #7
On Wed, Jul 02, 2014 at 05:00:37PM +0800, Tang Chen wrote:
>apic access page is pinned in memory, and as a result it cannot be
>migrated/hot-removed.
>
>Actually it doesn't need to be pinned in memory.
>
>This patch introduces a new vcpu request: KVM_REQ_MIGRATE_EPT. This requet

s/KVM_REQ_MIGRATE_EPT/KVM_REQ_MIGRATE_APIC

>will be made when kvm_mmu_notifier_invalidate_page() is called when the page
>is unmapped from the qemu user space to reset APIC_ACCESS_ADDR pointer in
>each online vcpu to 0. And will also be made when ept violation happens to
>reset APIC_ACCESS_ADDR to the new page phys_addr (host phys_addr).
>---
> arch/x86/include/asm/kvm_host.h |  2 ++
> arch/x86/kvm/mmu.c              | 15 +++++++++++++++
> arch/x86/kvm/vmx.c              |  9 ++++++++-
> arch/x86/kvm/x86.c              | 20 ++++++++++++++++++++
> include/linux/kvm_host.h        |  1 +
> virt/kvm/kvm_main.c             | 15 +++++++++++++++
> 6 files changed, 61 insertions(+), 1 deletion(-)
>
>diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
>index 8771c0f..f104b87 100644
>--- a/arch/x86/include/asm/kvm_host.h
>+++ b/arch/x86/include/asm/kvm_host.h
>@@ -575,6 +575,7 @@ struct kvm_arch {
> 
> 	unsigned int tss_addr;
> 	struct page *apic_access_page;
>+	bool apic_access_page_migrated;
> 
> 	gpa_t wall_clock;
> 
>@@ -739,6 +740,7 @@ struct kvm_x86_ops {
> 	void (*hwapic_isr_update)(struct kvm *kvm, int isr);
> 	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
> 	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
>+	void (*set_apic_access_page_addr)(struct kvm *kvm, hpa_t hpa);
> 	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
> 	void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
> 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
>diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>index c0d72f6..a655444 100644
>--- a/arch/x86/kvm/mmu.c
>+++ b/arch/x86/kvm/mmu.c
>@@ -3436,6 +3436,21 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
> 		kvm_make_request(KVM_REQ_MIGRATE_EPT, vcpu);
> 	}
> 
>+	if (gpa == VMX_APIC_ACCESS_PAGE_ADDR &&
>+	    vcpu->kvm->arch.apic_access_page_migrated) {
>+		int i;
>+
>+		vcpu->kvm->arch.apic_access_page_migrated = false;
>+
>+		/*
>+		 * We need update APIC_ACCESS_ADDR pointer in each VMCS of
>+		 * all the online vcpus.
>+		 */
>+		for (i = 0; i < atomic_read(&vcpu->kvm->online_vcpus); i++)
>+			kvm_make_request(KVM_REQ_MIGRATE_APIC,
>+					 vcpu->kvm->vcpus[i]);
>+	}
>+
> 	spin_unlock(&vcpu->kvm->mmu_lock);
> 
> 	return r;
>diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>index c336cb3..abc152f 100644
>--- a/arch/x86/kvm/vmx.c
>+++ b/arch/x86/kvm/vmx.c
>@@ -3988,7 +3988,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
> 	if (r)
> 		goto out;
> 
>-	page = gfn_to_page(kvm, VMX_APIC_ACCESS_PAGE_ADDR >> PAGE_SHIFT);
>+	page = gfn_to_page_no_pin(kvm, VMX_APIC_ACCESS_PAGE_ADDR >> PAGE_SHIFT);
> 	if (is_error_page(page)) {
> 		r = -EFAULT;
> 		goto out;
>@@ -7075,6 +7075,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
> 	vmx_set_msr_bitmap(vcpu);
> }
> 
>+static void vmx_set_apic_access_page_addr(struct kvm *kvm, hpa_t hpa)
>+{
>+	if (vm_need_virtualize_apic_accesses(kvm))
>+		vmcs_write64(APIC_ACCESS_ADDR, hpa);
>+}
>+
> static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
> {
> 	u16 status;
>@@ -8846,6 +8852,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
> 	.enable_irq_window = enable_irq_window,
> 	.update_cr8_intercept = update_cr8_intercept,
> 	.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
>+	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
> 	.vm_has_apicv = vmx_vm_has_apicv,
> 	.load_eoi_exitmap = vmx_load_eoi_exitmap,
> 	.hwapic_irr_update = vmx_hwapic_irr_update,
>diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>index a26524f..14e7174 100644
>--- a/arch/x86/kvm/x86.c
>+++ b/arch/x86/kvm/x86.c
>@@ -5943,6 +5943,24 @@ static void vcpu_migrated_page_update_ept(struct kvm_vcpu *vcpu)
> 	}
> }
> 
>+static void vcpu_migrated_page_update_apic(struct kvm_vcpu *vcpu)
>+{
>+	struct kvm *kvm = vcpu->kvm;
>+
>+	if (kvm->arch.apic_access_page_migrated) {
>+		if (kvm->arch.apic_access_page)
>+			kvm->arch.apic_access_page = pfn_to_page(0);
>+		kvm_x86_ops->set_apic_access_page_addr(kvm, 0x0ull);
>+	} else {
>+		struct page *page;
>+		page = gfn_to_page_no_pin(kvm,
>+				VMX_APIC_ACCESS_PAGE_ADDR >> PAGE_SHIFT);
>+		kvm->arch.apic_access_page = page;
>+		kvm_x86_ops->set_apic_access_page_addr(kvm,
>+						       page_to_phys(page));
>+	}
>+}
>+
> /*
>  * Returns 1 to let __vcpu_run() continue the guest execution loop without
>  * exiting to the userspace.  Otherwise, the value will be returned to the
>@@ -6005,6 +6023,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
> 			vcpu_scan_ioapic(vcpu);
> 		if (kvm_check_request(KVM_REQ_MIGRATE_EPT, vcpu))
> 			vcpu_migrated_page_update_ept(vcpu);
>+		if (kvm_check_request(KVM_REQ_MIGRATE_APIC, vcpu))
>+			vcpu_migrated_page_update_apic(vcpu);
> 	}
> 
> 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
>diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
>index 4b7e51a..e2ad65e 100644
>--- a/include/linux/kvm_host.h
>+++ b/include/linux/kvm_host.h
>@@ -137,6 +137,7 @@ static inline bool is_error_page(struct page *page)
> #define KVM_REQ_ENABLE_IBS        23
> #define KVM_REQ_DISABLE_IBS       24
> #define KVM_REQ_MIGRATE_EPT       25
>+#define KVM_REQ_MIGRATE_APIC      26
> 
> #define KVM_USERSPACE_IRQ_SOURCE_ID		0
> #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
>diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
>index d271e89..f06438c 100644
>--- a/virt/kvm/kvm_main.c
>+++ b/virt/kvm/kvm_main.c
>@@ -54,6 +54,7 @@
> #include <asm/io.h>
> #include <asm/uaccess.h>
> #include <asm/pgtable.h>
>+#include <asm/vmx.h>
> 
> #include "coalesced_mmio.h"
> #include "async_pf.h"
>@@ -300,6 +301,20 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
> 		kvm_make_request(KVM_REQ_MIGRATE_EPT, kvm->vcpus[0]);
> 	}
> 
>+	if (address ==
>+	    gfn_to_hva(kvm, VMX_APIC_ACCESS_PAGE_ADDR >> PAGE_SHIFT)) {
>+		int i;
>+
>+		kvm->arch.apic_access_page_migrated = true;
>+
>+		/*
>+		 * We need update APIC_ACCESS_ADDR pointer in each VMCS of
>+		 * all the online vcpus.
>+		 */
>+		for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
>+			kvm_make_request(KVM_REQ_MIGRATE_APIC, kvm->vcpus[i]);
>+	}
>+
> 	spin_unlock(&kvm->mmu_lock);
> 	srcu_read_unlock(&kvm->srcu, idx);
> }
>-- 
>1.8.3.1
>
Nadav Amit July 7, 2014, 11:42 a.m. UTC | #8
Tang,

Running some (unrelated) tests I see that KVM does not handle APIC base 
relocation correctly. When the base is changed, kvm_lapic_set_base just 
changes lapic->base_address without taking further action (i.e., 
modifying the VMCS apic address in VMX).

This patch follows KVM's bad behavior by using the constant
VMX_APIC_ACCESS_PAGE_ADDR instead of lapic->base_address.

Anyhow, I didn't see anything that would make my life (in fixing the
lapic base issue) too difficult. Yet, feel free to make it more
"fix-friendly".

Thanks,
Nadav

On 7/7/14, 12:52 PM, Tang Chen wrote:
> Hi Gleb,
>
> The guest hang problem has been solved.
>
> When mmu_notifier is called, I set VMCS APIC_ACCESS_ADDR to the new value
> instead of setting it to 0. And only update kvm->arch.apic_access_page in
> the next ept violation.
>
> The guest is running well now.
>
> I'll post the new patches tomorrow. ;)
>
> Thanks.
>
Gleb Natapov July 7, 2014, 11:54 a.m. UTC | #9
On Mon, Jul 07, 2014 at 02:42:27PM +0300, Nadav Amit wrote:
> Tang,
> 
> Running some (unrelated) tests I see that KVM does not handle APIC base
> relocation correctly. When the base is changed, kvm_lapic_set_base just
> changes lapic->base_address without taking further action (i.e., modifying
> the VMCS apic address in VMX).
> 
> This patch follows KVM bad behavior by using the constant
> VMX_APIC_ACCESS_PAGE_ADDR instead of lapic->base_address.
There is no OS out there that relocates the APIC base (in fact it was not always
relocatable on real HW), so there is no point in complicating the code to support
it. In fact the current APIC_ACCESS_ADDR handling relies on the fact that all vcpus
have the apic mapped at the same address.

> 
> Anyhow, I didn't see anything that would make my life (in fixing the lapic
> base issue) too difficult. Yet, feel free in making it more "fix-friendly".
> 
Why would you want to fix it?

> Thanks,
> Nadav
> 
> On 7/7/14, 12:52 PM, Tang Chen wrote:
> >Hi Gleb,
> >
> >The guest hang problem has been solved.
> >
> >When mmu_notifier is called, I set VMCS APIC_ACCESS_ADDR to the new value
> >instead of setting it to 0. And only update kvm->arch.apic_access_page in
> >the next ept violation.
> >
> >The guest is running well now.
> >
> >I'll post the new patches tomorrow. ;)
> >
> >Thanks.
> >

--
			Gleb.
Nadav Amit July 7, 2014, 12:10 p.m. UTC | #10
On 7/7/14, 2:54 PM, Gleb Natapov wrote:
> On Mon, Jul 07, 2014 at 02:42:27PM +0300, Nadav Amit wrote:
>> Tang,
>>
>> Running some (unrelated) tests I see that KVM does not handle APIC base
>> relocation correctly. When the base is changed, kvm_lapic_set_base just
>> changes lapic->base_address without taking further action (i.e., modifying
>> the VMCS apic address in VMX).
>>
>> This patch follows KVM bad behavior by using the constant
>> VMX_APIC_ACCESS_PAGE_ADDR instead of lapic->base_address.
> There is no OS out there that relocates APIC base (in fact it was not always
> relocatable on real HW), so there is not point in complicating the code to support
> it. In fact current APIC_ACCESS_ADDR handling relies on the fact that all vcpus
> has apic mapped at the same address.
>
>>
>> Anyhow, I didn't see anything that would make my life (in fixing the lapic
>> base issue) too difficult. Yet, feel free in making it more "fix-friendly".
>>
> Why would you want to fix it?
>
If there is no general need, I will not send a fix. However, I think that at
the very least a warning message should appear if the guest relocates the
APIC base.

Nadav

Gleb Natapov July 7, 2014, 12:15 p.m. UTC | #11
On Mon, Jul 07, 2014 at 03:10:23PM +0300, Nadav Amit wrote:
> On 7/7/14, 2:54 PM, Gleb Natapov wrote:
> >On Mon, Jul 07, 2014 at 02:42:27PM +0300, Nadav Amit wrote:
> >>Tang,
> >>
> >>Running some (unrelated) tests I see that KVM does not handle APIC base
> >>relocation correctly. When the base is changed, kvm_lapic_set_base just
> >>changes lapic->base_address without taking further action (i.e., modifying
> >>the VMCS apic address in VMX).
> >>
> >>This patch follows KVM bad behavior by using the constant
> >>VMX_APIC_ACCESS_PAGE_ADDR instead of lapic->base_address.
> >There is no OS out there that relocates APIC base (in fact it was not always
> >relocatable on real HW), so there is not point in complicating the code to support
> >it. In fact current APIC_ACCESS_ADDR handling relies on the fact that all vcpus
> >has apic mapped at the same address.
> >
> >>
> >>Anyhow, I didn't see anything that would make my life (in fixing the lapic
> >>base issue) too difficult. Yet, feel free in making it more "fix-friendly".
> >>
> >Why would you want to fix it?
> >
> If there is no general need, I will not send a fix. However, I think the
It is much more work than it may look like, for no apparent gain.

> very least a warning message should be appear if the guest relocates the
> APIC base.
> 
Warning is fine. Make it _once().
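Something as small as this would do; a sketch, assuming it lands in
kvm_lapic_set_base() in lapic.c:

	if ((value & MSR_IA32_APICBASE_BASE) != APIC_DEFAULT_PHYS_BASE)
		pr_warn_once("kvm: APIC base relocation is unsupported by KVM\n");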

--
			Gleb.
tangchen July 8, 2014, 1:44 a.m. UTC | #12
Hi Nadav,

Thanks for the reply, please see below.

On 07/07/2014 08:10 PM, Nadav Amit wrote:
> On 7/7/14, 2:54 PM, Gleb Natapov wrote:
>> On Mon, Jul 07, 2014 at 02:42:27PM +0300, Nadav Amit wrote:
>>> Tang,
>>>
>>> Running some (unrelated) tests I see that KVM does not handle APIC base
>>> relocation correctly. When the base is changed, kvm_lapic_set_base just
>>> changes lapic->base_address without taking further action (i.e.,
>>> modifying
>>> the VMCS apic address in VMX).
>>>
>>> This patch follows KVM bad behavior by using the constant
>>> VMX_APIC_ACCESS_PAGE_ADDR instead of lapic->base_address.
>> There is no OS out there that relocates APIC base (in fact it was not
>> always
>> relocatable on real HW), so there is not point in complicating the
>> code to support
>> it. In fact current APIC_ACCESS_ADDR handling relies on the fact that
>> all vcpus
>> has apic mapped at the same address.
>>
>>>
>>> Anyhow, I didn't see anything that would make my life (in fixing the
>>> lapic
>>> base issue) too difficult. Yet, feel free in making it more
>>> "fix-friendly".
>>>
>> Why would you want to fix it?
>>
> If there is no general need, I will not send a fix. However, I think the
> very least a warning message should be appear if the guest relocates the
> APIC base.

Maybe I didn't understand your question correctly. If I'm wrong, please
tell me.

This patch does not relocate the APIC base in the guest, but in the host. The
host migrates the apic page somewhere else, and KVM updates the ept pagetable
to track it. In the guest, the apic base address (gpa) doesn't change.

Is this lapic->base_address an hpa?

Is there anything I need to update in my patch?

Thanks.

Nadav Amit July 8, 2014, 6:46 a.m. UTC | #13
Tang,

I am sorry if I caused any confusion.

Following Gleb's response, there is no apparent need for dealing with the
scenario I mentioned (relocating the APIC base), so you don't need to make
any changes to your patch, and I will post another patch later to warn
if the guest relocates its APIC (from the default address to another
guest physical address). My answers to your questions are below.

On 7/8/14, 4:44 AM, Tang Chen wrote:
> Hi Nadav,
>
> Thanks for the reply, please see below.
>
> On 07/07/2014 08:10 PM, Nadav Amit wrote:
>> On 7/7/14, 2:54 PM, Gleb Natapov wrote:
>>> On Mon, Jul 07, 2014 at 02:42:27PM +0300, Nadav Amit wrote:
>>>> Tang,
>>>>
>>>> Running some (unrelated) tests I see that KVM does not handle APIC base
>>>> relocation correctly. When the base is changed, kvm_lapic_set_base just
>>>> changes lapic->base_address without taking further action (i.e.,
>>>> modifying
>>>> the VMCS apic address in VMX).
>>>>
>>>> This patch follows KVM bad behavior by using the constant
>>>> VMX_APIC_ACCESS_PAGE_ADDR instead of lapic->base_address.
>>> There is no OS out there that relocates the APIC base (in fact it was
>>> not always relocatable on real HW), so there is no point in complicating
>>> the code to support it. In fact, the current APIC_ACCESS_ADDR handling
>>> relies on the fact that all vcpus have the apic mapped at the same
>>> address.
>>>
>>>>
>>>> Anyhow, I didn't see anything that would make my life (in fixing the
>>>> lapic base issue) too difficult. Yet, feel free to make it more
>>>> "fix-friendly".
>>>>
>>> Why would you want to fix it?
>>>
>> If there is no general need, I will not send a fix. However, I think at
>> the very least a warning message should appear if the guest relocates
>> the APIC base.
>
> Maybe I didn't understand your question correctly. If I'm wrong, please
> tell me.
>
> This patch does not relocate the APIC base in the guest, but in the host.
> The host migrates the apic access page to somewhere else, and KVM updates
> the EPT page table to track it. In the guest, the apic base address (gpa)
> doesn't change.
The last claim is true in practice, according to Gleb, but it is not 
necessarily so according to the specifications. Pentium 4, Intel Xeon 
and P6 family processors support APIC base relocation. See the Intel SDM 
section 10.4.5. Anyhow, Gleb claims it is not used by any OS.

>
> Is this lapic->base_address a hpa ?
No, it is a guest physical address.
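
For reference, lapic->base_address is derived in kvm_lapic_set_base() by
masking the guest's IA32_APIC_BASE MSR value with MSR_IA32_APICBASE_BASE,
so "relocation" just means the guest wrote a different base field into
that MSR. A small, self-contained illustration of the layout (field
positions per the Intel SDM; the MSR value below is made up and the mask
is simplified to ignore MAXPHYADDR):

#include <stdint.h>
#include <stdio.h>

#define APIC_DEFAULT_PHYS_BASE	0xfee00000ULL
#define APICBASE_BSP		(1ULL << 8)	/* bootstrap processor flag */
#define APICBASE_ENABLE		(1ULL << 11)	/* APIC global enable */
#define APICBASE_BASE_MASK	(~0xfffULL)	/* base field, simplified */

int main(void)
{
	/* Made-up value: APIC enabled, BSP set, base left at the default. */
	uint64_t msr = APIC_DEFAULT_PHYS_BASE | APICBASE_ENABLE | APICBASE_BSP;

	printf("enabled %d, bsp %d, base gpa 0x%llx\n",
	       !!(msr & APICBASE_ENABLE), !!(msr & APICBASE_BSP),
	       (unsigned long long)(msr & APICBASE_BASE_MASK));
	return 0;
}

Either way the result is a guest physical address, not a host one.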

>
> Is there anywhere I need to update in my patch ?

No. I'll send another patch on top of yours that prints a warning if the 
APIC base is relocated (i.e., the guest physical address of the APIC 
base is changed). Such relocation is done explicitly by the guest, not 
by your patch.

Nadav

tangchen July 8, 2014, 9:40 a.m. UTC | #14
Hi Wanpeng,

On 07/07/2014 06:35 PM, Wanpeng Li wrote:
> On Wed, Jul 02, 2014 at 05:00:37PM +0800, Tang Chen wrote:
>> apic access page is pinned in memory, and as a result it cannot be
>> migrated/hot-removed.
>>
>> Actually it doesn't need to be pinned in memory.
>>
>> This patch introduces a new vcpu request: KVM_REQ_MIGRATE_EPT. This requet
>
> s/KVM_REQ_MIGRATE_EPT/KVM_REQ_MIGRATE_APIC

Thanks, will fix it in the next version.

Thanks.

>
>> will be made when kvm_mmu_notifier_invalidate_page() is called when the page
>> is unmapped from the qemu user space to reset APIC_ACCESS_ADDR pointer in
>> each online vcpu to 0. And will also be made when ept violation happens to
>> reset APIC_ACCESS_ADDR to the new page phys_addr (host phys_addr).
>> ---
>> arch/x86/include/asm/kvm_host.h |  2 ++
>> arch/x86/kvm/mmu.c              | 15 +++++++++++++++
>> arch/x86/kvm/vmx.c              |  9 ++++++++-
>> arch/x86/kvm/x86.c              | 20 ++++++++++++++++++++
>> include/linux/kvm_host.h        |  1 +
>> virt/kvm/kvm_main.c             | 15 +++++++++++++++
>> 6 files changed, 61 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
>> index 8771c0f..f104b87 100644
>> --- a/arch/x86/include/asm/kvm_host.h
>> +++ b/arch/x86/include/asm/kvm_host.h
>> @@ -575,6 +575,7 @@ struct kvm_arch {
>>
>> 	unsigned int tss_addr;
>> 	struct page *apic_access_page;
>> +	bool apic_access_page_migrated;
>>
>> 	gpa_t wall_clock;
>>
>> @@ -739,6 +740,7 @@ struct kvm_x86_ops {
>> 	void (*hwapic_isr_update)(struct kvm *kvm, int isr);
>> 	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
>> 	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
>> +	void (*set_apic_access_page_addr)(struct kvm *kvm, hpa_t hpa);
>> 	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
>> 	void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
>> 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>> index c0d72f6..a655444 100644
>> --- a/arch/x86/kvm/mmu.c
>> +++ b/arch/x86/kvm/mmu.c
>> @@ -3436,6 +3436,21 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
>> 		kvm_make_request(KVM_REQ_MIGRATE_EPT, vcpu);
>> 	}
>>
>> +	if (gpa == VMX_APIC_ACCESS_PAGE_ADDR &&
>> +	    vcpu->kvm->arch.apic_access_page_migrated) {
>> +		int i;
>> +
>> +		vcpu->kvm->arch.apic_access_page_migrated = false;
>> +
>> +		/*
>> +		 * We need to update the APIC_ACCESS_ADDR pointer in the VMCS
>> +		 * of each online vcpu.
>> +		 */
>> +		for (i = 0; i < atomic_read(&vcpu->kvm->online_vcpus); i++)
>> +			kvm_make_request(KVM_REQ_MIGRATE_APIC,
>> +					 vcpu->kvm->vcpus[i]);
>> +	}
>> +
>> 	spin_unlock(&vcpu->kvm->mmu_lock);
>>
>> 	return r;
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index c336cb3..abc152f 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -3988,7 +3988,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
>> 	if (r)
>> 		goto out;
>>
>> -	page = gfn_to_page(kvm, VMX_APIC_ACCESS_PAGE_ADDR >> PAGE_SHIFT);
>> +	page = gfn_to_page_no_pin(kvm, VMX_APIC_ACCESS_PAGE_ADDR >> PAGE_SHIFT);
>> 	if (is_error_page(page)) {
>> 		r = -EFAULT;
>> 		goto out;
>> @@ -7075,6 +7075,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
>> 	vmx_set_msr_bitmap(vcpu);
>> }
>>
>> +static void vmx_set_apic_access_page_addr(struct kvm *kvm, hpa_t hpa)
>> +{
>> +	if (vm_need_virtualize_apic_accesses(kvm))
>> +		vmcs_write64(APIC_ACCESS_ADDR, hpa);
>> +}
>> +
>> static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
>> {
>> 	u16 status;
>> @@ -8846,6 +8852,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
>> 	.enable_irq_window = enable_irq_window,
>> 	.update_cr8_intercept = update_cr8_intercept,
>> 	.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
>> +	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
>> 	.vm_has_apicv = vmx_vm_has_apicv,
>> 	.load_eoi_exitmap = vmx_load_eoi_exitmap,
>> 	.hwapic_irr_update = vmx_hwapic_irr_update,
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> index a26524f..14e7174 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -5943,6 +5943,24 @@ static void vcpu_migrated_page_update_ept(struct kvm_vcpu *vcpu)
>> 	}
>> }
>>
>> +static void vcpu_migrated_page_update_apic(struct kvm_vcpu *vcpu)
>> +{
>> +	struct kvm *kvm = vcpu->kvm;
>> +
>> +	if (kvm->arch.apic_access_page_migrated) {
>> +		if (kvm->arch.apic_access_page)
>> +			kvm->arch.apic_access_page = pfn_to_page(0);
>> +		kvm_x86_ops->set_apic_access_page_addr(kvm, 0x0ull);
>> +	} else {
>> +		struct page *page;
>> +		page = gfn_to_page_no_pin(kvm,
>> +				VMX_APIC_ACCESS_PAGE_ADDR >> PAGE_SHIFT);
>> +		kvm->arch.apic_access_page = page;
>> +		kvm_x86_ops->set_apic_access_page_addr(kvm,
>> +						       page_to_phys(page));
>> +	}
>> +}
>> +
>> /*
>>   * Returns 1 to let __vcpu_run() continue the guest execution loop without
>>   * exiting to the userspace.  Otherwise, the value will be returned to the
>> @@ -6005,6 +6023,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>> 			vcpu_scan_ioapic(vcpu);
>> 		if (kvm_check_request(KVM_REQ_MIGRATE_EPT, vcpu))
>> 			vcpu_migrated_page_update_ept(vcpu);
>> +		if (kvm_check_request(KVM_REQ_MIGRATE_APIC, vcpu))
>> +			vcpu_migrated_page_update_apic(vcpu);
>> 	}
>>
>> 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
>> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
>> index 4b7e51a..e2ad65e 100644
>> --- a/include/linux/kvm_host.h
>> +++ b/include/linux/kvm_host.h
>> @@ -137,6 +137,7 @@ static inline bool is_error_page(struct page *page)
>> #define KVM_REQ_ENABLE_IBS        23
>> #define KVM_REQ_DISABLE_IBS       24
>> #define KVM_REQ_MIGRATE_EPT       25
>> +#define KVM_REQ_MIGRATE_APIC      26
>>
>> #define KVM_USERSPACE_IRQ_SOURCE_ID		0
>> #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
>> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
>> index d271e89..f06438c 100644
>> --- a/virt/kvm/kvm_main.c
>> +++ b/virt/kvm/kvm_main.c
>> @@ -54,6 +54,7 @@
>> #include <asm/io.h>
>> #include <asm/uaccess.h>
>> #include <asm/pgtable.h>
>> +#include <asm/vmx.h>
>>
>> #include "coalesced_mmio.h"
>> #include "async_pf.h"
>> @@ -300,6 +301,20 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
>> 		kvm_make_request(KVM_REQ_MIGRATE_EPT, kvm->vcpus[0]);
>> 	}
>>
>> +	if (address ==
>> +	    gfn_to_hva(kvm, VMX_APIC_ACCESS_PAGE_ADDR >> PAGE_SHIFT)) {
>> +		int i;
>> +
>> +		kvm->arch.apic_access_page_migrated = true;
>> +
>> +		/*
>> +	 * We need to update the APIC_ACCESS_ADDR pointer in the VMCS
>> +	 * of each online vcpu.
>> +		 */
>> +		for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
>> +			kvm_make_request(KVM_REQ_MIGRATE_APIC, kvm->vcpus[i]);
>> +	}
>> +
>> 	spin_unlock(&kvm->mmu_lock);
>> 	srcu_read_unlock(&kvm->srcu, idx);
>> }
>> --
>> 1.8.3.1
>>
Nadav Amit Aug. 20, 2014, 8:44 a.m. UTC | #15
CC’ing the KVM mailing list which I forgot.

On Aug 20, 2014, at 11:12 AM, Nadav Amit <namit@cs.technion.ac.il> wrote:

> APIC base relocation is unsupported by KVM. If anyone uses it, the least should
> be to report a warning in the hypervisor. Note that kvm-unit-tests performs
> APIC base relocation, and causes the warning to be printed.
> 
> Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
> ---
> arch/x86/kvm/lapic.c | 5 +++++
> 1 file changed, 5 insertions(+)
> 
> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
> index 08e8a89..6655e20 100644
> --- a/arch/x86/kvm/lapic.c
> +++ b/arch/x86/kvm/lapic.c
> @@ -1416,6 +1416,11 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
>  	apic->base_address = apic->vcpu->arch.apic_base &
>  			     MSR_IA32_APICBASE_BASE;
> 
> +	if ((value & MSR_IA32_APICBASE_ENABLE) &&
> +	     apic->base_address != APIC_DEFAULT_PHYS_BASE)
> +		printk_once(KERN_WARNING
> +			    "APIC base relocation is unsupported by KVM\n");
> +
>  	/* with FSB delivery interrupt, we can restart APIC functionality */
>  	apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
>  		   "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
> -- 
> 1.9.1
>
diff mbox

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8771c0f..f104b87 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -575,6 +575,7 @@  struct kvm_arch {
 
 	unsigned int tss_addr;
 	struct page *apic_access_page;
+	bool apic_access_page_migrated;
 
 	gpa_t wall_clock;
 
@@ -739,6 +740,7 @@  struct kvm_x86_ops {
 	void (*hwapic_isr_update)(struct kvm *kvm, int isr);
 	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
 	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
+	void (*set_apic_access_page_addr)(struct kvm *kvm, hpa_t hpa);
 	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
 	void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c0d72f6..a655444 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3436,6 +3436,21 @@  static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 		kvm_make_request(KVM_REQ_MIGRATE_EPT, vcpu);
 	}
 
+	if (gpa == VMX_APIC_ACCESS_PAGE_ADDR &&
+	    vcpu->kvm->arch.apic_access_page_migrated) {
+		int i;
+
+		vcpu->kvm->arch.apic_access_page_migrated = false;
+
+		/*
+		 * We need to update the APIC_ACCESS_ADDR pointer in the VMCS
+		 * of each online vcpu.
+		 */
+		for (i = 0; i < atomic_read(&vcpu->kvm->online_vcpus); i++)
+			kvm_make_request(KVM_REQ_MIGRATE_APIC,
+					 vcpu->kvm->vcpus[i]);
+	}
+
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 	return r;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c336cb3..abc152f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3988,7 +3988,7 @@  static int alloc_apic_access_page(struct kvm *kvm)
 	if (r)
 		goto out;
 
-	page = gfn_to_page(kvm, VMX_APIC_ACCESS_PAGE_ADDR >> PAGE_SHIFT);
+	page = gfn_to_page_no_pin(kvm, VMX_APIC_ACCESS_PAGE_ADDR >> PAGE_SHIFT);
 	if (is_error_page(page)) {
 		r = -EFAULT;
 		goto out;
@@ -7075,6 +7075,12 @@  static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 	vmx_set_msr_bitmap(vcpu);
 }
 
+static void vmx_set_apic_access_page_addr(struct kvm *kvm, hpa_t hpa)
+{
+	if (vm_need_virtualize_apic_accesses(kvm))
+		vmcs_write64(APIC_ACCESS_ADDR, hpa);
+}
+
 static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
 {
 	u16 status;
@@ -8846,6 +8852,7 @@  static struct kvm_x86_ops vmx_x86_ops = {
 	.enable_irq_window = enable_irq_window,
 	.update_cr8_intercept = update_cr8_intercept,
 	.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
+	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
 	.vm_has_apicv = vmx_vm_has_apicv,
 	.load_eoi_exitmap = vmx_load_eoi_exitmap,
 	.hwapic_irr_update = vmx_hwapic_irr_update,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a26524f..14e7174 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5943,6 +5943,24 @@  static void vcpu_migrated_page_update_ept(struct kvm_vcpu *vcpu)
 	}
 }
 
+static void vcpu_migrated_page_update_apic(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+
+	if (kvm->arch.apic_access_page_migrated) {
+		if (kvm->arch.apic_access_page)
+			kvm->arch.apic_access_page = pfn_to_page(0);
+		kvm_x86_ops->set_apic_access_page_addr(kvm, 0x0ull);
+	} else {
+		struct page *page;
+		page = gfn_to_page_no_pin(kvm,
+				VMX_APIC_ACCESS_PAGE_ADDR >> PAGE_SHIFT);
+		kvm->arch.apic_access_page = page;
+		kvm_x86_ops->set_apic_access_page_addr(kvm,
+						       page_to_phys(page));
+	}
+}
+
 /*
  * Returns 1 to let __vcpu_run() continue the guest execution loop without
  * exiting to the userspace.  Otherwise, the value will be returned to the
@@ -6005,6 +6023,8 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			vcpu_scan_ioapic(vcpu);
 		if (kvm_check_request(KVM_REQ_MIGRATE_EPT, vcpu))
 			vcpu_migrated_page_update_ept(vcpu);
+		if (kvm_check_request(KVM_REQ_MIGRATE_APIC, vcpu))
+			vcpu_migrated_page_update_apic(vcpu);
 	}
 
 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 4b7e51a..e2ad65e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -137,6 +137,7 @@  static inline bool is_error_page(struct page *page)
 #define KVM_REQ_ENABLE_IBS        23
 #define KVM_REQ_DISABLE_IBS       24
 #define KVM_REQ_MIGRATE_EPT       25
+#define KVM_REQ_MIGRATE_APIC      26
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d271e89..f06438c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -54,6 +54,7 @@ 
 #include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
+#include <asm/vmx.h>
 
 #include "coalesced_mmio.h"
 #include "async_pf.h"
@@ -300,6 +301,20 @@  static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
 		kvm_make_request(KVM_REQ_MIGRATE_EPT, kvm->vcpus[0]);
 	}
 
+	if (address ==
+	    gfn_to_hva(kvm, VMX_APIC_ACCESS_PAGE_ADDR >> PAGE_SHIFT)) {
+		int i;
+
+		kvm->arch.apic_access_page_migrated = true;
+
+		/*
+	 * We need to update the APIC_ACCESS_ADDR pointer in the VMCS
+	 * of each online vcpu.
+		 */
+		for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+			kvm_make_request(KVM_REQ_MIGRATE_APIC, kvm->vcpus[i]);
+	}
+
 	spin_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
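
The plumbing above leans on KVM's per-vcpu request bits: kvm_make_request()
sets the KVM_REQ_MIGRATE_APIC bit in vcpu->requests, and vcpu_enter_guest()
consumes it with kvm_check_request() before the vcpu goes back into guest
mode, which is why the mmu notifier and the fault path only have to flag
each online vcpu. A stripped-down, self-contained illustration of that
set/test-and-clear pattern (the real helpers use atomic bitops and memory
barriers; the toy_* names here are illustrative only):

#include <stdio.h>

#define REQ_MIGRATE_APIC 26	/* bit index, as in the hunk above */

struct toy_vcpu {
	unsigned long requests;	/* one bit per pending request */
};

/* Illustrative stand-ins for kvm_make_request()/kvm_check_request(). */
static void toy_make_request(int req, struct toy_vcpu *vcpu)
{
	vcpu->requests |= 1UL << req;
}

static int toy_check_request(int req, struct toy_vcpu *vcpu)
{
	if (vcpu->requests & (1UL << req)) {
		vcpu->requests &= ~(1UL << req);	/* consumed exactly once */
		return 1;
	}
	return 0;
}

int main(void)
{
	struct toy_vcpu vcpu = { 0 };

	toy_make_request(REQ_MIGRATE_APIC, &vcpu);	/* notifier/fault side */
	printf("first check:  %d\n", toy_check_request(REQ_MIGRATE_APIC, &vcpu));
	printf("second check: %d\n", toy_check_request(REQ_MIGRATE_APIC, &vcpu));
	return 0;
}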