
[RFC,2/6] KVM: x86: extend struct kvm_vcpu_pv_apf_data with token info

Message ID 20200429093634.1514902-3-vkuznets@redhat.com (mailing list archive)
State New, archived
Series KVM: x86: Interrupt-based mechanism for async_pf 'page present' notifications

Commit Message

Vitaly Kuznetsov April 29, 2020, 9:36 a.m. UTC
Currently, the APF mechanism relies on abusing #PF: the token is passed to the
guest through CR2. If we switch to using interrupts to deliver page-ready
notifications, we need a different way to pass the data. Extend the existing
'struct kvm_vcpu_pv_apf_data' with token information.
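
For illustration only, not part of this patch: a minimal user-space sketch of
the packing apf_put_user() will use, assuming a little-endian guest where the
new 'token' field sits immediately after 'reason', so a single 64-bit write
fills both fields. The concrete values below are made up.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* Hypothetical inputs, packed the same way as in apf_put_user(). */
            uint32_t reason = 1;            /* KVM_PV_REASON_PAGE_NOT_PRESENT */
            uint32_t token  = 0xdeadbeef;   /* made-up async PF token */
            uint64_t val    = (uint64_t)token << 32 | reason;

            /* The low word of the 64-bit write lands in 'reason' and the high
             * word in the adjacent 'token' field of kvm_vcpu_pv_apf_data. */
            assert((uint32_t)(val & 0xffffffff) == reason);
            assert((uint32_t)(val >> 32) == token);
            return 0;
    }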

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
 arch/x86/include/uapi/asm/kvm_para.h |  3 ++-
 arch/x86/kvm/x86.c                   | 10 ++++++----
 2 files changed, 8 insertions(+), 5 deletions(-)

Comments

Gavin Shan May 4, 2020, 11:52 p.m. UTC | #1
Hi Vitaly,

On 4/29/20 7:36 PM, Vitaly Kuznetsov wrote:
> Currently, the APF mechanism relies on abusing #PF: the token is passed to the
> guest through CR2. If we switch to using interrupts to deliver page-ready
> notifications, we need a different way to pass the data. Extend the existing
> 'struct kvm_vcpu_pv_apf_data' with token information.
> 
> Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
> ---
>   arch/x86/include/uapi/asm/kvm_para.h |  3 ++-
>   arch/x86/kvm/x86.c                   | 10 ++++++----
>   2 files changed, 8 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
> index 2a8e0b6b9805..df2ba34037a2 100644
> --- a/arch/x86/include/uapi/asm/kvm_para.h
> +++ b/arch/x86/include/uapi/asm/kvm_para.h
> @@ -113,7 +113,8 @@ struct kvm_mmu_op_release_pt {
>   
>   struct kvm_vcpu_pv_apf_data {
>   	__u32 reason;
> -	__u8 pad[60];
> +	__u32 token;
> +	__u8 pad[56];
>   	__u32 enabled;
>   };
>   
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index b93133ee07ba..7c21c0cf0a33 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -2662,7 +2662,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
>   	}
>   
>   	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
> -					sizeof(u32)))
> +					sizeof(u64)))
>   		return 1;
>   
>   	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
> @@ -10352,8 +10352,9 @@ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
>   	}
>   }
>   
> -static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
> +static int apf_put_user(struct kvm_vcpu *vcpu, u32 reason, u32 token)
>   {
> +	u64 val = (u64)token << 32 | reason;
>   
>   	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
>   				      sizeof(val));
> @@ -10405,7 +10406,8 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
>   	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
>   
>   	if (kvm_can_deliver_async_pf(vcpu) &&
> -	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
> +	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT,
> +			  work->arch.token)) {
>   		fault.vector = PF_VECTOR;
>   		fault.error_code_valid = true;
>   		fault.error_code = 0;
> @@ -10438,7 +10440,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
>   	trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
>   
>   	if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED &&
> -	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
> +	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY, work->arch.token)) {
>   			fault.vector = PF_VECTOR;
>   			fault.error_code_valid = true;
>   			fault.error_code = 0;
> 

Based on two facts, the prototype would be better as below: (1) the token is
more important than the reason; (2) the token is put into the high word of @val.
I also think apf_{get,put}_user() might be worth making inline, but it's not a
big deal.

    static inline int apf_put_user(struct kvm_vcpu *vcpu, u32 token, u32 reason)
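
Filled out as a sketch of the suggested variant (the body is taken verbatim
from the patch, with only the argument order swapped and the helper marked
inline):

    static inline int apf_put_user(struct kvm_vcpu *vcpu, u32 token, u32 reason)
    {
            u64 val = (u64)token << 32 | reason;

            /* Same single 64-bit write of reason (low word) and token (high
             * word) into the shared kvm_vcpu_pv_apf_data page. */
            return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
                                          sizeof(val));
    }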

Thanks,
Gavin
Vitaly Kuznetsov May 5, 2020, 8:08 a.m. UTC | #2
Gavin Shan <gshan@redhat.com> writes:

> Hi Vitaly,
>
> On 4/29/20 7:36 PM, Vitaly Kuznetsov wrote:
>> Currently, the APF mechanism relies on abusing #PF: the token is passed to the
>> guest through CR2. If we switch to using interrupts to deliver page-ready
>> notifications, we need a different way to pass the data. Extend the existing
>> 'struct kvm_vcpu_pv_apf_data' with token information.
>> 
>> Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
>> ---
>>   arch/x86/include/uapi/asm/kvm_para.h |  3 ++-
>>   arch/x86/kvm/x86.c                   | 10 ++++++----
>>   2 files changed, 8 insertions(+), 5 deletions(-)
>> 
>> diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
>> index 2a8e0b6b9805..df2ba34037a2 100644
>> --- a/arch/x86/include/uapi/asm/kvm_para.h
>> +++ b/arch/x86/include/uapi/asm/kvm_para.h
>> @@ -113,7 +113,8 @@ struct kvm_mmu_op_release_pt {
>>   
>>   struct kvm_vcpu_pv_apf_data {
>>   	__u32 reason;
>> -	__u8 pad[60];
>> +	__u32 token;
>> +	__u8 pad[56];
>>   	__u32 enabled;
>>   };
>>   
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> index b93133ee07ba..7c21c0cf0a33 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -2662,7 +2662,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
>>   	}
>>   
>>   	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
>> -					sizeof(u32)))
>> +					sizeof(u64)))
>>   		return 1;
>>   
>>   	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
>> @@ -10352,8 +10352,9 @@ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
>>   	}
>>   }
>>   
>> -static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
>> +static int apf_put_user(struct kvm_vcpu *vcpu, u32 reason, u32 token)
>>   {
>> +	u64 val = (u64)token << 32 | reason;
>>   
>>   	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
>>   				      sizeof(val));
>> @@ -10405,7 +10406,8 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
>>   	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
>>   
>>   	if (kvm_can_deliver_async_pf(vcpu) &&
>> -	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
>> +	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT,
>> +			  work->arch.token)) {
>>   		fault.vector = PF_VECTOR;
>>   		fault.error_code_valid = true;
>>   		fault.error_code = 0;
>> @@ -10438,7 +10440,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
>>   	trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
>>   
>>   	if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED &&
>> -	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
>> +	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY, work->arch.token)) {
>>   			fault.vector = PF_VECTOR;
>>   			fault.error_code_valid = true;
>>   			fault.error_code = 0;
>> 
>
> Based on two facts, the prototype would be better as below: (1) the token is
> more important than the reason; (2) the token is put into the high word of @val.
> I also think apf_{get,put}_user() might be worth making inline, but it's not a
> big deal.

This is to be changed in v1 as we agreed to drop page-ready delivery via
#PF completely.

>     static inline int apf_put_user(struct kvm_vcpu *vcpu, u32 token, u32 reason)
>

Yes, it makes sense to inline these. Thanks!

Patch

diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 2a8e0b6b9805..df2ba34037a2 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -113,7 +113,8 @@  struct kvm_mmu_op_release_pt {
 
 struct kvm_vcpu_pv_apf_data {
 	__u32 reason;
-	__u8 pad[60];
+	__u32 token;
+	__u8 pad[56];
 	__u32 enabled;
 };
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b93133ee07ba..7c21c0cf0a33 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2662,7 +2662,7 @@  static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 	}
 
 	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
-					sizeof(u32)))
+					sizeof(u64)))
 		return 1;
 
 	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
@@ -10352,8 +10352,9 @@  static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 	}
 }
 
-static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
+static int apf_put_user(struct kvm_vcpu *vcpu, u32 reason, u32 token)
 {
+	u64 val = (u64)token << 32 | reason;
 
 	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
 				      sizeof(val));
@@ -10405,7 +10406,8 @@  void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
 
 	if (kvm_can_deliver_async_pf(vcpu) &&
-	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
+	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT,
+			  work->arch.token)) {
 		fault.vector = PF_VECTOR;
 		fault.error_code_valid = true;
 		fault.error_code = 0;
@@ -10438,7 +10440,7 @@  void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 	trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
 
 	if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED &&
-	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
+	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY, work->arch.token)) {
 			fault.vector = PF_VECTOR;
 			fault.error_code_valid = true;
 			fault.error_code = 0;