
[v2,1/3] KVM: x86: move kvm_vcpu_gfn_to_memslot() out of try_async_pf()

Message ID 20200807141232.402895-2-vkuznets@redhat.com
State New, archived
Series KVM: x86: KVM_MEM_PCI_HOLE memory

Commit Message

Vitaly Kuznetsov Aug. 7, 2020, 2:12 p.m. UTC
No functional change intended. Slot flags will need to be analyzed
prior to try_async_pf() when KVM_MEM_PCI_HOLE is implemented.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
 arch/x86/kvm/mmu/mmu.c         | 14 ++++++++------
 arch/x86/kvm/mmu/paging_tmpl.h |  7 +++++--
 2 files changed, 13 insertions(+), 8 deletions(-)
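
As a reference for reviewers, a minimal sketch of the caller-side check
this refactoring is meant to enable. KVM_MEM_PCI_HOLE and
kvm_handle_pci_hole() are hypothetical names taken from the direction of
this series, not merged KVM code:

	/*
	 * Sketch only: with the memslot lookup hoisted out of
	 * try_async_pf(), the fault handler can inspect slot->flags
	 * before going after a pfn.
	 */
	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	/* Hypothetical: short-circuit faults hitting a PCI-hole slot. */
	if (slot && (slot->flags & KVM_MEM_PCI_HOLE))
		return kvm_handle_pci_hole(vcpu, gpa, error_code);

	if (try_async_pf(vcpu, slot, prefault, gfn, gpa, &pfn, write,
			 &map_writable))
		return RET_PF_RETRY;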

Comments

Sean Christopherson Aug. 14, 2020, 1:40 a.m. UTC | #1
On Fri, Aug 07, 2020 at 04:12:30PM +0200, Vitaly Kuznetsov wrote:
> No functional change intended. Slot flags will need to be analyzed
> prior to try_async_pf() when KVM_MEM_PCI_HOLE is implemented.

Why?  Wouldn't it be just as easy, and arguably more appropriate, to add
KVM_PFN_ERR_PCI_HOLE and update handle_abnormal_pfn() accordingly?
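
For illustration, that alternative could look roughly like the existing
error-pfn handling in handle_abnormal_pfn(); KVM_PFN_ERR_PCI_HOLE and
kvm_handle_pci_hole() below are hypothetical names, modelled on
KVM_PFN_ERR_RO_FAULT and friends:

	/* Sketch only: try_async_pf() keeps the memslot lookup and
	 * returns a magic error pfn for a PCI-hole slot, which
	 * handle_abnormal_pfn() then turns into the emulated access. */
	if (unlikely(is_error_pfn(pfn))) {
		if (pfn == KVM_PFN_ERR_PCI_HOLE)	/* hypothetical */
			*ret_val = kvm_handle_pci_hole(vcpu, gva, gfn);
		else
			*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
		return true;
	}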

> Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
>
> [... full patch trimmed; see the Patch section below ...]

Vitaly Kuznetsov Sept. 1, 2020, 2:15 p.m. UTC | #2
Sean Christopherson <sean.j.christopherson@intel.com> writes:

> On Fri, Aug 07, 2020 at 04:12:30PM +0200, Vitaly Kuznetsov wrote:
>> No functional change intended. Slot flags will need to be analyzed
>> prior to try_async_pf() when KVM_MEM_PCI_HOLE is implemented.
>

(Sorry it took me so long to reply. No, I wasn't hoping for Paolo's
magical "queued, thanks", I just tried not to read my email while on
vacation.)

> Why?  Wouldn't it be just as easy, and arguably more appropriate, to add
> KVM_PFN_ERR_PCI_HOLE and update handle_abnormal_pfn() accordingly?
>

Yes, we can do that, but what I don't quite like here is that
try_async_pf() does much more than 'try async PF': in particular, it
also extracts 'pfn', and that is far from obvious from the name. Maybe
we can give try_async_pf() a smarter name (e.g. 'try_handle_pf()')?
Your suggestion would then make perfect sense to me.

>> Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
>>
>> [... full patch trimmed; see the Patch section below ...]

Sean Christopherson Sept. 4, 2020, 3:47 a.m. UTC | #3
On Tue, Sep 01, 2020 at 04:15:07PM +0200, Vitaly Kuznetsov wrote:
> Sean Christopherson <sean.j.christopherson@intel.com> writes:
> 
> > On Fri, Aug 07, 2020 at 04:12:30PM +0200, Vitaly Kuznetsov wrote:
> >> No functional change intended. Slot flags will need to be analyzed
> >> prior to try_async_pf() when KVM_MEM_PCI_HOLE is implemented.
> >
> 
> (Sorry it took me so long to reply. No, I wasn't hoping for Paolo's
> magical "queued, thanks", I just tried not to read my email while on
> vacation.)
> 
> > Why?  Wouldn't it be just as easy, and arguably more appropriate, to add
> > KVM_PFN_ERR_PCI_HOLE and update handle_abnormal_pfn() accordingly?
> >
> 
> Yes, we can do that, but what I don't quite like here is that
> try_async_pf() does much more than 'try async PF': in particular, it
> also extracts 'pfn', and that is far from obvious from the name. Maybe
> we can give try_async_pf() a smarter name (e.g. 'try_handle_pf()')?
> Your suggestion would then make perfect sense to me.

Ya, try_async_pf() is a horrible name.  try_handle_pf() isn't bad, but it's
not technically handling the fault.  Maybe try_get_pfn() with an inverted
return?

	if (!try_get_pfn(...))
		return RET_PF_RETRY;
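
A sketch of how the inverted return would read at the call sites; the
try_get_pfn() name and polarity follow the suggestion above and are not
merged code:

	/* Today: "true" means "an async PF was queued, retry". */
	if (try_async_pf(vcpu, slot, prefault, gfn, gpa, &pfn, write,
			 &map_writable))
		return RET_PF_RETRY;

	/* Proposed try_get_pfn(): "true" means "got a pfn", so the
	 * failure path reads naturally at a glance. */
	if (!try_get_pfn(vcpu, slot, prefault, gfn, gpa, &pfn, write,
			 &map_writable))
		return RET_PF_RETRY;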

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 862bf418214e..fef6956393f7 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4042,11 +4042,10 @@  static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
 
-static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
-			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
-			 bool *writable)
+static bool try_async_pf(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
+			 bool prefault, gfn_t gfn, gpa_t cr2_or_gpa,
+			 kvm_pfn_t *pfn, bool write, bool *writable)
 {
-	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 	bool async;
 
 	/* Don't expose private memslots to L2. */
@@ -4082,7 +4081,7 @@  static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	bool exec = error_code & PFERR_FETCH_MASK;
 	bool lpage_disallowed = exec && is_nx_huge_page_enabled();
 	bool map_writable;
-
+	struct kvm_memory_slot *slot;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	unsigned long mmu_seq;
 	kvm_pfn_t pfn;
@@ -4104,7 +4103,10 @@  static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+
+	if (try_async_pf(vcpu, slot, prefault, gfn, gpa, &pfn, write,
+			 &map_writable))
 		return RET_PF_RETRY;
 
 	if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 0172a949f6a7..5c6a895f67c3 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -779,6 +779,7 @@  static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	int write_fault = error_code & PFERR_WRITE_MASK;
 	int user_fault = error_code & PFERR_USER_MASK;
 	struct guest_walker walker;
+	struct kvm_memory_slot *slot;
 	int r;
 	kvm_pfn_t pfn;
 	unsigned long mmu_seq;
@@ -833,8 +834,10 @@  static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
-			 &map_writable))
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, walker.gfn);
+
+	if (try_async_pf(vcpu, slot, prefault, walker.gfn, addr, &pfn,
+			 write_fault, &map_writable))
 		return RET_PF_RETRY;
 
 	if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))