diff mbox

[v2,1/5] KVM: MMU: release noslot pfn on the fail path properly

Message ID 5052FF82.1060106@linux.vnet.ibm.com (mailing list archive)
State New, archived
Headers show

Commit Message

Xiao Guangrong Sept. 14, 2012, 9:57 a.m. UTC
We cannot directly call kvm_release_pfn_clean to release the pfn,
since we may encounter a noslot pfn, which is used to cache mmio info
into the spte

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c         |    6 ++++--
 arch/x86/kvm/paging_tmpl.h |    6 ++++--
 2 files changed, 8 insertions(+), 4 deletions(-)

Comments

Marcelo Tosatti Sept. 15, 2012, 3:13 p.m. UTC | #1
On Fri, Sep 14, 2012 at 05:57:22PM +0800, Xiao Guangrong wrote:
> We can not directly call kvm_release_pfn_clean to release the pfn
> since we can meet noslot pfn which is used to cache mmio info into
> spte
> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
> ---
>  arch/x86/kvm/mmu.c         |    6 ++++--
>  arch/x86/kvm/paging_tmpl.h |    6 ++++--
>  2 files changed, 8 insertions(+), 4 deletions(-)

It's clearer to the reader if is_invalid_pfn() is used instead of 
is_error_pfn().

BTW how about killing this unused helper

static bool mmu_invalid_pfn(pfn_t pfn)
{
        return unlikely(is_invalid_pfn(pfn));
}

This can be done inline.

> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index aa0b469..f74c63a 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -2877,7 +2877,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
> 
>  out_unlock:
>  	spin_unlock(&vcpu->kvm->mmu_lock);
> -	kvm_release_pfn_clean(pfn);
> +	if (!is_error_pfn(pfn))
> +		kvm_release_pfn_clean(pfn);
>  	return 0;
>  }

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Xiao Guangrong Sept. 18, 2012, 7:46 a.m. UTC | #2
On 09/15/2012 11:13 PM, Marcelo Tosatti wrote:
> On Fri, Sep 14, 2012 at 05:57:22PM +0800, Xiao Guangrong wrote:
>> We can not directly call kvm_release_pfn_clean to release the pfn
>> since we can meet noslot pfn which is used to cache mmio info into
>> spte
>>
>> Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
>> ---
>>  arch/x86/kvm/mmu.c         |    6 ++++--
>>  arch/x86/kvm/paging_tmpl.h |    6 ++++--
>>  2 files changed, 8 insertions(+), 4 deletions(-)
> 
> It's clearer to the reader if is_invalid_pfn() is used instead of 
> is_error_pfn().
> 
> BTW how about killing this unused helper
> 
> static bool mmu_invalid_pfn(pfn_t pfn)
> {
>         return unlikely(is_invalid_pfn(pfn));
> }
> 
> This can be done inline.

Okay, will do. Thanks Marcelo!


--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index aa0b469..f74c63a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2877,7 +2877,8 @@  static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,

 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	kvm_release_pfn_clean(pfn);
+	if (!is_error_pfn(pfn))
+		kvm_release_pfn_clean(pfn);
 	return 0;
 }

@@ -3345,7 +3346,8 @@  static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,

 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	kvm_release_pfn_clean(pfn);
+	if (!is_error_pfn(pfn))
+		kvm_release_pfn_clean(pfn);
 	return 0;
 }

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index bf8c42b..c004ab6 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -544,7 +544,8 @@  static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 out_gpte_changed:
 	if (sp)
 		kvm_mmu_put_page(sp, it.sptep);
-	kvm_release_pfn_clean(pfn);
+	if (!is_error_pfn(pfn))
+		kvm_release_pfn_clean(pfn);
 	return NULL;
 }

@@ -645,7 +646,8 @@  static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,

 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	kvm_release_pfn_clean(pfn);
+	if (!is_error_pfn(pfn))
+		kvm_release_pfn_clean(pfn);
 	return 0;
 }