
[v3,1/3] KVM: MMU: Clean up the error handling of walk_addr_generic()

Message ID 20110620232947.83d016d3.takuya.yoshikawa@gmail.com (mailing list archive)
State New, archived

Commit Message

Takuya Yoshikawa June 20, 2011, 2:29 p.m. UTC
From: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>

Avoid the two-step jump to the error handling part.  This eliminates the
use of the variables present and rsvd_fault.

We also use the const type qualifier to show that write/user/fetch_fault
do not change in the function.

Both of these were suggested by Ingo Molnar.
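
For illustration, the shape of the change, condensed rather than quoted
from the hunks below: before, a fault site cleared a flag and broke out
of the walk loop, and a check after the loop made the second jump to the
error handler; now each fault site records its error bits and jumps
straight to the error label.

	/* Before: step 1, leave the loop ... */
	if (unlikely(!is_present_gpte(pte))) {
		present = false;
		break;
	}
	...
	/* ... step 2, after the loop, jump to the handler. */
	if (unlikely(!present || eperm || rsvd_fault))
		goto error;

	/* After: one direct jump from the fault site. */
	if (unlikely(!is_present_gpte(pte))) {
		errcode |= PFERR_PRESENT_MASK;
		goto error;
	}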

Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
---
 v2-v3: only changelog update

 arch/x86/kvm/paging_tmpl.h |   64 +++++++++++++++++++------------------------
 1 files changed, 28 insertions(+), 36 deletions(-)

Comments

Marcelo Tosatti June 22, 2011, 4:46 p.m. UTC | #1
On Mon, Jun 20, 2011 at 11:29:47PM +0900, Takuya Yoshikawa wrote:
> [...]
>  		if (unlikely(!is_present_gpte(pte))) {
> -			present = false;
> -			break;
> +			errcode |= PFERR_PRESENT_MASK;
> +			goto error;
>  		}

Assignment of PFERR_PRESENT_MASK is inverted.
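
The old code set the bit only when the whole walk had found present
entries (if (present) walker->fault.error_code |= PFERR_PRESENT_MASK;),
so a clear bit means "page not present".  Restoring that would look
something like the following; a sketch of the intended semantics, not a
tested respin:

	/*
	 * A not-present entry must leave PFERR_PRESENT_MASK clear,
	 * so jump with errcode still empty ...
	 */
	if (unlikely(!is_present_gpte(pte)))
		goto error;

	/*
	 * ... while permission (and reserved-bit) faults occur on a
	 * present translation and therefore do set the bit.
	 */
	if (unlikely(eperm)) {
		errcode |= PFERR_PRESENT_MASK;
		goto error;
	}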

Avi Kivity June 22, 2011, 5:05 p.m. UTC | #2
On 06/22/2011 07:46 PM, Marcelo Tosatti wrote:
> >   		if (unlikely(!is_present_gpte(pte))) {
> >  -			present = false;
> >  -			break;
> >  +			errcode |= PFERR_PRESENT_MASK;
> >  +			goto error;
> >   		}
>
> Assignment of PFERR_PRESENT_MASK is inverted.
>

Note: kvm-unit-tests.git/x86/access.flat would have caught this.
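
For anyone who wants to reproduce that: kvm-unit-tests is built and run
roughly as below; the runner script and paths are from memory and may
differ between versions of the tree.

	git clone git://git.kernel.org/pub/scm/virt/kvm/kvm-unit-tests.git
	cd kvm-unit-tests
	./configure && make
	./x86/run x86/access.flat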

Patch

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 1caeb4d..137aa45 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -125,18 +125,17 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 	gfn_t table_gfn;
 	unsigned index, pt_access, uninitialized_var(pte_access);
 	gpa_t pte_gpa;
-	bool eperm, present, rsvd_fault;
-	int offset, write_fault, user_fault, fetch_fault;
-
-	write_fault = access & PFERR_WRITE_MASK;
-	user_fault = access & PFERR_USER_MASK;
-	fetch_fault = access & PFERR_FETCH_MASK;
+	bool eperm;
+	int offset;
+	const int write_fault = access & PFERR_WRITE_MASK;
+	const int user_fault  = access & PFERR_USER_MASK;
+	const int fetch_fault = access & PFERR_FETCH_MASK;
+	u16 errcode = 0;
 
 	trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
 				     fetch_fault);
 walk:
-	present = true;
-	eperm = rsvd_fault = false;
+	eperm = false;
 	walker->level = mmu->root_level;
 	pte           = mmu->get_cr3(vcpu);
 
@@ -145,7 +144,7 @@ walk:
 		pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3);
 		trace_kvm_mmu_paging_element(pte, walker->level);
 		if (!is_present_gpte(pte)) {
-			present = false;
+			errcode |= PFERR_PRESENT_MASK;
 			goto error;
 		}
 		--walker->level;
@@ -171,34 +170,34 @@ walk:
 		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
 					      PFERR_USER_MASK|PFERR_WRITE_MASK);
 		if (unlikely(real_gfn == UNMAPPED_GVA)) {
-			present = false;
-			break;
+			errcode |= PFERR_PRESENT_MASK;
+			goto error;
 		}
 		real_gfn = gpa_to_gfn(real_gfn);
 
 		host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
 		if (unlikely(kvm_is_error_hva(host_addr))) {
-			present = false;
-			break;
+			errcode |= PFERR_PRESENT_MASK;
+			goto error;
 		}
 
 		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
 		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) {
-			present = false;
-			break;
+			errcode |= PFERR_PRESENT_MASK;
+			goto error;
 		}
 
 		trace_kvm_mmu_paging_element(pte, walker->level);
 
 		if (unlikely(!is_present_gpte(pte))) {
-			present = false;
-			break;
+			errcode |= PFERR_PRESENT_MASK;
+			goto error;
 		}
 
 		if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
 					      walker->level))) {
-			rsvd_fault = true;
-			break;
+			errcode |= PFERR_RSVD_MASK;
+			goto error;
 		}
 
 		if (unlikely(write_fault && !is_writable_pte(pte)
@@ -213,16 +212,15 @@ walk:
 			eperm = true;
 #endif
 
-		if (!eperm && !rsvd_fault
-		    && unlikely(!(pte & PT_ACCESSED_MASK))) {
+		if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
 			int ret;
 			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
 						       sizeof(pte));
 			ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
 						  pte, pte|PT_ACCESSED_MASK);
 			if (unlikely(ret < 0)) {
-				present = false;
-				break;
+				errcode |= PFERR_PRESENT_MASK;
+				goto error;
 			} else if (ret)
 				goto walk;
 
@@ -276,7 +274,7 @@ walk:
 		--walker->level;
 	}
 
-	if (unlikely(!present || eperm || rsvd_fault))
+	if (unlikely(eperm))
 		goto error;
 
 	if (write_fault && unlikely(!is_dirty_gpte(pte))) {
@@ -286,7 +284,7 @@ walk:
 		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
 					  pte, pte|PT_DIRTY_MASK);
 		if (unlikely(ret < 0)) {
-			present = false;
+			errcode |= PFERR_PRESENT_MASK;
 			goto error;
 		} else if (ret)
 			goto walk;
@@ -303,20 +301,14 @@ walk:
 	return 1;
 
 error:
-	walker->fault.vector = PF_VECTOR;
-	walker->fault.error_code_valid = true;
-	walker->fault.error_code = 0;
-	if (present)
-		walker->fault.error_code |= PFERR_PRESENT_MASK;
-
-	walker->fault.error_code |= write_fault | user_fault;
-
+	errcode |= write_fault | user_fault;
 	if (fetch_fault && (mmu->nx ||
 			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
-		walker->fault.error_code |= PFERR_FETCH_MASK;
-	if (rsvd_fault)
-		walker->fault.error_code |= PFERR_RSVD_MASK;
+		errcode |= PFERR_FETCH_MASK;
 
+	walker->fault.vector = PF_VECTOR;
+	walker->fault.error_code_valid = true;
+	walker->fault.error_code = errcode;
 	walker->fault.address = addr;
 	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;