
[v4,4/4] Add instruction fetch checking when walking guest page table

Message ID 5D8008F58939784290FAB48F5497519844E9278013@shsmsx502.ccr.corp.intel.com (mailing list archive)
State New, archived

Commit Message

Yang, Wei Y May 29, 2011, 11:42 a.m. UTC
This patch adds instruction fetch checking when walking the guest page table: with CR4.SMEP set, a supervisor-mode instruction fetch from a user-accessible page is treated as a permission violation. The fetch bit is also reported in the fault error code when SMEP is enabled, even if NX is disabled.
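
For illustration, here is a minimal user-space sketch of the rule the first hunk
enforces (the helper name and constant below are stand-ins chosen for this
example, not the KVM implementation itself):

 #include <stdbool.h>
 #include <stdio.h>

 #define PT_USER_MASK (1u << 2)	/* models the U/S bit of a guest PTE */

 /*
  * SMEP rule: a supervisor-mode instruction fetch from a user-accessible
  * page is a permission violation when CR4.SMEP is set.
  */
 static bool smep_fetch_violation(unsigned int pte_access, bool cr4_smep,
 				 bool fetch_fault, bool user_fault)
 {
 	return (pte_access & PT_USER_MASK) && cr4_smep &&
 	       fetch_fault && !user_fault;
 }

 int main(void)
 {
 	/* supervisor fetch from a user page, SMEP on: violation (prints 1) */
 	printf("%d\n", smep_fetch_violation(PT_USER_MASK, true, true, false));
 	/* same fetch with SMEP off: allowed (prints 0) */
 	printf("%d\n", smep_fetch_violation(PT_USER_MASK, false, true, false));
 	return 0;
 }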

 Signed-off-by: Yang, Wei <wei.y.yang@intel.com>
 Signed-off-by: Shan, Haitao <haitao.shan@intel.com>
 Signed-off-by: Li, Xin <xin.li@intel.com>

---
 arch/x86/kvm/paging_tmpl.h |    9 ++++++++-
 1 files changed, 8 insertions(+), 1 deletions(-)

--
1.7.4.1


Patch

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6c4dc01..6a56ca3 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -246,6 +246,12 @@ walk:
 			gfn_t gfn;
 			u32 ac;

+			/* check if the kernel is fetching from user page */
+			if (unlikely((pte_access & PT_USER_MASK) &&
+				     kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
+				if (fetch_fault && !user_fault)
+					eperm = true;
+
 			gfn = gpte_to_gfn_lvl(pte, lvl);
 			gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;

@@ -305,7 +311,8 @@ error:

 	walker->fault.error_code |= write_fault | user_fault;

-	if (fetch_fault && mmu->nx)
+	if (fetch_fault && (mmu->nx ||
+			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
 		walker->fault.error_code |= PFERR_FETCH_MASK;
 	if (rsvd_fault)
 		walker->fault.error_code |= PFERR_RSVD_MASK;