
[PULL,v2,02/15] target-i386: do not duplicate page protection checks

Message ID 1452880813-141591-2-git-send-email-pbonzini@redhat.com (mailing list archive)
State New, archived

Commit Message

Paolo Bonzini Jan. 15, 2016, 6 p.m. UTC
x86_cpu_handle_mmu_fault currently checks the writability and
executability of pages twice: the first time to decide whether to
trigger a page fault, the second time to compute the "prot" argument
to tlb_set_page_with_attrs.

Reorganize the code so that "prot" is computed first, then used to
check whether to raise a page fault, and finally PAGE_WRITE is
removed from it if the D bit will have to be set.
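
For illustration only (this sketch is not part of the patch): the new
check "prot & (1 << is_write1)" works because QEMU encodes the access
type as 0 for data reads, 1 for data writes and 2 for instruction
fetches, which lines up with PAGE_READ, PAGE_WRITE and PAGE_EXEC being
bits 0, 1 and 2. A minimal standalone model of that correspondence,
with access_allowed as a hypothetical helper name:

#include <assert.h>
#include <stdio.h>

/* Same values as QEMU's PAGE_* flags, so that (1 << access_type)
 * selects the matching permission bit. */
enum {
    PAGE_READ  = 0x0001,
    PAGE_WRITE = 0x0002,
    PAGE_EXEC  = 0x0004
};

/* access_type: 0 = read, 1 = write, 2 = fetch (the "is_write1"
 * convention used by the patch). */
static int access_allowed(int prot, int access_type)
{
    return (prot & (1 << access_type)) != 0;
}

int main(void)
{
    int prot = PAGE_READ | PAGE_EXEC;  /* a write-protected page */

    assert(access_allowed(prot, 0));   /* read permitted */
    assert(!access_allowed(prot, 1));  /* write would fault */
    assert(access_allowed(prot, 2));   /* fetch permitted */
    printf("permission checks behave as expected\n");
    return 0;
}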

Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 target-i386/helper.c | 65 +++++++++++++++++++---------------------------------
 1 file changed, 23 insertions(+), 42 deletions(-)

Patch

diff --git a/target-i386/helper.c b/target-i386/helper.c
index d18be95..6b10019 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -890,38 +890,30 @@  do_check_protect_pse36:
         goto do_fault_rsvd;
     }
     ptep ^= PG_NX_MASK;
-    if ((ptep & PG_NX_MASK) && is_write1 == 2) {
+
+    /* can the page be put in the TLB?  prot will tell us */
+    if (is_user && !(ptep & PG_USER_MASK)) {
         goto do_fault_protect;
     }
-    switch (mmu_idx) {
-    case MMU_USER_IDX:
-        if (!(ptep & PG_USER_MASK)) {
-            goto do_fault_protect;
-        }
-        if (is_write && !(ptep & PG_RW_MASK)) {
-            goto do_fault_protect;
-        }
-        break;
 
-    case MMU_KSMAP_IDX:
-        if (is_write1 != 2 && (ptep & PG_USER_MASK)) {
-            goto do_fault_protect;
+    prot = 0;
+    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
+        prot |= PAGE_READ;
+        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
+            prot |= PAGE_WRITE;
         }
-        /* fall through */
-    case MMU_KNOSMAP_IDX:
-        if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
-            (ptep & PG_USER_MASK)) {
-            goto do_fault_protect;
-        }
-        if ((env->cr[0] & CR0_WP_MASK) &&
-            is_write && !(ptep & PG_RW_MASK)) {
-            goto do_fault_protect;
-        }
-        break;
+    }
+    if (!(ptep & PG_NX_MASK) &&
+        (mmu_idx == MMU_USER_IDX ||
+         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
+        prot |= PAGE_EXEC;
+    }
 
-    default: /* cannot happen */
-        break;
+    if ((prot & (1 << is_write1)) == 0) {
+        goto do_fault_protect;
     }
+
+    /* yes, it can! */
     is_dirty = is_write && !(pte & PG_DIRTY_MASK);
     if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
         pte |= PG_ACCESSED_MASK;
@@ -931,25 +923,13 @@  do_check_protect_pse36:
         x86_stl_phys_notdirty(cs, pte_addr, pte);
     }
 
-    /* the page can be put in the TLB */
-    prot = PAGE_READ;
-    if (!(ptep & PG_NX_MASK) &&
-        (mmu_idx == MMU_USER_IDX ||
-         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
-        prot |= PAGE_EXEC;
-    }
-    if (pte & PG_DIRTY_MASK) {
+    if (!(pte & PG_DIRTY_MASK)) {
         /* only set write access if already dirty... otherwise wait
            for dirty access */
-        if (is_user) {
-            if (ptep & PG_RW_MASK)
-                prot |= PAGE_WRITE;
-        } else {
-            if (!(env->cr[0] & CR0_WP_MASK) ||
-                (ptep & PG_RW_MASK))
-                prot |= PAGE_WRITE;
-        }
+        assert(!is_write);
+        prot &= ~PAGE_WRITE;
     }
+
  do_mapping:
     pte = pte & env->a20_mask;
 
@@ -962,6 +942,7 @@  do_check_protect_pse36:
     page_offset = vaddr & (page_size - 1);
     paddr = pte + page_offset;
 
+    assert(prot & (1 << is_write1));
     tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                             prot, mmu_idx, page_size);
     return 0;
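
One more illustrative aside (again, not part of the patch): clearing
PAGE_WRITE while the PTE's D bit is still clear installs a read-only
TLB entry, so the first write to the page faults back into this
handler; that fault is a write, the handler sets the D bit, and
PAGE_WRITE survives on the retry. A toy model of that tail step, where
tlb_prot is a hypothetical stand-in for the end of
x86_cpu_handle_mmu_fault:

#include <assert.h>

enum { PAGE_READ = 0x0001, PAGE_WRITE = 0x0002 };
#define PG_DIRTY_MASK (1 << 6)  /* the x86 PTE D bit */

/* Given permissions already validated against the access, decide what
 * goes into the TLB: a not-yet-dirty page loses PAGE_WRITE so that
 * the dirtying write refaults and D can be set at that point. */
static int tlb_prot(int prot, unsigned long *pte, int is_write)
{
    if (is_write) {
        *pte |= PG_DIRTY_MASK;   /* the fault path sets D on writes */
    }
    if (!(*pte & PG_DIRTY_MASK)) {
        assert(!is_write);       /* mirrors the assert in the patch */
        prot &= ~PAGE_WRITE;     /* wait for the dirtying write */
    }
    return prot;
}

int main(void)
{
    unsigned long pte = 0;

    /* A read maps the page read-only even though the PTE is writable,
     * then the first write sets D and keeps PAGE_WRITE. */
    assert(tlb_prot(PAGE_READ | PAGE_WRITE, &pte, 0) == PAGE_READ);
    assert(tlb_prot(PAGE_READ | PAGE_WRITE, &pte, 1)
           == (PAGE_READ | PAGE_WRITE));
    return 0;
}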