
[PULL,36/39] target-ppc: Add new TLB invalidate by HPTE call for hash64 MMUs

Message ID 1454044031-5930-38-git-send-email-david@gibson.dropbear.id.au
State New, archived

Commit Message

David Gibson Jan. 29, 2016, 5:07 a.m. UTC
When HPTEs are removed or modified by hypercalls on spapr, we need to
invalidate the relevant pages in the qemu TLB.

Currently we do that with some complicated calculations to work out the
right encoding for the tlbie instruction, which we then pass to
ppc_tlb_invalidate_one()... which ignores the argument entirely and
flushes the whole TLB.

Avoid that by adding a new flush-by-HPTE helper in mmu-hash64.c.
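
For reference, a minimal sketch of the change at the call sites, taken
from the remove_hpte() hunk below (v, r and ptex are the HPTE words and
index already in scope there):

    /* Before: build a tlbie RB operand that was then ignored anyway */
    rb = compute_tlbie_rb(v, r, ptex);
    ppc_tlb_invalidate_one(env, rb);

    /* After: hand the HPTE index and both PTE words to the hash64 MMU code */
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);

For now the helper simply does a full tlb_flush() (see the XXX comment in
the new code), but the call sites no longer need to know about tlbie
encodings.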

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Reviewed-by: Alexander Graf <agraf@suse.de>
---
 hw/ppc/spapr_hcall.c    | 46 ++++------------------------------------------
 target-ppc/mmu-hash64.c | 12 ++++++++++++
 target-ppc/mmu-hash64.h |  3 +++
 3 files changed, 19 insertions(+), 42 deletions(-)

Patch

diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 4707196..dedc7e0 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -37,42 +37,6 @@  static void set_spr(CPUState *cs, int spr, target_ulong value,
     run_on_cpu(cs, do_spr_sync, &s);
 }
 
-static target_ulong compute_tlbie_rb(target_ulong v, target_ulong r,
-                                     target_ulong pte_index)
-{
-    target_ulong rb, va_low;
-
-    rb = (v & ~0x7fULL) << 16; /* AVA field */
-    va_low = pte_index >> 3;
-    if (v & HPTE64_V_SECONDARY) {
-        va_low = ~va_low;
-    }
-    /* xor vsid from AVA */
-    if (!(v & HPTE64_V_1TB_SEG)) {
-        va_low ^= v >> 12;
-    } else {
-        va_low ^= v >> 24;
-    }
-    va_low &= 0x7ff;
-    if (v & HPTE64_V_LARGE) {
-        rb |= 1;                         /* L field */
-#if 0 /* Disable that P7 specific bit for now */
-        if (r & 0xff000) {
-            /* non-16MB large page, must be 64k */
-            /* (masks depend on page size) */
-            rb |= 0x1000;                /* page encoding in LP field */
-            rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
-            rb |= (va_low & 0xfe);       /* AVAL field */
-        }
-#endif
-    } else {
-        /* 4kB page */
-        rb |= (va_low & 0x7ff) << 12;   /* remaining 11b of AVA */
-    }
-    rb |= (v >> 54) & 0x300;            /* B field */
-    return rb;
-}
-
 static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index)
 {
     /*
@@ -198,7 +162,7 @@  static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
 {
     CPUPPCState *env = &cpu->env;
     uint64_t token;
-    target_ulong v, r, rb;
+    target_ulong v, r;
 
     if (!valid_pte_index(env, ptex)) {
         return REMOVE_PARM;
@@ -217,8 +181,7 @@  static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
     *vp = v;
     *rp = r;
     ppc_hash64_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
-    rb = compute_tlbie_rb(v, r, ptex);
-    ppc_tlb_invalidate_one(env, rb);
+    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
     return REMOVE_SUCCESS;
 }
 
@@ -322,7 +285,7 @@  static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
     target_ulong pte_index = args[1];
     target_ulong avpn = args[2];
     uint64_t token;
-    target_ulong v, r, rb;
+    target_ulong v, r;
 
     if (!valid_pte_index(env, pte_index)) {
         return H_PARAMETER;
@@ -343,10 +306,9 @@  static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
     r |= (flags << 55) & HPTE64_R_PP0;
     r |= (flags << 48) & HPTE64_R_KEY_HI;
     r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
-    rb = compute_tlbie_rb(v, r, pte_index);
     ppc_hash64_store_hpte(cpu, pte_index,
                           (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
-    ppc_tlb_invalidate_one(env, rb);
+    ppc_hash64_tlb_flush_hpte(cpu, pte_index, v, r);
     /* Don't need a memory barrier, due to qemu's global lock */
     ppc_hash64_store_hpte(cpu, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
     return H_SUCCESS;
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index ee1e8bf..3284776 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -707,3 +707,15 @@  void ppc_hash64_store_hpte(PowerPCCPU *cpu,
                  env->htab_base + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
     }
 }
+
+void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
+                               target_ulong pte_index,
+                               target_ulong pte0, target_ulong pte1)
+{
+    /*
+     * XXX: given the fact that there are too many segments to
+     * invalidate, and we still don't have a tlb_flush_mask(env, n,
+     * mask) in QEMU, we just invalidate all TLBs
+     */
+    tlb_flush(CPU(cpu), 1);
+}
diff --git a/target-ppc/mmu-hash64.h b/target-ppc/mmu-hash64.h
index 24fd2c4..293a951 100644
--- a/target-ppc/mmu-hash64.h
+++ b/target-ppc/mmu-hash64.h
@@ -13,6 +13,9 @@  int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong address, int rw,
                                 int mmu_idx);
 void ppc_hash64_store_hpte(PowerPCCPU *cpu, target_ulong index,
                            target_ulong pte0, target_ulong pte1);
+void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
+                               target_ulong pte_index,
+                               target_ulong pte0, target_ulong pte1);
 #endif
 
 /*