
[4/4] ppc: fix VRMA support

Message ID 1467357013-4039-5-git-send-email-clg@kaod.org (mailing list archive)
State New, archived

Commit Message

Cédric Le Goater July 1, 2016, 7:10 a.m. UTC
commit 08109fd4360d ('ppc: Add proper real mode translation support')
introduced VRMA support, for which SLB entries need to be created. But
it did not take into account the changes in ppc_slb_t and missed
setting the segment page size attribute.

However, gcc spotted it:

target-ppc/mmu-hash64.c: In function 'ppc_hash64_get_phys_page_debug':
target-ppc/mmu-hash64.c:936:16: error: '*((void *)&slb+16)' may be used uninitialized in this function [-Werror=maybe-uninitialized]

     pte_offset = ppc_hash64_htab_lookup(cpu, &slb, addr, &pte);

This adds an extra routine to build the SLB entry and compute the
segment page size.

Signed-off-by: Cédric Le Goater <clg@kaod.org>
---

 I am not sure how to handle errors. Could there be one? If so,
 should we generate a POWERPC_EXCP_MCHECK?
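
 As one possible answer to the question above, here is a minimal
 caller-side sketch (not part of the posted patch) assuming a failed
 VRMA SLB build should be turned into a machine check. It reuses the
 cs/env variables already in scope in ppc_hash64_handle_mmu_fault():

     if (env->spr[SPR_LPCR] & LPCR_VPM0) {
         /* VRMA, we make up an SLB entry */
         if (ppc_hash64_make_vrma_slb(env, &slb) < 0) {
             /* Hypothetical handling: a bad VRMASD encoding would be a
              * hypervisor error, so one option is to raise a machine
              * check instead of continuing with a bogus SLB entry. */
             cs->exception_index = POWERPC_EXCP_MCHECK;
             env->error_code = 0;
             return 1;
         }
         goto skip_slb;
     }

 The debug path (ppc_hash64_get_phys_page_debug) could instead just
 return -1 on failure, since it should not raise exceptions.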

 target-ppc/mmu-hash64.c | 53 ++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 39 insertions(+), 14 deletions(-)

Patch

diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 7ef45ee53bf5..117f198a9a2e 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -684,6 +684,43 @@  static int64_t ppc_hash64_get_rmls(CPUPPCState *env)
     }
 }
 
+static int ppc_hash64_make_vrma_slb(CPUPPCState *env, ppc_slb_t *slb)
+{
+    uint32_t vrmasd;
+    const struct ppc_one_seg_page_size *sps = NULL;
+    target_ulong esid, vsid;
+    int i;
+
+    vsid = SLB_VSID_VRMA;
+    vrmasd = (env->spr[SPR_LPCR] & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
+    vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
+    esid = SLB_ESID_V;
+
+    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+        const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];
+
+        if (!sps1->page_shift) {
+            break;
+        }
+
+        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
+            sps = sps1;
+            break;
+        }
+    }
+
+    if (!sps) {
+        error_report("Bad page size encoding esid 0x"TARGET_FMT_lx
+                     " vsid 0x"TARGET_FMT_lx, esid, vsid);
+        return -1;
+    }
+
+    slb->vsid = vsid;
+    slb->esid = esid;
+    slb->sps = sps;
+    return 0;
+}
+
 int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
                                 int rwx, int mmu_idx)
 {
@@ -722,13 +759,7 @@  int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
         } else {
             /* Otherwise, check VPM for RMA vs VRMA */
             if (env->spr[SPR_LPCR] & LPCR_VPM0) {
-                uint32_t vrmasd;
-                /* VRMA, we make up an SLB entry */
-                slb.vsid = SLB_VSID_VRMA;
-                vrmasd = (env->spr[SPR_LPCR] & LPCR_VRMASD) >>
-                    LPCR_VRMASD_SHIFT;
-                slb.vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
-                slb.esid = SLB_ESID_V;
+                ppc_hash64_make_vrma_slb(env, &slb);
                 goto skip_slb;
             }
             /* RMA. Check bounds in RMLS */
@@ -893,13 +924,7 @@  hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
 
         /* Otherwise, check VPM for RMA vs VRMA */
         if (env->spr[SPR_LPCR] & LPCR_VPM0) {
-            uint32_t vrmasd;
-
-            /* VRMA, we make up an SLB entry */
-            slb.vsid = SLB_VSID_VRMA;
-            vrmasd = (env->spr[SPR_LPCR] & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
-            slb.vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
-            slb.esid = SLB_ESID_V;
+            ppc_hash64_make_vrma_slb(env, &slb);
             goto skip_slb;
         }
         /* RMA. Check bounds in RMLS */