
[05/10] target-ppc: Use actual page size encodings from HPTE

Message ID 1453698952-32092-6-git-send-email-david@gibson.dropbear.id.au (mailing list archive)
State New, archived

Commit Message

David Gibson Jan. 25, 2016, 5:15 a.m. UTC
At present the 64-bit hash MMU code uses information from the SLB to
determine the page size of a translation.  We do need that information to
correctly look up the hash table.  However, the MMU also allows a
possibly larger page size to be encoded into the HPTE itself, which is used
to populate the TLB.  At present qemu doesn't check that, and so doesn't
support the MPSS ("Multiple Page Size per Segment") feature.

This makes a start on allowing that, by adding an hpte_page_shift()
function which looks up the page size of an HPTE.  We use this to validate
page size encodings on faults, and to populate the qemu TLB with larger
page sizes when appropriate.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
 target-ppc/mmu-hash64.c | 74 ++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 70 insertions(+), 4 deletions(-)
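
To make the encoding concrete, here is a small self-contained sketch of the
check being added: with the L bit set, the actual page size is found by
matching the low-order bits of the HPTE's second doubleword against the
segment's encoding table.  The constants are mirrored from mmu-hash64.h and
the 64kiB penc value of 1 is the usual POWER7 encoding -- both are
assumptions for illustration, not part of the patch, and the comparison
already includes the shift fix noted in the first comment below.

#include <stdint.h>
#include <stdio.h>

/* Values mirrored from mmu-hash64.h (assumed here for illustration) */
#define HPTE64_V_LARGE      0x0000000000000004ULL
#define HPTE64_R_RPN        0x0ffffffffffff000ULL
#define HPTE64_R_RPN_SHIFT  12

int main(void)
{
    /* A hypothetical HPTE mapping a 64kiB page: L bit set in pte0, and the
     * low RPN bits of pte1 carrying the (assumed) 64kiB encoding penc = 1 */
    uint64_t pte0 = HPTE64_V_LARGE;
    uint64_t pte1 = 0x00000000dead1000ULL;
    unsigned page_shift = 16;   /* candidate size from the segment's table */
    uint64_t pte_enc = 1;       /* its expected encoding in the HPTE */

    uint64_t mask = ((1ULL << page_shift) - 1) & HPTE64_R_RPN;

    if ((pte0 & HPTE64_V_LARGE) &&
        (pte1 & mask) == (pte_enc << HPTE64_R_RPN_SHIFT)) {
        printf("HPTE maps a page of 2^%u bytes\n", page_shift);
    }
    return 0;
}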

Comments

David Gibson Jan. 25, 2016, 1:26 p.m. UTC | #1
On Mon, Jan 25, 2016 at 04:15:47PM +1100, David Gibson wrote:
> At present the 64-bit hash MMU code uses information from the SLB to
> determine the page size of a translation.  We do need that information to
> correctly look up the hash table.  However the MMU also allows a
> possibly larger page size to be encoded into the HPTE itself, which is used
> to populate the TLB.  At present qemu doesn't check that, and so doesn't
> support the MPSS "Multiple Page Size per Segment" feature.
> 
> This makes a start on allowing this, by adding an hpte_page_shift()
> function which looks up the page size of an HPTE.  We use this to validate
> page sizes encodings on faults, and populate the qemu TLB with larger
> page sizes when appropriate.
> 
> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
> ---
>  target-ppc/mmu-hash64.c | 74 ++++++++++++++++++++++++++++++++++++++++++++++---
>  1 file changed, 70 insertions(+), 4 deletions(-)
> 
> diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
> index 28ad361..bcad826 100644
> --- a/target-ppc/mmu-hash64.c
> +++ b/target-ppc/mmu-hash64.c
> @@ -21,6 +21,7 @@
>  #include "exec/helper-proto.h"
>  #include "qemu/error-report.h"
>  #include "sysemu/kvm.h"
> +#include "qemu/error-report.h"
>  #include "kvm_ppc.h"
>  #include "mmu-hash64.h"
>  
> @@ -474,6 +475,43 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
>      return pte_offset;
>  }
>  
> +static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
> +    uint64_t pte0, uint64_t pte1)
> +{
> +    int i;
> +
> +    if (!(pte0 & HPTE64_V_LARGE)) {
> +        if (sps->page_shift != 12) {
> +            /* 4kiB page in a non 4kiB segment */
> +            return 0;
> +        }
> +        /* Normal 4kiB page */
> +        return 12;
> +    }
> +
> +    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
> +        const struct ppc_one_page_size *ps = &sps->enc[i];
> +        uint64_t mask;
> +
> +        if (!ps->page_shift) {
> +            break;
> +        }
> +
> +        if (ps->page_shift == 12) {
> +            /* L bit is set so this can't be a 4kiB page */
> +            continue;
> +        }
> +
> +        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
> +
> +        if ((pte1 & mask) == ps->pte_enc) {

Gah.  This needs to be (ps->pte_enc << HPTE64_R_RPN_SHIFT) or
everything breaks.

I remember fixing this earlier, but somehow I managed to lose the fix
in both this posting and the previous one.
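
As a sketch, the corrected match inside the loop would then read (assuming
HPTE64_R_RPN_SHIFT is the 12-bit shift of the RPN field, as in mmu-hash64.h):

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        /* penc sits in the low-order bits of the RPN field, so it has to be
         * shifted into place before comparing -- this is the fix above */
        if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }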
Alexander Graf Jan. 25, 2016, 8:18 p.m. UTC | #2
On 01/25/2016 06:15 AM, David Gibson wrote:
> At present the 64-bit hash MMU code uses information from the SLB to
> determine the page size of a translation.  We do need that information to
> correctly look up the hash table.  However the MMU also allows a
> possibly larger page size to be encoded into the HPTE itself, which is used
> to populate the TLB.  At present qemu doesn't check that, and so doesn't
> support the MPSS "Multiple Page Size per Segment" feature.
>
> This makes a start on allowing this, by adding an hpte_page_shift()
> function which looks up the page size of an HPTE.  We use this to validate
> page sizes encodings on faults, and populate the qemu TLB with larger
> page sizes when appropriate.
>
> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
> ---
>   target-ppc/mmu-hash64.c | 74 ++++++++++++++++++++++++++++++++++++++++++++++---
>   1 file changed, 70 insertions(+), 4 deletions(-)
>
> diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
> index 28ad361..bcad826 100644
> --- a/target-ppc/mmu-hash64.c
> +++ b/target-ppc/mmu-hash64.c
> @@ -21,6 +21,7 @@
>   #include "exec/helper-proto.h"
>   #include "qemu/error-report.h"
>   #include "sysemu/kvm.h"
> +#include "qemu/error-report.h"
>   #include "kvm_ppc.h"
>   #include "mmu-hash64.h"
>   
> @@ -474,6 +475,43 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
>       return pte_offset;
>   }
>   
> +static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
> +    uint64_t pte0, uint64_t pte1)
> +{
> +    int i;
> +
> +    if (!(pte0 & HPTE64_V_LARGE)) {
> +        if (sps->page_shift != 12) {
> +            /* 4kiB page in a non 4kiB segment */
> +            return 0;
> +        }
> +        /* Normal 4kiB page */
> +        return 12;
> +    }
> +
> +    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
> +        const struct ppc_one_page_size *ps = &sps->enc[i];
> +        uint64_t mask;
> +
> +        if (!ps->page_shift) {
> +            break;
> +        }
> +
> +        if (ps->page_shift == 12) {
> +            /* L bit is set so this can't be a 4kiB page */
> +            continue;
> +        }
> +
> +        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
> +
> +        if ((pte1 & mask) == ps->pte_enc) {
> +            return ps->page_shift;
> +        }
> +    }
> +
> +    return 0; /* Bad page size encoding */
> +}
> +
>   static hwaddr ppc_hash64_pte_raddr(unsigned page_shift, ppc_hash_pte64_t pte,
>                                      target_ulong eaddr)
>   {
> @@ -489,6 +527,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
>       CPUState *cs = CPU(cpu);
>       CPUPPCState *env = &cpu->env;
>       ppc_slb_t *slb;
> +    unsigned apshift;
>       hwaddr pte_offset;
>       ppc_hash_pte64_t pte;
>       int pp_prot, amr_prot, prot;
> @@ -552,6 +591,28 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
>       qemu_log_mask(CPU_LOG_MMU,
>                   "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);
>   
> +    /* Validate page size encoding */
> +    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
> +    if (!apshift) {
> +        error_report("Bad page size encoding in HPTE 0x%"PRIx64" - 0x%"PRIx64
> +                     " @ 0x%"HWADDR_PRIx, pte.pte0, pte.pte1, pte_offset);
> +        /* Treat it like a hash miss for the guest */
> +        if (rwx == 2) {
> +            cs->exception_index = POWERPC_EXCP_ISI;
> +            env->error_code = 0x40000000;
> +        } else {
> +            cs->exception_index = POWERPC_EXCP_DSI;
> +            env->error_code = 0;
> +            env->spr[SPR_DAR] = eaddr;
> +            if (rwx == 1) {
> +                env->spr[SPR_DSISR] = 0x42000000;
> +            } else {
> +                env->spr[SPR_DSISR] = 0x40000000;

I know that we don't do this for any other DSISR setting yet, but do you 
think we could mark the start here and use names for the bits instead? 
The kernel has a few nice defines.


Alex
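
For reference, a sketch of what that could look like using the Linux kernel's
names for these bits (DSISR_NOHPTE and DSISR_ISSTORE from
arch/powerpc/include/asm/reg.h; the assumption is that QEMU would carry
equivalent defines rather than reuse the kernel header):

/* Kernel-style names for the bits used above, values as in asm/reg.h */
#define DSISR_NOHPTE   0x40000000  /* no translation found for the address */
#define DSISR_ISSTORE  0x02000000  /* the faulting access was a store */

            env->spr[SPR_DSISR] = DSISR_NOHPTE
                                | (rwx == 1 ? DSISR_ISSTORE : 0);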
David Gibson Jan. 27, 2016, 12:40 a.m. UTC | #3
On Mon, Jan 25, 2016 at 09:18:18PM +0100, Alexander Graf wrote:
> 
> 
> On 01/25/2016 06:15 AM, David Gibson wrote:
> >At present the 64-bit hash MMU code uses information from the SLB to
> >determine the page size of a translation.  We do need that information to
> >correctly look up the hash table.  However the MMU also allows a
> >possibly larger page size to be encoded into the HPTE itself, which is used
> >to populate the TLB.  At present qemu doesn't check that, and so doesn't
> >support the MPSS "Multiple Page Size per Segment" feature.
> >
> >This makes a start on allowing this, by adding an hpte_page_shift()
> >function which looks up the page size of an HPTE.  We use this to validate
> >page sizes encodings on faults, and populate the qemu TLB with larger
> >page sizes when appropriate.
> >
> >Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
> >---
> >  target-ppc/mmu-hash64.c | 74 ++++++++++++++++++++++++++++++++++++++++++++++---
> >  1 file changed, 70 insertions(+), 4 deletions(-)
> >
> >diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
> >index 28ad361..bcad826 100644
> >--- a/target-ppc/mmu-hash64.c
> >+++ b/target-ppc/mmu-hash64.c
> >@@ -21,6 +21,7 @@
> >  #include "exec/helper-proto.h"
> >  #include "qemu/error-report.h"
> >  #include "sysemu/kvm.h"
> >+#include "qemu/error-report.h"
> >  #include "kvm_ppc.h"
> >  #include "mmu-hash64.h"
> >@@ -474,6 +475,43 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
> >      return pte_offset;
> >  }
> >+static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
> >+    uint64_t pte0, uint64_t pte1)
> >+{
> >+    int i;
> >+
> >+    if (!(pte0 & HPTE64_V_LARGE)) {
> >+        if (sps->page_shift != 12) {
> >+            /* 4kiB page in a non 4kiB segment */
> >+            return 0;
> >+        }
> >+        /* Normal 4kiB page */
> >+        return 12;
> >+    }
> >+
> >+    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
> >+        const struct ppc_one_page_size *ps = &sps->enc[i];
> >+        uint64_t mask;
> >+
> >+        if (!ps->page_shift) {
> >+            break;
> >+        }
> >+
> >+        if (ps->page_shift == 12) {
> >+            /* L bit is set so this can't be a 4kiB page */
> >+            continue;
> >+        }
> >+
> >+        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
> >+
> >+        if ((pte1 & mask) == ps->pte_enc) {
> >+            return ps->page_shift;
> >+        }
> >+    }
> >+
> >+    return 0; /* Bad page size encoding */
> >+}
> >+
> >  static hwaddr ppc_hash64_pte_raddr(unsigned page_shift, ppc_hash_pte64_t pte,
> >                                     target_ulong eaddr)
> >  {
> >@@ -489,6 +527,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
> >      CPUState *cs = CPU(cpu);
> >      CPUPPCState *env = &cpu->env;
> >      ppc_slb_t *slb;
> >+    unsigned apshift;
> >      hwaddr pte_offset;
> >      ppc_hash_pte64_t pte;
> >      int pp_prot, amr_prot, prot;
> >@@ -552,6 +591,28 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
> >      qemu_log_mask(CPU_LOG_MMU,
> >                  "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);
> >+    /* Validate page size encoding */
> >+    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
> >+    if (!apshift) {
> >+        error_report("Bad page size encoding in HPTE 0x%"PRIx64" - 0x%"PRIx64
> >+                     " @ 0x%"HWADDR_PRIx, pte.pte0, pte.pte1, pte_offset);
> >+        /* Treat it like a hash miss for the guest */
> >+        if (rwx == 2) {
> >+            cs->exception_index = POWERPC_EXCP_ISI;
> >+            env->error_code = 0x40000000;
> >+        } else {
> >+            cs->exception_index = POWERPC_EXCP_DSI;
> >+            env->error_code = 0;
> >+            env->spr[SPR_DAR] = eaddr;
> >+            if (rwx == 1) {
> >+                env->spr[SPR_DSISR] = 0x42000000;
> >+            } else {
> >+                env->spr[SPR_DSISR] = 0x40000000;
> 
> I know that we don't do this for any other DSISR setting yet, but do you
> think we could mark the start here and use names for the bits instead? The
> kernel has a few nice defines.

So, that would be a bit odd, since it's just a copy of the exception
code from below.

But in fact, BenH pointed out that throwing a machine check is
probably more correct behaviour than a DSI or ISI, so this will go
away anyway.
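
A minimal sketch of that alternative, on the assumption that the generic
POWERPC_EXCP_MCHECK exception number is the right thing to raise:

    /* Sketch: treat a bad page size encoding as a machine check rather than
     * faking a hash miss for the guest */
    error_report("Bad page size encoding in HPTE 0x%"PRIx64" - 0x%"PRIx64
                 " @ 0x%"HWADDR_PRIx, pte.pte0, pte.pte1, pte_offset);
    cs->exception_index = POWERPC_EXCP_MCHECK;
    env->error_code = 0;
    return 1;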

Patch

diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 28ad361..bcad826 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -21,6 +21,7 @@ 
 #include "exec/helper-proto.h"
 #include "qemu/error-report.h"
 #include "sysemu/kvm.h"
+#include "qemu/error-report.h"
 #include "kvm_ppc.h"
 #include "mmu-hash64.h"
 
@@ -474,6 +475,43 @@  static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
     return pte_offset;
 }
 
+static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
+    uint64_t pte0, uint64_t pte1)
+{
+    int i;
+
+    if (!(pte0 & HPTE64_V_LARGE)) {
+        if (sps->page_shift != 12) {
+            /* 4kiB page in a non 4kiB segment */
+            return 0;
+        }
+        /* Normal 4kiB page */
+        return 12;
+    }
+
+    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+        const struct ppc_one_page_size *ps = &sps->enc[i];
+        uint64_t mask;
+
+        if (!ps->page_shift) {
+            break;
+        }
+
+        if (ps->page_shift == 12) {
+            /* L bit is set so this can't be a 4kiB page */
+            continue;
+        }
+
+        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
+
+        if ((pte1 & mask) == ps->pte_enc) {
+            return ps->page_shift;
+        }
+    }
+
+    return 0; /* Bad page size encoding */
+}
+
 static hwaddr ppc_hash64_pte_raddr(unsigned page_shift, ppc_hash_pte64_t pte,
                                    target_ulong eaddr)
 {
@@ -489,6 +527,7 @@  int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
     CPUState *cs = CPU(cpu);
     CPUPPCState *env = &cpu->env;
     ppc_slb_t *slb;
+    unsigned apshift;
     hwaddr pte_offset;
     ppc_hash_pte64_t pte;
     int pp_prot, amr_prot, prot;
@@ -552,6 +591,28 @@  int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
     qemu_log_mask(CPU_LOG_MMU,
                 "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);
 
+    /* Validate page size encoding */
+    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
+    if (!apshift) {
+        error_report("Bad page size encoding in HPTE 0x%"PRIx64" - 0x%"PRIx64
+                     " @ 0x%"HWADDR_PRIx, pte.pte0, pte.pte1, pte_offset);
+        /* Treat it like a hash miss for the guest */
+        if (rwx == 2) {
+            cs->exception_index = POWERPC_EXCP_ISI;
+            env->error_code = 0x40000000;
+        } else {
+            cs->exception_index = POWERPC_EXCP_DSI;
+            env->error_code = 0;
+            env->spr[SPR_DAR] = eaddr;
+            if (rwx == 1) {
+                env->spr[SPR_DSISR] = 0x42000000;
+            } else {
+                env->spr[SPR_DSISR] = 0x40000000;
+            }
+        }
+        return 1;
+    }
+
     /* 5. Check access permissions */
 
     pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
@@ -604,10 +665,10 @@  int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
 
     /* 7. Determine the real address from the PTE */
 
-    raddr = ppc_hash64_pte_raddr(slb->sps->page_shift, pte, eaddr);
+    raddr = ppc_hash64_pte_raddr(apshift, pte, eaddr);
 
     tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
-                 prot, mmu_idx, TARGET_PAGE_SIZE);
+                 prot, mmu_idx, 1ULL << apshift);
 
     return 0;
 }
@@ -618,6 +679,7 @@  hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
     ppc_slb_t *slb;
     hwaddr pte_offset;
     ppc_hash_pte64_t pte;
+    unsigned apshift;
 
     if (msr_dr == 0) {
         /* In real mode the top 4 effective address bits are ignored */
@@ -634,8 +696,12 @@  hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
         return -1;
     }
 
-    return ppc_hash64_pte_raddr(slb->sps->page_shift, pte, addr)
-        & TARGET_PAGE_MASK;
+    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
+    if (!apshift) {
+        return -1;
+    }
+
+    return ppc_hash64_pte_raddr(apshift, pte, addr) & TARGET_PAGE_MASK;
 }
 
 void ppc_hash64_store_hpte(PowerPCCPU *cpu,