diff mbox

[03/10] target-ppc: Rework ppc_store_slb

Message ID 1453698952-32092-4-git-send-email-david@gibson.dropbear.id.au (mailing list archive)
State New, archived
Headers show

Commit Message

David Gibson Jan. 25, 2016, 5:15 a.m. UTC
ppc_store_slb updates the SLB for PPC cpus with 64-bit hash MMUs.
Currently it takes two parameters, which contain values encoded as the
register arguments to the slbmte instruction: one register contains the
ESID portion of the SLBE along with the slot number; the other contains
the VSID portion of the SLBE.

We're shortly going to want to do some SLB updates from other code where
it is more convenient to supply the slot number and ESID separately, so
rework this function and its callers to work this way.

As a bonus, this slightly simplifies the emulation of segment registers
when running a 32-bit OS on a 64-bit CPU.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
 target-ppc/kvm.c        |  2 +-
 target-ppc/mmu-hash64.c | 24 +++++++++++++-----------
 target-ppc/mmu-hash64.h |  3 ++-
 target-ppc/mmu_helper.c | 14 +++++---------
 4 files changed, 21 insertions(+), 22 deletions(-)

Comments

Alexander Graf Jan. 25, 2016, 7:22 p.m. UTC | #1
On 01/25/2016 06:15 AM, David Gibson wrote:
> ppc_store_slb updates the SLB for PPC cpus with 64-bit hash MMUs.
> Currently it takes two parameters, which contain values encoded as the
> register arguments to the slbmte instruction, one register contains the
> ESID portion of the SLBE and also the slot number, the other contains the
> VSID portion of the SLBE.
>
> We're shortly going to want to do some SLB updates from other code where
> it is more convenient to supply the slot number and ESID separately, so
> rework this function and its callers to work this way.
>
> As a bonus, this slightly simplifies the emulation of segment registers for
> when running a 32-bit OS on a 64-bit CPU.
>
> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
> ---
>   target-ppc/kvm.c        |  2 +-
>   target-ppc/mmu-hash64.c | 24 +++++++++++++-----------
>   target-ppc/mmu-hash64.h |  3 ++-
>   target-ppc/mmu_helper.c | 14 +++++---------
>   4 files changed, 21 insertions(+), 22 deletions(-)
>
> diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
> index 98d7ba6..18c7ba2 100644
> --- a/target-ppc/kvm.c
> +++ b/target-ppc/kvm.c
> @@ -1205,7 +1205,7 @@ int kvm_arch_get_registers(CPUState *cs)
>                * Only restore valid entries
>                */
>               if (rb & SLB_ESID_V) {
> -                ppc_store_slb(cpu, rb, rs);
> +                ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfff, rs);
>               }
>           }
>   #endif
> diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
> index 03e25fd..5a6d33b 100644
> --- a/target-ppc/mmu-hash64.c
> +++ b/target-ppc/mmu-hash64.c
> @@ -135,28 +135,30 @@ void helper_slbie(CPUPPCState *env, target_ulong addr)
>       }
>   }
>   
> -int ppc_store_slb(PowerPCCPU *cpu, target_ulong rb, target_ulong rs)
> +int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
> +                  target_ulong esid, target_ulong vsid)
>   {
>       CPUPPCState *env = &cpu->env;
> -    int slot = rb & 0xfff;
>       ppc_slb_t *slb = &env->slb[slot];
>   
> -    if (rb & (0x1000 - env->slb_nr)) {
> -        return -1; /* Reserved bits set or slot too high */
> +    if (slot >= env->slb_nr) {
> +        return -1; /* Bad slot number */
> +    }
> +    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
> +        return -1; /* Reserved bits set */
>       }
> -    if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
> +    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
>           return -1; /* Bad segment size */
>       }
> -    if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
> +    if ((vsid & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
>           return -1; /* 1T segment on MMU that doesn't support it */
>       }
>   
> -    /* Mask out the slot number as we store the entry */
> -    slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
> -    slb->vsid = rs;
> +    slb->esid = esid;
> +    slb->vsid = vsid;
>   
>       LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
> -            " %016" PRIx64 "\n", __func__, slot, rb, rs,
> +            " %016" PRIx64 "\n", __func__, slot, esid, vsid,
>               slb->esid, slb->vsid);
>   
>       return 0;
> @@ -196,7 +198,7 @@ void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
>   {
>       PowerPCCPU *cpu = ppc_env_get_cpu(env);
>   
> -    if (ppc_store_slb(cpu, rb, rs) < 0) {
> +    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfff, rs) < 0) {

This might truncate the esid to 32bits on 32bits hosts, no? Should be 
0xfffULL instead.

Alex

>           helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
>                                      POWERPC_EXCP_INVAL);
>       }
> diff --git a/target-ppc/mmu-hash64.h b/target-ppc/mmu-hash64.h
> index 6e3de7e..24fd2c4 100644
> --- a/target-ppc/mmu-hash64.h
> +++ b/target-ppc/mmu-hash64.h
> @@ -6,7 +6,8 @@
>   #ifdef TARGET_PPC64
>   void ppc_hash64_check_page_sizes(PowerPCCPU *cpu, Error **errp);
>   void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu);
> -int ppc_store_slb(PowerPCCPU *cpu, target_ulong rb, target_ulong rs);
> +int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
> +                  target_ulong esid, target_ulong vsid);
>   hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
>   int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong address, int rw,
>                                   int mmu_idx);
> diff --git a/target-ppc/mmu_helper.c b/target-ppc/mmu_helper.c
> index 0ab73bc..c040b17 100644
> --- a/target-ppc/mmu_helper.c
> +++ b/target-ppc/mmu_helper.c
> @@ -2088,21 +2088,17 @@ void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
>               (int)srnum, value, env->sr[srnum]);
>   #if defined(TARGET_PPC64)
>       if (env->mmu_model & POWERPC_MMU_64) {
> -        uint64_t rb = 0, rs = 0;
> +        uint64_t esid, vsid;
>   
>           /* ESID = srnum */
> -        rb |= ((uint32_t)srnum & 0xf) << 28;
> -        /* Set the valid bit */
> -        rb |= SLB_ESID_V;
> -        /* Index = ESID */
> -        rb |= (uint32_t)srnum;
> +        esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;
>   
>           /* VSID = VSID */
> -        rs |= (value & 0xfffffff) << 12;
> +        vsid = (value & 0xfffffff) << 12;
>           /* flags = flags */
> -        rs |= ((value >> 27) & 0xf) << 8;
> +        vsid |= ((value >> 27) & 0xf) << 8;
>   
> -        ppc_store_slb(cpu, rb, rs);
> +        ppc_store_slb(cpu, srnum, esid, vsid);
>       } else
>   #endif
>       if (env->sr[srnum] != value) {
David Gibson Jan. 27, 2016, 12:04 a.m. UTC | #2
On Mon, Jan 25, 2016 at 08:22:51PM +0100, Alexander Graf wrote:
> 
> 
> On 01/25/2016 06:15 AM, David Gibson wrote:
> >ppc_store_slb updates the SLB for PPC cpus with 64-bit hash MMUs.
> >Currently it takes two parameters, which contain values encoded as the
> >register arguments to the slbmte instruction, one register contains the
> >ESID portion of the SLBE and also the slot number, the other contains the
> >VSID portion of the SLBE.
> >
> >We're shortly going to want to do some SLB updates from other code where
> >it is more convenient to supply the slot number and ESID separately, so
> >rework this function and its callers to work this way.
> >
> >As a bonus, this slightly simplifies the emulation of segment registers for
> >when running a 32-bit OS on a 64-bit CPU.
> >
> >Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
> >---
> >  target-ppc/kvm.c        |  2 +-
> >  target-ppc/mmu-hash64.c | 24 +++++++++++++-----------
> >  target-ppc/mmu-hash64.h |  3 ++-
> >  target-ppc/mmu_helper.c | 14 +++++---------
> >  4 files changed, 21 insertions(+), 22 deletions(-)
> >
> >diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
> >index 98d7ba6..18c7ba2 100644
> >--- a/target-ppc/kvm.c
> >+++ b/target-ppc/kvm.c
> >@@ -1205,7 +1205,7 @@ int kvm_arch_get_registers(CPUState *cs)
> >               * Only restore valid entries
> >               */
> >              if (rb & SLB_ESID_V) {
> >-                ppc_store_slb(cpu, rb, rs);
> >+                ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfff, rs);
> >              }
> >          }
> >  #endif
> >diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
> >index 03e25fd..5a6d33b 100644
> >--- a/target-ppc/mmu-hash64.c
> >+++ b/target-ppc/mmu-hash64.c
> >@@ -135,28 +135,30 @@ void helper_slbie(CPUPPCState *env, target_ulong addr)
> >      }
> >  }
> >-int ppc_store_slb(PowerPCCPU *cpu, target_ulong rb, target_ulong rs)
> >+int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
> >+                  target_ulong esid, target_ulong vsid)
> >  {
> >      CPUPPCState *env = &cpu->env;
> >-    int slot = rb & 0xfff;
> >      ppc_slb_t *slb = &env->slb[slot];
> >-    if (rb & (0x1000 - env->slb_nr)) {
> >-        return -1; /* Reserved bits set or slot too high */
> >+    if (slot >= env->slb_nr) {
> >+        return -1; /* Bad slot number */
> >+    }
> >+    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
> >+        return -1; /* Reserved bits set */
> >      }
> >-    if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
> >+    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
> >          return -1; /* Bad segment size */
> >      }
> >-    if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
> >+    if ((vsid & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
> >          return -1; /* 1T segment on MMU that doesn't support it */
> >      }
> >-    /* Mask out the slot number as we store the entry */
> >-    slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
> >-    slb->vsid = rs;
> >+    slb->esid = esid;
> >+    slb->vsid = vsid;
> >      LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
> >-            " %016" PRIx64 "\n", __func__, slot, rb, rs,
> >+            " %016" PRIx64 "\n", __func__, slot, esid, vsid,
> >              slb->esid, slb->vsid);
> >      return 0;
> >@@ -196,7 +198,7 @@ void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
> >  {
> >      PowerPCCPU *cpu = ppc_env_get_cpu(env);
> >-    if (ppc_store_slb(cpu, rb, rs) < 0) {
> >+    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfff, rs) < 0) {
> 
> This might truncate the esid to 32bits on 32bits hosts, no? Should be
> 0xfffULL instead.

Good point, nice catch.
Thomas Huth Jan. 27, 2016, 7:32 a.m. UTC | #3
On 27.01.2016 01:04, David Gibson wrote:
> On Mon, Jan 25, 2016 at 08:22:51PM +0100, Alexander Graf wrote:
>>
>>
>> On 01/25/2016 06:15 AM, David Gibson wrote:
>>> ppc_store_slb updates the SLB for PPC cpus with 64-bit hash MMUs.
>>> Currently it takes two parameters, which contain values encoded as the
>>> register arguments to the slbmte instruction, one register contains the
>>> ESID portion of the SLBE and also the slot number, the other contains the
>>> VSID portion of the SLBE.
>>>
>>> We're shortly going to want to do some SLB updates from other code where
>>> it is more convenient to supply the slot number and ESID separately, so
>>> rework this function and its callers to work this way.
>>>
>>> As a bonus, this slightly simplifies the emulation of segment registers for
>>> when running a 32-bit OS on a 64-bit CPU.
>>>
>>> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
>>> ---
>>>  target-ppc/kvm.c        |  2 +-
>>>  target-ppc/mmu-hash64.c | 24 +++++++++++++-----------
>>>  target-ppc/mmu-hash64.h |  3 ++-
>>>  target-ppc/mmu_helper.c | 14 +++++---------
>>>  4 files changed, 21 insertions(+), 22 deletions(-)
...
>>> @@ -196,7 +198,7 @@ void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
>>>  {
>>>      PowerPCCPU *cpu = ppc_env_get_cpu(env);
>>> -    if (ppc_store_slb(cpu, rb, rs) < 0) {
>>> +    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfff, rs) < 0) {
>>
>> This might truncate the esid to 32bits on 32bits hosts, no? Should be
>> 0xfffULL instead.
> 
> Good point, nice catch.

Are you sure that it is really needed? If I run the following test
program on my 64-bit system:

int main()
{
	unsigned long long ll = -1ULL;
	printf("%llx %llx\n", ll, ll & ~0xfff);
	return 0;
}

Then I get this output:

ffffffffffffffff fffffffffffff000

So it sounds like the value is sign-extended when it is cast to 64-bit.

However, if you respin this patch series anyway, then maybe better add
the ULL for clarity.

 Thomas
David Gibson Jan. 27, 2016, 10:07 a.m. UTC | #4
On Wed, Jan 27, 2016 at 08:32:25AM +0100, Thomas Huth wrote:
> On 27.01.2016 01:04, David Gibson wrote:
> > On Mon, Jan 25, 2016 at 08:22:51PM +0100, Alexander Graf wrote:
> >>
> >>
> >> On 01/25/2016 06:15 AM, David Gibson wrote:
> >>> ppc_store_slb updates the SLB for PPC cpus with 64-bit hash MMUs.
> >>> Currently it takes two parameters, which contain values encoded as the
> >>> register arguments to the slbmte instruction, one register contains the
> >>> ESID portion of the SLBE and also the slot number, the other contains the
> >>> VSID portion of the SLBE.
> >>>
> >>> We're shortly going to want to do some SLB updates from other code where
> >>> it is more convenient to supply the slot number and ESID separately, so
> >>> rework this function and its callers to work this way.
> >>>
> >>> As a bonus, this slightly simplifies the emulation of segment registers for
> >>> when running a 32-bit OS on a 64-bit CPU.
> >>>
> >>> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
> >>> ---
> >>>  target-ppc/kvm.c        |  2 +-
> >>>  target-ppc/mmu-hash64.c | 24 +++++++++++++-----------
> >>>  target-ppc/mmu-hash64.h |  3 ++-
> >>>  target-ppc/mmu_helper.c | 14 +++++---------
> >>>  4 files changed, 21 insertions(+), 22 deletions(-)
> ...
> >>> @@ -196,7 +198,7 @@ void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
> >>>  {
> >>>      PowerPCCPU *cpu = ppc_env_get_cpu(env);
> >>> -    if (ppc_store_slb(cpu, rb, rs) < 0) {
> >>> +    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfff, rs) < 0) {
> >>
> >> This might truncate the esid to 32bits on 32bits hosts, no? Should be
> >> 0xfffULL instead.
> > 
> > Good point, nice catch.
> 
> Are you sure that it is really needed? If I run the following test
> program on my 64-bit system:
> 
> int main()
> {
> 	unsigned long long ll = -1ULL;
> 	printf("%llx %llx\n", ll, ll & ~0xfff);
> 	return 0;
> }
> 
> Then I get this output:
> 
> ffffffffffffffff fffffffffffff000
> 
> So it sounds like the value is sign-extended when it is cast to
> 64-bit.

Ah, yes.  It would be promoted to s64 and sign extended before being
promoted to u64.  Still that's a pretty subtle detail to be relying on.
 
> However, if you respin this patch series anyway, then maybe better add
> the ULL for clarity.

Already done.
diff mbox

Patch

diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index 98d7ba6..18c7ba2 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -1205,7 +1205,7 @@  int kvm_arch_get_registers(CPUState *cs)
              * Only restore valid entries
              */
             if (rb & SLB_ESID_V) {
-                ppc_store_slb(cpu, rb, rs);
+                ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfff, rs);
             }
         }
 #endif
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 03e25fd..5a6d33b 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -135,28 +135,30 @@  void helper_slbie(CPUPPCState *env, target_ulong addr)
     }
 }
 
-int ppc_store_slb(PowerPCCPU *cpu, target_ulong rb, target_ulong rs)
+int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
+                  target_ulong esid, target_ulong vsid)
 {
     CPUPPCState *env = &cpu->env;
-    int slot = rb & 0xfff;
     ppc_slb_t *slb = &env->slb[slot];
 
-    if (rb & (0x1000 - env->slb_nr)) {
-        return -1; /* Reserved bits set or slot too high */
+    if (slot >= env->slb_nr) {
+        return -1; /* Bad slot number */
+    }
+    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
+        return -1; /* Reserved bits set */
     }
-    if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
+    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
         return -1; /* Bad segment size */
     }
-    if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
+    if ((vsid & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
         return -1; /* 1T segment on MMU that doesn't support it */
     }
 
-    /* Mask out the slot number as we store the entry */
-    slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
-    slb->vsid = rs;
+    slb->esid = esid;
+    slb->vsid = vsid;
 
     LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
-            " %016" PRIx64 "\n", __func__, slot, rb, rs,
+            " %016" PRIx64 "\n", __func__, slot, esid, vsid,
             slb->esid, slb->vsid);
 
     return 0;
@@ -196,7 +198,7 @@  void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
 {
     PowerPCCPU *cpu = ppc_env_get_cpu(env);
 
-    if (ppc_store_slb(cpu, rb, rs) < 0) {
+    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfff, rs) < 0) {
         helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                    POWERPC_EXCP_INVAL);
     }
diff --git a/target-ppc/mmu-hash64.h b/target-ppc/mmu-hash64.h
index 6e3de7e..24fd2c4 100644
--- a/target-ppc/mmu-hash64.h
+++ b/target-ppc/mmu-hash64.h
@@ -6,7 +6,8 @@ 
 #ifdef TARGET_PPC64
 void ppc_hash64_check_page_sizes(PowerPCCPU *cpu, Error **errp);
 void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu);
-int ppc_store_slb(PowerPCCPU *cpu, target_ulong rb, target_ulong rs);
+int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
+                  target_ulong esid, target_ulong vsid);
 hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
 int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong address, int rw,
                                 int mmu_idx);
diff --git a/target-ppc/mmu_helper.c b/target-ppc/mmu_helper.c
index 0ab73bc..c040b17 100644
--- a/target-ppc/mmu_helper.c
+++ b/target-ppc/mmu_helper.c
@@ -2088,21 +2088,17 @@  void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
             (int)srnum, value, env->sr[srnum]);
 #if defined(TARGET_PPC64)
     if (env->mmu_model & POWERPC_MMU_64) {
-        uint64_t rb = 0, rs = 0;
+        uint64_t esid, vsid;
 
         /* ESID = srnum */
-        rb |= ((uint32_t)srnum & 0xf) << 28;
-        /* Set the valid bit */
-        rb |= SLB_ESID_V;
-        /* Index = ESID */
-        rb |= (uint32_t)srnum;
+        esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;
 
         /* VSID = VSID */
-        rs |= (value & 0xfffffff) << 12;
+        vsid = (value & 0xfffffff) << 12;
         /* flags = flags */
-        rs |= ((value >> 27) & 0xf) << 8;
+        vsid |= ((value >> 27) & 0xf) << 8;
 
-        ppc_store_slb(cpu, rb, rs);
+        ppc_store_slb(cpu, srnum, esid, vsid);
     } else
 #endif
     if (env->sr[srnum] != value) {