Message ID | 20200303034351.333043-11-david@gibson.dropbear.id.au (mailing list archive) |
---|---|
State | New, archived |
Series | target/ppc: Correct some errors with real mode handling |
On Tue, 3 Mar 2020 14:43:44 +1100
David Gibson <david@gibson.dropbear.id.au> wrote:

> When the LPCR is written, we update the env->rmls field with the RMA limit
> it implies. Simplify things by just calculating the value directly from
> the LPCR value when we need it.
>
> It's possible this is a little slower, but it's unlikely to be significant,
> since this is only for real mode accesses in a translation configuration
> that's not used very often, and the whole thing is behind the qemu TLB
> anyway. Therefore, keeping the number of state variables down and not
> having to worry about making sure it's always in sync seems the better
> option.
>
> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
> ---

Reviewed-by: Greg Kurz <groug@kaod.org>

>  target/ppc/cpu.h        | 1 -
>  target/ppc/mmu-hash64.c | 9 ++++++---
>  2 files changed, 6 insertions(+), 4 deletions(-)
>
> diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
> index 8077fdb068..f9871b1233 100644
> --- a/target/ppc/cpu.h
> +++ b/target/ppc/cpu.h
> @@ -1046,7 +1046,6 @@ struct CPUPPCState {
>      uint64_t insns_flags2;
>  #if defined(TARGET_PPC64)
>      ppc_slb_t vrma_slb;
> -    target_ulong rmls;
>  #endif
>
>      int error_code;
> diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
> index fcccaabb88..4fd7b7ee74 100644
> --- a/target/ppc/mmu-hash64.c
> +++ b/target/ppc/mmu-hash64.c
> @@ -837,8 +837,10 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
>
>          goto skip_slb_search;
>      } else {
> +        target_ulong limit = rmls_limit(cpu);
> +
>          /* Emulated old-style RMO mode, bounds check against RMLS */
> -        if (raddr >= env->rmls) {
> +        if (raddr >= limit) {
>              if (rwx == 2) {
>                  ppc_hash64_set_isi(cs, SRR1_PROTFAULT);
>              } else {
> @@ -1000,8 +1002,10 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
>              return -1;
>          }
>      } else {
> +        target_ulong limit = rmls_limit(cpu);
> +
>          /* Emulated old-style RMO mode, bounds check against RMLS */
> -        if (raddr >= env->rmls) {
> +        if (raddr >= limit) {
>              return -1;
>          }
>          return raddr | env->spr[SPR_RMOR];
> @@ -1091,7 +1095,6 @@ void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
>      CPUPPCState *env = &cpu->env;
>
>      env->spr[SPR_LPCR] = val & pcc->lpcr_mask;
> -    env->rmls = rmls_limit(cpu);
>      ppc_hash64_update_vrma(cpu);
>  }
>
On 3/3/20 4:43 AM, David Gibson wrote:
> When the LPCR is written, we update the env->rmls field with the RMA limit
> it implies. Simplify things by just calculating the value directly from
> the LPCR value when we need it.
>
> It's possible this is a little slower, but it's unlikely to be significant,
> since this is only for real mode accesses in a translation configuration
> that's not used very often, and the whole thing is behind the qemu TLB
> anyway. Therefore, keeping the number of state variables down and not
> having to worry about making sure it's always in sync seems the better
> option.
>
> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
> ---
>  target/ppc/cpu.h        | 1 -
>  target/ppc/mmu-hash64.c | 9 ++++++---
>  2 files changed, 6 insertions(+), 4 deletions(-)
>
> diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
> index 8077fdb068..f9871b1233 100644
> --- a/target/ppc/cpu.h
> +++ b/target/ppc/cpu.h
> @@ -1046,7 +1046,6 @@ struct CPUPPCState {
>      uint64_t insns_flags2;
>  #if defined(TARGET_PPC64)
>      ppc_slb_t vrma_slb;
> -    target_ulong rmls;
>  #endif
>
>      int error_code;
> diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
> index fcccaabb88..4fd7b7ee74 100644
> --- a/target/ppc/mmu-hash64.c
> +++ b/target/ppc/mmu-hash64.c
> @@ -837,8 +837,10 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
>
>          goto skip_slb_search;
>      } else {
> +        target_ulong limit = rmls_limit(cpu);
> +
>          /* Emulated old-style RMO mode, bounds check against RMLS */
> -        if (raddr >= env->rmls) {
> +        if (raddr >= limit) {
>              if (rwx == 2) {
>                  ppc_hash64_set_isi(cs, SRR1_PROTFAULT);
>              } else {
> @@ -1000,8 +1002,10 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
>              return -1;
>          }
>      } else {
> +        target_ulong limit = rmls_limit(cpu);
> +
>          /* Emulated old-style RMO mode, bounds check against RMLS */
> -        if (raddr >= env->rmls) {
> +        if (raddr >= limit) {
>              return -1;
>          }
>          return raddr | env->spr[SPR_RMOR];
> @@ -1091,7 +1095,6 @@ void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
>      CPUPPCState *env = &cpu->env;
>
>      env->spr[SPR_LPCR] = val & pcc->lpcr_mask;
> -    env->rmls = rmls_limit(cpu);
>      ppc_hash64_update_vrma(cpu);
>  }
>

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 8077fdb068..f9871b1233 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1046,7 +1046,6 @@ struct CPUPPCState {
     uint64_t insns_flags2;
 #if defined(TARGET_PPC64)
     ppc_slb_t vrma_slb;
-    target_ulong rmls;
 #endif

     int error_code;
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
index fcccaabb88..4fd7b7ee74 100644
--- a/target/ppc/mmu-hash64.c
+++ b/target/ppc/mmu-hash64.c
@@ -837,8 +837,10 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,

         goto skip_slb_search;
     } else {
+        target_ulong limit = rmls_limit(cpu);
+
         /* Emulated old-style RMO mode, bounds check against RMLS */
-        if (raddr >= env->rmls) {
+        if (raddr >= limit) {
             if (rwx == 2) {
                 ppc_hash64_set_isi(cs, SRR1_PROTFAULT);
             } else {
@@ -1000,8 +1002,10 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
             return -1;
         }
     } else {
+        target_ulong limit = rmls_limit(cpu);
+
         /* Emulated old-style RMO mode, bounds check against RMLS */
-        if (raddr >= env->rmls) {
+        if (raddr >= limit) {
             return -1;
         }
         return raddr | env->spr[SPR_RMOR];
@@ -1091,7 +1095,6 @@ void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
     CPUPPCState *env = &cpu->env;

     env->spr[SPR_LPCR] = val & pcc->lpcr_mask;
-    env->rmls = rmls_limit(cpu);
     ppc_hash64_update_vrma(cpu);
 }
When the LPCR is written, we update the env->rmls field with the RMA limit
it implies. Simplify things by just calculating the value directly from
the LPCR value when we need it.

It's possible this is a little slower, but it's unlikely to be significant,
since this is only for real mode accesses in a translation configuration
that's not used very often, and the whole thing is behind the qemu TLB
anyway. Therefore, keeping the number of state variables down and not
having to worry about making sure it's always in sync seems the better
option.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
 target/ppc/cpu.h        | 1 -
 target/ppc/mmu-hash64.c | 9 ++++++---
 2 files changed, 6 insertions(+), 4 deletions(-)
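The change works because rmls_limit(), already present in mmu-hash64.c, decodes the RMLS field of the LPCR into a byte limit every time it is called, so there is no cached value left to keep in sync. Below is a minimal sketch of what such a helper can look like; it assumes the POWER8-era RMLS size encodings and the LPCR_RMLS / LPCR_RMLS_SHIFT definitions from target/ppc/cpu.h, and the exact implementation in the tree may differ:

```c
/*
 * Sketch of an RMLS decoder: derive the real-mode area limit directly
 * from LPCR[RMLS].  The size encodings below are assumed to be the
 * POWER8-era set; an unsupported value returns a zero-sized RMA, so the
 * bounds checks in the fault paths above fail immediately.
 */
static target_ulong rmls_limit(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;

    switch (rmls) {
    case 0x8:
        return 0x02000000ull;   /*  32 MiB */
    case 0x3:
        return 0x04000000ull;   /*  64 MiB */
    case 0x7:
        return 0x08000000ull;   /* 128 MiB */
    case 0x4:
        return 0x10000000ull;   /* 256 MiB */
    case 0x2:
        return 0x40000000ull;   /*   1 GiB */
    case 0x1:
        return 0x400000000ull;  /*  16 GiB */
    case 0x0:
        return 0x4000000000ull; /* 256 GiB */
    default:
        return 0;               /* unsupported encoding: zero-sized RMA */
    }
}
```

Since ppc_store_lpcr() already masks the value with pcc->lpcr_mask, recomputing the limit at the two call sites costs only a shift and a table/switch lookup per real-mode access, which supports the commit message's argument that the extra work is negligible next to the TLB.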