
[v3,04/12] target/ppc: Introduce ppc_hash64_use_vrma() helper

Message ID 20200219005414.15635-5-david@gibson.dropbear.id.au
State New, archived
Series target/ppc: Correct some errors with real mode handling

Commit Message

David Gibson Feb. 19, 2020, 12:54 a.m. UTC
When running guests under a hypervisor, the hypervisor obviously needs to
be protected from guest accesses even if those are in what the guest
considers real mode (translation off).  The POWER hardware provides two
ways of doing that: The old way has guest real mode accesses simply offset
and bounds checked into host addresses.  It works, but requires that a
significant chunk of the guest's memory - the RMA - be physically
contiguous in the host, which is pretty inconvenient.  The new way, known
as VRMA, has guest real mode accesses translated in roughly the normal way
but with some special parameters.
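
To make the contrast concrete, here is a rough, self-contained sketch of the
two schemes (hypothetical types, field names and constants, chosen purely for
illustration; this is not the QEMU code):

/*
 * Sketch of the two real-mode protection schemes described above.
 * All names and values here are made up for illustration.
 */
#include <stdbool.h>
#include <inttypes.h>
#include <stdio.h>

typedef struct {
    uint64_t rma_offset;   /* host address where the guest RMA starts */
    uint64_t rma_size;     /* size of the host-contiguous RMA */
} rma_state;

/* Old scheme: guest real-mode addresses are offset and bounds checked. */
static bool rma_translate(const rma_state *s, uint64_t guest_real,
                          uint64_t *host_addr)
{
    if (guest_real >= s->rma_size) {
        return false;      /* outside the RMA: fault to the hypervisor */
    }
    *host_addr = s->rma_offset + guest_real;
    return true;
}

/*
 * New scheme (VRMA): the access goes through the normal hash MMU, but
 * with a fixed, special VSID instead of one looked up in the SLB.
 */
#define VSID_VRMA 0x1ffffffULL   /* illustrative placeholder value */

static uint64_t vrma_virtual_addr(uint64_t guest_real)
{
    /* Hand the ordinary translation path a synthetic virtual address. */
    return (VSID_VRMA << 28) | (guest_real & 0x0fffffffULL);
}

int main(void)
{
    rma_state s = { .rma_offset = 0x100000000ULL, .rma_size = 0x10000000ULL };
    uint64_t host;

    if (rma_translate(&s, 0x2000, &host)) {
        printf("RMA:  guest 0x2000 -> host 0x%" PRIx64 "\n", host);
    }
    printf("VRMA: guest 0x2000 -> virtual 0x%" PRIx64 "\n",
           vrma_virtual_addr(0x2000));
    return 0;
}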

In POWER7 and POWER8 the LPCR[VPM0] bit selected between the two modes, but
in POWER9 only VRMA mode is supported and LPCR[VPM0] no longer exists.  We
handle that difference in behaviour in ppc_hash64_set_isi(), but not in
other places where we blindly check LPCR[VPM0].

Correct those instances with a new helper to tell if we should be in VRMA
mode.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
---
 target/ppc/mmu-hash64.c | 41 +++++++++++++++++++----------------------
 1 file changed, 19 insertions(+), 22 deletions(-)

Comments

Fabiano Rosas Feb. 19, 2020, 2:06 p.m. UTC | #1
David Gibson <david@gibson.dropbear.id.au> writes:

> When running guests under a hypervisor, the hypervisor obviously needs to
> be protected from guest accesses even if those are in what the guest
> considers real mode (translation off).  The POWER hardware provides two
> ways of doing that: The old way has guest real mode accesses simply offset
> and bounds checked into host addresses.  It works, but requires that a
> significant chunk of the guest's memory - the RMA - be physically
> contiguous in the host, which is pretty inconvenient.  The new way, known
> as VRMA, has guest real mode accesses translated in roughly the normal way
> but with some special parameters.
>
> In POWER7 and POWER8 the LPCR[VPM0] bit selected between the two modes, but
> in POWER9 only VRMA mode is supported

... when translation is off, right? Because I see in the 3.0 ISA that
LPCR[VPM1] is still there.

> and LPCR[VPM0] no longer exists.  We
> handle that difference in behaviour in ppc_hash64_set_isi().. but not in
> other places that we blindly check LPCR[VPM0].
>
> Correct those instances with a new helper to tell if we should be in VRMA
> mode.
>
> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
> Reviewed-by: Cédric Le Goater <clg@kaod.org>
> ---
>  target/ppc/mmu-hash64.c | 41 +++++++++++++++++++----------------------
>  1 file changed, 19 insertions(+), 22 deletions(-)
>
> diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
> index 5fabd93c92..d878180df5 100644
> --- a/target/ppc/mmu-hash64.c
> +++ b/target/ppc/mmu-hash64.c
> @@ -668,6 +668,19 @@ unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
>      return 0;
>  }
>  
> +static bool ppc_hash64_use_vrma(CPUPPCState *env)
> +{
> +    switch (env->mmu_model) {
> +    case POWERPC_MMU_3_00:
> +        /* ISAv3.0 (POWER9) always uses VRMA, the VPM0 field and RMOR
> +         * register no longer exist */
> +        return true;
> +
> +    default:
> +        return !!(env->spr[SPR_LPCR] & LPCR_VPM0);
> +    }
> +}
> +
>  static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code)
>  {
>      CPUPPCState *env = &POWERPC_CPU(cs)->env;
> @@ -676,15 +689,7 @@ static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code)
>      if (msr_ir) {
>          vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
>      } else {
> -        switch (env->mmu_model) {
> -        case POWERPC_MMU_3_00:
> -            /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
> -            vpm = true;
> -            break;
> -        default:
> -            vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
> -            break;
> -        }
> +        vpm = ppc_hash64_use_vrma(env);
>      }
>      if (vpm && !msr_hv) {
>          cs->exception_index = POWERPC_EXCP_HISI;
> @@ -702,15 +707,7 @@ static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr)
>      if (msr_dr) {
>          vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
>      } else {
> -        switch (env->mmu_model) {
> -        case POWERPC_MMU_3_00:
> -            /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
> -            vpm = true;
> -            break;
> -        default:
> -            vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
> -            break;
> -        }
> +        vpm = ppc_hash64_use_vrma(env);
>      }
>      if (vpm && !msr_hv) {
>          cs->exception_index = POWERPC_EXCP_HDSI;
> @@ -799,7 +796,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
>              if (!(eaddr >> 63)) {
>                  raddr |= env->spr[SPR_HRMOR];
>              }
> -        } else if (env->spr[SPR_LPCR] & LPCR_VPM0) {
> +        } else if (ppc_hash64_use_vrma(env)) {
>              /* Emulated VRMA mode */
>              slb = &env->vrma_slb;
>              if (!slb->sps) {
> @@ -967,7 +964,7 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
>          } else if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
>              /* In HV mode, add HRMOR if top EA bit is clear */
>              return raddr | env->spr[SPR_HRMOR];
> -        } else if (env->spr[SPR_LPCR] & LPCR_VPM0) {
> +        } else if (ppc_hash64_use_vrma(env)) {
>              /* Emulated VRMA mode */
>              slb = &env->vrma_slb;
>              if (!slb->sps) {
> @@ -1056,8 +1053,7 @@ static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
>      slb->sps = NULL;
>  
>      /* Is VRMA enabled ? */
> -    lpcr = env->spr[SPR_LPCR];
> -    if (!(lpcr & LPCR_VPM0)) {
> +    if (ppc_hash64_use_vrma(env)) {

Shouldn't this be !ppc_hash64_use_vrma(env)?

And a comment about the original code: all other places that check
LPCR_VPM0 do it after verifying that translation is off, except here
(ppc_hash64_update_vrma). Could that be an issue?

>          return;
>      }
>  
> @@ -1065,6 +1061,7 @@ static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
>       * Make one up. Mostly ignore the ESID which will not be needed
>       * for translation
>       */
> +    lpcr = env->spr[SPR_LPCR];
>      vsid = SLB_VSID_VRMA;
>      vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
>      vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
Paul Mackerras Feb. 20, 2020, 2:41 a.m. UTC | #2
On Wed, Feb 19, 2020 at 11:06:20AM -0300, Fabiano Rosas wrote:
> David Gibson <david@gibson.dropbear.id.au> writes:
> 
> > When running guests under a hypervisor, the hypervisor obviously needs to
> > be protected from guest accesses even if those are in what the guest
> > considers real mode (translation off).  The POWER hardware provides two
> > ways of doing that: The old way has guest real mode accesses simply offset
> > and bounds checked into host addresses.  It works, but requires that a
> > significant chunk of the guest's memory - the RMA - be physically
> > contiguous in the host, which is pretty inconvenient.  The new way, known
> > as VRMA, has guest real mode accesses translated in roughly the normal way
> > but with some special parameters.
> >
> > In POWER7 and POWER8 the LPCR[VPM0] bit selected between the two modes, but
> > in POWER9 only VRMA mode is supported
> 
> ... when translation is off, right? Because I see in the 3.0 ISA that
> LPCR[VPM1] is still there.

VRMA stands for virtual real mode area, and the "real mode" part
implies that translation is off.  VRMA is not used when translation is
on because then the CPU is not in real mode.

LPCR[VPM1] is indeed still there, but it is a bit different to VPM0
(or what VPM0 used to do); VPM1 doesn't change how translation is
done, just what happens on a fault.
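
In terms of the QEMU code in this patch, the fault routing boils down to
roughly the following (a condensed, simplified restatement of the patched
ppc_hash64_set_isi()/_set_dsi(), with made-up parameter names, not a drop-in
replacement):

#include <stdbool.h>

bool storage_fault_goes_to_hv(bool translation_on, bool msr_hv,
                              bool isa_v3_00, bool vpm0, bool vpm1)
{
    bool vpm;

    if (translation_on) {
        /* Translation on: VPM1 only selects where the fault is delivered. */
        vpm = vpm1;
    } else {
        /* Real mode: ISA v3.0 (POWER9) always uses VRMA, so the fault
         * always goes to the hypervisor; older CPUs consult VPM0. */
        vpm = isa_v3_00 ? true : vpm0;
    }
    /* A fault taken while already in HV state takes the normal ISI/DSI. */
    return vpm && !msr_hv;
}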

Paul.
David Gibson Feb. 20, 2020, 3:10 a.m. UTC | #3
On Wed, Feb 19, 2020 at 11:06:20AM -0300, Fabiano Rosas wrote:
> David Gibson <david@gibson.dropbear.id.au> writes:
> 
> > When running guests under a hypervisor, the hypervisor obviously needs to
> > be protected from guest accesses even if those are in what the guest
> > considers real mode (translation off).  The POWER hardware provides two
> > ways of doing that: The old way has guest real mode accesses simply offset
> > and bounds checked into host addresses.  It works, but requires that a
> > significant chunk of the guest's memory - the RMA - be physically
> > contiguous in the host, which is pretty inconvenient.  The new way, known
> > as VRMA, has guest real mode accesses translated in roughly the normal way
> > but with some special parameters.
> >
> > In POWER7 and POWER8 the LPCR[VPM0] bit selected between the two modes, but
> > in POWER9 only VRMA mode is supported
> 
> ... when translation is off, right? Because I see in the 3.0 ISA that
> LPCR[VPM1] is still there.

Right.  This whole patch (and the whole series) is about when the
guest is in translation off mode.

> 
> > and LPCR[VPM0] no longer exists.  We
> > handle that difference in behaviour in ppc_hash64_set_isi().. but not in
> > other places that we blindly check LPCR[VPM0].
> >
> > Correct those instances with a new helper to tell if we should be in VRMA
> > mode.
> >
> > Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
> > Reviewed-by: Cédric Le Goater <clg@kaod.org>
> > ---
> >  target/ppc/mmu-hash64.c | 41 +++++++++++++++++++----------------------
> >  1 file changed, 19 insertions(+), 22 deletions(-)
> >
> > diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
> > index 5fabd93c92..d878180df5 100644
> > --- a/target/ppc/mmu-hash64.c
> > +++ b/target/ppc/mmu-hash64.c
> > @@ -668,6 +668,19 @@ unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
> >      return 0;
> >  }
> >  
> > +static bool ppc_hash64_use_vrma(CPUPPCState *env)
> > +{
> > +    switch (env->mmu_model) {
> > +    case POWERPC_MMU_3_00:
> > +        /* ISAv3.0 (POWER9) always uses VRMA, the VPM0 field and RMOR
> > +         * register no longer exist */
> > +        return true;
> > +
> > +    default:
> > +        return !!(env->spr[SPR_LPCR] & LPCR_VPM0);
> > +    }
> > +}
> > +
> >  static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code)
> >  {
> >      CPUPPCState *env = &POWERPC_CPU(cs)->env;
> > @@ -676,15 +689,7 @@ static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code)
> >      if (msr_ir) {
> >          vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
> >      } else {
> > -        switch (env->mmu_model) {
> > -        case POWERPC_MMU_3_00:
> > -            /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
> > -            vpm = true;
> > -            break;
> > -        default:
> > -            vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
> > -            break;
> > -        }
> > +        vpm = ppc_hash64_use_vrma(env);
> >      }
> >      if (vpm && !msr_hv) {
> >          cs->exception_index = POWERPC_EXCP_HISI;
> > @@ -702,15 +707,7 @@ static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr)
> >      if (msr_dr) {
> >          vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
> >      } else {
> > -        switch (env->mmu_model) {
> > -        case POWERPC_MMU_3_00:
> > -            /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
> > -            vpm = true;
> > -            break;
> > -        default:
> > -            vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
> > -            break;
> > -        }
> > +        vpm = ppc_hash64_use_vrma(env);
> >      }
> >      if (vpm && !msr_hv) {
> >          cs->exception_index = POWERPC_EXCP_HDSI;
> > @@ -799,7 +796,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
> >              if (!(eaddr >> 63)) {
> >                  raddr |= env->spr[SPR_HRMOR];
> >              }
> > -        } else if (env->spr[SPR_LPCR] & LPCR_VPM0) {
> > +        } else if (ppc_hash64_use_vrma(env)) {
> >              /* Emulated VRMA mode */
> >              slb = &env->vrma_slb;
> >              if (!slb->sps) {
> > @@ -967,7 +964,7 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
> >          } else if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
> >              /* In HV mode, add HRMOR if top EA bit is clear */
> >              return raddr | env->spr[SPR_HRMOR];
> > -        } else if (env->spr[SPR_LPCR] & LPCR_VPM0) {
> > +        } else if (ppc_hash64_use_vrma(env)) {
> >              /* Emulated VRMA mode */
> >              slb = &env->vrma_slb;
> >              if (!slb->sps) {
> > @@ -1056,8 +1053,7 @@ static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
> >      slb->sps = NULL;
> >  
> >      /* Is VRMA enabled ? */
> > -    lpcr = env->spr[SPR_LPCR];
> > -    if (!(lpcr & LPCR_VPM0)) {
> > +    if (ppc_hash64_use_vrma(env)) {
> 
> Shouldn't this be !ppc_hash64_use_vrma(env)?
> 
> And a comment about the original code: all other places that check
> LPCR_VPM0 do it after verifying that translation is off, except here
> (ppc_hash64_update_vrma). Could that be an issue?
> 
> >          return;
> >      }
> >  
> > @@ -1065,6 +1061,7 @@ static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
> >       * Make one up. Mostly ignore the ESID which will not be needed
> >       * for translation
> >       */
> > +    lpcr = env->spr[SPR_LPCR];
> >      vsid = SLB_VSID_VRMA;
> >      vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
> >      vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
>

Patch

diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
index 5fabd93c92..d878180df5 100644
--- a/target/ppc/mmu-hash64.c
+++ b/target/ppc/mmu-hash64.c
@@ -668,6 +668,19 @@  unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
     return 0;
 }
 
+static bool ppc_hash64_use_vrma(CPUPPCState *env)
+{
+    switch (env->mmu_model) {
+    case POWERPC_MMU_3_00:
+        /* ISAv3.0 (POWER9) always uses VRMA, the VPM0 field and RMOR
+         * register no longer exist */
+        return true;
+
+    default:
+        return !!(env->spr[SPR_LPCR] & LPCR_VPM0);
+    }
+}
+
 static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code)
 {
     CPUPPCState *env = &POWERPC_CPU(cs)->env;
@@ -676,15 +689,7 @@  static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code)
     if (msr_ir) {
         vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
     } else {
-        switch (env->mmu_model) {
-        case POWERPC_MMU_3_00:
-            /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
-            vpm = true;
-            break;
-        default:
-            vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
-            break;
-        }
+        vpm = ppc_hash64_use_vrma(env);
     }
     if (vpm && !msr_hv) {
         cs->exception_index = POWERPC_EXCP_HISI;
@@ -702,15 +707,7 @@  static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr)
     if (msr_dr) {
         vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
     } else {
-        switch (env->mmu_model) {
-        case POWERPC_MMU_3_00:
-            /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
-            vpm = true;
-            break;
-        default:
-            vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
-            break;
-        }
+        vpm = ppc_hash64_use_vrma(env);
     }
     if (vpm && !msr_hv) {
         cs->exception_index = POWERPC_EXCP_HDSI;
@@ -799,7 +796,7 @@  int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
             if (!(eaddr >> 63)) {
                 raddr |= env->spr[SPR_HRMOR];
             }
-        } else if (env->spr[SPR_LPCR] & LPCR_VPM0) {
+        } else if (ppc_hash64_use_vrma(env)) {
             /* Emulated VRMA mode */
             slb = &env->vrma_slb;
             if (!slb->sps) {
@@ -967,7 +964,7 @@  hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
         } else if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
             /* In HV mode, add HRMOR if top EA bit is clear */
             return raddr | env->spr[SPR_HRMOR];
-        } else if (env->spr[SPR_LPCR] & LPCR_VPM0) {
+        } else if (ppc_hash64_use_vrma(env)) {
             /* Emulated VRMA mode */
             slb = &env->vrma_slb;
             if (!slb->sps) {
@@ -1056,8 +1053,7 @@  static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
     slb->sps = NULL;
 
     /* Is VRMA enabled ? */
-    lpcr = env->spr[SPR_LPCR];
-    if (!(lpcr & LPCR_VPM0)) {
+    if (ppc_hash64_use_vrma(env)) {
         return;
     }
 
@@ -1065,6 +1061,7 @@  static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
      * Make one up. Mostly ignore the ESID which will not be needed
      * for translation
      */
+    lpcr = env->spr[SPR_LPCR];
     vsid = SLB_VSID_VRMA;
     vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
     vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);