
target/arm: Fixed Privileged Access Never (PAN) for aarch32

Message ID 20221019121537.255477-1-tkutergin@gmail.com (mailing list archive)
State New, archived
Series target/arm: Fixed Privileged Access Never (PAN) for aarch32

Commit Message

Timofey Kutergin Oct. 19, 2022, 12:15 p.m. UTC
- synchronize PSTATE.PAN with changes in CPSR.PAN in aarch32 mode
- set the PAN bit automatically on exception entry if the SCTLR_SPAN
  bit is clear
- raise a permission fault during address translation when PAN is
  enabled and the kernel tries to access a user-accessible page
- ignore SCTLR_XP bit for armv7 and armv8 (conflicts with SCTLR_SPAN).

Signed-off-by: Timofey Kutergin <tkutergin@gmail.com>
---
 target/arm/helper.c |  6 ++++++
 target/arm/ptw.c    | 11 ++++++++++-
 2 files changed, 16 insertions(+), 1 deletion(-)

Comments

Peter Maydell Oct. 25, 2022, 1:45 p.m. UTC | #1
On Wed, 19 Oct 2022 at 13:15, Timofey Kutergin <tkutergin@gmail.com> wrote:
>
> - synchronize PSTATE.PAN with changes in CPSR.PAN in aarch32 mode
> - set the PAN bit automatically on exception entry if the SCTLR_SPAN
>   bit is clear
> - raise a permission fault during address translation when PAN is
>   enabled and the kernel tries to access a user-accessible page
> - ignore SCTLR_XP bit for armv7 and armv8 (conflicts with SCTLR_SPAN).
>
> Signed-off-by: Timofey Kutergin <tkutergin@gmail.com>
> ---
>  target/arm/helper.c |  6 ++++++
>  target/arm/ptw.c    | 11 ++++++++++-
>  2 files changed, 16 insertions(+), 1 deletion(-)

Thanks for this patch. I think you've caught all the places
we aren't correctly implementing AArch32 PAN handling.

> diff --git a/target/arm/helper.c b/target/arm/helper.c
> index dde64a487a..5299f67e3f 100644
> --- a/target/arm/helper.c
> +++ b/target/arm/helper.c
> @@ -9052,6 +9052,11 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
>      }
>      mask &= ~CACHED_CPSR_BITS;
>      env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
> +    if (env->uncached_cpsr & CPSR_PAN) {
> +        env->pstate |= PSTATE_PAN;
> +    } else {
> +        env->pstate &= ~PSTATE_PAN;
> +    }

This approach means we're storing the PAN bit in two places,
both in env->uncached_cpsr and in env->pstate. We don't do
this for any other bits as far as I can see. I think we should
either:
 (1) have the code that changes behaviour based on PAN look
     at either env->pstate or env->uncached_cpsr depending
     on whether we're AArch64 or AArch32
 (2) always store the state in env->pstate only, and handle
     this in read/write of the CPSR the same way we do with
     other "cached" bits

I think the intention of the current code is (1), and the
only place we get this wrong is in arm_mmu_idx_el(),
which is checking env->pstate only. (The other places that
directly check env->pstate are all in AArch64-only code,
and various AArch32-only bits of code already check
env->uncached_cpsr.) A function like

bool arm_pan_enabled(CPUARMState *env)
{
    if (is_a64(env)) {
        return env->pstate & PSTATE_PAN;
    } else {
        return env->uncached_cpsr & CPSR_PAN;
    }
}

and then using that in arm_mmu_idx_el() should I think
mean you don't need to change either cpsr_write() or
take_aarch32_exception().
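
For illustration (a sketch only, though the ARMMMUIdx_E10_1* names are
the ones helper.c already uses), the EL1 arm of the switch in
arm_mmu_idx_el() would then read roughly:

    case 1:
        if (arm_pan_enabled(env)) {
            idx = ARMMMUIdx_E10_1_PAN;
        } else {
            idx = ARMMMUIdx_E10_1;
        }
        break;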

>      if (rebuild_hflags) {
>          arm_rebuild_hflags(env);
>      }
> @@ -9592,6 +9597,7 @@ static void take_aarch32_exception(CPUARMState *env, int new_mode,
>                  /* ... the target is EL1 and SCTLR.SPAN is 0.  */
>                  if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
>                      env->uncached_cpsr |= CPSR_PAN;
> +                    env->pstate |= PSTATE_PAN;
>                  }
>                  break;
>              }
> diff --git a/target/arm/ptw.c b/target/arm/ptw.c
> index 23f16f4ff7..204a73350f 100644
> --- a/target/arm/ptw.c
> +++ b/target/arm/ptw.c
> @@ -659,6 +659,13 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
>              goto do_fault;
>          }
>
> +        if (regime_is_pan(env, mmu_idx) && !regime_is_user(env, mmu_idx) &&
> +            simple_ap_to_rw_prot_is_user(ap >> 1, 1) &&
> +            access_type != MMU_INST_FETCH) {
> +            fi->type = ARMFault_Permission;
> +            goto do_fault;
> +        }

This assumes we're using the SCTLR.AFE==1 simplified
permissions model, but PAN should apply even if we're using the
old model. So we need an ap_to_rw_prot_is_user() to check the
permissions in that model.
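
A possible shape for such a helper, purely as a sketch: the name,
signature and cases below just mirror the existing ap_to_rw_prot()
logic in ptw.c, with is_user turned into an explicit parameter.

static int ap_to_rw_prot_is_user(CPUARMState *env, ARMMMUIdx mmu_idx,
                                 int ap, int domain_prot, bool is_user)
{
    if (domain_prot == 3) {
        /* Domain manager: all accesses permitted. */
        return PAGE_READ | PAGE_WRITE;
    }
    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1: /* privileged read/write, no user access */
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2: /* privileged read/write, user read-only */
        return is_user ? PAGE_READ : PAGE_READ | PAGE_WRITE;
    case 3: /* full access */
        return PAGE_READ | PAGE_WRITE;
    case 4: /* reserved */
        return 0;
    case 5: /* privileged read-only */
        return is_user ? 0 : PAGE_READ;
    case 6: /* read-only at PL1 and PL0 */
        return PAGE_READ;
    case 7: /* read-only at PL1 and PL0, v6K and later */
        return arm_feature(env, ARM_FEATURE_V6K) ? PAGE_READ : 0;
    default:
        g_assert_not_reached();
    }
}

ap_to_rw_prot() itself could then become a thin wrapper passing
regime_is_user(env, mmu_idx) for is_user.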

The check is also being done before the Access fault check, but
the architecture says that Access faults take priority over
Permission faults.
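
Schematically (the user_prot variable and the ap_to_rw_prot_is_user()
helper are placeholders, and the existing prot computation is elided),
the check could compute the would-be unprivileged permissions for both
models and run only after the access-flag test:

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit. */
            if ((ap & 1) == 0) {
                /* Access flag fault: retains priority over Permission. */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            user_prot = simple_ap_to_rw_prot_is_user(ap >> 1, 1);
            /* ... existing privileged prot computation unchanged ... */
        } else {
            user_prot = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1);
            /* ... existing privileged prot computation unchanged ... */
        }

        /* PAN: privileged data access to a user-accessible page faults. */
        if (regime_is_pan(env, mmu_idx) && !regime_is_user(env, mmu_idx) &&
            user_prot && access_type != MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }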

> +
>          if (arm_feature(env, ARM_FEATURE_V6K) &&
>                  (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
>              /* The simplified model uses AP[0] as an access control bit.  */
> @@ -2506,7 +2513,9 @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
>      if (regime_using_lpae_format(env, mmu_idx)) {
>          return get_phys_addr_lpae(env, address, access_type, mmu_idx,
>                                    is_secure, false, result, fi);
> -    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
> +    } else if (arm_feature(env, ARM_FEATURE_V7) ||
> +               arm_feature(env, ARM_FEATURE_V8) || (

V8 always implies V7, so we only need to check V7 here.

> +               regime_sctlr(env, mmu_idx) & SCTLR_XP)) {
>          return get_phys_addr_v6(env, address, access_type, mmu_idx,
>                                  is_secure, result, fi);
>      } else {

thanks
-- PMM
Timofey Kutergin Oct. 27, 2022, 9:22 a.m. UTC | #2
Hi Peter,
> V8 always implies V7, so we only need to check V7 here.
From the silicon perspective, yes; but as I see it in QEMU,
ARM_FEATURE_V7 and ARM_FEATURE_V8 are independent bits which do not
affect each other in arm_feature() and set_feature(), so they should
be tested separately. Did I miss something?

Thanks
Best regards
Timofey
Peter Maydell Oct. 27, 2022, 9:35 a.m. UTC | #3
On Thu, 27 Oct 2022 at 10:22, Timofey Kutergin <tkutergin@gmail.com> wrote:
> > V8 always implies V7, so we only need to check V7 here.

> From the silicon perspective, yes; but as I see it in QEMU,
> ARM_FEATURE_V7 and ARM_FEATURE_V8 are independent bits which do not
> affect each other in arm_feature() and set_feature(), so they should
> be tested separately. Did I miss something?

In arm_cpu_realizefn() there is code which sets feature flags
that are always implied by other feature flags. There we set
the V7VE flag if V8 is set, and the V7 flag if V7VE is set.
So we can rely on any v8 CPU having the V7 feature flag set.
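
For reference, the implication chain in arm_cpu_realizefn() is roughly
as follows (abridged from target/arm/cpu.c; the real code sets several
more implied features alongside these):

    if (arm_feature(env, ARM_FEATURE_V8)) {
        set_feature(env, ARM_FEATURE_V7VE);
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        /* v7 Virtualization Extensions imply v7. */
        set_feature(env, ARM_FEATURE_V7);
    }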

thanks
-- PMM
Timofey Kutergin Oct. 27, 2022, 10:43 a.m. UTC | #4
Understood, thank you a lot :)

Best regards
Timofey

Patch

diff --git a/target/arm/helper.c b/target/arm/helper.c
index dde64a487a..5299f67e3f 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -9052,6 +9052,11 @@  void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
     }
     mask &= ~CACHED_CPSR_BITS;
     env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
+    if (env->uncached_cpsr & CPSR_PAN) {
+        env->pstate |= PSTATE_PAN;
+    } else {
+        env->pstate &= ~PSTATE_PAN;
+    }
     if (rebuild_hflags) {
         arm_rebuild_hflags(env);
     }
@@ -9592,6 +9597,7 @@  static void take_aarch32_exception(CPUARMState *env, int new_mode,
                 /* ... the target is EL1 and SCTLR.SPAN is 0.  */
                 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
                     env->uncached_cpsr |= CPSR_PAN;
+                    env->pstate |= PSTATE_PAN;
                 }
                 break;
             }
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index 23f16f4ff7..204a73350f 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -659,6 +659,13 @@  static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
             goto do_fault;
         }
 
+        if (regime_is_pan(env, mmu_idx) && !regime_is_user(env, mmu_idx) &&
+            simple_ap_to_rw_prot_is_user(ap >> 1, 1) &&
+            access_type != MMU_INST_FETCH) {
+            fi->type = ARMFault_Permission;
+            goto do_fault;
+        }
+
         if (arm_feature(env, ARM_FEATURE_V6K) &&
                 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
             /* The simplified model uses AP[0] as an access control bit.  */
@@ -2506,7 +2513,9 @@  bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
     if (regime_using_lpae_format(env, mmu_idx)) {
         return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                   is_secure, false, result, fi);
-    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
+    } else if (arm_feature(env, ARM_FEATURE_V7) ||
+               arm_feature(env, ARM_FEATURE_V8) || (
+               regime_sctlr(env, mmu_idx) & SCTLR_XP)) {
         return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                 is_secure, result, fi);
     } else {