
[06/62] target/arm: Use PageEntryExtra for BTI

Message ID: 20220703082419.770989-7-richard.henderson@linaro.org (mailing list archive)
State: New, archived
Series: target/arm: Implement FEAT_HAFDBS

Commit Message

Richard Henderson July 3, 2022, 8:23 a.m. UTC
Add a bit to ARMCacheAttrs to hold the guarded bit between
get_phys_addr_lpae and arm_cpu_tlb_fill, then put the bit
into PageEntryExtra.

In is_guarded_page, use probe_access_extra instead of just
guessing that the TLB entry is still present.  This also resolves
the FIXME about executing from device memory.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/cpu.h           | 13 -------------
 target/arm/internals.h     |  2 ++
 target/arm/ptw.c           |  4 ++--
 target/arm/tlb_helper.c    |  2 ++
 target/arm/translate-a64.c | 22 ++++++++--------------
 5 files changed, 14 insertions(+), 29 deletions(-)
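
The handoff the patch describes can be condensed into a self-contained
sketch.  The struct and helper names below are illustrative only, and
plain shift/mask arithmetic stands in for QEMU's FIELD_DP64/FIELD_EX64
macros:

/* Sketch only: models the guarded-bit handoff, not QEMU's real types. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Walker output: get_phys_addr_lpae fills in the new guarded flag. */
struct cacheattrs_sketch {
    uint8_t attrs;
    uint8_t shareability;
    bool guarded;
};

/* arm_cpu_tlb_fill side: deposit the walker's results into the extra
 * word, using the PageEntryExtra layout this series defines. */
static uint64_t pack_extra(const struct cacheattrs_sketch *c)
{
    uint64_t x = 0;
    x |= (uint64_t)c->attrs;                    /* ATTRS, bits [7:0] */
    x |= (uint64_t)(c->shareability & 3) << 8;  /* SHAREABILITY, [9:8] */
    x |= (uint64_t)c->guarded << 10;            /* GUARDED, bit 10 */
    return x;
}

/* is_guarded_page side: after probing the TLB, extract the bit back. */
static bool extra_guarded(uint64_t extra)
{
    return (extra >> 10) & 1;
}

int main(void)
{
    struct cacheattrs_sketch c = { .attrs = 0xee, .shareability = 3,
                                   .guarded = true };
    assert(extra_guarded(pack_extra(&c)));
    return 0;
}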

Comments

Peter Maydell July 5, 2022, 2:12 p.m. UTC | #1
On Sun, 3 Jul 2022 at 09:27, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Add a bit to ARMCacheAttrs to hold the guarded bit between
> get_phys_addr_lpae and arm_cpu_tlb_fill, then put the bit
> into PageEntryExtra.
>
> In is_guarded_page, use probe_access_extra instead of just
> guessing that the TLB entry is still present.  This also resolves
> the FIXME about executing from device memory.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>

thanks
-- PMM

Patch

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index a26b9437e9..4a41b5dcef 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -3357,19 +3357,6 @@ static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
 /* Shared between translate-sve.c and sve_helper.c.  */
 extern const uint64_t pred_esz_masks[4];
 
-/* Helper for the macros below, validating the argument type. */
-static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
-{
-    return x;
-}
-
-/*
- * Lvalue macros for ARM TLB bits that we must cache in the TCG TLB.
- * Using these should be a bit more self-documenting than using the
- * generic target bits directly.
- */
-#define arm_tlb_bti_gp(x) (typecheck_memtxattrs(x)->target_tlb_bit0)
-
 /*
  * AArch64 usage of the PAGE_TARGET_* bits for linux-user.
  */
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 2b38a83574..268c3c7380 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -77,6 +77,7 @@ FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
 /* Bit definitions for PageEntryExtra */
 FIELD(PAGEENTRYEXTRA, ATTRS, 0, 8)
 FIELD(PAGEENTRYEXTRA, SHAREABILITY, 8, 2)
+FIELD(PAGEENTRYEXTRA, GUARDED, 10, 1)
 FIELD(PAGEENTRYEXTRA, PA, 12, 52)
 
 /* Minimum value which is a magic number for exception return */
@@ -1129,6 +1130,7 @@ typedef struct ARMCacheAttrs {
     unsigned int attrs:8;
     unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
     bool is_s2_format:1;
+    bool guarded:1;              /* guarded bit of the v8-64 PTE */
 } ARMCacheAttrs;
 
 bool get_phys_addr(CPUARMState *env, target_ulong address,
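
The four FIELD declarations above tile the 64-bit extra word as
ATTRS [7:0], SHAREABILITY [9:8], GUARDED [10] and PA [63:12], leaving
bit 11 spare.  A compile-time sketch of that layout, using hand-rolled
masks rather than the generated R_PAGEENTRYEXTRA_* constants:

#include <stdint.h>

/* Mask covering `len` bits starting at `shift`, as FIELD() describes. */
#define MASK64(shift, len) ((~UINT64_C(0) >> (64 - (len))) << (shift))

_Static_assert((MASK64(0, 8) & MASK64(8, 2)) == 0,
               "ATTRS and SHAREABILITY must not overlap");
_Static_assert((MASK64(8, 2) & MASK64(10, 1)) == 0,
               "SHAREABILITY and GUARDED must not overlap");
_Static_assert((MASK64(10, 1) & MASK64(12, 52)) == 0,
               "GUARDED and PA must not overlap");
_Static_assert((MASK64(0, 8) | MASK64(8, 2) | MASK64(10, 1) |
                MASK64(12, 52)) == ~MASK64(11, 1),
               "every bit except bit 11 is accounted for");

int main(void) { return 0; }
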
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index da478104f0..204c820026 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -1320,8 +1320,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
         txattrs->secure = false;
     }
     /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
-    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
-        arm_tlb_bti_gp(txattrs) = true;
+    if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
+        cacheattrs->guarded = guarded;
     }
 
     if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
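
Note that the new condition no longer tests `guarded` itself: the bit
is copied into cacheattrs whether it is set or clear.  The value
originates from the GP field, bit [50] of the VMSAv8-64 stage-1
block/page descriptor, which get_phys_addr_lpae extracts earlier in
the walk; a standalone sketch of that extraction:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* GP lives at bit [50] of a stage-1 block/page descriptor (VMSAv8-64). */
static bool descriptor_guarded(uint64_t descriptor)
{
    return (descriptor >> 50) & 1;
}

int main(void)
{
    uint64_t desc = UINT64_C(1) << 50;   /* descriptor with only GP set */
    assert(descriptor_guarded(desc));
    assert(!descriptor_guarded(0));
    return 0;
}
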
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
index 1305b6ec7d..7476fcafeb 100644
--- a/target/arm/tlb_helper.c
+++ b/target/arm/tlb_helper.c
@@ -244,6 +244,8 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                                  cacheattrs.attrs);
             extra.x = FIELD_DP64(extra.x, PAGEENTRYEXTRA, SHAREABILITY,
                                  cacheattrs.shareability);
+            extra.x = FIELD_DP64(extra.x, PAGEENTRYEXTRA, GUARDED,
+                                 cacheattrs.guarded);
         }
 
         tlb_set_page_with_extra(cs, address, phys_addr, attrs, extra,
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index c86b97b1d4..57f492ccef 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -14543,22 +14543,16 @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s)
 #ifdef CONFIG_USER_ONLY
     return page_get_flags(addr) & PAGE_BTI;
 #else
+    MemTxAttrs attrs;
+    PageEntryExtra extra;
+    void *host;
     int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
-    unsigned int index = tlb_index(env, mmu_idx, addr);
-    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+    int flags;
 
-    /*
-     * We test this immediately after reading an insn, which means
-     * that any normal page must be in the TLB.  The only exception
-     * would be for executing from flash or device memory, which
-     * does not retain the TLB entry.
-     *
-     * FIXME: Assume false for those, for now.  We could use
-     * arm_cpu_get_phys_page_attrs_debug to re-read the page
-     * table entry even for that case.
-     */
-    return (tlb_hit(entry->addr_code, addr) &&
-            arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].iotlb[index].attrs));
+    flags = probe_access_extra(env, addr, MMU_INST_FETCH, mmu_idx,
+                               false, &host, &attrs, &extra, 0);
+    assert(!(flags & TLB_INVALID_MASK));
+    return FIELD_EX64(extra.x, PAGEENTRYEXTRA, GUARDED);
 #endif
 }
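
For context, the translator consults this predicate when deciding
whether PSTATE.BTYPE must be enforced: BTI checks apply only on
guarded pages.  A simplified, hypothetical model of that decision (the
real code in translate-a64.c additionally exempts landing-pad
instructions such as BTI and PACIASP):

#include <assert.h>
#include <stdbool.h>

/*
 * Hypothetical model: a Branch Target exception is only a possibility
 * when the previous insn was an indirect branch (BTYPE != 0) *and* the
 * page we landed on is guarded; is_guarded_page answers the second half.
 */
static bool btype_check_needed(int btype, bool guarded_page)
{
    return btype != 0 && guarded_page;
}

int main(void)
{
    assert(!btype_check_needed(0, true));   /* sequential execution */
    assert(!btype_check_needed(2, false));  /* unguarded page: BTI off */
    assert(btype_check_needed(2, true));    /* must land on BTI et al. */
    return 0;
}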