@@ -2787,8 +2787,7 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
* + NonSecure PL1 & 0 stage 1
* + NonSecure PL1 & 0 stage 2
* + NonSecure PL2
- * + Secure PL0
- * + Secure PL1
+ * + Secure PL1 & 0
* (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
*
* For QEMU, an mmu_idx is not quite the same as a translation regime because:
@@ -2823,19 +2822,21 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
*
* This gives us the following list of cases:
*
- * EL0 EL1&0 stage 1+2 (aka NS PL0)
- * EL1 EL1&0 stage 1+2 (aka NS PL1)
- * EL1 EL1&0 stage 1+2 +PAN
+ * EL0 EL1&0 stage 1+2 (aka NS PL0 PL1&0 stage 1+2)
+ * EL1 EL1&0 stage 1+2 (aka NS PL1 PL1&0 stage 1+2)
+ * EL1 EL1&0 stage 1+2 +PAN (aka NS PL1 PL1&0 stage 1+2 +PAN)
* EL0 EL2&0
* EL2 EL2&0
* EL2 EL2&0 +PAN
* EL2 (aka NS PL2)
- * EL3 (aka S PL1)
+ * EL3 (aka AArch32 S PL1 PL1&0)
+ * AArch32 S PL0 PL1&0 (we call this E30_0)
+ * AArch32 S PL1 PL1&0 +PAN (we call this E30_3_PAN)
* Stage2 Secure
* Stage2 NonSecure
* plus one TLB per Physical address space: S, NS, Realm, Root
*
- * for a total of 14 different mmu_idx.
+ * for a total of 16 different mmu_idx.
*
* R profile CPUs have an MPU, but can use the same set of MMU indexes
* as A profile. They only need to distinguish EL0 and EL1 (and
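(Tallying the new total as a cross-check: 3 indexes for EL1&0 (EL0, EL1, EL1+PAN), 3 for EL2&0, 1 for EL2, 3 for the EL3 / Secure PL1&0 family (E3, E30_0, E30_3_PAN), 2 for Stage2 and 4 for the physical address spaces, i.e. 3 + 3 + 1 + 3 + 2 + 4 = 16.)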
@@ -2899,6 +2900,8 @@ typedef enum ARMMMUIdx {
ARMMMUIdx_E20_2_PAN = 5 | ARM_MMU_IDX_A,
ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A,
ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E30_0 = 8 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E30_3_PAN = 9 | ARM_MMU_IDX_A,
/*
* Used for second stage of an S12 page table walk, or for descriptor
@@ -2906,14 +2909,14 @@ typedef enum ARMMMUIdx {
* are in use simultaneously for SecureEL2: the security state for
* the S2 ptw is selected by the NS bit from the S1 ptw.
*/
- ARMMMUIdx_Stage2_S = 8 | ARM_MMU_IDX_A,
- ARMMMUIdx_Stage2 = 9 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Stage2_S = 10 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Stage2 = 11 | ARM_MMU_IDX_A,
/* TLBs with 1-1 mapping to the physical address spaces. */
- ARMMMUIdx_Phys_S = 10 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_NS = 11 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_Root = 12 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_Realm = 13 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_S = 12 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_NS = 13 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_Root = 14 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_Realm = 15 | ARM_MMU_IDX_A,
/*
* These are not allocated TLBs and are used only for AT system
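The renumbering of the Stage2 and Phys indexes above is safe because only the low bits of each enumerator form the core (TLB) index. For reference, the existing extraction helper in cpu.h, untouched by this patch:

    static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
    {
        return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
    }

The two extra slots still fit under ARM_MMU_IDX_COREIDX_MASK, so the shift by two positions changes no other invariants.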
@@ -2952,6 +2955,8 @@ typedef enum ARMMMUIdxBit {
TO_CORE_BIT(E20_2),
TO_CORE_BIT(E20_2_PAN),
TO_CORE_BIT(E3),
+ TO_CORE_BIT(E30_0),
+ TO_CORE_BIT(E30_3_PAN),
TO_CORE_BIT(Stage2),
TO_CORE_BIT(Stage2_S),
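For context, TO_CORE_BIT turns each index into a one-hot bit over the core index space; in cpu.h it is defined (modulo formatting) as:

    #define TO_CORE_BIT(NAME) \
        ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK)

so the two new entries become ARMMMUIdxBit_E30_0 = 1 << 8 and ARMMMUIdxBit_E30_3_PAN = 1 << 9, and the Stage2/Phys bits move up to match their renumbered indexes.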
@@ -871,7 +871,16 @@ static inline void arm_call_el_change_hook(ARMCPU *cpu)
}
}
-/* Return true if this address translation regime has two ranges. */
+/*
+ * Return true if this address translation regime has two ranges.
+ * Note that this will not return the correct answer for AArch32
+ * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is
+ * never called from a context where EL3 can be AArch32. (The
+ * correct return value for ARMMMUIdx_E3 would be different in
+ * that case, so we can't simply make the function return the
+ * right answer here; we would need an extra "bool e3_is_aarch32"
+ * argument, which all the current callsites would pass as 'false'.)
+ */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
switch (mmu_idx) {
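The switch body (unchanged by this patch) lists only the EL1&0 and EL2&0 regimes, so E3, E30_0 and E30_3_PAN all fall through to the default and report a single VA range; per the comment above, that is correct only while EL3 is AArch64. The pre-patch body, reproduced roughly for reference:

    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }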
@@ -896,6 +905,7 @@ static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
case ARMMMUIdx_Stage1_E1_PAN:
case ARMMMUIdx_E10_1_PAN:
case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_E30_3_PAN:
return true;
default:
return false;
@@ -919,10 +929,11 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
case ARMMMUIdx_E2:
return 2;
case ARMMMUIdx_E3:
+ case ARMMMUIdx_E30_0:
+ case ARMMMUIdx_E30_3_PAN:
return 3;
case ARMMMUIdx_E10_0:
case ARMMMUIdx_Stage1_E0:
- return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
case ARMMMUIdx_E10_1:
@@ -946,6 +957,7 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
switch (mmu_idx) {
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E30_0:
case ARMMMUIdx_Stage1_E0:
case ARMMMUIdx_MUser:
case ARMMMUIdx_MSUser:
@@ -444,6 +444,9 @@ static int alle1_tlbmask(CPUARMState *env)
* Note that the 'ALL' scope must invalidate both stage 1 and
* stage 2 translations, whereas most other scopes only invalidate
* stage 1 translations.
+ *
+ * For AArch32 this is only used for TLBIALLNSNH and VTTBR
+ * writes, so it only needs to apply to NS PL1&0, not S PL1&0.
*/
return (ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
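A concrete AArch32 consumer, sketched for reference (the shape of the existing TLBIALLNSNH handler; not part of this patch):

    static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
    {
        CPUState *cs = env_cpu(env);

        tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
    }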
@@ -3776,7 +3779,11 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
/* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
switch (el) {
case 3:
- mmu_idx = ARMMMUIdx_E3;
+ if (ri->crm == 9 && arm_pan_enabled(env)) {
+ mmu_idx = ARMMMUIdx_E30_3_PAN;
+ } else {
+ mmu_idx = ARMMMUIdx_E3;
+ }
break;
case 2:
g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
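Here crm == 9 selects the PAN variants ATS1CPRP/ATS1CPWP (the plain ATS1CPR/ATS1CPW encodings use crm == 8). arm_pan_enabled() is an existing helper.c predicate; a simplified sketch (the real function also special-cases FEAT_NV on the AArch64 path):

    static bool arm_pan_enabled(CPUARMState *env)
    {
        if (is_a64(env)) {
            return env->pstate & PSTATE_PAN;
        } else {
            return env->uncached_cpsr & CPSR_PAN;
        }
    }

At AArch32 EL3 the is_a64() branch is not taken, so this tests CPSR.PAN, which is exactly the state the ATS1CPxP operations must honour.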
@@ -3796,7 +3803,7 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
/* stage 1 current state PL0: ATS1CUR, ATS1CUW */
switch (el) {
case 3:
- mmu_idx = ARMMMUIdx_E10_0;
+ mmu_idx = ARMMMUIdx_E30_0;
break;
case 2:
g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
@@ -4906,11 +4913,14 @@ static int vae1_tlbmask(CPUARMState *env)
uint64_t hcr = arm_hcr_el2_eff(env);
uint16_t mask;
+ assert(arm_feature(env, ARM_FEATURE_AARCH64));
+
if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
mask = ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
ARMMMUIdxBit_E20_0;
} else {
+ /* This is AArch64 only, so we don't need to touch the E30_x TLBs */
mask = ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
ARMMMUIdxBit_E10_0;
@@ -4949,6 +4959,8 @@ static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
uint64_t hcr = arm_hcr_el2_eff(env);
ARMMMUIdx mmu_idx;
+ assert(arm_feature(env, ARM_FEATURE_AARCH64));
+
/* Only the regime of the mmu_idx below is significant. */
if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
mmu_idx = ARMMMUIdx_E20_0;
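Both new assertions hold because these functions are reached only from the AArch64 TLBI VAE1-family handlers, whose typical shape is roughly this (shown for reference, not part of the patch):

    static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
    {
        /* Invalidate by VA, EL1&0 (AArch64 version) */
        CPUState *cs = env_cpu(env);
        int mask = vae1_tlbmask(env);
        uint64_t pageaddr = sextract64(value << 12, 0, 56);
        int bits = vae1_tlbbits(env, pageaddr);

        if (tlb_force_broadcast(env)) {
            tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                          mask, bits);
        } else {
            tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
        }
    }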
@@ -11862,10 +11874,20 @@ void arm_cpu_do_interrupt(CPUState *cs)
uint64_t arm_sctlr(CPUARMState *env, int el)
{
- /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
+ /* Only EL0 needs to be adjusted for EL1&0 or EL2&0 or EL3&0. */
if (el == 0) {
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
- el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
+ switch (mmu_idx) {
+ case ARMMMUIdx_E20_0:
+ el = 2;
+ break;
+ case ARMMMUIdx_E30_0:
+ el = 3;
+ break;
+ default:
+ el = 1;
+ break;
+ }
}
return env->cp15.sctlr_el[el];
}
@@ -12533,6 +12555,7 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
switch (mmu_idx) {
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E30_0:
return 0;
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
@@ -12542,6 +12565,7 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
case ARMMMUIdx_E20_2_PAN:
return 2;
case ARMMMUIdx_E3:
+ case ARMMMUIdx_E30_3_PAN:
return 3;
default:
g_assert_not_reached();
@@ -12570,6 +12594,9 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
hcr = arm_hcr_el2_eff(env);
if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
idx = ARMMMUIdx_E20_0;
+ } else if (arm_is_secure_below_el3(env) &&
+ !arm_el_is_aa64(env, 3)) {
+ idx = ARMMMUIdx_E30_0;
} else {
idx = ARMMMUIdx_E10_0;
}
@@ -12594,6 +12621,9 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
}
break;
case 3:
+ if (!arm_el_is_aa64(env, 3) && arm_pan_enabled(env)) {
+ return ARMMMUIdx_E30_3_PAN;
+ }
return ARMMMUIdx_E3;
default:
g_assert_not_reached();
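For reference, the usual entry point simply applies this to the current exception level (unchanged by the patch):

    ARMMMUIdx arm_mmu_idx(CPUARMState *env)
    {
        return arm_mmu_idx_el(env, arm_current_el(env));
    }

With the two new cases above, an AArch32 CPU at Secure EL3 with CPSR.PAN set now gets E30_3_PAN instead of E3, and Secure User mode under an AArch32 EL3 gets E30_0 instead of E10_0.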
@@ -280,6 +280,8 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
case ARMMMUIdx_E20_2_PAN:
case ARMMMUIdx_E2:
case ARMMMUIdx_E3:
+ case ARMMMUIdx_E30_0:
+ case ARMMMUIdx_E30_3_PAN:
break;
case ARMMMUIdx_Phys_S:
@@ -3635,6 +3637,8 @@ bool get_phys_addr(CPUARMState *env, vaddr address,
ss = ARMSS_Secure;
break;
case ARMMMUIdx_E3:
+ case ARMMMUIdx_E30_0:
+ case ARMMMUIdx_E30_3_PAN:
if (arm_feature(env, ARM_FEATURE_AARCH64) &&
cpu_isar_feature(aa64_rme, env_archcpu(env))) {
ss = ARMSS_Root;
@@ -912,7 +912,19 @@ void HELPER(tidcp_el0)(CPUARMState *env, uint32_t syndrome)
{
/* See arm_sctlr(), but we also need the sctlr el. */
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
- int target_el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
+ int target_el;
+
+ switch (mmu_idx) {
+ case ARMMMUIdx_E20_0:
+ target_el = 2;
+ break;
+ case ARMMMUIdx_E30_0:
+ target_el = 3;
+ break;
+ default:
+ target_el = 1;
+ break;
+ }
/*
* The bit is not valid unless the target el is aa64, but since the
@@ -228,6 +228,9 @@ static inline int get_a32_user_mem_index(DisasContext *s)
*/
switch (s->mmu_idx) {
case ARMMMUIdx_E3:
+ case ARMMMUIdx_E30_0:
+ case ARMMMUIdx_E30_3_PAN:
+ return arm_to_core_mmu_idx(ARMMMUIdx_E30_0);
case ARMMMUIdx_E2: /* this one is UNPREDICTABLE */
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E10_1:
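This helper supplies the mmu index for the unprivileged (T-suffixed) loads and stores such as LDRT/STRT, which must translate as if executed at PL0; folding E3, E30_0 and E30_3_PAN onto E30_0 gives them the Secure PL0 regime. A caller in the load/store translators looks roughly like this (variable names hypothetical):

    /* hypothetical LDRT-style translator fragment */
    int mem_idx = get_a32_user_mem_index(s);
    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);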