@@ -471,6 +471,16 @@ static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
     return false;
 }
 
+static bool S1_attrs_are_device(uint8_t attrs)
+{
+    /*
+     * This slightly under-decodes the MAIR_ELx field:
+     * 0b0000dd01 is Device with FEAT_XS, otherwise UNPREDICTABLE;
+     * 0b0000dd1x is UNPREDICTABLE.
+     */
+    return (attrs & 0xf0) == 0;
+}
+
 static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
 {
     /*
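For reference, the stage 1 attribute byte comes from MAIR_ELx, where a high nibble of 0b0000 selects one of the four Device memory types (bits [3:2] choose between Device-nGnRnE, -nGnRE, -nGRE and -GRE). Below is a standalone sketch of the predicate above run against a few architectural sample encodings; the harness (the lower-case function name, the sample table, main) is illustrative and not part of the patch:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same test as the patch: high nibble 0b0000 means some Device type. */
    static bool s1_attrs_are_device(uint8_t attrs)
    {
        return (attrs & 0xf0) == 0;
    }

    int main(void)
    {
        /* Architectural MAIR attribute encodings, chosen for illustration. */
        static const struct { uint8_t attrs; const char *name; } samples[] = {
            { 0x00, "Device-nGnRnE" },
            { 0x04, "Device-nGnRE" },
            { 0x0c, "Device-GRE" },
            { 0x44, "Normal Non-cacheable" },
            { 0xff, "Normal Write-back" },
        };

        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            printf("0x%02x %-20s -> %s\n", samples[i].attrs, samples[i].name,
                   s1_attrs_are_device(samples[i].attrs) ? "Device" : "Normal");
        }
        return 0;
    }

Only the 0x0X bytes report Device, including the 0b0000dd01 and 0b0000dd1x patterns the comment flags as FEAT_XS-only or UNPREDICTABLE; that is the under-decoding being accepted.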
@@ -1684,6 +1694,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
     bool aarch64 = arm_el_is_aa64(env, el);
     uint64_t descriptor, new_descriptor;
     ARMSecuritySpace out_space;
+    bool device;
 
     /* TODO: This code does not support shareability levels. */
     if (aarch64) {
@@ -2106,6 +2117,12 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
     if (regime_is_stage2(mmu_idx)) {
         result->cacheattrs.is_s2_format = true;
         result->cacheattrs.attrs = extract32(attrs, 2, 4);
+        /*
+         * Security state does not really affect HCR_EL2.FWB;
+         * we only need to filter FWB for aa32 or other FEAT.
+         */
+        device = S2_attrs_are_device(arm_hcr_el2_eff(env),
+                                     result->cacheattrs.attrs);
     } else {
         /* Index into MAIR registers for cache attributes */
         uint8_t attrindx = extract32(attrs, 2, 3);
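The S2_attrs_are_device helper called above (its body is not shown in this excerpt) has to look at the effective HCR_EL2 value because FEAT_FWB changes the stage 2 attribute encoding: without FWB, attrs[3:2] == 0b00 means Device; with FWB, attrs[2] == 0 does. A self-contained sketch consistent with those encodings; HCR_FWB is defined locally here for illustration (QEMU has the real definition):

    #include <stdbool.h>
    #include <stdint.h>

    #define HCR_FWB (1ULL << 46)    /* HCR_EL2.FWB, stand-in definition */

    static bool s2_attrs_are_device(uint64_t hcr, uint8_t attrs)
    {
        if (hcr & HCR_FWB) {
            /* FEAT_FWB encoding: attrs[2] == 0 selects Device. */
            return (attrs & 0x4) == 0;
        } else {
            /* Classic encoding: attrs[3:2] == 0b00 selects Device. */
            return (attrs & 0xc) == 0;
        }
    }

Passing arm_hcr_el2_eff() rather than the raw register is what the "filter FWB" comment refers to: the effective value already accounts for AArch32 and for CPUs without the feature.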
@@ -2118,6 +2135,28 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
         if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
             result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
         }
+        device = S1_attrs_are_device(result->cacheattrs.attrs);
     }
+
+    /*
+     * Enable alignment checks on Device memory.
+     *
+     * Per R_XCHFJ, this check is mis-ordered. The correct ordering
+     * for alignment, permission, and stage 2 faults should be:
+     *    - Alignment fault caused by the memory type
+     *    - Permission fault
+     *    - A stage 2 fault on the memory access
+     * but due to the way the TCG softmmu TLB operates, we will have
+     * implicitly done the permission check and the stage2 lookup in
+     * finding the TLB entry, so the alignment check cannot be done sooner.
+     *
+     * In v7, for a CPU without the Virtualization Extensions this
+     * access is UNPREDICTABLE; we choose to make it take the alignment
+     * fault as is required for a v7VE CPU. (QEMU doesn't emulate any
+     * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
+     */
+    if (device) {
+        result->f.tlb_fill_flags |= TLB_CHECK_ALIGNED;
+    }
+
     /*
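The net effect of TLB_CHECK_ALIGNED is that, once this translation is cached, the softmmu slow path rejects any access to the page that is not aligned to its own size. A simplified model of that check, not the actual accel/tcg code; the flag value and function name are stand-ins:

    #include <stdbool.h>
    #include <stdint.h>

    #define TLB_CHECK_ALIGNED (1u << 0)    /* stand-in for the real flag bit */

    /*
     * Device memory requires accesses to be aligned to their own size,
     * so a flagged page faults on any misaligned address.
     */
    static bool raises_alignment_fault(uint32_t tlb_flags,
                                       uint64_t addr, unsigned size)
    {
        return (tlb_flags & TLB_CHECK_ALIGNED) && (addr & (size - 1)) != 0;
    }

So a 4-byte load at an address like 0x1002 in a Device page faults, while the same load at 0x1004 succeeds. Per the mis-ordering note in the hunk above, QEMU can only raise this fault after the TLB lookup has implicitly done the permission and stage 2 checks.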