@@ -362,6 +362,7 @@ typedef struct CPUArchState {
uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
uint64_t hcr_el2; /* Hypervisor configuration register */
+ uint64_t hcrx_el2; /* Extended Hypervisor configuration register */
uint64_t scr_el3; /* Secure configuration register. */
union { /* Fault status registers. */
struct {
@@ -1545,6 +1546,19 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
#define HCR_TWEDEN (1ULL << 59)
#define HCR_TWEDEL MAKE_64BIT_MASK(60, 4)
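+/* Bits of HCRX_EL2 (FEAT_HCX). */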
+#define HCRX_ENAS0 (1ULL << 0)
+#define HCRX_ENALS (1ULL << 1)
+#define HCRX_ENASR (1ULL << 2)
+#define HCRX_FNXS (1ULL << 3)
+#define HCRX_FGTNXS (1ULL << 4)
+#define HCRX_SMPME (1ULL << 5)
+#define HCRX_TALLINT (1ULL << 6)
+#define HCRX_VINMI (1ULL << 7)
+#define HCRX_VFNMI (1ULL << 8)
+#define HCRX_CMOW (1ULL << 9)
+#define HCRX_MCE2 (1ULL << 10)
+#define HCRX_MSCEN (1ULL << 11)
+
#define HPFAR_NS (1ULL << 63)
#define SCR_NS (1U << 0)
@@ -2312,6 +2326,7 @@ static inline bool arm_is_el2_enabled(CPUARMState *env)
* Not included here is HCR_RW.
*/
uint64_t arm_hcr_el2_eff(CPUARMState *env);
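+
+/*
+ * Return the effective value of HCRX_EL2.  This is 0 when EL2 is disabled
+ * in the current security state or when SCR_EL3.HXEn forbids its use.
+ */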
+uint64_t arm_hcrx_el2_eff(CPUARMState *env);
/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
@@ -3933,6 +3948,11 @@ static inline bool isar_feature_aa64_ats1e1(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 2;
}
+static inline bool isar_feature_aa64_hcx(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HCX) != 0;
+}
+
static inline bool isar_feature_aa64_uao(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, UAO) != 0;
@@ -934,6 +934,7 @@ static void aarch64_max_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1); /* FEAT_LOR */
t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* FEAT_PAN2 */
t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* FEAT_XNX */
+ t = FIELD_DP64(t, ID_AA64MMFR1, HCX, 1); /* FEAT_HCX */
cpu->isar.id_aa64mmfr1 = t;
t = cpu->isar.id_aa64mmfr2;
@@ -5288,6 +5288,52 @@ uint64_t arm_hcr_el2_eff(CPUARMState *env)
return ret;
}
+static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint64_t valid_mask = 0;
+
+    /*
+     * No currently implemented feature adds bits to HCRX; when features
+     * that define HCRX_EL2 bits (e.g. FEAT_XS, the FEAT_LS64 family) are
+     * implemented, their bits must be ORed into valid_mask here.
+     */
+
+ /* Clear RES0 bits. */
+ env->cp15.hcrx_el2 = value & valid_mask;
+}
+
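+/*
+ * Accesses to HCRX_EL2 from EL2 trap to EL3 when EL3 is implemented
+ * and SCR_EL3.HXEn is 0.
+ */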
+static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) < 3
+ && arm_feature(env, ARM_FEATURE_EL3)
+ && !(env->cp15.scr_el3 & SCR_HXEN)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
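+/*
+ * With no features implemented that define HCRX_EL2 bits, the whole
+ * register is RES0 and resets to 0.
+ */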
+static const ARMCPRegInfo hcrx_el2_reginfo = {
+ .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
+ .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
+ .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2),
+};
+
+/* Return the effective value of HCRX_EL2. */
+uint64_t arm_hcrx_el2_eff(CPUARMState *env)
+{
+ /*
+ * The bits in this register behave as 0 for all purposes other than
+ * direct reads of the register if:
+ * - EL2 is not enabled in the current security state,
+ * - SCR_EL3.HXEn is 0.
+ */
+ if (!arm_is_el2_enabled(env)
+ || (arm_feature(env, ARM_FEATURE_EL3)
+ && !(env->cp15.scr_el3 & SCR_HXEN))) {
+ return 0;
+ }
+ return env->cp15.hcrx_el2;
+}
+
static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -8405,6 +8451,10 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, zcr_reginfo);
}
+ if (cpu_isar_feature(aa64_hcx, cpu)) {
+ define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
+ }
+
#ifdef TARGET_AARCH64
if (cpu_isar_feature(aa64_pauth, cpu)) {
define_arm_cp_regs(cpu, pauth_reginfo);