[1/2] kvm/arm64: Rename HSR to ESR

Message ID 20200629091841.88198-2-gshan@redhat.com (mailing list archive)
State New, archived
Series Refactor ESR related functions

Commit Message

Gavin Shan June 29, 2020, 9:18 a.m. UTC
kvm/arm32 hasn't been supported since commit 541ad0150ca4 ("arm: Remove
32bit KVM host support"), so the HSR (Hyp Syndrome Register) name is no
longer meaningful. Rename HSR to ESR accordingly. This shouldn't cause
any functional changes:

   * Rename kvm_vcpu_get_hsr() to kvm_vcpu_get_esr() to make the
     function name self-explanatory.
   * Rename variables from @hsr to @esr to make them self-explanatory.

Signed-off-by: Gavin Shan <gshan@redhat.com>
---
 arch/arm64/include/asm/kvm_emulate.h | 34 ++++++++++++++--------------
 arch/arm64/include/uapi/asm/kvm.h    |  2 +-
 arch/arm64/kvm/handle_exit.c         | 32 +++++++++++++-------------
 arch/arm64/kvm/hyp/aarch32.c         |  2 +-
 arch/arm64/kvm/hyp/switch.c          | 14 ++++++------
 arch/arm64/kvm/hyp/vgic-v3-sr.c      |  4 ++--
 arch/arm64/kvm/mmu.c                 |  6 ++---
 arch/arm64/kvm/sys_regs.c            | 28 +++++++++++------------
 arch/arm64/kvm/trace_arm.h           | 14 ++++++------
 arch/arm64/kvm/trace_handle_exit.h   | 10 ++++----
 10 files changed, 73 insertions(+), 73 deletions(-)

Comments

Andrew Scull June 29, 2020, 9:44 a.m. UTC | #1
On Mon, Jun 29, 2020 at 07:18:40PM +1000, Gavin Shan wrote:
> kvm/arm32 hasn't been supported since commit 541ad0150ca4 ("arm: Remove
> 32bit KVM host support"), so the HSR (Hyp Syndrome Register) name is no
> longer meaningful. Rename HSR to ESR accordingly. This shouldn't cause
> any functional changes:
> 
>    * Rename kvm_vcpu_get_hsr() to kvm_vcpu_get_esr() to make the
>      function name self-explanatory.
>    * Rename variables from @hsr to @esr to make them self-explanatory.

I like this; the HSR naming has confused me more than once recently!

Acked-by: Andrew Scull <ascull@google.com>
Mark Rutland June 29, 2020, 10:32 a.m. UTC | #2
On Mon, Jun 29, 2020 at 07:18:40PM +1000, Gavin Shan wrote:
> kvm/arm32 hasn't been supported since commit 541ad0150ca4 ("arm: Remove
> 32bit KVM host support"), so the HSR (Hyp Syndrome Register) name is no
> longer meaningful. Rename HSR to ESR accordingly. This shouldn't cause
> any functional changes:
> 
>    * Rename kvm_vcpu_get_hsr() to kvm_vcpu_get_esr() to make the
>      function name self-explanatory.
>    * Rename variables from @hsr to @esr to make them self-explanatory.
> 
> Signed-off-by: Gavin Shan <gshan@redhat.com>

At a high level, I agree that we should move to the `esr` naming to
match the architecture and minimize surprise. However, I think there are
some ABI changes here, which *are* functional changes, and those need to
be avoided.

[...]

> diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
> index ba85bb23f060..d54345573a88 100644
> --- a/arch/arm64/include/uapi/asm/kvm.h
> +++ b/arch/arm64/include/uapi/asm/kvm.h
> @@ -140,7 +140,7 @@ struct kvm_guest_debug_arch {
>  };
>  
>  struct kvm_debug_exit_arch {
> -	__u32 hsr;
> +	__u32 esr;
>  	__u64 far;	/* used for watchpoints */
>  };

This is userspace ABI, and changing this *will* break userspace. This
*is* a functional change.

NAK to this specifically. At best there should be a comment here noting
that this naming is legacy but must stay for ABI reasons.
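Something like the below (a sketch only; the comment wording is mine)
would keep the ABI stable while still pointing readers at the
architectural name:

    struct kvm_debug_exit_arch {
            /*
             * Architecturally this is the ESR_EL2 value. The 'hsr'
             * name is legacy, but it is userspace ABI and must not
             * change.
             */
            __u32 hsr;
            __u64 far;      /* used for watchpoints */
    };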

[...]

> diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
> index 4c71270cc097..ee4f691b16ff 100644
> --- a/arch/arm64/kvm/trace_arm.h
> +++ b/arch/arm64/kvm/trace_arm.h
> @@ -42,7 +42,7 @@ TRACE_EVENT(kvm_exit,
>  		__entry->vcpu_pc		= vcpu_pc;
>  	),
>  
> -	TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
> +	TP_printk("%s: ESR_EC: 0x%04x (%s), PC: 0x%08lx",
>  		  __print_symbolic(__entry->ret, kvm_arm_exception_type),
>  		  __entry->esr_ec,
>  		  __print_symbolic(__entry->esr_ec, kvm_arm_exception_class),

Likewise, isn't all the tracepoint format stuff ABI? I'm not comfortable
that we can change this.
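(For context: tracepoint field names are exported to userspace via the
tracefs "format" files, and tools parse fields by name, so the rename is
externally visible. Abridged, and with illustrative offsets, the current
format is along these lines:

    # cat /sys/kernel/debug/tracing/events/kvm/kvm_guest_fault/format
    name: kvm_guest_fault
    ...
    field:unsigned long hsr;    offset:16;  size:8; signed:0;
    ...

Any parser matching on the "hsr" field would stop finding it.)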

Thanks,
Mark.

> @@ -50,27 +50,27 @@ TRACE_EVENT(kvm_exit,
>  );
>  
>  TRACE_EVENT(kvm_guest_fault,
> -	TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
> +	TP_PROTO(unsigned long vcpu_pc, unsigned long esr,
>  		 unsigned long hxfar,
>  		 unsigned long long ipa),
> -	TP_ARGS(vcpu_pc, hsr, hxfar, ipa),
> +	TP_ARGS(vcpu_pc, esr, hxfar, ipa),
>  
>  	TP_STRUCT__entry(
>  		__field(	unsigned long,	vcpu_pc		)
> -		__field(	unsigned long,	hsr		)
> +		__field(	unsigned long,	esr		)
>  		__field(	unsigned long,	hxfar		)
>  		__field(   unsigned long long,	ipa		)
>  	),
>  
>  	TP_fast_assign(
>  		__entry->vcpu_pc		= vcpu_pc;
> -		__entry->hsr			= hsr;
> +		__entry->esr			= esr;
>  		__entry->hxfar			= hxfar;
>  		__entry->ipa			= ipa;
>  	),
>  
> -	TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
> -		  __entry->ipa, __entry->hsr,
> +	TP_printk("ipa %#llx, esr %#08lx, hxfar %#08lx, pc %#08lx",
> +		  __entry->ipa, __entry->esr,
>  		  __entry->hxfar, __entry->vcpu_pc)
>  );
>  
> diff --git a/arch/arm64/kvm/trace_handle_exit.h b/arch/arm64/kvm/trace_handle_exit.h
> index 2c56d1e0f5bd..94ef1a98e609 100644
> --- a/arch/arm64/kvm/trace_handle_exit.h
> +++ b/arch/arm64/kvm/trace_handle_exit.h
> @@ -139,18 +139,18 @@ TRACE_EVENT(trap_reg,
>  );
>  
>  TRACE_EVENT(kvm_handle_sys_reg,
> -	TP_PROTO(unsigned long hsr),
> -	TP_ARGS(hsr),
> +	TP_PROTO(unsigned long esr),
> +	TP_ARGS(esr),
>  
>  	TP_STRUCT__entry(
> -		__field(unsigned long,	hsr)
> +		__field(unsigned long,	esr)
>  	),
>  
>  	TP_fast_assign(
> -		__entry->hsr = hsr;
> +		__entry->esr = esr;
>  	),
>  
> -	TP_printk("HSR 0x%08lx", __entry->hsr)
> +	TP_printk("ESR 0x%08lx", __entry->esr)
>  );
>  
>  TRACE_EVENT(kvm_sys_access,
> -- 
> 2.23.0
>
Mark Rutland June 29, 2020, 11:05 a.m. UTC | #3
On Mon, Jun 29, 2020 at 11:32:08AM +0100, Mark Rutland wrote:
> On Mon, Jun 29, 2020 at 07:18:40PM +1000, Gavin Shan wrote:
> > kvm/arm32 hasn't been supported since commit 541ad0150ca4 ("arm: Remove
> > 32bit KVM host support"), so the HSR (Hyp Syndrome Register) name is no
> > longer meaningful. Rename HSR to ESR accordingly. This shouldn't cause
> > any functional changes:
> > 
> >    * Rename kvm_vcpu_get_hsr() to kvm_vcpu_get_esr() to make the
> >      function name self-explanatory.
> >    * Rename variables from @hsr to @esr to make them self-explanatory.
> > 
> > Signed-off-by: Gavin Shan <gshan@redhat.com>
> 
> At a high level, I agree that we should move to the `esr` naming to
> match the architecture and minimize surprise. However, I think there are
> some ABI changes here, which *are* functional changes, and those need to
> be avoided.
> 
> [...]
> 
> > diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
> > index ba85bb23f060..d54345573a88 100644
> > --- a/arch/arm64/include/uapi/asm/kvm.h
> > +++ b/arch/arm64/include/uapi/asm/kvm.h
> > @@ -140,7 +140,7 @@ struct kvm_guest_debug_arch {
> >  };
> >  
> >  struct kvm_debug_exit_arch {
> > -	__u32 hsr;
> > +	__u32 esr;
> >  	__u64 far;	/* used for watchpoints */
> >  };
> 
> This is userspace ABI, and changing this *will* break userspace. This
> *is* a functional change.

To be slightly clearer: while the structure's layout isn't changed, any
userspace software consuming this header will fail to build after this
change, because there will no longer be a field called `hsr`.
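For illustration, a hypothetical VMM fragment along these lines (modelled
on how KVM_EXIT_DEBUG exits are typically consumed; the function name is
mine) would fail with "no member named 'hsr'" against the patched header:

    #include <linux/kvm.h>
    #include <stdio.h>

    /* 'run' is the vcpu's mmap'ed struct kvm_run. */
    static void handle_debug_exit(struct kvm_run *run)
    {
            if (run->exit_reason == KVM_EXIT_DEBUG)
                    fprintf(stderr, "debug exit: hsr=%#x far=%#llx\n",
                            run->debug.arch.hsr,
                            (unsigned long long)run->debug.arch.far);
    }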

Existing binaries will almost certainly not care, but regardless this is
a regression (when building userspace) that I don't think we can permit.

Thanks,
Mark.
Marc Zyngier June 29, 2020, 5 p.m. UTC | #4
On 2020-06-29 11:32, Mark Rutland wrote:
> On Mon, Jun 29, 2020 at 07:18:40PM +1000, Gavin Shan wrote:
>> kvm/arm32 hasn't been supported since commit 541ad0150ca4 ("arm: Remove
>> 32bit KVM host support"), so the HSR (Hyp Syndrome Register) name is no
>> longer meaningful. Rename HSR to ESR accordingly. This shouldn't cause
>> any functional changes:
>> 
>>    * Rename kvm_vcpu_get_hsr() to kvm_vcpu_get_esr() to make the
>>      function name self-explanatory.
>>    * Rename variables from @hsr to @esr to make them self-explanatory.
>> 
>> Signed-off-by: Gavin Shan <gshan@redhat.com>
> 
> At a high level, I agree that we should move to the `esr` naming to
> match the architecture and minimize surprise. However, I think there
> are some ABI changes here, which *are* functional changes, and those
> need to be avoided.
> 
> [...]
> 
>> diff --git a/arch/arm64/include/uapi/asm/kvm.h 
>> b/arch/arm64/include/uapi/asm/kvm.h
>> index ba85bb23f060..d54345573a88 100644
>> --- a/arch/arm64/include/uapi/asm/kvm.h
>> +++ b/arch/arm64/include/uapi/asm/kvm.h
>> @@ -140,7 +140,7 @@ struct kvm_guest_debug_arch {
>>  };
>> 
>>  struct kvm_debug_exit_arch {
>> -	__u32 hsr;
>> +	__u32 esr;
>>  	__u64 far;	/* used for watchpoints */
>>  };
> 
> This is userspace ABI, and changing this *will* break userspace. This
> *is* a functional change.
> 
> NAK to this specifically. At best there should be a comment here noting
> that this naming is legacy but must stay for ABI reasons.
> 
> [...]
> 
>> diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
>> index 4c71270cc097..ee4f691b16ff 100644
>> --- a/arch/arm64/kvm/trace_arm.h
>> +++ b/arch/arm64/kvm/trace_arm.h
>> @@ -42,7 +42,7 @@ TRACE_EVENT(kvm_exit,
>>  		__entry->vcpu_pc		= vcpu_pc;
>>  	),
>> 
>> -	TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
>> +	TP_printk("%s: ESR_EC: 0x%04x (%s), PC: 0x%08lx",
>>  		  __print_symbolic(__entry->ret, kvm_arm_exception_type),
>>  		  __entry->esr_ec,
>>  		  __print_symbolic(__entry->esr_ec, kvm_arm_exception_class),
> 
> Likewise, isn't all the tracepoint format stuff ABI? I'm not
> comfortable that we can change this.

Tracepoints are ABI, and they cannot change. As it is, this patch
isn't acceptable (the worst offender being the uapi change, though).

         M.
Gavin Shan June 29, 2020, 11:14 p.m. UTC | #5
On 6/30/20 3:00 AM, Marc Zyngier wrote:
> On 2020-06-29 11:32, Mark Rutland wrote:
>> On Mon, Jun 29, 2020 at 07:18:40PM +1000, Gavin Shan wrote:
>>> kvm/arm32 hasn't been supported since commit 541ad0150ca4 ("arm: Remove
>>> 32bit KVM host support"), so the HSR (Hyp Syndrome Register) name is no
>>> longer meaningful. Rename HSR to ESR accordingly. This shouldn't cause
>>> any functional changes:
>>>
>>>    * Rename kvm_vcpu_get_hsr() to kvm_vcpu_get_esr() to make the
>>>      function name self-explanatory.
>>>    * Rename variables from @hsr to @esr to make them self-explanatory.
>>>
>>> Signed-off-by: Gavin Shan <gshan@redhat.com>
>>
>> At a high level, I agree that we should move to the `esr` naming to
>> match the architecture and minimize surprise. However, I think there are
>> some ABI changes here, which *are* functional changes, and those need to
>> be avoided.
>>
>> [...]
>>
>>> diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
>>> index ba85bb23f060..d54345573a88 100644
>>> --- a/arch/arm64/include/uapi/asm/kvm.h
>>> +++ b/arch/arm64/include/uapi/asm/kvm.h
>>> @@ -140,7 +140,7 @@ struct kvm_guest_debug_arch {
>>>  };
>>>
>>>  struct kvm_debug_exit_arch {
>>> -    __u32 hsr;
>>> +    __u32 esr;
>>>      __u64 far;    /* used for watchpoints */
>>>  };
>>
>> This is userspace ABI, and changing this *will* break userspace. This
>> *is* a functional change.
>>
>> NAK to this specifically. At best there should be a comment here noting
>> that this naming is legacy but must stay for ABI reasons.
>>
>> [...]
>>
>>> diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
>>> index 4c71270cc097..ee4f691b16ff 100644
>>> --- a/arch/arm64/kvm/trace_arm.h
>>> +++ b/arch/arm64/kvm/trace_arm.h
>>> @@ -42,7 +42,7 @@ TRACE_EVENT(kvm_exit,
>>>          __entry->vcpu_pc        = vcpu_pc;
>>>      ),
>>>
>>> -    TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
>>> +    TP_printk("%s: ESR_EC: 0x%04x (%s), PC: 0x%08lx",
>>>            __print_symbolic(__entry->ret, kvm_arm_exception_type),
>>>            __entry->esr_ec,
>>>            __print_symbolic(__entry->esr_ec, kvm_arm_exception_class),
>>
>> Likewise, isn't all the tracepoint format stuff ABI? I'm not comfortable
>> that we can change this.
> 
> Tracepoints are ABI, and they cannot change. As it is, this patch
> isn't acceptable (the worst offender being the uapi change, though).
> 

Yes, I was reluctant to make the uapi and tracepoint changes, which are
part of the ABI. I will drop them in v2.

Thanks,
Gavin

Patch

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 4d0f8ea600ba..c9ba0df47f7d 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -259,14 +259,14 @@  static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 	return mode != PSR_MODE_EL0t;
 }
 
-static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.esr_el2;
 }
 
 static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
 	if (esr & ESR_ELx_CV)
 		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
@@ -291,64 +291,64 @@  static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
 
 static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
 }
 
 static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
 }
 
 static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
+	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
 }
 
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
 }
 
 static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
 }
 
 static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
-	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
 static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
 }
 
 static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
 		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
 }
 
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
 }
 
 static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
-	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
 static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
@@ -358,12 +358,12 @@  static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 
 static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
 static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
@@ -387,7 +387,7 @@  static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 
 static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	return ESR_ELx_SYS64_ISS_RT(esr);
 }
 
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index ba85bb23f060..d54345573a88 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -140,7 +140,7 @@  struct kvm_guest_debug_arch {
 };
 
 struct kvm_debug_exit_arch {
-	__u32 hsr;
+	__u32 esr;
 	__u64 far;	/* used for watchpoints */
 };
 
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 5a02d4c90559..9baca85c5aa8 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -89,7 +89,7 @@  static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
  */
 static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
+	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
 		vcpu->stat.wfe_exit_stat++;
 		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
@@ -119,13 +119,13 @@  static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
  */
 static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	int ret = 0;
 
 	run->exit_reason = KVM_EXIT_DEBUG;
-	run->debug.arch.hsr = hsr;
+	run->debug.arch.esr = esr;
 
-	switch (ESR_ELx_EC(hsr)) {
+	switch (ESR_ELx_EC(esr)) {
 	case ESR_ELx_EC_WATCHPT_LOW:
 		run->debug.arch.far = vcpu->arch.fault.far_el2;
 		/* fall through */
@@ -135,8 +135,8 @@  static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	case ESR_ELx_EC_BRK64:
 		break;
 	default:
-		kvm_err("%s: un-handled case hsr: %#08x\n",
-			__func__, (unsigned int) hsr);
+		kvm_err("%s: un-handled case esr: %#08x\n",
+			__func__, (unsigned int) esr);
 		ret = -1;
 		break;
 	}
@@ -146,10 +146,10 @@  static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
-	kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
-		      hsr, esr_get_class_string(hsr));
+	kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n",
+		      esr, esr_get_class_string(esr));
 
 	kvm_inject_undefined(vcpu);
 	return 1;
@@ -200,10 +200,10 @@  static exit_handle_fn arm_exit_handlers[] = {
 
 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 {
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
-	u8 hsr_ec = ESR_ELx_EC(hsr);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
+	u8 esr_ec = ESR_ELx_EC(esr);
 
-	return arm_exit_handlers[hsr_ec];
+	return arm_exit_handlers[esr_ec];
 }
 
 /*
@@ -241,15 +241,15 @@  int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		       int exception_index)
 {
 	if (ARM_SERROR_PENDING(exception_index)) {
-		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+		u8 esr_ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 
 		/*
 		 * HVC/SMC already have an adjusted PC, which we need
 		 * to correct in order to return to after having
 		 * injected the SError.
 		 */
-		if (hsr_ec == ESR_ELx_EC_HVC32 || hsr_ec == ESR_ELx_EC_HVC64 ||
-		    hsr_ec == ESR_ELx_EC_SMC32 || hsr_ec == ESR_ELx_EC_SMC64) {
+		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64 ||
+		    esr_ec == ESR_ELx_EC_SMC32 || esr_ec == ESR_ELx_EC_SMC64) {
 			u32 adj =  kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
 			*vcpu_pc(vcpu) -= adj;
 		}
@@ -307,5 +307,5 @@  void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	exception_index = ARM_EXCEPTION_CODE(exception_index);
 
 	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
-		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_hsr(vcpu));
+		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
 }
diff --git a/arch/arm64/kvm/hyp/aarch32.c b/arch/arm64/kvm/hyp/aarch32.c
index 25c0e47d57cb..1e948704d60f 100644
--- a/arch/arm64/kvm/hyp/aarch32.c
+++ b/arch/arm64/kvm/hyp/aarch32.c
@@ -51,7 +51,7 @@  bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
 	int cond;
 
 	/* Top two bits non-zero?  Unconditional. */
-	if (kvm_vcpu_get_hsr(vcpu) >> 30)
+	if (kvm_vcpu_get_esr(vcpu) >> 30)
 		return true;
 
 	/* Is condition field valid? */
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index db1c4487d95d..5164074c1ae1 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -356,7 +356,7 @@  static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
 static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 {
 	bool vhe, sve_guest, sve_host;
-	u8 hsr_ec;
+	u8 esr_ec;
 
 	if (!system_supports_fpsimd())
 		return false;
@@ -371,14 +371,14 @@  static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 		vhe = has_vhe();
 	}
 
-	hsr_ec = kvm_vcpu_trap_get_class(vcpu);
-	if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
-	    hsr_ec != ESR_ELx_EC_SVE)
+	esr_ec = kvm_vcpu_trap_get_class(vcpu);
+	if (esr_ec != ESR_ELx_EC_FP_ASIMD &&
+	    esr_ec != ESR_ELx_EC_SVE)
 		return false;
 
 	/* Don't handle SVE traps for non-SVE vcpus here: */
 	if (!sve_guest)
-		if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
+		if (esr_ec != ESR_ELx_EC_FP_ASIMD)
 			return false;
 
 	/* Valid trap.  Switch the context: */
@@ -437,7 +437,7 @@  static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 
 static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
 {
-	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
+	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
 	int rt = kvm_vcpu_sys_get_rt(vcpu);
 	u64 val = vcpu_get_reg(vcpu, rt);
 
@@ -529,7 +529,7 @@  static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
 	u64 val;
 
 	if (!vcpu_has_ptrauth(vcpu) ||
-	    !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu)))
+	    !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
 		return false;
 
 	ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 10ed539835c1..bee0a74671ca 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -426,7 +426,7 @@  static int __hyp_text __vgic_v3_bpr_min(void)
 
 static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
 
 	return crm != 8;
@@ -992,7 +992,7 @@  int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
 	bool is_read;
 	u32 sysreg;
 
-	esr = kvm_vcpu_get_hsr(vcpu);
+	esr = kvm_vcpu_get_esr(vcpu);
 	if (vcpu_mode_is_32bit(vcpu)) {
 		if (!kvm_condition_valid(vcpu)) {
 			__kvm_skip_instr(vcpu);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 8c0035cab6b6..36506112480e 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -2079,7 +2079,7 @@  int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		 * For RAS the host kernel may handle this abort.
 		 * There is no need to pass the error into the guest.
 		 */
-		if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
+		if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
 			return 1;
 
 		if (unlikely(!is_iabt)) {
@@ -2088,7 +2088,7 @@  int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		}
 	}
 
-	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
+	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
 			      kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
 	/* Check the stage-2 fault is trans. fault or write fault */
@@ -2097,7 +2097,7 @@  int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
 			kvm_vcpu_trap_get_class(vcpu),
 			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
-			(unsigned long)kvm_vcpu_get_hsr(vcpu));
+			(unsigned long)kvm_vcpu_get_esr(vcpu));
 		return -EFAULT;
 	}
 
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index baf5ce9225ce..a96dd62a90ce 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -2220,10 +2220,10 @@  static int emulate_cp(struct kvm_vcpu *vcpu,
 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
 				struct sys_reg_params *params)
 {
-	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
 	int cp = -1;
 
-	switch(hsr_ec) {
+	switch (esr_ec) {
 	case ESR_ELx_EC_CP15_32:
 	case ESR_ELx_EC_CP15_64:
 		cp = 15;
@@ -2254,17 +2254,17 @@  static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 			    size_t nr_specific)
 {
 	struct sys_reg_params params;
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
-	int Rt2 = (hsr >> 10) & 0x1f;
+	int Rt2 = (esr >> 10) & 0x1f;
 
 	params.is_aarch32 = true;
 	params.is_32bit = false;
-	params.CRm = (hsr >> 1) & 0xf;
-	params.is_write = ((hsr & 1) == 0);
+	params.CRm = (esr >> 1) & 0xf;
+	params.is_write = ((esr & 1) == 0);
 
 	params.Op0 = 0;
-	params.Op1 = (hsr >> 16) & 0xf;
+	params.Op1 = (esr >> 16) & 0xf;
 	params.Op2 = 0;
 	params.CRn = 0;
 
@@ -2311,18 +2311,18 @@  static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 			    size_t nr_specific)
 {
 	struct sys_reg_params params;
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	int Rt  = kvm_vcpu_sys_get_rt(vcpu);
 
 	params.is_aarch32 = true;
 	params.is_32bit = true;
-	params.CRm = (hsr >> 1) & 0xf;
+	params.CRm = (esr >> 1) & 0xf;
 	params.regval = vcpu_get_reg(vcpu, Rt);
-	params.is_write = ((hsr & 1) == 0);
-	params.CRn = (hsr >> 10) & 0xf;
+	params.is_write = ((esr & 1) == 0);
+	params.CRn = (esr >> 10) & 0xf;
 	params.Op0 = 0;
-	params.Op1 = (hsr >> 14) & 0x7;
-	params.Op2 = (hsr >> 17) & 0x7;
+	params.Op1 = (esr >> 14) & 0x7;
+	params.Op2 = (esr >> 17) & 0x7;
 
 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
 	    !emulate_cp(vcpu, &params, global, nr_global)) {
@@ -2421,7 +2421,7 @@  static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct sys_reg_params params;
-	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+	unsigned long esr = kvm_vcpu_get_esr(vcpu);
 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
 	int ret;
 
diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
index 4c71270cc097..ee4f691b16ff 100644
--- a/arch/arm64/kvm/trace_arm.h
+++ b/arch/arm64/kvm/trace_arm.h
@@ -42,7 +42,7 @@  TRACE_EVENT(kvm_exit,
 		__entry->vcpu_pc		= vcpu_pc;
 	),
 
-	TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
+	TP_printk("%s: ESR_EC: 0x%04x (%s), PC: 0x%08lx",
 		  __print_symbolic(__entry->ret, kvm_arm_exception_type),
 		  __entry->esr_ec,
 		  __print_symbolic(__entry->esr_ec, kvm_arm_exception_class),
@@ -50,27 +50,27 @@  TRACE_EVENT(kvm_exit,
 );
 
 TRACE_EVENT(kvm_guest_fault,
-	TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
+	TP_PROTO(unsigned long vcpu_pc, unsigned long esr,
 		 unsigned long hxfar,
 		 unsigned long long ipa),
-	TP_ARGS(vcpu_pc, hsr, hxfar, ipa),
+	TP_ARGS(vcpu_pc, esr, hxfar, ipa),
 
 	TP_STRUCT__entry(
 		__field(	unsigned long,	vcpu_pc		)
-		__field(	unsigned long,	hsr		)
+		__field(	unsigned long,	esr		)
 		__field(	unsigned long,	hxfar		)
 		__field(   unsigned long long,	ipa		)
 	),
 
 	TP_fast_assign(
 		__entry->vcpu_pc		= vcpu_pc;
-		__entry->hsr			= hsr;
+		__entry->esr			= esr;
 		__entry->hxfar			= hxfar;
 		__entry->ipa			= ipa;
 	),
 
-	TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
-		  __entry->ipa, __entry->hsr,
+	TP_printk("ipa %#llx, esr %#08lx, hxfar %#08lx, pc %#08lx",
+		  __entry->ipa, __entry->esr,
 		  __entry->hxfar, __entry->vcpu_pc)
 );
 
diff --git a/arch/arm64/kvm/trace_handle_exit.h b/arch/arm64/kvm/trace_handle_exit.h
index 2c56d1e0f5bd..94ef1a98e609 100644
--- a/arch/arm64/kvm/trace_handle_exit.h
+++ b/arch/arm64/kvm/trace_handle_exit.h
@@ -139,18 +139,18 @@  TRACE_EVENT(trap_reg,
 );
 
 TRACE_EVENT(kvm_handle_sys_reg,
-	TP_PROTO(unsigned long hsr),
-	TP_ARGS(hsr),
+	TP_PROTO(unsigned long esr),
+	TP_ARGS(esr),
 
 	TP_STRUCT__entry(
-		__field(unsigned long,	hsr)
+		__field(unsigned long,	esr)
 	),
 
 	TP_fast_assign(
-		__entry->hsr = hsr;
+		__entry->esr = esr;
 	),
 
-	TP_printk("HSR 0x%08lx", __entry->hsr)
+	TP_printk("ESR 0x%08lx", __entry->esr)
 );
 
 TRACE_EVENT(kvm_sys_access,