
[2/2] kvm/arm64: Detach ESR operator from vCPU struct

Message ID 20200629091841.88198-3-gshan@redhat.com (mailing list archive)
State New, archived
Series Refactor ESR related functions

Commit Message

Gavin Shan June 29, 2020, 9:18 a.m. UTC
There is a set of inline functions defined in kvm_emulate.h. Those
functions read the ESR from the vCPU fault information struct and then
operate on it, so they are tied to the vCPU fault information and the
vCPU struct. This limits their usage scope.

This detaches these functions from the vCPU struct by introducing
another set of inline functions in esr.h that manipulate a specified
ESR value. With that, the inline functions defined in kvm_emulate.h
can call these inline functions (in esr.h) instead. This shouldn't
cause any functional changes.

Signed-off-by: Gavin Shan <gshan@redhat.com>
---
 arch/arm64/include/asm/esr.h         | 32 +++++++++++++++++++++
 arch/arm64/include/asm/kvm_emulate.h | 43 ++++++++++++----------------
 2 files changed, 51 insertions(+), 24 deletions(-)

Comments

Andrew Scull June 29, 2020, 9:59 a.m. UTC | #1
On Mon, Jun 29, 2020 at 07:18:41PM +1000, Gavin Shan wrote:
> There is a set of inline functions defined in kvm_emulate.h. Those
> functions read the ESR from the vCPU fault information struct and then
> operate on it, so they are tied to the vCPU fault information and the
> vCPU struct. This limits their usage scope.
> 
> This detaches these functions from the vCPU struct by introducing
> another set of inline functions in esr.h that manipulate a specified
> ESR value. With that, the inline functions defined in kvm_emulate.h
> can call these inline functions (in esr.h) instead. This shouldn't
> cause any functional changes.
> 
> Signed-off-by: Gavin Shan <gshan@redhat.com>
> ---
>  arch/arm64/include/asm/esr.h         | 32 +++++++++++++++++++++
>  arch/arm64/include/asm/kvm_emulate.h | 43 ++++++++++++----------------
>  2 files changed, 51 insertions(+), 24 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
> index 035003acfa87..950204c5fbe1 100644
> --- a/arch/arm64/include/asm/esr.h
> +++ b/arch/arm64/include/asm/esr.h
> @@ -326,6 +326,38 @@ static inline bool esr_is_data_abort(u32 esr)
>  	return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
>  }
>  
> +#define ESR_DECLARE_CHECK_FUNC(name, field)	\
> +static inline bool esr_is_##name(u32 esr)	\
> +{						\
> +	return !!(esr & (field));		\
> +}
> +#define ESR_DECLARE_GET_FUNC(name, mask, shift)	\
> +static inline u32 esr_get_##name(u32 esr)	\
> +{						\
> +	return ((esr & (mask)) >> (shift));	\
> +}

Should these be named DEFINE rather than DECLARE, given they also
include the function definition?
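
For illustration, a DEFINE-named version would keep the macro body
unchanged and only rename it:

#define ESR_DEFINE_CHECK_FUNC(name, field)	\
static inline bool esr_is_##name(u32 esr)	\
{						\
	return !!(esr & (field));		\
}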

> +
> +ESR_DECLARE_CHECK_FUNC(il_32bit,   ESR_ELx_IL);
> +ESR_DECLARE_CHECK_FUNC(condition,  ESR_ELx_CV);
> +ESR_DECLARE_CHECK_FUNC(dabt_valid, ESR_ELx_ISV);
> +ESR_DECLARE_CHECK_FUNC(dabt_sse,   ESR_ELx_SSE);
> +ESR_DECLARE_CHECK_FUNC(dabt_sf,    ESR_ELx_SF);
> +ESR_DECLARE_CHECK_FUNC(dabt_s1ptw, ESR_ELx_S1PTW);
> +ESR_DECLARE_CHECK_FUNC(dabt_write, ESR_ELx_WNR);
> +ESR_DECLARE_CHECK_FUNC(dabt_cm,    ESR_ELx_CM);
> +
> +ESR_DECLARE_GET_FUNC(class,        ESR_ELx_EC_MASK,      ESR_ELx_EC_SHIFT);
> +ESR_DECLARE_GET_FUNC(fault,        ESR_ELx_FSC,          0);
> +ESR_DECLARE_GET_FUNC(fault_type,   ESR_ELx_FSC_TYPE,     0);
> +ESR_DECLARE_GET_FUNC(condition,    ESR_ELx_COND_MASK,    ESR_ELx_COND_SHIFT);
> +ESR_DECLARE_GET_FUNC(hvc_imm,      ESR_ELx_xVC_IMM_MASK, 0);
> +ESR_DECLARE_GET_FUNC(dabt_iss_nisv_sanitized,
> +		     (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC), 0);
> +ESR_DECLARE_GET_FUNC(dabt_rd,      ESR_ELx_SRT_MASK,     ESR_ELx_SRT_SHIFT);
> +ESR_DECLARE_GET_FUNC(dabt_as,      ESR_ELx_SAS,          ESR_ELx_SAS_SHIFT);
> +ESR_DECLARE_GET_FUNC(sys_rt,       ESR_ELx_SYS64_ISS_RT_MASK,
> +				   ESR_ELx_SYS64_ISS_RT_SHIFT);
> +
>  const char *esr_get_class_string(u32 esr);
>  #endif /* __ASSEMBLY */
>  
> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
> index c9ba0df47f7d..9337d90c517f 100644
> --- a/arch/arm64/include/asm/kvm_emulate.h
> +++ b/arch/arm64/include/asm/kvm_emulate.h
> @@ -266,12 +266,8 @@ static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
>  
>  static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
>  {
> -	u32 esr = kvm_vcpu_get_esr(vcpu);
> -
> -	if (esr & ESR_ELx_CV)
> -		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
> -
> -	return -1;
> +	return esr_is_condition(kvm_vcpu_get_esr(vcpu)) ?
> +	       esr_get_condition(kvm_vcpu_get_esr(vcpu)) : -1;
>  }
>  
>  static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
> @@ -291,79 +287,79 @@ static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
>  
>  static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
> +	return esr_get_hvc_imm(kvm_vcpu_get_esr(vcpu));
>  }

It feels a little strange that the raw ESR case uses macro magic
but the vcpu cases here write everything out in full. Was there a
reason that I'm missing, or is there a chance to apply a consistent
approach?

I'm not sure of the style preferences, but if it goes the macro path,
the esr field definitions could be reused with something x-macro-like
to get the kvm_emulate.h and esr.h functions generated from a single
list of the esr fields.
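
As a minimal sketch of that x-macro idea (the field list and the
generated wrapper names here are hypothetical, only to show the shape):

/* Single list of single-bit ESR fields, expanded twice below. */
#define ESR_CHECK_FIELDS(FUNC)			\
	FUNC(il_32bit,   ESR_ELx_IL)		\
	FUNC(dabt_valid, ESR_ELx_ISV)		\
	FUNC(dabt_write, ESR_ELx_WNR)

/* esr.h: generate the raw esr_is_<name>(u32 esr) helpers. */
#define ESR_DEFINE_CHECK_FUNC(name, field)	\
static inline bool esr_is_##name(u32 esr)	\
{						\
	return !!(esr & (field));		\
}
ESR_CHECK_FIELDS(ESR_DEFINE_CHECK_FUNC)

/* kvm_emulate.h: generate vCPU wrappers from the same list. */
#define KVM_DEFINE_CHECK_FUNC(name, field)				\
static inline bool kvm_vcpu_esr_is_##name(const struct kvm_vcpu *vcpu)	\
{									\
	return esr_is_##name(kvm_vcpu_get_esr(vcpu));			\
}
ESR_CHECK_FIELDS(KVM_DEFINE_CHECK_FUNC)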

>  static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
> +	return esr_is_dabt_valid(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
> +	return esr_get_dabt_iss_nisv_sanitized(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
> +	return esr_is_dabt_sse(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
> +	return esr_is_dabt_sf(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
>  {
> -	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
> +	return esr_get_dabt_rd(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
> +	return esr_is_dabt_s1ptw(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
> -		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
> +	return esr_is_dabt_write(kvm_vcpu_get_esr(vcpu)) ||
> +	       esr_is_dabt_s1ptw(kvm_vcpu_get_esr(vcpu)); /* AF/DBM update */
>  }
>  
>  static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
> +	return esr_is_dabt_cm(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
>  {
> -	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
> +	return 1 << esr_get_dabt_as(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  /* This one is not specific to Data Abort */
>  static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
> +	return esr_is_il_32bit(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
>  {
> -	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
> +	return esr_get_class(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
> +	return esr_get_class(kvm_vcpu_get_esr(vcpu)) == ESR_ELx_EC_IABT_LOW;
>  }
>  
>  static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
> +	return esr_get_fault(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
> +	return esr_get_fault_type(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
> @@ -387,8 +383,7 @@ static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
>  
>  static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
>  {
> -	u32 esr = kvm_vcpu_get_esr(vcpu);
> -	return ESR_ELx_SYS64_ISS_RT(esr);
> +	return esr_get_sys_rt(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
> -- 
> 2.23.0
> 
> _______________________________________________
> kvmarm mailing list
> kvmarm@lists.cs.columbia.edu
> https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
Mark Rutland June 29, 2020, 11 a.m. UTC | #2
On Mon, Jun 29, 2020 at 07:18:41PM +1000, Gavin Shan wrote:
> There is a set of inline functions defined in kvm_emulate.h. Those
> functions read the ESR from the vCPU fault information struct and then
> operate on it, so they are tied to the vCPU fault information and the
> vCPU struct. This limits their usage scope.
> 
> This detaches these functions from the vCPU struct by introducing
> another set of inline functions in esr.h that manipulate a specified
> ESR value. With that, the inline functions defined in kvm_emulate.h
> can call these inline functions (in esr.h) instead. This shouldn't
> cause any functional changes.
> 
> Signed-off-by: Gavin Shan <gshan@redhat.com>

TBH, I'm not sure that this patch makes much sense on its own.

We already use vcpu_get_esr(), which is the bit that'd have to change if
we didn't pass the vcpu around, and the new helpers are just consuming
the value in a different way rather than a necessarily simpler way.

Further comments on that front below.

> ---
>  arch/arm64/include/asm/esr.h         | 32 +++++++++++++++++++++
>  arch/arm64/include/asm/kvm_emulate.h | 43 ++++++++++++----------------
>  2 files changed, 51 insertions(+), 24 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
> index 035003acfa87..950204c5fbe1 100644
> --- a/arch/arm64/include/asm/esr.h
> +++ b/arch/arm64/include/asm/esr.h
> @@ -326,6 +326,38 @@ static inline bool esr_is_data_abort(u32 esr)
>  	return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
>  }
>  
> +#define ESR_DECLARE_CHECK_FUNC(name, field)	\
> +static inline bool esr_is_##name(u32 esr)	\
> +{						\
> +	return !!(esr & (field));		\
> +}
> +#define ESR_DECLARE_GET_FUNC(name, mask, shift)	\
> +static inline u32 esr_get_##name(u32 esr)	\
> +{						\
> +	return ((esr & (mask)) >> (shift));	\
> +}
> +
> +ESR_DECLARE_CHECK_FUNC(il_32bit,   ESR_ELx_IL);
> +ESR_DECLARE_CHECK_FUNC(condition,  ESR_ELx_CV);
> +ESR_DECLARE_CHECK_FUNC(dabt_valid, ESR_ELx_ISV);
> +ESR_DECLARE_CHECK_FUNC(dabt_sse,   ESR_ELx_SSE);
> +ESR_DECLARE_CHECK_FUNC(dabt_sf,    ESR_ELx_SF);
> +ESR_DECLARE_CHECK_FUNC(dabt_s1ptw, ESR_ELx_S1PTW);
> +ESR_DECLARE_CHECK_FUNC(dabt_write, ESR_ELx_WNR);
> +ESR_DECLARE_CHECK_FUNC(dabt_cm,    ESR_ELx_CM);
> +
> +ESR_DECLARE_GET_FUNC(class,        ESR_ELx_EC_MASK,      ESR_ELx_EC_SHIFT);
> +ESR_DECLARE_GET_FUNC(fault,        ESR_ELx_FSC,          0);
> +ESR_DECLARE_GET_FUNC(fault_type,   ESR_ELx_FSC_TYPE,     0);
> +ESR_DECLARE_GET_FUNC(condition,    ESR_ELx_COND_MASK,    ESR_ELx_COND_SHIFT);
> +ESR_DECLARE_GET_FUNC(hvc_imm,      ESR_ELx_xVC_IMM_MASK, 0);
> +ESR_DECLARE_GET_FUNC(dabt_iss_nisv_sanitized,
> +		     (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC), 0);
> +ESR_DECLARE_GET_FUNC(dabt_rd,      ESR_ELx_SRT_MASK,     ESR_ELx_SRT_SHIFT);
> +ESR_DECLARE_GET_FUNC(dabt_as,      ESR_ELx_SAS,          ESR_ELx_SAS_SHIFT);
> +ESR_DECLARE_GET_FUNC(sys_rt,       ESR_ELx_SYS64_ISS_RT_MASK,
> +				   ESR_ELx_SYS64_ISS_RT_SHIFT);

I'm really not keen on this, as I think it's abstracting the problem at
the wrong level, hiding information and making things harder to reason
about rather than abstracting it away.

I strongly suspect the right thing to do is use FIELD_GET() in-place in
the functions below, e.g.

   !!FIELD_GET(ESR_ELx_IL, esr);

... rather than:

   esr_get_il_32bit(esr);

... as that avoids the wrapper entirely, minimizing indirection and
making the codebase simpler to navigate.

For the cases where we *really* want a helper, I'd rather write those
out explicitly, e.g.

#define esr_get_hvc_imm(esr)	FIELD_GET(ESR_ELx_xVC_IMM_MASK, esr)

... but I'm not sure if we really need those given these are mostly used
*once* below.
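
As a concrete sketch of that in-place approach, kvm_vcpu_dabt_get_rd()
would become the following (FIELD_GET() from <linux/bitfield.h> takes
the mask as its first argument):

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	/* Mask and shift out the SRT field without a named wrapper. */
	return FIELD_GET(ESR_ELx_SRT_MASK, kvm_vcpu_get_esr(vcpu));
}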

> +
>  const char *esr_get_class_string(u32 esr);
>  #endif /* __ASSEMBLY */
>  
> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
> index c9ba0df47f7d..9337d90c517f 100644
> --- a/arch/arm64/include/asm/kvm_emulate.h
> +++ b/arch/arm64/include/asm/kvm_emulate.h
> @@ -266,12 +266,8 @@ static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
>  
>  static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
>  {
> -	u32 esr = kvm_vcpu_get_esr(vcpu);
> -
> -	if (esr & ESR_ELx_CV)
> -		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
> -
> -	return -1;
> +	return esr_is_condition(kvm_vcpu_get_esr(vcpu)) ?
> +	       esr_get_condition(kvm_vcpu_get_esr(vcpu)) : -1;
>  }

Do we really need to change the structure of this code? I thought this
was purely about decoupling helpers from the vcpu struct. This could
have stayed as:

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	if (esr_is_condition(esr))
		return esr_get_condition(esr);
	
	return -1;
}

... or:

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	if (FIELD_GET(ESR_ELx_CV, esr))
		return FIELD_GET(ESR_ELx_COND_MASK, esr);
	
	return -1;
}

Thanks,
Mark.

>  
>  static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
> @@ -291,79 +287,79 @@ static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
>  
>  static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
> +	return esr_get_hvc_imm(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
> +	return esr_is_dabt_valid(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
> +	return esr_get_dabt_iss_nisv_sanitized(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
> +	return esr_is_dabt_sse(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
> +	return esr_is_dabt_sf(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
>  {
> -	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
> +	return esr_get_dabt_rd(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
> +	return esr_is_dabt_s1ptw(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
> -		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
> +	return esr_is_dabt_write(kvm_vcpu_get_esr(vcpu)) ||
> +	       esr_is_dabt_s1ptw(kvm_vcpu_get_esr(vcpu)); /* AF/DBM update */
>  }
>  
>  static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
> +	return esr_is_dabt_cm(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
>  {
> -	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
> +	return 1 << esr_get_dabt_as(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  /* This one is not specific to Data Abort */
>  static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
>  {
> -	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
> +	return esr_is_il_32bit(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
>  {
> -	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
> +	return esr_get_class(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
> +	return esr_get_class(kvm_vcpu_get_esr(vcpu)) == ESR_ELx_EC_IABT_LOW;
>  }
>  
>  static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
> +	return esr_get_fault(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
>  {
> -	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
> +	return esr_get_fault_type(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
> @@ -387,8 +383,7 @@ static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
>  
>  static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
>  {
> -	u32 esr = kvm_vcpu_get_esr(vcpu);
> -	return ESR_ELx_SYS64_ISS_RT(esr);
> +	return esr_get_sys_rt(kvm_vcpu_get_esr(vcpu));
>  }
>  
>  static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
> -- 
> 2.23.0
>
Gavin Shan June 30, 2020, 12:16 a.m. UTC | #3
Hi Mark,

On 6/29/20 9:00 PM, Mark Rutland wrote:
> On Mon, Jun 29, 2020 at 07:18:41PM +1000, Gavin Shan wrote:
>> There is a set of inline functions defined in kvm_emulate.h. Those
>> functions read the ESR from the vCPU fault information struct and then
>> operate on it, so they are tied to the vCPU fault information and the
>> vCPU struct. This limits their usage scope.
>>
>> This detaches these functions from the vCPU struct by introducing
>> another set of inline functions in esr.h that manipulate a specified
>> ESR value. With that, the inline functions defined in kvm_emulate.h
>> can call these inline functions (in esr.h) instead. This shouldn't
>> cause any functional changes.
>>
>> Signed-off-by: Gavin Shan <gshan@redhat.com>
> 
> TBH, I'm not sure that this patch makes much sense on its own.
> 
> We already use vcpu_get_esr(), which is the bit that'd have to change if
> we didn't pass the vcpu around, and the new helpers are just consuming
> the value in a different way rather than a necessarily simpler way.
> 
> Further comments on that front below.
> 
>> ---
>>   arch/arm64/include/asm/esr.h         | 32 +++++++++++++++++++++
>>   arch/arm64/include/asm/kvm_emulate.h | 43 ++++++++++++----------------
>>   2 files changed, 51 insertions(+), 24 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
>> index 035003acfa87..950204c5fbe1 100644
>> --- a/arch/arm64/include/asm/esr.h
>> +++ b/arch/arm64/include/asm/esr.h
>> @@ -326,6 +326,38 @@ static inline bool esr_is_data_abort(u32 esr)
>>   	return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
>>   }
>>   
>> +#define ESR_DECLARE_CHECK_FUNC(name, field)	\
>> +static inline bool esr_is_##name(u32 esr)	\
>> +{						\
>> +	return !!(esr & (field));		\
>> +}
>> +#define ESR_DECLARE_GET_FUNC(name, mask, shift)	\
>> +static inline u32 esr_get_##name(u32 esr)	\
>> +{						\
>> +	return ((esr & (mask)) >> (shift));	\
>> +}
>> +
>> +ESR_DECLARE_CHECK_FUNC(il_32bit,   ESR_ELx_IL);
>> +ESR_DECLARE_CHECK_FUNC(condition,  ESR_ELx_CV);
>> +ESR_DECLARE_CHECK_FUNC(dabt_valid, ESR_ELx_ISV);
>> +ESR_DECLARE_CHECK_FUNC(dabt_sse,   ESR_ELx_SSE);
>> +ESR_DECLARE_CHECK_FUNC(dabt_sf,    ESR_ELx_SF);
>> +ESR_DECLARE_CHECK_FUNC(dabt_s1ptw, ESR_ELx_S1PTW);
>> +ESR_DECLARE_CHECK_FUNC(dabt_write, ESR_ELx_WNR);
>> +ESR_DECLARE_CHECK_FUNC(dabt_cm,    ESR_ELx_CM);
>> +
>> +ESR_DECLARE_GET_FUNC(class,        ESR_ELx_EC_MASK,      ESR_ELx_EC_SHIFT);
>> +ESR_DECLARE_GET_FUNC(fault,        ESR_ELx_FSC,          0);
>> +ESR_DECLARE_GET_FUNC(fault_type,   ESR_ELx_FSC_TYPE,     0);
>> +ESR_DECLARE_GET_FUNC(condition,    ESR_ELx_COND_MASK,    ESR_ELx_COND_SHIFT);
>> +ESR_DECLARE_GET_FUNC(hvc_imm,      ESR_ELx_xVC_IMM_MASK, 0);
>> +ESR_DECLARE_GET_FUNC(dabt_iss_nisv_sanitized,
>> +		     (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC), 0);
>> +ESR_DECLARE_GET_FUNC(dabt_rd,      ESR_ELx_SRT_MASK,     ESR_ELx_SRT_SHIFT);
>> +ESR_DECLARE_GET_FUNC(dabt_as,      ESR_ELx_SAS,          ESR_ELx_SAS_SHIFT);
>> +ESR_DECLARE_GET_FUNC(sys_rt,       ESR_ELx_SYS64_ISS_RT_MASK,
>> +				   ESR_ELx_SYS64_ISS_RT_SHIFT);
> 
> I'm really not keen on this, as I think it's abstracting the problem at
> the wrong level, hiding information and making things harder to reason
> about rather than abstracting it away.
> 
> I strongly suspect the right thing to do is use FIELD_GET() in-place in
> the functions below, e.g.
> 
>     !!FIELD_GET(ESR_ELx_IL, esr);
> 
> ... rather than:
> 
>     esr_get_il_32bit(esr);
> 
> ... as that avoids the wrapper entirely, minimizing indirection and
> making the codebase simpler to navigate.
> 
> For the cases where we *really* want a helper, I'd rather write those
> out explicitly, e.g.
> 

There will be no functional difference except using FIELD_GET() to make
the code more explicit. Maybe I didn't fully understand your comments here.
Please let me know if something like the below is what you expect:

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index c9ba0df47f7d..e8294edcd8f4 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -343,7 +343,7 @@ static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *
  /* This one is not specific to Data Abort */
  static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
  {
-       return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
+       return !!FIELD_GET(ESR_ELx_IL, kvm_vcpu_get_esr(vcpu));
  }

If my understanding is correct, I think we needn't change the code
and this patch can be dropped.

> #define esr_get_hvc_imm(esr)	FIELD_GET(ESR_ELx_xVC_IMM_MASK, esr)
> 
> ... but I'm not sure if we really need those given these are mostly used
> *once* below.
> 

We don't need these for now, but they will be needed when the next revision
of the async page fault series is posted. Let's ignore this requirement for now
because I can revisit it when the async page fault patchset is posted.
At that time, we can have accessors defined in esr.h and have the helpers in
kvm_emulate.h use those accessors. It's similar to what you're suggesting.

#define esr_get_hvc_imm(esr)	FIELD_GET(ESR_ELx_xVC_IMM_MASK, esr)

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return esr_get_hvc_imm(kvm_vcpu_get_esr(vcpu));
}


By the way, it's a long way to reach that point because I'm still in
the middle of working on virtualizing SDEI.

>> +
>>   const char *esr_get_class_string(u32 esr);
>>   #endif /* __ASSEMBLY */
>>   
>> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
>> index c9ba0df47f7d..9337d90c517f 100644
>> --- a/arch/arm64/include/asm/kvm_emulate.h
>> +++ b/arch/arm64/include/asm/kvm_emulate.h
>> @@ -266,12 +266,8 @@ static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
>>   
>>   static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
>>   {
>> -	u32 esr = kvm_vcpu_get_esr(vcpu);
>> -
>> -	if (esr & ESR_ELx_CV)
>> -		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
>> -
>> -	return -1;
>> +	return esr_is_condition(kvm_vcpu_get_esr(vcpu)) ?
>> +	       esr_get_condition(kvm_vcpu_get_esr(vcpu)) : -1;
>>   }
> 
> Do we really need to change the structure of this code? I thought this
> was purely about decoupling helpers from the vcpu struct. This could
> have stayed as:
> 
> static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
> {
> 	u32 esr = kvm_vcpu_get_esr(vcpu);
> 
> 	if (esr_is_condition(esr))
> 		return esr_get_condition(esr);
> 	
> 	return -1;
> }
> 
> ... or:
> 
> static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
> {
> 	u32 esr = kvm_vcpu_get_esr(vcpu);
> 
> 	if (FIELD_GET(ESR_ELx_CV, esr))
> 		return FIELD_GET(ESR_ELx_COND_MASK, esr);
> 	
> 	return -1;
> }
> 

It's not necessary to change the structure of the code, but it does
reduce the number of lines of code. It's kind of my personal taste :)

[...]

Thanks,
Gavin
Gavin Shan June 30, 2020, 12:28 a.m. UTC | #4
Hi Andrew,

On 6/29/20 7:59 PM, Andrew Scull wrote:
> On Mon, Jun 29, 2020 at 07:18:41PM +1000, Gavin Shan wrote:
>> There is a set of inline functions defined in kvm_emulate.h. Those
>> functions read the ESR from the vCPU fault information struct and then
>> operate on it, so they are tied to the vCPU fault information and the
>> vCPU struct. This limits their usage scope.
>>
>> This detaches these functions from the vCPU struct by introducing
>> another set of inline functions in esr.h that manipulate a specified
>> ESR value. With that, the inline functions defined in kvm_emulate.h
>> can call these inline functions (in esr.h) instead. This shouldn't
>> cause any functional changes.
>>
>> Signed-off-by: Gavin Shan <gshan@redhat.com>
>> ---
>>   arch/arm64/include/asm/esr.h         | 32 +++++++++++++++++++++
>>   arch/arm64/include/asm/kvm_emulate.h | 43 ++++++++++++----------------
>>   2 files changed, 51 insertions(+), 24 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
>> index 035003acfa87..950204c5fbe1 100644
>> --- a/arch/arm64/include/asm/esr.h
>> +++ b/arch/arm64/include/asm/esr.h
>> @@ -326,6 +326,38 @@ static inline bool esr_is_data_abort(u32 esr)
>>   	return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
>>   }
>>   
>> +#define ESR_DECLARE_CHECK_FUNC(name, field)	\
>> +static inline bool esr_is_##name(u32 esr)	\
>> +{						\
>> +	return !!(esr & (field));		\
>> +}
>> +#define ESR_DECLARE_GET_FUNC(name, mask, shift)	\
>> +static inline u32 esr_get_##name(u32 esr)	\
>> +{						\
>> +	return ((esr & (mask)) >> (shift));	\
>> +}
> 
> Should these be named DEFINE rather than DECLARE, given they also
> include the function definition?
> 

Thanks for your comments. Indeed, I think DEFINE is better than
DECLARE. These newly introduced helpers are unlikely to be needed,
based on the comments (and follow-up) from Mark Rutland.

>> +
>> +ESR_DECLARE_CHECK_FUNC(il_32bit,   ESR_ELx_IL);
>> +ESR_DECLARE_CHECK_FUNC(condition,  ESR_ELx_CV);
>> +ESR_DECLARE_CHECK_FUNC(dabt_valid, ESR_ELx_ISV);
>> +ESR_DECLARE_CHECK_FUNC(dabt_sse,   ESR_ELx_SSE);
>> +ESR_DECLARE_CHECK_FUNC(dabt_sf,    ESR_ELx_SF);
>> +ESR_DECLARE_CHECK_FUNC(dabt_s1ptw, ESR_ELx_S1PTW);
>> +ESR_DECLARE_CHECK_FUNC(dabt_write, ESR_ELx_WNR);
>> +ESR_DECLARE_CHECK_FUNC(dabt_cm,    ESR_ELx_CM);
>> +
>> +ESR_DECLARE_GET_FUNC(class,        ESR_ELx_EC_MASK,      ESR_ELx_EC_SHIFT);
>> +ESR_DECLARE_GET_FUNC(fault,        ESR_ELx_FSC,          0);
>> +ESR_DECLARE_GET_FUNC(fault_type,   ESR_ELx_FSC_TYPE,     0);
>> +ESR_DECLARE_GET_FUNC(condition,    ESR_ELx_COND_MASK,    ESR_ELx_COND_SHIFT);
>> +ESR_DECLARE_GET_FUNC(hvc_imm,      ESR_ELx_xVC_IMM_MASK, 0);
>> +ESR_DECLARE_GET_FUNC(dabt_iss_nisv_sanitized,
>> +		     (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC), 0);
>> +ESR_DECLARE_GET_FUNC(dabt_rd,      ESR_ELx_SRT_MASK,     ESR_ELx_SRT_SHIFT);
>> +ESR_DECLARE_GET_FUNC(dabt_as,      ESR_ELx_SAS,          ESR_ELx_SAS_SHIFT);
>> +ESR_DECLARE_GET_FUNC(sys_rt,       ESR_ELx_SYS64_ISS_RT_MASK,
>> +				   ESR_ELx_SYS64_ISS_RT_SHIFT);
>> +
>>   const char *esr_get_class_string(u32 esr);
>>   #endif /* __ASSEMBLY */
>>   
>> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
>> index c9ba0df47f7d..9337d90c517f 100644
>> --- a/arch/arm64/include/asm/kvm_emulate.h
>> +++ b/arch/arm64/include/asm/kvm_emulate.h
>> @@ -266,12 +266,8 @@ static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
>>   
>>   static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
>>   {
>> -	u32 esr = kvm_vcpu_get_esr(vcpu);
>> -
>> -	if (esr & ESR_ELx_CV)
>> -		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
>> -
>> -	return -1;
>> +	return esr_is_condition(kvm_vcpu_get_esr(vcpu)) ?
>> +	       esr_get_condition(kvm_vcpu_get_esr(vcpu)) : -1;
>>   }
>>   
>>   static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
>> @@ -291,79 +287,79 @@ static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
>>   
>>   static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
>>   {
>> -	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
>> +	return esr_get_hvc_imm(kvm_vcpu_get_esr(vcpu));
>>   }
> 
> It feels a little strange that the raw ESR case uses macro magic
> but the vcpu cases here write everything out in full. Was there a
> reason that I'm missing, or is there a chance to apply a consistent
> approach?
> 

The request was raised when the RFCv2 async page fault patchset was
posted. When an async page fault is handled, the ESR is cached in
advance, not fetched from the vCPU struct. So we want to detach the
helpers defined in kvm_emulate.h from the vCPU struct. I hope the
discussion in the following link helps you understand a bit more:

https://lore.kernel.org/kvmarm/20200508032919.52147-5-gshan@redhat.com/
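
As a sketch of that use case (the function below is hypothetical; it
only illustrates consuming a cached ESR value without a vCPU pointer):

static bool async_pf_is_valid_dabt(u32 cached_esr)
{
	/* Operates on the cached value; no vCPU struct is needed. */
	return esr_is_data_abort(cached_esr) &&
	       esr_is_dabt_valid(cached_esr);
}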

> I'm not sure of the style preferences, but if it goes the macro path,
> the esr field definitions could be reused with something x-macro-like
> to get the kvm_emulate.h and esr.h functions generated from a single
> list of the esr fields.
> 

Yeah, it's the same thing as Mark Rutland suggested. As I replied to
his comments, it can be postponed until the next revision of the async
page fault patchset is posted.

[...]

Thanks,
Gavin
Mark Rutland June 30, 2020, 8 a.m. UTC | #5
On Tue, Jun 30, 2020 at 10:16:07AM +1000, Gavin Shan wrote:
> Hi Mark,
> 
> On 6/29/20 9:00 PM, Mark Rutland wrote:
> > On Mon, Jun 29, 2020 at 07:18:41PM +1000, Gavin Shan wrote:
> > > There is a set of inline functions defined in kvm_emulate.h. Those
> > > functions read the ESR from the vCPU fault information struct and then
> > > operate on it, so they are tied to the vCPU fault information and the
> > > vCPU struct. This limits their usage scope.
> > > 
> > > This detaches these functions from the vCPU struct by introducing
> > > another set of inline functions in esr.h that manipulate a specified
> > > ESR value. With that, the inline functions defined in kvm_emulate.h
> > > can call these inline functions (in esr.h) instead. This shouldn't
> > > cause any functional changes.
> > > 
> > > Signed-off-by: Gavin Shan <gshan@redhat.com>
> > 
> > TBH, I'm not sure that this patch makes much sense on its own.
> > 
> > We already use vcpu_get_esr(), which is the bit that'd have to change if
> > we didn't pass the vcpu around, and the new helpers are just consuming
> > the value in a different way rather than a necessarily simpler way.
> > 
> > Further comments on that front below.
> > 
> > > ---
> > >   arch/arm64/include/asm/esr.h         | 32 +++++++++++++++++++++
> > >   arch/arm64/include/asm/kvm_emulate.h | 43 ++++++++++++----------------
> > >   2 files changed, 51 insertions(+), 24 deletions(-)
> > > 
> > > diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
> > > index 035003acfa87..950204c5fbe1 100644
> > > --- a/arch/arm64/include/asm/esr.h
> > > +++ b/arch/arm64/include/asm/esr.h
> > > @@ -326,6 +326,38 @@ static inline bool esr_is_data_abort(u32 esr)
> > >   	return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
> > >   }
> > > +#define ESR_DECLARE_CHECK_FUNC(name, field)	\
> > > +static inline bool esr_is_##name(u32 esr)	\
> > > +{						\
> > > +	return !!(esr & (field));		\
> > > +}
> > > +#define ESR_DECLARE_GET_FUNC(name, mask, shift)	\
> > > +static inline u32 esr_get_##name(u32 esr)	\
> > > +{						\
> > > +	return ((esr & (mask)) >> (shift));	\
> > > +}
> > > +
> > > +ESR_DECLARE_CHECK_FUNC(il_32bit,   ESR_ELx_IL);
> > > +ESR_DECLARE_CHECK_FUNC(condition,  ESR_ELx_CV);
> > > +ESR_DECLARE_CHECK_FUNC(dabt_valid, ESR_ELx_ISV);
> > > +ESR_DECLARE_CHECK_FUNC(dabt_sse,   ESR_ELx_SSE);
> > > +ESR_DECLARE_CHECK_FUNC(dabt_sf,    ESR_ELx_SF);
> > > +ESR_DECLARE_CHECK_FUNC(dabt_s1ptw, ESR_ELx_S1PTW);
> > > +ESR_DECLARE_CHECK_FUNC(dabt_write, ESR_ELx_WNR);
> > > +ESR_DECLARE_CHECK_FUNC(dabt_cm,    ESR_ELx_CM);
> > > +
> > > +ESR_DECLARE_GET_FUNC(class,        ESR_ELx_EC_MASK,      ESR_ELx_EC_SHIFT);
> > > +ESR_DECLARE_GET_FUNC(fault,        ESR_ELx_FSC,          0);
> > > +ESR_DECLARE_GET_FUNC(fault_type,   ESR_ELx_FSC_TYPE,     0);
> > > +ESR_DECLARE_GET_FUNC(condition,    ESR_ELx_COND_MASK,    ESR_ELx_COND_SHIFT);
> > > +ESR_DECLARE_GET_FUNC(hvc_imm,      ESR_ELx_xVC_IMM_MASK, 0);
> > > +ESR_DECLARE_GET_FUNC(dabt_iss_nisv_sanitized,
> > > +		     (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC), 0);
> > > +ESR_DECLARE_GET_FUNC(dabt_rd,      ESR_ELx_SRT_MASK,     ESR_ELx_SRT_SHIFT);
> > > +ESR_DECLARE_GET_FUNC(dabt_as,      ESR_ELx_SAS,          ESR_ELx_SAS_SHIFT);
> > > +ESR_DECLARE_GET_FUNC(sys_rt,       ESR_ELx_SYS64_ISS_RT_MASK,
> > > +				   ESR_ELx_SYS64_ISS_RT_SHIFT);
> > 
> > I'm really not keen on this, as I think it's abstracting the problem at
> > the wrong level, hiding information and making things harder to reason
> > about rather than abstracting it away.
> > 
> > I strongly suspect the right thing to do is use FIELD_GET() in-place in
> > the functions below, e.g.
> > 
> >     !!FIELD_GET(ESR_ELx_IL, esr);
> > 
> > ... rather than:
> > 
> >     esr_get_il_32bit(esr);
> > 
> > ... as that avoids the wrapper entirely, minimizing indirection and
> > making the codebase simpler to navigate.
> > 
> > For the cases where we *really* want a helper, I'd rather write those
> > out explicitly, e.g.
> 
> There will be no functional difference except using FIELD_GET() to make
> the code more explicit. Maybe I didn't fully understand your comments here.
> Please let me know if something like the below is what you expect:

Sorry; my point here was just that using FIELD_GET() explicitly was
preferable to generating an entire function with
ESR_DECLARE_CHECK_FUNC() if the goal was just to remove the explicit
mask-and-shift at each callsite.

I agree they'd have the same functional behaviour, but I think the
explicit FIELD_GET() approach is easier to read (and possible to search
for), which makes code maintenance much easier.

> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
> index c9ba0df47f7d..e8294edcd8f4 100644
> --- a/arch/arm64/include/asm/kvm_emulate.h
> +++ b/arch/arm64/include/asm/kvm_emulate.h
> @@ -343,7 +343,7 @@ static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *
>  /* This one is not specific to Data Abort */
>  static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
>  {
> -       return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
> +       return !!FIELD_GET(ESR_ELx_IL, kvm_vcpu_get_esr(vcpu));
>  }
> 
> If my understanding is correct, I think we needn't change the code
> and this patch can be dropped.

If you don't see a need for a change, I'm also happy for this to be
dropped.

[...]

> > #define esr_get_hvc_imm(esr)	FIELD_GET(ESR_ELx_xVC_IMM_MASK, esr)
> > 
> > ... but I'm not sure if we really need those given these are mostly used
> > *once* below.
> > 
> 
> We don't need these for now, but they will be needed when the next revision
> of the async page fault series is posted. Let's ignore this requirement for now
> because I can revisit it when the async page fault patchset is posted.
> At that time, we can have accessors defined in esr.h and have the helpers in
> kvm_emulate.h use those accessors. It's similar to what you're suggesting.
> 
> #define esr_get_hvc_imm(esr)	FIELD_GET(ESR_ELx_xVC_IMM_MASK, esr)
> 
> static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
> {
> 	return esr_get_hvc_imm(kvm_vcpu_get_esr(vcpu));
> }

That'd be fine by me.

Thanks,
Mark.

Patch

diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 035003acfa87..950204c5fbe1 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -326,6 +326,38 @@  static inline bool esr_is_data_abort(u32 esr)
 	return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
 }
 
+#define ESR_DECLARE_CHECK_FUNC(name, field)	\
+static inline bool esr_is_##name(u32 esr)	\
+{						\
+	return !!(esr & (field));		\
+}
+#define ESR_DECLARE_GET_FUNC(name, mask, shift)	\
+static inline u32 esr_get_##name(u32 esr)	\
+{						\
+	return ((esr & (mask)) >> (shift));	\
+}
+
+ESR_DECLARE_CHECK_FUNC(il_32bit,   ESR_ELx_IL);
+ESR_DECLARE_CHECK_FUNC(condition,  ESR_ELx_CV);
+ESR_DECLARE_CHECK_FUNC(dabt_valid, ESR_ELx_ISV);
+ESR_DECLARE_CHECK_FUNC(dabt_sse,   ESR_ELx_SSE);
+ESR_DECLARE_CHECK_FUNC(dabt_sf,    ESR_ELx_SF);
+ESR_DECLARE_CHECK_FUNC(dabt_s1ptw, ESR_ELx_S1PTW);
+ESR_DECLARE_CHECK_FUNC(dabt_write, ESR_ELx_WNR);
+ESR_DECLARE_CHECK_FUNC(dabt_cm,    ESR_ELx_CM);
+
+ESR_DECLARE_GET_FUNC(class,        ESR_ELx_EC_MASK,      ESR_ELx_EC_SHIFT);
+ESR_DECLARE_GET_FUNC(fault,        ESR_ELx_FSC,          0);
+ESR_DECLARE_GET_FUNC(fault_type,   ESR_ELx_FSC_TYPE,     0);
+ESR_DECLARE_GET_FUNC(condition,    ESR_ELx_COND_MASK,    ESR_ELx_COND_SHIFT);
+ESR_DECLARE_GET_FUNC(hvc_imm,      ESR_ELx_xVC_IMM_MASK, 0);
+ESR_DECLARE_GET_FUNC(dabt_iss_nisv_sanitized,
+		     (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC), 0);
+ESR_DECLARE_GET_FUNC(dabt_rd,      ESR_ELx_SRT_MASK,     ESR_ELx_SRT_SHIFT);
+ESR_DECLARE_GET_FUNC(dabt_as,      ESR_ELx_SAS,          ESR_ELx_SAS_SHIFT);
+ESR_DECLARE_GET_FUNC(sys_rt,       ESR_ELx_SYS64_ISS_RT_MASK,
+				   ESR_ELx_SYS64_ISS_RT_SHIFT);
+
 const char *esr_get_class_string(u32 esr);
 #endif /* __ASSEMBLY */
 
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index c9ba0df47f7d..9337d90c517f 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -266,12 +266,8 @@  static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
 
 static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_esr(vcpu);
-
-	if (esr & ESR_ELx_CV)
-		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
-
-	return -1;
+	return esr_is_condition(kvm_vcpu_get_esr(vcpu)) ?
+	       esr_get_condition(kvm_vcpu_get_esr(vcpu)) : -1;
 }
 
 static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
@@ -291,79 +287,79 @@  static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
 
 static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+	return esr_get_hvc_imm(kvm_vcpu_get_esr(vcpu));
 }
 
 static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
+	return esr_is_dabt_valid(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
+	return esr_get_dabt_iss_nisv_sanitized(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
+	return esr_is_dabt_sse(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
+	return esr_is_dabt_sf(kvm_vcpu_get_esr(vcpu));
 }
 
 static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
-	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+	return esr_get_dabt_rd(kvm_vcpu_get_esr(vcpu));
 }
 
 static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
+	return esr_is_dabt_s1ptw(kvm_vcpu_get_esr(vcpu));
 }
 
 static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
-		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
+	return esr_is_dabt_write(kvm_vcpu_get_esr(vcpu)) ||
+	       esr_is_dabt_s1ptw(kvm_vcpu_get_esr(vcpu)); /* AF/DBM update */
 }
 
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
+	return esr_is_dabt_cm(kvm_vcpu_get_esr(vcpu));
 }
 
 static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
-	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+	return 1 << esr_get_dabt_as(kvm_vcpu_get_esr(vcpu));
 }
 
 /* This one is not specific to Data Abort */
 static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
+	return esr_is_il_32bit(kvm_vcpu_get_esr(vcpu));
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
+	return esr_get_class(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
+	return esr_get_class(kvm_vcpu_get_esr(vcpu)) == ESR_ELx_EC_IABT_LOW;
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
+	return esr_get_fault(kvm_vcpu_get_esr(vcpu));
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
+	return esr_get_fault_type(kvm_vcpu_get_esr(vcpu));
 }
 
 static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
@@ -387,8 +383,7 @@  static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 
 static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_esr(vcpu);
-	return ESR_ELx_SYS64_ISS_RT(esr);
+	return esr_get_sys_rt(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)