diff mbox series

[v2,03/11] KVM: arm64: Encapsulate reset request logic in a helper function

Message ID 20210923191610.3814698-4-oupton@google.com (mailing list archive)
State New, archived
Headers show
Series KVM: arm64: Implement PSCI SYSTEM_SUSPEND support | expand

Commit Message

Oliver Upton Sept. 23, 2021, 7:16 p.m. UTC
In its implementation of the PSCI function, KVM needs to request that a
target vCPU resets before its next entry into the guest. Wrap the logic
for requesting a reset in a function for later use by other implemented
PSCI calls.

No functional change intended.

Signed-off-by: Oliver Upton <oupton@google.com>
---
 arch/arm64/kvm/psci.c | 59 +++++++++++++++++++++++++------------------
 1 file changed, 35 insertions(+), 24 deletions(-)

Comments

Reiji Watanabe Oct. 1, 2021, 6:04 a.m. UTC | #1
On Thu, Sep 23, 2021 at 12:16 PM Oliver Upton <oupton@google.com> wrote:
>
> In its implementation of the PSCI function, KVM needs to request that a
> target vCPU resets before its next entry into the guest. Wrap the logic
> for requesting a reset in a function for later use by other implemented
> PSCI calls.
>
> No functional change intended.
>
> Signed-off-by: Oliver Upton <oupton@google.com>
> ---
>  arch/arm64/kvm/psci.c | 59 +++++++++++++++++++++++++------------------
>  1 file changed, 35 insertions(+), 24 deletions(-)
>
> diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
> index 310b9cb2b32b..bb59b692998b 100644
> --- a/arch/arm64/kvm/psci.c
> +++ b/arch/arm64/kvm/psci.c
> @@ -64,9 +64,40 @@ static inline bool kvm_psci_valid_affinity(unsigned long affinity)
>         return !(affinity & ~MPIDR_HWID_BITMASK);
>  }
>
> -static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> +static void kvm_psci_vcpu_request_reset(struct kvm_vcpu *vcpu,
> +                                       unsigned long entry_addr,
> +                                       unsigned long context_id,
> +                                       bool big_endian)
>  {
>         struct vcpu_reset_state *reset_state;
> +
> +       lockdep_assert_held(&vcpu->kvm->lock);
> +
> +       reset_state = &vcpu->arch.reset_state;
> +       reset_state->pc = entry_addr;
> +
> +       /* Propagate caller endianness */
> +       reset_state->be = big_endian;
> +
> +       /*
> +        * NOTE: We always update r0 (or x0) because for PSCI v0.1
> +        * the general purpose registers are undefined upon CPU_ON.
> +        */
> +       reset_state->r0 = context_id;
> +
> +       WRITE_ONCE(reset_state->reset, true);
> +       kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
> +
> +       /*
> +        * Make sure the reset request is observed if the change to
> +        * power_state is observed.
> +        */
> +       smp_wmb();
> +       vcpu->arch.power_off = false;
> +}
> +
> +static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> +{
>         struct kvm *kvm = source_vcpu->kvm;
>         struct kvm_vcpu *vcpu = NULL;
>         unsigned long cpu_id;
> @@ -90,29 +121,9 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
>                         return PSCI_RET_INVALID_PARAMS;
>         }
>
> -       reset_state = &vcpu->arch.reset_state;
> -
> -       reset_state->pc = smccc_get_arg2(source_vcpu);
> -
> -       /* Propagate caller endianness */
> -       reset_state->be = kvm_vcpu_is_be(source_vcpu);
> -
> -       /*
> -        * NOTE: We always update r0 (or x0) because for PSCI v0.1
> -        * the general purpose registers are undefined upon CPU_ON.
> -        */
> -       reset_state->r0 = smccc_get_arg3(source_vcpu);
> -
> -       WRITE_ONCE(reset_state->reset, true);
> -       kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
> -
> -       /*
> -        * Make sure the reset request is observed if the change to
> -        * power_state is observed.
> -        */
> -       smp_wmb();
> -
> -       vcpu->arch.power_off = false;
> +       kvm_psci_vcpu_request_reset(vcpu, smccc_get_arg2(source_vcpu),
> +                                   smccc_get_arg3(source_vcpu),
> +                                   kvm_vcpu_is_be(source_vcpu));
>         kvm_vcpu_wake_up(vcpu);
>
>         return PSCI_RET_SUCCESS;
> --
> 2.33.0.685.g46640cef36-goog

Reviewed-by: Reiji Watanabe <reijiw@google.com>

Not directly related to the patch, but the (original) code doesn't
do any sanity checking for the entry address although the PSCI spec says:

"INVALID_ADDRESS is returned when the entry point address is known
by the implementation to be invalid, because it is in a range that
is known not to be available to the caller."


Thanks,
Reiji
Oliver Upton Oct. 1, 2021, 4:10 p.m. UTC | #2
On Thu, Sep 30, 2021 at 11:05 PM Reiji Watanabe <reijiw@google.com> wrote:
>
> On Thu, Sep 23, 2021 at 12:16 PM Oliver Upton <oupton@google.com> wrote:
> >
> > In its implementation of the PSCI function, KVM needs to request that a
> > target vCPU resets before its next entry into the guest. Wrap the logic
> > for requesting a reset in a function for later use by other implemented
> > PSCI calls.
> >
> > No functional change intended.
> >
> > Signed-off-by: Oliver Upton <oupton@google.com>
> > ---
> >  arch/arm64/kvm/psci.c | 59 +++++++++++++++++++++++++------------------
> >  1 file changed, 35 insertions(+), 24 deletions(-)
> >
> > diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
> > index 310b9cb2b32b..bb59b692998b 100644
> > --- a/arch/arm64/kvm/psci.c
> > +++ b/arch/arm64/kvm/psci.c
> > @@ -64,9 +64,40 @@ static inline bool kvm_psci_valid_affinity(unsigned long affinity)
> >         return !(affinity & ~MPIDR_HWID_BITMASK);
> >  }
> >
> > -static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> > +static void kvm_psci_vcpu_request_reset(struct kvm_vcpu *vcpu,
> > +                                       unsigned long entry_addr,
> > +                                       unsigned long context_id,
> > +                                       bool big_endian)
> >  {
> >         struct vcpu_reset_state *reset_state;
> > +
> > +       lockdep_assert_held(&vcpu->kvm->lock);
> > +
> > +       reset_state = &vcpu->arch.reset_state;
> > +       reset_state->pc = entry_addr;
> > +
> > +       /* Propagate caller endianness */
> > +       reset_state->be = big_endian;
> > +
> > +       /*
> > +        * NOTE: We always update r0 (or x0) because for PSCI v0.1
> > +        * the general purpose registers are undefined upon CPU_ON.
> > +        */
> > +       reset_state->r0 = context_id;
> > +
> > +       WRITE_ONCE(reset_state->reset, true);
> > +       kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
> > +
> > +       /*
> > +        * Make sure the reset request is observed if the change to
> > +        * power_state is observed.
> > +        */
> > +       smp_wmb();
> > +       vcpu->arch.power_off = false;
> > +}
> > +
> > +static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> > +{
> >         struct kvm *kvm = source_vcpu->kvm;
> >         struct kvm_vcpu *vcpu = NULL;
> >         unsigned long cpu_id;
> > @@ -90,29 +121,9 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> >                         return PSCI_RET_INVALID_PARAMS;
> >         }
> >
> > -       reset_state = &vcpu->arch.reset_state;
> > -
> > -       reset_state->pc = smccc_get_arg2(source_vcpu);
> > -
> > -       /* Propagate caller endianness */
> > -       reset_state->be = kvm_vcpu_is_be(source_vcpu);
> > -
> > -       /*
> > -        * NOTE: We always update r0 (or x0) because for PSCI v0.1
> > -        * the general purpose registers are undefined upon CPU_ON.
> > -        */
> > -       reset_state->r0 = smccc_get_arg3(source_vcpu);
> > -
> > -       WRITE_ONCE(reset_state->reset, true);
> > -       kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
> > -
> > -       /*
> > -        * Make sure the reset request is observed if the change to
> > -        * power_state is observed.
> > -        */
> > -       smp_wmb();
> > -
> > -       vcpu->arch.power_off = false;
> > +       kvm_psci_vcpu_request_reset(vcpu, smccc_get_arg2(source_vcpu),
> > +                                   smccc_get_arg3(source_vcpu),
> > +                                   kvm_vcpu_is_be(source_vcpu));
> >         kvm_vcpu_wake_up(vcpu);
> >
> >         return PSCI_RET_SUCCESS;
> > --
> > 2.33.0.685.g46640cef36-goog
>
> Reviewed-by: Reiji Watanabe <reijiw@google.com>
>
> Not directly related to the patch, but the (original) code doesn't
> do any sanity checking for the entry address although the PSCI spec says:
>
> "INVALID_ADDRESS is returned when the entry point address is known
> by the implementation to be invalid, because it is in a range that
> is known not to be available to the caller."

Right, I had noticed the same but was a tad too lazy to address in
this series :) Thanks for the review, Reiji!

--
Best,
Oliver
Andrew Jones Oct. 5, 2021, 1:33 p.m. UTC | #3
On Fri, Oct 01, 2021 at 09:10:14AM -0700, Oliver Upton wrote:
> On Thu, Sep 30, 2021 at 11:05 PM Reiji Watanabe <reijiw@google.com> wrote:
> >
> > On Thu, Sep 23, 2021 at 12:16 PM Oliver Upton <oupton@google.com> wrote:
> > >
> > > In its implementation of the PSCI function, KVM needs to request that a
> > > target vCPU resets before its next entry into the guest. Wrap the logic
> > > for requesting a reset in a function for later use by other implemented
> > > PSCI calls.
> > >
> > > No functional change intended.
> > >
> > > Signed-off-by: Oliver Upton <oupton@google.com>
> > > ---
> > >  arch/arm64/kvm/psci.c | 59 +++++++++++++++++++++++++------------------
> > >  1 file changed, 35 insertions(+), 24 deletions(-)
> > >
> > > diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
> > > index 310b9cb2b32b..bb59b692998b 100644
> > > --- a/arch/arm64/kvm/psci.c
> > > +++ b/arch/arm64/kvm/psci.c
> > > @@ -64,9 +64,40 @@ static inline bool kvm_psci_valid_affinity(unsigned long affinity)
> > >         return !(affinity & ~MPIDR_HWID_BITMASK);
> > >  }
> > >
> > > -static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> > > +static void kvm_psci_vcpu_request_reset(struct kvm_vcpu *vcpu,
> > > +                                       unsigned long entry_addr,
> > > +                                       unsigned long context_id,
> > > +                                       bool big_endian)
> > >  {
> > >         struct vcpu_reset_state *reset_state;
> > > +
> > > +       lockdep_assert_held(&vcpu->kvm->lock);
> > > +
> > > +       reset_state = &vcpu->arch.reset_state;
> > > +       reset_state->pc = entry_addr;
> > > +
> > > +       /* Propagate caller endianness */
> > > +       reset_state->be = big_endian;
> > > +
> > > +       /*
> > > +        * NOTE: We always update r0 (or x0) because for PSCI v0.1
> > > +        * the general purpose registers are undefined upon CPU_ON.
> > > +        */
> > > +       reset_state->r0 = context_id;
> > > +
> > > +       WRITE_ONCE(reset_state->reset, true);
> > > +       kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
> > > +
> > > +       /*
> > > +        * Make sure the reset request is observed if the change to
> > > +        * power_state is observed.
> > > +        */
> > > +       smp_wmb();
> > > +       vcpu->arch.power_off = false;
> > > +}
> > > +
> > > +static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> > > +{
> > >         struct kvm *kvm = source_vcpu->kvm;
> > >         struct kvm_vcpu *vcpu = NULL;
> > >         unsigned long cpu_id;
> > > @@ -90,29 +121,9 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> > >                         return PSCI_RET_INVALID_PARAMS;
> > >         }
> > >
> > > -       reset_state = &vcpu->arch.reset_state;
> > > -
> > > -       reset_state->pc = smccc_get_arg2(source_vcpu);
> > > -
> > > -       /* Propagate caller endianness */
> > > -       reset_state->be = kvm_vcpu_is_be(source_vcpu);
> > > -
> > > -       /*
> > > -        * NOTE: We always update r0 (or x0) because for PSCI v0.1
> > > -        * the general purpose registers are undefined upon CPU_ON.
> > > -        */
> > > -       reset_state->r0 = smccc_get_arg3(source_vcpu);
> > > -
> > > -       WRITE_ONCE(reset_state->reset, true);
> > > -       kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
> > > -
> > > -       /*
> > > -        * Make sure the reset request is observed if the change to
> > > -        * power_state is observed.
> > > -        */
> > > -       smp_wmb();
> > > -
> > > -       vcpu->arch.power_off = false;
> > > +       kvm_psci_vcpu_request_reset(vcpu, smccc_get_arg2(source_vcpu),
> > > +                                   smccc_get_arg3(source_vcpu),
> > > +                                   kvm_vcpu_is_be(source_vcpu));
> > >         kvm_vcpu_wake_up(vcpu);
> > >
> > >         return PSCI_RET_SUCCESS;
> > > --
> > > 2.33.0.685.g46640cef36-goog
> >
> > Reviewed-by: Reiji Watanabe <reijiw@google.com>
> >
> > Not directly related to the patch, but the (original) code doesn't
> > do any sanity checking for the entry address although the PSCI spec says:
> >
> > "INVALID_ADDRESS is returned when the entry point address is known
> > by the implementation to be invalid, because it is in a range that
> > is known not to be available to the caller."
> 
> Right, I had noticed the same but was a tad too lazy to address in
> this series :) Thanks for the review, Reiji!
>

KVM doesn't reserve any subrange within [0 - max_ipa), afaik. So all
we need to do is check 'entry_addr < max_ipa', right?

Thanks,
drew
Andrew Jones Oct. 5, 2021, 1:35 p.m. UTC | #4
On Thu, Sep 23, 2021 at 07:16:02PM +0000, Oliver Upton wrote:
> In its implementation of the PSCI function, KVM needs to request that a
> target vCPU resets before its next entry into the guest. Wrap the logic
> for requesting a reset in a function for later use by other implemented
> PSCI calls.
> 
> No functional change intended.
> 
> Signed-off-by: Oliver Upton <oupton@google.com>
> ---
>  arch/arm64/kvm/psci.c | 59 +++++++++++++++++++++++++------------------
>  1 file changed, 35 insertions(+), 24 deletions(-)
> 
> diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
> index 310b9cb2b32b..bb59b692998b 100644
> --- a/arch/arm64/kvm/psci.c
> +++ b/arch/arm64/kvm/psci.c
> @@ -64,9 +64,40 @@ static inline bool kvm_psci_valid_affinity(unsigned long affinity)
>  	return !(affinity & ~MPIDR_HWID_BITMASK);
>  }
>  
> -static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> +static void kvm_psci_vcpu_request_reset(struct kvm_vcpu *vcpu,
> +					unsigned long entry_addr,
> +					unsigned long context_id,
> +					bool big_endian)
>  {
>  	struct vcpu_reset_state *reset_state;
> +
> +	lockdep_assert_held(&vcpu->kvm->lock);
> +
> +	reset_state = &vcpu->arch.reset_state;
> +	reset_state->pc = entry_addr;
> +
> +	/* Propagate caller endianness */
> +	reset_state->be = big_endian;
> +
> +	/*
> +	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
> +	 * the general purpose registers are undefined upon CPU_ON.
> +	 */
> +	reset_state->r0 = context_id;
> +
> +	WRITE_ONCE(reset_state->reset, true);
> +	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
> +
> +	/*
> +	 * Make sure the reset request is observed if the change to
> +	 * power_state is observed.
> +	 */
> +	smp_wmb();
> +	vcpu->arch.power_off = false;
> +}
> +
> +static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> +{
>  	struct kvm *kvm = source_vcpu->kvm;
>  	struct kvm_vcpu *vcpu = NULL;
>  	unsigned long cpu_id;
> @@ -90,29 +121,9 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
>  			return PSCI_RET_INVALID_PARAMS;
>  	}
>  
> -	reset_state = &vcpu->arch.reset_state;
> -
> -	reset_state->pc = smccc_get_arg2(source_vcpu);
> -
> -	/* Propagate caller endianness */
> -	reset_state->be = kvm_vcpu_is_be(source_vcpu);
> -
> -	/*
> -	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
> -	 * the general purpose registers are undefined upon CPU_ON.
> -	 */
> -	reset_state->r0 = smccc_get_arg3(source_vcpu);
> -
> -	WRITE_ONCE(reset_state->reset, true);
> -	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
> -
> -	/*
> -	 * Make sure the reset request is observed if the change to
> -	 * power_state is observed.
> -	 */
> -	smp_wmb();
> -
> -	vcpu->arch.power_off = false;
> +	kvm_psci_vcpu_request_reset(vcpu, smccc_get_arg2(source_vcpu),
> +				    smccc_get_arg3(source_vcpu),
> +				    kvm_vcpu_is_be(source_vcpu));
>  	kvm_vcpu_wake_up(vcpu);
>  
>  	return PSCI_RET_SUCCESS;
> -- 
> 2.33.0.685.g46640cef36-goog
>

Reviewed-by: Andrew Jones <drjones@redhat.com>
Oliver Upton Oct. 5, 2021, 3:05 p.m. UTC | #5
Hi folks,

On Tue, Oct 5, 2021 at 6:33 AM Andrew Jones <drjones@redhat.com> wrote:
>
> On Fri, Oct 01, 2021 at 09:10:14AM -0700, Oliver Upton wrote:
> > On Thu, Sep 30, 2021 at 11:05 PM Reiji Watanabe <reijiw@google.com> wrote:
> > >
> > > On Thu, Sep 23, 2021 at 12:16 PM Oliver Upton <oupton@google.com> wrote:
> > > >
> > > > In its implementation of the PSCI function, KVM needs to request that a
> > > > target vCPU resets before its next entry into the guest. Wrap the logic
> > > > for requesting a reset in a function for later use by other implemented
> > > > PSCI calls.
> > > >
> > > > No functional change intended.
> > > >
> > > > Signed-off-by: Oliver Upton <oupton@google.com>
> > > > ---
> > > >  arch/arm64/kvm/psci.c | 59 +++++++++++++++++++++++++------------------
> > > >  1 file changed, 35 insertions(+), 24 deletions(-)
> > > >
> > > > diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
> > > > index 310b9cb2b32b..bb59b692998b 100644
> > > > --- a/arch/arm64/kvm/psci.c
> > > > +++ b/arch/arm64/kvm/psci.c
> > > > @@ -64,9 +64,40 @@ static inline bool kvm_psci_valid_affinity(unsigned long affinity)
> > > >         return !(affinity & ~MPIDR_HWID_BITMASK);
> > > >  }
> > > >
> > > > -static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> > > > +static void kvm_psci_vcpu_request_reset(struct kvm_vcpu *vcpu,
> > > > +                                       unsigned long entry_addr,
> > > > +                                       unsigned long context_id,
> > > > +                                       bool big_endian)
> > > >  {
> > > >         struct vcpu_reset_state *reset_state;
> > > > +
> > > > +       lockdep_assert_held(&vcpu->kvm->lock);
> > > > +
> > > > +       reset_state = &vcpu->arch.reset_state;
> > > > +       reset_state->pc = entry_addr;
> > > > +
> > > > +       /* Propagate caller endianness */
> > > > +       reset_state->be = big_endian;
> > > > +
> > > > +       /*
> > > > +        * NOTE: We always update r0 (or x0) because for PSCI v0.1
> > > > +        * the general purpose registers are undefined upon CPU_ON.
> > > > +        */
> > > > +       reset_state->r0 = context_id;
> > > > +
> > > > +       WRITE_ONCE(reset_state->reset, true);
> > > > +       kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
> > > > +
> > > > +       /*
> > > > +        * Make sure the reset request is observed if the change to
> > > > +        * power_state is observed.
> > > > +        */
> > > > +       smp_wmb();
> > > > +       vcpu->arch.power_off = false;
> > > > +}
> > > > +
> > > > +static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> > > > +{
> > > >         struct kvm *kvm = source_vcpu->kvm;
> > > >         struct kvm_vcpu *vcpu = NULL;
> > > >         unsigned long cpu_id;
> > > > @@ -90,29 +121,9 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> > > >                         return PSCI_RET_INVALID_PARAMS;
> > > >         }
> > > >
> > > > -       reset_state = &vcpu->arch.reset_state;
> > > > -
> > > > -       reset_state->pc = smccc_get_arg2(source_vcpu);
> > > > -
> > > > -       /* Propagate caller endianness */
> > > > -       reset_state->be = kvm_vcpu_is_be(source_vcpu);
> > > > -
> > > > -       /*
> > > > -        * NOTE: We always update r0 (or x0) because for PSCI v0.1
> > > > -        * the general purpose registers are undefined upon CPU_ON.
> > > > -        */
> > > > -       reset_state->r0 = smccc_get_arg3(source_vcpu);
> > > > -
> > > > -       WRITE_ONCE(reset_state->reset, true);
> > > > -       kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
> > > > -
> > > > -       /*
> > > > -        * Make sure the reset request is observed if the change to
> > > > -        * power_state is observed.
> > > > -        */
> > > > -       smp_wmb();
> > > > -
> > > > -       vcpu->arch.power_off = false;
> > > > +       kvm_psci_vcpu_request_reset(vcpu, smccc_get_arg2(source_vcpu),
> > > > +                                   smccc_get_arg3(source_vcpu),
> > > > +                                   kvm_vcpu_is_be(source_vcpu));
> > > >         kvm_vcpu_wake_up(vcpu);
> > > >
> > > >         return PSCI_RET_SUCCESS;
> > > > --
> > > > 2.33.0.685.g46640cef36-goog
> > >
> > > Reviewed-by: Reiji Watanabe <reijiw@google.com>
> > >
> > > Not directly related to the patch, but the (original) code doesn't
> > > do any sanity checking for the entry address although the PSCI spec says:
> > >
> > > "INVALID_ADDRESS is returned when the entry point address is known
> > > by the implementation to be invalid, because it is in a range that
> > > is known not to be available to the caller."
> >
> > Right, I had noticed the same but was a tad too lazy to address in
> > this series :) Thanks for the review, Reiji!
> >
>
> KVM doesn't reserve any subrange within [0 - max_ipa), afaik. So all
> we need to do is check 'entry_addr < max_ipa', right?
>

We could be a bit more pedantic and check if the IPA exists in a
memory slot, seems like kvm_vcpu_is_visible_gfn() should do the trick.

Thoughts?

--
Thanks,
Oliver
Andrew Jones Oct. 5, 2021, 7:01 p.m. UTC | #6
On Tue, Oct 05, 2021 at 08:05:02AM -0700, Oliver Upton wrote:
> Hi folks,
> 
> On Tue, Oct 5, 2021 at 6:33 AM Andrew Jones <drjones@redhat.com> wrote:
> >
> > On Fri, Oct 01, 2021 at 09:10:14AM -0700, Oliver Upton wrote:
> > > On Thu, Sep 30, 2021 at 11:05 PM Reiji Watanabe <reijiw@google.com> wrote:
> > > >
> > > > On Thu, Sep 23, 2021 at 12:16 PM Oliver Upton <oupton@google.com> wrote:
> > > > >
> > > > > In its implementation of the PSCI function, KVM needs to request that a
> > > > > target vCPU resets before its next entry into the guest. Wrap the logic
> > > > > for requesting a reset in a function for later use by other implemented
> > > > > PSCI calls.
> > > > >
> > > > > No functional change intended.
> > > > >
> > > > > Signed-off-by: Oliver Upton <oupton@google.com>
> > > > > ---
> > > > >  arch/arm64/kvm/psci.c | 59 +++++++++++++++++++++++++------------------
> > > > >  1 file changed, 35 insertions(+), 24 deletions(-)
> > > > >
> > > > > diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
> > > > > index 310b9cb2b32b..bb59b692998b 100644
> > > > > --- a/arch/arm64/kvm/psci.c
> > > > > +++ b/arch/arm64/kvm/psci.c
> > > > > @@ -64,9 +64,40 @@ static inline bool kvm_psci_valid_affinity(unsigned long affinity)
> > > > >         return !(affinity & ~MPIDR_HWID_BITMASK);
> > > > >  }
> > > > >
> > > > > -static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> > > > > +static void kvm_psci_vcpu_request_reset(struct kvm_vcpu *vcpu,
> > > > > +                                       unsigned long entry_addr,
> > > > > +                                       unsigned long context_id,
> > > > > +                                       bool big_endian)
> > > > >  {
> > > > >         struct vcpu_reset_state *reset_state;
> > > > > +
> > > > > +       lockdep_assert_held(&vcpu->kvm->lock);
> > > > > +
> > > > > +       reset_state = &vcpu->arch.reset_state;
> > > > > +       reset_state->pc = entry_addr;
> > > > > +
> > > > > +       /* Propagate caller endianness */
> > > > > +       reset_state->be = big_endian;
> > > > > +
> > > > > +       /*
> > > > > +        * NOTE: We always update r0 (or x0) because for PSCI v0.1
> > > > > +        * the general purpose registers are undefined upon CPU_ON.
> > > > > +        */
> > > > > +       reset_state->r0 = context_id;
> > > > > +
> > > > > +       WRITE_ONCE(reset_state->reset, true);
> > > > > +       kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
> > > > > +
> > > > > +       /*
> > > > > +        * Make sure the reset request is observed if the change to
> > > > > +        * power_state is observed.
> > > > > +        */
> > > > > +       smp_wmb();
> > > > > +       vcpu->arch.power_off = false;
> > > > > +}
> > > > > +
> > > > > +static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> > > > > +{
> > > > >         struct kvm *kvm = source_vcpu->kvm;
> > > > >         struct kvm_vcpu *vcpu = NULL;
> > > > >         unsigned long cpu_id;
> > > > > @@ -90,29 +121,9 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> > > > >                         return PSCI_RET_INVALID_PARAMS;
> > > > >         }
> > > > >
> > > > > -       reset_state = &vcpu->arch.reset_state;
> > > > > -
> > > > > -       reset_state->pc = smccc_get_arg2(source_vcpu);
> > > > > -
> > > > > -       /* Propagate caller endianness */
> > > > > -       reset_state->be = kvm_vcpu_is_be(source_vcpu);
> > > > > -
> > > > > -       /*
> > > > > -        * NOTE: We always update r0 (or x0) because for PSCI v0.1
> > > > > -        * the general purpose registers are undefined upon CPU_ON.
> > > > > -        */
> > > > > -       reset_state->r0 = smccc_get_arg3(source_vcpu);
> > > > > -
> > > > > -       WRITE_ONCE(reset_state->reset, true);
> > > > > -       kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
> > > > > -
> > > > > -       /*
> > > > > -        * Make sure the reset request is observed if the change to
> > > > > -        * power_state is observed.
> > > > > -        */
> > > > > -       smp_wmb();
> > > > > -
> > > > > -       vcpu->arch.power_off = false;
> > > > > +       kvm_psci_vcpu_request_reset(vcpu, smccc_get_arg2(source_vcpu),
> > > > > +                                   smccc_get_arg3(source_vcpu),
> > > > > +                                   kvm_vcpu_is_be(source_vcpu));
> > > > >         kvm_vcpu_wake_up(vcpu);
> > > > >
> > > > >         return PSCI_RET_SUCCESS;
> > > > > --
> > > > > 2.33.0.685.g46640cef36-goog
> > > >
> > > > Reviewed-by: Reiji Watanabe <reijiw@google.com>
> > > >
> > > > Not directly related to the patch, but the (original) code doesn't
> > > > do any sanity checking for the entry address although the PSCI spec says:
> > > >
> > > > "INVALID_ADDRESS is returned when the entry point address is known
> > > > by the implementation to be invalid, because it is in a range that
> > > > is known not to be available to the caller."
> > >
> > > Right, I had noticed the same but was a tad too lazy to address in
> > > this series :) Thanks for the review, Reiji!
> > >
> >
> > KVM doesn't reserve any subrange within [0 - max_ipa), afaik. So all
> > we need to do is check 'entry_addr < max_ipa', right?
> >
> 
> We could be a bit more pedantic and check if the IPA exists in a
> memory slot, seems like kvm_vcpu_is_visible_gfn() should do the trick.
> 
> Thoughts?

Are we sure that all emulated devices, nvram, etc. will always use a
memslot for regions that contain executable code? If there's any doubt,
then we can't be sure about non-memslot regions within the max_ipa range.
That'd be up to userspace.

Thanks,
drew
Reiji Watanabe Oct. 13, 2021, 4:48 a.m. UTC | #7
On Tue, Oct 5, 2021 at 12:02 PM Andrew Jones <drjones@redhat.com> wrote:
>
> On Tue, Oct 05, 2021 at 08:05:02AM -0700, Oliver Upton wrote:
> > Hi folks,
> >
> > On Tue, Oct 5, 2021 at 6:33 AM Andrew Jones <drjones@redhat.com> wrote:
> > >
> > > On Fri, Oct 01, 2021 at 09:10:14AM -0700, Oliver Upton wrote:
> > > > On Thu, Sep 30, 2021 at 11:05 PM Reiji Watanabe <reijiw@google.com> wrote:
> > > > >
> > > > > On Thu, Sep 23, 2021 at 12:16 PM Oliver Upton <oupton@google.com> wrote:
> > > > > >
> > > > > > In its implementation of the PSCI function, KVM needs to request that a
> > > > > > target vCPU resets before its next entry into the guest. Wrap the logic
> > > > > > for requesting a reset in a function for later use by other implemented
> > > > > > PSCI calls.
> > > > > >
> > > > > > No functional change intended.
> > > > > >
> > > > > > Signed-off-by: Oliver Upton <oupton@google.com>
> > > > > > ---
> > > > > >  arch/arm64/kvm/psci.c | 59 +++++++++++++++++++++++++------------------
> > > > > >  1 file changed, 35 insertions(+), 24 deletions(-)
> > > > > >
> > > > > > diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
> > > > > > index 310b9cb2b32b..bb59b692998b 100644
> > > > > > --- a/arch/arm64/kvm/psci.c
> > > > > > +++ b/arch/arm64/kvm/psci.c
> > > > > > @@ -64,9 +64,40 @@ static inline bool kvm_psci_valid_affinity(unsigned long affinity)
> > > > > >         return !(affinity & ~MPIDR_HWID_BITMASK);
> > > > > >  }
> > > > > >
> > > > > > -static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> > > > > > +static void kvm_psci_vcpu_request_reset(struct kvm_vcpu *vcpu,
> > > > > > +                                       unsigned long entry_addr,
> > > > > > +                                       unsigned long context_id,
> > > > > > +                                       bool big_endian)
> > > > > >  {
> > > > > >         struct vcpu_reset_state *reset_state;
> > > > > > +
> > > > > > +       lockdep_assert_held(&vcpu->kvm->lock);
> > > > > > +
> > > > > > +       reset_state = &vcpu->arch.reset_state;
> > > > > > +       reset_state->pc = entry_addr;
> > > > > > +
> > > > > > +       /* Propagate caller endianness */
> > > > > > +       reset_state->be = big_endian;
> > > > > > +
> > > > > > +       /*
> > > > > > +        * NOTE: We always update r0 (or x0) because for PSCI v0.1
> > > > > > +        * the general purpose registers are undefined upon CPU_ON.
> > > > > > +        */
> > > > > > +       reset_state->r0 = context_id;
> > > > > > +
> > > > > > +       WRITE_ONCE(reset_state->reset, true);
> > > > > > +       kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
> > > > > > +
> > > > > > +       /*
> > > > > > +        * Make sure the reset request is observed if the change to
> > > > > > +        * power_state is observed.
> > > > > > +        */
> > > > > > +       smp_wmb();
> > > > > > +       vcpu->arch.power_off = false;
> > > > > > +}
> > > > > > +
> > > > > > +static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> > > > > > +{
> > > > > >         struct kvm *kvm = source_vcpu->kvm;
> > > > > >         struct kvm_vcpu *vcpu = NULL;
> > > > > >         unsigned long cpu_id;
> > > > > > @@ -90,29 +121,9 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
> > > > > >                         return PSCI_RET_INVALID_PARAMS;
> > > > > >         }
> > > > > >
> > > > > > -       reset_state = &vcpu->arch.reset_state;
> > > > > > -
> > > > > > -       reset_state->pc = smccc_get_arg2(source_vcpu);
> > > > > > -
> > > > > > -       /* Propagate caller endianness */
> > > > > > -       reset_state->be = kvm_vcpu_is_be(source_vcpu);
> > > > > > -
> > > > > > -       /*
> > > > > > -        * NOTE: We always update r0 (or x0) because for PSCI v0.1
> > > > > > -        * the general purpose registers are undefined upon CPU_ON.
> > > > > > -        */
> > > > > > -       reset_state->r0 = smccc_get_arg3(source_vcpu);
> > > > > > -
> > > > > > -       WRITE_ONCE(reset_state->reset, true);
> > > > > > -       kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
> > > > > > -
> > > > > > -       /*
> > > > > > -        * Make sure the reset request is observed if the change to
> > > > > > -        * power_state is observed.
> > > > > > -        */
> > > > > > -       smp_wmb();
> > > > > > -
> > > > > > -       vcpu->arch.power_off = false;
> > > > > > +       kvm_psci_vcpu_request_reset(vcpu, smccc_get_arg2(source_vcpu),
> > > > > > +                                   smccc_get_arg3(source_vcpu),
> > > > > > +                                   kvm_vcpu_is_be(source_vcpu));
> > > > > >         kvm_vcpu_wake_up(vcpu);
> > > > > >
> > > > > >         return PSCI_RET_SUCCESS;
> > > > > > --
> > > > > > 2.33.0.685.g46640cef36-goog
> > > > >
> > > > > Reviewed-by: Reiji Watanabe <reijiw@google.com>
> > > > >
> > > > > Not directly related to the patch, but the (original) code doesn't
> > > > > do any sanity checking for the entry address although the PSCI spec says:
> > > > >
> > > > > "INVALID_ADDRESS is returned when the entry point address is known
> > > > > by the implementation to be invalid, because it is in a range that
> > > > > is known not to be available to the caller."
> > > >
> > > > Right, I had noticed the same but was a tad too lazy to address in
> > > > this series :) Thanks for the review, Reiji!
> > > >
> > >
> > > KVM doesn't reserve any subrange within [0 - max_ipa), afaik. So all
> > > we need to do is check 'entry_addr < max_ipa', right?
> > >
> >
> > We could be a bit more pedantic and check if the IPA exists in a
> > memory slot, seems like kvm_vcpu_is_visible_gfn() should do the trick.
> >
> > Thoughts?
>
> Are we sure that all emulated devices, nvram, etc. will always use a
> memslot for regions that contain executable code? If there's any doubt,
> then we can't be sure about non-memslot regions within the max_ipa range.
> That'd be up to userspace.

I'm sorry for the late response.
IMHO, I would prefer Andrew's suggestion (check with the max_ipa).

It looks like instructions must always be in memslot for KVM/ARM looking
at the current implementation (especially kvm_handle_guest_abort()).
But, it doesn't necessarily mean the address is not invalid for the
guest (could be valid for load/store) and it might be changed in
the future.


Thanks,
Reiji
diff mbox series

Patch

diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index 310b9cb2b32b..bb59b692998b 100644
--- a/arch/arm64/kvm/psci.c
+++ b/arch/arm64/kvm/psci.c
@@ -64,9 +64,40 @@  static inline bool kvm_psci_valid_affinity(unsigned long affinity)
 	return !(affinity & ~MPIDR_HWID_BITMASK);
 }
 
-static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+static void kvm_psci_vcpu_request_reset(struct kvm_vcpu *vcpu,
+					unsigned long entry_addr,
+					unsigned long context_id,
+					bool big_endian)
 {
 	struct vcpu_reset_state *reset_state;
+
+	lockdep_assert_held(&vcpu->kvm->lock);
+
+	reset_state = &vcpu->arch.reset_state;
+	reset_state->pc = entry_addr;
+
+	/* Propagate caller endianness */
+	reset_state->be = big_endian;
+
+	/*
+	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
+	 * the general purpose registers are undefined upon CPU_ON.
+	 */
+	reset_state->r0 = context_id;
+
+	WRITE_ONCE(reset_state->reset, true);
+	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
+
+	/*
+	 * Make sure the reset request is observed if the change to
+	 * power_state is observed.
+	 */
+	smp_wmb();
+	vcpu->arch.power_off = false;
+}
+
+static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+{
 	struct kvm *kvm = source_vcpu->kvm;
 	struct kvm_vcpu *vcpu = NULL;
 	unsigned long cpu_id;
@@ -90,29 +121,9 @@  static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 			return PSCI_RET_INVALID_PARAMS;
 	}
 
-	reset_state = &vcpu->arch.reset_state;
-
-	reset_state->pc = smccc_get_arg2(source_vcpu);
-
-	/* Propagate caller endianness */
-	reset_state->be = kvm_vcpu_is_be(source_vcpu);
-
-	/*
-	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
-	 * the general purpose registers are undefined upon CPU_ON.
-	 */
-	reset_state->r0 = smccc_get_arg3(source_vcpu);
-
-	WRITE_ONCE(reset_state->reset, true);
-	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
-
-	/*
-	 * Make sure the reset request is observed if the change to
-	 * power_state is observed.
-	 */
-	smp_wmb();
-
-	vcpu->arch.power_off = false;
+	kvm_psci_vcpu_request_reset(vcpu, smccc_get_arg2(source_vcpu),
+				    smccc_get_arg3(source_vcpu),
+				    kvm_vcpu_is_be(source_vcpu));
 	kvm_vcpu_wake_up(vcpu);
 
 	return PSCI_RET_SUCCESS;