
[1/1] KVM: s390: fix LPSWEY handling

Message ID 20240627090520.4667-1-borntraeger@linux.ibm.com (mailing list archive)
State New
Series [1/1] KVM: s390: fix LPSWEY handling

Commit Message

Christian Borntraeger June 27, 2024, 9:05 a.m. UTC
In rare cases, e.g. for injecting a machine check, we do intercept all
load PSW instructions via ICTL_LPSW. With facility 193, a new variant
LPSWEY was added. KVM needs to handle that as well.

Fixes: a3efa8429266 ("KVM: s390: gen_facilities: allow facilities 165, 193, 194 and 196")
Reported-by: Marc Hartmayer <mhartmay@linux.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@linux.ibm.com>
---
 arch/s390/include/asm/kvm_host.h |  1 +
 arch/s390/kvm/kvm-s390.c         |  1 +
 arch/s390/kvm/kvm-s390.h         | 16 ++++++++++++++++
 arch/s390/kvm/priv.c             | 32 ++++++++++++++++++++++++++++++++
 4 files changed, 50 insertions(+)

Comments

Claudio Imbrenda June 27, 2024, 9:23 a.m. UTC | #1
On Thu, 27 Jun 2024 11:05:20 +0200
Christian Borntraeger <borntraeger@linux.ibm.com> wrote:

> in rare cases, e.g. for injecting a machine check we do intercept all
> load PSW instructions via ICTL_LPSW. With facility 193 a new variant
> LPSWEY was added. KVM needs to handle that as well.
> 
> Fixes: a3efa8429266 ("KVM: s390: gen_facilities: allow facilities 165, 193, 194 and 196")
> Reported-by: Marc Hartmayer <mhartmay@linux.ibm.com>
> Signed-off-by: Christian Borntraeger <borntraeger@linux.ibm.com>

[...]

> +static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
> +{
> +	u32 base1 = vcpu->arch.sie_block->ipb >> 28;
> +	u32 disp1 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
> +			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
> +
> +	/* The displacement is a 20bit _SIGNED_ value */
> +	if (disp1 & 0x80000)
> +		disp1+=0xfff00000;
> +
> +	if (ar)
> +		*ar = base1;
> +
> +	return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + (long)(int)disp1;
> +}
> +
>  static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
>  					      u64 *address1, u64 *address2,
>  					      u8 *ar_b1, u8 *ar_b2)
> diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
> index 1be19cc9d73c..1a49b89706f8 100644
> --- a/arch/s390/kvm/priv.c
> +++ b/arch/s390/kvm/priv.c
> @@ -797,6 +797,36 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
>  	return 0;
>  }
>  
> +static int handle_lpswey(struct kvm_vcpu *vcpu)
> +{
> +	psw_t new_psw;
> +	u64 addr;
> +	int rc;
> +	u8 ar;
> +
> +	vcpu->stat.instruction_lpswey++;
> +
> +	if (!test_kvm_facility(vcpu->kvm, 193))
> +		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
> +
> +	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
> +		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
> +
> +	addr = kvm_s390_get_base_disp_siy(vcpu, &ar);
> +	if (addr & 7)
> +		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
> +
> +	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
> +	if (rc)
> +		return kvm_s390_inject_prog_cond(vcpu, rc);
> +
> +	vcpu->arch.sie_block->gpsw = new_psw;
> +	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
> +		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
> +
> +	return 0;
> +}

looks quite straightforward, but you duplicated most of handle_lpswe.
it would probably be cleaner to abstract the "load psw" logic, and
convert handle_lpswe{,y} to be wrappers around it, something like

static int _handle_load_psw(struct kvm_vcpu *vcpu, unsigned long
pswaddr)

which can then contain the old code from the "if (addr & 7)" to the end
of the function.



I think it would look cleaner, but I don't have a super strong opinion
about it
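
For illustration, a rough sketch of such a wrapper (a hypothetical reshuffle of the code in this patch, not something that was posted; unlike the prototype above it also passes the access register through) could be:

static int _handle_load_psw(struct kvm_vcpu *vcpu, u64 addr, u8 ar)
{
	psw_t new_psw;
	int rc;

	/* The new PSW must be doubleword aligned in storage */
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	return 0;
}

static int handle_lpswey(struct kvm_vcpu *vcpu)
{
	u64 addr;
	u8 ar;

	vcpu->stat.instruction_lpswey++;

	if (!test_kvm_facility(vcpu->kvm, 193))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_siy(vcpu, &ar);
	return _handle_load_psw(vcpu, addr, ar);
}

handle_lpswe() could then be reduced to the same shape, just without the facility check and using kvm_s390_get_base_disp_s().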
Christian Borntraeger June 27, 2024, 9:40 a.m. UTC | #2
On 27.06.24 at 11:23, Claudio Imbrenda wrote:
> On Thu, 27 Jun 2024 11:05:20 +0200
> Christian Borntraeger <borntraeger@linux.ibm.com> wrote:
> 
>> in rare cases, e.g. for injecting a machine check we do intercept all
>> load PSW instructions via ICTL_LPSW. With facility 193 a new variant
>> LPSWEY was added. KVM needs to handle that as well.
>>
>> Fixes: a3efa8429266 ("KVM: s390: gen_facilities: allow facilities 165, 193, 194 and 196")
>> Reported-by: Marc Hartmayer <mhartmay@linux.ibm.com>
>> Signed-off-by: Christian Borntraeger <borntraeger@linux.ibm.com>
> 
> [...]
> 
>> +static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
>> +{
>> +	u32 base1 = vcpu->arch.sie_block->ipb >> 28;
>> +	u32 disp1 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
>> +			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
>> +
>> +	/* The displacement is a 20bit _SIGNED_ value */
>> +	if (disp1 & 0x80000)
>> +		disp1+=0xfff00000;
>> +
>> +	if (ar)
>> +		*ar = base1;
>> +
>> +	return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + (long)(int)disp1;
>> +}
>> +
>>   static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
>>   					      u64 *address1, u64 *address2,
>>   					      u8 *ar_b1, u8 *ar_b2)
>> diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
>> index 1be19cc9d73c..1a49b89706f8 100644
>> --- a/arch/s390/kvm/priv.c
>> +++ b/arch/s390/kvm/priv.c
>> @@ -797,6 +797,36 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
>>   	return 0;
>>   }
>>   
>> +static int handle_lpswey(struct kvm_vcpu *vcpu)
>> +{
>> +	psw_t new_psw;
>> +	u64 addr;
>> +	int rc;
>> +	u8 ar;
>> +
>> +	vcpu->stat.instruction_lpswey++;
>> +
>> +	if (!test_kvm_facility(vcpu->kvm, 193))
>> +		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
>> +
>> +	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
>> +		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
>> +
>> +	addr = kvm_s390_get_base_disp_siy(vcpu, &ar);
>> +	if (addr & 7)
>> +		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
>> +
>> +	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
>> +	if (rc)
>> +		return kvm_s390_inject_prog_cond(vcpu, rc);
>> +
>> +	vcpu->arch.sie_block->gpsw = new_psw;
>> +	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
>> +		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
>> +
>> +	return 0;
>> +}
> 
> looks quite straightforward, but you duplicated most of handle_lpswe.
> it would probably be cleaner to abstract the "load psw" logic, and
> convert handle_lpswe{,y} to be wrappers around it, something like
> 
> static int _handle_load_psw(struct kvm_vcpu *vcpu, unsigned long
> pswaddr)
> 
> which can then contain the old code from the "if (addr & 7)" to the end
> of the function.
> 
> 
> 
> I think it would look cleaner, but I don't have a super strong opinion
> about it

As this is a functional fix needed to properly run z16 code I would like to
minimize refactoring. I think we also need a different fix for LPSW(E) (we
should set the BEAR register).  We can do refactoring after we have fixed
everything.
Thomas Huth June 27, 2024, 9:44 a.m. UTC | #3
On 27/06/2024 11.05, Christian Borntraeger wrote:
> in rare cases, e.g. for injecting a machine check we do intercept all
> load PSW instructions via ICTL_LPSW. With facility 193 a new variant
> LPSWEY was added. KVM needs to handle that as well.
> 
> Fixes: a3efa8429266 ("KVM: s390: gen_facilities: allow facilities 165, 193, 194 and 196")
> Reported-by: Marc Hartmayer <mhartmay@linux.ibm.com>
> Signed-off-by: Christian Borntraeger <borntraeger@linux.ibm.com>
> ---
>   arch/s390/include/asm/kvm_host.h |  1 +
>   arch/s390/kvm/kvm-s390.c         |  1 +
>   arch/s390/kvm/kvm-s390.h         | 16 ++++++++++++++++
>   arch/s390/kvm/priv.c             | 32 ++++++++++++++++++++++++++++++++
>   4 files changed, 50 insertions(+)
> 
> diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
> index 95990461888f..9281063636a7 100644
> --- a/arch/s390/include/asm/kvm_host.h
> +++ b/arch/s390/include/asm/kvm_host.h
> @@ -427,6 +427,7 @@ struct kvm_vcpu_stat {
>   	u64 instruction_io_other;
>   	u64 instruction_lpsw;
>   	u64 instruction_lpswe;
> +	u64 instruction_lpswey;
>   	u64 instruction_pfmf;
>   	u64 instruction_ptff;
>   	u64 instruction_sck;
> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> index 50b77b759042..8e04c7f0c90c 100644
> --- a/arch/s390/kvm/kvm-s390.c
> +++ b/arch/s390/kvm/kvm-s390.c
> @@ -132,6 +132,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
>   	STATS_DESC_COUNTER(VCPU, instruction_io_other),
>   	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
>   	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
> +	STATS_DESC_COUNTER(VCPU, instruction_lpswey),
>   	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
>   	STATS_DESC_COUNTER(VCPU, instruction_ptff),
>   	STATS_DESC_COUNTER(VCPU, instruction_sck),
> diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
> index 111eb5c74784..c61966cae121 100644
> --- a/arch/s390/kvm/kvm-s390.h
> +++ b/arch/s390/kvm/kvm-s390.h
> @@ -138,6 +138,22 @@ static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
>   	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
>   }
>   
> +static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
> +{
> +	u32 base1 = vcpu->arch.sie_block->ipb >> 28;
> +	u32 disp1 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
> +			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
> +
> +	/* The displacement is a 20bit _SIGNED_ value */
> +	if (disp1 & 0x80000)
> +		disp1+=0xfff00000;
> +
> +	if (ar)
> +		*ar = base1;
> +
> +	return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + (long)(int)disp1;
> +}
> +
>   static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
>   					      u64 *address1, u64 *address2,
>   					      u8 *ar_b1, u8 *ar_b2)
> diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
> index 1be19cc9d73c..1a49b89706f8 100644
> --- a/arch/s390/kvm/priv.c
> +++ b/arch/s390/kvm/priv.c
> @@ -797,6 +797,36 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
>   	return 0;
>   }
>   
> +static int handle_lpswey(struct kvm_vcpu *vcpu)
> +{
> +	psw_t new_psw;
> +	u64 addr;
> +	int rc;
> +	u8 ar;
> +
> +	vcpu->stat.instruction_lpswey++;
> +
> +	if (!test_kvm_facility(vcpu->kvm, 193))
> +		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
> +
> +	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
> +		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
> +
> +	addr = kvm_s390_get_base_disp_siy(vcpu, &ar);
> +	if (addr & 7)
> +		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
> +
> +	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
> +	if (rc)
> +		return kvm_s390_inject_prog_cond(vcpu, rc);

Quoting the Principles of Operations:

"If the storage-key-removal facility is installed, a spe-
cial-operation exception is recognized if the key value
in bits 8-11 of the storage operand is nonzero."

Do we need to have such a check here, too?

  Thomas


> +	vcpu->arch.sie_block->gpsw = new_psw;
> +	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
> +		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
> +
> +	return 0;
> +}
> +
>   static int handle_stidp(struct kvm_vcpu *vcpu)
>   {
>   	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
> @@ -1462,6 +1492,8 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
>   	case 0x61:
>   	case 0x62:
>   		return handle_ri(vcpu);
> +	case 0x71:
> +		return handle_lpswey(vcpu);
>   	default:
>   		return -EOPNOTSUPP;
>   	}
Christian Borntraeger June 27, 2024, 9:49 a.m. UTC | #4
On 27.06.24 at 11:44, Thomas Huth wrote:
> On 27/06/2024 11.05, Christian Borntraeger wrote:
>> in rare cases, e.g. for injecting a machine check we do intercept all
>> load PSW instructions via ICTL_LPSW. With facility 193 a new variant
>> LPSWEY was added. KVM needs to handle that as well.
>>
>> Fixes: a3efa8429266 ("KVM: s390: gen_facilities: allow facilities 165, 193, 194 and 196")
>> Reported-by: Marc Hartmayer <mhartmay@linux.ibm.com>
>> Signed-off-by: Christian Borntraeger <borntraeger@linux.ibm.com>
>> ---
>>   arch/s390/include/asm/kvm_host.h |  1 +
>>   arch/s390/kvm/kvm-s390.c         |  1 +
>>   arch/s390/kvm/kvm-s390.h         | 16 ++++++++++++++++
>>   arch/s390/kvm/priv.c             | 32 ++++++++++++++++++++++++++++++++
>>   4 files changed, 50 insertions(+)
>>
>> diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
>> index 95990461888f..9281063636a7 100644
>> --- a/arch/s390/include/asm/kvm_host.h
>> +++ b/arch/s390/include/asm/kvm_host.h
>> @@ -427,6 +427,7 @@ struct kvm_vcpu_stat {
>>       u64 instruction_io_other;
>>       u64 instruction_lpsw;
>>       u64 instruction_lpswe;
>> +    u64 instruction_lpswey;
>>       u64 instruction_pfmf;
>>       u64 instruction_ptff;
>>       u64 instruction_sck;
>> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
>> index 50b77b759042..8e04c7f0c90c 100644
>> --- a/arch/s390/kvm/kvm-s390.c
>> +++ b/arch/s390/kvm/kvm-s390.c
>> @@ -132,6 +132,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
>>       STATS_DESC_COUNTER(VCPU, instruction_io_other),
>>       STATS_DESC_COUNTER(VCPU, instruction_lpsw),
>>       STATS_DESC_COUNTER(VCPU, instruction_lpswe),
>> +    STATS_DESC_COUNTER(VCPU, instruction_lpswey),
>>       STATS_DESC_COUNTER(VCPU, instruction_pfmf),
>>       STATS_DESC_COUNTER(VCPU, instruction_ptff),
>>       STATS_DESC_COUNTER(VCPU, instruction_sck),
>> diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
>> index 111eb5c74784..c61966cae121 100644
>> --- a/arch/s390/kvm/kvm-s390.h
>> +++ b/arch/s390/kvm/kvm-s390.h
>> @@ -138,6 +138,22 @@ static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
>>       return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
>>   }
>> +static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
>> +{
>> +    u32 base1 = vcpu->arch.sie_block->ipb >> 28;
>> +    u32 disp1 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
>> +            ((vcpu->arch.sie_block->ipb & 0xff00) << 4);
>> +
>> +    /* The displacement is a 20bit _SIGNED_ value */
>> +    if (disp1 & 0x80000)
>> +        disp1+=0xfff00000;
>> +
>> +    if (ar)
>> +        *ar = base1;
>> +
>> +    return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + (long)(int)disp1;
>> +}
>> +
>>   static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
>>                             u64 *address1, u64 *address2,
>>                             u8 *ar_b1, u8 *ar_b2)
>> diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
>> index 1be19cc9d73c..1a49b89706f8 100644
>> --- a/arch/s390/kvm/priv.c
>> +++ b/arch/s390/kvm/priv.c
>> @@ -797,6 +797,36 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
>>       return 0;
>>   }
>> +static int handle_lpswey(struct kvm_vcpu *vcpu)
>> +{
>> +    psw_t new_psw;
>> +    u64 addr;
>> +    int rc;
>> +    u8 ar;
>> +
>> +    vcpu->stat.instruction_lpswey++;
>> +
>> +    if (!test_kvm_facility(vcpu->kvm, 193))
>> +        return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
>> +
>> +    if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
>> +        return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
>> +
>> +    addr = kvm_s390_get_base_disp_siy(vcpu, &ar);
>> +    if (addr & 7)
>> +        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
>> +
>> +    rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
>> +    if (rc)
>> +        return kvm_s390_inject_prog_cond(vcpu, rc);
> 
> Quoting the Principles of Operations:
> 
> "If the storage-key-removal facility is installed, a spe-
> cial-operation exception is recognized if the key value
> in bits 8-11 of the storage operand is nonzero."
> 
> Do we need to have such a check here, too?

We do not yet implement the storage key removal facility in KVM as far as
I can see. Only secure execution does that but there we do not have lpswe
intercepts. But yes, if we are going to implement this for normal guests
we need to have a look, also for LPSW(E)
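
Should that facility ever be offered to ordinary KVM guests, a hedged sketch of the extra check in handle_lpswe{,y}() might look like the code below; the facility number (169) and the PGM_SPECIAL_OP constant are assumptions here, not taken from this patch:

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	/*
	 * Hypothetical: with the storage-key-removal facility installed
	 * (assumed to be facility bit 169), a nonzero key in bits 8-11 of
	 * the storage operand triggers a special-operation exception.
	 * PSW_MASK_KEY covers exactly those bits of the new PSW.
	 */
	if (test_kvm_facility(vcpu->kvm, 169) && (new_psw.mask & PSW_MASK_KEY))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIAL_OP);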
Heiko Carstens June 27, 2024, 9:57 a.m. UTC | #5
On Thu, Jun 27, 2024 at 11:05:20AM +0200, Christian Borntraeger wrote:
> in rare cases, e.g. for injecting a machine check we do intercept all
> load PSW instructions via ICTL_LPSW. With facility 193 a new variant
> LPSWEY was added. KVM needs to handle that as well.
> 
> Fixes: a3efa8429266 ("KVM: s390: gen_facilities: allow facilities 165, 193, 194 and 196")
> Reported-by: Marc Hartmayer <mhartmay@linux.ibm.com>
> Signed-off-by: Christian Borntraeger <borntraeger@linux.ibm.com>
> ---
>  arch/s390/include/asm/kvm_host.h |  1 +
>  arch/s390/kvm/kvm-s390.c         |  1 +
>  arch/s390/kvm/kvm-s390.h         | 16 ++++++++++++++++
>  arch/s390/kvm/priv.c             | 32 ++++++++++++++++++++++++++++++++
>  4 files changed, 50 insertions(+)

...

> +static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
> +{
> +	u32 base1 = vcpu->arch.sie_block->ipb >> 28;
> +	u32 disp1 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
> +			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
> +
> +	/* The displacement is a 20bit _SIGNED_ value */
> +	if (disp1 & 0x80000)
> +		disp1+=0xfff00000;
> +
> +	if (ar)
> +		*ar = base1;
> +
> +	return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + (long)(int)disp1;
> +}

You may want to use sign_extend32() or sign_extend64() instead of open-coding.
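
For reference, sign_extend64() from <linux/bitops.h> takes the 0-based index of the sign bit, so for the 20-bit SIY displacement it would presumably be used along these lines (with the result kept in a signed 64-bit variable so the extension survives):

	/* bit 19 (0-based) is the sign bit of the 20-bit displacement */
	s64 disp = sign_extend64(disp1, 19);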
Christian Borntraeger June 28, 2024, 2:53 p.m. UTC | #6
On 27.06.24 at 11:57, Heiko Carstens wrote:
> On Thu, Jun 27, 2024 at 11:05:20AM +0200, Christian Borntraeger wrote:
>> in rare cases, e.g. for injecting a machine check we do intercept all
>> load PSW instructions via ICTL_LPSW. With facility 193 a new variant
>> LPSWEY was added. KVM needs to handle that as well.
>>
>> Fixes: a3efa8429266 ("KVM: s390: gen_facilities: allow facilities 165, 193, 194 and 196")
>> Reported-by: Marc Hartmayer <mhartmay@linux.ibm.com>
>> Signed-off-by: Christian Borntraeger <borntraeger@linux.ibm.com>
>> ---
>>   arch/s390/include/asm/kvm_host.h |  1 +
>>   arch/s390/kvm/kvm-s390.c         |  1 +
>>   arch/s390/kvm/kvm-s390.h         | 16 ++++++++++++++++
>>   arch/s390/kvm/priv.c             | 32 ++++++++++++++++++++++++++++++++
>>   4 files changed, 50 insertions(+)
> 
> ...
> 
>> +static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
>> +{
>> +	u32 base1 = vcpu->arch.sie_block->ipb >> 28;
>> +	u32 disp1 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
>> +			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
>> +
>> +	/* The displacement is a 20bit _SIGNED_ value */
>> +	if (disp1 & 0x80000)
>> +		disp1+=0xfff00000;
>> +
>> +	if (ar)
>> +		*ar = base1;
>> +
>> +	return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + (long)(int)disp1;
>> +}
> 
> You may want to use sign_extend32() or sign_extend64() instead of open-coding.

Something like sign_extend64(disp1, 31)
I actually find that harder to read, but I am open for other opinions.
Heiko Carstens June 28, 2024, 3:02 p.m. UTC | #7
On Fri, Jun 28, 2024 at 04:53:20PM +0200, Christian Borntraeger wrote:
> > > +static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
> > > +{
> > > +	u32 base1 = vcpu->arch.sie_block->ipb >> 28;
> > > +	u32 disp1 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
> > > +			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
> > > +
> > > +	/* The displacement is a 20bit _SIGNED_ value */
> > > +	if (disp1 & 0x80000)
> > > +		disp1+=0xfff00000;
> > > +
> > > +	if (ar)
> > > +		*ar = base1;
> > > +
> > > +	return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + (long)(int)disp1;
> > > +}
> > 
> > You may want to use sign_extend32() or sign_extend64() instead of open-coding.
> 
> Something like sign_extend64(disp1, 31)
> I actually find that harder to read, but I am open for other opinions.

Feel free to ignore my suggestion. It was just a comment that this
helper exists. If you prefer the open-coded variant then that's fine
too of course.
Claudio Imbrenda June 28, 2024, 3:22 p.m. UTC | #8
On Fri, 28 Jun 2024 16:53:20 +0200
Christian Borntraeger <borntraeger@linux.ibm.com> wrote:

> On 27.06.24 at 11:57, Heiko Carstens wrote:
> > On Thu, Jun 27, 2024 at 11:05:20AM +0200, Christian Borntraeger wrote:  
> >> in rare cases, e.g. for injecting a machine check we do intercept all
> >> load PSW instructions via ICTL_LPSW. With facility 193 a new variant
> >> LPSWEY was added. KVM needs to handle that as well.
> >>
> >> Fixes: a3efa8429266 ("KVM: s390: gen_facilities: allow facilities 165, 193, 194 and 196")
> >> Reported-by: Marc Hartmayer <mhartmay@linux.ibm.com>
> >> Signed-off-by: Christian Borntraeger <borntraeger@linux.ibm.com>
> >> ---
> >>   arch/s390/include/asm/kvm_host.h |  1 +
> >>   arch/s390/kvm/kvm-s390.c         |  1 +
> >>   arch/s390/kvm/kvm-s390.h         | 16 ++++++++++++++++
> >>   arch/s390/kvm/priv.c             | 32 ++++++++++++++++++++++++++++++++
> >>   4 files changed, 50 insertions(+)  
> > 
> > ...
> >   
> >> +static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
> >> +{
> >> +	u32 base1 = vcpu->arch.sie_block->ipb >> 28;
> >> +	u32 disp1 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +

long disp1 = ... 

> >> +			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
> >> +
> >> +	/* The displacement is a 20bit _SIGNED_ value */
> >> +	if (disp1 & 0x80000)
> >> +		disp1+=0xfff00000;

disp1 = sign_extend64(disp1, 20);

> >> +
> >> +	if (ar)
> >> +		*ar = base1;
> >> +
> >> +	return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + (long)(int)disp1;

+ disp1;

> >> +}  
> > 
> > You may want to use sign_extend32() or sign_extend64() instead of open-coding.  
> 
> Something like sign_extend64(disp1, 31)
> I actually find that harder to read, but I am open for other opinions.

I think what he meant is what I wrote above, but it doesn't matter too
much.


with or without the above improvements:

Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Christian Borntraeger June 28, 2024, 3:50 p.m. UTC | #9
On 28.06.24 at 17:22, Claudio Imbrenda wrote:
> On Fri, 28 Jun 2024 16:53:20 +0200
> Christian Borntraeger <borntraeger@linux.ibm.com> wrote:
> 
>> On 27.06.24 at 11:57, Heiko Carstens wrote:
>>> On Thu, Jun 27, 2024 at 11:05:20AM +0200, Christian Borntraeger wrote:
>>>> in rare cases, e.g. for injecting a machine check we do intercept all
>>>> load PSW instructions via ICTL_LPSW. With facility 193 a new variant
>>>> LPSWEY was added. KVM needs to handle that as well.
>>>>
>>>> Fixes: a3efa8429266 ("KVM: s390: gen_facilities: allow facilities 165, 193, 194 and 196")
>>>> Reported-by: Marc Hartmayer <mhartmay@linux.ibm.com>
>>>> Signed-off-by: Christian Borntraeger <borntraeger@linux.ibm.com>
>>>> ---
>>>>    arch/s390/include/asm/kvm_host.h |  1 +
>>>>    arch/s390/kvm/kvm-s390.c         |  1 +
>>>>    arch/s390/kvm/kvm-s390.h         | 16 ++++++++++++++++
>>>>    arch/s390/kvm/priv.c             | 32 ++++++++++++++++++++++++++++++++
>>>>    4 files changed, 50 insertions(+)
>>>
>>> ...
>>>    
>>>> +static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
>>>> +{
>>>> +	u32 base1 = vcpu->arch.sie_block->ipb >> 28;
>>>> +	u32 disp1 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
> 
> long disp1 = ...
> 
>>>> +			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
>>>> +
>>>> +	/* The displacement is a 20bit _SIGNED_ value */
>>>> +	if (disp1 & 0x80000)
>>>> +		disp1+=0xfff00000;
> 
> disp1 = sign_extend64(disp1, 20);

Hmm, right. I was just looking at the return statement, but here it is clearly better.

Will send a v2
Christian Borntraeger June 28, 2024, 4:55 p.m. UTC | #10
On 28.06.24 at 17:22, Claudio Imbrenda wrote:
[...]
> disp1 = sign_extend64(disp1, 20);

[...]

> Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>

I dropped this RB since I did use 19 instead of 20 for sign_extend64
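
Putting the review comments together, the v2 helper would presumably end up looking roughly like this (a sketch reconstructed from the thread, not the actual v2 posting):

static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base1 = vcpu->arch.sie_block->ipb >> 28;
	s64 disp1;

	/* The displacement is a 20bit _SIGNED_ value; bit 19 is its sign bit */
	disp1 = sign_extend64(((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
			      ((vcpu->arch.sie_block->ipb & 0xff00) << 4), 19);

	if (ar)
		*ar = base1;

	return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
}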

Patch

diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 95990461888f..9281063636a7 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -427,6 +427,7 @@  struct kvm_vcpu_stat {
 	u64 instruction_io_other;
 	u64 instruction_lpsw;
 	u64 instruction_lpswe;
+	u64 instruction_lpswey;
 	u64 instruction_pfmf;
 	u64 instruction_ptff;
 	u64 instruction_sck;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 50b77b759042..8e04c7f0c90c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -132,6 +132,7 @@  const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 	STATS_DESC_COUNTER(VCPU, instruction_io_other),
 	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
 	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
+	STATS_DESC_COUNTER(VCPU, instruction_lpswey),
 	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
 	STATS_DESC_COUNTER(VCPU, instruction_ptff),
 	STATS_DESC_COUNTER(VCPU, instruction_sck),
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 111eb5c74784..c61966cae121 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -138,6 +138,22 @@  static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
 	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
 }
 
+static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
+{
+	u32 base1 = vcpu->arch.sie_block->ipb >> 28;
+	u32 disp1 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
+			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
+
+	/* The displacement is a 20bit _SIGNED_ value */
+	if (disp1 & 0x80000)
+		disp1+=0xfff00000;
+
+	if (ar)
+		*ar = base1;
+
+	return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + (long)(int)disp1;
+}
+
 static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
 					      u64 *address1, u64 *address2,
 					      u8 *ar_b1, u8 *ar_b2)
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 1be19cc9d73c..1a49b89706f8 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -797,6 +797,36 @@  static int handle_lpswe(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int handle_lpswey(struct kvm_vcpu *vcpu)
+{
+	psw_t new_psw;
+	u64 addr;
+	int rc;
+	u8 ar;
+
+	vcpu->stat.instruction_lpswey++;
+
+	if (!test_kvm_facility(vcpu->kvm, 193))
+		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+	addr = kvm_s390_get_base_disp_siy(vcpu, &ar);
+	if (addr & 7)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
+	if (rc)
+		return kvm_s390_inject_prog_cond(vcpu, rc);
+
+	vcpu->arch.sie_block->gpsw = new_psw;
+	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+	return 0;
+}
+
 static int handle_stidp(struct kvm_vcpu *vcpu)
 {
 	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
@@ -1462,6 +1492,8 @@  int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
 	case 0x61:
 	case 0x62:
 		return handle_ri(vcpu);
+	case 0x71:
+		return handle_lpswey(vcpu);
 	default:
 		return -EOPNOTSUPP;
 	}