Fixup sida bouncing

Message ID 20191114162153.25349-1-frankja@linux.ibm.com (mailing list archive)
State New, archived
Series Fixup sida bouncing

Commit Message

Janosch Frank Nov. 14, 2019, 4:21 p.m. UTC
Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
---
 arch/s390/kvm/kvm-s390.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

Comments

Thomas Huth Nov. 15, 2019, 8:19 a.m. UTC | #1
On 14/11/2019 17.21, Janosch Frank wrote:
> Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
> ---
>  arch/s390/kvm/kvm-s390.c | 19 +++++++++++++------
>  1 file changed, 13 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> index 0fa7c6d9ed0e..9820fde04887 100644
> --- a/arch/s390/kvm/kvm-s390.c
> +++ b/arch/s390/kvm/kvm-s390.c
> @@ -4432,13 +4432,21 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
>  	if (mop->size > MEM_OP_MAX_SIZE)
>  		return -E2BIG;
>  
> -	/* Protected guests move instruction data over the satellite
> +	/*
> +	 * Protected guests move instruction data over the satellite
>  	 * block which has its own size limit
>  	 */
>  	if (kvm_s390_pv_is_protected(vcpu->kvm) &&
> -	    mop->size > ((vcpu->arch.sie_block->sidad & 0x0f) + 1) * PAGE_SIZE)
> +	    mop->size > ((vcpu->arch.sie_block->sidad & 0xff) + 1) * PAGE_SIZE)
>  		return -E2BIG;
>  
> +	/* We can currently only offset into the one SIDA page. */
> +	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
> +		mop->gaddr &= ~PAGE_MASK;
> +		if (mop->gaddr + mop->size > PAGE_SIZE)
> +			return -EINVAL;
> +	}
> +
>  	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
>  		tmpbuf = vmalloc(mop->size);
>  		if (!tmpbuf)
> @@ -4451,6 +4459,7 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
>  	case KVM_S390_MEMOP_LOGICAL_READ:
>  		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
>  			if (kvm_s390_pv_is_protected(vcpu->kvm)) {
> +				/* We can always copy into the SIDA */
>  				r = 0;
>  				break;
>  			}
> @@ -4461,8 +4470,7 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
>  		if (kvm_s390_pv_is_protected(vcpu->kvm)) {
>  			r = 0;
>  			if (copy_to_user(uaddr, (void *)vcpu->arch.sie_block->sidad +
> -					 (mop->gaddr & ~PAGE_MASK),
> -					 mop->size))
> +					 mop->gaddr, mop->size))
>  				r = -EFAULT;
>  			break;
>  		}
> @@ -4485,8 +4493,7 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
>  		if (kvm_s390_pv_is_protected(vcpu->kvm)) {
>  			r = 0;
>  			if (copy_from_user((void *)vcpu->arch.sie_block->sidad +
> -					   (mop->gaddr & ~PAGE_MASK), uaddr,
> -					   mop->size))
> +					   mop->gaddr, uaddr, mop->size))
>  				r = -EFAULT;
>  			break;
>  		}
> 

That looks better, indeed.

Still, is there a way you could also verify that gaddr references the
right page that is mirrored in the sidad?

 Thomas
Janosch Frank Nov. 15, 2019, 8:50 a.m. UTC | #2
On 11/15/19 9:19 AM, Thomas Huth wrote:
> On 14/11/2019 17.21, Janosch Frank wrote:
[...]
> 
> That looks better, indeed.
> 
> Still, is there a way you could also verify that gaddr references the
> right page that is mirrored in the sidad?
> 
>  Thomas
> 

I'm not completely sure I understand your question correctly.
Checking that is not possible here without also looking at the
instruction bytecode and the register contents, which would make this
patch ridiculously large with no real benefit.
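
For reference, the check that the patch does perform can be summarized
by the following standalone sketch. The helper name and the local
page-size constants are made up here so the snippet compiles outside
the kernel; the logic mirrors the hunk quoted above:

#include <stdint.h>
#include <errno.h>

/* Local stand-ins for the kernel's PAGE_SIZE/PAGE_MASK (illustrative only). */
#define EX_PAGE_SIZE 4096UL
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

/*
 * Mirror of the bounds check added by this patch: for protected guests,
 * gaddr is reduced to an offset into the single SIDA page, and the
 * access must not run past the end of that page.
 */
static int sida_offset_check(uint64_t *gaddr, uint32_t size)
{
	*gaddr &= ~EX_PAGE_MASK;          /* keep only the in-page offset */
	if (*gaddr + size > EX_PAGE_SIZE) /* would cross the SIDA page end */
		return -EINVAL;
	return 0;
}

With a 4 KiB page, a memop with gaddr = 0x1ff0 and size = 0x20 is
reduced to offset 0xff0 and rejected, since 0xff0 + 0x20 would run past
the end of the page.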
Thomas Huth Nov. 15, 2019, 9:21 a.m. UTC | #3
On 15/11/2019 09.50, Janosch Frank wrote:
> On 11/15/19 9:19 AM, Thomas Huth wrote:
[...]
>> Still, is there a way you could also verify that gaddr references the
>> right page that is mirrored in the sidad?
>>
>>  Thomas
>>
> 
> I'm not completely sure if I understand your question correctly.
> Checking that is not possible here without also looking at the
> instruction bytecode and register contents which would make this patch
> ridiculously large with no real benefit.

Yes, I was thinking about something like that. I mean, how can you be
sure that userspace really only wants to read the contents that are
referenced by the sidad? It could also try to read or write e.g. the
lowcore data in between (assuming that there are some code paths left
which are not aware of protected virtualization yet).

Well, it does not have to happen right now in this patch, but I still
think that's something that should be added in the future if somehow
possible...

 Thomas

Patch

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0fa7c6d9ed0e..9820fde04887 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4432,13 +4432,21 @@  static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
 	if (mop->size > MEM_OP_MAX_SIZE)
 		return -E2BIG;
 
-	/* Protected guests move instruction data over the satellite
+	/*
+	 * Protected guests move instruction data over the satellite
 	 * block which has its own size limit
 	 */
 	if (kvm_s390_pv_is_protected(vcpu->kvm) &&
-	    mop->size > ((vcpu->arch.sie_block->sidad & 0x0f) + 1) * PAGE_SIZE)
+	    mop->size > ((vcpu->arch.sie_block->sidad & 0xff) + 1) * PAGE_SIZE)
 		return -E2BIG;
 
+	/* We can currently only offset into the one SIDA page. */
+	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
+		mop->gaddr &= ~PAGE_MASK;
+		if (mop->gaddr + mop->size > PAGE_SIZE)
+			return -EINVAL;
+	}
+
 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
 		tmpbuf = vmalloc(mop->size);
 		if (!tmpbuf)
@@ -4451,6 +4459,7 @@  static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
 	case KVM_S390_MEMOP_LOGICAL_READ:
 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
 			if (kvm_s390_pv_is_protected(vcpu->kvm)) {
+				/* We can always copy into the SIDA */
 				r = 0;
 				break;
 			}
@@ -4461,8 +4470,7 @@  static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
 		if (kvm_s390_pv_is_protected(vcpu->kvm)) {
 			r = 0;
 			if (copy_to_user(uaddr, (void *)vcpu->arch.sie_block->sidad +
-					 (mop->gaddr & ~PAGE_MASK),
-					 mop->size))
+					 mop->gaddr, mop->size))
 				r = -EFAULT;
 			break;
 		}
@@ -4485,8 +4493,7 @@  static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
 		if (kvm_s390_pv_is_protected(vcpu->kvm)) {
 			r = 0;
 			if (copy_from_user((void *)vcpu->arch.sie_block->sidad +
-					   (mop->gaddr & ~PAGE_MASK), uaddr,
-					   mop->size))
+					   mop->gaddr, uaddr, mop->size))
 				r = -EFAULT;
 			break;
 		}
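
As a usage illustration only: under the semantics proposed by this
patch (and only for protected guests), mop->gaddr is interpreted as an
offset into the single SIDA page. A userspace read of instruction data
could then look roughly like the sketch below; the vcpu file descriptor
is assumed to come from the usual KVM setup, and read_sida() is a
made-up helper name, not an existing API:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Sketch: vcpu_fd is a KVM vcpu fd for a protected guest, obtained
 * elsewhere.  offset is an offset into the SIDA page, so the caller
 * must keep offset + len within the page size, matching the -EINVAL
 * check added by this patch.
 */
static int read_sida(int vcpu_fd, uint64_t offset, void *buf, uint32_t len)
{
	struct kvm_s390_mem_op op;

	memset(&op, 0, sizeof(op));
	op.op = KVM_S390_MEMOP_LOGICAL_READ;
	op.gaddr = offset;                   /* SIDA page offset, per this patch */
	op.size = len;
	op.buf = (uint64_t)(uintptr_t)buf;   /* userspace buffer to copy into */

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}

On success the ioctl returns 0 and buf holds the bytes copied out of
the SIDA; oversized or page-crossing requests are rejected with E2BIG
or EINVAL, matching the checks above.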