[v7,09/14] KVM: s390: Dispatch to implementing function at top level of vm mem_op

Message ID 20230206164602.138068-10-scgl@linux.ibm.com (mailing list archive)
State Accepted
Commit 8550bcb754bc9ee0f8906c416e9e900e4ed5607c
Series KVM: s390: Extend MEM_OP ioctl by storage key checked cmpxchg

Commit Message

Janis Schoetterl-Glausch Feb. 6, 2023, 4:45 p.m. UTC
Instead of having one function covering all mem_op operations,
have a function implementing absolute access and dispatch to that
function in its caller, based on the operation code.
This way additional future operations can be implemented by adding an
implementing function without changing existing operations.

Suggested-by: Janosch Frank <frankja@linux.ibm.com>
Signed-off-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
---
 arch/s390/kvm/kvm-s390.c | 38 ++++++++++++++++++++++++--------------
 1 file changed, 24 insertions(+), 14 deletions(-)
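
The payoff of this split is visible at the dispatch site: a future operation only needs its own case label and implementing function, while kvm_s390_vm_mem_op_abs() stays untouched. A minimal sketch of such an extension (the cmpxchg op and its handler are named here purely as hypothetical placeholders for the kind of operation later patches in this series add; they are not part of this patch):

static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
{
	/* Protected VMs cannot be accessed this way, see the comment in the patch. */
	if (kvm_s390_pv_get_handle(kvm))
		return -EINVAL;

	switch (mop->op) {
	case KVM_S390_MEMOP_ABSOLUTE_READ:
	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
		return kvm_s390_vm_mem_op_abs(kvm, mop);
	case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:			/* hypothetical future op */
		return kvm_s390_vm_mem_op_cmpxchg(kvm, mop);	/* hypothetical handler */
	default:
		return -EINVAL;
	}
}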

Comments

Janosch Frank Feb. 7, 2023, 1:08 p.m. UTC | #1
On 2/6/23 17:45, Janis Schoetterl-Glausch wrote:
> Instead of having one function covering all mem_op operations,
> have a function implementing absolute access and dispatch to that
> function in its caller, based on the operation code.
> This way additional future operations can be implemented by adding an
> implementing function without changing existing operations.
> 
> Suggested-by: Janosch Frank <frankja@linux.ibm.com>
> Signed-off-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
> Reviewed-by: Thomas Huth <thuth@redhat.com>

Reviewed-by: Janosch Frank <frankja@linux.ibm.com>

> ---
>   arch/s390/kvm/kvm-s390.c | 38 ++++++++++++++++++++++++--------------
>   1 file changed, 24 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> index 0367c1a7e69a..707967a296f1 100644
> --- a/arch/s390/kvm/kvm-s390.c
> +++ b/arch/s390/kvm/kvm-s390.c
> @@ -2779,7 +2779,7 @@ static int mem_op_validate_common(struct kvm_s390_mem_op *mop, u64 supported_fla
>   	return 0;
>   }
>   
> -static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
> +static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
>   {
>   	void __user *uaddr = (void __user *)mop->buf;
>   	void *tmpbuf = NULL;
> @@ -2790,17 +2790,6 @@ static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
>   	if (r)
>   		return r;
>   
> -	/*
> -	 * This is technically a heuristic only, if the kvm->lock is not
> -	 * taken, it is not guaranteed that the vm is/remains non-protected.
> -	 * This is ok from a kernel perspective, wrongdoing is detected
> -	 * on the access, -EFAULT is returned and the vm may crash the
> -	 * next time it accesses the memory in question.
> -	 * There is no sane usecase to do switching and a memop on two
> -	 * different CPUs at the same time.
> -	 */
> -	if (kvm_s390_pv_get_handle(kvm))
> -		return -EINVAL;
>   	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
>   		tmpbuf = vmalloc(mop->size);
>   		if (!tmpbuf)
> @@ -2841,8 +2830,6 @@ static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
>   		}
>   		break;
>   	}
> -	default:
> -		r = -EINVAL;
>   	}
>   
>   out_unlock:
> @@ -2852,6 +2839,29 @@ static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
>   	return r;
>   }
>   
> +static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
> +{
> +	/*
> +	 * This is technically a heuristic only, if the kvm->lock is not
> +	 * taken, it is not guaranteed that the vm is/remains non-protected.
> +	 * This is ok from a kernel perspective, wrongdoing is detected
> +	 * on the access, -EFAULT is returned and the vm may crash the
> +	 * next time it accesses the memory in question.
> +	 * There is no sane usecase to do switching and a memop on two
> +	 * different CPUs at the same time.
> +	 */
> +	if (kvm_s390_pv_get_handle(kvm))
> +		return -EINVAL;
> +
> +	switch (mop->op) {
> +	case KVM_S390_MEMOP_ABSOLUTE_READ:
> +	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
> +		return kvm_s390_vm_mem_op_abs(kvm, mop);
> +	default:
> +		return -EINVAL;
> +	}
> +}
> +
>   long kvm_arch_vm_ioctl(struct file *filp,
>   		       unsigned int ioctl, unsigned long arg)
>   {
Patch

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0367c1a7e69a..707967a296f1 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2779,7 +2779,7 @@ static int mem_op_validate_common(struct kvm_s390_mem_op *mop, u64 supported_fla
 	return 0;
 }
 
-static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
+static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
 {
 	void __user *uaddr = (void __user *)mop->buf;
 	void *tmpbuf = NULL;
@@ -2790,17 +2790,6 @@ static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
 	if (r)
 		return r;
 
-	/*
-	 * This is technically a heuristic only, if the kvm->lock is not
-	 * taken, it is not guaranteed that the vm is/remains non-protected.
-	 * This is ok from a kernel perspective, wrongdoing is detected
-	 * on the access, -EFAULT is returned and the vm may crash the
-	 * next time it accesses the memory in question.
-	 * There is no sane usecase to do switching and a memop on two
-	 * different CPUs at the same time.
-	 */
-	if (kvm_s390_pv_get_handle(kvm))
-		return -EINVAL;
 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
 		tmpbuf = vmalloc(mop->size);
 		if (!tmpbuf)
@@ -2841,8 +2830,6 @@ static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
 		}
 		break;
 	}
-	default:
-		r = -EINVAL;
 	}
 
 out_unlock:
@@ -2852,6 +2839,29 @@ static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
 	return r;
 }
 
+static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
+{
+	/*
+	 * This is technically a heuristic only, if the kvm->lock is not
+	 * taken, it is not guaranteed that the vm is/remains non-protected.
+	 * This is ok from a kernel perspective, wrongdoing is detected
+	 * on the access, -EFAULT is returned and the vm may crash the
+	 * next time it accesses the memory in question.
+	 * There is no sane usecase to do switching and a memop on two
+	 * different CPUs at the same time.
+	 */
+	if (kvm_s390_pv_get_handle(kvm))
+		return -EINVAL;
+
+	switch (mop->op) {
+	case KVM_S390_MEMOP_ABSOLUTE_READ:
+	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
+		return kvm_s390_vm_mem_op_abs(kvm, mop);
+	default:
+		return -EINVAL;
+	}
+}
+
 long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg)
 {
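
For reference, a rough userspace sketch of how such a vm memop might be issued. It uses only the UAPI names from <linux/kvm.h> (struct kvm_s390_mem_op, KVM_S390_MEM_OP, KVM_S390_MEMOP_ABSOLUTE_READ); vm_fd (the fd returned by KVM_CREATE_VM), gaddr and the buffer are caller-provided placeholders:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read @len bytes of guest absolute memory at @gaddr through the VM fd.
 * Returns 0 on success, -1 with errno set on error.
 */
static int read_guest_abs(int vm_fd, unsigned long gaddr, void *buf, unsigned int len)
{
	struct kvm_s390_mem_op mop;

	memset(&mop, 0, sizeof(mop));
	mop.gaddr = gaddr;                      /* guest absolute address */
	mop.size  = len;
	mop.op    = KVM_S390_MEMOP_ABSOLUTE_READ;
	mop.buf   = (unsigned long)buf;         /* userspace buffer to fill */
	/* flags left 0: plain access, no storage key checking */

	return ioctl(vm_fd, KVM_S390_MEM_OP, &mop);
}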