diff mbox series

[5/7] KVM: arm64: Move 'invalid syndrome' logic out of io_mem_abort()

Message ID 20200724143506.17772-6-will@kernel.org (mailing list archive)
State New, archived
Headers show
Series KVM: arm64: Fixes to early stage-2 fault handling | expand

Commit Message

Will Deacon July 24, 2020, 2:35 p.m. UTC
In preparation for handling stage-2 faults on stage-1 page-table walks
earlier, move the 'invalid syndrome' logic out of io_mem_abort() and
into its own function, which can be called from kvm_handle_guest_abort()
directly.

Cc: Marc Zyngier <maz@kernel.org>
Cc: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/kvm/mmio.c | 22 ----------------------
 arch/arm64/kvm/mmu.c  | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 32 insertions(+), 22 deletions(-)

Comments

Marc Zyngier July 26, 2020, 11:55 a.m. UTC | #1
On Fri, 24 Jul 2020 15:35:04 +0100,
Will Deacon <will@kernel.org> wrote:
> 
> In preparation for handling stage-2 faults on stage-1 page-table walks
> earlier, move the 'invalid syndrome' logic out of io_mem_abort() and
> into its own function, which can be called from kvm_handle_guest_abort()
> directly.
> 
> Cc: Marc Zyngier <maz@kernel.org>
> Cc: Quentin Perret <qperret@google.com>
> Signed-off-by: Will Deacon <will@kernel.org>
> ---
>  arch/arm64/kvm/mmio.c | 22 ----------------------
>  arch/arm64/kvm/mmu.c  | 32 ++++++++++++++++++++++++++++++++
>  2 files changed, 32 insertions(+), 22 deletions(-)
> 
> diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c
> index b54ea5aa6c06..45a630e480e1 100644
> --- a/arch/arm64/kvm/mmio.c
> +++ b/arch/arm64/kvm/mmio.c
> @@ -136,28 +136,6 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
>  	int len;
>  	u8 data_buf[8];
>  
> -	/*
> -	 * No valid syndrome? Ask userspace for help if it has
> -	 * volunteered to do so, and bail out otherwise.
> -	 */
> -	if (!kvm_vcpu_dabt_isvalid(vcpu)) {
> -		if (vcpu->kvm->arch.return_nisv_io_abort_to_user) {
> -			run->exit_reason = KVM_EXIT_ARM_NISV;
> -			run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
> -			run->arm_nisv.fault_ipa = fault_ipa;
> -			return 0;
> -		}
> -
> -		kvm_pr_unimpl("Data abort outside memslots with no valid syndrome info\n");
> -		return -ENOSYS;
> -	}
> -
> -	/* Page table accesses IO mem: tell guest to fix its TTBR */
> -	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
> -		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
> -		return 1;
> -	}
> -
>  	/*
>  	 * Prepare MMIO operation. First decode the syndrome data we get
>  	 * from the CPU. Then try if some in-kernel emulation feels
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 73e62d360a36..adb933ecd177 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -2046,6 +2046,20 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
>  		kvm_set_pfn_accessed(pfn);
>  }
>  
> +static int handle_error_invalid_dabt(struct kvm_vcpu *vcpu, struct kvm_run *run,

Nit: why the "_error_"? There isn't any error here, only an awkward
part of the architecture. I'd rather see something like
handle_nisv_dabt(), which matches what this function actually does.

> +				     phys_addr_t fault_ipa)
> +{
> +	if (!vcpu->kvm->arch.return_nisv_io_abort_to_user) {
> +		kvm_pr_unimpl("Data abort outside memslots with no valid syndrome info\n");
> +		return -ENOSYS;
> +	}
> +
> +	run->exit_reason = KVM_EXIT_ARM_NISV;
> +	run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
> +	run->arm_nisv.fault_ipa = fault_ipa;
> +	return 0;
> +}
> +
>  /**
>   * kvm_handle_guest_abort - handles all 2nd stage aborts
>   * @vcpu:	the VCPU pointer
> @@ -2133,6 +2147,21 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
>  		 * of the page size.
>  		 */
>  		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
> +
> +		/*
> +		 * No valid syndrome? Ask userspace for help if it has
> +		 * volunteered to do so, and bail out otherwise.
> +		 */
> +		if (!kvm_vcpu_dabt_isvalid(vcpu)) {
> +			ret = handle_error_invalid_dabt(vcpu, run, fault_ipa);
> +			goto out_unlock;
> +		}
> +
> +		if (kvm_vcpu_dabt_iss1tw(vcpu)) {
> +			ret = -ENXIO;
> +			goto out;
> +		}
> +
>  		ret = io_mem_abort(vcpu, run, fault_ipa);
>  		goto out_unlock;
>  	}
> @@ -2153,6 +2182,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
>  	if (ret == -ENOEXEC) {
>  		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
>  		ret = 1;
> +	} else if (ret == -ENXIO) {
> +		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
> +		ret = 1;
>  	}
>  out_unlock:
>  	srcu_read_unlock(&vcpu->kvm->srcu, idx);
> -- 
> 2.28.0.rc0.142.g3c755180ce-goog
> 
> 

Otherwise looks OK.

	M.
Will Deacon July 27, 2020, 10:31 a.m. UTC | #2
On Sun, Jul 26, 2020 at 12:55:16PM +0100, Marc Zyngier wrote:
> On Fri, 24 Jul 2020 15:35:04 +0100,
> Will Deacon <will@kernel.org> wrote:
> > diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> > index 73e62d360a36..adb933ecd177 100644
> > --- a/arch/arm64/kvm/mmu.c
> > +++ b/arch/arm64/kvm/mmu.c
> > @@ -2046,6 +2046,20 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
> >  		kvm_set_pfn_accessed(pfn);
> >  }
> >  
> > +static int handle_error_invalid_dabt(struct kvm_vcpu *vcpu, struct kvm_run *run,
> 
> Nit: why the "_error_"? There isn't any error here, only an awkward
> part of the architecture. I'd rather see something like
> handle_nisv_dabt(), which matches what this function actually does.

I chose "_error_" because this handles the case when kvm_is_error_hva() is
true (but I agree that "error" is misleading in both cases).

Will
diff mbox series

Patch

diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c
index b54ea5aa6c06..45a630e480e1 100644
--- a/arch/arm64/kvm/mmio.c
+++ b/arch/arm64/kvm/mmio.c
@@ -136,28 +136,6 @@  int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	int len;
 	u8 data_buf[8];
 
-	/*
-	 * No valid syndrome? Ask userspace for help if it has
-	 * volunteered to do so, and bail out otherwise.
-	 */
-	if (!kvm_vcpu_dabt_isvalid(vcpu)) {
-		if (vcpu->kvm->arch.return_nisv_io_abort_to_user) {
-			run->exit_reason = KVM_EXIT_ARM_NISV;
-			run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
-			run->arm_nisv.fault_ipa = fault_ipa;
-			return 0;
-		}
-
-		kvm_pr_unimpl("Data abort outside memslots with no valid syndrome info\n");
-		return -ENOSYS;
-	}
-
-	/* Page table accesses IO mem: tell guest to fix its TTBR */
-	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
-		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-		return 1;
-	}
-
 	/*
 	 * Prepare MMIO operation. First decode the syndrome data we get
 	 * from the CPU. Then try if some in-kernel emulation feels
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 73e62d360a36..adb933ecd177 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -2046,6 +2046,20 @@  static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 		kvm_set_pfn_accessed(pfn);
 }
 
+static int handle_error_invalid_dabt(struct kvm_vcpu *vcpu, struct kvm_run *run,
+				     phys_addr_t fault_ipa)
+{
+	if (!vcpu->kvm->arch.return_nisv_io_abort_to_user) {
+		kvm_pr_unimpl("Data abort outside memslots with no valid syndrome info\n");
+		return -ENOSYS;
+	}
+
+	run->exit_reason = KVM_EXIT_ARM_NISV;
+	run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
+	run->arm_nisv.fault_ipa = fault_ipa;
+	return 0;
+}
+
 /**
  * kvm_handle_guest_abort - handles all 2nd stage aborts
  * @vcpu:	the VCPU pointer
@@ -2133,6 +2147,21 @@  int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		 * of the page size.
 		 */
 		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
+
+		/*
+		 * No valid syndrome? Ask userspace for help if it has
+		 * volunteered to do so, and bail out otherwise.
+		 */
+		if (!kvm_vcpu_dabt_isvalid(vcpu)) {
+			ret = handle_error_invalid_dabt(vcpu, run, fault_ipa);
+			goto out_unlock;
+		}
+
+		if (kvm_vcpu_dabt_iss1tw(vcpu)) {
+			ret = -ENXIO;
+			goto out;
+		}
+
 		ret = io_mem_abort(vcpu, run, fault_ipa);
 		goto out_unlock;
 	}
@@ -2153,6 +2182,9 @@  int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (ret == -ENOEXEC) {
 		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 		ret = 1;
+	} else if (ret == -ENXIO) {
+		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+		ret = 1;
 	}
 out_unlock:
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);