
[v13,07/30] LoongArch: KVM: Implement vcpu run interface

Message ID: 20230609090832.2131037-8-zhaotianrui@loongson.cn
State: New, archived
Series: Add KVM LoongArch support

Commit Message

zhaotianrui June 9, 2023, 9:08 a.m. UTC
Implement the vCPU run interface: handle MMIO and IOCSR read faults,
deliver pending interrupts, and drop the FPU state (lose_fpu()) before
the vCPU enters the guest.

Signed-off-by: Tianrui Zhao <zhaotianrui@loongson.cn>
---
 arch/loongarch/kvm/vcpu.c | 83 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 83 insertions(+)
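
For context, a minimal sketch of the userspace half of this flow, assuming
the generic KVM ioctl API (the vcpu_fd and run pointer come from the usual
KVM_CREATE_VCPU + mmap() setup): after a KVM_EXIT_MMIO read exit, the VMM
fills run->mmio.data and calls KVM_RUN again, at which point
kvm_arch_vcpu_ioctl_run() completes the read via _kvm_complete_mmio_read()
before re-entering the guest. KVM_EXIT_LOONGARCH_IOCSR reads follow the
same pattern via _kvm_complete_iocsr_read().

/*
 * Minimal VMM run-loop sketch; run is the mmap()ed kvm_run region of
 * vcpu_fd. Error handling and real device emulation are omitted.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return;		/* e.g. -EINTR from a pending signal */

		switch (run->exit_reason) {
		case KVM_EXIT_MMIO:
			if (!run->mmio.is_write) {
				/* A real VMM would read the emulated device;
				 * a dummy value stands in here. */
				uint64_t val = 0;
				memcpy(run->mmio.data, &val, run->mmio.len);
			}
			break;		/* next KVM_RUN completes the read */
		default:
			return;		/* unhandled exit reason */
		}
	}
}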

Comments

bibo, mao June 13, 2023, 12:55 p.m. UTC | #1
Reviewed-by: Bibo Mao <maobibo@loongson.cn>


Regards
Bibo, Mao

On 2023/6/9 17:08, Tianrui Zhao wrote:
> Implement the vCPU run interface: handle MMIO and IOCSR read faults,
> deliver pending interrupts, and drop the FPU state (lose_fpu()) before
> the vCPU enters the guest.

Patch

diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 24b5b00266a1..eba5c07b8be3 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -17,6 +17,41 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 	return 0;
 }
 
+/* Returns 1 if the guest TLB may be clobbered */
+static int _kvm_check_requests(struct kvm_vcpu *vcpu)
+{
+	int ret = 0;
+
+	if (!kvm_request_pending(vcpu))
+		return 0;
+
+	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
+		/* Drop vpid for this vCPU */
+		vcpu->arch.vpid = 0;
+		/* This will clobber guest TLB contents too */
+		ret = 1;
+	}
+
+	return ret;
+}
+
+static void kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Handle the vcpu timer, deliver interrupts, check pending
+	 * requests and check the vmid before the vcpu enters the guest
+	 */
+	kvm_acquire_timer(vcpu);
+	_kvm_deliver_intr(vcpu);
+	/* make sure the vcpu mode has been written before checking requests */
+	smp_store_mb(vcpu->mode, IN_GUEST_MODE);
+	_kvm_check_requests(vcpu);
+	_kvm_check_vmid(vcpu);
+	vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
+	/* clear KVM_LARCH_CSR as CSRs will change when entering the guest */
+	vcpu->arch.aux_inuse &= ~KVM_LARCH_CSR;
+}
+
 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
 	unsigned long timer_hz;
@@ -86,3 +121,51 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 			context->last_vcpu = NULL;
 	}
 }
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+{
+	int r = -EINTR;
+	struct kvm_run *run = vcpu->run;
+
+	vcpu_load(vcpu);
+
+	kvm_sigset_activate(vcpu);
+
+	if (vcpu->mmio_needed) {
+		if (!vcpu->mmio_is_write)
+			_kvm_complete_mmio_read(vcpu, run);
+		vcpu->mmio_needed = 0;
+	}
+
+	if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
+		if (!run->iocsr_io.is_write)
+			_kvm_complete_iocsr_read(vcpu, run);
+	}
+
+	/* clear exit_reason */
+	run->exit_reason = KVM_EXIT_UNKNOWN;
+	if (run->immediate_exit)
+		goto out;
+
+	lose_fpu(1);
+
+	local_irq_disable();
+	guest_timing_enter_irqoff();
+
+	kvm_pre_enter_guest(vcpu);
+	trace_kvm_enter(vcpu);
+
+	guest_state_enter_irqoff();
+	r = kvm_loongarch_ops->enter_guest(run, vcpu);
+
+	/* guest_state_exit_irqoff() already done.  */
+	trace_kvm_out(vcpu);
+	guest_timing_exit_irqoff();
+	local_irq_enable();
+
+out:
+	kvm_sigset_deactivate(vcpu);
+
+	vcpu_put(vcpu);
+	return r;
+}
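
One non-obvious detail above: the smp_store_mb(vcpu->mode, IN_GUEST_MODE)
in kvm_pre_enter_guest() orders the mode store before the request check,
pairing with the generic KVM request/kick path on the remote side. A hedged
sketch of that remote side (request_tlb_flush() is a hypothetical caller
added for illustration; kvm_make_request() and kvm_vcpu_kick() are the real
generic KVM helpers):

#include <linux/kvm_host.h>

/*
 * Hypothetical remote caller, sketched for illustration. The request bit
 * is set first; kvm_vcpu_kick() then reads vcpu->mode to decide whether
 * an IPI is needed. Since kvm_pre_enter_guest() stores IN_GUEST_MODE with
 * a full barrier before _kvm_check_requests(), either the vCPU observes
 * the request before entering the guest, or the requester observes
 * IN_GUEST_MODE and kicks the vCPU out to re-check its requests.
 */
static void request_tlb_flush(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);	/* set the request bit */
	kvm_vcpu_kick(vcpu);	/* IPI only if the vCPU is in guest mode */
}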