diff mbox series

[v2,5/6] RISCV: KVM: Add sstateen0 context save/restore

Message ID 20230721075439.454473-6-mchitale@ventanamicro.com (mailing list archive)
State Superseded
Headers show
Series RISC-V KVM Smstateen | expand

Checks

Context Check Description
conchuod/cover_letter success Series has a cover letter
conchuod/tree_selection success Guessed tree name to be for-next at HEAD 471aba2e4760
conchuod/fixes_present success Fixes tag not required for -next series
conchuod/maintainers_pattern success MAINTAINERS pattern errors before the patch: 4 and now 4
conchuod/verify_signedoff success Signed-off-by tag matches author and committer
conchuod/kdoc success Errors and warnings before: 0 this patch: 0
conchuod/build_rv64_clang_allmodconfig success Errors and warnings before: 2810 this patch: 2810
conchuod/module_param success Was 0 now: 0
conchuod/build_rv64_gcc_allmodconfig success Errors and warnings before: 15878 this patch: 15877
conchuod/build_rv32_defconfig success Build OK
conchuod/dtb_warn_rv64 success Errors and warnings before: 3 this patch: 3
conchuod/header_inline success No static functions without inline keyword in header files
conchuod/checkpatch success total: 0 errors, 0 warnings, 0 checks, 57 lines checked
conchuod/build_rv64_nommu_k210_defconfig success Build OK
conchuod/verify_fixes success No Fixes tag
conchuod/build_rv64_nommu_virt_defconfig success Build OK

Commit Message

Mayuresh Chitale July 21, 2023, 7:54 a.m. UTC
Define sstateen0 and add sstateen0 save/restore for guest VCPUs.

Signed-off-by: Mayuresh Chitale <mchitale@ventanamicro.com>
---
 arch/riscv/include/asm/csr.h      |  1 +
 arch/riscv/include/asm/kvm_host.h |  8 ++++++++
 arch/riscv/kvm/vcpu.c             | 10 ++++++++++
 3 files changed, 19 insertions(+)

Comments

Andrew Jones July 21, 2023, 9:04 a.m. UTC | #1
On Fri, Jul 21, 2023 at 01:24:38PM +0530, Mayuresh Chitale wrote:
> Define sstateen0 and add sstateen0 save/restore for guest VCPUs.
> 
> Signed-off-by: Mayuresh Chitale <mchitale@ventanamicro.com>
> ---
>  arch/riscv/include/asm/csr.h      |  1 +
>  arch/riscv/include/asm/kvm_host.h |  8 ++++++++
>  arch/riscv/kvm/vcpu.c             | 10 ++++++++++
>  3 files changed, 19 insertions(+)
> 
> diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
> index b52270278733..5168f37d8e75 100644
> --- a/arch/riscv/include/asm/csr.h
> +++ b/arch/riscv/include/asm/csr.h
> @@ -286,6 +286,7 @@
>  #define CSR_STVEC		0x105
>  #define CSR_SCOUNTEREN		0x106
>  #define CSR_SENVCFG		0x10a
> +#define CSR_SSTATEEN0		0x10c
>  #define CSR_SSCRATCH		0x140
>  #define CSR_SEPC		0x141
>  #define CSR_SCAUSE		0x142
> diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
> index c3cc0cb39cf8..c9837772b109 100644
> --- a/arch/riscv/include/asm/kvm_host.h
> +++ b/arch/riscv/include/asm/kvm_host.h
> @@ -170,6 +170,10 @@ struct kvm_vcpu_config {
>  	u64 hstateen0;
>  };
>  
> +struct kvm_vcpu_smstateen_csr {
> +	unsigned long sstateen0;
> +};
> +
>  struct kvm_vcpu_arch {
>  	/* VCPU ran at least once */
>  	bool ran_atleast_once;
> @@ -190,6 +194,7 @@ struct kvm_vcpu_arch {
>  	unsigned long host_stvec;
>  	unsigned long host_scounteren;
>  	unsigned long host_senvcfg;
> +	unsigned long host_sstateen0;
>  
>  	/* CPU context of Host */
>  	struct kvm_cpu_context host_context;
> @@ -200,6 +205,9 @@ struct kvm_vcpu_arch {
>  	/* CPU CSR context of Guest VCPU */
>  	struct kvm_vcpu_csr guest_csr;
>  
> +	/* CPU Smstateen CSR context of Guest VCPU */
> +	struct kvm_vcpu_smstateen_csr smstateen_csr;
> +
>  	/* CPU context upon Guest VCPU reset */
>  	struct kvm_cpu_context guest_reset_context;
>  
> diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
> index 37f1ed70d782..ae750decbefe 100644
> --- a/arch/riscv/kvm/vcpu.c
> +++ b/arch/riscv/kvm/vcpu.c
> @@ -1138,14 +1138,24 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
>   */
>  static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
>  {
> +	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
>  	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
> +	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
>  
>  	vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg);
> +	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
> +	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
> +		vcpu->arch.host_sstateen0 = csr_swap(CSR_SSTATEEN0,
> +						     smcsr->sstateen0);
>  	guest_state_enter_irqoff();
>  	__kvm_riscv_switch_to(&vcpu->arch);
>  	vcpu->arch.last_exit_cpu = vcpu->cpu;
>  	guest_state_exit_irqoff();
>  	csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg);
> +	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
> +	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
> +		smcsr->sstateen0 = csr_swap(CSR_SSTATEEN0,
> +					    vcpu->arch.host_sstateen0);
>  }

It might be nice to keep kvm_riscv_vcpu_enter_exit() "clean" by adding
a couple of functions. Something like

 static __always_inline void kvm_riscv_vcpu_swap_in_guest_state(struct kvm_vcpu *vcpu)
 {
   struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
   struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
   struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

   vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg);
   if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
       (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
          vcpu->arch.host_sstateen0 = csr_swap(CSR_SSTATEEN0,
                                               smcsr->sstateen0);
 }

 static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *vcpu)
 {
   struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
   struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
   struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

   csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg);
   if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
       (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
           smcsr->sstateen0 = csr_swap(CSR_SSTATEEN0,
                                       vcpu->arch.host_sstateen0);
 }

 static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 {
    kvm_riscv_vcpu_swap_in_guest_state(vcpu);
    guest_state_enter_irqoff();
    __kvm_riscv_switch_to(&vcpu->arch);
    vcpu->arch.last_exit_cpu = vcpu->cpu;
    guest_state_exit_irqoff();
    kvm_riscv_vcpu_swap_in_host_state(vcpu);
 }

>  
>  int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
> -- 
> 2.34.1
>

Either way,

Reviewed-by: Andrew Jones <ajones@ventanamicro.com>

Thanks,
drew
diff mbox series

Patch

diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index b52270278733..5168f37d8e75 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -286,6 +286,7 @@ 
 #define CSR_STVEC		0x105
 #define CSR_SCOUNTEREN		0x106
 #define CSR_SENVCFG		0x10a
+#define CSR_SSTATEEN0		0x10c
 #define CSR_SSCRATCH		0x140
 #define CSR_SEPC		0x141
 #define CSR_SCAUSE		0x142
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index c3cc0cb39cf8..c9837772b109 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -170,6 +170,10 @@  struct kvm_vcpu_config {
 	u64 hstateen0;
 };
 
+struct kvm_vcpu_smstateen_csr {
+	unsigned long sstateen0;
+};
+
 struct kvm_vcpu_arch {
 	/* VCPU ran at least once */
 	bool ran_atleast_once;
@@ -190,6 +194,7 @@  struct kvm_vcpu_arch {
 	unsigned long host_stvec;
 	unsigned long host_scounteren;
 	unsigned long host_senvcfg;
+	unsigned long host_sstateen0;
 
 	/* CPU context of Host */
 	struct kvm_cpu_context host_context;
@@ -200,6 +205,9 @@  struct kvm_vcpu_arch {
 	/* CPU CSR context of Guest VCPU */
 	struct kvm_vcpu_csr guest_csr;
 
+	/* CPU Smstateen CSR context of Guest VCPU */
+	struct kvm_vcpu_smstateen_csr smstateen_csr;
+
 	/* CPU context upon Guest VCPU reset */
 	struct kvm_cpu_context guest_reset_context;
 
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 37f1ed70d782..ae750decbefe 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -1138,14 +1138,24 @@  static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
  */
 static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 {
+	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
 
 	vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg);
+	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
+	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
+		vcpu->arch.host_sstateen0 = csr_swap(CSR_SSTATEEN0,
+						     smcsr->sstateen0);
 	guest_state_enter_irqoff();
 	__kvm_riscv_switch_to(&vcpu->arch);
 	vcpu->arch.last_exit_cpu = vcpu->cpu;
 	guest_state_exit_irqoff();
 	csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg);
+	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
+	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
+		smcsr->sstateen0 = csr_swap(CSR_SSTATEEN0,
+					    vcpu->arch.host_sstateen0);
 }
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)