@@ -286,6 +286,7 @@
#define CSR_STVEC 0x105
#define CSR_SCOUNTEREN 0x106
#define CSR_SENVCFG 0x10a
+#define CSR_SSTATEEN0 0x10c
#define CSR_SSCRATCH 0x140
#define CSR_SEPC 0x141
#define CSR_SCAUSE 0x142
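The first hunk (in the CSR header, presumably arch/riscv/include/asm/csr.h) adds the encoding of the supervisor-level sstateen0 register, 0x10c, alongside the other supervisor CSR numbers. sstateen0 comes from the Smstateen extension: each bit gates whether the next-lower privilege level may access a given piece of state. As a minimal, hedged sketch of how such a define is consumed by the kernel's generic CSR accessors (the written value is illustrative only, not part of this patch):

/* Sketch only: deny user-level access to all sstateen0-gated state,
 * then read the register back. csr_read()/csr_write() are the usual
 * asm/csr.h accessors.
 */
csr_write(CSR_SSTATEEN0, 0);
unsigned long val = csr_read(CSR_SSTATEEN0);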
@@ -170,6 +170,10 @@ struct kvm_vcpu_config {
u64 hstateen0;
};

+struct kvm_vcpu_smstateen_csr {
+ unsigned long sstateen0;
+};
+
struct kvm_vcpu_arch {
/* VCPU ran at least once */
bool ran_atleast_once;
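The new kvm_vcpu_smstateen_csr wrapper holds the guest's shadow copy of sstateen0. Keeping it in its own struct rather than as a bare field in kvm_vcpu_arch leaves room for sstateen1..sstateen3 if support for the remaining Smstateen registers is added later. A hedged sketch of a reset-style helper using the new field (hypothetical helper name, not part of this diff):

/* Hypothetical sketch: start the guest with all sstateen0 bits clear,
 * so accesses to gated state from the lower privilege level are
 * disallowed until the guest kernel enables them.
 */
static void kvm_riscv_vcpu_smstateen_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.smstateen_csr.sstateen0 = 0;
}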
@@ -190,6 +194,7 @@ struct kvm_vcpu_arch {
unsigned long host_stvec;
unsigned long host_scounteren;
unsigned long host_senvcfg;
+ unsigned long host_sstateen0;

/* CPU context of Host */
struct kvm_cpu_context host_context;
@@ -200,6 +205,9 @@ struct kvm_vcpu_arch {
/* CPU CSR context of Guest VCPU */
struct kvm_vcpu_csr guest_csr;

+ /* CPU Smstateen CSR context of Guest VCPU */
+ struct kvm_vcpu_smstateen_csr smstateen_csr;
+
/* CPU context upon Guest VCPU reset */
struct kvm_cpu_context guest_reset_context;
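These two kvm_vcpu_arch hunks mirror the existing senvcfg handling directly above each addition: host_sstateen0 stashes the host's register value while the guest runs, and smstateen_csr carries the guest's view of the register between world switches. A hedged sketch of how the guest shadow could be consulted during emulation (hypothetical helper; the bit parameter would be one of the SMSTATEEN0_* masks from the arch headers):

/* Hypothetical sketch: a guest access to some sstateen0-gated state
 * would be permitted only if the corresponding shadow bit is set.
 */
static bool guest_sstateen0_allows(struct kvm_vcpu *vcpu, unsigned long bit)
{
	return (vcpu->arch.smstateen_csr.sstateen0 & bit) != 0;
}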
@@ -603,16 +603,28 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)

static __always_inline void kvm_riscv_vcpu_swap_in_guest_state(struct kvm_vcpu *vcpu)
{
+ struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+ struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg);
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
+ (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
+ vcpu->arch.host_sstateen0 = csr_swap(CSR_SSTATEEN0,
+ smcsr->sstateen0);
}

static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *vcpu)
{
+ struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+ struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg);
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
+ (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
+ smcsr->sstateen0 = csr_swap(CSR_SSTATEEN0,
+ vcpu->arch.host_sstateen0);
}

/*
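Taken together, the two swap helpers bracket guest execution: kvm_riscv_vcpu_swap_in_guest_state() installs the guest's sstateen0 shadow and saves the host value in a single csr_swap(), and kvm_riscv_vcpu_swap_in_host_state() reverses the exchange on exit, capturing any writes the guest made. The swap is guarded twice: the host must implement Smstateen (riscv_has_extension_unlikely()), and the VCPU's hstateen0 configuration must delegate sstateen0 to the guest (SMSTATEEN0_SSTATEEN0); if either check fails, the guest cannot reach the register, so there is nothing to exchange. A hedged sketch of the assumed ordering around the low-level world switch (the call sites themselves are not part of this diff):

/* Sketch of assumed call sites in the vcpu run loop: */
kvm_riscv_vcpu_swap_in_guest_state(vcpu);
...
__kvm_riscv_switch_to(&vcpu->arch);	/* enter the guest */
...
kvm_riscv_vcpu_swap_in_host_state(vcpu);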