
[v3,09/13] RISC-V: KVM: Implement SBI STA extension

Message ID 20231217204019.36492-24-ajones@ventanamicro.com (mailing list archive)
State Superseded
Series RISC-V: Add steal-time support

Checks

Context                   Check  Description
conchuod/vmtest-fixes-PR  fail   merge-conflict

Commit Message

Andrew Jones Dec. 17, 2023, 8:40 p.m. UTC
Add a select SCHED_INFO to the KVM config in order to get run_delay
info. Then implement SBI STA's set-steal-time-shmem function and
kvm_riscv_vcpu_record_steal_time() to provide the steal-time info
to guests.

Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Andrew Jones <ajones@ventanamicro.com>
---
 arch/riscv/kvm/Kconfig        |  1 +
 arch/riscv/kvm/vcpu_sbi_sta.c | 96 ++++++++++++++++++++++++++++++++++-
 2 files changed, 95 insertions(+), 2 deletions(-)
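
For context, the shared-memory area registered by set-steal-time-shmem is the 64-byte sbi_sta_struct added earlier in this series. A sketch of that layout, with field semantics as described in the SBI STA specification:

struct sbi_sta_struct {
	__le32 sequence;	/* bumped to odd before, and even after, a steal update */
	__le32 flags;
	__le64 steal;		/* accumulated steal time, in nanoseconds */
	u8 preempted;
	u8 pad[47];		/* pad the structure to 64 bytes */
} __packed;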

Comments

Atish Patra Dec. 19, 2023, 9:52 p.m. UTC | #1
On Sun, Dec 17, 2023 at 12:40 PM Andrew Jones <ajones@ventanamicro.com> wrote:
>
> Add a select SCHED_INFO to the KVM config in order to get run_delay
> info. Then implement SBI STA's set-steal-time-shmem function and
> kvm_riscv_vcpu_record_steal_time() to provide the steal-time info
> to guests.
>
> Reviewed-by: Anup Patel <anup@brainfault.org>
> Signed-off-by: Andrew Jones <ajones@ventanamicro.com>
> ---
>  arch/riscv/kvm/Kconfig        |  1 +
>  arch/riscv/kvm/vcpu_sbi_sta.c | 96 ++++++++++++++++++++++++++++++++++-
>  2 files changed, 95 insertions(+), 2 deletions(-)
>
> diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig
> index dfc237d7875b..148e52b516cf 100644
> --- a/arch/riscv/kvm/Kconfig
> +++ b/arch/riscv/kvm/Kconfig
> @@ -32,6 +32,7 @@ config KVM
>         select KVM_XFER_TO_GUEST_WORK
>         select MMU_NOTIFIER
>         select PREEMPT_NOTIFIERS
> +       select SCHED_INFO
>         help
>           Support hosting virtualized guest machines.
>
> diff --git a/arch/riscv/kvm/vcpu_sbi_sta.c b/arch/riscv/kvm/vcpu_sbi_sta.c
> index 87bf1a5f05ce..01f09fe8c3b0 100644
> --- a/arch/riscv/kvm/vcpu_sbi_sta.c
> +++ b/arch/riscv/kvm/vcpu_sbi_sta.c
> @@ -6,9 +6,15 @@
>  #include <linux/kconfig.h>
>  #include <linux/kernel.h>
>  #include <linux/kvm_host.h>
> +#include <linux/mm.h>
> +#include <linux/sizes.h>
>
> +#include <asm/bug.h>
> +#include <asm/current.h>
>  #include <asm/kvm_vcpu_sbi.h>
> +#include <asm/page.h>
>  #include <asm/sbi.h>
> +#include <asm/uaccess.h>
>
>  void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
>  {
> @@ -19,14 +25,100 @@ void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
>  void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
>  {
>         gpa_t shmem = vcpu->arch.sta.shmem;
> +       u64 last_steal = vcpu->arch.sta.last_steal;
> +       u32 *sequence_ptr, sequence;
> +       u64 *steal_ptr, steal;
> +       unsigned long hva;
> +       gfn_t gfn;
>
>         if (shmem == INVALID_GPA)
>                 return;
> +
> +       /*
> +        * shmem is 64-byte aligned (see the enforcement in
> +        * kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct
> +        * is 64 bytes, so we know all its offsets are in the same page.
> +        */
> +       gfn = shmem >> PAGE_SHIFT;
> +       hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);
> +
> +       if (WARN_ON(kvm_is_error_hva(hva))) {
> +               vcpu->arch.sta.shmem = INVALID_GPA;
> +               return;
> +       }
> +
> +       sequence_ptr = (u32 *)(hva + offset_in_page(shmem) +
> +                              offsetof(struct sbi_sta_struct, sequence));
> +       steal_ptr = (u64 *)(hva + offset_in_page(shmem) +
> +                           offsetof(struct sbi_sta_struct, steal));
> +
> +       if (WARN_ON(get_user(sequence, sequence_ptr)))
> +               return;
> +
> +       sequence = le32_to_cpu(sequence);
> +       sequence += 1;
> +
> +       if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
> +               return;
> +
> +       if (!WARN_ON(get_user(steal, steal_ptr))) {
> +               steal = le64_to_cpu(steal);
> +               vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
> +               steal += vcpu->arch.sta.last_steal - last_steal;
> +               WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
> +       }
> +
> +       sequence += 1;
> +       WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr));
> +
> +       kvm_vcpu_mark_page_dirty(vcpu, gfn);
>  }
>
>  static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
>  {
> -       return SBI_ERR_FAILURE;
> +       struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
> +       unsigned long shmem_phys_lo = cp->a0;
> +       unsigned long shmem_phys_hi = cp->a1;
> +       u32 flags = cp->a2;
> +       struct sbi_sta_struct zero_sta = {0};
> +       unsigned long hva;
> +       bool writable;
> +       gpa_t shmem;
> +       int ret;
> +
> +       if (flags != 0)
> +               return SBI_ERR_INVALID_PARAM;
> +
> +       if (shmem_phys_lo == SBI_STA_SHMEM_DISABLE &&
> +           shmem_phys_hi == SBI_STA_SHMEM_DISABLE) {
> +               vcpu->arch.sta.shmem = INVALID_GPA;
> +               return 0;
> +       }
> +
> +       if (shmem_phys_lo & (SZ_64 - 1))
> +               return SBI_ERR_INVALID_PARAM;
> +
> +       shmem = shmem_phys_lo;
> +
> +       if (shmem_phys_hi != 0) {
> +               if (IS_ENABLED(CONFIG_32BIT))
> +                       shmem |= ((gpa_t)shmem_phys_hi << 32);
> +               else
> +                       return SBI_ERR_INVALID_ADDRESS;
> +       }
> +
> +       hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
> +       if (kvm_is_error_hva(hva) || !writable)
> +               return SBI_ERR_INVALID_ADDRESS;
> +
> +       ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
> +       if (ret)
> +               return SBI_ERR_FAILURE;
> +
> +       vcpu->arch.sta.shmem = shmem;
> +       vcpu->arch.sta.last_steal = current->sched_info.run_delay;
> +
> +       return 0;
>  }
>
>  static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
> @@ -52,7 +144,7 @@ static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
>
>  static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu)
>  {
> -       return 0;
> +       return !!sched_info_on();
>  }
>
>  const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
> --
> 2.43.0
>


Reviewed-by: Atish Patra <atishp@rivosinc.com>

Patch

diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig
index dfc237d7875b..148e52b516cf 100644
--- a/arch/riscv/kvm/Kconfig
+++ b/arch/riscv/kvm/Kconfig
@@ -32,6 +32,7 @@  config KVM
 	select KVM_XFER_TO_GUEST_WORK
 	select MMU_NOTIFIER
 	select PREEMPT_NOTIFIERS
+	select SCHED_INFO
 	help
 	  Support hosting virtualized guest machines.
 
diff --git a/arch/riscv/kvm/vcpu_sbi_sta.c b/arch/riscv/kvm/vcpu_sbi_sta.c
index 87bf1a5f05ce..01f09fe8c3b0 100644
--- a/arch/riscv/kvm/vcpu_sbi_sta.c
+++ b/arch/riscv/kvm/vcpu_sbi_sta.c
@@ -6,9 +6,15 @@ 
 #include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/kvm_host.h>
+#include <linux/mm.h>
+#include <linux/sizes.h>
 
+#include <asm/bug.h>
+#include <asm/current.h>
 #include <asm/kvm_vcpu_sbi.h>
+#include <asm/page.h>
 #include <asm/sbi.h>
+#include <asm/uaccess.h>
 
 void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
 {
@@ -19,14 +25,100 @@  void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
 void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
 {
 	gpa_t shmem = vcpu->arch.sta.shmem;
+	u64 last_steal = vcpu->arch.sta.last_steal;
+	u32 *sequence_ptr, sequence;
+	u64 *steal_ptr, steal;
+	unsigned long hva;
+	gfn_t gfn;
 
 	if (shmem == INVALID_GPA)
 		return;
+
+	/*
+	 * shmem is 64-byte aligned (see the enforcement in
+	 * kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct
+	 * is 64 bytes, so we know all its offsets are in the same page.
+	 */
+	gfn = shmem >> PAGE_SHIFT;
+	hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);
+
+	if (WARN_ON(kvm_is_error_hva(hva))) {
+		vcpu->arch.sta.shmem = INVALID_GPA;
+		return;
+	}
+
+	sequence_ptr = (u32 *)(hva + offset_in_page(shmem) +
+			       offsetof(struct sbi_sta_struct, sequence));
+	steal_ptr = (u64 *)(hva + offset_in_page(shmem) +
+			    offsetof(struct sbi_sta_struct, steal));
+
+	if (WARN_ON(get_user(sequence, sequence_ptr)))
+		return;
+
+	sequence = le32_to_cpu(sequence);
+	sequence += 1;
+
+	if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
+		return;
+
+	if (!WARN_ON(get_user(steal, steal_ptr))) {
+		steal = le64_to_cpu(steal);
+		vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
+		steal += vcpu->arch.sta.last_steal - last_steal;
+		WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
+	}
+
+	sequence += 1;
+	WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr));
+
+	kvm_vcpu_mark_page_dirty(vcpu, gfn);
 }
 
 static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
 {
-	return SBI_ERR_FAILURE;
+	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+	unsigned long shmem_phys_lo = cp->a0;
+	unsigned long shmem_phys_hi = cp->a1;
+	u32 flags = cp->a2;
+	struct sbi_sta_struct zero_sta = {0};
+	unsigned long hva;
+	bool writable;
+	gpa_t shmem;
+	int ret;
+
+	if (flags != 0)
+		return SBI_ERR_INVALID_PARAM;
+
+	if (shmem_phys_lo == SBI_STA_SHMEM_DISABLE &&
+	    shmem_phys_hi == SBI_STA_SHMEM_DISABLE) {
+		vcpu->arch.sta.shmem = INVALID_GPA;
+		return 0;
+	}
+
+	if (shmem_phys_lo & (SZ_64 - 1))
+		return SBI_ERR_INVALID_PARAM;
+
+	shmem = shmem_phys_lo;
+
+	if (shmem_phys_hi != 0) {
+		if (IS_ENABLED(CONFIG_32BIT))
+			shmem |= ((gpa_t)shmem_phys_hi << 32);
+		else
+			return SBI_ERR_INVALID_ADDRESS;
+	}
+
+	hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
+	if (kvm_is_error_hva(hva) || !writable)
+		return SBI_ERR_INVALID_ADDRESS;
+
+	ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
+	if (ret)
+		return SBI_ERR_FAILURE;
+
+	vcpu->arch.sta.shmem = shmem;
+	vcpu->arch.sta.last_steal = current->sched_info.run_delay;
+
+	return 0;
 }
 
 static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
@@ -52,7 +144,7 @@  static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 
 static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu)
 {
-	return 0;
+	return !!sched_info_on();
 }
 
 const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
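
For completeness, a hypothetical guest-side sketch (not part of this patch) showing how the shared-memory area could be registered and how the steal counter can be read consistently. The extension ID 0x535441 ("STA") and function ID 0 come from the SBI specification; the helper names and error handling below are illustrative only. The sequence field acts as a seqcount: kvm_riscv_vcpu_record_steal_time() above bumps it to an odd value before updating steal and back to an even value afterwards, so a reader retries whenever it sees an odd or changed value.

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/sbi.h>

#define STA_EID			0x535441	/* "STA" */
#define STA_SET_SHMEM_FID	0

/* Same 64-byte layout as the host-side sbi_sta_struct */
struct sta_shmem {
	__le32 sequence;
	__le32 flags;
	__le64 steal;
	u8 preempted;
	u8 pad[47];
} __packed __aligned(64);		/* set-shmem requires 64-byte alignment */

static struct sta_shmem sta_area;

/* Register the shared-memory area with the hypervisor (flags must be 0) */
static int sta_register_shmem(void)
{
	phys_addr_t pa = __pa(&sta_area);
	struct sbiret ret;

	ret = sbi_ecall(STA_EID, STA_SET_SHMEM_FID,
			lower_32_bits(pa), upper_32_bits(pa), 0, 0, 0, 0);
	return ret.error ? -ENODEV : 0;
}

/* Read the steal counter, retrying while the host is mid-update */
static u64 sta_read_steal(void)
{
	u32 seq;
	u64 steal;

	do {
		seq = le32_to_cpu(READ_ONCE(sta_area.sequence));
		virt_rmb();
		steal = le64_to_cpu(READ_ONCE(sta_area.steal));
		virt_rmb();
	} while ((seq & 1) ||
		 seq != le32_to_cpu(READ_ONCE(sta_area.sequence)));

	return steal;
}

The actual guest support is added by a separate patch in this series; the sketch only illustrates the protocol that the host-side code above implements.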