diff mbox series

[RFC,v2,17/22] i386/xen: handle VCPUOP_register_vcpu_time_info

Message ID 20221209095612.689243-18-dwmw2@infradead.org (mailing list archive)
State New, archived
Headers show
Series Xen HVM support under KVM | expand

Commit Message

David Woodhouse Dec. 9, 2022, 9:56 a.m. UTC
From: Joao Martins <joao.m.martins@oracle.com>

This is needed in order to support the Linux vdso when running as a Xen guest.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
 target/i386/cpu.h     |  1 +
 target/i386/kvm/kvm.c |  9 ++++++
 target/i386/machine.c |  4 ++-
 target/i386/xen.c     | 70 ++++++++++++++++++++++++++++++++++++-------
 4 files changed, 72 insertions(+), 12 deletions(-)

Comments

Paul Durrant Dec. 12, 2022, 3:34 p.m. UTC | #1
On 09/12/2022 09:56, David Woodhouse wrote:
> From: Joao Martins <joao.m.martins@oracle.com>
> 
> In order to support Linux vdso in Xen.
> 
> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
> Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
> ---
>   target/i386/cpu.h     |  1 +
>   target/i386/kvm/kvm.c |  9 ++++++
>   target/i386/machine.c |  4 ++-
>   target/i386/xen.c     | 70 ++++++++++++++++++++++++++++++++++++-------
>   4 files changed, 72 insertions(+), 12 deletions(-)
> 
> diff --git a/target/i386/cpu.h b/target/i386/cpu.h
> index 109b2e5669..96c2d0d5cb 100644
> --- a/target/i386/cpu.h
> +++ b/target/i386/cpu.h
> @@ -1790,6 +1790,7 @@ typedef struct CPUArchState {
>       struct kvm_nested_state *nested_state;
>       uint64_t xen_vcpu_info_gpa;
>       uint64_t xen_vcpu_info_default_gpa;
> +    uint64_t xen_vcpu_time_info_gpa;
>   #endif
>   #if defined(CONFIG_HVF)
>       HVFX86LazyFlags hvf_lflags;
> diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
> index fa45e2f99a..3f19fff21f 100644
> --- a/target/i386/kvm/kvm.c
> +++ b/target/i386/kvm/kvm.c
> @@ -1813,6 +1813,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
>   
>       env->xen_vcpu_info_gpa = UINT64_MAX;
>       env->xen_vcpu_info_default_gpa = UINT64_MAX;
> +    env->xen_vcpu_time_info_gpa = UINT64_MAX;

A few more candidates for using INVALID_GPA rather than UINT64_MAX.

>   
>       xen_version = kvm_arch_xen_version(MACHINE(qdev_get_machine()));
>       if (xen_version) {
> @@ -4744,6 +4745,14 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
>                   return ret;
>               }
>           }
> +
> +        gpa = x86_cpu->env.xen_vcpu_time_info_gpa;
> +        if (gpa != UINT64_MAX) {
> +            ret = kvm_xen_set_vcpu_attr(cpu, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO, gpa);
> +            if (ret < 0) {
> +                return ret;
> +            }
> +        }
>       }
>   #endif
>   
> diff --git a/target/i386/machine.c b/target/i386/machine.c
> index 104cd6047c..9acef102a3 100644
> --- a/target/i386/machine.c
> +++ b/target/i386/machine.c
> @@ -1263,7 +1263,8 @@ static bool xen_vcpu_needed(void *opaque)
>       CPUX86State *env = &cpu->env;
>   
>       return (env->xen_vcpu_info_gpa != UINT64_MAX ||
> -            env->xen_vcpu_info_default_gpa != UINT64_MAX);
> +            env->xen_vcpu_info_default_gpa != UINT64_MAX ||
> +            env->xen_vcpu_time_info_gpa != UINT64_MAX);
>   }
>   
>   static const VMStateDescription vmstate_xen_vcpu = {
> @@ -1274,6 +1275,7 @@ static const VMStateDescription vmstate_xen_vcpu = {
>       .fields = (VMStateField[]) {
>           VMSTATE_UINT64(env.xen_vcpu_info_gpa, X86CPU),
>           VMSTATE_UINT64(env.xen_vcpu_info_default_gpa, X86CPU),
> +        VMSTATE_UINT64(env.xen_vcpu_time_info_gpa, X86CPU),
>           VMSTATE_END_OF_LIST()
>       }
>   };
> diff --git a/target/i386/xen.c b/target/i386/xen.c
> index cd816bb711..427729ab4d 100644
> --- a/target/i386/xen.c
> +++ b/target/i386/xen.c
> @@ -21,28 +21,41 @@
>   #include "standard-headers/xen/hvm/hvm_op.h"
>   #include "standard-headers/xen/vcpu.h"
>   
> +static bool kvm_gva_to_gpa(CPUState *cs, uint64_t gva, uint64_t *gpa,
> +                           size_t *len, bool is_write)
> +{
> +        struct kvm_translation tr = {
> +            .linear_address = gva,
> +        };
> +
> +        if (len) {
> +                *len = TARGET_PAGE_SIZE - (gva & ~TARGET_PAGE_MASK);
> +        }
> +
> +        if (kvm_vcpu_ioctl(cs, KVM_TRANSLATE, &tr) || !tr.valid ||
> +            (is_write && !tr.writeable)) {
> +            return false;
> +        }
> +        *gpa = tr.physical_address;
> +        return true;
> +}
> +
>   static int kvm_gva_rw(CPUState *cs, uint64_t gva, void *_buf, size_t sz,
>                         bool is_write)
>   {
>       uint8_t *buf = (uint8_t *)_buf;
>       size_t i = 0, len = 0;
> -    int ret;
>   
>       for (i = 0; i < sz; i+= len) {
> -        struct kvm_translation tr = {
> -            .linear_address = gva + i,
> -        };
> +        uint64_t gpa;
>   
> -        len = TARGET_PAGE_SIZE - (tr.linear_address & ~TARGET_PAGE_MASK);
> +        if (!kvm_gva_to_gpa(cs, gva + i, &gpa, &len, is_write)) {
> +                return -EFAULT;
> +        }
>           if (len > sz)
>               len = sz;
>   
> -        ret = kvm_vcpu_ioctl(cs, KVM_TRANSLATE, &tr);
> -        if (ret || !tr.valid || (is_write && !tr.writeable)) {
> -            return -EFAULT;
> -        }
> -
> -        cpu_physical_memory_rw(tr.physical_address, buf + i, len, is_write);
> +        cpu_physical_memory_rw(gpa, buf + i, len, is_write);
>       }
>   
>       return 0;
> @@ -166,6 +179,17 @@ static void do_set_vcpu_info_gpa(CPUState *cs, run_on_cpu_data data)
>                             env->xen_vcpu_info_gpa);
>   }
>   
> +static void do_set_vcpu_time_info_gpa(CPUState *cs, run_on_cpu_data data)
> +{
> +    X86CPU *cpu = X86_CPU(cs);
> +    CPUX86State *env = &cpu->env;
> +
> +    env->xen_vcpu_time_info_gpa = data.host_ulong;
> +
> +    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
> +                          env->xen_vcpu_time_info_gpa);
> +}
> +
>   static int xen_set_shared_info(CPUState *cs, uint64_t gfn)
>   {
>       uint64_t gpa = gfn << TARGET_PAGE_BITS;
> @@ -258,6 +282,27 @@ static int vcpuop_register_vcpu_info(CPUState *cs, CPUState *target,
>       return 0;
>   }
>   
> +static int vcpuop_register_vcpu_time_info(CPUState *cs, CPUState *target,
> +                                          uint64_t arg)
> +{
> +    struct vcpu_register_time_memory_area tma;
> +    uint64_t gpa;
> +    size_t len;
> +
> +    if (kvm_copy_from_gva(cs, arg, &tma, sizeof(*tma.addr.v))) {
> +        return -EFAULT;
> +    }
> +
> +    if (!kvm_gva_to_gpa(cs, tma.addr.p, &gpa, &len, false) ||
> +        len < sizeof(tma)) {
> +        return -EFAULT;
> +    }

Xen stashes the GVA, not the GPA, and so it would be possible to 
register the same GVA on different vcpus to point at different areas of 
memory.

   Paul

> +
> +    async_run_on_cpu(target, do_set_vcpu_time_info_gpa,
> +                     RUN_ON_CPU_HOST_ULONG(gpa));
> +    return 0;
> +}
> +
>   static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
>                                     int cmd, int vcpu_id, uint64_t arg)
>   {
> @@ -266,6 +311,9 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
>       int err;
>   
>       switch (cmd) {
> +    case VCPUOP_register_vcpu_time_memory_area:
> +            err = vcpuop_register_vcpu_time_info(cs, dest, arg);
> +            break;
>       case VCPUOP_register_vcpu_info:
>               err = vcpuop_register_vcpu_info(cs, dest, arg);
>               break;
diff mbox series

Patch

diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 109b2e5669..96c2d0d5cb 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1790,6 +1790,7 @@  typedef struct CPUArchState {
     struct kvm_nested_state *nested_state;
     uint64_t xen_vcpu_info_gpa;
     uint64_t xen_vcpu_info_default_gpa;
+    uint64_t xen_vcpu_time_info_gpa;
 #endif
 #if defined(CONFIG_HVF)
     HVFX86LazyFlags hvf_lflags;
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index fa45e2f99a..3f19fff21f 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -1813,6 +1813,7 @@  int kvm_arch_init_vcpu(CPUState *cs)
 
     env->xen_vcpu_info_gpa = UINT64_MAX;
     env->xen_vcpu_info_default_gpa = UINT64_MAX;
+    env->xen_vcpu_time_info_gpa = UINT64_MAX;
 
     xen_version = kvm_arch_xen_version(MACHINE(qdev_get_machine()));
     if (xen_version) {
@@ -4744,6 +4745,14 @@  int kvm_arch_put_registers(CPUState *cpu, int level)
                 return ret;
             }
         }
+
+        gpa = x86_cpu->env.xen_vcpu_time_info_gpa;
+        if (gpa != UINT64_MAX) {
+            ret = kvm_xen_set_vcpu_attr(cpu, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO, gpa);
+            if (ret < 0) {
+                return ret;
+            }
+        }
     }
 #endif
 
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 104cd6047c..9acef102a3 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -1263,7 +1263,8 @@  static bool xen_vcpu_needed(void *opaque)
     CPUX86State *env = &cpu->env;
 
     return (env->xen_vcpu_info_gpa != UINT64_MAX ||
-            env->xen_vcpu_info_default_gpa != UINT64_MAX);
+            env->xen_vcpu_info_default_gpa != UINT64_MAX ||
+            env->xen_vcpu_time_info_gpa != UINT64_MAX);
 }
 
 static const VMStateDescription vmstate_xen_vcpu = {
@@ -1274,6 +1275,7 @@  static const VMStateDescription vmstate_xen_vcpu = {
     .fields = (VMStateField[]) {
         VMSTATE_UINT64(env.xen_vcpu_info_gpa, X86CPU),
         VMSTATE_UINT64(env.xen_vcpu_info_default_gpa, X86CPU),
+        VMSTATE_UINT64(env.xen_vcpu_time_info_gpa, X86CPU),
         VMSTATE_END_OF_LIST()
     }
 };
diff --git a/target/i386/xen.c b/target/i386/xen.c
index cd816bb711..427729ab4d 100644
--- a/target/i386/xen.c
+++ b/target/i386/xen.c
@@ -21,28 +21,41 @@ 
 #include "standard-headers/xen/hvm/hvm_op.h"
 #include "standard-headers/xen/vcpu.h"
 
+static bool kvm_gva_to_gpa(CPUState *cs, uint64_t gva, uint64_t *gpa,
+                           size_t *len, bool is_write)
+{
+        struct kvm_translation tr = {
+            .linear_address = gva,
+        };
+
+        if (len) {
+                *len = TARGET_PAGE_SIZE - (gva & ~TARGET_PAGE_MASK);
+        }
+
+        if (kvm_vcpu_ioctl(cs, KVM_TRANSLATE, &tr) || !tr.valid ||
+            (is_write && !tr.writeable)) {
+            return false;
+        }
+        *gpa = tr.physical_address;
+        return true;
+}
+
 static int kvm_gva_rw(CPUState *cs, uint64_t gva, void *_buf, size_t sz,
                       bool is_write)
 {
     uint8_t *buf = (uint8_t *)_buf;
     size_t i = 0, len = 0;
-    int ret;
 
     for (i = 0; i < sz; i+= len) {
-        struct kvm_translation tr = {
-            .linear_address = gva + i,
-        };
+        uint64_t gpa;
 
-        len = TARGET_PAGE_SIZE - (tr.linear_address & ~TARGET_PAGE_MASK);
+        if (!kvm_gva_to_gpa(cs, gva + i, &gpa, &len, is_write)) {
+                return -EFAULT;
+        }
         if (len > sz)
             len = sz;
 
-        ret = kvm_vcpu_ioctl(cs, KVM_TRANSLATE, &tr);
-        if (ret || !tr.valid || (is_write && !tr.writeable)) {
-            return -EFAULT;
-        }
-
-        cpu_physical_memory_rw(tr.physical_address, buf + i, len, is_write);
+        cpu_physical_memory_rw(gpa, buf + i, len, is_write);
     }
 
     return 0;
@@ -166,6 +179,17 @@  static void do_set_vcpu_info_gpa(CPUState *cs, run_on_cpu_data data)
                           env->xen_vcpu_info_gpa);
 }
 
+static void do_set_vcpu_time_info_gpa(CPUState *cs, run_on_cpu_data data)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    env->xen_vcpu_time_info_gpa = data.host_ulong;
+
+    kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
+                          env->xen_vcpu_time_info_gpa);
+}
+
 static int xen_set_shared_info(CPUState *cs, uint64_t gfn)
 {
     uint64_t gpa = gfn << TARGET_PAGE_BITS;
@@ -258,6 +282,27 @@  static int vcpuop_register_vcpu_info(CPUState *cs, CPUState *target,
     return 0;
 }
 
+static int vcpuop_register_vcpu_time_info(CPUState *cs, CPUState *target,
+                                          uint64_t arg)
+{
+    struct vcpu_register_time_memory_area tma;
+    uint64_t gpa;
+    size_t len;
+
+    if (kvm_copy_from_gva(cs, arg, &tma, sizeof(*tma.addr.v))) {
+        return -EFAULT;
+    }
+
+    if (!kvm_gva_to_gpa(cs, tma.addr.p, &gpa, &len, false) ||
+        len < sizeof(tma)) {
+        return -EFAULT;
+    }
+
+    async_run_on_cpu(target, do_set_vcpu_time_info_gpa,
+                     RUN_ON_CPU_HOST_ULONG(gpa));
+    return 0;
+}
+
 static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
                                   int cmd, int vcpu_id, uint64_t arg)
 {
@@ -266,6 +311,9 @@  static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
     int err;
 
     switch (cmd) {
+    case VCPUOP_register_vcpu_time_memory_area:
+            err = vcpuop_register_vcpu_time_info(cs, dest, arg);
+            break;
     case VCPUOP_register_vcpu_info:
             err = vcpuop_register_vcpu_info(cs, dest, arg);
             break;