
[v3,03/13] x86/hvm: Scale host TSC when setting/getting guest TSC

Message ID 1451531020-29964-4-git-send-email-haozhong.zhang@intel.com (mailing list archive)
State New, archived

Commit Message

Haozhong Zhang Dec. 31, 2015, 3:03 a.m. UTC
The existing hvm_[set|get]_guest_tsc_fixed() calculate the guest TSC by
adding the TSC offset to the host TSC. When TSC scaling is enabled, the
host TSC must be scaled first. This patch adds the scaling logic to
those two functions.
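
For reference, the relationship being implemented can be sketched as
below. This is an illustrative sketch only, not the Xen code:
scale_host_tsc(), guest_tsc_from_host() and tsc_offset_for() are
made-up helper names, and the ratio is assumed to be in AMD's 8.32
fixed-point TSC Ratio MSR format (Xen's real helper is scale_tsc() in
xen/arch/x86/hvm/svm/svm.c).

#include <stdint.h>

/*
 * Illustrative sketch only, not the Xen implementation.  "ratio" is
 * assumed to be an 8.32 fixed-point multiplier (the AMD TSC Ratio MSR
 * format).
 */
static uint64_t scale_host_tsc(uint64_t host_tsc, uint64_t ratio)
{
    /* (host_tsc * ratio) >> 32, using a 128-bit intermediate (GCC
     * extension) to avoid overflow of the multiplication. */
    return (uint64_t)(((unsigned __int128)host_tsc * ratio) >> 32);
}

/* Getting the guest TSC: scale the host TSC first, then add the offset. */
static uint64_t guest_tsc_from_host(uint64_t host_tsc, uint64_t ratio,
                                    uint64_t tsc_offset)
{
    return scale_host_tsc(host_tsc, ratio) + tsc_offset;
}

/* Setting a target guest TSC: the offset must be computed against the
 * scaled host TSC, not the raw one. */
static uint64_t tsc_offset_for(uint64_t guest_tsc, uint64_t host_tsc,
                               uint64_t ratio)
{
    return guest_tsc - scale_host_tsc(host_tsc, ratio);
}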

Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
---
 xen/arch/x86/hvm/hvm.c        | 17 +++++++----------
 xen/arch/x86/hvm/svm/svm.c    | 12 ++++++++++++
 xen/include/asm-x86/hvm/hvm.h |  2 ++
 3 files changed, 21 insertions(+), 10 deletions(-)

Comments

Jan Beulich Jan. 8, 2016, 9:15 a.m. UTC | #1
>>> On 31.12.15 at 04:03, <haozhong.zhang@intel.com> wrote:
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -804,6 +804,16 @@ static uint64_t scale_tsc(uint64_t host_tsc, uint64_t ratio)
>      return scaled_host_tsc;
>  }
>  
> +static uint64_t svm_scale_tsc(struct vcpu *v, uint64_t tsc)
> +{
> +    struct domain *d = v->domain;
> +
> +    if ( !cpu_has_tsc_ratio || d->arch.vtsc )

The left side of this check is redundant with those at both call sites.
It should either be removed altogether, or converted to an ASSERT().
Perhaps the right side should move into the callers too (as being
vendor independent), or if not at least the pointless local variable
should be eliminated.

Further I suppose this new hook really could/should have its
first argument const qualified (the hook isn't supposed to fiddle
with the vCPU).

Jan
Haozhong Zhang Jan. 8, 2016, 2:04 p.m. UTC | #2
On 01/08/16 02:15, Jan Beulich wrote:
> >>> On 31.12.15 at 04:03, <haozhong.zhang@intel.com> wrote:
> > --- a/xen/arch/x86/hvm/svm/svm.c
> > +++ b/xen/arch/x86/hvm/svm/svm.c
> > @@ -804,6 +804,16 @@ static uint64_t scale_tsc(uint64_t host_tsc, uint64_t ratio)
> >      return scaled_host_tsc;
> >  }
> >  
> > +static uint64_t svm_scale_tsc(struct vcpu *v, uint64_t tsc)
> > +{
> > +    struct domain *d = v->domain;
> > +
> > +    if ( !cpu_has_tsc_ratio || d->arch.vtsc )
> 
> The left side of this check is redundant with those at both call sites.
> It should either be removed altogether, or converted to an ASSERT().
> Perhaps the right side should move into the callers too (as being
> vendor independent), or if not at least the pointless local variable
> should be eliminated.
>

Yes, I'll remove the left check and move the right check to callers. 
 
> Further I suppose this new hook really could/should have its
> first argument const qualified (the hook isn't supposed to fiddle
> with the vCPU).
>

I'll add 'const'.
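
Roughly, the revised hook might then look like the sketch below
(illustration only, not the actual v4 code; scale_tsc() and
vcpu_tsc_ratio() are as in the patch further down, and the
vendor-independent d->arch.vtsc test would move to the callers):

/* Sketch: the left-side check becomes an ASSERT(), the first argument
 * is const qualified, and the local variable is gone. */
static uint64_t svm_scale_tsc(const struct vcpu *v, uint64_t tsc)
{
    ASSERT(cpu_has_tsc_ratio);

    return scale_tsc(tsc, vcpu_tsc_ratio(v));
}

with the hook declaration in hvm.h updated to match:

    uint64_t (*scale_tsc)(const struct vcpu *v, uint64_t tsc);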

Haozhong

Patch

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 21470ec..3648a44 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -60,6 +60,7 @@ 
 #include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/event.h>
 #include <asm/hvm/vmx/vmx.h>
+#include <asm/hvm/svm/svm.h> /* for cpu_has_tsc_ratio */
 #include <asm/altp2m.h>
 #include <asm/mtrr.h>
 #include <asm/apic.h>
@@ -310,13 +311,11 @@  void hvm_set_guest_tsc_fixed(struct vcpu *v, u64 guest_tsc, u64 at_tsc)
         tsc = hvm_get_guest_time_fixed(v, at_tsc);
         tsc = gtime_to_gtsc(v->domain, tsc);
     }
-    else if ( at_tsc )
-    {
-        tsc = at_tsc;
-    }
     else
     {
-        tsc = rdtsc();
+        tsc = at_tsc ?: rdtsc();
+        if ( cpu_has_tsc_ratio )
+            tsc = hvm_funcs.scale_tsc(v, tsc);
     }
 
     delta_tsc = guest_tsc - tsc;
@@ -344,13 +343,11 @@  u64 hvm_get_guest_tsc_fixed(struct vcpu *v, uint64_t at_tsc)
         tsc = hvm_get_guest_time_fixed(v, at_tsc);
         tsc = gtime_to_gtsc(v->domain, tsc);
     }
-    else if ( at_tsc )
-    {
-        tsc = at_tsc;
-    }
     else
     {
-        tsc = rdtsc();
+        tsc = at_tsc ?: rdtsc();
+        if ( cpu_has_tsc_ratio )
+            tsc = hvm_funcs.scale_tsc(v, tsc);
     }
 
     return tsc + v->arch.hvm_vcpu.cache_tsc_offset;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index a66d854..c538a29 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -804,6 +804,16 @@  static uint64_t scale_tsc(uint64_t host_tsc, uint64_t ratio)
     return scaled_host_tsc;
 }
 
+static uint64_t svm_scale_tsc(struct vcpu *v, uint64_t tsc)
+{
+    struct domain *d = v->domain;
+
+    if ( !cpu_has_tsc_ratio || d->arch.vtsc )
+        return tsc;
+
+    return scale_tsc(tsc, vcpu_tsc_ratio(v));
+}
+
 static uint64_t svm_get_tsc_offset(uint64_t host_tsc, uint64_t guest_tsc,
     uint64_t ratio)
 {
@@ -2272,6 +2282,8 @@  static struct hvm_function_table __initdata svm_function_table = {
     .nhvm_vmcx_hap_enabled = nsvm_vmcb_hap_enabled,
     .nhvm_intr_blocked = nsvm_intr_blocked,
     .nhvm_hap_walk_L1_p2m = nsvm_hap_walk_L1_p2m,
+
+    .scale_tsc            = svm_scale_tsc,
 };
 
 void svm_vmexit_handler(struct cpu_user_regs *regs)
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index b9d893d..ba6259e 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -212,6 +212,8 @@  struct hvm_function_table {
     void (*altp2m_vcpu_update_vmfunc_ve)(struct vcpu *v);
     bool_t (*altp2m_vcpu_emulate_ve)(struct vcpu *v);
     int (*altp2m_vcpu_emulate_vmfunc)(struct cpu_user_regs *regs);
+
+    uint64_t (*scale_tsc)(struct vcpu *v, uint64_t tsc);
 };
 
 extern struct hvm_function_table hvm_funcs;