diff mbox

[v2,2/4] x86/vpmu: Remove core2_no_vpmu_ops

Message ID 1480962280-29787-3-git-send-email-andrew.cooper3@citrix.com (mailing list archive)
State New, archived
Headers show

Commit Message

Andrew Cooper Dec. 5, 2016, 6:24 p.m. UTC
core2_no_vpmu_ops exists solely to work around the default-leaking of CPUID/MSR
values in Xen.

With CPUID handling removed from arch_vpmu_ops, the RDMSR handling is the last
remaining hook.  Since core2_no_vpmu_ops's introduction in c/s 25250ed7 "vpmu
intel: Add cpuid handling when vpmu disabled", a lot of work has been done and
the nop path in vpmu_do_msr() now suffices.

vpmu_do_msr() also falls into the nop path for un-configured or unprivileged
domains, which enables the removal of the duplicate logic in priv_op_read_msr().

Finally, make all arch_vpmu_ops structures const as they are never modified,
and make them static as they are not referred to externally.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Boris Ostrovsky <boris.ostrovsky@oracle.com>

v2:
 * s/const static/static const/
 * Tweak comment
---
 xen/arch/x86/cpu/vpmu.c       |  6 +++++-
 xen/arch/x86/cpu/vpmu_amd.c   |  2 +-
 xen/arch/x86/cpu/vpmu_intel.c | 22 +---------------------
 xen/arch/x86/traps.c          |  6 +-----
 xen/include/asm-x86/vpmu.h    |  2 +-
 5 files changed, 9 insertions(+), 29 deletions(-)

Comments

Boris Ostrovsky Dec. 5, 2016, 9:02 p.m. UTC | #1
On 12/05/2016 01:24 PM, Andrew Cooper wrote:
> core2_no_vpmu_ops exists solely to work around the default-leaking of CPUID/MSR
> values in Xen.
>
> With CPUID handling removed from arch_vpmu_ops, the RDMSR handling is the last
> remaining hook.  Since core2_no_vpmu_ops's introduction in c/s 25250ed7 "vpmu
> intel: Add cpuid handling when vpmu disabled", a lot of work has been done and
> the nop path in vpmu_do_msr() now suffices.
>
> vpmu_do_msr() also falls into the nop path for un-configured or unprivileged
> domains, which enables the removal of the duplicate logic in priv_op_read_msr().
>
> Finally, make all arch_vpmu_ops structures const as they are never modified,
> and make them static as they are not referred to externally.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Acked-by: Kevin Tian <kevin.tian@intel.com>

Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Jan Beulich Dec. 6, 2016, 8:53 a.m. UTC | #2
>>> On 05.12.16 at 19:24, <andrew.cooper3@citrix.com> wrote:
> core2_no_vpmu_ops exists solely to work around the default-leaking of CPUID/MSR
> values in Xen.
> 
> With CPUID handling removed from arch_vpmu_ops, the RDMSR handling is the last
> remaining hook.  Since core2_no_vpmu_ops's introduction in c/s 25250ed7 "vpmu
> intel: Add cpuid handling when vpmu disabled", a lot of work has been done and
> the nop path in vpmu_do_msr() now suffices.
> 
> vpmu_do_msr() also falls into the nop path for un-configured or unprivileged
> domains, which enables the removal of the duplicate logic in priv_op_read_msr().
> 
> Finally, make all arch_vpmu_ops structures const as they are never modified,
> and make them static as they are not referred to externally.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Acked-by: Kevin Tian <kevin.tian@intel.com>

Acked-by: Jan Beulich <jbeulich@suse.com>
diff mbox

Patch

diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index a542f4d..4c713be 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -136,9 +136,13 @@  int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
     const struct arch_vpmu_ops *ops;
     int ret = 0;
 
+    /*
+     * Hide the PMU MSRs if vpmu is not configured, or the hardware domain is
+     * profiling the whole system.
+     */
     if ( likely(vpmu_mode == XENPMU_MODE_OFF) ||
          ((vpmu_mode & XENPMU_MODE_ALL) &&
-          !is_hardware_domain(current->domain)) )
+          !is_hardware_domain(curr->domain)) )
          goto nop;
 
     vpmu = vcpu_vpmu(curr);
diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index 55d03b3..43ade13 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -488,7 +488,7 @@  static void amd_vpmu_dump(const struct vcpu *v)
     }
 }
 
-struct arch_vpmu_ops amd_vpmu_ops = {
+static const struct arch_vpmu_ops amd_vpmu_ops = {
     .do_wrmsr = amd_vpmu_do_wrmsr,
     .do_rdmsr = amd_vpmu_do_rdmsr,
     .do_interrupt = amd_vpmu_do_interrupt,
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index e3f25c8..e51bc4e 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -865,7 +865,7 @@  static void core2_vpmu_destroy(struct vcpu *v)
     vpmu_clear(vpmu);
 }
 
-struct arch_vpmu_ops core2_vpmu_ops = {
+static const struct arch_vpmu_ops core2_vpmu_ops = {
     .do_wrmsr = core2_vpmu_do_wrmsr,
     .do_rdmsr = core2_vpmu_do_rdmsr,
     .do_interrupt = core2_vpmu_do_interrupt,
@@ -875,32 +875,12 @@  struct arch_vpmu_ops core2_vpmu_ops = {
     .arch_vpmu_dump = core2_vpmu_dump
 };
 
-/*
- * If its a vpmu msr set it to 0.
- */
-static int core2_no_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
-{
-    int type = -1, index = -1;
-    if ( !is_core2_vpmu_msr(msr, &type, &index) )
-        return -EINVAL;
-    *msr_content = 0;
-    return 0;
-}
-
-/*
- * These functions are used in case vpmu is not enabled.
- */
-struct arch_vpmu_ops core2_no_vpmu_ops = {
-    .do_rdmsr = core2_no_vpmu_do_rdmsr,
-};
-
 int vmx_vpmu_initialise(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     u64 msr_content;
     static bool_t ds_warned;
 
-    vpmu->arch_vpmu_ops = &core2_no_vpmu_ops;
     if ( vpmu_mode == XENPMU_MODE_OFF )
         return 0;
 
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 48ac519..638d8ff 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2566,11 +2566,7 @@  static int priv_op_read_msr(unsigned int reg, uint64_t *val,
     case MSR_K7_EVNTSEL0...MSR_K7_PERFCTR3:
             if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
             {
-                /* Don't leak PMU MSRs to unprivileged domains. */
-                if ( (vpmu_mode & XENPMU_MODE_ALL) &&
-                     !is_hardware_domain(currd) )
-                    *val = 0;
-                else if ( vpmu_do_rdmsr(reg, val) )
+                if ( vpmu_do_rdmsr(reg, val) )
                     break;
                 return X86EMUL_OKAY;
             }
diff --git a/xen/include/asm-x86/vpmu.h b/xen/include/asm-x86/vpmu.h
index d1dda4b..e50618f 100644
--- a/xen/include/asm-x86/vpmu.h
+++ b/xen/include/asm-x86/vpmu.h
@@ -60,7 +60,7 @@  struct vpmu_struct {
     u32 hw_lapic_lvtpc;
     void *context;      /* May be shared with PV guest */
     void *priv_context; /* hypervisor-only */
-    struct arch_vpmu_ops *arch_vpmu_ops;
+    const struct arch_vpmu_ops *arch_vpmu_ops;
     struct xen_pmu_data *xenpmu_data;
     spinlock_t vpmu_lock;
 };