diff mbox

[v4] x86/hvm/viridian: zero and check vcpu context __pad field

Message ID 1459352852-1708-1-git-send-email-paul.durrant@citrix.com (mailing list archive)
State New, archived
Headers show

Commit Message

Paul Durrant March 30, 2016, 3:47 p.m. UTC
Commit 57844631 "save APIC assist vector" added an extra field to the
viridian vcpu context save record. This field was only a uint8_t and
so an extra __pad field was also added to pad up to the next 64-bit
boundary.

This patch makes sure that the __pad field is zeroed on save and checked
for zero on restore. This prevents a potential leak of information
from the stack and provides a compatibility check against future use
of the space occupied by the __pad field.

The __pad field is zeroed as a side effect of making use of a C99 struct
initializer for the other fields. This patch also modifies the domain
context save code to use the same mechanism.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---

v4:
 - use c99 struct initializer
 - re-work zero_page extern

v3:
 - make zero_page accessible outside mm.c

v2:
 - drop is_zero() helper and use memcmp against zero_page instead.
 - add memset to viridian_save_domain_ctxt() to reduce potential
   for information leakage in future.
---
 xen/arch/x86/hvm/viridian.c | 23 +++++++++++++----------
 xen/arch/x86/mm.c           |  2 +-
 xen/include/asm-x86/mm.h    |  2 ++
 3 files changed, 16 insertions(+), 11 deletions(-)
diff mbox

Patch

diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index 5c76c1a..8253fd0 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -780,16 +780,16 @@  out:
 
 static int viridian_save_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
-    struct hvm_viridian_domain_context ctxt;
+    struct hvm_viridian_domain_context ctxt = {
+        .time_ref_count = d->arch.hvm_domain.viridian.time_ref_count.val,
+        .hypercall_gpa  = d->arch.hvm_domain.viridian.hypercall_gpa.raw,
+        .guest_os_id    = d->arch.hvm_domain.viridian.guest_os_id.raw,
+        .reference_tsc  = d->arch.hvm_domain.viridian.reference_tsc.raw,
+    };
 
     if ( !is_viridian_domain(d) )
         return 0;
 
-    ctxt.time_ref_count = d->arch.hvm_domain.viridian.time_ref_count.val;
-    ctxt.hypercall_gpa  = d->arch.hvm_domain.viridian.hypercall_gpa.raw;
-    ctxt.guest_os_id    = d->arch.hvm_domain.viridian.guest_os_id.raw;
-    ctxt.reference_tsc  = d->arch.hvm_domain.viridian.reference_tsc.raw;
-
     return (hvm_save_entry(VIRIDIAN_DOMAIN, 0, h, &ctxt) != 0);
 }
 
@@ -822,10 +822,10 @@  static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
         return 0;
 
     for_each_vcpu( d, v ) {
-        struct hvm_viridian_vcpu_context ctxt;
-
-        ctxt.apic_assist_msr = v->arch.hvm_vcpu.viridian.apic_assist.msr.raw;
-        ctxt.apic_assist_vector = v->arch.hvm_vcpu.viridian.apic_assist.vector;
+        struct hvm_viridian_vcpu_context ctxt = {
+            .apic_assist_msr = v->arch.hvm_vcpu.viridian.apic_assist.msr.raw,
+            .apic_assist_vector = v->arch.hvm_vcpu.viridian.apic_assist.vector,
+        };
 
         if ( hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt) != 0 )
             return 1;
@@ -851,6 +851,9 @@  static int viridian_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     if ( hvm_load_entry_zeroextend(VIRIDIAN_VCPU, h, &ctxt) != 0 )
         return -EINVAL;
 
+    if ( memcmp(&ctxt._pad, zero_page, sizeof(ctxt._pad)) )
+        return -EINVAL;
+
     v->arch.hvm_vcpu.viridian.apic_assist.msr.raw = ctxt.apic_assist_msr;
     if ( v->arch.hvm_vcpu.viridian.apic_assist.msr.fields.enabled )
         initialize_apic_assist(v);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index c997b53..bca7532 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -589,7 +589,7 @@  static inline void guest_get_eff_kern_l1e(struct vcpu *v, unsigned long addr,
     TOGGLE_MODE();
 }
 
-static const char __section(".bss.page_aligned.const") zero_page[PAGE_SIZE];
+const char __section(".bss.page_aligned.const") zero_page[PAGE_SIZE];
 
 static void invalidate_shadow_ldt(struct vcpu *v, int flush)
 {
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index b25942b..b781495 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -595,4 +595,6 @@  typedef struct mm_rwlock {
                        &(d)->xenpage_list : &(d)->page_list,            \
                    &(d)->arch.relmem_list)
 
+extern const char zero_page[];
+
 #endif /* __ASM_X86_MM_H__ */