diff mbox series

[1/3] x86 / vmx: make apic_access_mfn type-safe

Message ID 20200121120009.1767-2-pdurrant@amazon.com (mailing list archive)
State Superseded
Headers show
Series purge free_shared_domheap_page() | expand

Commit Message

Paul Durrant Jan. 21, 2020, noon UTC
Use mfn_t rather than unsigned long and change previous tests against 0 to
tests against INVALID_MFN (also introducing initialization to that value).

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Wei Liu <wl@xen.org>
Cc: "Roger Pau Monné" <roger.pau@citrix.com>
Cc: Jun Nakajima <jun.nakajima@intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
---
 xen/arch/x86/hvm/mtrr.c            |  2 +-
 xen/arch/x86/hvm/vmx/vmx.c         | 14 +++++++-------
 xen/include/asm-x86/hvm/vmx/vmcs.h |  2 +-
 3 files changed, 9 insertions(+), 9 deletions(-)

Comments

Tian, Kevin Jan. 22, 2020, 2:51 a.m. UTC | #1
> From: Paul Durrant <pdurrant@amazon.com>
> Sent: Tuesday, January 21, 2020 8:00 PM
> 
> Use mfn_t rather than unsigned long and change previous tests against 0 to
> tests against INVALID_MFN (also introducing initialization to that value).
> 
> Signed-off-by: Paul Durrant <pdurrant@amazon.com>

Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Andrew Cooper Jan. 22, 2020, 2:05 p.m. UTC | #2
On 21/01/2020 12:00, Paul Durrant wrote:
> Use mfn_t rather than unsigned long and change previous tests against 0 to
> tests against INVALID_MFN (also introducing initialization to that value).
>
> Signed-off-by: Paul Durrant <pdurrant@amazon.com>

I'm afraid this breaks the idempotency of vmx_free_vlapic_mapping(),
which gets in the way of domain/vcpu create/destroy cleanup.

It's fine to use 0 as the sentinel.

~Andrew
Jan Beulich Jan. 22, 2020, 3:48 p.m. UTC | #3
On 22.01.2020 15:05, Andrew Cooper wrote:
> On 21/01/2020 12:00, Paul Durrant wrote:
>> Use mfn_t rather than unsigned long and change previous tests against 0 to
>> tests against INVALID_MFN (also introducing initialization to that value).
>>
>> Signed-off-by: Paul Durrant <pdurrant@amazon.com>
> 
> I'm afraid this breaks the idempotency of vmx_free_vlapic_mapping(),
> which gets in the way of domain/vcpu create/destroy cleanup.
> 
> It's fine to use 0 as the sentinel.

And with this adjustment
Reviewed-by: Jan Beulich <jbeulich@suse.com>

Jan
diff mbox series

Patch

diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index 5ad15eafe0..8356e8de3d 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -818,7 +818,7 @@  int epte_get_entry_emt(struct domain *d, unsigned long gfn, mfn_t mfn,
 
     if ( direct_mmio )
     {
-        if ( (mfn_x(mfn) ^ d->arch.hvm.vmx.apic_access_mfn) >> order )
+        if ( (mfn_x(mfn) ^ mfn_x(d->arch.hvm.vmx.apic_access_mfn)) >> order )
             return MTRR_TYPE_UNCACHABLE;
         if ( order )
             return -1;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index f83f102638..3d90e67a05 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -413,6 +413,7 @@  static int vmx_domain_initialise(struct domain *d)
     if ( !has_vlapic(d) )
         return 0;
 
+    d->arch.hvm.vmx.apic_access_mfn = INVALID_MFN;
     if ( (rc = vmx_alloc_vlapic_mapping(d)) != 0 )
         return rc;
 
@@ -3034,7 +3035,7 @@  static int vmx_alloc_vlapic_mapping(struct domain *d)
     mfn = page_to_mfn(pg);
     clear_domain_page(mfn);
     share_xen_page_with_guest(pg, d, SHARE_rw);
-    d->arch.hvm.vmx.apic_access_mfn = mfn_x(mfn);
+    d->arch.hvm.vmx.apic_access_mfn = mfn;
 
     return set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), mfn,
                               PAGE_ORDER_4K,
@@ -3043,24 +3044,23 @@  static int vmx_alloc_vlapic_mapping(struct domain *d)
 
 static void vmx_free_vlapic_mapping(struct domain *d)
 {
-    unsigned long mfn = d->arch.hvm.vmx.apic_access_mfn;
+    mfn_t mfn = d->arch.hvm.vmx.apic_access_mfn;
 
-    if ( mfn != 0 )
-        free_shared_domheap_page(mfn_to_page(_mfn(mfn)));
+    if ( !mfn_eq(mfn, INVALID_MFN) )
+        free_shared_domheap_page(mfn_to_page(mfn));
 }
 
 static void vmx_install_vlapic_mapping(struct vcpu *v)
 {
     paddr_t virt_page_ma, apic_page_ma;
 
-    if ( v->domain->arch.hvm.vmx.apic_access_mfn == 0 )
+    if ( mfn_eq(v->domain->arch.hvm.vmx.apic_access_mfn, INVALID_MFN) )
         return;
 
     ASSERT(cpu_has_vmx_virtualize_apic_accesses);
 
     virt_page_ma = page_to_maddr(vcpu_vlapic(v)->regs_page);
-    apic_page_ma = v->domain->arch.hvm.vmx.apic_access_mfn;
-    apic_page_ma <<= PAGE_SHIFT;
+    apic_page_ma = mfn_to_maddr(v->domain->arch.hvm.vmx.apic_access_mfn);
 
     vmx_vmcs_enter(v);
     __vmwrite(VIRTUAL_APIC_PAGE_ADDR, virt_page_ma);
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index a514299144..be4661a929 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -59,7 +59,7 @@  struct ept_data {
 #define _VMX_DOMAIN_PML_ENABLED    0
 #define VMX_DOMAIN_PML_ENABLED     (1ul << _VMX_DOMAIN_PML_ENABLED)
 struct vmx_domain {
-    unsigned long apic_access_mfn;
+    mfn_t apic_access_mfn;
     /* VMX_DOMAIN_* */
     unsigned int status;