
[Altp2m cleanup v7] altp2m cleanup work.

Message ID 1475079727-29002-2-git-send-email-paul.c.lai@intel.com (mailing list archive)

Commit Message

Paul Lai Sept. 28, 2016, 4:22 p.m. UTC
Indent goto labels by one space.
Make the altp2m helper functions inline functions in the header.
In do_altp2m_op(), return -EOPNOTSUPP if the sanity check does not
recognize the passed command as valid.
In do_altp2m_op(), ASSERT_UNREACHABLE() in the dispatch switch if a
command is not recognized there; the sanity check above should
already have returned -EOPNOTSUPP for it.
Make hvm_funcs.altp2m_supported "bool" instead of "bool_t".
Make hvm_altp2m_supported() and altp2m_vcpu_emulate_ve() return
bool (rather than bool_t).
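
As an illustration, a minimal standalone sketch of the
validate-then-dispatch pattern described above (do_op() and the CMD_*
names are invented for this sketch, not the real Xen interface, and
ASSERT_UNREACHABLE() is approximated with assert(0)).  -EOPNOTSUPP
fits here because -ENOSYS is conventionally reserved for hypercalls
that are not implemented at all:

    #include <assert.h>
    #include <errno.h>
    #include <stdio.h>

    #define ASSERT_UNREACHABLE() assert(0)

    enum { CMD_get_state, CMD_set_state };

    static int do_op(unsigned int cmd)
    {
        /* Sanity check: reject unknown commands with -EOPNOTSUPP. */
        switch ( cmd )
        {
        case CMD_get_state:
        case CMD_set_state:
            break;
        default:
            return -EOPNOTSUPP;
        }

        /*
         * Dispatch: every valid command has a case here, so reaching
         * "default" would mean the sanity check above is out of sync.
         */
        switch ( cmd )
        {
        case CMD_get_state:
            return 1;
        case CMD_set_state:
            return 0;
        default:
            ASSERT_UNREACHABLE();
        }

        return -EINVAL;
    }

    int main(void)
    {
        /* Prints "1 -95" on Linux, where EOPNOTSUPP is 95. */
        printf("%d %d\n", do_op(CMD_get_state), do_op(42));
        return 0;
    }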

Signed-off-by: Paul Lai <paul.c.lai@intel.com>
---
v7 of patch

Changes since last revision:
        use -EOPNOTSUPP instead of -ENOSYS
---
 xen/arch/x86/hvm/hvm.c        | 47 ++++++++++++++++++++-----------------------
 xen/include/asm-x86/hvm/hvm.h | 26 +++++++++++++++++++-----
 2 files changed, 43 insertions(+), 30 deletions(-)

Patch

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 34a8c85..f9aca6d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1966,11 +1966,11 @@  int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
      * Otherwise, this is an error condition. */
     rc = fall_through;
 
-out_put_gfn:
+ out_put_gfn:
     __put_gfn(p2m, gfn);
     if ( ap2m_active )
         __put_gfn(hostp2m, gfn);
-out:
+ out:
     /* All of these are delayed until we exit, since we might 
      * sleep on event ring wait queues, and we must not hold
      * locks in such circumstance */
@@ -5317,12 +5317,25 @@  static int do_altp2m_op(
         return -EFAULT;
 
     if ( a.pad1 || a.pad2 ||
-         (a.version != HVMOP_ALTP2M_INTERFACE_VERSION) ||
-         (a.cmd < HVMOP_altp2m_get_domain_state) ||
-         (a.cmd > HVMOP_altp2m_change_gfn) )
+         (a.version != HVMOP_ALTP2M_INTERFACE_VERSION) )
         return -EINVAL;
 
-    d = (a.cmd != HVMOP_altp2m_vcpu_enable_notify) ?
+    switch ( a.cmd )
+    {
+    case HVMOP_altp2m_get_domain_state:
+    case HVMOP_altp2m_set_domain_state:
+    case HVMOP_altp2m_vcpu_enable_notify:
+    case HVMOP_altp2m_create_p2m:
+    case HVMOP_altp2m_destroy_p2m:
+    case HVMOP_altp2m_switch_p2m:
+    case HVMOP_altp2m_set_mem_access:
+    case HVMOP_altp2m_change_gfn:
+        break;
+    default:
+        return -EOPNOTSUPP;
+    }
+
+    d = (a.cmd != HVMOP_altp2m_vcpu_enable_notify) ?
         rcu_lock_domain_by_any_id(a.domain) : rcu_lock_current_domain();
 
     if ( d == NULL )
@@ -5439,6 +5452,9 @@  static int do_altp2m_op(
             rc = p2m_change_altp2m_gfn(d, a.u.change_gfn.view,
                     _gfn(a.u.change_gfn.old_gfn),
                     _gfn(a.u.change_gfn.new_gfn));
+        break;
+    default:
+        ASSERT_UNREACHABLE();
     }
 
  out:
@@ -5963,25 +5979,6 @@  void hvm_toggle_singlestep(struct vcpu *v)
     v->arch.hvm_vcpu.single_step = !v->arch.hvm_vcpu.single_step;
 }
 
-void altp2m_vcpu_update_p2m(struct vcpu *v)
-{
-    if ( hvm_funcs.altp2m_vcpu_update_p2m )
-        hvm_funcs.altp2m_vcpu_update_p2m(v);
-}
-
-void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v)
-{
-    if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve )
-        hvm_funcs.altp2m_vcpu_update_vmfunc_ve(v);
-}
-
-bool_t altp2m_vcpu_emulate_ve(struct vcpu *v)
-{
-    if ( hvm_funcs.altp2m_vcpu_emulate_ve )
-        return hvm_funcs.altp2m_vcpu_emulate_ve(v);
-    return 0;
-}
-
 int hvm_set_mode(struct vcpu *v, int mode)
 {
 
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 81b60d5..b1be6bd 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -100,7 +100,7 @@  struct hvm_function_table {
     bool_t pvh_supported;
 
     /* Necessary hardware support for alternate p2m's? */
-    bool_t altp2m_supported;
+    bool altp2m_supported;
 
     /* Indicate HAP capabilities. */
     unsigned int hap_capabilities;
@@ -593,19 +593,35 @@  static inline bool_t hvm_is_singlestep_supported(void)
 }
 
 /* returns true if hardware supports alternate p2m's */
-static inline bool_t hvm_altp2m_supported(void)
+static inline bool hvm_altp2m_supported(void)
 {
     return hvm_funcs.altp2m_supported;
 }
 
 /* updates the current hardware p2m */
-void altp2m_vcpu_update_p2m(struct vcpu *v);
+static inline void altp2m_vcpu_update_p2m(struct vcpu *v)
+{
+    if ( hvm_funcs.altp2m_vcpu_update_p2m )
+        hvm_funcs.altp2m_vcpu_update_p2m(v);
+}
 
 /* updates VMCS fields related to VMFUNC and #VE */
-void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v);
+static inline void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v)
+{
+    if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve )
+        hvm_funcs.altp2m_vcpu_update_vmfunc_ve(v);
+}
 
 /* emulates #VE */
-bool_t altp2m_vcpu_emulate_ve(struct vcpu *v);
+static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v)
+{
+    if ( hvm_funcs.altp2m_vcpu_emulate_ve )
+    {
+        return hvm_funcs.altp2m_vcpu_emulate_ve(v);
+    }
+
+    return false;
+}
 
 /* Check CR4/EFER values */
 const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
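
For reference, a standalone sketch of the guarded-optional-hook idiom
the inlined helpers rely on (the ops table and all names below are
invented and assume nothing about the real hvm_funcs layout).  Moving
such thin wrappers into the header lets the compiler inline the NULL
check at each call site instead of emitting a function call:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct vcpu;                              /* opaque in this sketch */

    struct ops {
        bool altp2m_supported;
        void (*update_p2m)(struct vcpu *v);   /* hook may be NULL */
        bool (*emulate_ve)(struct vcpu *v);   /* hook may be NULL */
    };

    static struct ops ops;                    /* zero-init: no hooks */

    /* Wrapper is a no-op when the hook is absent. */
    static inline void vcpu_update_p2m(struct vcpu *v)
    {
        if ( ops.update_p2m )
            ops.update_p2m(v);
    }

    /* Returns false when the hook is absent, else the hook's result. */
    static inline bool vcpu_emulate_ve(struct vcpu *v)
    {
        if ( ops.emulate_ve )
            return ops.emulate_ve(v);

        return false;
    }

    int main(void)
    {
        vcpu_update_p2m(NULL);                 /* safe: hook is NULL */
        printf("%d\n", vcpu_emulate_ve(NULL)); /* prints 0 */
        return 0;
    }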