
[1/3] x86/shim: fix build with PV_SHIM_EXCLUSIVE and SHADOW_PAGING

Message ID: 3ddb9adf-22b5-9196-1f3c-7cb3cafd08dd@suse.com (mailing list archive)
State: New, archived
Series: x86: shim building adjustments

Commit Message

Jan Beulich Sept. 14, 2020, 12:38 p.m. UTC
While there's little point in enabling both, the combination ought to at
least build correctly. Drop the direct PV_SHIM_EXCLUSIVE conditionals
and instead zap PG_log_dirty to zero under the right conditions, keying
the other #ifdef-s off of that.

While there, also expand on ded576ce07e9 ("x86/shadow: dirty VRAM
tracking is needed for HVM only"): there was yet another is_hvm_domain()
check missing, and code touching the struct fields needs to be guarded
by suitable #ifdef-s as well. Also guard the shadow-mode-only fields
accordingly.

Fixes: 8b5b49ceb3d9 ("x86: don't include domctl and alike in shim-exclusive builds")
Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
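
The pattern at the heart of the fix: rather than testing
CONFIG_PV_SHIM_EXCLUSIVE at every site, the mode bit itself is defined
to 0 when the feature can never be enabled, so that "#if" tests on it
compile the code away. A minimal sketch of the idea, using made-up
names (CONFIG_FEATURE, CONFIG_MINIMAL_BUILD, PG_feature and
feature_enable() are illustrative only, not taken from the patch):

  struct domain;

  #if defined(CONFIG_FEATURE) || !defined(CONFIG_MINIMAL_BUILD)
  #define PG_feature  (1u << 10)        /* real mode bit */
  #else
  #define PG_feature  0                 /* feature configured out */
  #endif

  #if PG_feature  /* non-zero only when the feature can be enabled */
  void feature_enable(struct domain *d);
  #else
  static inline void feature_enable(struct domain *d) {}  /* no-op stub */
  #endif

This keeps the Kconfig decision in one place, exactly as the paging.h
hunk at the end of the patch does for PG_log_dirty.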

Comments

Roger Pau Monné Sept. 14, 2020, 4:44 p.m. UTC | #1
On Mon, Sep 14, 2020 at 02:38:49PM +0200, Jan Beulich wrote:
> While there's little point in enabling both, the combination ought to at
> least build correctly. Drop the direct PV_SHIM_EXCLUSIVE conditionals
> and instead zap PG_log_dirty to zero under the right conditions, keying
> the other #ifdef-s off of that.
> 
> While there, also expand on ded576ce07e9 ("x86/shadow: dirty VRAM
> tracking is needed for HVM only"): there was yet another is_hvm_domain()
> check missing, and code touching the struct fields needs to be guarded
> by suitable #ifdef-s as well. Also guard the shadow-mode-only fields
> accordingly.
> 
> Fixes: 8b5b49ceb3d9 ("x86: don't include domctl and alike in shim-exclusive builds")
> Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>

We seem to be growing more and more ifdefs, which is not ideal IMO; we
should rather aim to remove them by splitting code into separate
compilation units. There doesn't seem to be much of an option to split
stuff in this case, so be it.

Thanks, Roger.
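
For context, the kind of split Roger has in mind is already used by the
shadow code itself: xen/arch/x86/mm/shadow/hvm.c is only built when
obj-$(CONFIG_HVM) selects it in the Makefile, and non-HVM builds see
stubs instead. A rough sketch of that shape for the VRAM helpers (the
shadow_vram_*_mfn names and signatures here are hypothetical, not a
committed interface; mfn_t comes from Xen's headers):

  #ifdef CONFIG_HVM
  /* Real definitions would live in shadow/hvm.c (HVM-only object). */
  void shadow_vram_get_mfn(mfn_t mfn, unsigned int l1f,
                           mfn_t sl1mfn, const void *sl1e,
                           struct domain *d);
  void shadow_vram_put_mfn(mfn_t mfn, unsigned int l1f,
                           mfn_t sl1mfn, const void *sl1e,
                           struct domain *d);
  #else
  /* !HVM builds: empty inline stubs, so callers need no #ifdef. */
  static inline void shadow_vram_get_mfn(mfn_t mfn, unsigned int l1f,
                                         mfn_t sl1mfn, const void *sl1e,
                                         struct domain *d) {}
  static inline void shadow_vram_put_mfn(mfn_t mfn, unsigned int l1f,
                                         mfn_t sl1mfn, const void *sl1e,
                                         struct domain *d) {}
  #endif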
Jan Beulich Sept. 15, 2020, 8 a.m. UTC | #2
On 14.09.2020 18:44, Roger Pau Monné wrote:
> On Mon, Sep 14, 2020 at 02:38:49PM +0200, Jan Beulich wrote:
>> While there's little point in enabling both, the combination ought to at
>> least build correctly. Drop the direct PV_SHIM_EXCLUSIVE conditionals
>> and instead zap PG_log_dirty to zero under the right conditions, keying
>> the other #ifdef-s off of that.
>>
>> While there, also expand on ded576ce07e9 ("x86/shadow: dirty VRAM
>> tracking is needed for HVM only"): there was yet another is_hvm_domain()
>> check missing, and code touching the struct fields needs to be guarded
>> by suitable #ifdef-s as well. Also guard the shadow-mode-only fields
>> accordingly.
>>
>> Fixes: 8b5b49ceb3d9 ("x86: don't include domctl and alike in shim-exclusive builds")
>> Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 
> Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>

Thanks.

> We seem to be growing more and more ifdefs, which is not ideal IMO; we
> should rather aim to remove them by splitting code into separate
> compilation units. There doesn't seem to be much of an option to split
> stuff in this case, so be it.

Right - I was indeed wondering whether shadow_vram_{get,put}_l1e()
could be moved into hvm.c, but I didn't want to take the time to check
right away, as it seemed more important to get the build issue
addressed.

Beyond the #ifdef-s added there, the majority of the changes here are
merely adjustments to existing ones. (I'd exempt the new conditional
PG_log_dirty #define-s from those wanting better alternatives, as we
have already been doing the same for other PG_* constants.)

Jan
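
The PG_* precedent mentioned above is visible in the context lines of
the paging.h hunk below: the HVM-only mode bits already collapse to 0
in !CONFIG_HVM builds, and the patch merely extends the same treatment
to PG_log_dirty. Reconstructed excerpt (abridged from
xen/include/asm-x86/paging.h, as implied by the hunk's context lines):

  #ifdef CONFIG_HVM
  #define PG_refcounts  (XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT << PG_mode_shift)
  #define PG_translate  (XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE << PG_mode_shift)
  #define PG_external   (XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL << PG_mode_shift)
  #else
  #define PG_refcounts  0
  #define PG_translate  0
  #define PG_external   0
  #endif

Tests such as "#if PG_log_dirty" then work uniformly: the constant is
either a real mode bit or 0, and the preprocessor drops the dead code.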

Patch

--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -47,7 +47,7 @@ 
 /* Per-CPU variable for enforcing the lock ordering */
 DEFINE_PER_CPU(int, mm_lock_level);
 
-#ifndef CONFIG_PV_SHIM_EXCLUSIVE
+#if PG_log_dirty
 
 /************************************************/
 /*              LOG DIRTY SUPPORT               */
@@ -630,7 +630,7 @@  void paging_log_dirty_init(struct domain
     d->arch.paging.log_dirty.ops = ops;
 }
 
-#endif /* CONFIG_PV_SHIM_EXCLUSIVE */
+#endif /* PG_log_dirty */
 
 /************************************************/
 /*           CODE FOR PAGING SUPPORT            */
@@ -671,7 +671,7 @@  void paging_vcpu_init(struct vcpu *v)
         shadow_vcpu_init(v);
 }
 
-#ifndef CONFIG_PV_SHIM_EXCLUSIVE
+#if PG_log_dirty
 int paging_domctl(struct domain *d, struct xen_domctl_shadow_op *sc,
                   XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl,
                   bool_t resuming)
@@ -792,7 +792,7 @@  long paging_domctl_continuation(XEN_GUES
 
     return ret;
 }
-#endif /* CONFIG_PV_SHIM_EXCLUSIVE */
+#endif /* PG_log_dirty */
 
 /* Call when destroying a domain */
 int paging_teardown(struct domain *d)
@@ -808,7 +808,7 @@  int paging_teardown(struct domain *d)
     if ( preempted )
         return -ERESTART;
 
-#ifndef CONFIG_PV_SHIM_EXCLUSIVE
+#if PG_log_dirty
     /* clean up log dirty resources. */
     rc = paging_free_log_dirty_bitmap(d, 0);
     if ( rc == -ERESTART )
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2869,12 +2869,14 @@  void shadow_teardown(struct domain *d, b
      * calls now that we've torn down the bitmap */
     d->arch.paging.mode &= ~PG_log_dirty;
 
-    if ( d->arch.hvm.dirty_vram )
+#ifdef CONFIG_HVM
+    if ( is_hvm_domain(d) && d->arch.hvm.dirty_vram )
     {
         xfree(d->arch.hvm.dirty_vram->sl1ma);
         xfree(d->arch.hvm.dirty_vram->dirty_bitmap);
         XFREE(d->arch.hvm.dirty_vram);
     }
+#endif
 
 out:
     paging_unlock(d);
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -618,6 +618,7 @@  _sh_propagate(struct vcpu *v,
         }
     }
 
+#ifdef CONFIG_HVM
     if ( unlikely(level == 1) && is_hvm_domain(d) )
     {
         struct sh_dirty_vram *dirty_vram = d->arch.hvm.dirty_vram;
@@ -632,6 +633,7 @@  _sh_propagate(struct vcpu *v,
                 sflags &= ~_PAGE_RW;
         }
     }
+#endif
 
     /* Read-only memory */
     if ( p2m_is_readonly(p2mt) )
@@ -1050,6 +1052,7 @@  static inline void shadow_vram_get_l1e(s
                                        mfn_t sl1mfn,
                                        struct domain *d)
 {
+#ifdef CONFIG_HVM
     mfn_t mfn = shadow_l1e_get_mfn(new_sl1e);
     int flags = shadow_l1e_get_flags(new_sl1e);
     unsigned long gfn;
@@ -1074,6 +1077,7 @@  static inline void shadow_vram_get_l1e(s
             dirty_vram->sl1ma[i] = mfn_to_maddr(sl1mfn)
                 | ((unsigned long)sl1e & ~PAGE_MASK);
     }
+#endif
 }
 
 static inline void shadow_vram_put_l1e(shadow_l1e_t old_sl1e,
@@ -1081,6 +1085,7 @@  static inline void shadow_vram_put_l1e(s
                                        mfn_t sl1mfn,
                                        struct domain *d)
 {
+#ifdef CONFIG_HVM
     mfn_t mfn = shadow_l1e_get_mfn(old_sl1e);
     int flags = shadow_l1e_get_flags(old_sl1e);
     unsigned long gfn;
@@ -1140,6 +1145,7 @@  static inline void shadow_vram_put_l1e(s
             dirty_vram->last_dirty = NOW();
         }
     }
+#endif
 }
 
 static int shadow_set_l1e(struct domain *d,
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -67,8 +67,12 @@ 
 #define PG_translate   0
 #define PG_external    0
 #endif
+#if defined(CONFIG_HVM) || !defined(CONFIG_PV_SHIM_EXCLUSIVE)
 /* Enable log dirty mode */
 #define PG_log_dirty   (XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY << PG_mode_shift)
+#else
+#define PG_log_dirty   0
+#endif
 
 /* All paging modes. */
 #define PG_MASK (PG_refcounts | PG_log_dirty | PG_translate | PG_external)
@@ -154,7 +158,7 @@  struct paging_mode {
 /*****************************************************************************
  * Log dirty code */
 
-#ifndef CONFIG_PV_SHIM_EXCLUSIVE
+#if PG_log_dirty
 
 /* get the dirty bitmap for a specific range of pfns */
 void paging_log_dirty_range(struct domain *d,
@@ -195,23 +199,28 @@  int paging_mfn_is_dirty(struct domain *d
 #define L4_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER * 2)) & \
                               (LOGDIRTY_NODE_ENTRIES-1))
 
+#ifdef CONFIG_HVM
 /* VRAM dirty tracking support */
 struct sh_dirty_vram {
     unsigned long begin_pfn;
     unsigned long end_pfn;
+#ifdef CONFIG_SHADOW_PAGING
     paddr_t *sl1ma;
     uint8_t *dirty_bitmap;
     s_time_t last_dirty;
+#endif
 };
+#endif
 
-#else /* !CONFIG_PV_SHIM_EXCLUSIVE */
+#else /* !PG_log_dirty */
 
 static inline void paging_log_dirty_init(struct domain *d,
                                          const struct log_dirty_ops *ops) {}
 static inline void paging_mark_dirty(struct domain *d, mfn_t gmfn) {}
 static inline void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn) {}
+static inline bool paging_mfn_is_dirty(struct domain *d, mfn_t gmfn) { return false; }
 
-#endif /* CONFIG_PV_SHIM_EXCLUSIVE */
+#endif /* PG_log_dirty */
 
 /*****************************************************************************
  * Entry points into the paging-assistance code */