[v1,3/7] mm: Use statically defined locking order

Message ID 1462549688-29263-4-git-send-email-ross.lagerwall@citrix.com (mailing list archive)
State New, archived
Headers show

Commit Message

Ross Lagerwall May 6, 2016, 3:48 p.m. UTC
Instead of using a locking order based on line numbers which doesn't
play nicely with xSplice, statically define the locking order.

Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
---
 xen/arch/x86/mm/mm-locks.h | 29 ++++++++++++++++++++---------
 1 file changed, 20 insertions(+), 9 deletions(-)
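
For context, the mechanism being changed is small: the declare_mm_lock()-style
wrappers pass an ordering level to the lock-checking code, and the patch swaps
the __LINE__ value for a token-pasted MM_LOCK_ORDER_##name constant. The
following is a minimal standalone sketch of that idea, not the Xen code
itself; the names and values mirror the patch, while the level-tracking
plumbing is simplified for illustration.

    #include <assert.h>
    #include <stdio.h>

    /* Order levels as defined by the patch (only two shown here). */
    #define MM_LOCK_ORDER_nestedp2m  10000
    #define MM_LOCK_ORDER_p2m        20000

    static int current_level;  /* stand-in for __get_lock_level() */

    /* Refuse to take a lock whose order level is below one already held. */
    static void check_lock_level(int level)
    {
        if ( current_level > level )
        {
            fprintf(stderr, "lock order violation: %d taken after %d\n",
                    level, current_level);
            assert(0);
        }
        current_level = level;
    }

    /* Before the patch, __LINE__ was passed here instead of the constant. */
    #define declare_mm_lock(name)                               \
        static inline void mm_lock_##name(void)                 \
        { check_lock_level(MM_LOCK_ORDER_##name); }

    declare_mm_lock(nestedp2m)
    declare_mm_lock(p2m)

    int main(void)
    {
        mm_lock_nestedp2m();   /* level 10000 */
        mm_lock_p2m();         /* level 20000: allowed, order respected */
        return 0;
    }

With __LINE__, the ordering levels shift whenever the file is edited, which is
what interacts badly with xSplice; the named constants keep the levels stable
across edits.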

Comments

Dario Faggioli May 10, 2016, 8:25 a.m. UTC | #1
On Fri, 2016-05-06 at 16:48 +0100, Ross Lagerwall wrote:
> Instead of using a locking order based on line numbers which doesn't
> play nicely with xSplice, statically define the locking order.
> 
> Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
>
Reviewed-by: Dario Faggioli <dario.faggioli@citrix.com>

FWIW, I actually like this...

> --- a/xen/arch/x86/mm/mm-locks.h
> +++ b/xen/arch/x86/mm/mm-locks.h
> @@ -46,8 +46,10 @@ static inline int mm_locked_by_me(mm_lock_t *l)
>      return (l->lock.recurse_cpu == current->processor);
>  }
>  
> -/* If you see this crash, the numbers printed are lines in this
> file 
> - * where the offending locks are declared. */
> +/*
> + * If you see this crash, the numbers printed are order levels
> defined
> + * in this file.
> + */
>
... and, apart from the fact that it serves xSplice purposes, I'd say
this is an improvement on the current situation in its own right
(although I understand this is a matter of taste). :-)

Regards,
Dario
Jan Beulich May 10, 2016, 2:42 p.m. UTC | #2
>>> On 06.05.16 at 17:48, <ross.lagerwall@citrix.com> wrote:
> @@ -201,11 +203,20 @@ static inline void mm_enforce_order_unlock(int unlock_level,
>  
>  /************************************************************************
>   *                                                                      *
> - * To avoid deadlocks, these locks _MUST_ be taken in the order they're *
> - * declared in this file.  The locking functions will enforce this.     *
> + * To avoid deadlocks, these locks _MUST_ be taken in the order listed  *
> + * below.  The locking functions will enforce this.                     *
>   *                                                                      *
>   ************************************************************************/
>  
> +#define MM_LOCK_ORDER_nestedp2m              10000
> +#define MM_LOCK_ORDER_p2m                    20000
> +#define MM_LOCK_ORDER_altp2mlist             30000
> +#define MM_LOCK_ORDER_altp2m                 40000
> +#define MM_LOCK_ORDER_per_page_sharing       50000
> +#define MM_LOCK_ORDER_pod                    60000
> +#define MM_LOCK_ORDER_page_alloc             70000
> +#define MM_LOCK_ORDER_paging                 80000

It would seem more natural for these to appear ahead of being used,
even if the order of #define-s doesn't really matter. And then, why
multiples of 10000? Large numbers are generally less efficient to deal
with (i.e. they can't fit in sign-extended 8-bit immediates).

Jan
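
For illustration only (the posted patch keeps the multiples of 10000), Jan's
point could be addressed with small levels, since only the relative order
matters for the comparison in __check_lock_level(). A hypothetical alternative:

    /* Sketch: small order levels that fit in a sign-extended 8-bit immediate. */
    #define MM_LOCK_ORDER_nestedp2m              10
    #define MM_LOCK_ORDER_p2m                    20
    #define MM_LOCK_ORDER_altp2mlist             30
    #define MM_LOCK_ORDER_altp2m                 40
    #define MM_LOCK_ORDER_per_page_sharing       50
    #define MM_LOCK_ORDER_pod                    60
    #define MM_LOCK_ORDER_page_alloc             70
    #define MM_LOCK_ORDER_paging                 80

Steps of 10 leave gaps so a new lock could be slotted in between existing
levels without renumbering everything.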
Andrew Cooper May 10, 2016, 2:46 p.m. UTC | #3
On 06/05/16 16:48, Ross Lagerwall wrote:
> Instead of using a locking order based on line numbers which doesn't
> play nicely with xSplice, statically define the locking order.
>
> Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>

Along with this change, it would be useful to modify the message in
__check_lock_level() to explicitly refer to mm-locks.h.

The current message is very obscure to anyone who doesn't know about
mm-locks.h.

~Andrew
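
As an illustration of Andrew's suggestion (hypothetical wording, not part of
this patch), the check could name the file explicitly when it fires; the
sketch below assumes the existing printk()/BUG() structure of
__check_lock_level():

    #define __check_lock_level(l)                                          \
    do {                                                                   \
        if ( unlikely(__get_lock_level() > (l)) )                          \
        {                                                                  \
            printk("mm locking order violation: %i > %i\n"                 \
                   "See the MM_LOCK_ORDER_* definitions in "               \
                   "xen/arch/x86/mm/mm-locks.h\n",                         \
                   __get_lock_level(), (l));                               \
            BUG();                                                         \
        }                                                                  \
    } while (0)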

Patch

diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index 086c8bb..da7f52d 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -46,8 +46,10 @@  static inline int mm_locked_by_me(mm_lock_t *l)
     return (l->lock.recurse_cpu == current->processor);
 }
 
-/* If you see this crash, the numbers printed are lines in this file 
- * where the offending locks are declared. */
+/*
+ * If you see this crash, the numbers printed are order levels defined
+ * in this file.
+ */
 #define __check_lock_level(l)                           \
 do {                                                    \
     if ( unlikely(__get_lock_level() > (l)) )           \
@@ -152,12 +154,12 @@  static inline void mm_read_unlock(mm_rwlock_t *l)
 /* This wrapper uses the line number to express the locking order below */
 #define declare_mm_lock(name)                                                 \
     static inline void mm_lock_##name(mm_lock_t *l, const char *func, int rec)\
-    { _mm_lock(l, func, __LINE__, rec); }
+    { _mm_lock(l, func, MM_LOCK_ORDER_##name, rec); }
 #define declare_mm_rwlock(name)                                               \
     static inline void mm_write_lock_##name(mm_rwlock_t *l, const char *func) \
-    { _mm_write_lock(l, func, __LINE__); }                                    \
+    { _mm_write_lock(l, func, MM_LOCK_ORDER_##name); }                                    \
     static inline void mm_read_lock_##name(mm_rwlock_t *l)                    \
-    { _mm_read_lock(l, __LINE__); }
+    { _mm_read_lock(l, MM_LOCK_ORDER_##name); }
 /* These capture the name of the calling function */
 #define mm_lock(name, l) mm_lock_##name(l, __func__, 0)
 #define mm_lock_recursive(name, l) mm_lock_##name(l, __func__, 1)
@@ -169,10 +171,10 @@  static inline void mm_read_unlock(mm_rwlock_t *l)
  * to ordering constraints. */
 #define declare_mm_order_constraint(name)                                   \
     static inline void mm_enforce_order_lock_pre_##name(void)               \
-    { _mm_enforce_order_lock_pre(__LINE__); }                               \
+    { _mm_enforce_order_lock_pre(MM_LOCK_ORDER_##name); }                               \
     static inline void mm_enforce_order_lock_post_##name(                   \
                         int *unlock_level, unsigned short *recurse_count)   \
-    { _mm_enforce_order_lock_post(__LINE__, unlock_level, recurse_count); } \
+    { _mm_enforce_order_lock_post(MM_LOCK_ORDER_##name, unlock_level, recurse_count); } \
 
 static inline void mm_unlock(mm_lock_t *l)
 {
@@ -201,11 +203,20 @@  static inline void mm_enforce_order_unlock(int unlock_level,
 
 /************************************************************************
  *                                                                      *
- * To avoid deadlocks, these locks _MUST_ be taken in the order they're *
- * declared in this file.  The locking functions will enforce this.     *
+ * To avoid deadlocks, these locks _MUST_ be taken in the order listed  *
+ * below.  The locking functions will enforce this.                     *
  *                                                                      *
  ************************************************************************/
 
+#define MM_LOCK_ORDER_nestedp2m              10000
+#define MM_LOCK_ORDER_p2m                    20000
+#define MM_LOCK_ORDER_altp2mlist             30000
+#define MM_LOCK_ORDER_altp2m                 40000
+#define MM_LOCK_ORDER_per_page_sharing       50000
+#define MM_LOCK_ORDER_pod                    60000
+#define MM_LOCK_ORDER_page_alloc             70000
+#define MM_LOCK_ORDER_paging                 80000
+
 /* Nested P2M lock (per-domain)
  *
  * A per-domain lock that protects the mapping from nested-CR3 to