
[v4,04/12] xen/spinlock: rename recursive lock functions

Message ID 20231212094725.22184-5-jgross@suse.com (mailing list archive)
State New
Series xen/spinlock: make recursive spinlocks a dedicated type

Commit Message

Jürgen Groß Dec. 12, 2023, 9:47 a.m. UTC
Rename the recursive spin_lock() functions by replacing the trailing
"_recursive" with a leading "r".

Switch the parameter to be a pointer to rspinlock_t.

Remove the indirection through a macro, as it only adds complexity without
any gain.

Suggested-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- new patch
---
 xen/arch/arm/domain.c         |  4 +--
 xen/arch/x86/domain.c         |  8 +++---
 xen/arch/x86/mm/mem_sharing.c |  8 +++---
 xen/arch/x86/mm/mm-locks.h    |  4 +--
 xen/common/ioreq.c            | 52 +++++++++++++++++------------------
 xen/common/page_alloc.c       | 12 ++++----
 xen/common/spinlock.c         |  6 ++--
 xen/drivers/char/console.c    | 12 ++++----
 xen/drivers/passthrough/pci.c |  4 +--
 xen/include/xen/sched.h       |  4 +--
 xen/include/xen/spinlock.h    | 24 +++++++---------
 11 files changed, 67 insertions(+), 71 deletions(-)
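
For illustration, a minimal sketch of what a caller looks like after this
rename (the lock and function names below are hypothetical examples; the
rspinlock_t type, DEFINE_RSPINLOCK() and the rspin_* functions are the ones
introduced by this series):

    /* Example only: declare a recursive spinlock and use the renamed API. */
    static DEFINE_RSPINLOCK(example_lock);

    static void example_caller(void)
    {
        rspin_lock(&example_lock);            /* was: spin_lock_recursive()    */

        /* The same CPU may take the lock again without deadlocking. */
        if ( rspin_trylock(&example_lock) )   /* was: spin_trylock_recursive() */
            rspin_unlock(&example_lock);

        rspin_unlock(&example_lock);          /* was: spin_unlock_recursive()  */
    }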

Comments

Julien Grall Dec. 12, 2023, 12:59 p.m. UTC | #1
Hi Juergen,

On 12/12/2023 09:47, Juergen Gross wrote:
> Rename the recursive spin_lock() functions by replacing the trailing
> "_recursive" with a leading "r".
> 
> Switch the parameter to be a pointer to rspinlock_t.
> 
> Remove the indirection through a macro, as it only adds complexity without
> any gain.
> 
> Suggested-by: Jan Beulich <jbeulich@suse.com>
> Signed-off-by: Juergen Gross <jgross@suse.com>

Acked-by: Julien Grall <jgrall@amazon.com>

Cheers,
Jan Beulich Feb. 28, 2024, 2:59 p.m. UTC | #2
On 12.12.2023 10:47, Juergen Gross wrote:
> Rename the recursive spin_lock() functions by replacing the trailing
> "_recursive" with a leading "r".
> 
> Switch the parameter to be a pointer to rspinlock_t.
> 
> Remove the indirection through a macro, as it only adds complexity without
> any gain.

Considering we aren't aware of any leveraging of this, doing so is
probably okay. Still I think it was done that way for a reason. Plus
of course if we undo the indirection here, sooner or later we should
also undo similar indirection elsewhere.

> Suggested-by: Jan Beulich <jbeulich@suse.com>
> Signed-off-by: Juergen Gross <jgross@suse.com>

Acked-by: Jan Beulich <jbeulich@suse.com>

Jan
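
For context, the indirection Jan refers to is the macro-to-function
forwarding that this patch drops from xen/include/xen/spinlock.h; callers now
invoke the functions directly. A before/after sketch, taken from the hunk
further below:

    /* Before: a macro layer forwarding to the underscore-prefixed functions. */
    int _spin_trylock_recursive(spinlock_t *lock);
    void _spin_lock_recursive(spinlock_t *lock);
    void _spin_unlock_recursive(spinlock_t *lock);
    #define spin_trylock_recursive(l)     _spin_trylock_recursive(l)
    #define spin_lock_recursive(l)        _spin_lock_recursive(l)
    #define spin_unlock_recursive(l)      _spin_unlock_recursive(l)

    /* After: plain function declarations, taking a pointer to rspinlock_t. */
    int rspin_trylock(rspinlock_t *lock);
    void rspin_lock(rspinlock_t *lock);
    void rspin_unlock(rspinlock_t *lock);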

Patch

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 5e7a7f3e7e..f38cb5e04c 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -987,7 +987,7 @@  static int relinquish_memory(struct domain *d, struct page_list_head *list)
     int               ret = 0;
 
     /* Use a recursive lock, as we may enter 'free_domheap_page'. */
-    spin_lock_recursive(&d->page_alloc_lock);
+    rspin_lock(&d->page_alloc_lock);
 
     page_list_for_each_safe( page, tmp, list )
     {
@@ -1014,7 +1014,7 @@  static int relinquish_memory(struct domain *d, struct page_list_head *list)
     }
 
   out:
-    spin_unlock_recursive(&d->page_alloc_lock);
+    rspin_unlock(&d->page_alloc_lock);
     return ret;
 }
 
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 3712e36df9..69ce1fd5cf 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1321,7 +1321,7 @@  int arch_set_info_guest(
         {
             bool done = false;
 
-            spin_lock_recursive(&d->page_alloc_lock);
+            rspin_lock(&d->page_alloc_lock);
 
             for ( i = 0; ; )
             {
@@ -1342,7 +1342,7 @@  int arch_set_info_guest(
                     break;
             }
 
-            spin_unlock_recursive(&d->page_alloc_lock);
+            rspin_unlock(&d->page_alloc_lock);
 
             if ( !done )
                 return -ERESTART;
@@ -2181,7 +2181,7 @@  static int relinquish_memory(
     int               ret = 0;
 
     /* Use a recursive lock, as we may enter 'free_domheap_page'. */
-    spin_lock_recursive(&d->page_alloc_lock);
+    rspin_lock(&d->page_alloc_lock);
 
     while ( (page = page_list_remove_head(list)) )
     {
@@ -2322,7 +2322,7 @@  static int relinquish_memory(
     page_list_move(list, &d->arch.relmem_list);
 
  out:
-    spin_unlock_recursive(&d->page_alloc_lock);
+    rspin_unlock(&d->page_alloc_lock);
     return ret;
 }
 
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 4f810706a3..1720079fd9 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -688,7 +688,7 @@  static int page_make_sharable(struct domain *d,
     int rc = 0;
     bool drop_dom_ref = false;
 
-    spin_lock_recursive(&d->page_alloc_lock);
+    rspin_lock(&d->page_alloc_lock);
 
     if ( d->is_dying )
     {
@@ -731,7 +731,7 @@  static int page_make_sharable(struct domain *d,
     }
 
 out:
-    spin_unlock_recursive(&d->page_alloc_lock);
+    rspin_unlock(&d->page_alloc_lock);
 
     if ( drop_dom_ref )
         put_domain(d);
@@ -1942,7 +1942,7 @@  int mem_sharing_fork_reset(struct domain *d, bool reset_state,
         goto state;
 
     /* need recursive lock because we will free pages */
-    spin_lock_recursive(&d->page_alloc_lock);
+    rspin_lock(&d->page_alloc_lock);
     page_list_for_each_safe(page, tmp, &d->page_list)
     {
         shr_handle_t sh;
@@ -1971,7 +1971,7 @@  int mem_sharing_fork_reset(struct domain *d, bool reset_state,
         put_page_alloc_ref(page);
         put_page_and_type(page);
     }
-    spin_unlock_recursive(&d->page_alloc_lock);
+    rspin_unlock(&d->page_alloc_lock);
 
  state:
     if ( reset_state )
diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index b05cad1752..c867ad7d53 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -79,7 +79,7 @@  static inline void _mm_lock(const struct domain *d, mm_lock_t *l,
 {
     if ( !((mm_locked_by_me(l)) && rec) )
         _check_lock_level(d, level);
-    spin_lock_recursive(&l->lock);
+    rspin_lock(&l->lock);
     if ( l->lock.recurse_cnt == 1 )
     {
         l->locker_function = func;
@@ -200,7 +200,7 @@  static inline void mm_unlock(mm_lock_t *l)
         l->locker_function = "nobody";
         _set_lock_level(l->unlock_level);
     }
-    spin_unlock_recursive(&l->lock);
+    rspin_unlock(&l->lock);
 }
 
 static inline void mm_enforce_order_unlock(int unlock_level,
diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
index 652c18a9b5..1257a3d972 100644
--- a/xen/common/ioreq.c
+++ b/xen/common/ioreq.c
@@ -329,7 +329,7 @@  bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
     unsigned int id;
     bool found = false;
 
-    spin_lock_recursive(&d->ioreq_server.lock);
+    rspin_lock(&d->ioreq_server.lock);
 
     FOR_EACH_IOREQ_SERVER(d, id, s)
     {
@@ -340,7 +340,7 @@  bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
         }
     }
 
-    spin_unlock_recursive(&d->ioreq_server.lock);
+    rspin_unlock(&d->ioreq_server.lock);
 
     return found;
 }
@@ -658,7 +658,7 @@  static int ioreq_server_create(struct domain *d, int bufioreq_handling,
         return -ENOMEM;
 
     domain_pause(d);
-    spin_lock_recursive(&d->ioreq_server.lock);
+    rspin_lock(&d->ioreq_server.lock);
 
     for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
     {
@@ -686,13 +686,13 @@  static int ioreq_server_create(struct domain *d, int bufioreq_handling,
     if ( id )
         *id = i;
 
-    spin_unlock_recursive(&d->ioreq_server.lock);
+    rspin_unlock(&d->ioreq_server.lock);
     domain_unpause(d);
 
     return 0;
 
  fail:
-    spin_unlock_recursive(&d->ioreq_server.lock);
+    rspin_unlock(&d->ioreq_server.lock);
     domain_unpause(d);
 
     xfree(s);
@@ -704,7 +704,7 @@  static int ioreq_server_destroy(struct domain *d, ioservid_t id)
     struct ioreq_server *s;
     int rc;
 
-    spin_lock_recursive(&d->ioreq_server.lock);
+    rspin_lock(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -736,7 +736,7 @@  static int ioreq_server_destroy(struct domain *d, ioservid_t id)
     rc = 0;
 
  out:
-    spin_unlock_recursive(&d->ioreq_server.lock);
+    rspin_unlock(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -749,7 +749,7 @@  static int ioreq_server_get_info(struct domain *d, ioservid_t id,
     struct ioreq_server *s;
     int rc;
 
-    spin_lock_recursive(&d->ioreq_server.lock);
+    rspin_lock(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -783,7 +783,7 @@  static int ioreq_server_get_info(struct domain *d, ioservid_t id,
     rc = 0;
 
  out:
-    spin_unlock_recursive(&d->ioreq_server.lock);
+    rspin_unlock(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -796,7 +796,7 @@  int ioreq_server_get_frame(struct domain *d, ioservid_t id,
 
     ASSERT(is_hvm_domain(d));
 
-    spin_lock_recursive(&d->ioreq_server.lock);
+    rspin_lock(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -834,7 +834,7 @@  int ioreq_server_get_frame(struct domain *d, ioservid_t id,
     }
 
  out:
-    spin_unlock_recursive(&d->ioreq_server.lock);
+    rspin_unlock(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -850,7 +850,7 @@  static int ioreq_server_map_io_range(struct domain *d, ioservid_t id,
     if ( start > end )
         return -EINVAL;
 
-    spin_lock_recursive(&d->ioreq_server.lock);
+    rspin_lock(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -886,7 +886,7 @@  static int ioreq_server_map_io_range(struct domain *d, ioservid_t id,
     rc = rangeset_add_range(r, start, end);
 
  out:
-    spin_unlock_recursive(&d->ioreq_server.lock);
+    rspin_unlock(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -902,7 +902,7 @@  static int ioreq_server_unmap_io_range(struct domain *d, ioservid_t id,
     if ( start > end )
         return -EINVAL;
 
-    spin_lock_recursive(&d->ioreq_server.lock);
+    rspin_lock(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -938,7 +938,7 @@  static int ioreq_server_unmap_io_range(struct domain *d, ioservid_t id,
     rc = rangeset_remove_range(r, start, end);
 
  out:
-    spin_unlock_recursive(&d->ioreq_server.lock);
+    rspin_unlock(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -963,7 +963,7 @@  int ioreq_server_map_mem_type(struct domain *d, ioservid_t id,
     if ( flags & ~XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
         return -EINVAL;
 
-    spin_lock_recursive(&d->ioreq_server.lock);
+    rspin_lock(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -978,7 +978,7 @@  int ioreq_server_map_mem_type(struct domain *d, ioservid_t id,
     rc = arch_ioreq_server_map_mem_type(d, s, flags);
 
  out:
-    spin_unlock_recursive(&d->ioreq_server.lock);
+    rspin_unlock(&d->ioreq_server.lock);
 
     if ( rc == 0 )
         arch_ioreq_server_map_mem_type_completed(d, s, flags);
@@ -992,7 +992,7 @@  static int ioreq_server_set_state(struct domain *d, ioservid_t id,
     struct ioreq_server *s;
     int rc;
 
-    spin_lock_recursive(&d->ioreq_server.lock);
+    rspin_lock(&d->ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
 
@@ -1016,7 +1016,7 @@  static int ioreq_server_set_state(struct domain *d, ioservid_t id,
     rc = 0;
 
  out:
-    spin_unlock_recursive(&d->ioreq_server.lock);
+    rspin_unlock(&d->ioreq_server.lock);
     return rc;
 }
 
@@ -1026,7 +1026,7 @@  int ioreq_server_add_vcpu_all(struct domain *d, struct vcpu *v)
     unsigned int id;
     int rc;
 
-    spin_lock_recursive(&d->ioreq_server.lock);
+    rspin_lock(&d->ioreq_server.lock);
 
     FOR_EACH_IOREQ_SERVER(d, id, s)
     {
@@ -1035,7 +1035,7 @@  int ioreq_server_add_vcpu_all(struct domain *d, struct vcpu *v)
             goto fail;
     }
 
-    spin_unlock_recursive(&d->ioreq_server.lock);
+    rspin_unlock(&d->ioreq_server.lock);
 
     return 0;
 
@@ -1050,7 +1050,7 @@  int ioreq_server_add_vcpu_all(struct domain *d, struct vcpu *v)
         ioreq_server_remove_vcpu(s, v);
     }
 
-    spin_unlock_recursive(&d->ioreq_server.lock);
+    rspin_unlock(&d->ioreq_server.lock);
 
     return rc;
 }
@@ -1060,12 +1060,12 @@  void ioreq_server_remove_vcpu_all(struct domain *d, struct vcpu *v)
     struct ioreq_server *s;
     unsigned int id;
 
-    spin_lock_recursive(&d->ioreq_server.lock);
+    rspin_lock(&d->ioreq_server.lock);
 
     FOR_EACH_IOREQ_SERVER(d, id, s)
         ioreq_server_remove_vcpu(s, v);
 
-    spin_unlock_recursive(&d->ioreq_server.lock);
+    rspin_unlock(&d->ioreq_server.lock);
 }
 
 void ioreq_server_destroy_all(struct domain *d)
@@ -1076,7 +1076,7 @@  void ioreq_server_destroy_all(struct domain *d)
     if ( !arch_ioreq_server_destroy_all(d) )
         return;
 
-    spin_lock_recursive(&d->ioreq_server.lock);
+    rspin_lock(&d->ioreq_server.lock);
 
     /* No need to domain_pause() as the domain is being torn down */
 
@@ -1094,7 +1094,7 @@  void ioreq_server_destroy_all(struct domain *d)
         xfree(s);
     }
 
-    spin_unlock_recursive(&d->ioreq_server.lock);
+    rspin_unlock(&d->ioreq_server.lock);
 }
 
 struct ioreq_server *ioreq_server_select(struct domain *d,
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 9b5df74fdd..8c6a3d9274 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2497,7 +2497,7 @@  void free_domheap_pages(struct page_info *pg, unsigned int order)
     if ( unlikely(is_xen_heap_page(pg)) )
     {
         /* NB. May recursively lock from relinquish_memory(). */
-        spin_lock_recursive(&d->page_alloc_lock);
+        rspin_lock(&d->page_alloc_lock);
 
         for ( i = 0; i < (1 << order); i++ )
             arch_free_heap_page(d, &pg[i]);
@@ -2505,7 +2505,7 @@  void free_domheap_pages(struct page_info *pg, unsigned int order)
         d->xenheap_pages -= 1 << order;
         drop_dom_ref = (d->xenheap_pages == 0);
 
-        spin_unlock_recursive(&d->page_alloc_lock);
+        rspin_unlock(&d->page_alloc_lock);
     }
     else
     {
@@ -2514,7 +2514,7 @@  void free_domheap_pages(struct page_info *pg, unsigned int order)
         if ( likely(d) && likely(d != dom_cow) )
         {
             /* NB. May recursively lock from relinquish_memory(). */
-            spin_lock_recursive(&d->page_alloc_lock);
+            rspin_lock(&d->page_alloc_lock);
 
             for ( i = 0; i < (1 << order); i++ )
             {
@@ -2537,7 +2537,7 @@  void free_domheap_pages(struct page_info *pg, unsigned int order)
 
             drop_dom_ref = !domain_adjust_tot_pages(d, -(1 << order));
 
-            spin_unlock_recursive(&d->page_alloc_lock);
+            rspin_unlock(&d->page_alloc_lock);
 
             /*
              * Normally we expect a domain to clear pages before freeing them,
@@ -2753,7 +2753,7 @@  void free_domstatic_page(struct page_info *page)
     ASSERT_ALLOC_CONTEXT();
 
     /* NB. May recursively lock from relinquish_memory(). */
-    spin_lock_recursive(&d->page_alloc_lock);
+    rspin_lock(&d->page_alloc_lock);
 
     arch_free_heap_page(d, page);
 
@@ -2764,7 +2764,7 @@  void free_domstatic_page(struct page_info *page)
     /* Add page on the resv_page_list *after* it has been freed. */
     page_list_add_tail(page, &d->resv_page_list);
 
-    spin_unlock_recursive(&d->page_alloc_lock);
+    rspin_unlock(&d->page_alloc_lock);
 
     if ( drop_dom_ref )
         put_domain(d);
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 09028af864..422a7fb1db 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -436,7 +436,7 @@  void _spin_barrier(spinlock_t *lock)
     smp_mb();
 }
 
-int _spin_trylock_recursive(spinlock_t *lock)
+int rspin_trylock(rspinlock_t *lock)
 {
     unsigned int cpu = smp_processor_id();
 
@@ -460,7 +460,7 @@  int _spin_trylock_recursive(spinlock_t *lock)
     return 1;
 }
 
-void _spin_lock_recursive(spinlock_t *lock)
+void rspin_lock(rspinlock_t *lock)
 {
     unsigned int cpu = smp_processor_id();
 
@@ -475,7 +475,7 @@  void _spin_lock_recursive(spinlock_t *lock)
     lock->recurse_cnt++;
 }
 
-void _spin_unlock_recursive(spinlock_t *lock)
+void rspin_unlock(rspinlock_t *lock)
 {
     if ( likely(--lock->recurse_cnt == 0) )
     {
diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c
index 76e455bacd..f6f61dc5a1 100644
--- a/xen/drivers/char/console.c
+++ b/xen/drivers/char/console.c
@@ -920,7 +920,7 @@  static void vprintk_common(const char *prefix, const char *fmt, va_list args)
 
     /* console_lock can be acquired recursively from __printk_ratelimit(). */
     local_irq_save(flags);
-    spin_lock_recursive(&console_lock);
+    rspin_lock(&console_lock);
     state = &this_cpu(state);
 
     (void)vsnprintf(buf, sizeof(buf), fmt, args);
@@ -956,7 +956,7 @@  static void vprintk_common(const char *prefix, const char *fmt, va_list args)
         state->continued = 1;
     }
 
-    spin_unlock_recursive(&console_lock);
+    rspin_unlock(&console_lock);
     local_irq_restore(flags);
 }
 
@@ -1163,14 +1163,14 @@  unsigned long console_lock_recursive_irqsave(void)
     unsigned long flags;
 
     local_irq_save(flags);
-    spin_lock_recursive(&console_lock);
+    rspin_lock(&console_lock);
 
     return flags;
 }
 
 void console_unlock_recursive_irqrestore(unsigned long flags)
 {
-    spin_unlock_recursive(&console_lock);
+    rspin_unlock(&console_lock);
     local_irq_restore(flags);
 }
 
@@ -1231,12 +1231,12 @@  int __printk_ratelimit(int ratelimit_ms, int ratelimit_burst)
             char lost_str[8];
             snprintf(lost_str, sizeof(lost_str), "%d", lost);
             /* console_lock may already be acquired by printk(). */
-            spin_lock_recursive(&console_lock);
+            rspin_lock(&console_lock);
             printk_start_of_line("(XEN) ");
             __putstr("printk: ");
             __putstr(lost_str);
             __putstr(" messages suppressed.\n");
-            spin_unlock_recursive(&console_lock);
+            rspin_unlock(&console_lock);
         }
         local_irq_restore(flags);
         return 1;
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index d604ed5634..41444f8e2e 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -54,12 +54,12 @@  static DEFINE_RSPINLOCK(_pcidevs_lock);
 
 void pcidevs_lock(void)
 {
-    spin_lock_recursive(&_pcidevs_lock);
+    rspin_lock(&_pcidevs_lock);
 }
 
 void pcidevs_unlock(void)
 {
-    spin_unlock_recursive(&_pcidevs_lock);
+    rspin_unlock(&_pcidevs_lock);
 }
 
 bool pcidevs_locked(void)
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index c6604aef78..8cf751ad0c 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -358,8 +358,8 @@  struct sched_unit {
           (v) = (v)->next_in_list )
 
 /* Per-domain lock can be recursively acquired in fault handlers. */
-#define domain_lock(d) spin_lock_recursive(&(d)->domain_lock)
-#define domain_unlock(d) spin_unlock_recursive(&(d)->domain_lock)
+#define domain_lock(d) rspin_lock(&(d)->domain_lock)
+#define domain_unlock(d) rspin_unlock(&(d)->domain_lock)
 
 struct evtchn_port_ops;
 
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 20d15f34dd..ee536c302c 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -209,9 +209,16 @@  int _spin_is_locked(const spinlock_t *lock);
 int _spin_trylock(spinlock_t *lock);
 void _spin_barrier(spinlock_t *lock);
 
-int _spin_trylock_recursive(spinlock_t *lock);
-void _spin_lock_recursive(spinlock_t *lock);
-void _spin_unlock_recursive(spinlock_t *lock);
+/*
+ * rspin_[un]lock(): Use these forms when the lock can (safely!) be
+ * reentered recursively on the same CPU. All critical regions that may form
+ * part of a recursively-nested set must be protected by these forms. If there
+ * are any critical regions that cannot form part of such a set, they can use
+ * standard spin_[un]lock().
+ */
+int rspin_trylock(rspinlock_t *lock);
+void rspin_lock(rspinlock_t *lock);
+void rspin_unlock(rspinlock_t *lock);
 
 #define spin_lock(l)                  _spin_lock(l)
 #define spin_lock_cb(l, c, d)         _spin_lock_cb(l, c, d)
@@ -241,15 +248,4 @@  void _spin_unlock_recursive(spinlock_t *lock);
 /* Ensure a lock is quiescent between two critical operations. */
 #define spin_barrier(l)               _spin_barrier(l)
 
-/*
- * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
- * reentered recursively on the same CPU. All critical regions that may form
- * part of a recursively-nested set must be protected by these forms. If there
- * are any critical regions that cannot form part of such a set, they can use
- * standard spin_[un]lock().
- */
-#define spin_trylock_recursive(l)     _spin_trylock_recursive(l)
-#define spin_lock_recursive(l)        _spin_lock_recursive(l)
-#define spin_unlock_recursive(l)      _spin_unlock_recursive(l)
-
 #endif /* __SPINLOCK_H__ */