@@ -23,6 +23,7 @@
#include <xen/types.h>
#include <xen/domain_page.h>
#include <xen/spinlock.h>
+#include <xen/rwlock.h>
#include <xen/mm.h>
#include <xen/grant_table.h>
#include <xen/sched.h>
@@ -30,6 +30,7 @@ obj-y += rangeset.o
obj-y += radix-tree.o
obj-y += rbtree.o
obj-y += rcupdate.o
+obj-y += rwlock.o
obj-$(CONFIG_SCHED_ARINC653) += sched_arinc653.o
obj-$(CONFIG_SCHED_CREDIT) += sched_credit.o
obj-$(CONFIG_SCHED_CREDIT2) += sched_credit2.o
new file mode 100644
@@ -0,0 +1,53 @@
+#include <xen/rwlock.h>
+#include <xen/irq.h>
+
+static DEFINE_PER_CPU(cpumask_t, percpu_rwlock_readers);
+
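+/*
+ * Writer slow path: take the underlying rwlock to exclude other writers
+ * and slow-path readers, set writer_activating so that new readers use
+ * the slow path, then spin until every CPU whose per-CPU pointer still
+ * references this lock has left its read-side critical section.
+ */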
+void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
+ percpu_rwlock_t *percpu_rwlock)
+{
+ unsigned int cpu;
+ cpumask_t *rwlock_readers = &this_cpu(percpu_rwlock_readers);
+
+ /* Validate the correct per_cpudata variable has been provided. */
+ _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+ /*
+ * First take the write lock to protect against other writers or slow
+ * path readers.
+ */
+ write_lock(&percpu_rwlock->rwlock);
+
+    /* Now set writer_activating so that readers start using read_lock. */
+ percpu_rwlock->writer_activating = 1;
+ smp_mb();
+
+ /* Using a per cpu cpumask is only safe if there is no nesting. */
+ ASSERT(!in_irq());
+ cpumask_copy(rwlock_readers, &cpu_online_map);
+
+ /* Check if there are any percpu readers in progress on this rwlock. */
+ for ( ; ; )
+ {
+ for_each_cpu(cpu, rwlock_readers)
+ {
+ /*
+ * Remove any percpu readers not contending on this rwlock
+ * from our check mask.
+ */
+ if ( per_cpu_ptr(per_cpudata, cpu) != percpu_rwlock )
+ __cpumask_clear_cpu(cpu, rwlock_readers);
+ }
+        /* Check if we've cleared all percpu readers from the check mask. */
+ if ( cpumask_empty(rwlock_readers) )
+ break;
+ /* Give the coherency fabric a break. */
+ cpu_relax();
+    }
+}
@@ -10,8 +10,6 @@
#include <asm/processor.h>
#include <asm/atomic.h>
-static DEFINE_PER_CPU(cpumask_t, percpu_rwlock_readers);
-
#ifndef NDEBUG
static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
@@ -494,49 +492,6 @@ int _rw_is_write_locked(rwlock_t *lock)
return (lock->lock == RW_WRITE_FLAG); /* writer in critical section? */
}
-void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
- percpu_rwlock_t *percpu_rwlock)
-{
- unsigned int cpu;
- cpumask_t *rwlock_readers = &this_cpu(percpu_rwlock_readers);
-
- /* Validate the correct per_cpudata variable has been provided. */
- _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
-
- /*
- * First take the write lock to protect against other writers or slow
- * path readers.
- */
- write_lock(&percpu_rwlock->rwlock);
-
- /* Now set the global variable so that readers start using read_lock. */
- percpu_rwlock->writer_activating = 1;
- smp_mb();
-
- /* Using a per cpu cpumask is only safe if there is no nesting. */
- ASSERT(!in_irq());
- cpumask_copy(rwlock_readers, &cpu_online_map);
-
- /* Check if there are any percpu readers in progress on this rwlock. */
- for ( ; ; )
- {
- for_each_cpu(cpu, rwlock_readers)
- {
- /*
- * Remove any percpu readers not contending on this rwlock
- * from our check mask.
- */
- if ( per_cpu_ptr(per_cpudata, cpu) != percpu_rwlock )
- __cpumask_clear_cpu(cpu, rwlock_readers);
- }
- /* Check if we've cleared all percpu readers from check mask. */
- if ( cpumask_empty(rwlock_readers) )
- break;
- /* Give the coherency fabric a break. */
- cpu_relax();
- };
-}
-
#ifdef LOCK_PROFILE
struct lock_profile_anc {
@@ -5,6 +5,7 @@
#include <xen/config.h>
#include <xen/list.h>
#include <xen/spinlock.h>
+#include <xen/rwlock.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/x86_emulate.h>
@@ -23,6 +23,7 @@
#ifndef __XEN_GRANT_TABLE_H__
#define __XEN_GRANT_TABLE_H__
+#include <xen/rwlock.h>
#include <public/grant_table.h>
#include <asm/page.h>
#include <asm/grant_table.h>
new file mode 100644
@@ -0,0 +1,172 @@
+#ifndef __RWLOCK_H__
+#define __RWLOCK_H__
+
+#include <xen/percpu.h>
+#include <xen/spinlock.h>
+
+#define read_lock(l) _read_lock(l)
+#define read_lock_irq(l) _read_lock_irq(l)
+#define read_lock_irqsave(l, f) \
+ ({ \
+ BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long)); \
+ ((f) = _read_lock_irqsave(l)); \
+ })
+
+#define read_unlock(l) _read_unlock(l)
+#define read_unlock_irq(l) _read_unlock_irq(l)
+#define read_unlock_irqrestore(l, f) _read_unlock_irqrestore(l, f)
+#define read_trylock(l) _read_trylock(l)
+
+#define write_lock(l) _write_lock(l)
+#define write_lock_irq(l) _write_lock_irq(l)
+#define write_lock_irqsave(l, f) \
+ ({ \
+ BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long)); \
+ ((f) = _write_lock_irqsave(l)); \
+ })
+#define write_trylock(l) _write_trylock(l)
+
+#define write_unlock(l) _write_unlock(l)
+#define write_unlock_irq(l) _write_unlock_irq(l)
+#define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f)
+
+#define rw_is_locked(l) _rw_is_locked(l)
+#define rw_is_write_locked(l) _rw_is_write_locked(l)
+
+typedef struct percpu_rwlock percpu_rwlock_t;
+
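+/*
+ * A reader on the fast path only writes its own per-CPU pointer, so
+ * concurrent readers do not contend on a shared cacheline.  A writer
+ * sets writer_activating to divert new readers onto the conventional
+ * rwlock, then waits for existing per-CPU readers to drain.
+ */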
+struct percpu_rwlock {
+ rwlock_t rwlock;
+ bool_t writer_activating;
+#ifndef NDEBUG
+ percpu_rwlock_t **percpu_owner;
+#endif
+};
+
+#ifndef NDEBUG
+#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0, owner }
+static inline void _percpu_rwlock_owner_check(percpu_rwlock_t **per_cpudata,
+ percpu_rwlock_t *percpu_rwlock)
+{
+ ASSERT(per_cpudata == percpu_rwlock->percpu_owner);
+}
+#else
+#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0 }
+#define _percpu_rwlock_owner_check(data, lock) ((void)0)
+#endif
+
+#define DEFINE_PERCPU_RWLOCK_RESOURCE(l, owner) \
+ percpu_rwlock_t l = PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner))
+#define percpu_rwlock_resource_init(l, owner) \
+ (*(l) = (percpu_rwlock_t)PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner)))
+
+static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
+ percpu_rwlock_t *percpu_rwlock)
+{
+ /* Validate the correct per_cpudata variable has been provided. */
+ _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+ /* We cannot support recursion on the same lock. */
+ ASSERT(this_cpu_ptr(per_cpudata) != percpu_rwlock);
+    /*
+     * Detect simultaneous use of a second percpu_rwlock_t and fall back
+     * to the standard read_lock.
+     */
+    if ( unlikely(this_cpu_ptr(per_cpudata) != NULL) )
+ {
+ read_lock(&percpu_rwlock->rwlock);
+ return;
+ }
+
+ /* Indicate this cpu is reading. */
+ this_cpu_ptr(per_cpudata) = percpu_rwlock;
+ smp_mb();
+ /* Check if a writer is waiting. */
+ if ( unlikely(percpu_rwlock->writer_activating) )
+ {
+ /* Let the waiting writer know we aren't holding the lock. */
+ this_cpu_ptr(per_cpudata) = NULL;
+ /* Wait using the read lock to keep the lock fair. */
+ read_lock(&percpu_rwlock->rwlock);
+ /* Set the per CPU data again and continue. */
+ this_cpu_ptr(per_cpudata) = percpu_rwlock;
+ /* Drop the read lock because we don't need it anymore. */
+ read_unlock(&percpu_rwlock->rwlock);
+ }
+}
+
+static inline void _percpu_read_unlock(percpu_rwlock_t **per_cpudata,
+ percpu_rwlock_t *percpu_rwlock)
+{
+ /* Validate the correct per_cpudata variable has been provided. */
+ _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+    /* Verify the read lock was taken for this lock. */
+    ASSERT(this_cpu_ptr(per_cpudata) != NULL);
+    /*
+     * Detect simultaneous use of a second percpu_rwlock_t and fall back
+     * to the standard read_unlock.
+     */
+    if ( unlikely(this_cpu_ptr(per_cpudata) != percpu_rwlock) )
+ {
+ read_unlock(&percpu_rwlock->rwlock);
+ return;
+ }
+ this_cpu_ptr(per_cpudata) = NULL;
+ smp_wmb();
+}
+
+/* Don't inline percpu write lock as it's a complex function. */
+void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
+ percpu_rwlock_t *percpu_rwlock);
+
+static inline void _percpu_write_unlock(percpu_rwlock_t **per_cpudata,
+ percpu_rwlock_t *percpu_rwlock)
+{
+ /* Validate the correct per_cpudata variable has been provided. */
+ _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
+
+ ASSERT(percpu_rwlock->writer_activating);
+ percpu_rwlock->writer_activating = 0;
+ write_unlock(&percpu_rwlock->rwlock);
+}
+
+#define percpu_rw_is_write_locked(l) _rw_is_write_locked(&((l)->rwlock))
+
+#define percpu_read_lock(percpu, lock) \
+ _percpu_read_lock(&get_per_cpu_var(percpu), lock)
+#define percpu_read_unlock(percpu, lock) \
+ _percpu_read_unlock(&get_per_cpu_var(percpu), lock)
+#define percpu_write_lock(percpu, lock) \
+ _percpu_write_lock(&get_per_cpu_var(percpu), lock)
+#define percpu_write_unlock(percpu, lock) \
+ _percpu_write_unlock(&get_per_cpu_var(percpu), lock)
+
+#define DEFINE_PERCPU_RWLOCK_GLOBAL(name) DEFINE_PER_CPU(percpu_rwlock_t *, \
+ name)
+#define DECLARE_PERCPU_RWLOCK_GLOBAL(name) DECLARE_PER_CPU(percpu_rwlock_t *, \
+ name)
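+
+/*
+ * Illustrative usage; the my_* names are hypothetical, not part of this
+ * API:
+ *
+ *   DEFINE_PERCPU_RWLOCK_GLOBAL(my_percpu_rwlock);
+ *   static DEFINE_PERCPU_RWLOCK_RESOURCE(my_lock, my_percpu_rwlock);
+ *
+ *   percpu_read_lock(my_percpu_rwlock, &my_lock);
+ *   ... read-side critical section ...
+ *   percpu_read_unlock(my_percpu_rwlock, &my_lock);
+ *
+ *   percpu_write_lock(my_percpu_rwlock, &my_lock);
+ *   ... write-side critical section ...
+ *   percpu_write_unlock(my_percpu_rwlock, &my_lock);
+ */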
+
+#endif /* __RWLOCK_H__ */
@@ -4,6 +4,7 @@
#include <xen/types.h>
#include <xen/spinlock.h>
+#include <xen/rwlock.h>
#include <xen/shared.h>
#include <xen/timer.h>
#include <xen/rangeset.h>
@@ -236,147 +236,4 @@ int _rw_is_write_locked(rwlock_t *lock);
#define spin_lock_recursive(l) _spin_lock_recursive(l)
#define spin_unlock_recursive(l) _spin_unlock_recursive(l)
-#define read_lock(l) _read_lock(l)
-#define read_lock_irq(l) _read_lock_irq(l)
-#define read_lock_irqsave(l, f) \
- ({ \
- BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long)); \
- ((f) = _read_lock_irqsave(l)); \
- })
-
-#define read_unlock(l) _read_unlock(l)
-#define read_unlock_irq(l) _read_unlock_irq(l)
-#define read_unlock_irqrestore(l, f) _read_unlock_irqrestore(l, f)
-#define read_trylock(l) _read_trylock(l)
-
-#define write_lock(l) _write_lock(l)
-#define write_lock_irq(l) _write_lock_irq(l)
-#define write_lock_irqsave(l, f) \
- ({ \
- BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long)); \
- ((f) = _write_lock_irqsave(l)); \
- })
-#define write_trylock(l) _write_trylock(l)
-
-#define write_unlock(l) _write_unlock(l)
-#define write_unlock_irq(l) _write_unlock_irq(l)
-#define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f)
-
-#define rw_is_locked(l) _rw_is_locked(l)
-#define rw_is_write_locked(l) _rw_is_write_locked(l)
-
-typedef struct percpu_rwlock percpu_rwlock_t;
-
-struct percpu_rwlock {
- rwlock_t rwlock;
- bool_t writer_activating;
-#ifndef NDEBUG
- percpu_rwlock_t **percpu_owner;
-#endif
-};
-
-#ifndef NDEBUG
-#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0, owner }
-static inline void _percpu_rwlock_owner_check(percpu_rwlock_t **per_cpudata,
- percpu_rwlock_t *percpu_rwlock)
-{
- ASSERT(per_cpudata == percpu_rwlock->percpu_owner);
-}
-#else
-#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0 }
-#define _percpu_rwlock_owner_check(data, lock) ((void)0)
-#endif
-
-#define DEFINE_PERCPU_RWLOCK_RESOURCE(l, owner) \
- percpu_rwlock_t l = PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner))
-#define percpu_rwlock_resource_init(l, owner) \
- (*(l) = (percpu_rwlock_t)PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner)))
-
-static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
- percpu_rwlock_t *percpu_rwlock)
-{
- /* Validate the correct per_cpudata variable has been provided. */
- _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
-
- /* We cannot support recursion on the same lock. */
- ASSERT(this_cpu_ptr(per_cpudata) != percpu_rwlock);
- /*
- * Detect using a second percpu_rwlock_t simulatenously and fallback
- * to standard read_lock.
- */
- if ( unlikely(this_cpu_ptr(per_cpudata) != NULL ) )
- {
- read_lock(&percpu_rwlock->rwlock);
- return;
- }
-
- /* Indicate this cpu is reading. */
- this_cpu_ptr(per_cpudata) = percpu_rwlock;
- smp_mb();
- /* Check if a writer is waiting. */
- if ( unlikely(percpu_rwlock->writer_activating) )
- {
- /* Let the waiting writer know we aren't holding the lock. */
- this_cpu_ptr(per_cpudata) = NULL;
- /* Wait using the read lock to keep the lock fair. */
- read_lock(&percpu_rwlock->rwlock);
- /* Set the per CPU data again and continue. */
- this_cpu_ptr(per_cpudata) = percpu_rwlock;
- /* Drop the read lock because we don't need it anymore. */
- read_unlock(&percpu_rwlock->rwlock);
- }
-}
-
-static inline void _percpu_read_unlock(percpu_rwlock_t **per_cpudata,
- percpu_rwlock_t *percpu_rwlock)
-{
- /* Validate the correct per_cpudata variable has been provided. */
- _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
-
- /* Verify the read lock was taken for this lock */
- ASSERT(this_cpu_ptr(per_cpudata) != NULL);
- /*
- * Detect using a second percpu_rwlock_t simulatenously and fallback
- * to standard read_unlock.
- */
- if ( unlikely(this_cpu_ptr(per_cpudata) != percpu_rwlock ) )
- {
- read_unlock(&percpu_rwlock->rwlock);
- return;
- }
- this_cpu_ptr(per_cpudata) = NULL;
- smp_wmb();
-}
-
-/* Don't inline percpu write lock as it's a complex function. */
-void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
- percpu_rwlock_t *percpu_rwlock);
-
-static inline void _percpu_write_unlock(percpu_rwlock_t **per_cpudata,
- percpu_rwlock_t *percpu_rwlock)
-{
- /* Validate the correct per_cpudata variable has been provided. */
- _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
-
- ASSERT(percpu_rwlock->writer_activating);
- percpu_rwlock->writer_activating = 0;
- write_unlock(&percpu_rwlock->rwlock);
-}
-
-#define percpu_rw_is_write_locked(l) _rw_is_write_locked(&((l)->rwlock))
-
-#define percpu_read_lock(percpu, lock) \
- _percpu_read_lock(&get_per_cpu_var(percpu), lock)
-#define percpu_read_unlock(percpu, lock) \
- _percpu_read_unlock(&get_per_cpu_var(percpu), lock)
-#define percpu_write_lock(percpu, lock) \
- _percpu_write_lock(&get_per_cpu_var(percpu), lock)
-#define percpu_write_unlock(percpu, lock) \
- _percpu_write_unlock(&get_per_cpu_var(percpu), lock)
-
-#define DEFINE_PERCPU_RWLOCK_GLOBAL(name) DEFINE_PER_CPU(percpu_rwlock_t *, \
- name)
-#define DECLARE_PERCPU_RWLOCK_GLOBAL(name) DECLARE_PER_CPU(percpu_rwlock_t *, \
- name)
-
#endif /* __SPINLOCK_H__ */
@@ -40,6 +40,7 @@
#include <xen/xmalloc.h>
#include <xen/string.h>
#include <xen/spinlock.h>
+#include <xen/rwlock.h>
#include <xen/errno.h>
#include "flask.h"
#include "avc.h"