diff mbox

[3/3] radix-tree: support locking of individual exception entries.

Message ID 145663616983.3865.11911049648442320016.stgit@notabene (mailing list archive)
State New, archived
Headers show

Commit Message

NeilBrown Feb. 28, 2016, 5:09 a.m. UTC
The least significant bit of an exception entry is used as a lock flag.
A caller can:
 - create a locked entry by simply adding an entry with this flag set
 - lock an existing entry with radix_tree_lookup_lock().  This may return
    NULL if the entry doesn't exist, or was deleted while waiting for
    the lock.  It may return a non-exception entry if that is what is
    found.  If it returns a locked entry then it has exclusive rights
    to delete the entry.
 - unlock an entry that is already locked.  This will wake any waiters.
 - delete an entry that is locked.  This will wake waiters so that they
   return NULL without looking at the slot in the radix tree.

These must all be called with the radix tree locked (i.e. a spinlock held).
That spinlock is passed to radix_tree_lookup_lock() so that it can drop
the lock while waiting.

This is a "demonstration of concept".  I haven't actually tested, only compiled.
A possible use case is for the exception entries used by DAX.

It is possible that some of the lookups can be optimised away in some
cases by storing a slot pointer.  I wanted to keep it reasonably
simple until it was determined if it might be useful.

Signed-off-by: NeilBrown <neilb@suse.com>
---
 include/linux/radix-tree.h |    8 ++
 lib/radix-tree.c           |  158 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 166 insertions(+)



--
To unsubscribe from this list: send the line "unsubscribe linux-fsdevel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Comments

kernel test robot Feb. 28, 2016, 5:30 a.m. UTC | #1
Hi NeilBrown,

[auto build test ERROR on v4.5-rc5]
[also build test ERROR on next-20160226]
[if your patch is applied to the wrong git tree, please drop us a note to help improving the system]

url:    https://github.com/0day-ci/linux/commits/NeilBrown/RFC-improvements-to-radix-tree-related-to-DAX/20160228-132214
config: i386-tinyconfig (attached as .config)
reproduce:
        # save the attached .config to linux build tree
        make ARCH=i386 

All errors (new ones prefixed by >>):

   lib/radix-tree.c: In function 'radix_tree_lookup_lock':
>> lib/radix-tree.c:1616:5: error: 'TASK_UNINTERRUPTIBLE' undeclared (first use in this function)
        TASK_UNINTERRUPTIBLE);
        ^
   lib/radix-tree.c:1616:5: note: each undeclared identifier is reported only once for each function it appears in
>> lib/radix-tree.c:1621:3: error: implicit declaration of function 'schedule' [-Werror=implicit-function-declaration]
      schedule();
      ^
   lib/radix-tree.c: In function 'radix_tree_unlock':
>> lib/radix-tree.c:1644:17: error: 'TASK_NORMAL' undeclared (first use in this function)
      __wake_up(wq, TASK_NORMAL, 1, &key);
                    ^
   lib/radix-tree.c: In function 'radix_tree_delete_unlock':
   lib/radix-tree.c:1657:17: error: 'TASK_NORMAL' undeclared (first use in this function)
      __wake_up(wq, TASK_NORMAL, 1, &key);
                    ^
   cc1: some warnings being treated as errors

vim +/TASK_UNINTERRUPTIBLE +1616 lib/radix-tree.c

  1610		wait.state = SLOT_WAITING;
  1611		wait.root = root;
  1612		wait.index = index;
  1613		wait.ret = NULL;
  1614		for (;;) {
  1615			prepare_to_wait(wq, &wait.wait,
> 1616					TASK_UNINTERRUPTIBLE);
  1617			if (wait.state != SLOT_WAITING)
  1618				break;
  1619	
  1620			spin_unlock(lock);
> 1621			schedule();
  1622			spin_lock(lock);
  1623		}
  1624		finish_wait(wq, &wait.wait);
  1625		return wait.ret;
  1626	}
  1627	EXPORT_SYMBOL(radix_tree_lookup_lock);
  1628	
  1629	void radix_tree_unlock(struct radix_tree_root *root, wait_queue_head_t *wq,
  1630				unsigned long index)
  1631	{
  1632		void *ret, **slot;
  1633	
  1634		ret = __radix_tree_lookup(root, index, NULL, &slot);
  1635		if (WARN_ON_ONCE(!ret || !radix_tree_exceptional_entry(ret)))
  1636			return;
  1637		if (WARN_ON_ONCE(!slot_locked(slot)))
  1638			return;
  1639		unlock_slot(slot);
  1640	
  1641		if (waitqueue_active(wq)) {
  1642			struct wait_bit_key key = {.flags = root, .bit_nr = -2,
  1643						   .timeout = index};
> 1644			__wake_up(wq, TASK_NORMAL, 1, &key);
  1645		}
  1646	}
  1647	EXPORT_SYMBOL(radix_tree_unlock);

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
NeilBrown Feb. 28, 2016, 6:27 a.m. UTC | #2
On Sun, Feb 28 2016, NeilBrown <neilb@suse.com> wrote:

> +static int wake_slot_function(wait_queue_t *wait, unsigned mode, int sync,
> +			      void *arg)
> +{
> +	struct wait_bit_key *key = arg;
> +	struct wait_slot_queue *wait_slot =
> +		container_of(wait, struct wait_slot_queue, wait);
> +	void **slot;
> +
> +	if (wait_slot->root != key->flags ||
> +	    wait_slot->index != key->timeout)
> +		/* Not waking this waiter */
> +		return 0;
> +	if (wait_slot->state != SLOT_WAITING)
> +		/* Should be impossible.... */
> +		return 1;
> +	if (key->bit_nr == -3)
> +		/* Was just deleted, no point in doing a lookup */
> +		wait_slot = NULL;
> +	else
> +		wait_slot->ret = __radix_tree_lookup(
> +			wait_slot->root, wait_slot->index, NULL, &slot);
> +	if (!wait_slot->ret || !radix_tree_exceptional_entry(wait_slot->ret)) {
> +		wait_slot->state = SLOT_GONE;
> +		return 1;
> +	}
> +	if (slot_locked(slot))
> +		/* still locked */
> +		return 0;
> +	wait_slot->ret = lock_slot(slot);
> +	wait_slot->state = SLOT_LOCKED;
> +	return 1;
> +}

Sorry, just realized that this should:
  return autoremove_wake_function(wait, mode, sync, arg);

instead of "return 1;"

NeilBrown
diff mbox

Patch

diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 450c12b546b7..8f579f66574b 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -308,6 +308,14 @@  unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
 int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
 unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
 
+void *radix_tree_lookup_lock(struct radix_tree_root *root, wait_queue_head_t *wq,
+			     unsigned long index, spinlock_t *lock);
+void radix_tree_unlock(struct radix_tree_root *root, wait_queue_head_t *wq,
+		       unsigned long index);
+void radix_tree_delete_unlock(struct radix_tree_root *root, wait_queue_head_t *wq,
+			      unsigned long index);
+
+
 static inline void radix_tree_preload_end(void)
 {
 	preempt_enable();
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 37d4643ab5c0..a24ea002f3eb 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1500,3 +1500,161 @@  void __init radix_tree_init(void)
 	radix_tree_init_maxindex();
 	hotcpu_notifier(radix_tree_callback, 0);
 }
+
+/* Exception entry locking.
+ * The least significant bit of an exception entry can be used as a
+ * "locked" flag.  Supported locking operations are:
+ * radix_tree_lookup_lock() - if the indexed entry exists, lock it and
+ *         return the value, else return NULL.  If the indexed entry is not
+ *         exceptional it is returned without locking.
+ * radix_tree_unlock() - release the lock on the indexed entry
+ * radix_tree_delete_unlock() - the entry must be locked.  It will be atomically
+ *     unlocked and removed.  Any threads sleeping in lookup_lock() will return.
+ * Each of these take a radix_tree_root, a wait_queue_head_t, and an index.
+ * The '*lock' function also takes a spinlock_t which must be held when any
+ * of the functions is called.  *lock will drop the spinlock while waiting for
+ * the entry lock.
+ *
+ * As delete_unlock could free the radix_tree_node, waiters must not touch it
+ * when woken.  We provide a wake function for the waitq which records when the
+ * item has been deleted.
+ *
+ * The wait_queue_head passed should be one that is used for bit_wait, such
+ * as zone->wait_table.  We re-use the 'flags' and 'timeout' fields of the
+ * wait_bit_key to store the root and index that we are waiting for.
+ * __wake_up may only be called on one of these keys while the radix tree
+ * is locked.  The wakeup function will take the lock itself if appropriate, or
+ * may record that the radix tree entry has been deleted.  In either case
+ * the waiting function just looks at the status reported by the wakeup function
+ * and doesn't look at the radix tree itself.
+ *
+ * There is no function for locking an entry while inserting it.  Simply
+ * insert an entry that is already marked as 'locked' - lsb set.
+ *
+ */
+
+struct wait_slot_queue {
+	struct radix_tree_root	*root;
+	unsigned long		index;
+	wait_queue_t		wait;
+	enum {SLOT_WAITING, SLOT_LOCKED, SLOT_GONE} state;
+	void			*ret;
+};
+
+static inline int slot_locked(void *v)
+{
+	unsigned long l = (unsigned long)v;
+	return l & 1;
+}
+
+static inline void *lock_slot(void **v)
+{
+	unsigned long *l = (unsigned long *)v;
+	return (void*)(*l |= 1);
+}
+
+static inline void * unlock_slot(void **v)
+{
+	unsigned long *l = (unsigned long *)v;
+	return (void*)(*l &= ~1UL);
+}
+
+static int wake_slot_function(wait_queue_t *wait, unsigned mode, int sync,
+			      void *arg)
+{
+	struct wait_bit_key *key = arg;
+	struct wait_slot_queue *wait_slot =
+		container_of(wait, struct wait_slot_queue, wait);
+	void **slot;
+
+	if (wait_slot->root != key->flags ||
+	    wait_slot->index != key->timeout)
+		/* Not waking this waiter */
+		return 0;
+	if (wait_slot->state != SLOT_WAITING)
+		/* Should be impossible.... */
+		return 1;
+	if (key->bit_nr == -3)
+		/* Was just deleted, no point in doing a lookup */
+		wait_slot = NULL;
+	else
+		wait_slot->ret = __radix_tree_lookup(
+			wait_slot->root, wait_slot->index, NULL, &slot);
+	if (!wait_slot->ret || !radix_tree_exceptional_entry(wait_slot->ret)) {
+		wait_slot->state = SLOT_GONE;
+		return 1;
+	}
+	if (slot_locked(slot))
+		/* still locked */
+		return 0;
+	wait_slot->ret = lock_slot(slot);
+	wait_slot->state = SLOT_LOCKED;
+	return 1;
+}
+
+void *radix_tree_lookup_lock(struct radix_tree_root *root, wait_queue_head_t *wq,
+			     unsigned long index, spinlock_t *lock)
+{
+	void *ret, **slot;
+	struct wait_slot_queue wait;
+
+	ret = __radix_tree_lookup(root, index, NULL, &slot);
+	if (!ret || !radix_tree_exceptional_entry(ret))
+		return ret;
+	if (!slot_locked(slot))
+		return lock_slot(slot);
+
+	wait.wait.private = current;
+	wait.wait.func = wake_slot_function;
+	INIT_LIST_HEAD(&wait.wait.task_list);
+	wait.state = SLOT_WAITING;
+	wait.root = root;
+	wait.index = index;
+	wait.ret = NULL;
+	for (;;) {
+		prepare_to_wait(wq, &wait.wait,
+				TASK_UNINTERRUPTIBLE);
+		if (wait.state != SLOT_WAITING)
+			break;
+
+		spin_unlock(lock);
+		schedule();
+		spin_lock(lock);
+	}
+	finish_wait(wq, &wait.wait);
+	return wait.ret;
+}
+EXPORT_SYMBOL(radix_tree_lookup_lock);
+
+void radix_tree_unlock(struct radix_tree_root *root, wait_queue_head_t *wq,
+			unsigned long index)
+{
+	void *ret, **slot;
+
+	ret = __radix_tree_lookup(root, index, NULL, &slot);
+	if (WARN_ON_ONCE(!ret || !radix_tree_exceptional_entry(ret)))
+		return;
+	if (WARN_ON_ONCE(!slot_locked(slot)))
+		return;
+	unlock_slot(slot);
+
+	if (waitqueue_active(wq)) {
+		struct wait_bit_key key = {.flags = root, .bit_nr = -2,
+					   .timeout = index};
+		__wake_up(wq, TASK_NORMAL, 1, &key);
+	}
+}
+EXPORT_SYMBOL(radix_tree_unlock);
+
+void radix_tree_delete_unlock(struct radix_tree_root *root, wait_queue_head_t *wq,
+			      unsigned long index)
+{
+	radix_tree_delete(root, index);
+	if (waitqueue_active(wq)) {
+		/* -3 here indicates deletion */
+		struct wait_bit_key key = {.flags = root, .bit_nr = -3,
+					   .timeout = index};
+		__wake_up(wq, TASK_NORMAL, 1, &key);
+	}
+}
+EXPORT_SYMBOL(radix_tree_delete_unlock);