
[05/11] locking/ww_mutex: Add waiters in stamp order

Message ID 1480335612-12069-6-git-send-email-nhaehnle@gmail.com (mailing list archive)
State: New, archived

Commit Message

Nicolai Hähnle Nov. 28, 2016, 12:20 p.m. UTC
From: Nicolai Hähnle <Nicolai.Haehnle@amd.com>

Add regular waiters in stamp order. Keep adding waiters that have no
context in FIFO order and take care not to starve them.

While adding our task as a waiter, back off if we detect that there is a
waiter with a lower stamp in front of us.

Make sure to call lock_contended() even when we back off early.
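
For illustration (stamps are made up), the wait-list then looks like:

    head -> [ctx, stamp 1] -> [no ctx] -> [ctx, stamp 4] -> [no ctx] -> [ctx, stamp 9]

Waiters with a context are kept sorted by stamp, oldest (lowest stamp)
first; waiters without a context keep their FIFO position relative to
each other.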

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Maarten Lankhorst <dev@mblankhorst.nl>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: Nicolai Hähnle <Nicolai.Haehnle@amd.com>
---
 include/linux/mutex.h  |  3 ++
 kernel/locking/mutex.c | 76 +++++++++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 72 insertions(+), 7 deletions(-)

Comments

Chris Wilson Nov. 30, 2016, 2:10 p.m. UTC | #1
On Mon, Nov 28, 2016 at 01:20:06PM +0100, Nicolai Hähnle wrote:
> From: Nicolai Hähnle <Nicolai.Haehnle@amd.com>
> 
> Add regular waiters in stamp order. Keep adding waiters that have no
> context in FIFO order and take care not to starve them.
> 
> While adding our task as a waiter, back off if we detect that there is a
> waiter with a lower stamp in front of us.
> 
> Make sure to call lock_contended() even when we back off early.

I'm hitting
[   86.202749] WARNING: CPU: 1 PID: 813 at ./include/linux/ww_mutex.h:292 stress_inorder_work+0x436/0x4b5 [test_ww_mutex]
[   86.202885] DEBUG_LOCKS_WARN_ON(!ctx->contending_lock)

which if I understand correctly is due to

> +static inline int __sched
> +__ww_mutex_add_waiter(struct mutex_waiter *waiter,
> +		      struct mutex *lock,
> +		      struct ww_acquire_ctx *ww_ctx)
> +{
> +	if (ww_ctx) {
> +		struct mutex_waiter *cur;
> +
> +		/*
> +		 * Add the waiter before the first waiter with a higher stamp.
> +		 * Waiters without a context are skipped to avoid starving
> +		 * them.
> +		 */
> +		list_for_each_entry(cur, &lock->wait_list, list) {
> +			if (!cur->ww_ctx)
> +				continue;
> +
> +			if (__ww_mutex_stamp_after(ww_ctx, cur->ww_ctx)) {
> +				/* Back off immediately if necessary. */
> +				if (ww_ctx->acquired > 0)
> +					return -EDEADLK;

not setting ww_ctx->contending_lock here.

> +
> +				continue;
> +			}
> +
> +			list_add_tail(&waiter->list, &cur->list);
> +			return 0;
> +		}
> +	}
> +
> +	list_add_tail(&waiter->list, &lock->wait_list);
> +	return 0;
> +}
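
Presumably the early backoff needs to record the contending lock for
ww_mutex_lock_slow(), mirroring what __ww_mutex_lock_check_stamp()
does. Untested sketch of what I'd expect the fix to look like (not a
confirmed patch):

    if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
        struct ww_mutex *ww;

        /* Record which lock we backed off from, for the slowpath. */
        ww = container_of(lock, struct ww_mutex, base);
        DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
        ww_ctx->contending_lock = ww;
#endif
        return -EDEADLK;
    }

With ctx->contending_lock set, the DEBUG_LOCKS_WARN_ON(!ctx->contending_lock)
in ww_mutex_lock_slow() should no longer trigger on this path.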

Patch

diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index b97870f..118a3b6 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -20,6 +20,8 @@ 
 #include <linux/osq_lock.h>
 #include <linux/debug_locks.h>
 
+struct ww_acquire_ctx;
+
 /*
  * Simple, straightforward mutexes with strict semantics:
  *
@@ -75,6 +77,7 @@  static inline struct task_struct *__mutex_owner(struct mutex *lock)
 struct mutex_waiter {
 	struct list_head	list;
 	struct task_struct	*task;
+	struct ww_acquire_ctx	*ww_ctx;
 #ifdef CONFIG_DEBUG_MUTEXES
 	void			*magic;
 #endif
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 585627f..01dcae7 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -628,6 +628,40 @@  __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
 	return 0;
 }
 
+static inline int __sched
+__ww_mutex_add_waiter(struct mutex_waiter *waiter,
+		      struct mutex *lock,
+		      struct ww_acquire_ctx *ww_ctx)
+{
+	if (ww_ctx) {
+		struct mutex_waiter *cur;
+
+		/*
+		 * Add the waiter before the first waiter with a higher stamp.
+		 * Waiters without a context are skipped to avoid starving
+		 * them.
+		 */
+		list_for_each_entry(cur, &lock->wait_list, list) {
+			if (!cur->ww_ctx)
+				continue;
+
+			if (__ww_mutex_stamp_after(ww_ctx, cur->ww_ctx)) {
+				/* Back off immediately if necessary. */
+				if (ww_ctx->acquired > 0)
+					return -EDEADLK;
+
+				continue;
+			}
+
+			list_add_tail(&waiter->list, &cur->list);
+			return 0;
+		}
+	}
+
+	list_add_tail(&waiter->list, &lock->wait_list);
+	return 0;
+}
+
 /*
  * Lock a mutex (possibly interruptible), slowpath:
  */
@@ -677,15 +711,25 @@  __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	debug_mutex_lock_common(lock, &waiter);
 	debug_mutex_add_waiter(lock, &waiter, task);
 
-	/* add waiting tasks to the end of the waitqueue (FIFO): */
-	list_add_tail(&waiter.list, &lock->wait_list);
+	lock_contended(&lock->dep_map, ip);
+
+	if (!use_ww_ctx) {
+		/* add waiting tasks to the end of the waitqueue (FIFO): */
+		list_add_tail(&waiter.list, &lock->wait_list);
+	} else {
+		/* Add in stamp order, waking up waiters that must back off. */
+		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
+		if (ret)
+			goto err_early_backoff;
+
+		waiter.ww_ctx = ww_ctx;
+	}
+
 	waiter.task = task;
 
 	if (__mutex_waiter_is_first(lock, &waiter))
 		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
 
-	lock_contended(&lock->dep_map, ip);
-
 	set_task_state(task, state);
 	for (;;) {
 		/*
@@ -693,8 +737,12 @@  __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * mutex_unlock() handing the lock off to us, do a trylock
 		 * before testing the error conditions to make sure we pick up
 		 * the handoff.
+		 *
+		 * For w/w locks, we always need to do this even if we're not
+		 * currently the first waiter, because we may have been the
+		 * first waiter during the unlock.
 		 */
-		if (__mutex_trylock(lock, first))
+		if (__mutex_trylock(lock, use_ww_ctx || first))
 			goto acquired;
 
 		/*
@@ -716,7 +764,20 @@  __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		spin_unlock_mutex(&lock->wait_lock, flags);
 		schedule_preempt_disabled();
 
-		if (!first && __mutex_waiter_is_first(lock, &waiter)) {
+		if (use_ww_ctx && ww_ctx) {
+			/*
+			 * Always re-check whether we're in first position. We
+			 * don't want to spin if another task with a lower
+			 * stamp has taken our position.
+			 *
+			 * We also may have to set the handoff flag again, if
+			 * our position at the head was temporarily taken away.
+			 */
+			first = __mutex_waiter_is_first(lock, &waiter);
+
+			if (first)
+				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
+		} else if (!first && __mutex_waiter_is_first(lock, &waiter)) {
 			first = true;
 			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
 		}
@@ -728,7 +789,7 @@  __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * or we must see its unlock and acquire.
 		 */
 		if ((first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)) ||
-		     __mutex_trylock(lock, first))
+		     __mutex_trylock(lock, use_ww_ctx || first))
 			break;
 
 		spin_lock_mutex(&lock->wait_lock, flags);
@@ -761,6 +822,7 @@  __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 err:
 	__set_task_state(task, TASK_RUNNING);
 	mutex_remove_waiter(lock, &waiter, task);
+err_early_backoff:
 	spin_unlock_mutex(&lock->wait_lock, flags);
 	debug_mutex_free_waiter(&waiter);
 	mutex_release(&lock->dep_map, 1, ip);
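
For reference, the warning above fires in the standard w/w acquisition
pattern, where ww_mutex_lock_slow() on the contended lock expects
ctx->contending_lock to have been set by the failed lock attempt. A
minimal sketch of that caller pattern, after
Documentation/locking/ww-mutex-design.txt (lock_two and my_ww_class are
illustrative names, not part of this patch):

    static void lock_two(struct ww_mutex *a, struct ww_mutex *b)
    {
        struct ww_acquire_ctx ctx;

        ww_acquire_init(&ctx, &my_ww_class);

        ww_mutex_lock(a, &ctx);   /* first lock never returns -EDEADLK */
        while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
            /*
             * Back off: drop what we hold, then sleep on the
             * contended lock before retrying in that order.
             */
            ww_mutex_unlock(a);
            ww_mutex_lock_slow(b, &ctx);   /* relies on ctx->contending_lock */
            swap(a, b);
        }

        ww_acquire_done(&ctx);
    }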