
[RFC,v2,4/4] locks: Use blocked_lock_lock only to protect blocked_hash

Message ID: 1425306313-7234-5-git-send-email-daniel.wagner@bmw-carit.de (mailing list archive)
State: New, archived

Commit Message

Daniel Wagner March 2, 2015, 2:25 p.m. UTC
blocked_lock_lock and file_lock_lglock are used to protect file_lock's
fl_link, fl_block and fl_next fields, the blocked_hash and the percpu
file_lock_list.

The plan is to reorganize which locks protect what, so that use of the
global blocked_lock_lock is reduced.

Whenever we insert a new lock, we grab the corresponding
file_lock_lglock in addition to the flc_lock. The global
blocked_lock_lock is only used when the blocked_hash is involved.

file_lock_lglock alone now protects file_lock_list, fl_link, fl_block
and fl_next. That means we need to define which file_lock_lglock is
used for all waiters. Luckily, fl_link_cpu can be reused for fl_block
and fl_next.
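
To illustrate, the blocking path in __posix_lock_file() now nests the
locks like this (condensed from the hunk below; the caller already
holds the flc_lock, and blocked_lock_lock is only taken to protect the
blocked_hash insertion):

	lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu);
	spin_lock(&blocked_lock_lock);
	if (likely(!posix_locks_deadlock(request, fl))) {
		error = FILE_LOCK_DEFERRED;
		/* request inherits fl->fl_link_cpu in __locks_insert_block() */
		__locks_insert_posix_block(fl, request);
	}
	spin_unlock(&blocked_lock_lock);
	lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu);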

Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
Cc: Jeff Layton <jlayton@poochiereds.net>
Cc: "J. Bruce Fields" <bfields@fieldses.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
---
 fs/locks.c | 78 ++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 43 insertions(+), 35 deletions(-)

Comments

Jeff Layton March 3, 2015, 12:58 a.m. UTC | #1
On Mon,  2 Mar 2015 15:25:13 +0100
Daniel Wagner <daniel.wagner@bmw-carit.de> wrote:

> blocked_lock_lock and file_lock_lglock are used to protect file_lock's
> fl_link, fl_block and fl_next fields, the blocked_hash and the percpu
> file_lock_list.
> 
> The plan is to reorganize which locks protect what, so that use of the
> global blocked_lock_lock is reduced.
> 
> Whenever we insert a new lock, we grab the corresponding
> file_lock_lglock in addition to the flc_lock. The global
> blocked_lock_lock is only used when the blocked_hash is involved.
> 
> file_lock_lglock alone now protects file_lock_list, fl_link, fl_block
> and fl_next. That means we need to define which file_lock_lglock is
> used for all waiters. Luckily, fl_link_cpu can be reused for fl_block
> and fl_next.
> 
> Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
> Cc: Jeff Layton <jlayton@poochiereds.net>
> Cc: "J. Bruce Fields" <bfields@fieldses.org>
> Cc: Alexander Viro <viro@zeniv.linux.org.uk>
> ---
>  fs/locks.c | 78 ++++++++++++++++++++++++++++++++++----------------------------
>  1 file changed, 43 insertions(+), 35 deletions(-)
> 
> diff --git a/fs/locks.c b/fs/locks.c
> index 02821dd..de15ea8 100644
> --- a/fs/locks.c
> +++ b/fs/locks.c
> @@ -162,6 +162,20 @@ int lease_break_time = 45;
>   * keep a list on each CPU, with each list protected by its own spinlock via
>   * the file_lock_lglock. Note that alterations to the list also require that
>   * the relevant flc_lock is held.
> + *
> + * In addition, it also protects the fl->fl_block list, and the fl->fl_next
> + * pointer for file_lock structures that are acting as lock requests (in
> + * contrast to those that are acting as records of acquired locks).
> + *
> + * file_lock structures acting as lock requests (waiters) use the same
> + * spinlock as those acting as lock holders (blockers). E.g. if the
> + * blocker is initially added to the file_lock_list living on CPU 0,
> + * then all waiters on that blocker are serialized via CPU 0 (see
> + * fl_link_cpu usage).
> + *
> + * In particular, adding an entry to the fl_block list requires that you hold
> + * both the flc_lock and the file_lock_lglock (acquired in that order).
> + * Deleting an entry from the list however only requires the file_lock_lglock.
>   */
>  DEFINE_STATIC_LGLOCK(file_lock_lglock);
>  static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
> @@ -183,19 +197,6 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
>  /*
>   * This lock protects the blocked_hash. Generally, if you're accessing it, you
>   * want to be holding this lock.
> - *
> - * In addition, it also protects the fl->fl_block list, and the fl->fl_next
> - * pointer for file_lock structures that are acting as lock requests (in
> - * contrast to those that are acting as records of acquired locks).
> - *
> - * Note that when we acquire this lock in order to change the above fields,
> - * we often hold the flc_lock as well. In certain cases, when reading the fields
> - * protected by this lock, we can skip acquiring it iff we already hold the
> - * flc_lock.
> - *
> - * In particular, adding an entry to the fl_block list requires that you hold
> - * both the flc_lock and the blocked_lock_lock (acquired in that order).
> - * Deleting an entry from the list however only requires the file_lock_lock.
>   */
>  static DEFINE_SPINLOCK(blocked_lock_lock);
>  
> @@ -607,7 +608,7 @@ static void locks_delete_global_blocked(struct file_lock *waiter)
>  /* Remove waiter from blocker's block list.
>   * When blocker ends up pointing to itself then the list is empty.
>   *
> - * Must be called with blocked_lock_lock held.
> + * Must be called with file_lock_lglock held.
>   */
>  static void __locks_delete_block(struct file_lock *waiter)
>  {
> @@ -617,7 +618,7 @@ static void __locks_delete_block(struct file_lock *waiter)
>  
>  /* Posix block variant of __locks_delete_block.
>   *
> - * Must be called with blocked_lock_lock held.
> + * Must be called with file_lock_lglock held.
>   */
>  static void __locks_delete_posix_block(struct file_lock *waiter)
>  {
> @@ -627,16 +628,18 @@ static void __locks_delete_posix_block(struct file_lock *waiter)
>  
>  static void locks_delete_block(struct file_lock *waiter)
>  {
> -	spin_lock(&blocked_lock_lock);
> +	lg_local_lock_cpu(&file_lock_lglock, waiter->fl_link_cpu);
>  	__locks_delete_block(waiter);
> -	spin_unlock(&blocked_lock_lock);
> +	lg_local_unlock_cpu(&file_lock_lglock, waiter->fl_link_cpu);
>  }
>  
>  static void locks_delete_posix_block(struct file_lock *waiter)
>  {
> +	lg_local_lock_cpu(&file_lock_lglock, waiter->fl_link_cpu);
>  	spin_lock(&blocked_lock_lock);
>  	__locks_delete_posix_block(waiter);
>  	spin_unlock(&blocked_lock_lock);
> +	lg_local_unlock_cpu(&file_lock_lglock, waiter->fl_link_cpu);
>  }
>  
>  /* Insert waiter into blocker's block list.
> @@ -644,22 +647,23 @@ static void locks_delete_posix_block(struct file_lock *waiter)
>   * the order they blocked. The documentation doesn't require this but
>   * it seems like the reasonable thing to do.
>   *
> - * Must be called with both the flc_lock and blocked_lock_lock held. The
> - * fl_block list itself is protected by the blocked_lock_lock, but by ensuring
> + * Must be called with both the flc_lock and file_lock_lglock held. The
> + * fl_block list itself is protected by the file_lock_lglock, but by ensuring
>   * that the flc_lock is also held on insertions we can avoid taking the
> - * blocked_lock_lock in some cases when we see that the fl_block list is empty.
> + * file_lock_lglock in some cases when we see that the fl_block list is empty.
>   */
>  static void __locks_insert_block(struct file_lock *blocker,
>  					struct file_lock *waiter)
>  {
>  	BUG_ON(!list_empty(&waiter->fl_block));
> +	waiter->fl_link_cpu = blocker->fl_link_cpu;
>  	waiter->fl_next = blocker;
>  	list_add_tail(&waiter->fl_block, &blocker->fl_block);
>  }
>  
>  /* Posix block variant of __locks_insert_block.
>   *
> - * Must be called with flc_lock and blocked_lock_lock held.
> + * Must be called with flc_lock and file_lock_lglock held.
>   */
>  static void __locks_insert_posix_block(struct file_lock *blocker,
>  					struct file_lock *waiter)
> @@ -673,9 +677,9 @@ static void __locks_insert_posix_block(struct file_lock *blocker,
>  static void locks_insert_block(struct file_lock *blocker,
>  					struct file_lock *waiter)
>  {
> -	spin_lock(&blocked_lock_lock);
> +	lg_local_lock_cpu(&file_lock_lglock, blocker->fl_link_cpu);
>  	__locks_insert_block(blocker, waiter);
> -	spin_unlock(&blocked_lock_lock);
> +	lg_local_unlock_cpu(&file_lock_lglock, blocker->fl_link_cpu);
>  }
>  
>  /*
> @@ -686,31 +690,33 @@ static void locks_insert_block(struct file_lock *blocker,
>  static void locks_wake_up_blocks(struct file_lock *blocker)
>  {
>  	/*
> -	 * Avoid taking global lock if list is empty. This is safe since new
> +	 * Avoid taking lock if list is empty. This is safe since new
>  	 * blocked requests are only added to the list under the flc_lock, and
>  	 * the flc_lock is always held here. Note that removal from the fl_block
>  	 * list does not require the flc_lock, so we must recheck list_empty()
> -	 * after acquiring the blocked_lock_lock.
> +	 * after acquiring the file_lock_lglock.
>  	 */
>  	if (list_empty(&blocker->fl_block))
>  		return;
>  
> -	spin_lock(&blocked_lock_lock);
> +	lg_local_lock_cpu(&file_lock_lglock, blocker->fl_link_cpu);
>  	while (!list_empty(&blocker->fl_block)) {
>  		struct file_lock *waiter;
>  
>  		waiter = list_first_entry(&blocker->fl_block,
>  				struct file_lock, fl_block);
> -		if (IS_POSIX(blocker))
> +		if (IS_POSIX(blocker)) {
> +			spin_lock(&blocked_lock_lock);
>  			__locks_delete_posix_block(waiter);
> -		else
> +			spin_unlock(&blocked_lock_lock);
> +		} else
>  			__locks_delete_block(waiter);

So again, as I tried to point out in the last patch, I think we can do
better here and also avoid taking the blocked_lock_lock when the
blocker is an OFD lock.
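
Something like the following, perhaps (an untested sketch on top of
this patch; it assumes waiters sitting behind an OFD blocker are never
entered into the blocked_hash, since the deadlock detection doesn't
apply to OFD locks):

	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
				struct file_lock, fl_block);
		/* only "classic" POSIX waiters can be in the blocked_hash */
		if (IS_POSIX(blocker) && !IS_OFDLCK(blocker)) {
			spin_lock(&blocked_lock_lock);
			__locks_delete_posix_block(waiter);
			spin_unlock(&blocked_lock_lock);
		} else {
			__locks_delete_block(waiter);
		}
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}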

>  		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
>  			waiter->fl_lmops->lm_notify(waiter);
>  		else
>  			wake_up(&waiter->fl_wait);
>  	}
> -	spin_unlock(&blocked_lock_lock);
> +	lg_local_unlock_cpu(&file_lock_lglock, blocker->fl_link_cpu);
>  }
>  
>  static void
> @@ -737,9 +743,11 @@ static void
>  locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
>  {
>  	locks_unlink_lock_ctx(fl);
> -	if (dispose)
> +	if (dispose) {
> +		lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu);
>  		list_add(&fl->fl_list, dispose);
> -	else
> +		lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu);
> +	} else
>  		locks_free_lock(fl);
>  }
>  
> @@ -1011,12 +1019,14 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
>  			 * locks list must be done while holding the same lock!
>  			 */
>  			error = -EDEADLK;
> +			lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu);
>  			spin_lock(&blocked_lock_lock);
>  			if (likely(!posix_locks_deadlock(request, fl))) {
>  				error = FILE_LOCK_DEFERRED;
>  				__locks_insert_posix_block(fl, request);
>  			}
>  			spin_unlock(&blocked_lock_lock);
> +			lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu);
>  			goto out;
>    		}
>    	}
> @@ -2497,12 +2507,14 @@ posix_unblock_lock(struct file_lock *waiter)
>  {
>  	int status = 0;
>  
> +	lg_local_lock_cpu(&file_lock_lglock, waiter->fl_link_cpu);
>  	spin_lock(&blocked_lock_lock);
>  	if (waiter->fl_next)
>  		__locks_delete_posix_block(waiter);
>  	else
>  		status = -ENOENT;
>  	spin_unlock(&blocked_lock_lock);
> +	lg_local_unlock_cpu(&file_lock_lglock, waiter->fl_link_cpu);
>  	return status;
>  }
>  EXPORT_SYMBOL(posix_unblock_lock);
> @@ -2629,13 +2641,11 @@ static int locks_show(struct seq_file *f, void *v)
>  }
>  
>  static void *locks_start(struct seq_file *f, loff_t *pos)
> -	__acquires(&blocked_lock_lock)
>  {
>  	struct locks_iterator *iter = f->private;
>  
>  	iter->li_pos = *pos + 1;
>  	lg_global_lock(&file_lock_lglock);
> -	spin_lock(&blocked_lock_lock);
>  	return seq_hlist_start_percpu(&file_lock_list, &iter->li_cpu, *pos);
>  }
>  
> @@ -2648,9 +2658,7 @@ static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
>  }
>  
>  static void locks_stop(struct seq_file *f, void *v)
> -	__releases(&blocked_lock_lock)
>  {
> -	spin_unlock(&blocked_lock_lock);
>  	lg_global_unlock(&file_lock_lglock);
>  }
>

Patch

diff --git a/fs/locks.c b/fs/locks.c
index 02821dd..de15ea8 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -162,6 +162,20 @@  int lease_break_time = 45;
  * keep a list on each CPU, with each list protected by its own spinlock via
  * the file_lock_lglock. Note that alterations to the list also require that
  * the relevant flc_lock is held.
+ *
+ * In addition, it also protects the fl->fl_block list, and the fl->fl_next
+ * pointer for file_lock structures that are acting as lock requests (in
+ * contrast to those that are acting as records of acquired locks).
+ *
+ * file_lock structures acting as lock requests (waiters) use the same
+ * spinlock as those acting as lock holders (blockers). E.g. if the
+ * blocker is initially added to the file_lock_list living on CPU 0,
+ * then all waiters on that blocker are serialized via CPU 0 (see
+ * fl_link_cpu usage).
+ *
+ * In particular, adding an entry to the fl_block list requires that you hold
+ * both the flc_lock and the file_lock_lglock (acquired in that order).
+ * Deleting an entry from the list however only requires the file_lock_lglock.
  */
 DEFINE_STATIC_LGLOCK(file_lock_lglock);
 static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
@@ -183,19 +197,6 @@  static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
 /*
  * This lock protects the blocked_hash. Generally, if you're accessing it, you
  * want to be holding this lock.
- *
- * In addition, it also protects the fl->fl_block list, and the fl->fl_next
- * pointer for file_lock structures that are acting as lock requests (in
- * contrast to those that are acting as records of acquired locks).
- *
- * Note that when we acquire this lock in order to change the above fields,
- * we often hold the flc_lock as well. In certain cases, when reading the fields
- * protected by this lock, we can skip acquiring it iff we already hold the
- * flc_lock.
- *
- * In particular, adding an entry to the fl_block list requires that you hold
- * both the flc_lock and the blocked_lock_lock (acquired in that order).
- * Deleting an entry from the list however only requires the file_lock_lock.
  */
 static DEFINE_SPINLOCK(blocked_lock_lock);
 
@@ -607,7 +608,7 @@  static void locks_delete_global_blocked(struct file_lock *waiter)
 /* Remove waiter from blocker's block list.
  * When blocker ends up pointing to itself then the list is empty.
  *
- * Must be called with blocked_lock_lock held.
+ * Must be called with file_lock_lglock held.
  */
 static void __locks_delete_block(struct file_lock *waiter)
 {
@@ -617,7 +618,7 @@  static void __locks_delete_block(struct file_lock *waiter)
 
 /* Posix block variant of __locks_delete_block.
  *
- * Must be called with blocked_lock_lock held.
+ * Must be called with file_lock_lglock held.
  */
 static void __locks_delete_posix_block(struct file_lock *waiter)
 {
@@ -627,16 +628,18 @@  static void __locks_delete_posix_block(struct file_lock *waiter)
 
 static void locks_delete_block(struct file_lock *waiter)
 {
-	spin_lock(&blocked_lock_lock);
+	lg_local_lock_cpu(&file_lock_lglock, waiter->fl_link_cpu);
 	__locks_delete_block(waiter);
-	spin_unlock(&blocked_lock_lock);
+	lg_local_unlock_cpu(&file_lock_lglock, waiter->fl_link_cpu);
 }
 
 static void locks_delete_posix_block(struct file_lock *waiter)
 {
+	lg_local_lock_cpu(&file_lock_lglock, waiter->fl_link_cpu);
 	spin_lock(&blocked_lock_lock);
 	__locks_delete_posix_block(waiter);
 	spin_unlock(&blocked_lock_lock);
+	lg_local_unlock_cpu(&file_lock_lglock, waiter->fl_link_cpu);
 }
 
 /* Insert waiter into blocker's block list.
@@ -644,22 +647,23 @@  static void locks_delete_posix_block(struct file_lock *waiter)
  * the order they blocked. The documentation doesn't require this but
  * it seems like the reasonable thing to do.
  *
- * Must be called with both the flc_lock and blocked_lock_lock held. The
- * fl_block list itself is protected by the blocked_lock_lock, but by ensuring
+ * Must be called with both the flc_lock and file_lock_lglock held. The
+ * fl_block list itself is protected by the file_lock_lglock, but by ensuring
  * that the flc_lock is also held on insertions we can avoid taking the
- * blocked_lock_lock in some cases when we see that the fl_block list is empty.
+ * file_lock_lglock in some cases when we see that the fl_block list is empty.
  */
 static void __locks_insert_block(struct file_lock *blocker,
 					struct file_lock *waiter)
 {
 	BUG_ON(!list_empty(&waiter->fl_block));
+	waiter->fl_link_cpu = blocker->fl_link_cpu;
 	waiter->fl_next = blocker;
 	list_add_tail(&waiter->fl_block, &blocker->fl_block);
 }
 
 /* Posix block variant of __locks_insert_block.
  *
- * Must be called with flc_lock and blocked_lock_lock held.
+ * Must be called with flc_lock and file_lock_lglock held.
  */
 static void __locks_insert_posix_block(struct file_lock *blocker,
 					struct file_lock *waiter)
@@ -673,9 +677,9 @@  static void __locks_insert_posix_block(struct file_lock *blocker,
 static void locks_insert_block(struct file_lock *blocker,
 					struct file_lock *waiter)
 {
-	spin_lock(&blocked_lock_lock);
+	lg_local_lock_cpu(&file_lock_lglock, blocker->fl_link_cpu);
 	__locks_insert_block(blocker, waiter);
-	spin_unlock(&blocked_lock_lock);
+	lg_local_unlock_cpu(&file_lock_lglock, blocker->fl_link_cpu);
 }
 
 /*
@@ -686,31 +690,33 @@  static void locks_insert_block(struct file_lock *blocker,
 static void locks_wake_up_blocks(struct file_lock *blocker)
 {
 	/*
-	 * Avoid taking global lock if list is empty. This is safe since new
+	 * Avoid taking lock if list is empty. This is safe since new
 	 * blocked requests are only added to the list under the flc_lock, and
 	 * the flc_lock is always held here. Note that removal from the fl_block
 	 * list does not require the flc_lock, so we must recheck list_empty()
-	 * after acquiring the blocked_lock_lock.
+	 * after acquiring the file_lock_lglock.
 	 */
 	if (list_empty(&blocker->fl_block))
 		return;
 
-	spin_lock(&blocked_lock_lock);
+	lg_local_lock_cpu(&file_lock_lglock, blocker->fl_link_cpu);
 	while (!list_empty(&blocker->fl_block)) {
 		struct file_lock *waiter;
 
 		waiter = list_first_entry(&blocker->fl_block,
 				struct file_lock, fl_block);
-		if (IS_POSIX(blocker))
+		if (IS_POSIX(blocker)) {
+			spin_lock(&blocked_lock_lock);
 			__locks_delete_posix_block(waiter);
-		else
+			spin_unlock(&blocked_lock_lock);
+		} else
 			__locks_delete_block(waiter);
 		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
 			waiter->fl_lmops->lm_notify(waiter);
 		else
 			wake_up(&waiter->fl_wait);
 	}
-	spin_unlock(&blocked_lock_lock);
+	lg_local_unlock_cpu(&file_lock_lglock, blocker->fl_link_cpu);
 }
 
 static void
@@ -737,9 +743,11 @@  static void
 locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
 {
 	locks_unlink_lock_ctx(fl);
-	if (dispose)
+	if (dispose) {
+		lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu);
 		list_add(&fl->fl_list, dispose);
-	else
+		lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu);
+	} else
 		locks_free_lock(fl);
 }
 
@@ -1011,12 +1019,14 @@  static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
 			 * locks list must be done while holding the same lock!
 			 */
 			error = -EDEADLK;
+			lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu);
 			spin_lock(&blocked_lock_lock);
 			if (likely(!posix_locks_deadlock(request, fl))) {
 				error = FILE_LOCK_DEFERRED;
 				__locks_insert_posix_block(fl, request);
 			}
 			spin_unlock(&blocked_lock_lock);
+			lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu);
 			goto out;
   		}
   	}
@@ -2497,12 +2507,14 @@  posix_unblock_lock(struct file_lock *waiter)
 {
 	int status = 0;
 
+	lg_local_lock_cpu(&file_lock_lglock, waiter->fl_link_cpu);
 	spin_lock(&blocked_lock_lock);
 	if (waiter->fl_next)
 		__locks_delete_posix_block(waiter);
 	else
 		status = -ENOENT;
 	spin_unlock(&blocked_lock_lock);
+	lg_local_unlock_cpu(&file_lock_lglock, waiter->fl_link_cpu);
 	return status;
 }
 EXPORT_SYMBOL(posix_unblock_lock);
@@ -2629,13 +2641,11 @@  static int locks_show(struct seq_file *f, void *v)
 }
 
 static void *locks_start(struct seq_file *f, loff_t *pos)
-	__acquires(&blocked_lock_lock)
 {
 	struct locks_iterator *iter = f->private;
 
 	iter->li_pos = *pos + 1;
 	lg_global_lock(&file_lock_lglock);
-	spin_lock(&blocked_lock_lock);
 	return seq_hlist_start_percpu(&file_lock_list, &iter->li_cpu, *pos);
 }
 
@@ -2648,9 +2658,7 @@  static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
 }
 
 static void locks_stop(struct seq_file *f, void *v)
-	__releases(&blocked_lock_lock)
 {
-	spin_unlock(&blocked_lock_lock);
 	lg_global_unlock(&file_lock_lglock);
 }