[34/35] multipathd: uxlsnr: drop client_lock

Message ID 20210910114120.13665-35-mwilck@suse.com (mailing list archive)
State Not Applicable, archived
Delegated to: christophe varoqui
Series multipathd: uxlsnr overhaul

Commit Message

Martin Wilck Sept. 10, 2021, 11:41 a.m. UTC
From: Martin Wilck <mwilck@suse.com>

The list of clients is never changed anywhere except in
uxsock_listen(). No need to lock.

Signed-off-by: Martin Wilck <mwilck@suse.com>
---
 multipathd/uxlsnr.c | 21 ++-------------------
 1 file changed, 2 insertions(+), 19 deletions(-)
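
The rationale relies on a single-thread discipline: only the uxsock_listen()
thread ever walks or modifies the clients list, so the mutex protects nothing.
A minimal sketch of that single-writer pattern is below; the names are invented
for illustration and this is not the multipathd code.

#include <stdio.h>
#include <stdlib.h>

struct client {
	int fd;
	struct client *next;
};

/* Only one thread (the listener) ever touches this list head. */
static struct client *clients;

static void new_client(int fd)
{
	struct client *c = calloc(1, sizeof(*c));

	if (!c)
		return;
	c->fd = fd;
	/* single-threaded access: a plain pointer update, no locking */
	c->next = clients;
	clients = c;
}

static void dead_client(struct client *c)
{
	struct client **pp;

	/* unlink and free; again no mutex, because no other thread looks here */
	for (pp = &clients; *pp; pp = &(*pp)->next) {
		if (*pp == c) {
			*pp = c->next;
			free(c);
			return;
		}
	}
}

int main(void)
{
	new_client(3);
	new_client(4);
	dead_client(clients);	/* drop the most recently added client */
	printf("remaining client fd: %d\n", clients ? clients->fd : -1);
	return 0;
}

The same reasoning breaks down as soon as a second thread needs to traverse the
list, which is why the commit message stresses that all accesses happen in
uxsock_listen().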

Comments

Benjamin Marzinski Sept. 16, 2021, 4:24 a.m. UTC | #1
On Fri, Sep 10, 2021 at 01:41:19PM +0200, mwilck@suse.com wrote:
> From: Martin Wilck <mwilck@suse.com>
> 
> The list of clients is never changed anywhere except in
> uxsock_listen(). No need to lock.
> 
> Signed-off-by: Martin Wilck <mwilck@suse.com>
> ---
>  multipathd/uxlsnr.c | 21 ++-------------------
>  1 file changed, 2 insertions(+), 19 deletions(-)
> 
> diff --git a/multipathd/uxlsnr.c b/multipathd/uxlsnr.c
> index c18b2c4..7b763b6 100644
> --- a/multipathd/uxlsnr.c
> +++ b/multipathd/uxlsnr.c
> @@ -89,7 +89,6 @@ enum {
>  static __attribute__((unused)) char ___a[-(MIN_POLLS <= 0)];
>  
>  static LIST_HEAD(clients);
> -static pthread_mutex_t client_lock = PTHREAD_MUTEX_INITIALIZER;
>  static struct pollfd *polls;
>  static int notify_fd = -1;
>  static int idle_fd = -1;
> @@ -150,15 +149,13 @@ static void new_client(int ux_sock)
>  	c->is_root = _socket_client_is_root(c->fd);
>  
>  	/* put it in our linked list */
> -	pthread_mutex_lock(&client_lock);
>  	list_add_tail(&c->node, &clients);
> -	pthread_mutex_unlock(&client_lock);
>  }
>  
>  /*
>   * kill off a dead client
>   */
> -static void _dead_client(struct client *c)
> +static void dead_client(struct client *c)

There are also leading underscores and comments about the clients lock
for __get_soonest_timeout() and __need_lock(). We should probably remove
those as well.

-Ben
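
For context, the follow-up suggested here would amount to a rename plus
dropping the lock-related comments. A hypothetical before/after sketch is
below; __get_soonest_timeout()'s signature matches its use in the patch, but
__need_lock()'s signature and both bodies are placeholders, not the actual
multipathd code.

#include <stdbool.h>
#include <time.h>

/* before: the leading underscores signalled "call with client_lock held" */
/* static struct timespec *__get_soonest_timeout(struct timespec *ts); */
/* static bool __need_lock(void); */

/* after: plain names, and the lock-related comments go away with the lock */
static struct timespec *get_soonest_timeout(struct timespec *ts)
{
	(void)ts;
	return NULL;	/* placeholder: "no timeout" */
}

static bool need_lock(void)
{
	return false;	/* placeholder */
}

int main(void)
{
	struct timespec ts;

	(void)get_soonest_timeout(&ts);
	(void)need_lock();
	return 0;
}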

>  {
>  	int fd = c->fd;
>  	list_del_init(&c->node);
> @@ -170,14 +167,6 @@ static void _dead_client(struct client *c)
>  	close(fd);
>  }
>  
> -static void dead_client(struct client *c)
> -{
> -	pthread_cleanup_push(cleanup_mutex, &client_lock);
> -	pthread_mutex_lock(&client_lock);
> -	_dead_client(c);
> -	pthread_cleanup_pop(1);
> -}
> -
>  static void free_polls (void)
>  {
>  	if (polls)
> @@ -194,11 +183,9 @@ void uxsock_cleanup(void *arg)
>  	close(notify_fd);
>  	free(watch_config_dir);
>  
> -	pthread_mutex_lock(&client_lock);
>  	list_for_each_entry_safe(client_loop, client_tmp, &clients, node) {
> -		_dead_client(client_loop);
> +		dead_client(client_loop);
>  	}
> -	pthread_mutex_unlock(&client_lock);
>  
>  	cli_exit();
>  	free_polls();
> @@ -668,8 +655,6 @@ void *uxsock_listen(long ux_sock, void *trigger_data)
>  		struct timespec __timeout, *timeout;
>  
>  		/* setup for a poll */
> -		pthread_mutex_lock(&client_lock);
> -		pthread_cleanup_push(cleanup_mutex, &client_lock);
>  		num_clients = 0;
>  		list_for_each_entry(c, &clients, node) {
>  			num_clients++;
> @@ -738,8 +723,6 @@ void *uxsock_listen(long ux_sock, void *trigger_data)
>                  n_pfds = i;
>  		timeout = __get_soonest_timeout(&__timeout);
>  
> -		pthread_cleanup_pop(1);
> -
>  		/* most of our life is spent in this call */
>  		poll_count = ppoll(polls, n_pfds, timeout, &mask);
>  
> -- 
> 2.33.0

Martin Wilck Sept. 16, 2021, 9:34 a.m. UTC | #2
On Wed, 2021-09-15 at 23:24 -0500, Benjamin Marzinski wrote:
> >  
> >  /*
> >   * kill off a dead client
> >   */
> > -static void _dead_client(struct client *c)
> > +static void dead_client(struct client *c)
> 
> There are also leading underscores and comments about the clients
> lock
> for __get_soonest_timeout() and __need_lock(). We should probably
> remove
> those as well.

Ok,

Martin

Patch

diff --git a/multipathd/uxlsnr.c b/multipathd/uxlsnr.c
index c18b2c4..7b763b6 100644
--- a/multipathd/uxlsnr.c
+++ b/multipathd/uxlsnr.c
@@ -89,7 +89,6 @@  enum {
 static __attribute__((unused)) char ___a[-(MIN_POLLS <= 0)];
 
 static LIST_HEAD(clients);
-static pthread_mutex_t client_lock = PTHREAD_MUTEX_INITIALIZER;
 static struct pollfd *polls;
 static int notify_fd = -1;
 static int idle_fd = -1;
@@ -150,15 +149,13 @@  static void new_client(int ux_sock)
 	c->is_root = _socket_client_is_root(c->fd);
 
 	/* put it in our linked list */
-	pthread_mutex_lock(&client_lock);
 	list_add_tail(&c->node, &clients);
-	pthread_mutex_unlock(&client_lock);
 }
 
 /*
  * kill off a dead client
  */
-static void _dead_client(struct client *c)
+static void dead_client(struct client *c)
 {
 	int fd = c->fd;
 	list_del_init(&c->node);
@@ -170,14 +167,6 @@  static void _dead_client(struct client *c)
 	close(fd);
 }
 
-static void dead_client(struct client *c)
-{
-	pthread_cleanup_push(cleanup_mutex, &client_lock);
-	pthread_mutex_lock(&client_lock);
-	_dead_client(c);
-	pthread_cleanup_pop(1);
-}
-
 static void free_polls (void)
 {
 	if (polls)
@@ -194,11 +183,9 @@  void uxsock_cleanup(void *arg)
 	close(notify_fd);
 	free(watch_config_dir);
 
-	pthread_mutex_lock(&client_lock);
 	list_for_each_entry_safe(client_loop, client_tmp, &clients, node) {
-		_dead_client(client_loop);
+		dead_client(client_loop);
 	}
-	pthread_mutex_unlock(&client_lock);
 
 	cli_exit();
 	free_polls();
@@ -668,8 +655,6 @@  void *uxsock_listen(long ux_sock, void *trigger_data)
 		struct timespec __timeout, *timeout;
 
 		/* setup for a poll */
-		pthread_mutex_lock(&client_lock);
-		pthread_cleanup_push(cleanup_mutex, &client_lock);
 		num_clients = 0;
 		list_for_each_entry(c, &clients, node) {
 			num_clients++;
@@ -738,8 +723,6 @@  void *uxsock_listen(long ux_sock, void *trigger_data)
                 n_pfds = i;
 		timeout = __get_soonest_timeout(&__timeout);
 
-		pthread_cleanup_pop(1);
-
 		/* most of our life is spent in this call */
 		poll_count = ppoll(polls, n_pfds, timeout, &mask);