
svc_run: make sure only one svc_run loop runs in one process

Message ID 20190409113713.30595-1-xiubli@redhat.com (mailing list archive)
State New, archived
Series svc_run: make sure only one svc_run loop runs in one process

Commit Message

Xiubo Li April 9, 2019, 11:37 a.m. UTC
From: Xiubo Li <xiubli@redhat.com>

In the gluster-block project there are two separate threads, both
of which run the svc_run loop. This works well with the glibc
version, but with libtirpc we are hitting random crashes and
hangs.

For more detail, please see:
https://github.com/gluster/gluster-block/pull/182

Signed-off-by: Xiubo Li <xiubli@redhat.com>
---
 src/svc_run.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)
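
For context, a minimal sketch of the scenario the commit message describes (hypothetical code, not taken from gluster-block): two threads in one process each call svc_run(). With this patch, only the first caller enters the dispatch loop; the second logs an error to syslog and returns immediately.

#include <pthread.h>
#include <rpc/rpc.h>

static void *rpc_worker(void *arg)
{
    (void)arg;
    /* ... svc_create()/svc_reg() for this thread's RPC program (omitted) ... */
    svc_run();   /* blocks in the dispatch loop, or returns at once if
                    another thread already owns the loop (with this patch) */
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;

    pthread_create(&t1, NULL, rpc_worker, NULL);
    pthread_create(&t2, NULL, rpc_worker, NULL);  /* second loop is refused */

    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    return 0;
}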

Comments

Xiubo Li May 16, 2019, 2:55 a.m. UTC | #1
Hey ping.

What's the status of this patch, and does it make sense here?

Thanks
BRs

On 2019/4/9 19:37, xiubli@redhat.com wrote:
> From: Xiubo Li <xiubli@redhat.com>
>
> In gluster-block project and there are 2 separate threads, both
> of which will run the svc_run loop, this could work well in glibc
> version, but in libtirpc we are hitting the random crash and stuck
> issues.
>
> More detail please see:
> https://github.com/gluster/gluster-block/pull/182
>
> Signed-off-by: Xiubo Li <xiubli@redhat.com>
> ---
>   src/svc_run.c | 19 +++++++++++++++++++
>   1 file changed, 19 insertions(+)
>
> diff --git a/src/svc_run.c b/src/svc_run.c
> index f40314b..b295755 100644
> --- a/src/svc_run.c
> +++ b/src/svc_run.c
> @@ -38,12 +38,17 @@
>   #include <string.h>
>   #include <unistd.h>
>   #include <sys/poll.h>
> +#include <syslog.h>
> +#include <stdbool.h>
>   
>   
>   #include <rpc/rpc.h>
>   #include "rpc_com.h"
>   #include <sys/select.h>
>   
> +static bool svc_loop_running = false;
> +static pthread_mutex_t svc_run_lock = PTHREAD_MUTEX_INITIALIZER;
> +
>   void
>   svc_run()
>   {
> @@ -51,6 +56,16 @@ svc_run()
>     struct pollfd *my_pollfd = NULL;
>     int last_max_pollfd = 0;
>   
> +  pthread_mutex_lock(&svc_run_lock);
> +  if (svc_loop_running) {
> +    pthread_mutex_unlock(&svc_run_lock);
> +    syslog (LOG_ERR, "svc_run: svc loop is already running in current process %d", getpid());
> +    return;
> +  }
> +
> +  svc_loop_running = true;
> +  pthread_mutex_unlock(&svc_run_lock);
> +
>     for (;;) {
>       int max_pollfd = svc_max_pollfd;
>       if (max_pollfd == 0 && svc_pollfd == NULL)
> @@ -111,4 +126,8 @@ svc_exit()
>   	svc_pollfd = NULL;
>   	svc_max_pollfd = 0;
>   	rwlock_unlock(&svc_fd_lock);
> +
> +    pthread_mutex_lock(&svc_run_lock);
> +    svc_loop_running = false;
> +    pthread_mutex_unlock(&svc_run_lock);
>   }
Steve Dickson June 11, 2019, 2:54 p.m. UTC | #2
Sorry for the delay.... 

On 5/15/19 10:55 PM, Xiubo Li wrote:
> Hey ping.
> 
> What's the state of this patch and will it make sense here?
I'm not sure it does make sense.... Shouldn't the mutex lock
be in the call of svc_run()?

steved.
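
One possible reading of that suggestion, as a minimal sketch: serialize entry into svc_run() on the application side and leave libtirpc unchanged. The wrapper name app_svc_run and its lock are hypothetical, not from any existing codebase.

#include <pthread.h>
#include <stdbool.h>
#include <syslog.h>
#include <unistd.h>
#include <rpc/rpc.h>

static pthread_mutex_t app_svc_run_lock = PTHREAD_MUTEX_INITIALIZER;
static bool app_svc_loop_running = false;

/* Application-side guard: only the first caller reaches the library loop. */
void app_svc_run(void)
{
    pthread_mutex_lock(&app_svc_run_lock);
    if (app_svc_loop_running) {
        pthread_mutex_unlock(&app_svc_run_lock);
        syslog(LOG_ERR, "app_svc_run: loop already running in pid %d", getpid());
        return;
    }
    app_svc_loop_running = true;
    pthread_mutex_unlock(&app_svc_run_lock);

    svc_run();

    pthread_mutex_lock(&app_svc_run_lock);
    app_svc_loop_running = false;
    pthread_mutex_unlock(&app_svc_run_lock);
}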

> 
> Thanks
> BRs
> 
> On 2019/4/9 19:37, xiubli@redhat.com wrote:
>> From: Xiubo Li <xiubli@redhat.com>
>>
>> In gluster-block project and there are 2 separate threads, both
>> of which will run the svc_run loop, this could work well in glibc
>> version, but in libtirpc we are hitting the random crash and stuck
>> issues.
>>
>> More detail please see:
>> https://github.com/gluster/gluster-block/pull/182
>>
>> Signed-off-by: Xiubo Li <xiubli@redhat.com>
>> ---
>>   src/svc_run.c | 19 +++++++++++++++++++
>>   1 file changed, 19 insertions(+)
>>
>> diff --git a/src/svc_run.c b/src/svc_run.c
>> index f40314b..b295755 100644
>> --- a/src/svc_run.c
>> +++ b/src/svc_run.c
>> @@ -38,12 +38,17 @@
>>   #include <string.h>
>>   #include <unistd.h>
>>   #include <sys/poll.h>
>> +#include <syslog.h>
>> +#include <stdbool.h>
>>
>>
>>   #include <rpc/rpc.h>
>>   #include "rpc_com.h"
>>   #include <sys/select.h>
>>
>> +static bool svc_loop_running = false;
>> +static pthread_mutex_t svc_run_lock = PTHREAD_MUTEX_INITIALIZER;
>> +
>>   void
>>   svc_run()
>>   {
>> @@ -51,6 +56,16 @@ svc_run()
>>     struct pollfd *my_pollfd = NULL;
>>     int last_max_pollfd = 0;
>>
>> +  pthread_mutex_lock(&svc_run_lock);
>> +  if (svc_loop_running) {
>> +    pthread_mutex_unlock(&svc_run_lock);
>> +    syslog (LOG_ERR, "svc_run: svc loop is already running in current process %d", getpid());
>> +    return;
>> +  }
>> +
>> +  svc_loop_running = true;
>> +  pthread_mutex_unlock(&svc_run_lock);
>> +
>>     for (;;) {
>>       int max_pollfd = svc_max_pollfd;
>>       if (max_pollfd == 0 && svc_pollfd == NULL)
>> @@ -111,4 +126,8 @@ svc_exit()
>>       svc_pollfd = NULL;
>>       svc_max_pollfd = 0;
>>       rwlock_unlock(&svc_fd_lock);
>> +
>> +    pthread_mutex_lock(&svc_run_lock);
>> +    svc_loop_running = false;
>> +    pthread_mutex_unlock(&svc_run_lock);
>>   }
Xiubo Li June 12, 2019, 3:32 a.m. UTC | #3
On 2019/6/11 22:54, Steve Dickson wrote:
> Sorry for the delay....
>
> On 5/15/19 10:55 PM, Xiubo Li wrote:
>> Hey ping.
>>
>> What's the state of this patch and will it make sense here?
> I'm not sure it does make sense.... Shouldn't the mutex lock
> be in the call of svc_run()?

Hi Steve,

Yes, the mutex lock should be taken inside svc_run(), and that is
exactly what this change does.

If libtirpc intends to allow only one svc_run() loop per process, then
IMO this change is needed. If instead it should allow more than one, as
the glibc version does, then this is a bug in libtirpc.

Thanks.
BRs
Xiubo


> steved.
>
>> Thanks
>> BRs
>>
>> On 2019/4/9 19:37, xiubli@redhat.com wrote:
>>> From: Xiubo Li <xiubli@redhat.com>
>>>
>>> In gluster-block project and there are 2 separate threads, both
>>> of which will run the svc_run loop, this could work well in glibc
>>> version, but in libtirpc we are hitting the random crash and stuck
>>> issues.
>>>
>>> More detail please see:
>>> https://github.com/gluster/gluster-block/pull/182
>>>
>>> Signed-off-by: Xiubo Li <xiubli@redhat.com>
>>> ---
>>>    src/svc_run.c | 19 +++++++++++++++++++
>>>    1 file changed, 19 insertions(+)
>>>
>>> diff --git a/src/svc_run.c b/src/svc_run.c
>>> index f40314b..b295755 100644
>>> --- a/src/svc_run.c
>>> +++ b/src/svc_run.c
>>> @@ -38,12 +38,17 @@
>>>    #include <string.h>
>>>    #include <unistd.h>
>>>    #include <sys/poll.h>
>>> +#include <syslog.h>
>>> +#include <stdbool.h>
>>>
>>>
>>>    #include <rpc/rpc.h>
>>>    #include "rpc_com.h"
>>>    #include <sys/select.h>
>>>
>>> +static bool svc_loop_running = false;
>>> +static pthread_mutex_t svc_run_lock = PTHREAD_MUTEX_INITIALIZER;
>>> +
>>>    void
>>>    svc_run()
>>>    {
>>> @@ -51,6 +56,16 @@ svc_run()
>>>      struct pollfd *my_pollfd = NULL;
>>>      int last_max_pollfd = 0;
>>>
>>> +  pthread_mutex_lock(&svc_run_lock);
>>> +  if (svc_loop_running) {
>>> +    pthread_mutex_unlock(&svc_run_lock);
>>> +    syslog (LOG_ERR, "svc_run: svc loop is already running in current process %d", getpid());
>>> +    return;
>>> +  }
>>> +
>>> +  svc_loop_running = true;
>>> +  pthread_mutex_unlock(&svc_run_lock);
>>> +
>>>      for (;;) {
>>>        int max_pollfd = svc_max_pollfd;
>>>        if (max_pollfd == 0 && svc_pollfd == NULL)
>>> @@ -111,4 +126,8 @@ svc_exit()
>>>        svc_pollfd = NULL;
>>>        svc_max_pollfd = 0;
>>>        rwlock_unlock(&svc_fd_lock);
>>> +
>>> +    pthread_mutex_lock(&svc_run_lock);
>>> +    svc_loop_running = false;
>>> +    pthread_mutex_unlock(&svc_run_lock);
>>>    }
Olga Kornievskaia June 12, 2019, 4:46 p.m. UTC | #4
On Wed, Jun 12, 2019 at 3:45 AM Xiubo Li <xiubli@redhat.com> wrote:
>
> On 2019/6/11 22:54, Steve Dickson wrote:
> > Sorry for the delay....
> >
> > On 5/15/19 10:55 PM, Xiubo Li wrote:
> >> Hey ping.
> >>
> >> What's the state of this patch and will it make sense here?
> > I'm not sure it does make sense.... Shouldn't the mutex lock
> > be in the call of svc_run()?
>
> Hi Steve,
>
> Yeah, mutex lock should be in the call of svc_run(). This is exactly
> what I do in this change.
>
> If the libtirpc means to allow only one svc_run() loop in each process,
> so IMO this change is needed. Or if we will allow more than one like the
> glibc version does, so this should be one bug in libtirpc.

Has there been any effort to investigate what is causing the
crashes? We should perhaps check whether svc_run() is thread-safe and
examine which functions it uses that might not be. You might be able
to allow greater parallelism than one thread in svc_run() simply by
wrapping the non-thread-safe functions in pthread locks.
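
As an illustration of that idea, a minimal sketch of wrapping a non-thread-safe helper in a pthread mutex; the helper and counter here are hypothetical stand-ins, not libtirpc internals.

#include <pthread.h>

/* Hypothetical stand-in for a library-internal helper that is not
 * thread-safe on its own: it updates shared state without locking. */
static int dispatch_count;

static void bump_dispatch_count_unlocked(void)
{
    dispatch_count++;
}

static pthread_mutex_t dispatch_count_lock = PTHREAD_MUTEX_INITIALIZER;

/* Thread-safe wrapper: several svc_run() threads could call this safely. */
void bump_dispatch_count(void)
{
    pthread_mutex_lock(&dispatch_count_lock);
    bump_dispatch_count_unlocked();
    pthread_mutex_unlock(&dispatch_count_lock);
}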

>
> Thanks.
> BRs
> Xiubo
>
>
> > steved.
> >
> >> Thanks
> >> BRs
> >>
> >> On 2019/4/9 19:37, xiubli@redhat.com wrote:
> >>> From: Xiubo Li <xiubli@redhat.com>
> >>>
> >>> In gluster-block project and there are 2 separate threads, both
> >>> of which will run the svc_run loop, this could work well in glibc
> >>> version, but in libtirpc we are hitting the random crash and stuck
> >>> issues.
> >>>
> >>> More detail please see:
> >>> https://github.com/gluster/gluster-block/pull/182
> >>>
> >>> Signed-off-by: Xiubo Li <xiubli@redhat.com>
> >>> ---
> >>>    src/svc_run.c | 19 +++++++++++++++++++
> >>>    1 file changed, 19 insertions(+)
> >>>
> >>> diff --git a/src/svc_run.c b/src/svc_run.c
> >>> index f40314b..b295755 100644
> >>> --- a/src/svc_run.c
> >>> +++ b/src/svc_run.c
> >>> @@ -38,12 +38,17 @@
> >>>    #include <string.h>
> >>>    #include <unistd.h>
> >>>    #include <sys/poll.h>
> >>> +#include <syslog.h>
> >>> +#include <stdbool.h>
> >>>
> >>>
> >>>    #include <rpc/rpc.h>
> >>>    #include "rpc_com.h"
> >>>    #include <sys/select.h>
> >>>
> >>> +static bool svc_loop_running = false;
> >>> +static pthread_mutex_t svc_run_lock = PTHREAD_MUTEX_INITIALIZER;
> >>> +
> >>>    void
> >>>    svc_run()
> >>>    {
> >>> @@ -51,6 +56,16 @@ svc_run()
> >>>      struct pollfd *my_pollfd = NULL;
> >>>      int last_max_pollfd = 0;
> >>>
> >>> +  pthread_mutex_lock(&svc_run_lock);
> >>> +  if (svc_loop_running) {
> >>> +    pthread_mutex_unlock(&svc_run_lock);
> >>> +    syslog (LOG_ERR, "svc_run: svc loop is already running in current process %d", getpid());
> >>> +    return;
> >>> +  }
> >>> +
> >>> +  svc_loop_running = true;
> >>> +  pthread_mutex_unlock(&svc_run_lock);
> >>> +
> >>>      for (;;) {
> >>>        int max_pollfd = svc_max_pollfd;
> >>>        if (max_pollfd == 0 && svc_pollfd == NULL)
> >>> @@ -111,4 +126,8 @@ svc_exit()
> >>>        svc_pollfd = NULL;
> >>>        svc_max_pollfd = 0;
> >>>        rwlock_unlock(&svc_fd_lock);
> >>> +
> >>> +    pthread_mutex_lock(&svc_run_lock);
> >>> +    svc_loop_running = false;
> >>> +    pthread_mutex_unlock(&svc_run_lock);
> >>>    }
Xiubo Li June 13, 2019, 12:50 a.m. UTC | #5
On 2019/6/13 0:46, Olga Kornievskaia wrote:
> On Wed, Jun 12, 2019 at 3:45 AM Xiubo Li <xiubli@redhat.com> wrote:
>> On 2019/6/11 22:54, Steve Dickson wrote:
>>> Sorry for the delay....
>>>
>>> On 5/15/19 10:55 PM, Xiubo Li wrote:
>>>> Hey ping.
>>>>
>>>> What's the state of this patch and will it make sense here?
>>> I'm not sure it does make sense.... Shouldn't the mutex lock
>>> be in the call of svc_run()?
>> Hi Steve,
>>
>> Yeah, mutex lock should be in the call of svc_run(). This is exactly
>> what I do in this change.
>>
>> If the libtirpc means to allow only one svc_run() loop in each process,
>> so IMO this change is needed. Or if we will allow more than one like the
>> glibc version does, so this should be one bug in libtirpc.
> Has there been effort into made into investigating what's causing the
> crashes?

From our investigation and testing: if we ran two svc_run() loops in
one process, say in pthread1 and pthread2, pthread1 would receive RPC
connections/requests that should have been handled by pthread2's
svc_run loop, and vice versa.

We then saw many random crashes with all sorts of causes, such as
use-after-free and double-free, and almost every time the crash hit a
different place and a different library, such as libtirpc, glusterfs
or gluster-block.

After switching to multiple processes instead of running two svc_run
loops in separate pthreads, the issue went away, so we did not dig
into it further.
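
For illustration, a minimal sketch of that multi-process workaround (hypothetical code, not the actual gluster-block change): fork one child per RPC service and let each child run its own svc_run() loop in its own address space.

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <rpc/rpc.h>

static void run_service(void)
{
    /* ... svc_create()/svc_reg() for this service (omitted) ... */
    svc_run();              /* one dispatch loop per process */
    exit(EXIT_FAILURE);     /* svc_run() normally never returns */
}

int main(void)
{
    for (int i = 0; i < 2; i++) {   /* two services -> two processes */
        pid_t pid = fork();
        if (pid < 0) {
            perror("fork");
            return 1;
        }
        if (pid == 0)
            run_service();          /* child never returns on success */
    }
    while (wait(NULL) > 0)          /* parent just reaps the children */
        ;
    return 0;
}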


>   We perhaps should make an effort to see if svc_run() is
> thread-safe and examine which functions it uses and which might not be
> thread safe. You might be able to allow greater parallelism then 1
> thread in a svc_run() function by just making some not-thread safe
> functions wrapped in pthread locks.

Yeah, makes sense.

Thanks.

BRs


>> Thanks.
>> BRs
>> Xiubo
>>
>>
>>> steved.
>>>
>>>> Thanks
>>>> BRs
>>>>
>>>> On 2019/4/9 19:37, xiubli@redhat.com wrote:
>>>>> From: Xiubo Li <xiubli@redhat.com>
>>>>>
>>>>> In gluster-block project and there are 2 separate threads, both
>>>>> of which will run the svc_run loop, this could work well in glibc
>>>>> version, but in libtirpc we are hitting the random crash and stuck
>>>>> issues.
>>>>>
>>>>> More detail please see:
>>>>> https://github.com/gluster/gluster-block/pull/182
>>>>>
>>>>> Signed-off-by: Xiubo Li <xiubli@redhat.com>
>>>>> ---
>>>>>     src/svc_run.c | 19 +++++++++++++++++++
>>>>>     1 file changed, 19 insertions(+)
>>>>>
>>>>> diff --git a/src/svc_run.c b/src/svc_run.c
>>>>> index f40314b..b295755 100644
>>>>> --- a/src/svc_run.c
>>>>> +++ b/src/svc_run.c
>>>>> @@ -38,12 +38,17 @@
>>>>>     #include <string.h>
>>>>>     #include <unistd.h>
>>>>>     #include <sys/poll.h>
>>>>> +#include <syslog.h>
>>>>> +#include <stdbool.h>
>>>>>
>>>>>
>>>>>     #include <rpc/rpc.h>
>>>>>     #include "rpc_com.h"
>>>>>     #include <sys/select.h>
>>>>>
>>>>> +static bool svc_loop_running = false;
>>>>> +static pthread_mutex_t svc_run_lock = PTHREAD_MUTEX_INITIALIZER;
>>>>> +
>>>>>     void
>>>>>     svc_run()
>>>>>     {
>>>>> @@ -51,6 +56,16 @@ svc_run()
>>>>>       struct pollfd *my_pollfd = NULL;
>>>>>       int last_max_pollfd = 0;
>>>>>
>>>>> +  pthread_mutex_lock(&svc_run_lock);
>>>>> +  if (svc_loop_running) {
>>>>> +    pthread_mutex_unlock(&svc_run_lock);
>>>>> +    syslog (LOG_ERR, "svc_run: svc loop is already running in current process %d", getpid());
>>>>> +    return;
>>>>> +  }
>>>>> +
>>>>> +  svc_loop_running = true;
>>>>> +  pthread_mutex_unlock(&svc_run_lock);
>>>>> +
>>>>>       for (;;) {
>>>>>         int max_pollfd = svc_max_pollfd;
>>>>>         if (max_pollfd == 0 && svc_pollfd == NULL)
>>>>> @@ -111,4 +126,8 @@ svc_exit()
>>>>>         svc_pollfd = NULL;
>>>>>         svc_max_pollfd = 0;
>>>>>         rwlock_unlock(&svc_fd_lock);
>>>>> +
>>>>> +    pthread_mutex_lock(&svc_run_lock);
>>>>> +    svc_loop_running = false;
>>>>> +    pthread_mutex_unlock(&svc_run_lock);
>>>>>     }

Patch

diff --git a/src/svc_run.c b/src/svc_run.c
index f40314b..b295755 100644
--- a/src/svc_run.c
+++ b/src/svc_run.c
@@ -38,12 +38,17 @@ 
 #include <string.h>
 #include <unistd.h>
 #include <sys/poll.h>
+#include <syslog.h>
+#include <stdbool.h>
 
 
 #include <rpc/rpc.h>
 #include "rpc_com.h"
 #include <sys/select.h>
 
+static bool svc_loop_running = false;
+static pthread_mutex_t svc_run_lock = PTHREAD_MUTEX_INITIALIZER;
+
 void
 svc_run()
 {
@@ -51,6 +56,16 @@  svc_run()
   struct pollfd *my_pollfd = NULL;
   int last_max_pollfd = 0;
 
+  pthread_mutex_lock(&svc_run_lock);
+  if (svc_loop_running) {
+    pthread_mutex_unlock(&svc_run_lock);
+    syslog (LOG_ERR, "svc_run: svc loop is already running in current process %d", getpid());
+    return;
+  }
+
+  svc_loop_running = true;
+  pthread_mutex_unlock(&svc_run_lock);
+
   for (;;) {
     int max_pollfd = svc_max_pollfd;
     if (max_pollfd == 0 && svc_pollfd == NULL)
@@ -111,4 +126,8 @@  svc_exit()
 	svc_pollfd = NULL;
 	svc_max_pollfd = 0;
 	rwlock_unlock(&svc_fd_lock);
+
+    pthread_mutex_lock(&svc_run_lock);
+    svc_loop_running = false;
+    pthread_mutex_unlock(&svc_run_lock);
 }
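
A possible usage sketch against the patched code (a hypothetical test program, not part of the patch): a second thread calls svc_exit() to stop the dispatch loop; svc_exit() clears svc_pollfd and, with this patch, the svc_loop_running flag, so svc_run() may be entered again afterwards.

#include <pthread.h>
#include <unistd.h>
#include <rpc/rpc.h>

static void *stopper(void *arg)
{
    (void)arg;
    sleep(5);      /* let the server run for a while */
    svc_exit();    /* svc_run() notices on its next pass through the loop */
    return NULL;
}

int main(void)
{
    pthread_t t;

    /* ... svc_create()/svc_reg() calls omitted ... */
    pthread_create(&t, NULL, stopper, NULL);
    svc_run();                 /* returns after svc_exit() takes effect */
    pthread_join(t, NULL);
    return 0;
}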