
[PATCHv3,v6.5-rc2,2/3] fs: dlm: allow to F_SETLKW getting interrupted

Message ID 20230718180721.745569-3-aahringo@redhat.com (mailing list archive)
State New, archived
Series fs: dlm: lock cancellation feature

Commit Message

Alexander Aring July 18, 2023, 6:07 p.m. UTC
This patch implements the dlm plock F_SETLKW interruption feature. If a
blocking posix lock request is interrupted in user space by a signal, a
cancellation request for the not yet granted lock request is sent to the
user space lock manager. The user space lock manager answers either with
zero or a negative errno code. An errno of -ENOENT signals that there is
currently no blocking lock request waiting to be granted. In the case of
-ENOENT it was probably too late to request a cancellation and the
pending lock got granted. In any error case we will wait until the lock
is granted, since the cancellation failed. This also means that with an
older user space lock manager returning -EINVAL we will wait, as
cancellation is not supported, which should be fine. If a user requires
this feature, the dlm user space should be updated to support lock
request cancellation.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
 fs/dlm/plock.c                 | 56 ++++++++++++++++++++++------------
 include/uapi/linux/dlm_plock.h |  1 +
 2 files changed, 37 insertions(+), 20 deletions(-)
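
For illustration, the user-visible effect of the change is that a process blocked in F_SETLKW on a DLM-backed cluster filesystem can now be interrupted by a signal, provided the user space lock manager supports the cancel op. A minimal sketch from the application side (the mount path and the 2 second alarm are placeholders, not part of the patch):

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_alarm(int sig)
{
	(void)sig;	/* nothing to do: the signal only interrupts fcntl() */
}

int main(void)
{
	struct flock fl = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 0,		/* whole file */
	};
	struct sigaction sa = { 0 };
	int fd;

	fd = open("/mnt/gfs2/contended-file", O_RDWR);	/* placeholder path */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	sa.sa_handler = on_alarm;	/* deliberately no SA_RESTART */
	sigaction(SIGALRM, &sa, NULL);
	alarm(2);	/* interrupt the blocking lock request after ~2 seconds */

	if (fcntl(fd, F_SETLKW, &fl) < 0 && errno == EINTR)
		puts("F_SETLKW was interrupted; the pending request was cancelled");
	else
		puts("lock acquired (or failed for another reason)");

	close(fd);
	return 0;
}

With an older dlm_controld that does not understand the cancel op, the same program would keep waiting for the lock instead (see the discussion below).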

Comments

Andreas Gruenbacher March 25, 2024, 3:08 p.m. UTC | #1
On Tue, Jul 18, 2023 at 8:07 PM Alexander Aring <aahringo@redhat.com> wrote:
> This patch implements dlm plock F_SETLKW interruption feature. If a
> blocking posix lock request got interrupted in user space by a signal a
> cancellation request for a non granted lock request to the user space
> lock manager will be send. The user lock manager answers either with
> zero or a negative errno code. A errno of -ENOENT signals that there is
> currently no blocking lock request waiting to being granted. In case of
> -ENOENT it was probably to late to request a cancellation and the
> pending lock got granted. In any error case we will wait until the lock
> is being granted as cancellation failed, this causes also that in case
> of an older user lock manager returning -EINVAL we will wait as
> cancellation is not supported which should be fine. If a user requires
> this feature the user should update dlm user space to support lock
> request cancellation.
>
> Signed-off-by: Alexander Aring <aahringo@redhat.com>
> ---
>  fs/dlm/plock.c                 | 56 ++++++++++++++++++++++------------
>  include/uapi/linux/dlm_plock.h |  1 +
>  2 files changed, 37 insertions(+), 20 deletions(-)
>
> diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
> index a34f605d8505..a8ffa0760913 100644
> --- a/fs/dlm/plock.c
> +++ b/fs/dlm/plock.c
> @@ -74,30 +74,26 @@ static void send_op(struct plock_op *op)
>         wake_up(&send_wq);
>  }
>
> -/* If a process was killed while waiting for the only plock on a file,
> -   locks_remove_posix will not see any lock on the file so it won't
> -   send an unlock-close to us to pass on to userspace to clean up the
> -   abandoned waiter.  So, we have to insert the unlock-close when the
> -   lock call is interrupted. */
> -
> -static void do_unlock_close(const struct dlm_plock_info *info)
> +static int do_lock_cancel(const struct dlm_plock_info *orig_info)
>  {
>         struct plock_op *op;
> +       int rv;
>
>         op = kzalloc(sizeof(*op), GFP_NOFS);
>         if (!op)
> -               return;
> +               return -ENOMEM;
> +
> +       op->info = *orig_info;
> +       op->info.optype = DLM_PLOCK_OP_CANCEL;
> +       op->info.wait = 0;
>
> -       op->info.optype         = DLM_PLOCK_OP_UNLOCK;
> -       op->info.pid            = info->pid;
> -       op->info.fsid           = info->fsid;
> -       op->info.number         = info->number;
> -       op->info.start          = 0;
> -       op->info.end            = OFFSET_MAX;
> -       op->info.owner          = info->owner;
> -
> -       op->info.flags |= DLM_PLOCK_FL_CLOSE;
>         send_op(op);
> +       wait_event(recv_wq, (op->done != 0));
> +
> +       rv = op->info.rv;
> +
> +       dlm_release_plock_op(op);
> +       return rv;
>  }
>
>  int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
> @@ -156,7 +152,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
>         send_op(op);
>
>         if (op->info.wait) {
> -               rv = wait_event_killable(recv_wq, (op->done != 0));
> +               rv = wait_event_interruptible(recv_wq, (op->done != 0));

It seems that this patch leads to an unnecessary change in behavior
when a fatal signal is received (fatal_signal_pending()): before, the
process would terminate. Now, it will try to cancel the lock, and when
that fails, the process will keep waiting. In case of a fatal signal,
can we skip the cancelling and do what we did before?
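
One way that suggestion could look in dlm_posix_lock(), sketched purely for discussion and not part of the posted patch (it assumes the old do_unlock_close() cleanup is kept for this path and omits the op->done recheck under ops_lock that the patch performs):

	rv = wait_event_interruptible(recv_wq, (op->done != 0));
	if (rv == -ERESTARTSYS) {
		/* sketch only: a task hit by a fatal signal is exiting
		 * anyway, so skip the cancel round trip (which may end in
		 * an uninterruptible wait) and keep the old cleanup
		 */
		if (fatal_signal_pending(current)) {
			spin_lock(&ops_lock);
			/* op->done recheck as in the patch omitted here */
			list_del(&op->list);
			spin_unlock(&ops_lock);
			do_unlock_close(&op->info);	/* old helper removed by this patch */
			dlm_release_plock_op(op);
			goto out;	/* rv stays -ERESTARTSYS as before */
		}

		rv = do_lock_cancel(&op->info);
		/* 0 / -ENOENT / other errors handled as in the patch */
	}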

>                 if (rv == -ERESTARTSYS) {
>                         spin_lock(&ops_lock);
>                         /* recheck under ops_lock if we got a done != 0,
> @@ -166,17 +162,37 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
>                                 spin_unlock(&ops_lock);
>                                 goto do_lock_wait;
>                         }
> -                       list_del(&op->list);
>                         spin_unlock(&ops_lock);
>
> +                       rv = do_lock_cancel(&op->info);
> +                       switch (rv) {
> +                       case 0:
> +                               /* waiter was deleted in user space, answer will never come
> +                                * remove original request. The original request must be
> +                                * on recv_list because the answer of do_lock_cancel()
> +                                * synchronized it.
> +                                */
> +                               spin_lock(&ops_lock);
> +                               list_del(&op->list);
> +                               spin_unlock(&ops_lock);
> +                               rv = -EINTR;
> +                               break;
> +                       case -ENOENT:
> +                               /* cancellation wasn't successful but op should be done */
> +                               fallthrough;
> +                       default:
> +                               /* internal error doing cancel we need to wait */
> +                               goto wait;
> +                       }
> +
>                         log_debug(ls, "%s: wait interrupted %x %llx pid %d",
>                                   __func__, ls->ls_global_id,
>                                   (unsigned long long)number, op->info.pid);
> -                       do_unlock_close(&op->info);
>                         dlm_release_plock_op(op);
>                         goto out;
>                 }
>         } else {
> +wait:
>                 wait_event(recv_wq, (op->done != 0));
>         }
>
> diff --git a/include/uapi/linux/dlm_plock.h b/include/uapi/linux/dlm_plock.h
> index 63b6c1fd9169..eb66afcac40e 100644
> --- a/include/uapi/linux/dlm_plock.h
> +++ b/include/uapi/linux/dlm_plock.h
> @@ -22,6 +22,7 @@ enum {
>         DLM_PLOCK_OP_LOCK = 1,
>         DLM_PLOCK_OP_UNLOCK,
>         DLM_PLOCK_OP_GET,
> +       DLM_PLOCK_OP_CANCEL,
>  };
>
>  #define DLM_PLOCK_FL_CLOSE 1
> --
> 2.31.1
>

Thanks,
Andreas
Alexander Aring March 26, 2024, 12:32 a.m. UTC | #2
Hi,

On Mon, Mar 25, 2024 at 11:09 AM Andreas Gruenbacher
<agruenba@redhat.com> wrote:
>
> On Tue, Jul 18, 2023 at 8:07 PM Alexander Aring <aahringo@redhat.com> wrote:
> > This patch implements dlm plock F_SETLKW interruption feature. If a
> > blocking posix lock request got interrupted in user space by a signal a
> > cancellation request for a non granted lock request to the user space
> > lock manager will be send. The user lock manager answers either with
> > zero or a negative errno code. A errno of -ENOENT signals that there is
> > currently no blocking lock request waiting to being granted. In case of
> > -ENOENT it was probably to late to request a cancellation and the
> > pending lock got granted. In any error case we will wait until the lock
> > is being granted as cancellation failed, this causes also that in case
> > of an older user lock manager returning -EINVAL we will wait as
> > cancellation is not supported which should be fine. If a user requires
> > this feature the user should update dlm user space to support lock
> > request cancellation.
> >
> > Signed-off-by: Alexander Aring <aahringo@redhat.com>
> > ---
> >  fs/dlm/plock.c                 | 56 ++++++++++++++++++++++------------
> >  include/uapi/linux/dlm_plock.h |  1 +
> >  2 files changed, 37 insertions(+), 20 deletions(-)
> >
> > diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
> > index a34f605d8505..a8ffa0760913 100644
> > --- a/fs/dlm/plock.c
> > +++ b/fs/dlm/plock.c
> > @@ -74,30 +74,26 @@ static void send_op(struct plock_op *op)
> >         wake_up(&send_wq);
> >  }
> >
> > -/* If a process was killed while waiting for the only plock on a file,
> > -   locks_remove_posix will not see any lock on the file so it won't
> > -   send an unlock-close to us to pass on to userspace to clean up the
> > -   abandoned waiter.  So, we have to insert the unlock-close when the
> > -   lock call is interrupted. */
> > -
> > -static void do_unlock_close(const struct dlm_plock_info *info)
> > +static int do_lock_cancel(const struct dlm_plock_info *orig_info)
> >  {
> >         struct plock_op *op;
> > +       int rv;
> >
> >         op = kzalloc(sizeof(*op), GFP_NOFS);
> >         if (!op)
> > -               return;
> > +               return -ENOMEM;
> > +
> > +       op->info = *orig_info;
> > +       op->info.optype = DLM_PLOCK_OP_CANCEL;
> > +       op->info.wait = 0;
> >
> > -       op->info.optype         = DLM_PLOCK_OP_UNLOCK;
> > -       op->info.pid            = info->pid;
> > -       op->info.fsid           = info->fsid;
> > -       op->info.number         = info->number;
> > -       op->info.start          = 0;
> > -       op->info.end            = OFFSET_MAX;
> > -       op->info.owner          = info->owner;
> > -
> > -       op->info.flags |= DLM_PLOCK_FL_CLOSE;
> >         send_op(op);
> > +       wait_event(recv_wq, (op->done != 0));
> > +
> > +       rv = op->info.rv;
> > +
> > +       dlm_release_plock_op(op);
> > +       return rv;
> >  }
> >
> >  int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
> > @@ -156,7 +152,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
> >         send_op(op);
> >
> >         if (op->info.wait) {
> > -               rv = wait_event_killable(recv_wq, (op->done != 0));
> > +               rv = wait_event_interruptible(recv_wq, (op->done != 0));
>
> It seems that this patch leads to an unnecessary change in behavior
> when a fatal signal is received (fatal_signal_pending()): before, the
> process would terminate. Now, it will try to cancel the lock, and when
> that fails, the process will keep waiting. In case of a fatal signal,
> can we skip the cancelling and do what we did before?

From my understanding, wait_event_interruptible() "reacts" to everything
that wait_event_killable() does and returns -ERESTARTSYS on a fatal
signal. I even tested it, because wait_event_killable() has an issue, see
the reproducer [0]. The issue was that it cleared too many waiters: the
other child's F_SETLKW waiter was also cleared and it would never get a
result back from dlm_controld. I fixed that with an additional check on
the pid in [1], but I have no idea about other side effects that could
occur, since FL_CLOSE is also used in other parts of the DLM plock
handling.
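
A rough, self-contained sketch of the scenario that reproducer [0] covers, assuming a file on a DLM-backed mount (the path, the timing and the single killed child are simplifications of the actual LTP test):

#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 0,	/* whole file */
	};
	pid_t victim, waiter;
	int fd = open("/mnt/gfs2/lockfile", O_RDWR | O_CREAT, 0644);	/* placeholder path */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (fcntl(fd, F_SETLKW, &fl) < 0) {	/* parent holds the lock */
		perror("parent F_SETLKW");
		return 1;
	}

	victim = fork();
	if (victim == 0) {
		fcntl(fd, F_SETLKW, &fl);	/* blocks; will be killed */
		_exit(0);
	}
	waiter = fork();
	if (waiter == 0) {
		/* this waiter must survive the SIGKILL sent to the other
		 * child and eventually be granted the lock */
		_exit(fcntl(fd, F_SETLKW, &fl) < 0 ? 1 : 0);
	}

	sleep(1);			/* let both children queue as waiters */
	kill(victim, SIGKILL);		/* interrupt only one of them */

	fl.l_type = F_UNLCK;
	fcntl(fd, F_SETLK, &fl);	/* drop the lock: the waiter should now get it */

	waitpid(victim, NULL, 0);
	waitpid(waiter, NULL, 0);
	close(fd);
	return 0;
}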

I rechecked the behaviour with the cancellation feature, sent SIGKILL,
and the issue was gone without changing anything in user space. The only
case in which we do not get the old behaviour (killable, which has the
issue mentioned above) is when the dlm_controld version is too old. To
not run into this known issue we just do a wait_event(), which does not
have those issues.

The mentioned "cancellation fails" does not mean that cancelling the lock
failed; it means dlm_controld behaved unexpectedly. Only then do we fall
back to wait_event(), e.g. when we receive -EINVAL because dlm_controld
does not understand the op.

- Alex

[0] https://gitlab.com/netcoder/ltp/-/blob/dlm_fcntl_owner_testcase/testcases/kernel/syscalls/fcntl/fcntl44.c
[1] https://pagure.io/dlm/blob/main/f/dlm_controld/plock.c#_655
Andreas Gruenbacher March 26, 2024, 11:31 a.m. UTC | #3
On Tue, Mar 26, 2024 at 1:32 AM Alexander Aring <aahringo@redhat.com> wrote:
> Hi,
>
> On Mon, Mar 25, 2024 at 11:09 AM Andreas Gruenbacher
> <agruenba@redhat.com> wrote:
> >
> > On Tue, Jul 18, 2023 at 8:07 PM Alexander Aring <aahringo@redhat.com> wrote:
> > > This patch implements dlm plock F_SETLKW interruption feature. If a
> > > blocking posix lock request got interrupted in user space by a signal a
> > > cancellation request for a non granted lock request to the user space
> > > lock manager will be send. The user lock manager answers either with
> > > zero or a negative errno code. A errno of -ENOENT signals that there is
> > > currently no blocking lock request waiting to being granted. In case of
> > > -ENOENT it was probably to late to request a cancellation and the
> > > pending lock got granted. In any error case we will wait until the lock
> > > is being granted as cancellation failed, this causes also that in case
> > > of an older user lock manager returning -EINVAL we will wait as
> > > cancellation is not supported which should be fine. If a user requires
> > > this feature the user should update dlm user space to support lock
> > > request cancellation.
> > >
> > > Signed-off-by: Alexander Aring <aahringo@redhat.com>
> > > ---
> > >  fs/dlm/plock.c                 | 56 ++++++++++++++++++++++------------
> > >  include/uapi/linux/dlm_plock.h |  1 +
> > >  2 files changed, 37 insertions(+), 20 deletions(-)
> > >
> > > diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
> > > index a34f605d8505..a8ffa0760913 100644
> > > --- a/fs/dlm/plock.c
> > > +++ b/fs/dlm/plock.c
> > > @@ -74,30 +74,26 @@ static void send_op(struct plock_op *op)
> > >         wake_up(&send_wq);
> > >  }
> > >
> > > -/* If a process was killed while waiting for the only plock on a file,
> > > -   locks_remove_posix will not see any lock on the file so it won't
> > > -   send an unlock-close to us to pass on to userspace to clean up the
> > > -   abandoned waiter.  So, we have to insert the unlock-close when the
> > > -   lock call is interrupted. */
> > > -
> > > -static void do_unlock_close(const struct dlm_plock_info *info)
> > > +static int do_lock_cancel(const struct dlm_plock_info *orig_info)
> > >  {
> > >         struct plock_op *op;
> > > +       int rv;
> > >
> > >         op = kzalloc(sizeof(*op), GFP_NOFS);
> > >         if (!op)
> > > -               return;
> > > +               return -ENOMEM;
> > > +
> > > +       op->info = *orig_info;
> > > +       op->info.optype = DLM_PLOCK_OP_CANCEL;
> > > +       op->info.wait = 0;
> > >
> > > -       op->info.optype         = DLM_PLOCK_OP_UNLOCK;
> > > -       op->info.pid            = info->pid;
> > > -       op->info.fsid           = info->fsid;
> > > -       op->info.number         = info->number;
> > > -       op->info.start          = 0;
> > > -       op->info.end            = OFFSET_MAX;
> > > -       op->info.owner          = info->owner;
> > > -
> > > -       op->info.flags |= DLM_PLOCK_FL_CLOSE;
> > >         send_op(op);
> > > +       wait_event(recv_wq, (op->done != 0));
> > > +
> > > +       rv = op->info.rv;
> > > +
> > > +       dlm_release_plock_op(op);
> > > +       return rv;
> > >  }
> > >
> > >  int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
> > > @@ -156,7 +152,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
> > >         send_op(op);
> > >
> > >         if (op->info.wait) {
> > > -               rv = wait_event_killable(recv_wq, (op->done != 0));
> > > +               rv = wait_event_interruptible(recv_wq, (op->done != 0));
> >
> > It seems that this patch leads to an unnecessary change in behavior
> > when a fatal signal is received (fatal_signal_pending()): before, the
> > process would terminate. Now, it will try to cancel the lock, and when
> > that fails, the process will keep waiting. In case of a fatal signal,
> > can we skip the cancelling and do what we did before?
>
> From my understanding interruptible() "reacts" on everything that is
> also killable() and returns -ERESTARTSYS on "fatal signal". I even
> tested it because wait_event_killable() has an issue, see reproducer
> [0]. The issue was that it cleans too many waiters, the other waiter
> of child in F_SETLKW was also cleared and it will never get a result
> back from dlm_controld. I fixed that with an additional check on pid
> in [1], but I have no idea about other side effects that could have
> occurred as FL_CLOSE is also being used on other parts in the DLM
> plock handling.
>
> I rechecked the behaviour with the cancellation feature and sent
> SIGKILL and the issue was gone without changing anything in user
> space. The only thing I see why it would not have the old behaviour
> (killable - that having the mentioned issue above) is that the
> dlm_controld version is too old. To not run into this known issue we
> just do a wait_event() that does not have those issues.
>
> The mentioned "cancellation fails" - is not that it failed to cancel
> the lock, there is some unexpected behaviour of dlm_controld, only
> then we do wait_event() e.g. when we receive -EINVAL because
> dlm_controld does not understand the op.

What happens on a system that has this kernel change, but doesn't have
the corresponding dlm_controld change for DLM_PLOCK_OP_CANCEL support?
In this scenario, when a process is killed with SIGKILL, the kernel
will send a DLM_PLOCK_OP_CANCEL request to dlm_controld. dlm_controld
doesn't understand the DLM_PLOCK_OP_CANCEL request, so I assume that
it will return a value other than 0 or -ENOENT. As a consequence, we
will end up in wait_event(), which isn't interruptible. So before this
kernel change, the process would be killable, but with this kernel
change, it isn't killable anymore.

I'm worried about this scenario because it isn't entirely unrealistic
for a system to end up with this kernel change but without the
corresponding user-space change.

Andreas
Alexander Aring March 26, 2024, 1:02 p.m. UTC | #4
Hi,

On Tue, Mar 26, 2024 at 7:31 AM Andreas Gruenbacher <agruenba@redhat.com> wrote:
>
> On Tue, Mar 26, 2024 at 1:32 AM Alexander Aring <aahringo@redhat.com> wrote:
> > Hi,
> >
> > On Mon, Mar 25, 2024 at 11:09 AM Andreas Gruenbacher
> > <agruenba@redhat.com> wrote:
> > >
> > > On Tue, Jul 18, 2023 at 8:07 PM Alexander Aring <aahringo@redhat.com> wrote:
> > > > This patch implements dlm plock F_SETLKW interruption feature. If a
> > > > blocking posix lock request got interrupted in user space by a signal a
> > > > cancellation request for a non granted lock request to the user space
> > > > lock manager will be send. The user lock manager answers either with
> > > > zero or a negative errno code. A errno of -ENOENT signals that there is
> > > > currently no blocking lock request waiting to being granted. In case of
> > > > -ENOENT it was probably to late to request a cancellation and the
> > > > pending lock got granted. In any error case we will wait until the lock
> > > > is being granted as cancellation failed, this causes also that in case
> > > > of an older user lock manager returning -EINVAL we will wait as
> > > > cancellation is not supported which should be fine. If a user requires
> > > > this feature the user should update dlm user space to support lock
> > > > request cancellation.
> > > >
> > > > Signed-off-by: Alexander Aring <aahringo@redhat.com>
> > > > ---
> > > >  fs/dlm/plock.c                 | 56 ++++++++++++++++++++++------------
> > > >  include/uapi/linux/dlm_plock.h |  1 +
> > > >  2 files changed, 37 insertions(+), 20 deletions(-)
> > > >
> > > > diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
> > > > index a34f605d8505..a8ffa0760913 100644
> > > > --- a/fs/dlm/plock.c
> > > > +++ b/fs/dlm/plock.c
> > > > @@ -74,30 +74,26 @@ static void send_op(struct plock_op *op)
> > > >         wake_up(&send_wq);
> > > >  }
> > > >
> > > > -/* If a process was killed while waiting for the only plock on a file,
> > > > -   locks_remove_posix will not see any lock on the file so it won't
> > > > -   send an unlock-close to us to pass on to userspace to clean up the
> > > > -   abandoned waiter.  So, we have to insert the unlock-close when the
> > > > -   lock call is interrupted. */
> > > > -
> > > > -static void do_unlock_close(const struct dlm_plock_info *info)
> > > > +static int do_lock_cancel(const struct dlm_plock_info *orig_info)
> > > >  {
> > > >         struct plock_op *op;
> > > > +       int rv;
> > > >
> > > >         op = kzalloc(sizeof(*op), GFP_NOFS);
> > > >         if (!op)
> > > > -               return;
> > > > +               return -ENOMEM;
> > > > +
> > > > +       op->info = *orig_info;
> > > > +       op->info.optype = DLM_PLOCK_OP_CANCEL;
> > > > +       op->info.wait = 0;
> > > >
> > > > -       op->info.optype         = DLM_PLOCK_OP_UNLOCK;
> > > > -       op->info.pid            = info->pid;
> > > > -       op->info.fsid           = info->fsid;
> > > > -       op->info.number         = info->number;
> > > > -       op->info.start          = 0;
> > > > -       op->info.end            = OFFSET_MAX;
> > > > -       op->info.owner          = info->owner;
> > > > -
> > > > -       op->info.flags |= DLM_PLOCK_FL_CLOSE;
> > > >         send_op(op);
> > > > +       wait_event(recv_wq, (op->done != 0));
> > > > +
> > > > +       rv = op->info.rv;
> > > > +
> > > > +       dlm_release_plock_op(op);
> > > > +       return rv;
> > > >  }
> > > >
> > > >  int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
> > > > @@ -156,7 +152,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
> > > >         send_op(op);
> > > >
> > > >         if (op->info.wait) {
> > > > -               rv = wait_event_killable(recv_wq, (op->done != 0));
> > > > +               rv = wait_event_interruptible(recv_wq, (op->done != 0));
> > >
> > > It seems that this patch leads to an unnecessary change in behavior
> > > when a fatal signal is received (fatal_signal_pending()): before, the
> > > process would terminate. Now, it will try to cancel the lock, and when
> > > that fails, the process will keep waiting. In case of a fatal signal,
> > > can we skip the cancelling and do what we did before?
> >
> > From my understanding interruptible() "reacts" on everything that is
> > also killable() and returns -ERESTARTSYS on "fatal signal". I even
> > tested it because wait_event_killable() has an issue, see reproducer
> > [0]. The issue was that it cleans too many waiters, the other waiter
> > of child in F_SETLKW was also cleared and it will never get a result
> > back from dlm_controld. I fixed that with an additional check on pid
> > in [1], but I have no idea about other side effects that could have
> > occurred as FL_CLOSE is also being used on other parts in the DLM
> > plock handling.
> >
> > I rechecked the behaviour with the cancellation feature and sent
> > SIGKILL and the issue was gone without changing anything in user
> > space. The only thing I see why it would not have the old behaviour
> > (killable - that having the mentioned issue above) is that the
> > dlm_controld version is too old. To not run into this known issue we
> > just do a wait_event() that does not have those issues.
> >
> > The mentioned "cancellation fails" - is not that it failed to cancel
> > the lock, there is some unexpected behaviour of dlm_controld, only
> > then we do wait_event() e.g. when we receive -EINVAL because
> > dlm_controld does not understand the op.
>
> What happens on a system that has this kernel change, but doesn't have
> the corresponding dlm_controld change for DLM_PLOCK_OP_CANCEL support?
> In this scenario, when a process is killed with SIGKILL, the kernel
> will send a DLM_PLOCK_OP_CANCEL request to dlm_controld. dlm_controld
> doesn't understand the DLM_PLOCK_OP_CANCEL request, so I assume that
> it will return a value other than 0 or -ENOENT. As a consequence, we
> will end up in wait_event(), which isn't interruptible. So before this
> kernel change, the process would be killable, but with this kernel
> change, it isn't killable anymore.
>
> I'm worried about this scenario because it isn't entirely unrealistic
> for a system to end up with this kernel change but without the
> corresponding user-space change.
>

As I said before, the previous behaviour had broken cases, and that
worries me more than somebody running into this case: whoever does will
realize it is not killable anymore and may then figure out that they
need to upgrade dlm_controld.

- Alex

Patch

diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index a34f605d8505..a8ffa0760913 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -74,30 +74,26 @@  static void send_op(struct plock_op *op)
 	wake_up(&send_wq);
 }
 
-/* If a process was killed while waiting for the only plock on a file,
-   locks_remove_posix will not see any lock on the file so it won't
-   send an unlock-close to us to pass on to userspace to clean up the
-   abandoned waiter.  So, we have to insert the unlock-close when the
-   lock call is interrupted. */
-
-static void do_unlock_close(const struct dlm_plock_info *info)
+static int do_lock_cancel(const struct dlm_plock_info *orig_info)
 {
 	struct plock_op *op;
+	int rv;
 
 	op = kzalloc(sizeof(*op), GFP_NOFS);
 	if (!op)
-		return;
+		return -ENOMEM;
+
+	op->info = *orig_info;
+	op->info.optype = DLM_PLOCK_OP_CANCEL;
+	op->info.wait = 0;
 
-	op->info.optype		= DLM_PLOCK_OP_UNLOCK;
-	op->info.pid		= info->pid;
-	op->info.fsid		= info->fsid;
-	op->info.number		= info->number;
-	op->info.start		= 0;
-	op->info.end		= OFFSET_MAX;
-	op->info.owner		= info->owner;
-
-	op->info.flags |= DLM_PLOCK_FL_CLOSE;
 	send_op(op);
+	wait_event(recv_wq, (op->done != 0));
+
+	rv = op->info.rv;
+
+	dlm_release_plock_op(op);
+	return rv;
 }
 
 int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
@@ -156,7 +152,7 @@  int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
 	send_op(op);
 
 	if (op->info.wait) {
-		rv = wait_event_killable(recv_wq, (op->done != 0));
+		rv = wait_event_interruptible(recv_wq, (op->done != 0));
 		if (rv == -ERESTARTSYS) {
 			spin_lock(&ops_lock);
 			/* recheck under ops_lock if we got a done != 0,
@@ -166,17 +162,37 @@  int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
 				spin_unlock(&ops_lock);
 				goto do_lock_wait;
 			}
-			list_del(&op->list);
 			spin_unlock(&ops_lock);
 
+			rv = do_lock_cancel(&op->info);
+			switch (rv) {
+			case 0:
+				/* waiter was deleted in user space, answer will never come
+				 * remove original request. The original request must be
+				 * on recv_list because the answer of do_lock_cancel()
+				 * synchronized it.
+				 */
+				spin_lock(&ops_lock);
+				list_del(&op->list);
+				spin_unlock(&ops_lock);
+				rv = -EINTR;
+				break;
+			case -ENOENT:
+				/* cancellation wasn't successful but op should be done */
+				fallthrough;
+			default:
+				/* internal error doing cancel we need to wait */
+				goto wait;
+			}
+
 			log_debug(ls, "%s: wait interrupted %x %llx pid %d",
 				  __func__, ls->ls_global_id,
 				  (unsigned long long)number, op->info.pid);
-			do_unlock_close(&op->info);
 			dlm_release_plock_op(op);
 			goto out;
 		}
 	} else {
+wait:
 		wait_event(recv_wq, (op->done != 0));
 	}
 
diff --git a/include/uapi/linux/dlm_plock.h b/include/uapi/linux/dlm_plock.h
index 63b6c1fd9169..eb66afcac40e 100644
--- a/include/uapi/linux/dlm_plock.h
+++ b/include/uapi/linux/dlm_plock.h
@@ -22,6 +22,7 @@  enum {
 	DLM_PLOCK_OP_LOCK = 1,
 	DLM_PLOCK_OP_UNLOCK,
 	DLM_PLOCK_OP_GET,
+	DLM_PLOCK_OP_CANCEL,
 };
 
 #define DLM_PLOCK_FL_CLOSE 1
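
For context, a toy sketch of what the user space side of the new op could look like. The device path, the read/write loop details and the waiter lookup are assumptions for illustration only; dlm_controld's actual plock handling differs. Per the commit message, the expected answers are 0 (waiter cancelled) or -ENOENT (too late, already granted):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/dlm_plock.h>

/* hypothetical stand-in: a real lock manager would look up the matching
 * pending waiter and remove it; this stub always reports "not found" */
static int remove_pending_waiter(const struct dlm_plock_info *info)
{
	(void)info;
	return -1;
}

static void handle_cancel(int dev_fd, struct dlm_plock_info *info)
{
	if (remove_pending_waiter(info) == 0)
		info->rv = 0;		/* waiter removed, no lock result will follow */
	else
		info->rv = -ENOENT;	/* too late, the lock was (or will be) granted */

	/* write the result back to the kernel */
	if (write(dev_fd, info, sizeof(*info)) != sizeof(*info))
		perror("dlm_plock write");
}

int main(void)
{
	struct dlm_plock_info info;
	int fd = open("/dev/misc/dlm_plock", O_RDWR);	/* device path is an assumption */

	if (fd < 0) {
		perror("open dlm_plock device");
		return 1;
	}
	while (read(fd, &info, sizeof(info)) == sizeof(info)) {
		if (info.optype == DLM_PLOCK_OP_CANCEL)
			handle_cancel(fd, &info);
		/* DLM_PLOCK_OP_LOCK/UNLOCK/GET handling omitted in this sketch */
	}
	close(fd);
	return 0;
}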