diff mbox series

[RFC,2/4] ceph: don't mark mount as SHUTDOWN when recovering session

Message ID 20200925140851.320673-3-jlayton@kernel.org
State New, archived
Headers show
Series ceph: fix spurious recover_session=clean errors | expand

Commit Message

Jeff Layton Sept. 25, 2020, 2:08 p.m. UTC
When recovering a session (à la recover_session=clean), we want to do
all of the operations that we do on a forced umount, but changing the
mount state to SHUTDOWN is wrong and can cause queued MDS requests to
fail when the session comes back.

Only mark it as SHUTDOWN when umount_begin is called.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
---
 fs/ceph/super.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

Comments

Yan, Zheng Sept. 29, 2020, 8:20 a.m. UTC | #1
On Fri, Sep 25, 2020 at 10:08 PM Jeff Layton <jlayton@kernel.org> wrote:
>
> When recovering a session (a'la recover_session=clean), we want to do
> all of the operations that we do on a forced umount, but changing the
> mount state to SHUTDOWN is wrong and can cause queued MDS requests to
> fail when the session comes back.
>

The code that cleans up the page cache checks for the SHUTDOWN state.

> Only mark it as SHUTDOWN when umount_begin is called.
>
> Signed-off-by: Jeff Layton <jlayton@kernel.org>
> ---
>  fs/ceph/super.c | 13 +++++++++----
>  1 file changed, 9 insertions(+), 4 deletions(-)
>
> diff --git a/fs/ceph/super.c b/fs/ceph/super.c
> index 2516304379d3..46a0e4e1b177 100644
> --- a/fs/ceph/super.c
> +++ b/fs/ceph/super.c
> @@ -832,6 +832,13 @@ static void destroy_caches(void)
>         ceph_fscache_unregister();
>  }
>
> +static void __ceph_umount_begin(struct ceph_fs_client *fsc)
> +{
> +       ceph_osdc_abort_requests(&fsc->client->osdc, -EIO);
> +       ceph_mdsc_force_umount(fsc->mdsc);
> +       fsc->filp_gen++; // invalidate open files
> +}
> +
>  /*
>   * ceph_umount_begin - initiate forced umount.  Tear down the
>   * mount, skipping steps that may hang while waiting for server(s).
> @@ -844,9 +851,7 @@ static void ceph_umount_begin(struct super_block *sb)
>         if (!fsc)
>                 return;
>         fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
> -       ceph_osdc_abort_requests(&fsc->client->osdc, -EIO);
> -       ceph_mdsc_force_umount(fsc->mdsc);
> -       fsc->filp_gen++; // invalidate open files
> +       __ceph_umount_begin(fsc);
>  }
>
>  static const struct super_operations ceph_super_ops = {
> @@ -1235,7 +1240,7 @@ int ceph_force_reconnect(struct super_block *sb)
>         struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
>         int err = 0;
>
> -       ceph_umount_begin(sb);
> +       __ceph_umount_begin(fsc);
>
>         /* Make sure all page caches get invalidated.
>          * see remove_session_caps_cb() */
> --
> 2.26.2
>
Jeff Layton Sept. 29, 2020, 12:30 p.m. UTC | #2
On Tue, 2020-09-29 at 16:20 +0800, Yan, Zheng wrote:
> On Fri, Sep 25, 2020 at 10:08 PM Jeff Layton <jlayton@kernel.org> wrote:
> > When recovering a session (a'la recover_session=clean), we want to do
> > all of the operations that we do on a forced umount, but changing the
> > mount state to SHUTDOWN is wrong and can cause queued MDS requests to
> > fail when the session comes back.
> > 
> 
> The code that cleans up the page cache checks for the SHUTDOWN state.
> 

Ok, so we do need to do something else there if we don't mark the thing
SHUTDOWN. Maybe we ought to declare a new mount_state for
this...CEPH_MOUNT_RECOVERING ?

> > Only mark it as SHUTDOWN when umount_begin is called.
> > 
> > Signed-off-by: Jeff Layton <jlayton@kernel.org>
> > ---
> >  fs/ceph/super.c | 13 +++++++++----
> >  1 file changed, 9 insertions(+), 4 deletions(-)
> > 
> > diff --git a/fs/ceph/super.c b/fs/ceph/super.c
> > index 2516304379d3..46a0e4e1b177 100644
> > --- a/fs/ceph/super.c
> > +++ b/fs/ceph/super.c
> > @@ -832,6 +832,13 @@ static void destroy_caches(void)
> >         ceph_fscache_unregister();
> >  }
> > 
> > +static void __ceph_umount_begin(struct ceph_fs_client *fsc)
> > +{
> > +       ceph_osdc_abort_requests(&fsc->client->osdc, -EIO);
> > +       ceph_mdsc_force_umount(fsc->mdsc);
> > +       fsc->filp_gen++; // invalidate open files
> > +}
> > +
> >  /*
> >   * ceph_umount_begin - initiate forced umount.  Tear down the
> >   * mount, skipping steps that may hang while waiting for server(s).
> > @@ -844,9 +851,7 @@ static void ceph_umount_begin(struct super_block *sb)
> >         if (!fsc)
> >                 return;
> >         fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
> > -       ceph_osdc_abort_requests(&fsc->client->osdc, -EIO);
> > -       ceph_mdsc_force_umount(fsc->mdsc);
> > -       fsc->filp_gen++; // invalidate open files
> > +       __ceph_umount_begin(fsc);
> >  }
> > 
> >  static const struct super_operations ceph_super_ops = {
> > @@ -1235,7 +1240,7 @@ int ceph_force_reconnect(struct super_block *sb)
> >         struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
> >         int err = 0;
> > 
> > -       ceph_umount_begin(sb);
> > +       __ceph_umount_begin(fsc);
> > 
> >         /* Make sure all page caches get invalidated.
> >          * see remove_session_caps_cb() */
> > --
> > 2.26.2
> >
diff mbox series

Patch

diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 2516304379d3..46a0e4e1b177 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -832,6 +832,13 @@  static void destroy_caches(void)
 	ceph_fscache_unregister();
 }
 
+static void __ceph_umount_begin(struct ceph_fs_client *fsc)
+{
+	ceph_osdc_abort_requests(&fsc->client->osdc, -EIO);
+	ceph_mdsc_force_umount(fsc->mdsc);
+	fsc->filp_gen++; // invalidate open files
+}
+
 /*
  * ceph_umount_begin - initiate forced umount.  Tear down the
  * mount, skipping steps that may hang while waiting for server(s).
@@ -844,9 +851,7 @@  static void ceph_umount_begin(struct super_block *sb)
 	if (!fsc)
 		return;
 	fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
-	ceph_osdc_abort_requests(&fsc->client->osdc, -EIO);
-	ceph_mdsc_force_umount(fsc->mdsc);
-	fsc->filp_gen++; // invalidate open files
+	__ceph_umount_begin(fsc);
 }
 
 static const struct super_operations ceph_super_ops = {
@@ -1235,7 +1240,7 @@  int ceph_force_reconnect(struct super_block *sb)
 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
 	int err = 0;
 
-	ceph_umount_begin(sb);
+	__ceph_umount_begin(fsc);
 
 	/* Make sure all page caches get invalidated.
 	 * see remove_session_caps_cb() */