[06/11] ceph: remove reliance on bdi congestion

Message ID 164549983739.9187.14895675781408171186.stgit@noble.brown (mailing list archive)
State New, archived
Series Remove remaining parts of congestion tracking code.

Commit Message

NeilBrown Feb. 22, 2022, 3:17 a.m. UTC
The bdi congestion tracking is not widely used and will be removed.

CephFS is one of a small number of filesystems that use it, setting
just the async (write) congestion flag at what it determines are
appropriate times.

The only remaining effect of the async flag is to cause (some)
WB_SYNC_NONE writes to be skipped.

So instead of setting the flag, set an internal flag and change:
 - .writepages to do nothing if WB_SYNC_NONE and the flag is set
 - .writepage to return AOP_WRITEPAGE_ACTIVATE if WB_SYNC_NONE
    and the flag is set.

The writepage change causes a behavioural change in that pageout() can
now return PAGE_ACTIVATE instead of PAGE_KEEP, so SetPageActive() will
be called on the page, which (I think) will further delay the next
attempt at writeout.  This might be a good thing.

Signed-off-by: NeilBrown <neilb@suse.de>
---
 fs/ceph/addr.c  |   22 +++++++++++++---------
 fs/ceph/super.c |    1 +
 fs/ceph/super.h |    1 +
 3 files changed, 15 insertions(+), 9 deletions(-)
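
For context on the behaviour change described above: pageout() calls
->writepage with sync_mode set to WB_SYNC_NONE, and it is the
AOP_WRITEPAGE_ACTIVATE return value that becomes PAGE_ACTIVATE.  An
abridged sketch of the relevant logic in mm/vmscan.c (from kernels of
roughly this vintage; details vary by version):

    /* mm/vmscan.c: pageout(), abridged */
    struct writeback_control wbc = {
            .sync_mode = WB_SYNC_NONE,      /* so the new ceph check fires */
            .nr_to_write = SWAP_CLUSTER_MAX,
            .for_reclaim = 1,
    };

    SetPageReclaim(page);
    res = mapping->a_ops->writepage(page, &wbc);
    if (res < 0)
            handle_write_error(mapping, page, res);
    if (res == AOP_WRITEPAGE_ACTIVATE) {
            ClearPageReclaim(page);
            return PAGE_ACTIVATE;   /* shrink_page_list() responds with
                                     * SetPageActive() at activate_locked */
    }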

Comments

Jeffrey Layton Feb. 23, 2022, 3:43 p.m. UTC | #1
On Tue, 2022-02-22 at 14:17 +1100, NeilBrown wrote:
> [...]
> 
> The writepage change causes a behavioural change in that pageout() can
> now return PAGE_ACTIVATE instead of PAGE_KEEP, so SetPageActive() will
> be called on the page, which (I think) will further delay the next
> attempt at writeout.  This might be a good thing.
> 
> Signed-off-by: NeilBrown <neilb@suse.de>

Maybe. I have to wonder whether all of this is really useful.

When things are congested we'll avoid trying to issue new writeback
requests. Note that we don't prevent new pages from being dirtied here --
only their being written back.

This also doesn't do anything in the DIO or sync_write cases, so if we
lose caps or are doing DIO, we'll just keep churning out "unlimited"
writes in those cases anyway.

With ceph, too, we're not likely to be dealing with a single server.
One OSD could be struggling to keep up while others are OK. Do we
really want to throttle writeback to the ones that are fine?

FWIW, the original patch that added this stuff was this:

commit 2baba25019ec564cd247af74013873d69a0b8190
Author: Yehuda Sadeh <yehuda@hq.newdream.net>
Date:   Fri Dec 18 13:51:57 2009 -0800

    ceph: writeback congestion control
    
    Set bdi congestion bit when amount of write data in flight exceeds adjustable
    threshold.
    
    Signed-off-by: Yehuda Sadeh <yehuda@hq.newdream.net>
    Signed-off-by: Sage Weil <sage@newdream.net>

...but it's pretty scant on details.
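
For reference, the thresholds that commit introduced are defined in
fs/ceph/super.h: the congestion_kb mount option is converted to a page
count, with the "off" threshold sitting at 75% of the "on" threshold to
give some hysteresis:

    /* fs/ceph/super.h */
    #define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
    #define CONGESTION_OFF_THRESH(congestion_kb)                        \
            (CONGESTION_ON_THRESH(congestion_kb) -                      \
             (CONGESTION_ON_THRESH(congestion_kb) >> 2))

The default congestion_kb (default_congestion_kb() in fs/ceph/super.c)
scales roughly with the square root of total RAM, which is presumably
why it rarely triggers on large-memory machines.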

The only reason I can see to throttle writeback like this is to prevent
you from having too much memory tied up in writeback requests, but we
aren't limiting other requests in the same way.

Maybe we would do better to just rip this stuff out?

> [...]
NeilBrown Feb. 24, 2022, 5:41 a.m. UTC | #2
On Thu, 24 Feb 2022, Jeff Layton wrote:
> On Tue, 2022-02-22 at 14:17 +1100, NeilBrown wrote:
> > [...]
> 
> Maybe. I have to wonder whether all of this is really useful.
> 
> When things are congested we'll avoid trying to issue new writeback
> requests. Note that we don't prevent new pages from being dirtied here --
> only their being written back.
> 
> This also doesn't do anything in the DIO or sync_write cases, so if we
> lose caps or are doing DIO, we'll just keep churning out "unlimited"
> writes in those cases anyway.

I think the point of congestion tracking is to differentiate between
sync and async IO.  Or maybe "required" and "optional".
Eventually the "optional" IO will become required, but if we can delay
it until a time when there is less "required" IO, then maybe we can
improve perceived latency.

"optional" IO here is write-back and read-ahead.  If the load of
"required" IO is bursty, and if we can shuffle that optional stuff into
the quiet periods, we might win.

Whether this is a real need is an important question that I don't have an
answer for.  And whether it is better to leave delayed requests in the
page cache, or in the low-level queue with sync requests able to
overtake them - I don't know.  If you have multiple low-level queues as
you say you can with ceph, then lower might be better.

The block layer has REQ_RAHEAD ... maybe those requests should get a
lower priority ... though I don't think they do.
NFS has a 3-level priority queue, with write-back going at a lower
priority ... I think... for NFSv3 at least.

Sometimes I suspect that as all our transports have become faster, we
have been able to ignore the extra latency caused by poor scheduling of
optional requests.  But at other times when my recently upgraded desktop
is struggling to view a web page while compiling a kernel ...  I wonder
if maybe we don't have the balance right any more.

So maybe you are right - maybe we can rip all this stuff out.

Or maybe not.

Thanks,
NeilBrown
Jeffrey Layton Feb. 24, 2022, 11:30 a.m. UTC | #3
On Thu, 2022-02-24 at 16:41 +1100, NeilBrown wrote:
> On Thu, 24 Feb 2022, Jeff Layton wrote:
> > On Tue, 2022-02-22 at 14:17 +1100, NeilBrown wrote:
> > > [...]
> > 
> > Maybe. I have to wonder whether all of this is really useful.
> > 
> > When things are congested we'll avoid trying to issue new writeback
> > requests. Note that we don't prevent new pages from being dirtied here --
> > only their being written back.
> > 
> > This also doesn't do anything in the DIO or sync_write cases, so if we
> > lose caps or are doing DIO, we'll just keep churning out "unlimited"
> > writes in those cases anyway.
> 
> I think the point of congestion tracking is to differentiate between
> sync and async IO.  Or maybe "required" and "optional".
> Eventually the "optional" IO will become required, but if we can delay
> it until a time when there is less "required" IO, then maybe we can
> improve perceived latency.
> 
> "optional" IO here is write-back and read-ahead.  If the load of
> "required" IO is bursty, and if we can shuffle that optional stuff into
> the quiet periods, we might win.
> 

In that case, maybe we should be counting in-flight reads too and deny
readahead when the count crosses some threshold? It seems a bit silly to
only look at writes when it comes to "congestion".
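
Something like the existing write-side accounting, mirrored for reads
(purely a hypothetical sketch -- ceph_fs_client has no read_count or
read_congested fields today):

    /* hypothetical: track in-flight reads alongside writeback_count */
    if (atomic_long_inc_return(&fsc->read_count) >
        CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
            fsc->read_congested = true;     /* readahead paths would check
                                             * this and skip optional reads */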

> Whether this is a real need is an important question that I don't have an
> answer for.  And whether it is better to leave delayed requests in the
> page cache, or in the low-level queue with sync requests able to
> overtake them - I don't know.  If you have multiple low-level queues as
> you say you can with ceph, then lower might be better.
> 
> The block layer has REQ_RAHEAD ... maybe those requests should get a
> lower priority ... though I don't think they do.
> NFS has a 3-level priority queue, with write-back going at a lower
> priority ... I think... for NFSv3 at least.
> 
> Sometimes I suspect that as all our transports have become faster, we
> have been able to ignore the extra latency caused by poor scheduling of
> optional requests.  But at other times when my recently upgraded desktop
> is struggling to view a web page while compiling a kernel ...  I wonder
> if maybe we don't have the balance right any more.
> 
> So maybe you are right - maybe we can rip all this stuff out.
> 

I lean more toward just removing it. The existing implementation seems a
bit half-baked with the gaps in what's being counted. Granted, the
default congestion threshold is pretty high with modern memory sizes, so
it probably doesn't come into play much in practice, but removing it
would reduce some complexity in the client.
NeilBrown March 4, 2022, 2:47 a.m. UTC | #4
On Thu, 24 Feb 2022, Jeff Layton wrote:
> On Thu, 2022-02-24 at 16:41 +1100, NeilBrown wrote:
> > On Thu, 24 Feb 2022, Jeff Layton wrote:
> > > On Tue, 2022-02-22 at 14:17 +1100, NeilBrown wrote:
> > > > [...]
> > > 
> > > [...]
> > 
> > I think the point of congestion tracking is to differentiate between
> > sync and async IO.  Or maybe "required" and "optional".
> > Eventually the "optional" IO will become required, but if we can delay
> > it until a time when there is less "required" IO, then maybe we can
> > improve perceived latency.
> > 
> > "optional" IO here is write-back and read-ahead.  If the load of
> > "required" IO is bursty, and if we can shuffle that optional stuff into
> > the quiet periods, we might win.
> > 
> 
> In that case, maybe we should be counting in-flight reads too and deny
> readahead when the count crosses some threshold? It seems a bit silly to
> only look at writes when it comes to "congestion".

I agree that seems a bit silly.

> 
> > Whether this is a real need is an important question that I don't have an
> > answer for.  And whether it is better to leave delayed requests in the
> > page cache, or in the low-level queue with sync requests able to
> > overtake them - I don't know.  If you have multiple low-level queues as
> > you say you can with ceph, then lower might be better.
> > 
> > The block layer has REQ_RAHEAD ... maybe those requests should get a
> > lower priority ... though I don't think they do.
> > NFS has a 3-level priority queue, with write-back going at a lower
> > priority ... I think... for NFSv3 at least.
> > 
> > Sometimes I suspect that as all our transports have become faster, we
> > have been able to ignore the extra latency caused by poor scheduling of
> > optional requests.  But at other times when my recently upgraded desktop
> > is struggling to view a web page while compiling a kernel ...  I wonder
> > if maybe we don't have the balance right any more.
> > 
> > So maybe you are right - maybe we can rip all this stuff out.
> > 
> 
> I lean more toward just removing it. The existing implementation seems a
> bit half-baked with the gaps in what's being counted. Granted, the
> default congestion threshold is pretty high with modern memory sizes, so
> it probably doesn't come into play much in practice, but removing it
> would reduce some complexity in the client.

I'd love to have some test that could reliably generate congestion and
measure latencies for other IO.  Without that, it is mostly guesswork.
So I cannot argue against your proposal, and do agree that removing the
code would reduce complexity.  I have no idea what the costs might be -
if any.  Hence my focus was on not changing behaviour.

Thanks,
NeilBrown
Jeffrey Layton March 4, 2022, 11:14 a.m. UTC | #5
On Fri, 2022-03-04 at 13:47 +1100, NeilBrown wrote:
> On Thu, 24 Feb 2022, Jeff Layton wrote:
> > On Thu, 2022-02-24 at 16:41 +1100, NeilBrown wrote:
> > > On Thu, 24 Feb 2022, Jeff Layton wrote:
> > > > On Tue, 2022-02-22 at 14:17 +1100, NeilBrown wrote:
> > > > > [...]
> > > > 
> > > > [...]
> > > 
> > > [...]
> > > 
> > 
> > In that case, maybe we should be counting in-flight reads too and deny
> > readahead when the count crosses some threshold? It seems a bit silly to
> > only look at writes when it comes to "congestion".
> 
> I agree that seems a bit silly.
> 
> > 
> > > [...]
> > > 
> > 
> > I lean more toward just removing it. The existing implementation seems a
> > bit half-baked with the gaps in what's being counted. Granted, the
> > default congestion threshold is pretty high with modern memory sizes, so
> > it probably doesn't come into play much in practice, but removing it
> > would reduce some complexity in the client.
> 
> I'd love to have some test that could reliably generate congestion and
> measure latencies for other IO.  Without that, it is mostly guesswork.
> So I cannot argue against your proposal, and do agree that removing the
> code would reduce complexity.  I have no idea what the costs might be -
> if any.  Hence my focus was on not changing behaviour.
> 

Fair enough -- caution is warranted.

I think the thing to do here is to take your patch for now, and then we
can look at just removing all of this stuff at some point in the future.
That would also give us a fallback that doesn't require the old
congestion infrastructure, should the throttling turn out to be needed.

I'm assuming this is going in via Andrew's tree, but let us know if
you'd like us to take any of these in via the ceph tree.

Thanks,

Patch

diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index c98e5238a1b6..dc7af34640dd 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -563,7 +563,7 @@  static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 
 	if (atomic_long_inc_return(&fsc->writeback_count) >
 	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
-		set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
+		fsc->write_congested = true;
 
 	req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), page_off, &len, 0, 1,
 				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc,
@@ -623,7 +623,7 @@  static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 
 	if (atomic_long_dec_return(&fsc->writeback_count) <
 	    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
-		clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
+		fsc->write_congested = false;
 
 	return err;
 }
@@ -635,6 +635,10 @@  static int ceph_writepage(struct page *page, struct writeback_control *wbc)
 	BUG_ON(!inode);
 	ihold(inode);
 
+	if (wbc->sync_mode == WB_SYNC_NONE &&
+	    ceph_inode_to_client(inode)->write_congested)
+		return AOP_WRITEPAGE_ACTIVATE;
+
 	wait_on_page_fscache(page);
 
 	err = writepage_nounlock(page, wbc);
@@ -707,8 +711,7 @@  static void writepages_finish(struct ceph_osd_request *req)
 			if (atomic_long_dec_return(&fsc->writeback_count) <
 			     CONGESTION_OFF_THRESH(
 					fsc->mount_options->congestion_kb))
-				clear_bdi_congested(inode_to_bdi(inode),
-						    BLK_RW_ASYNC);
+				fsc->write_congested = false;
 
 			ceph_put_snap_context(detach_page_private(page));
 			end_page_writeback(page);
@@ -760,6 +763,10 @@  static int ceph_writepages_start(struct address_space *mapping,
 	bool done = false;
 	bool caching = ceph_is_cache_enabled(inode);
 
+	if (wbc->sync_mode == WB_SYNC_NONE &&
+	    fsc->write_congested)
+		return 0;
+
 	dout("writepages_start %p (mode=%s)\n", inode,
 	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
 	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
@@ -954,11 +961,8 @@  static int ceph_writepages_start(struct address_space *mapping,
 
 			if (atomic_long_inc_return(&fsc->writeback_count) >
 			    CONGESTION_ON_THRESH(
-				    fsc->mount_options->congestion_kb)) {
-				set_bdi_congested(inode_to_bdi(inode),
-						  BLK_RW_ASYNC);
-			}
-
+				    fsc->mount_options->congestion_kb))
+				fsc->write_congested = true;
 
 			pages[locked_pages++] = page;
 			pvec.pages[i] = NULL;
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index bf79f369aec6..4a3b77d049c7 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -802,6 +802,7 @@  static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
 	fsc->have_copy_from2 = true;
 
 	atomic_long_set(&fsc->writeback_count, 0);
+	fsc->write_congested = false;
 
 	err = -ENOMEM;
 	/*
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 67f145e1ae7a..0bd97aea2319 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -121,6 +121,7 @@  struct ceph_fs_client {
 	struct ceph_mds_client *mdsc;
 
 	atomic_long_t writeback_count;
+	bool write_congested;
 
 	struct workqueue_struct *inode_wq;
 	struct workqueue_struct *cap_wq;