[1/3] fs/buffer.c: add new api to allow eof writeback

Message ID 20210426220552.45413-1-junxiao.bi@oracle.com (mailing list archive)
State New, archived
Series: [1/3] fs/buffer.c: add new api to allow eof writeback

Commit Message

Junxiao Bi April 26, 2021, 10:05 p.m. UTC
When doing truncate/fallocate for some filesystems like ocfs2, the
filesystem will zero some pages that are beyond the inode size and only
later update the inode size, so it needs this API to write back EOF
pages.

Cc: <stable@vger.kernel.org>
Signed-off-by: Junxiao Bi <junxiao.bi@oracle.com>
---
 fs/buffer.c                 | 14 +++++++++++---
 include/linux/buffer_head.h |  3 +++
 2 files changed, 14 insertions(+), 3 deletions(-)
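
For illustration, a minimal sketch of how a filesystem's ->writepage
could opt in to the new flag. The caller and get_block names below are
illustrative assumptions, not part of this series; only
__block_write_full_page_eof() comes from this patch.

#include <linux/buffer_head.h>

/*
 * Hypothetical caller of the new API: a ->writepage that passes
 * eof_write = true so buffers wholly beyond i_size are written out
 * instead of dropped. "example_writepage" and "example_get_block" are
 * made-up names for this sketch.
 */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;

	return __block_write_full_page_eof(inode, page, example_get_block,
					   wbc, end_buffer_async_write, true);
}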

Comments

Joseph Qi April 29, 2021, 11:58 a.m. UTC | #1
On 4/27/21 6:05 AM, Junxiao Bi wrote:
> When doing truncate/fallocate for some filesystems like ocfs2, the
> filesystem will zero some pages that are beyond the inode size and
> only later update the inode size, so it needs this API to write back
> EOF pages.
> 
> Cc: <stable@vger.kernel.org>
> Signed-off-by: Junxiao Bi <junxiao.bi@oracle.com>


Looks good.
Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Andreas Gruenbacher April 29, 2021, 5:14 p.m. UTC | #2
Junxiao,

On Tue, Apr 27, 2021 at 4:44 AM Junxiao Bi <junxiao.bi@oracle.com> wrote:
> When doing truncate/fallocate for some filesystems like ocfs2, the
> filesystem will zero some pages that are beyond the inode size and
> only later update the inode size, so it needs this API to write back
> EOF pages.

Is this in reaction to Jan's "[PATCH 0/12 v4] fs: Hole punch vs page
cache filling races" patch set [*]? It doesn't look like the kind of
patch Christoph would be happy with.

Thanks,
Andreas

[*] https://lore.kernel.org/linux-fsdevel/20210423171010.12-1-jack@suse.cz/

Junxiao Bi April 29, 2021, 6:07 p.m. UTC | #3
On 4/29/21 10:14 AM, Andreas Gruenbacher wrote:

> Junxiao,
>
> On Tue, Apr 27, 2021 at 4:44 AM Junxiao Bi <junxiao.bi@oracle.com> wrote:
>> [...]
> Is this in reaction to Jan's "[PATCH 0/12 v4] fs: Hole punch vs page
> cache filling races" patch set [*]? It doesn't look like the kind of
> patch Christoph would be happy with.

Thank you for pointing out that patch set. I think it is fixing a
different issue.

The issue here is that when extending the file size with
fallocate/truncate, if the original inode size falls in the middle of
the last cluster block (1M), the EOF part is zeroed with a buffer write
first, and only then is the new inode size updated. So there is a
window where dirty pages are beyond the inode size; if writeback kicks
in during that window, block_write_full_page will drop all those EOF
pages.

I guess gfs2 has a similar issue?

I think it would be good to provide an API that allows EOF writeback.
If this is not acceptable, do you have any advice on how to
improve/fix it?

Thanks,

Junxiao.


Jan Kara April 30, 2021, 12:47 p.m. UTC | #4
On Thu 29-04-21 11:07:15, Junxiao Bi wrote:
> On 4/29/21 10:14 AM, Andreas Gruenbacher wrote:
> > [...]
> 
> Thank you for pointing out that patch set. I think it is fixing a
> different issue.
> 
> The issue here is that when extending the file size with
> fallocate/truncate, if the original inode size falls in the middle of
> the last cluster block (1M), the EOF part is zeroed with a buffer
> write first, and only then is the new inode size updated. So there is
> a window where dirty pages are beyond the inode size; if writeback
> kicks in during that window, block_write_full_page will drop all
> those EOF pages.

I agree that the buffers describing the part of the cluster beyond i_size
won't be written. But the page cache will remain zeroed out, so that is
fine. So you only need to zero out the on-disk contents. Since this is
actually a physically contiguous range of blocks, why don't you just use
sb_issue_zeroout() to zero out the tail of the cluster? It will be more
efficient than going through the page cache and you also won't have to
tweak block_write_full_page()...

								Honza
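
For illustration, a minimal sketch of the approach Jan suggests,
assuming the filesystem has already mapped the cluster tail to a
contiguous physical block range; the helper name and both parameters
are made up for this example:

#include <linux/blkdev.h>

/*
 * Sketch only. @tail_block and @nr_blocks are in filesystem-block
 * units (sb_issue_zeroout() converts them to sectors internally) and
 * are assumed to come from the filesystem's extent lookup.
 */
static int zero_cluster_tail(struct super_block *sb, sector_t tail_block,
			     sector_t nr_blocks)
{
	/*
	 * Issues REQ_OP_WRITE_ZEROES where the device supports it, falls
	 * back to writing zero-filled pages otherwise, and waits for the
	 * I/O to complete before returning.
	 */
	return sb_issue_zeroout(sb, tail_block, nr_blocks, GFP_NOFS);
}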
Junxiao Bi April 30, 2021, 9:18 p.m. UTC | #5
On 4/30/21 5:47 AM, Jan Kara wrote:

> On Thu 29-04-21 11:07:15, Junxiao Bi wrote:
>> [...]
> I agree that the buffers describing the part of the cluster beyond
> i_size won't be written. But the page cache will remain zeroed out, so
> that is fine. So you only need to zero out the on-disk contents. Since
> this is actually a physically contiguous range of blocks, why don't
> you just use sb_issue_zeroout() to zero out the tail of the cluster?
> It will be more efficient than going through the page cache and you
> also won't have to tweak block_write_full_page()...

Thanks for the review.

The physical blocks to be zeroed are contiguous only when sparse mode
is enabled. If sparse mode is disabled, unwritten extents are not
supported by ocfs2, so all the blocks up to the new size will be zeroed
by the buffer write. Since sb_issue_zeroout() needs to wait for the I/O
to complete, there will be a lot of delay when extending the file size.
Wouldn't using writeback to flush asynchronously be more efficient?

Thanks,

Junxiao.

Jan Kara May 3, 2021, 10:29 a.m. UTC | #6
On Fri 30-04-21 14:18:15, Junxiao Bi wrote:
> On 4/30/21 5:47 AM, Jan Kara wrote:
> > [...]
> 
> Thanks for the review.
> 
> The physical blocks to be zeroed are contiguous only when sparse mode
> is enabled. If sparse mode is disabled, unwritten extents are not
> supported by ocfs2, so all the blocks up to the new size will be
> zeroed by the buffer write. Since sb_issue_zeroout() needs to wait for
> the I/O to complete, there will be a lot of delay when extending the
> file size. Wouldn't using writeback to flush asynchronously be more
> efficient?

It depends. Higher-end storage (e.g. NVMe or NAS, maybe some better SATA
flash disks as well) does support the WRITE_ZERO command, so you don't
actually have to write all those zeros. The storage will just internally
mark all those blocks as having zeros. This is rather fast, so I'd
expect the overall result to be faster than zeroing the page cache and
then writing all those pages with zeroes on transaction commit. But I
agree that for lower-end storage this may be slower because of the
synchronous writing of zeroes. That being said, your transaction commit
has to write those zeroes anyway, so the cost is mostly just shifted,
but it could still make a difference for some workloads. Not sure if
that matters, that is your call I'd say.

Also note that you could submit those zeroing bios asynchronously, but
that would be more coding and you would need to make sure they are
completed on transaction commit, so it probably isn't worth the
complexity.

								Honza
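
A hedged sketch of the asynchronous variant mentioned above, not a
tested implementation. Note that __blkdev_issue_zeroout() only builds
REQ_OP_WRITE_ZEROES bios and fails with -EOPNOTSUPP on devices without
that support (the zero-page fallback lives in blkdev_issue_zeroout());
tracking completion before transaction commit is left to the caller,
which is exactly the complexity Jan warns about:

#include <linux/blkdev.h>

/*
 * Sketch only: build a chain of zeroing bios and submit it without
 * waiting. The function name is made up for this example.
 */
static int zero_tail_async(struct block_device *bdev, sector_t sector,
			   sector_t nr_sects)
{
	struct bio *bio = NULL;
	int ret;

	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_NOFS,
				     &bio, 0);
	if (ret)
		return ret;	/* e.g. -EOPNOTSUPP: no WRITE_ZEROES support */
	if (bio)
		submit_bio(bio);	/* fire and forget; no wait here */
	return 0;
}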
Junxiao Bi May 3, 2021, 5:25 p.m. UTC | #7
On 5/3/21 3:29 AM, Jan Kara wrote:
> On Fri 30-04-21 14:18:15, Junxiao Bi wrote:
>> [...]
> It depends. Higher-end storage (e.g. NVMe or NAS, maybe some better
> SATA flash disks as well) does support the WRITE_ZERO command, so you
> don't actually have to write all those zeros. The storage will just
> internally mark all those blocks as having zeros. This is rather fast,
> so I'd expect the overall result to be faster than zeroing the page
> cache and then writing all those pages with zeroes on transaction
> commit. But I agree that for lower-end storage this may be slower
> because of the synchronous writing of zeroes. That being said, your
> transaction commit has to write those zeroes anyway, so the cost is
> mostly just shifted, but it could still make a difference for some
> workloads. Not sure if that matters, that is your call I'd say.

Ocfs2 is mostly used with SAN storage; I don't think it's common for SAN
storage to support the WRITE_ZERO command.

Is there anything bad about adding a new API to support EOF writeback?

Thanks,

Junxiao.

Jan Kara May 4, 2021, 9:02 a.m. UTC | #8
On Mon 03-05-21 10:25:31, Junxiao Bi wrote:
> 
> On 5/3/21 3:29 AM, Jan Kara wrote:
> > [...]
> 
> Ocfs2 is mostly used with SAN storage; I don't think it's common for
> SAN storage to support the WRITE_ZERO command.
> 
> Is there anything bad about adding a new API to support EOF writeback?

OK, now that I reread the whole series you've posted, I think I somewhat
misunderstood your original problem and intention. So let's first settle
on that. As far as I understand, the problem happens when extending a
file (either through truncate or through a write beyond i_size). When
that happens, we need to make sure that blocks (or their parts) that
used to be above i_size and are not going to be after the extension are
zeroed out. Usually, for simple filesystems such as ext2, there is only
one such block - the one straddling i_size - where we need to make sure
this happens. And we achieve that by zeroing out the tail of this block
on writeout (in the ->writepage() handler) and also by zeroing out the
tail of the block when reducing i_size (block_truncate_page() takes care
of this for ext2). So the tail of this block is zeroed out on disk at
all times and thus we have no problem when extending i_size.

Now what I described doesn't work for OCFS2. As far as I understand, the
reason is that when the block size is smaller than the page size and
OCFS2 uses a cluster size larger than the block size, the page
straddling i_size can also have some buffers mapped (with underlying
blocks allocated) that are fully outside of i_size. These blocks are
never written because of how __block_write_full_page() currently behaves
(it never writes buffers fully beyond i_size), so even if you zero out
the page cache and dirty the page, racing writeback can clear the dirty
bits without writing those blocks and so they are not zeroed out on disk
although we are about to expand i_size.

Did I understand the problem correctly? But what confuses me is that
ocfs2_zero_extend_range() (ocfs2_write_zero_page() in fact) actually
does extend i_size to contain the range it zeroes out while still
holding the page lock, so it should be protected against the race with
writeback I outlined above. What am I missing?

								Honza
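
For reference, the writeout-time tail zeroing described above is
visible in the generic block_write_full_page(); the following is a
condensed sketch of that function as it looked in fs/buffer.c around
the time of this thread (comments abridged):

int block_write_full_page(struct page *page, get_block_t *get_block,
			  struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/* Page fully inside i_size: write it normally. */
	if (page->index < end_index)
		return __block_write_full_page(inode, page, get_block, wbc,
					       end_buffer_async_write);

	/* Page fully outside i_size: truncate in progress, skip it. */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index >= end_index + 1 || !offset) {
		unlock_page(page);
		return 0;
	}

	/*
	 * The page straddles i_size: zero the tail on every writepage
	 * invocation (the page may be mmapped), so the on-disk tail of
	 * the block straddling i_size stays zeroed, as explained above.
	 */
	zero_user_segment(page, offset, PAGE_SIZE);
	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}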
Junxiao Bi May 4, 2021, 11:35 p.m. UTC | #9
On 5/4/21 2:02 AM, Jan Kara wrote:

> On Mon 03-05-21 10:25:31, Junxiao Bi wrote:
>> [...]
> OK, now that I reread the whole series you've posted, I think I
> somewhat misunderstood your original problem and intention. So let's
> first settle on that. As far as I understand, the problem happens when
> extending a file (either through truncate or through a write beyond
> i_size). When that happens, we need to make sure that blocks (or their
> parts) that used to be above i_size and are not going to be after the
> extension are zeroed out. Usually, for simple filesystems such as
> ext2, there is only one such block - the one straddling i_size - where
> we need to make sure this happens. And we achieve that by zeroing out
> the tail of this block on writeout (in the ->writepage() handler) and
> also by zeroing out the tail of the block when reducing i_size
> (block_truncate_page() takes care of this for ext2). So the tail of
> this block is zeroed out on disk at all times and thus we have no
> problem when extending i_size.
>
> Now what I described doesn't work for OCFS2. As far as I understand,
> the reason is that when the block size is smaller than the page size
> and OCFS2 uses a cluster size larger than the block size, the page
> straddling i_size can also have some buffers mapped (with underlying
> blocks allocated) that are fully outside of i_size. These blocks are
> never written because of how __block_write_full_page() currently
> behaves (it never writes buffers fully beyond i_size), so even if you
> zero out the page cache and dirty the page, racing writeback can clear
> the dirty bits without writing those blocks and so they are not zeroed
> out on disk although we are about to expand i_size.
Correct.
>
> Did I understand the problem correctly? But what confuses me is that
> ocfs2_zero_extend_range() (ocfs2_write_zero_page() in fact) actually
> does extend i_size to contain the range it zeroes out while still
> holding the page lock, so it should be protected against the race with
> writeback I outlined above. What am I missing?

Thank you for pointing this out. I didn't realize that
ocfs2_zero_extend() updates the inode size; with that, extending a file
with truncate will not suffer from this issue. The original issue
happened with qemu, which used the following fallocate calls to extend
the file size. The first fallocate punched a hole beyond the inode size
(2276196352) without updating i_size; the second one updated i_size.
The first one does some buffer writes to zero the EOF blocks in
ocfs2_remove_inode_range
->ocfs2_zero_partial_clusters->ocfs2_zero_range_for_truncate.

    fallocate(11, FALLOC_FL_KEEP_SIZE|FALLOC_FL_PUNCH_HOLE, 2276196352,
              65536) = 0
    fallocate(11, 0, 2276196352, 65536) = 0

Thanks,

Junxiao.
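
A hypothetical userspace reproducer of that call sequence; the offsets
mirror the trace above, while the path and file are made up:

#include <fcntl.h>
#include <linux/falloc.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path: any large file on an ocfs2 mount. */
	int fd = open("/mnt/ocfs2/image.qcow2", O_RDWR);

	/*
	 * Punch a hole beyond i_size: ocfs2_remove_inode_range() zeroes
	 * EOF blocks via buffer writes without updating i_size ...
	 */
	fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
		  2276196352, 65536);

	/*
	 * ... and only this second call extends i_size, leaving a window
	 * in which dirty pages sit beyond i_size and writeback may skip
	 * them.
	 */
	fallocate(fd, 0, 2276196352, 65536);

	close(fd);
	return 0;
}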

Jan Kara May 5, 2021, 11:43 a.m. UTC | #10
On Tue 04-05-21 16:35:53, Junxiao Bi wrote:
> On 5/4/21 2:02 AM, Jan Kara wrote:
> > [...]
> Correct.
> > [...]
> 
> Thank you for pointing this out. I didn't realize that
> ocfs2_zero_extend() updates the inode size; with that, extending a
> file with truncate will not suffer from this issue. The original issue
> happened with qemu, which used the following fallocate calls to extend
> the file size. The first fallocate punched a hole beyond the inode
> size (2276196352) without updating i_size; the second one updated
> i_size. The first one does some buffer writes to zero the EOF blocks
> in ocfs2_remove_inode_range
> ->ocfs2_zero_partial_clusters->ocfs2_zero_range_for_truncate.
>
>     fallocate(11, FALLOC_FL_KEEP_SIZE|FALLOC_FL_PUNCH_HOLE, 2276196352,
>               65536) = 0
>     fallocate(11, 0, 2276196352, 65536) = 0

OK, I see. And AFAICT it is not about writeback racing with the zeroing in
ocfs2_zero_range_for_truncate() but rather the filemap_fdatawrite_range()
there not writing out zeroed pages if they are beyond i_size. And honestly,
rather than trying to extend block_write_full_page() for this odd corner
case, I'd use sb_issue_zeroout() or code something similar to
__blkdev_issue_zero_pages() inside OCFS2. Because making pages in the page
cache beyond i_size work is always going to be fragile...

								Honza
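
A rough sketch of the second option Jan mentions - something in the
spirit of __blkdev_issue_zero_pages() - simplified to a single bio (a
real version would chain bios when the range doesn't fit); the function
name is made up and error handling is reduced to a minimum:

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Illustrative only: write ZERO_PAGE over a small physical range when
 * REQ_OP_WRITE_ZEROES is unavailable. Assumes the range fits in one
 * bio.
 */
static int write_zero_pages(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects)
{
	struct bio *bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	int ret;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE;

	while (nr_sects) {
		unsigned int len = min_t(sector_t,
					 nr_sects << SECTOR_SHIFT, PAGE_SIZE);

		if (bio_add_page(bio, ZERO_PAGE(0), len, 0) < len)
			break;		/* bio full; a real version chains here */
		nr_sects -= len >> SECTOR_SHIFT;
		sector += len >> SECTOR_SHIFT;
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}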
Junxiao Bi May 5, 2021, 3:54 p.m. UTC | #11
On 5/5/21 4:43 AM, Jan Kara wrote:
> On Tue 04-05-21 16:35:53, Junxiao Bi wrote:
>> [...]
> OK, I see. And AFAICT it is not about writeback racing with the zeroing in
> ocfs2_zero_range_for_truncate() but rather the filemap_fdatawrite_range()
> there not writing out zeroed pages if they are beyond i_size. And honestly,
> rather than trying to extend block_write_full_page() for this odd corner
> case, I'd use sb_issue_zeroout() or code something similar to
> __blkdev_issue_zero_pages() inside OCFS2. Because making pages in the page
> cache beyond i_size work is always going to be fragile...

Thanks for the suggestion. I will send a v2 using zeroout.

Thanks,

Junxiao.

Andrew Morton May 9, 2021, 11:23 p.m. UTC | #12
On Mon, 26 Apr 2021 15:05:50 -0700 Junxiao Bi <junxiao.bi@oracle.com> wrote:

> When doing truncate/fallocate for some filesystems like ocfs2, the
> filesystem will zero some pages that are beyond the inode size and
> only later update the inode size, so it needs this API to write back
> EOF pages.

Seems reasonable.  But can we please update the
__block_write_full_page_eof() comment?  It now uses the wrong function
name and doesn't document the new `eof' argument.
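
A possible shape for the updated comment Andrew asks for - the wording
is a sketch, not from any posted v2; the WB_SYNC_ALL sentence is taken
from the existing comment visible in the diff:

/*
 * Write out the page's dirty buffers. If wbc->sync_mode is WB_SYNC_ALL,
 * the writes are posted using REQ_SYNC; this causes the writes to be
 * flagged as synchronous writes. If @eof_write is true, buffers lying
 * wholly beyond i_size are written out instead of being discarded, for
 * callers that zero pages past EOF before extending the inode size.
 */
int __block_write_full_page_eof(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc,
			bh_end_io_t *handler, bool eof_write);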
Junxiao Bi May 10, 2021, 10:15 p.m. UTC | #13
On 5/9/21 4:23 PM, Andrew Morton wrote:

> On Mon, 26 Apr 2021 15:05:50 -0700 Junxiao Bi <junxiao.bi@oracle.com> wrote:
>
>> [...]
> Seems reasonable.  But can we please update the
> __block_write_full_page_eof() comment?  It now uses the wrong function
> name and doesn't document the new `eof' argument.

Jan suggested using sb_issue_zeroout to zero the EOF pages in
ocfs2_fallocate; that can also fix the issue for ocfs2. For gfs2, I
thought it had the same issue, but I didn't get a confirmation from the
gfs2 maintainers. If gfs2 is OK, then maybe this new API is not
necessary?

Thanks,

Junxiao.

Bob Peterson May 11, 2021, 12:19 p.m. UTC | #14
----- Original Message -----
> On 5/9/21 4:23 PM, Andrew Morton wrote:
> 
> > [...]
> 
> Jan suggested using sb_issue_zeroout to zero the EOF pages in
> ocfs2_fallocate; that can also fix the issue for ocfs2. For gfs2, I
> thought it had the same issue, but I didn't get a confirmation from
> the gfs2 maintainers. If gfs2 is OK, then maybe this new API is not
> necessary?
> 
> Thanks,
> 
> Junxiao.

Hi,

Sorry. I was on holiday/vacation for the past week and a half without
Internet access except for my phone. I'll try to find the time to read
the thread and look into it soon.

Bob Peterson
Patch

diff --git a/fs/buffer.c b/fs/buffer.c
index 0cb7ffd4977c..802f0bacdbde 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1709,9 +1709,9 @@ static struct buffer_head *create_page_buffers(struct page *page, struct inode *
  * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
  * causes the writes to be flagged as synchronous writes.
  */
-int __block_write_full_page(struct inode *inode, struct page *page,
+int __block_write_full_page_eof(struct inode *inode, struct page *page,
 			get_block_t *get_block, struct writeback_control *wbc,
-			bh_end_io_t *handler)
+			bh_end_io_t *handler, bool eof_write)
 {
 	int err;
 	sector_t block;
@@ -1746,7 +1746,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 	 * handle any aliases from the underlying blockdev's mapping.
 	 */
 	do {
-		if (block > last_block) {
+		if (block > last_block && !eof_write) {
 			/*
 			 * mapped buffers outside i_size will occur, because
 			 * this page can be outside i_size when there is a
@@ -1871,6 +1871,14 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 	unlock_page(page);
 	goto done;
 }
+EXPORT_SYMBOL(__block_write_full_page_eof);
+
+int __block_write_full_page(struct inode *inode, struct page *page,
+			get_block_t *get_block, struct writeback_control *wbc,
+			bh_end_io_t *handler)
+{
+	return __block_write_full_page_eof(inode, page, get_block, wbc, handler, false);
+}
 EXPORT_SYMBOL(__block_write_full_page);
 
 /*
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 6b47f94378c5..5da15a1ba15c 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -221,6 +221,9 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 int __block_write_full_page(struct inode *inode, struct page *page,
 			get_block_t *get_block, struct writeback_control *wbc,
 			bh_end_io_t *handler);
+int __block_write_full_page_eof(struct inode *inode, struct page *page,
+			get_block_t *get_block, struct writeback_control *wbc,
+			bh_end_io_t *handler, bool eof_write);
 int block_read_full_page(struct page*, get_block_t*);
 int block_is_partially_uptodate(struct page *page, unsigned long from,
 				unsigned long count);