@@ -4895,6 +4895,11 @@ xfs_bmapi_remap(
}
ip->i_nblocks += len;
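+ /*
+ * Drop the count that xfs_bmap_defer_add pre-recorded in
+ * i_delayed_blks now that the blocks are accounted in i_nblocks.
+ */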
+ ip->i_delayed_blks -= len;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
if (ifp->if_format == XFS_DINODE_FMT_BTREE)
@@ -357,6 +357,20 @@ xfs_bmap_defer_add(
trace_xfs_bmap_defer(bi);
xfs_bmap_update_get_group(tp->t_mountp, bi);
+
+ /*
+ * Pre-record the block count of the deferred mapping in i_delayed_blks.
+ *
+ * Otherwise stat can report zero blocks for an inode that actually has
+ * data while the entire mapping is being overwritten via the
+ * out-of-place write path.  This is undone in xfs_bmapi_remap after it
+ * has incremented i_nblocks for a successful operation.
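+ *
+ * Note that stat derives its block count from i_nblocks plus
+ * i_delayed_blks (see xfs_vn_getattr), which is why carrying the
+ * in-flight count here keeps st_blocks correct.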
+ */
+ if (bi->bi_type == XFS_BMAP_MAP)
+ bi->bi_owner->i_delayed_blks += bi->bi_bmap.br_blockcount;
xfs_defer_add(tp, &bi->bi_list, &xfs_bmap_update_defer_type);
}
@@ -381,6 +395,13 @@ xfs_bmap_update_cancel_item(
{
struct xfs_bmap_intent *bi = bi_entry(item);
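+ /*
+ * A cancelled intent never reaches xfs_bmapi_remap, so undo the
+ * i_delayed_blks pre-record from xfs_bmap_defer_add here.
+ */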
+ if (bi->bi_type == XFS_BMAP_MAP)
+ bi->bi_owner->i_delayed_blks -= bi->bi_bmap.br_blockcount;
+
xfs_bmap_update_put_group(bi);
kmem_cache_free(xfs_bmap_intent_cache, bi);
}
@@ -478,6 +499,13 @@ xfs_bui_recover_work(
bi->bi_owner = *ipp;
xfs_bmap_update_get_group(mp, bi);
+ /*
+ * Intents recreated here by log recovery don't pass through
+ * xfs_bmap_defer_add, so redo its i_delayed_blks pre-record; see
+ * xfs_bmap_defer_add for details.
+ */
+ if (bi->bi_type == XFS_BMAP_MAP)
+ bi->bi_owner->i_delayed_blks += bi->bi_bmap.br_blockcount;
xfs_defer_add_item(dfp, &bi->bi_list);
return bi;
}