@@ -1507,6 +1507,34 @@ xfs_blockgc_free_space(
return 0;
}

+/*
+ * Reclaim all the free space that we can by scheduling the background blockgc
+ * and inodegc workers immediately and waiting for them all to clear.
+ */
+void
+xfs_blockgc_flush_all(
+ struct xfs_mount *mp)
+{
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno;
+
+ trace_xfs_blockgc_flush_all(mp, __return_address);
+
+ /*
+ * For each blockgc worker, move its queue time up to now. If it
+ * wasn't queued, it will not be requeued. Then flush whatever's
+ * left.
+ */
+ for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
+ mod_delayed_work(pag->pag_mount->m_blockgc_wq,
+ &pag->pag_blockgc_work, 0);
+
+ for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
+ flush_delayed_work(&pag->pag_blockgc_work);
+
+ xfs_inodegc_flush(mp);
+}
+
/*
* Run cow/eofblocks scans on the supplied dquots. We don't know exactly which
* quota caused an allocation failure, so we make a best effort by including
@@ -59,6 +59,7 @@ int xfs_blockgc_free_dquots(struct xfs_mount *mp, struct xfs_dquot *udqp,
unsigned int iwalk_flags);
int xfs_blockgc_free_quota(struct xfs_inode *ip, unsigned int iwalk_flags);
int xfs_blockgc_free_space(struct xfs_mount *mp, struct xfs_icwalk *icm);
+void xfs_blockgc_flush_all(struct xfs_mount *mp);

void xfs_inode_set_eofblocks_tag(struct xfs_inode *ip);
void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip);
@@ -202,6 +202,7 @@ DEFINE_FS_EVENT(xfs_fs_sync_fs);
DEFINE_FS_EVENT(xfs_blockgc_start);
DEFINE_FS_EVENT(xfs_blockgc_stop);
DEFINE_FS_EVENT(xfs_blockgc_worker);
+DEFINE_FS_EVENT(xfs_blockgc_flush_all);

DECLARE_EVENT_CLASS(xfs_ag_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno),
@@ -295,10 +295,7 @@ xfs_trans_alloc(
* Do not perform a synchronous scan because callers can hold
* other locks.
*/
- error = xfs_blockgc_free_space(mp, NULL);
- if (error)
- return error;
-
+ xfs_blockgc_flush_all(mp);
want_retry = false;
goto retry;
}