
[08/14] xfs: queue inactivation immediately when free realtime extents are tight

Message ID 162812922691.2589546.7668598169022490963.stgit@magnolia
State Accepted
Series xfs: deferred inode inactivation

Commit Message

Darrick J. Wong Aug. 5, 2021, 2:07 a.m. UTC
From: Darrick J. Wong <djwong@kernel.org>

Now that we have made the inactivation of unlinked inodes a background
task to increase the throughput of file deletions, we need to be a
little more careful about how long of a delay we can tolerate.

Similar to the patch doing this for free space on the data device, if
the file being inactivated is a realtime file and the realtime volume is
running low on free extents, we want to run the worker ASAP so that the
realtime allocator can make better decisions.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
---
 fs/xfs/xfs_icache.c |   24 ++++++++++++++++++++++++
 fs/xfs/xfs_mount.c  |   13 ++++++++-----
 fs/xfs/xfs_mount.h  |    3 ++-
 3 files changed, 34 insertions(+), 6 deletions(-)
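
For a sense of scale (illustrative numbers, not taken from the patch): with the 5%
threshold that xfs_set_low_space_thresholds() computes below,

	sb_rextents                    = 1,000,000 realtime extents
	m_low_rtexts[XFS_LOWSP_5_PCNT] = (1,000,000 / 100) * 5 = 50,000

so once fewer than 50,000 realtime extents remain free, inactivating a realtime
inode kicks the background worker immediately rather than leaving it deferred.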

Comments

Dave Chinner Aug. 5, 2021, 5:36 a.m. UTC | #1
On Wed, Aug 04, 2021 at 07:07:06PM -0700, Darrick J. Wong wrote:
> From: Darrick J. Wong <djwong@kernel.org>
> 
> Now that we have made the inactivation of unlinked inodes a background
> task to increase the throughput of file deletions, we need to be a
> little more careful about how long of a delay we can tolerate.
> 
> Similar to the patch doing this for free space on the data device, if
> the file being inactivated is a realtime file and the realtime volume is
> running low on free extents, we want to run the worker ASAP so that the
> realtime allocator can make better decisions.
> 
> Signed-off-by: Darrick J. Wong <djwong@kernel.org>
> ---
>  fs/xfs/xfs_icache.c |   24 ++++++++++++++++++++++++
>  fs/xfs/xfs_mount.c  |   13 ++++++++-----
>  fs/xfs/xfs_mount.h  |    3 ++-
>  3 files changed, 34 insertions(+), 6 deletions(-)
> 
> 
> diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
> index e5e90f09bcc6..4a062cf689c3 100644
> --- a/fs/xfs/xfs_icache.c
> +++ b/fs/xfs/xfs_icache.c
> @@ -1924,6 +1924,27 @@ xfs_inodegc_start(
>  	xfs_inodegc_queue_all(mp);
>  }
>  
> +#ifdef CONFIG_XFS_RT
> +static inline bool
> +xfs_inodegc_want_queue_rt_file(
> +	struct xfs_inode	*ip)
> +{
> +	struct xfs_mount	*mp = ip->i_mount;
> +	uint64_t		freertx;
> +
> +	if (!XFS_IS_REALTIME_INODE(ip))
> +		return false;
> +
> +	spin_lock(&mp->m_sb_lock);
> +	freertx = mp->m_sb.sb_frextents;
> +	spin_unlock(&mp->m_sb_lock);

READ_ONCE() is probably sufficient here. We're not actually
serialising this against any specific operation, so I don't think
the lock is necessary to sample the value.

Other than that, all good.

Reviewed-by: Dave Chinner <dchinner@redhat.com>
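
A minimal sketch of the lockless sampling suggested above (illustration only, not
necessarily how a later revision of the patch reads):

	if (!XFS_IS_REALTIME_INODE(ip))
		return false;

	/* An unlocked, possibly stale sample is fine for this heuristic. */
	freertx = READ_ONCE(mp->m_sb.sb_frextents);

	return freertx < mp->m_low_rtexts[XFS_LOWSP_5_PCNT];

Since the value only biases a scheduling decision, a racy read cannot break
correctness; at worst the worker is queued a little earlier or later than the
exact free-extent count would dictate.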

Patch

diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index e5e90f09bcc6..4a062cf689c3 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1924,6 +1924,27 @@ xfs_inodegc_start(
 	xfs_inodegc_queue_all(mp);
 }
 
+#ifdef CONFIG_XFS_RT
+static inline bool
+xfs_inodegc_want_queue_rt_file(
+	struct xfs_inode	*ip)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	uint64_t		freertx;
+
+	if (!XFS_IS_REALTIME_INODE(ip))
+		return false;
+
+	spin_lock(&mp->m_sb_lock);
+	freertx = mp->m_sb.sb_frextents;
+	spin_unlock(&mp->m_sb_lock);
+
+	return freertx < mp->m_low_rtexts[XFS_LOWSP_5_PCNT];
+}
+#else
+# define xfs_inodegc_want_queue_rt_file(ip)	(false)
+#endif /* CONFIG_XFS_RT */
+
 /*
  * Schedule the inactivation worker when:
  *
@@ -1946,6 +1967,9 @@ xfs_inodegc_want_queue_work(
 				XFS_FDBLOCKS_BATCH) < 0)
 		return true;
 
+	if (xfs_inodegc_want_queue_rt_file(ip))
+		return true;
+
 	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
 		return true;
 
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 5fe6f1db4fe9..ed1e7e3dce7e 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -365,13 +365,16 @@ void
 xfs_set_low_space_thresholds(
 	struct xfs_mount	*mp)
 {
-	int i;
+	uint64_t		dblocks = mp->m_sb.sb_dblocks;
+	uint64_t		rtexts = mp->m_sb.sb_rextents;
+	int			i;
+
+	do_div(dblocks, 100);
+	do_div(rtexts, 100);
 
 	for (i = 0; i < XFS_LOWSP_MAX; i++) {
-		uint64_t space = mp->m_sb.sb_dblocks;
-
-		do_div(space, 100);
-		mp->m_low_space[i] = space * (i + 1);
+		mp->m_low_space[i] = dblocks * (i + 1);
+		mp->m_low_rtexts[i] = rtexts * (i + 1);
 	}
 }
 
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 750297498a09..1061ac985c18 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -145,7 +145,8 @@ typedef struct xfs_mount {
 	int			m_fixedfsid[2];	/* unchanged for life of FS */
 	uint			m_qflags;	/* quota status flags */
 	uint64_t		m_flags;	/* global mount flags */
-	int64_t			m_low_space[XFS_LOWSP_MAX];
+	uint64_t		m_low_space[XFS_LOWSP_MAX];
+	uint64_t		m_low_rtexts[XFS_LOWSP_MAX];
 	struct xfs_ino_geometry	m_ino_geo;	/* inode geometry */
 	struct xfs_trans_resv	m_resv;		/* precomputed res values */
 						/* low free space thresholds */