[07/20] xfs: queue inodegc worker immediately when memory is tight

Message ID: 162758427220.332903.7964745098855992435.stgit@magnolia
State: New, archived
Series: xfs: deferred inode inactivation

Commit Message

Darrick J. Wong July 29, 2021, 6:44 p.m. UTC
From: Darrick J. Wong <djwong@kernel.org>

If there is enough memory pressure that we're scheduling inodes for
inactivation from a shrinker, queue the inactivation worker immediately
to try to facilitate reclaiming inodes.  This patch prepares us for
adding a configurable inodegc delay in the next patch.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
---
 fs/xfs/xfs_icache.c |   34 ++++++++++++++++++++++++++++++++--
 fs/xfs/xfs_trace.h  |    1 +
 2 files changed, 33 insertions(+), 2 deletions(-)
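
For readers skimming the series, here is a hedged sketch of the decision this
patch wires up (illustration only, not part of the patch; the helper name is
made up, and the real logic lives in xfs_gc_delay_ms() and xfs_inodegc_queue()
below): a task running inside memory reclaim has current->reclaim_state set,
so the computed delay collapses to zero and the inodegc worker is queued to
run right away.

/*
 * Illustration only -- not part of the patch.  The helper name is
 * hypothetical; see xfs_gc_delay_ms() and xfs_inodegc_queue() in the
 * diff below for the real code.
 */
static inline void
example_inodegc_queue(
	struct xfs_mount	*mp)
{
	unsigned int		delay_ms = 0;	/* made tunable in the next patch */

	/* The VM sets current->reclaim_state while a task reclaims memory. */
	if (current->reclaim_state != NULL)
		delay_ms = 0;			/* shrinker context: run immediately */

	queue_delayed_work(mp->m_gc_workqueue, &mp->m_inodegc_work,
			msecs_to_jiffies(delay_ms));
}

At this point in the series the delay is zero on both paths; the structure
(and the new trace_xfs_inodegc_delay_mempressure tracepoint) exists so the
next patch can introduce a nonzero default delay that the shrinker path
then overrides.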

Patch

diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index abd95f16b697..e0803544ea19 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -212,6 +212,32 @@  xfs_reclaim_work_queue(
 	rcu_read_unlock();
 }
 
+/*
+ * Compute the lag between scheduling and executing some kind of background
+ * garbage collection work.  Return value is in ms.
+ */
+static inline unsigned int
+xfs_gc_delay_ms(
+	struct xfs_mount	*mp,
+	unsigned int		tag)
+{
+	switch (tag) {
+	case XFS_ICI_INODEGC_TAG:
+		/* If we're in a shrinker, kick off the worker immediately. */
+		if (current->reclaim_state != NULL) {
+			trace_xfs_inodegc_delay_mempressure(mp,
+					__return_address);
+			return 0;
+		}
+		break;
+	default:
+		ASSERT(0);
+		return 0;
+	}
+
+	return 0;
+}
+
 /*
  * Background scanning to trim preallocated space. This is queued based on the
  * 'speculative_prealloc_lifetime' tunable (5m by default).
@@ -242,8 +268,12 @@  xfs_inodegc_queue(
 
 	rcu_read_lock();
 	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_INODEGC_TAG)) {
-		trace_xfs_inodegc_queue(mp, 0);
-		queue_delayed_work(mp->m_gc_workqueue, &mp->m_inodegc_work, 0);
+		unsigned int	delay;
+
+		delay = xfs_gc_delay_ms(mp, XFS_ICI_INODEGC_TAG);
+		trace_xfs_inodegc_queue(mp, delay);
+		queue_delayed_work(mp->m_gc_workqueue, &mp->m_inodegc_work,
+				msecs_to_jiffies(delay));
 	}
 	rcu_read_unlock();
 }
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index b4dfa7e7e700..d3f3f6a32872 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -192,6 +192,7 @@  DEFINE_FS_EVENT(xfs_inodegc_stop);
 DEFINE_FS_EVENT(xfs_inodegc_worker);
 DEFINE_FS_EVENT(xfs_inodegc_throttled);
 DEFINE_FS_EVENT(xfs_fs_sync_fs);
+DEFINE_FS_EVENT(xfs_inodegc_delay_mempressure);
 
 TRACE_EVENT(xfs_inodegc_requeue_mempressure,
 	TP_PROTO(struct xfs_mount *mp, unsigned long nr, void *caller_ip),