[1/4] xfs: make inode inactivation state changes atomic

Message ID 20240319001707.3430251-2-david@fromorbit.com (mailing list archive)
State New
Series xfs: recycle inactive inodes immediately

Commit Message

Dave Chinner March 19, 2024, 12:15 a.m. UTC
From: Dave Chinner <dchinner@redhat.com>

We need the XFS_NEED_INACTIVE flag to correspond to whether the
inode is on the inodegc queues so that we can then use this state
for lazy removal.

To do this, move the addition of the inode to the inodegc queue
under the ip->i_flags_lock so that it is atomic w.r.t. setting
the XFS_NEED_INACTIVE flag.

Then, when we remove the inode from the inodegc list to actually run
inactivation, clear the XFS_NEED_INACTIVE flag at the same time we set
the XFS_INACTIVATING flag to indicate that inactivation is in
progress.

These changes make all the state changes and inodegc queuing atomic
w.r.t. each other and inode lookups through the use of the
ip->i_flags_lock.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
---
 fs/xfs/xfs_icache.c | 16 ++++++++++++++--
 fs/xfs/xfs_inode.h  | 11 +++++++----
 2 files changed, 21 insertions(+), 6 deletions(-)
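
The window this patch closes: in the pre-patch code, xfs_inodegc_queue()
set XFS_NEED_INACTIVE, dropped ip->i_flags_lock, and only then added the
inode to the per-cpu inodegc list, so a concurrent lookup could observe
XFS_NEED_INACTIVE on an inode that was not yet on any inodegc queue. A
sketch of the two orderings, abbreviated from the diff below (the
llist_add() of ip->i_gclist sits in hunk context not shown; accounting
and shrinker handling omitted):

	/* Before: flag set and list add are separately locked. */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= XFS_NEED_INACTIVE;
	spin_unlock(&ip->i_flags_lock);
	/* A lookup here sees NEED_INACTIVE but no queued inode. */
	llist_add(&ip->i_gclist, &gc->list);

	/* After: both happen atomically under ip->i_flags_lock. */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= XFS_NEED_INACTIVE;
	llist_add(&ip->i_gclist, &gc->list);
	spin_unlock(&ip->i_flags_lock);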

Comments

Darrick J. Wong March 19, 2024, 6:01 p.m. UTC | #1
On Tue, Mar 19, 2024 at 11:15:57AM +1100, Dave Chinner wrote:
> From: Dave Chinner <dchinner@redhat.com>
> 
> We need the XFS_NEED_INACTIVE flag to correspond to whether the
> inode is on the inodegc queues so that we can then use this state
> for lazy removal.
> 
> To do this, move the addition of the inode to the inodegc queue
> under the ip->i_flags_lock so that it is atomic w.r.t. setting
> the XFS_NEED_INACTIVE flag.
> 
> Then, when we remove the inode from the inodegc list to actually run
> inactivation, clear the XFS_NEED_INACTIVE flag at the same time we set
> the XFS_INACTIVATING flag to indicate that inactivation is in
> progress.
> 
> These changes make all the state changes and inodegc queuing atomic
> w.r.t. each other and inode lookups through the use of the
> ip->i_flags_lock.
> 
> Signed-off-by: Dave Chinner <dchinner@redhat.com>

Pretty straightforward lock coverage extension,
Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D


Patch

diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 6c87b90754c4..9a362964f656 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1880,7 +1880,12 @@ xfs_inodegc_worker(
 	llist_for_each_entry_safe(ip, n, node, i_gclist) {
 		int	error;
 
-		xfs_iflags_set(ip, XFS_INACTIVATING);
+		/* Switch state to inactivating. */
+		spin_lock(&ip->i_flags_lock);
+		ip->i_flags |= XFS_INACTIVATING;
+		ip->i_flags &= ~XFS_NEED_INACTIVE;
+		spin_unlock(&ip->i_flags_lock);
+
 		error = xfs_inodegc_inactivate(ip);
 		if (error && !gc->error)
 			gc->error = error;
@@ -2075,9 +2080,14 @@ xfs_inodegc_queue(
 	unsigned long		queue_delay = 1;
 
 	trace_xfs_inode_set_need_inactive(ip);
+
+	/*
+	 * Put the addition of the inode to the gc list under the
+	 * ip->i_flags_lock so that the state change and list addition are
+	 * atomic w.r.t. lookup operations under the ip->i_flags_lock.
+	 */
 	spin_lock(&ip->i_flags_lock);
 	ip->i_flags |= XFS_NEED_INACTIVE;
-	spin_unlock(&ip->i_flags_lock);
 
 	cpu_nr = get_cpu();
 	gc = this_cpu_ptr(mp->m_inodegc);
@@ -2086,6 +2096,8 @@ xfs_inodegc_queue(
 	WRITE_ONCE(gc->items, items + 1);
 	shrinker_hits = READ_ONCE(gc->shrinker_hits);
 
+	spin_unlock(&ip->i_flags_lock);
+
 	/*
 	 * Ensure the list add is always seen by anyone who finds the cpumask
 	 * bit set. This effectively gives the cpumask bit set operation
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 94fa79ae1591..b0943d888f5c 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -349,10 +349,13 @@ static inline bool xfs_inode_has_forcealign(struct xfs_inode *ip)
 
 /*
  * If we need to update on-disk metadata before this IRECLAIMABLE inode can be
- * freed, then NEED_INACTIVE will be set.  Once we start the updates, the
- * INACTIVATING bit will be set to keep iget away from this inode.  After the
- * inactivation completes, both flags will be cleared and the inode is a
- * plain old IRECLAIMABLE inode.
+ * freed, then NEED_INACTIVE will be set. If the inode is accessed via iget
+ * whilst NEED_INACTIVE is set, the inode will be reactivated and become a
+ * normal inode again. Once we start the inactivation, the INACTIVATING bit will
+ * be set and the NEED_INACTIVE bit will be cleared. The INACTIVATING bit will
+ * keep iget away from this inode whilst inactivation is in progress.  After the
+ * inactivation completes, INACTIVATING will be cleared and the inode
+ * transitions to a plain old IRECLAIMABLE inode.
  */
 #define XFS_INACTIVATING	(1 << 13)
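
The updated comment describes the state machine that lookups now observe.
As a minimal sketch of the decisions an iget-side check could make once
these transitions are atomic under ip->i_flags_lock (hypothetical
illustration only, not code from this series):

	spin_lock(&ip->i_flags_lock);
	if (ip->i_flags & XFS_INACTIVATING) {
		/* Inactivation is running; lookup must back off and retry. */
	} else if (ip->i_flags & XFS_NEED_INACTIVE) {
		/* Queued but not yet started; the inode can be reactivated. */
	} else {
		/* Plain IRECLAIMABLE inode; normal recycling applies. */
	}
	spin_unlock(&ip->i_flags_lock);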