Message ID | 20230118224505.1964941-13-david@fromorbit.com |
---|---|
State | Superseded, archived |
Series | xfs: per-ag centric allocation alogrithms |
On Thu, 2023-01-19 at 09:44 +1100, Dave Chinner wrote:
> From: Dave Chinner <dchinner@redhat.com>
>
> This is currently a spinlock lock protected rotor which can be
> implemented with a single atomic operation. Change it to be more
> efficient and get rid of the m_agirotor_lock. Noticed while
> converting the inode allocation AG selection loop to active perag
> references.
>
> Signed-off-by: Dave Chinner <dchinner@redhat.com>

Ok, makes sense
Reviewed-by: Allison Henderson <allison.henderson@oracle.com>

> ---
>  fs/xfs/libxfs/xfs_ialloc.c | 17 +----------------
>  fs/xfs/libxfs/xfs_sb.c     |  3 ++-
>  fs/xfs/xfs_mount.h         |  3 +--
>  fs/xfs/xfs_super.c         |  1 -
>  4 files changed, 4 insertions(+), 20 deletions(-)
>
> diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
> index 5b8401038bab..c8d837d8876f 100644
> --- a/fs/xfs/libxfs/xfs_ialloc.c
> +++ b/fs/xfs/libxfs/xfs_ialloc.c
> @@ -1576,21 +1576,6 @@ xfs_dialloc_roll(
>  	return error;
>  }
>
> -static xfs_agnumber_t
> -xfs_ialloc_next_ag(
> -	xfs_mount_t	*mp)
> -{
> -	xfs_agnumber_t	agno;
> -
> -	spin_lock(&mp->m_agirotor_lock);
> -	agno = mp->m_agirotor;
> -	if (++mp->m_agirotor >= mp->m_maxagi)
> -		mp->m_agirotor = 0;
> -	spin_unlock(&mp->m_agirotor_lock);
> -
> -	return agno;
> -}
> -
>  static bool
>  xfs_dialloc_good_ag(
>  	struct xfs_perag	*pag,
> @@ -1748,7 +1733,7 @@ xfs_dialloc(
>  	 * an AG has enough space for file creation.
>  	 */
>  	if (S_ISDIR(mode))
> -		start_agno = xfs_ialloc_next_ag(mp);
> +		start_agno = atomic_inc_return(&mp->m_agirotor) % mp->m_maxagi;
>  	else {
>  		start_agno = XFS_INO_TO_AGNO(mp, parent);
>  		if (start_agno >= mp->m_maxagi)
> diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
> index 1eeecf2eb2a7..99cc03a298e2 100644
> --- a/fs/xfs/libxfs/xfs_sb.c
> +++ b/fs/xfs/libxfs/xfs_sb.c
> @@ -909,7 +909,8 @@ xfs_sb_mount_common(
>  	struct xfs_mount	*mp,
>  	struct xfs_sb		*sbp)
>  {
> -	mp->m_agfrotor = mp->m_agirotor = 0;
> +	mp->m_agfrotor = 0;
> +	atomic_set(&mp->m_agirotor, 0);
>  	mp->m_maxagi = mp->m_sb.sb_agcount;
>  	mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
>  	mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
> diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
> index 8aca2cc173ac..f3269c0626f0 100644
> --- a/fs/xfs/xfs_mount.h
> +++ b/fs/xfs/xfs_mount.h
> @@ -210,8 +210,7 @@ typedef struct xfs_mount {
>  	struct xfs_error_cfg	m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
>  	struct xstats		m_stats;	/* per-fs stats */
>  	xfs_agnumber_t		m_agfrotor;	/* last ag where space found */
> -	xfs_agnumber_t		m_agirotor;	/* last ag dir inode alloced */
> -	spinlock_t		m_agirotor_lock;/* .. and lock protecting it */
> +	atomic_t		m_agirotor;	/* last ag dir inode alloced */
>
>  	/* Memory shrinker to throttle and reprioritize inodegc */
>  	struct shrinker		m_inodegc_shrinker;
> diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
> index 0c4b73e9b29d..96375b5622fd 100644
> --- a/fs/xfs/xfs_super.c
> +++ b/fs/xfs/xfs_super.c
> @@ -1922,7 +1922,6 @@ static int xfs_init_fs_context(
>  		return -ENOMEM;
>
>  	spin_lock_init(&mp->m_sb_lock);
> -	spin_lock_init(&mp->m_agirotor_lock);
>  	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
>  	spin_lock_init(&mp->m_perag_lock);
>  	mutex_init(&mp->m_growlock);
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 5b8401038bab..c8d837d8876f 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -1576,21 +1576,6 @@ xfs_dialloc_roll(
 	return error;
 }
 
-static xfs_agnumber_t
-xfs_ialloc_next_ag(
-	xfs_mount_t	*mp)
-{
-	xfs_agnumber_t	agno;
-
-	spin_lock(&mp->m_agirotor_lock);
-	agno = mp->m_agirotor;
-	if (++mp->m_agirotor >= mp->m_maxagi)
-		mp->m_agirotor = 0;
-	spin_unlock(&mp->m_agirotor_lock);
-
-	return agno;
-}
-
 static bool
 xfs_dialloc_good_ag(
 	struct xfs_perag	*pag,
@@ -1748,7 +1733,7 @@ xfs_dialloc(
 	 * an AG has enough space for file creation.
 	 */
 	if (S_ISDIR(mode))
-		start_agno = xfs_ialloc_next_ag(mp);
+		start_agno = atomic_inc_return(&mp->m_agirotor) % mp->m_maxagi;
 	else {
 		start_agno = XFS_INO_TO_AGNO(mp, parent);
 		if (start_agno >= mp->m_maxagi)
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 1eeecf2eb2a7..99cc03a298e2 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -909,7 +909,8 @@ xfs_sb_mount_common(
 	struct xfs_mount	*mp,
 	struct xfs_sb		*sbp)
 {
-	mp->m_agfrotor = mp->m_agirotor = 0;
+	mp->m_agfrotor = 0;
+	atomic_set(&mp->m_agirotor, 0);
 	mp->m_maxagi = mp->m_sb.sb_agcount;
 	mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
 	mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 8aca2cc173ac..f3269c0626f0 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -210,8 +210,7 @@ typedef struct xfs_mount {
 	struct xfs_error_cfg	m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
 	struct xstats		m_stats;	/* per-fs stats */
 	xfs_agnumber_t		m_agfrotor;	/* last ag where space found */
-	xfs_agnumber_t		m_agirotor;	/* last ag dir inode alloced */
-	spinlock_t		m_agirotor_lock;/* .. and lock protecting it */
+	atomic_t		m_agirotor;	/* last ag dir inode alloced */
 
 	/* Memory shrinker to throttle and reprioritize inodegc */
 	struct shrinker		m_inodegc_shrinker;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 0c4b73e9b29d..96375b5622fd 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1922,7 +1922,6 @@ static int xfs_init_fs_context(
 		return -ENOMEM;
 
 	spin_lock_init(&mp->m_sb_lock);
-	spin_lock_init(&mp->m_agirotor_lock);
 	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
 	spin_lock_init(&mp->m_perag_lock);
 	mutex_init(&mp->m_growlock);
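For readers outside the kernel tree, here is a minimal, self-contained userspace sketch of the pattern this patch applies: a round-robin "rotor" counter protected by a lock versus the same rotor driven by a single atomic increment folded back into range with a modulo. This is an illustrative assumption, not XFS code: the names next_ag_locked(), next_ag_atomic() and NUM_AGS are invented for the sketch, and it uses pthreads/C11 atomics where the kernel uses spin_lock()/atomic_inc_return() on fields of struct xfs_mount as shown in the diff above.

```c
/*
 * Userspace sketch of the rotor conversion (illustrative only).
 * Both helpers hand out 0, 1, ..., NUM_AGS - 1 in round-robin order
 * and are safe to call from multiple threads.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NUM_AGS 4	/* stands in for mp->m_maxagi */

/* Old shape: a plain counter plus a lock protecting it. */
static unsigned int locked_rotor;
static pthread_mutex_t rotor_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned int next_ag_locked(void)
{
	unsigned int agno;

	pthread_mutex_lock(&rotor_lock);
	agno = locked_rotor;
	if (++locked_rotor >= NUM_AGS)
		locked_rotor = 0;
	pthread_mutex_unlock(&rotor_lock);
	return agno;
}

/* New shape: a single atomic increment, wrapped with a modulo. */
static atomic_uint atomic_rotor = 0;

static unsigned int next_ag_atomic(void)
{
	/*
	 * atomic_fetch_add() returns the pre-increment value; the
	 * kernel's atomic_inc_return() returns the post-increment
	 * value, which only shifts the sequence by one slot - the
	 * rotor still visits every AG in turn.
	 */
	return atomic_fetch_add(&atomic_rotor, 1) % NUM_AGS;
}

int main(void)
{
	/* Single-threaded demo of the two rotor shapes. */
	for (int i = 0; i < 6; i++) {
		unsigned int a = next_ag_locked();
		unsigned int b = next_ag_atomic();

		printf("locked rotor: %u  atomic rotor: %u\n", a, b);
	}
	return 0;
}
```

The sketch shows the trade-off the changelog points at: the locked version serialises every caller on a lock just to bump an index, while the atomic version needs no separate lock, no unlock path, and no explicit wrap-around check, since the modulo folds the ever-growing counter back into the valid AG range.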