[v9,17/17] xfs: fold xfs_mount_alloc() into xfs_init_fs_context()

Message ID 157286496581.18393.3802665855647124772.stgit@fedora-28 (mailing list archive)
State New, archived
Series xfs: mount API patch series

Commit Message

Ian Kent Nov. 4, 2019, 10:56 a.m. UTC
After switching to use the mount API, the only remaining caller of
xfs_mount_alloc() is xfs_init_fs_context(), so fold xfs_mount_alloc()
into it.

Signed-off-by: Ian Kent <raven@themaw.net>
---
 fs/xfs/xfs_super.c |   49 +++++++++++++++++++------------------------------
 1 file changed, 19 insertions(+), 30 deletions(-)
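
The refactor is the common "fold a single-caller helper into its caller"
pattern: once xfs_init_fs_context() is the only user of xfs_mount_alloc(),
the helper's body moves into the caller and the helper's NULL return
collapses into the caller's -ENOMEM path. The sketch below is illustration
only, not kernel code: it uses hypothetical userspace names (widget_ctx,
widget_init_context, calloc) in place of kmem_alloc()/struct xfs_mount,
purely to show the resulting shape.

/*
 * Hypothetical userspace sketch (not part of the patch): after the fold,
 * the allocation, the single failure check, and all of the context setup
 * live directly in the one remaining caller.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct widget_ctx {
	int		refcount;	/* stand-ins for the mount state */
	unsigned int	flags;
};

static int widget_init_context(struct widget_ctx **out)
{
	struct widget_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));	/* zero-filled, like KM_ZERO */
	if (!ctx)
		return -ENOMEM;		/* one check, one error translation */

	/* initialization that previously lived in the alloc helper */
	ctx->refcount = 1;
	ctx->flags = 0;

	*out = ctx;
	return 0;
}

int main(void)
{
	struct widget_ctx *ctx;

	if (widget_init_context(&ctx) < 0)
		return 1;
	printf("context ready, refcount=%d\n", ctx->refcount);
	free(ctx);
	return 0;
}

As in the patch itself, the benefit is that the allocation failure is
checked and translated to -ENOMEM exactly once, at the only call site.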

Comments

Christoph Hellwig Nov. 4, 2019, 3:14 p.m. UTC | #1
Looks good,

Reviewed-by: Christoph Hellwig <hch@lst.de>
Darrick J. Wong Nov. 4, 2019, 8:55 p.m. UTC | #2
On Mon, Nov 04, 2019 at 06:56:05PM +0800, Ian Kent wrote:
> After switching to use the mount API, the only remaining caller of
> xfs_mount_alloc() is xfs_init_fs_context(), so fold xfs_mount_alloc()
> into it.
> 
> Signed-off-by: Ian Kent <raven@themaw.net>

Looks ok,
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>

--D

Patch

diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index e156fd59d592..c14f285f3256 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1096,35 +1096,6 @@ static const struct super_operations xfs_super_operations = {
 	.free_cached_objects	= xfs_fs_free_cached_objects,
 };
 
-static struct xfs_mount *
-xfs_mount_alloc(void)
-{
-	struct xfs_mount	*mp;
-
-	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
-	if (!mp)
-		return NULL;
-
-	spin_lock_init(&mp->m_sb_lock);
-	spin_lock_init(&mp->m_agirotor_lock);
-	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
-	spin_lock_init(&mp->m_perag_lock);
-	mutex_init(&mp->m_growlock);
-	atomic_set(&mp->m_active_trans, 0);
-	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
-	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
-	INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
-	mp->m_kobj.kobject.kset = xfs_kset;
-	/*
-	 * We don't create the finobt per-ag space reservation until after log
-	 * recovery, so we must set this to true so that an ifree transaction
-	 * started during log recovery will not depend on space reservations
-	 * for finobt expansion.
-	 */
-	mp->m_finobt_nores = true;
-	return mp;
-}
-
 static int
 suffix_kstrtoint(
 	const char	*s,
@@ -1763,10 +1734,28 @@ static int xfs_init_fs_context(
 {
 	struct xfs_mount	*mp;
 
-	mp = xfs_mount_alloc();
+	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
 	if (!mp)
 		return -ENOMEM;
 
+	spin_lock_init(&mp->m_sb_lock);
+	spin_lock_init(&mp->m_agirotor_lock);
+	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
+	spin_lock_init(&mp->m_perag_lock);
+	mutex_init(&mp->m_growlock);
+	atomic_set(&mp->m_active_trans, 0);
+	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
+	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
+	INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
+	mp->m_kobj.kobject.kset = xfs_kset;
+	/*
+	 * We don't create the finobt per-ag space reservation until after log
+	 * recovery, so we must set this to true so that an ifree transaction
+	 * started during log recovery will not depend on space reservations
+	 * for finobt expansion.
+	 */
+	mp->m_finobt_nores = true;
+
 	/*
 	 * These can be overridden by the mount option parsing.
 	 */