
[10/21] btrfs: move struct scrub_ctx to scrub.h

Message ID c120d508b232845c70d4c5378c12b0152d7d700e.1637745470.git.johannes.thumshirn@wdc.com (mailing list archive)
State New, archived
Series btrfs: first batch of zoned cleanups

Commit Message

Johannes Thumshirn Nov. 24, 2021, 9:30 a.m. UTC
Move 'struct scrub_ctx' to the newly created scrub.h file.

This is a preparation step for moving zoned only code from scrub.c to
zoned.c.

Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
---
 fs/btrfs/scrub.c | 44 +----------------------------------------
 fs/btrfs/scrub.h | 51 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 52 insertions(+), 43 deletions(-)
 create mode 100644 fs/btrfs/scrub.h

Comments

David Sterba Nov. 26, 2021, 4:48 p.m. UTC | #1
On Wed, Nov 24, 2021 at 01:30:36AM -0800, Johannes Thumshirn wrote:
> Move 'struct scrub_ctx' to the newly created scrub.h file.
> 
> This is a preparation step for moving zoned only code from scrub.c to
> zoned.c.
> 
> Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
> ---
>  fs/btrfs/scrub.c | 44 +----------------------------------------
>  fs/btrfs/scrub.h | 51 ++++++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 52 insertions(+), 43 deletions(-)
>  create mode 100644 fs/btrfs/scrub.h
> 
> diff --git a/fs/btrfs/scrub.h b/fs/btrfs/scrub.h
> new file mode 100644
> index 0000000000000..3eb8c8905c902
> --- /dev/null
> +++ b/fs/btrfs/scrub.h
> @@ -0,0 +1,51 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +
> +#ifndef BTRFS_SCRUB_H
> +#define BTRFS_SCRUB_H
> +
> +#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
> +
> +struct scrub_ctx {
> +	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
> +	struct btrfs_fs_info	*fs_info;
> +	int			first_free;
> +	int			curr;
> +	atomic_t		bios_in_flight;
> +	atomic_t		workers_pending;
> +	spinlock_t		list_lock;
> +	wait_queue_head_t	list_wait;
> +	struct list_head	csum_list;
> +	atomic_t		cancel_req;
> +	int			readonly;
> +	int			pages_per_rd_bio;
> +
> +	/* State of IO submission throttling affecting the associated device */
> +	ktime_t			throttle_deadline;
> +	u64			throttle_sent;
> +
> +	int			is_dev_replace;
> +	u64			write_pointer;
> +
> +	struct scrub_bio        *wr_curr_bio;
> +	struct mutex            wr_lock;
> +	int                     pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
> +	struct btrfs_device     *wr_tgtdev;
> +	bool                    flush_all_writes;
> +
> +	/*
> +	 * statistics
> +	 */
> +	struct btrfs_scrub_progress stat;
> +	spinlock_t		stat_lock;
> +
> +	/*
> +	 * Use a ref counter to avoid use-after-free issues. Scrub workers
> +	 * decrement bios_in_flight and workers_pending and then do a wakeup
> +	 * on the list_wait wait queue. We must ensure the main scrub task
> +	 * doesn't free the scrub context before or while the workers are
> +	 * doing the wakeup() call.
> +	 */
> +	refcount_t              refs;
> +};

Headers should be self-contained and pull in all required includes or use
forward declarations; the structure contains many types that need to be
defined.
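
For illustration, here is a minimal sketch of how the preamble of the new
scrub.h could satisfy that. The exact include set is an assumption derived
from the field types in struct scrub_ctx above: only the types embedded by
value need full definitions, while everything referenced through a pointer
can be forward-declared.

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_SCRUB_H
#define BTRFS_SCRUB_H

#include <linux/types.h>	/* u64 */
#include <linux/atomic.h>	/* atomic_t */
#include <linux/spinlock.h>	/* spinlock_t */
#include <linux/list.h>		/* struct list_head */
#include <linux/wait.h>		/* wait_queue_head_t */
#include <linux/ktime.h>	/* ktime_t */
#include <linux/mutex.h>	/* struct mutex */
#include <linux/refcount.h>	/* refcount_t */
#include <uapi/linux/btrfs.h>	/* struct btrfs_scrub_progress, embedded by value */

/*
 * Only referenced through pointers in struct scrub_ctx, so forward
 * declarations are enough and the heavier btrfs headers stay out.
 */
struct scrub_bio;
struct btrfs_fs_info;
struct btrfs_device;

#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */

/* ... struct scrub_ctx definition as in the patch, unchanged ... */

#endif /* BTRFS_SCRUB_H */

Whether to include the generic headers directly or keep relying on ctree.h
being included first is a design choice for the actual patch; the sketch only
shows one way to keep the header compilable on its own.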

Patch

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 70cf1f487748c..a2c42ff544701 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -21,6 +21,7 @@
 #include "raid56.h"
 #include "block-group.h"
 #include "zoned.h"
+#include "scrub.h"
 
 /*
  * This is only the first step towards a full-features scrub. It reads all
@@ -46,7 +47,6 @@ struct scrub_ctx;
  */
 #define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
 #define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
-#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
 
 /*
  * the following value times PAGE_SIZE needs to be large enough to match the
@@ -151,48 +151,6 @@ struct scrub_parity {
 	unsigned long		bitmap[];
 };
 
-struct scrub_ctx {
-	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
-	struct btrfs_fs_info	*fs_info;
-	int			first_free;
-	int			curr;
-	atomic_t		bios_in_flight;
-	atomic_t		workers_pending;
-	spinlock_t		list_lock;
-	wait_queue_head_t	list_wait;
-	struct list_head	csum_list;
-	atomic_t		cancel_req;
-	int			readonly;
-	int			pages_per_rd_bio;
-
-	/* State of IO submission throttling affecting the associated device */
-	ktime_t			throttle_deadline;
-	u64			throttle_sent;
-
-	int			is_dev_replace;
-	u64			write_pointer;
-
-	struct scrub_bio        *wr_curr_bio;
-	struct mutex            wr_lock;
-	int                     pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
-	struct btrfs_device     *wr_tgtdev;
-	bool                    flush_all_writes;
-
-	/*
-	 * statistics
-	 */
-	struct btrfs_scrub_progress stat;
-	spinlock_t		stat_lock;
-
-	/*
-	 * Use a ref counter to avoid use-after-free issues. Scrub workers
-	 * decrement bios_in_flight and workers_pending and then do a wakeup
-	 * on the list_wait wait queue. We must ensure the main scrub task
-	 * doesn't free the scrub context before or while the workers are
-	 * doing the wakeup() call.
-	 */
-	refcount_t              refs;
-};
 
 struct scrub_warning {
 	struct btrfs_path	*path;
diff --git a/fs/btrfs/scrub.h b/fs/btrfs/scrub.h
new file mode 100644
index 0000000000000..3eb8c8905c902
--- /dev/null
+++ b/fs/btrfs/scrub.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_SCRUB_H
+#define BTRFS_SCRUB_H
+
+#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
+
+struct scrub_ctx {
+	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
+	struct btrfs_fs_info	*fs_info;
+	int			first_free;
+	int			curr;
+	atomic_t		bios_in_flight;
+	atomic_t		workers_pending;
+	spinlock_t		list_lock;
+	wait_queue_head_t	list_wait;
+	struct list_head	csum_list;
+	atomic_t		cancel_req;
+	int			readonly;
+	int			pages_per_rd_bio;
+
+	/* State of IO submission throttling affecting the associated device */
+	ktime_t			throttle_deadline;
+	u64			throttle_sent;
+
+	int			is_dev_replace;
+	u64			write_pointer;
+
+	struct scrub_bio        *wr_curr_bio;
+	struct mutex            wr_lock;
+	int                     pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
+	struct btrfs_device     *wr_tgtdev;
+	bool                    flush_all_writes;
+
+	/*
+	 * statistics
+	 */
+	struct btrfs_scrub_progress stat;
+	spinlock_t		stat_lock;
+
+	/*
+	 * Use a ref counter to avoid use-after-free issues. Scrub workers
+	 * decrement bios_in_flight and workers_pending and then do a wakeup
+	 * on the list_wait wait queue. We must ensure the main scrub task
+	 * doesn't free the scrub context before or while the workers are
+	 * doing the wakeup() call.
+	 */
+	refcount_t              refs;
+};
+
+#endif /* BTRFS_SCRUB_H */