
[OPW,kernel] fs: btrfs: Pack struct btrfs_space_info and btrfs_block_group_cache

Message ID 20131021125402.GA6252@gmail.com
State New, archived

Commit Message

Rashika Oct. 21, 2013, 12:54 p.m. UTC
Pack the structures btrfs_space_info and btrfs_block_group_cache
in ctree.h to eliminate holes detected by pahole so that less
space is wasted.

Signed-off-by: Rashika Kheria <rashika.kheria@gmail.com>
---
 fs/btrfs/ctree.h |   52 +++++++++++++++++++++++++++-------------------------
 1 file changed, 27 insertions(+), 25 deletions(-)
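
For context on the technique being attempted here: pahole reads the DWARF
debug info and reports the padding holes the compiler inserts between
misaligned members, and reordering members so that similarly sized and
aligned fields sit together can remove those holes and shrink a structure.
The sketch below is purely illustrative and is not btrfs code; the names are
invented and the sizes assume a 64-bit LP64 target. Built with -g, something
like "pahole -C holey a.out" will show the hole directly.

#include <stdio.h>

/* Illustrative only, not taken from fs/btrfs/ctree.h. */
struct holey {
	char	flag;		/* offset 0, then a 7-byte hole so that  */
	long	big;		/* this 8-byte member lands at offset 8  */
	char	other_flag;	/* offset 16, plus 7 bytes tail padding  */
};				/* sizeof == 24 on LP64                  */

struct packed_by_order {
	long	big;		/* offset 0                              */
	char	flag;		/* offset 8                              */
	char	other_flag;	/* offset 9, plus 6 bytes tail padding   */
};				/* sizeof == 16 on LP64                  */

int main(void)
{
	printf("holey:           %zu bytes\n", sizeof(struct holey));
	printf("packed_by_order: %zu bytes\n", sizeof(struct packed_by_order));
	return 0;
}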

Comments

Josh Triplett Oct. 21, 2013, 8:04 p.m. UTC | #1
On Mon, Oct 21, 2013 at 06:24:02PM +0530, Rashika Kheria wrote:
> Pack the structures btrfs_space_info and btrfs_block_group_cache
> in ctree.h to eliminate holes detected by pahole so that less
> space is wasted.
> 
> Signed-off-by: Rashika Kheria <rashika.kheria@gmail.com>

Reviewed-by: Josh Triplett <josh@joshtriplett.org>

>  fs/btrfs/ctree.h |   52 +++++++++++++++++++++++++++-------------------------
>  1 file changed, 27 insertions(+), 25 deletions(-)
Zach Brown Oct. 29, 2013, 11:52 p.m. UTC | #2
> @@ -1106,6 +1106,15 @@ struct btrfs_space_info {
>  	u64 disk_total;		/* total bytes on disk, takes mirrors into
>  				   account */
>  
> +	struct list_head list;
> +	struct percpu_counter total_bytes_pinned;
> +
> +	struct rw_semaphore groups_sem;
> +	wait_queue_head_t wait;
> +	/* for block groups in our same type */
> +	struct list_head block_groups[BTRFS_NR_RAID_TYPES];
> +	spinlock_t lock;
> +
>  	/*
>  	 * bytes_pinned is kept in line with what is actually pinned, as in
>  	 * we've called update_block_group and dropped the bytes_used counter
> @@ -1116,24 +1125,15 @@ struct btrfs_space_info {
>  	 * once the transaction is committed.  It will be zero'ed everytime the
>  	 * transaction commits.
>  	 */
> -	struct percpu_counter total_bytes_pinned;
> -
> +	unsigned int force_alloc;	/* set if we need to force a chunk
> +					   alloc for this space */

All you've done here is move the unpacked bitfield to the end so that
pahole stops warning about it.  It still uses the same amount of space.
The structure packing isn't really changed.

(and moved total_bytes_pinned away from its comment that someone
lovingly wrote to describe it)

Let's just drop this patch; these structures are too big and fiddly to
take on with little patches.

- z
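
To make the size argument concrete, here is a hypothetical sketch (not the
btrfs structures, and assuming the usual 4-byte unsigned int): three one-bit
bitfields and a plain unsigned int occupy the same 8 bytes in either order,
so reordering only moves the 29 unused bits to the tail of the structure,
where pahole typically reports them as trailing bit padding rather than a
hole in the middle.

/* Hypothetical example, not the btrfs code. */
struct flags_then_int {
	unsigned int a:1;
	unsigned int b:1;
	unsigned int c:1;	/* 29 unused bits follow in this word      */
	unsigned int value;
};				/* sizeof == 8                             */

struct int_then_flags {
	unsigned int value;
	unsigned int a:1;
	unsigned int b:1;
	unsigned int c:1;	/* the same 29 unused bits, now at the end */
};				/* sizeof == 8 either way                  */

/* The reorder reclaims nothing; the total size is unchanged. */
_Static_assert(sizeof(struct flags_then_int) == sizeof(struct int_then_flags),
	       "reordering alone does not change the size");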

Patch

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0506f40..b809f30 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1106,6 +1106,15 @@ struct btrfs_space_info {
 	u64 disk_total;		/* total bytes on disk, takes mirrors into
 				   account */
 
+	struct list_head list;
+	struct percpu_counter total_bytes_pinned;
+
+	struct rw_semaphore groups_sem;
+	wait_queue_head_t wait;
+	/* for block groups in our same type */
+	struct list_head block_groups[BTRFS_NR_RAID_TYPES];
+	spinlock_t lock;
+
 	/*
 	 * bytes_pinned is kept in line with what is actually pinned, as in
 	 * we've called update_block_group and dropped the bytes_used counter
@@ -1116,24 +1125,15 @@ struct btrfs_space_info {
 	 * once the transaction is committed.  It will be zero'ed everytime the
 	 * transaction commits.
 	 */
-	struct percpu_counter total_bytes_pinned;
-
+	unsigned int force_alloc;	/* set if we need to force a chunk
+					   alloc for this space */
 	unsigned int full:1;	/* indicates that we cannot allocate any more
 				   chunks for this space */
 	unsigned int chunk_alloc:1;	/* set if we are allocating a chunk */
 
 	unsigned int flush:1;		/* set if we are trying to make space */
 
-	unsigned int force_alloc;	/* set if we need to force a chunk
-					   alloc for this space */
-
-	struct list_head list;
 
-	/* for block groups in our same type */
-	struct list_head block_groups[BTRFS_NR_RAID_TYPES];
-	spinlock_t lock;
-	struct rw_semaphore groups_sem;
-	wait_queue_head_t wait;
 };
 
 #define	BTRFS_BLOCK_RSV_GLOBAL		1
@@ -1206,11 +1206,6 @@ struct btrfs_caching_control {
 };
 
 struct btrfs_block_group_cache {
-	struct btrfs_key key;
-	struct btrfs_block_group_item item;
-	struct btrfs_fs_info *fs_info;
-	struct inode *inode;
-	spinlock_t lock;
 	u64 pinned;
 	u64 reserved;
 	u64 bytes_super;
@@ -1221,16 +1216,14 @@ struct btrfs_block_group_cache {
 	/* for raid56, this is a full stripe, without parity */
 	unsigned long full_stripe_len;
 
-	unsigned int ro:1;
-	unsigned int dirty:1;
-	unsigned int iref:1;
+	struct btrfs_fs_info *fs_info;
+	struct inode *inode;
+	/* cache tracking stuff */
+	int cached;
 
 	int disk_cache_state;
 
-	/* cache tracking stuff */
-	int cached;
 	struct btrfs_caching_control *caching_ctl;
-	u64 last_byte_to_unpin;
 
 	struct btrfs_space_info *space_info;
 
@@ -1243,9 +1236,6 @@ struct btrfs_block_group_cache {
 	/* for block groups in the same raid type */
 	struct list_head list;
 
-	/* usage count */
-	atomic_t count;
-
 	/* List of struct btrfs_free_clusters for this block group.
 	 * Today it will only have one thing on it, but that may change
 	 */
@@ -1253,6 +1243,18 @@ struct btrfs_block_group_cache {
 
 	/* For delayed block group creation */
 	struct list_head new_bg_list;
+
+	u64 last_byte_to_unpin;
+	spinlock_t lock;
+	/* usage count */
+	atomic_t count;
+
+	struct btrfs_key key;
+	struct btrfs_block_group_item item;
+
+	unsigned int ro:1;
+	unsigned int dirty:1;
+	unsigned int iref:1;
 };
 
 /* delayed seq elem */