
ceph: Reorder fields in 'struct ceph_snapid_map'

Message ID: 559c9a70419846e0cfc319505d3d5fffd45b3358.1682618727.git.christophe.jaillet@wanadoo.fr
State: New, archived
Series: ceph: Reorder fields in 'struct ceph_snapid_map'

Commit Message

Christophe JAILLET April 27, 2023, 6:05 p.m. UTC
Group some variables based on their sizes to reduce holes.
On x86_64, this shrinks the size of 'struct ceph_snapid_map' from 72 to 64
bytes.

When such a structure is allocated, the slab allocator rounds the
request up to the next kmalloc size class: 72 bytes are served from
kmalloc-96, while 64 bytes fit exactly in kmalloc-64.

So, on x86_64, this change saves 32 bytes per allocation and has the
structure fit in a single cacheline.

Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
---
Using pahole

Before:
======
struct ceph_snapid_map {
	struct rb_node             node __attribute__((__aligned__(8))); /*     0    24 */
	struct list_head           lru;                  /*    24    16 */
	atomic_t                   ref;                  /*    40     4 */

	/* XXX 4 bytes hole, try to pack */

	u64                        snap;                 /*    48     8 */
	dev_t                      dev;                  /*    56     4 */

	/* XXX 4 bytes hole, try to pack */

	/* --- cacheline 1 boundary (64 bytes) --- */
	long unsigned int          last_used;            /*    64     8 */

	/* size: 72, cachelines: 2, members: 6 */
	/* sum members: 64, holes: 2, sum holes: 8 */
	/* forced alignments: 1 */
	/* last cacheline: 8 bytes */
} __attribute__((__aligned__(8)));
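
After (expected):
======
Offsets below are worked out by hand from the member sizes above rather
than taken from a pahole run; 'pahole -C ceph_snapid_map' on an object
built with debug info should report the equivalent.

struct ceph_snapid_map {
	struct rb_node             node __attribute__((__aligned__(8))); /*     0    24 */
	struct list_head           lru;                  /*    24    16 */
	atomic_t                   ref;                  /*    40     4 */
	dev_t                      dev;                  /*    44     4 */
	u64                        snap;                 /*    48     8 */
	long unsigned int          last_used;            /*    56     8 */

	/* size: 64, cachelines: 1, members: 6 */
	/* forced alignments: 1 */
} __attribute__((__aligned__(8)));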
---
 fs/ceph/mds_client.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

Comments

Xiubo Li April 28, 2023, 12:53 a.m. UTC | #1
On 4/28/23 02:05, Christophe JAILLET wrote:
> Group some variables based on their sizes to reduce holes.
> On x86_64, this shrinks the size of 'struct ceph_snapid_map' from 72 to 64
> bytes.
>
> When such a structure is allocated, the slab allocator rounds the
> request up to the next kmalloc size class: 72 bytes are served from
> kmalloc-96, while 64 bytes fit exactly in kmalloc-64.
>
> So, on x86_64, this change saves 32 bytes per allocation and has the
> structure fit in a single cacheline.
>
> Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
> ---
> Using pahole
>
> Before:
> ======
> struct ceph_snapid_map {
> 	struct rb_node             node __attribute__((__aligned__(8))); /*     0    24 */
> 	struct list_head           lru;                  /*    24    16 */
> 	atomic_t                   ref;                  /*    40     4 */
>
> 	/* XXX 4 bytes hole, try to pack */
>
> 	u64                        snap;                 /*    48     8 */
> 	dev_t                      dev;                  /*    56     4 */
>
> 	/* XXX 4 bytes hole, try to pack */
>
> 	/* --- cacheline 1 boundary (64 bytes) --- */
> 	long unsigned int          last_used;            /*    64     8 */
>
> 	/* size: 72, cachelines: 2, members: 6 */
> 	/* sum members: 64, holes: 2, sum holes: 8 */
> 	/* forced alignments: 1 */
> 	/* last cacheline: 8 bytes */
> } __attribute__((__aligned__(8)));
> ---
>   fs/ceph/mds_client.h | 2 +-
>   1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
> index 0598faa50e2e..2328dbda5ab6 100644
> --- a/fs/ceph/mds_client.h
> +++ b/fs/ceph/mds_client.h
> @@ -355,8 +355,8 @@ struct ceph_snapid_map {
>   	struct rb_node node;
>   	struct list_head lru;
>   	atomic_t ref;
> -	u64 snap;
>   	dev_t dev;
> +	u64 snap;
>   	unsigned long last_used;
>   };
>   

This looks good to me. Thanks.

Will apply it to the testing branch.

- Xiubo
Jeff Layton April 28, 2023, 11:10 a.m. UTC | #2
On Fri, 2023-04-28 at 08:53 +0800, Xiubo Li wrote:
> On 4/28/23 02:05, Christophe JAILLET wrote:
> > Group some variables based on their sizes to reduce holes.
> > On x86_64, this shrinks the size of 'struct ceph_snapid_map' from 72 to 64
> > bytes.
> > 
> > When such a structure is allocated, the slab allocator rounds the
> > request up to the next kmalloc size class: 72 bytes are served from
> > kmalloc-96, while 64 bytes fit exactly in kmalloc-64.
> > 
> > So, on x86_64, this change saves 32 bytes per allocation and has the
> > structure fit in a single cacheline.
> > 
> > Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
> > ---
> > Using pahole
> > 
> > Before:
> > ======
> > struct ceph_snapid_map {
> > 	struct rb_node             node __attribute__((__aligned__(8))); /*     0    24 */
> > 	struct list_head           lru;                  /*    24    16 */
> > 	atomic_t                   ref;                  /*    40     4 */
> > 
> > 	/* XXX 4 bytes hole, try to pack */
> > 
> > 	u64                        snap;                 /*    48     8 */
> > 	dev_t                      dev;                  /*    56     4 */
> > 
> > 	/* XXX 4 bytes hole, try to pack */
> > 
> > 	/* --- cacheline 1 boundary (64 bytes) --- */
> > 	long unsigned int          last_used;            /*    64     8 */
> > 
> > 	/* size: 72, cachelines: 2, members: 6 */
> > 	/* sum members: 64, holes: 2, sum holes: 8 */
> > 	/* forced alignments: 1 */
> > 	/* last cacheline: 8 bytes */
> > } __attribute__((__aligned__(8)));
> > ---
> >   fs/ceph/mds_client.h | 2 +-
> >   1 file changed, 1 insertion(+), 1 deletion(-)
> > 
> > diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
> > index 0598faa50e2e..2328dbda5ab6 100644
> > --- a/fs/ceph/mds_client.h
> > +++ b/fs/ceph/mds_client.h
> > @@ -355,8 +355,8 @@ struct ceph_snapid_map {
> >   	struct rb_node node;
> >   	struct list_head lru;
> >   	atomic_t ref;
> > -	u64 snap;
> >   	dev_t dev;
> > +	u64 snap;
> >   	unsigned long last_used;
> >   };
> >   
> 
> This looks good to me. Thanks.
> 
> Will apply it to the testing branch.
> 
> - Xiubo
> 
> 
> 

Reviewed-by: Jeff Layton <jlayton@kernel.org>

Patch

diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 0598faa50e2e..2328dbda5ab6 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -355,8 +355,8 @@ struct ceph_snapid_map {
 	struct rb_node node;
 	struct list_head lru;
 	atomic_t ref;
-	u64 snap;
 	dev_t dev;
+	u64 snap;
 	unsigned long last_used;
 };
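
A quick way to reproduce the size change outside the kernel tree is a
small userspace sketch that mocks the kernel types with same-size
stand-ins (LP64/x86_64 assumed; the struct and type names below are
illustrative, not the kernel's):

#include <stdio.h>
#include <stdint.h>

/* Same-size stand-ins for the kernel types on x86_64; illustrative only. */
struct rb_node { unsigned long parent_color; struct rb_node *right, *left; };
struct list_head { struct list_head *next, *prev; };
typedef struct { int counter; } atomic_t;
typedef uint64_t u64;
typedef uint32_t dev32_t;	/* stand-in for the kernel's 32-bit dev_t */

/* Field order before the patch: 4-byte holes after 'ref' and 'dev', 72 bytes. */
struct snapid_map_before {
	struct rb_node node;
	struct list_head lru;
	atomic_t ref;
	u64 snap;
	dev32_t dev;
	unsigned long last_used;
};

/* Field order after the patch: 'dev' fills the hole after 'ref', 64 bytes. */
struct snapid_map_after {
	struct rb_node node;
	struct list_head lru;
	atomic_t ref;
	dev32_t dev;
	u64 snap;
	unsigned long last_used;
};

int main(void)
{
	printf("before: %zu bytes\n", sizeof(struct snapid_map_before)); /* 72 */
	printf("after:  %zu bytes\n", sizeof(struct snapid_map_after));  /* 64 */
	return 0;
}

On an x86_64 build this should print 72 and 64, matching the pahole
report in the commit notes.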