
KVM: x86: adjust kvm_mmu_page member to save 8 bytes

Message ID 20180905215816.4779-1-richard.weiyang@gmail.com
State New, archived
Series KVM: x86: adjust kvm_mmu_page member to save 8 bytes

Commit Message

Wei Yang Sept. 5, 2018, 9:58 p.m. UTC
On a 64-bit machine, structs are naturally aligned to 8 bytes. Since the
kvm_mmu_page members *unsync* and *role* are no larger than 4 bytes each, we
can rearrange their order to compact the struct.

As the comment shows, *role* and *gfn* are used to key the shadow page. To
keep the comment valid, this patch moves *unsync* up and exchanges the
positions of *role* and *gfn*.
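
Holes like these can also be inspected with the pahole tool from the dwarves
package, for example against a built kernel image:

    pahole -C kvm_mmu_page vmlinux

which prints each member's offset and size along with any padding holes.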

According to /proc/slabinfo, kvm_mmu_page is 8 bytes smaller after applying
this patch, and each slab holds one more object:

    # name            <active_objs> <num_objs> <objsize> <objperslab>
    kvm_mmu_page_header      0           0       168         24    (before)

    kvm_mmu_page_header      0           0       160         25    (after)
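
Assuming 4 KiB (order-0) slab pages, this matches the objperslab column:
4096 / 168 = 24 objects fit before, while 4096 / 160 = 25 fit afterwards.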

Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
---
 arch/x86/include/asm/kvm_host.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

Comments

Paolo Bonzini Sept. 14, 2018, 5:58 p.m. UTC | #1
On 05/09/2018 23:58, Wei Yang wrote:
> On a 64-bit machine, structs are naturally aligned to 8 bytes. Since the
> kvm_mmu_page members *unsync* and *role* are no larger than 4 bytes each, we
> can rearrange their order to compact the struct.
> 
> As the comment shows, *role* and *gfn* are used to key the shadow page. To
> keep the comment valid, this patch moves *unsync* up and exchanges the
> positions of *role* and *gfn*.
> 
> According to /proc/slabinfo, kvm_mmu_page is 8 bytes smaller after applying
> this patch, and each slab holds one more object:
> 
>     # name            <active_objs> <num_objs> <objsize> <objperslab>
>     kvm_mmu_page_header      0           0       168         24    (before)
> 
>     kvm_mmu_page_header      0           0       160         25    (after)
> 
> Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
> ---
>  arch/x86/include/asm/kvm_host.h | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 00ddb0c9e612..f1a4e520ef5c 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -280,18 +280,18 @@ struct kvm_rmap_head {
>  struct kvm_mmu_page {
>  	struct list_head link;
>  	struct hlist_node hash_link;
> +	bool unsync;
>  
>  	/*
>  	 * The following two entries are used to key the shadow page in the
>  	 * hash table.
>  	 */
> -	gfn_t gfn;
>  	union kvm_mmu_page_role role;
> +	gfn_t gfn;
>  
>  	u64 *spt;
>  	/* hold the gfn of each spte inside spt */
>  	gfn_t *gfns;
> -	bool unsync;
>  	int root_count;          /* Currently serving as active root */
>  	unsigned int unsync_children;
>  	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
> 

Queued, thanks.

Paolo

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 00ddb0c9e612..f1a4e520ef5c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -280,18 +280,18 @@ struct kvm_rmap_head {
 struct kvm_mmu_page {
 	struct list_head link;
 	struct hlist_node hash_link;
+	bool unsync;
 
 	/*
 	 * The following two entries are used to key the shadow page in the
 	 * hash table.
 	 */
-	gfn_t gfn;
 	union kvm_mmu_page_role role;
+	gfn_t gfn;
 
 	u64 *spt;
 	/* hold the gfn of each spte inside spt */
 	gfn_t *gfns;
-	bool unsync;
 	int root_count;          /* Currently serving as active root */
 	unsigned int unsync_children;
 	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
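
To make the saving concrete, here is a standalone sketch (stand-in types and
a truncated member list, not the actual kernel definition) that reproduces
both orderings. On x86-64 it prints 80 bytes for the old layout and 72 for
the new one, the same 8-byte delta slabinfo reports for the full struct:

    /*
     * Standalone illustration, not kernel code: stand-in types and a
     * truncated member list mirroring the two orderings of kvm_mmu_page.
     * Build with: gcc -Wall -o layout layout.c
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;
    struct list_head { void *next, *prev; };        /* 16 bytes */
    struct hlist_node { void *next, *pprev; };      /* 16 bytes */
    union kvm_mmu_page_role { uint32_t word; };     /*  4 bytes */

    struct old_layout {                     /* before the patch */
            struct list_head link;
            struct hlist_node hash_link;
            gfn_t gfn;
            union kvm_mmu_page_role role;   /* 4-byte hole before spt */
            uint64_t *spt;
            gfn_t *gfns;
            bool unsync;                    /* 3-byte hole before root_count */
            int root_count;
            unsigned int unsync_children;   /* 4 bytes of tail padding follow */
    };

    struct new_layout {                     /* after the patch */
            struct list_head link;
            struct hlist_node hash_link;
            bool unsync;                    /* 3-byte hole before role */
            union kvm_mmu_page_role role;
            gfn_t gfn;
            uint64_t *spt;
            gfn_t *gfns;
            int root_count;
            unsigned int unsync_children;   /* packs against root_count */
    };

    int main(void)
    {
            printf("old: %zu bytes\n", sizeof(struct old_layout)); /* 80 */
            printf("new: %zu bytes\n", sizeof(struct new_layout)); /* 72 */
            return 0;
    }

In this truncated sketch the win comes from *unsync* filling what used to be
alignment padding: moving the 1-byte bool next to the 4-byte *role* leaves a
single 3-byte hole in place of three separate holes totalling 11 bytes.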