diff mbox series

[RFC,2/4] mm/mmu_notifier: use unsigned for event field in range struct

Message ID 20190131183706.20980-3-jglisse@redhat.com (mailing list archive)
State New, archived
Headers show
Series Restore change_pte optimization to its former glory | expand

Commit Message

Jerome Glisse Jan. 31, 2019, 6:37 p.m. UTC
From: Jérôme Glisse <jglisse@redhat.com>

Use unsigned for the event field in the range struct so that we can also
set flags alongside the event. This patch changes the field type and
introduces the helper.

Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: kvm@vger.kernel.org
---
 include/linux/mmu_notifier.h | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

Comments

Andrea Arcangeli Feb. 2, 2019, 1:13 a.m. UTC | #1
On Thu, Jan 31, 2019 at 01:37:04PM -0500, Jerome Glisse wrote:
> From: Jérôme Glisse <jglisse@redhat.com>
> 
> Use unsigned for the event field in the range struct so that we can also
> set flags alongside the event. This patch changes the field type and
> introduces the helper.
> 
> Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
> Cc: Andrea Arcangeli <aarcange@redhat.com>
> Cc: Peter Xu <peterx@redhat.com>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> Cc: Radim Krčmář <rkrcmar@redhat.com>
> Cc: kvm@vger.kernel.org
> ---
>  include/linux/mmu_notifier.h | 8 ++++++--
>  1 file changed, 6 insertions(+), 2 deletions(-)
> 
> diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
> index be873c431886..d7a35975c2bd 100644
> --- a/include/linux/mmu_notifier.h
> +++ b/include/linux/mmu_notifier.h
> @@ -6,6 +6,7 @@
>  #include <linux/spinlock.h>
>  #include <linux/mm_types.h>
>  #include <linux/srcu.h>
> +#include <linux/log2.h>
>  
>  struct mmu_notifier;
>  struct mmu_notifier_ops;
> @@ -38,8 +39,11 @@ enum mmu_notifier_event {
>  	MMU_NOTIFY_PROTECTION_VMA,
>  	MMU_NOTIFY_PROTECTION_PAGE,
>  	MMU_NOTIFY_SOFT_DIRTY,
> +	MMU_NOTIFY_EVENT_MAX
>  };
>  
> +#define MMU_NOTIFIER_EVENT_BITS order_base_2(MMU_NOTIFY_EVENT_MAX)
> +
>  #ifdef CONFIG_MMU_NOTIFIER
>  
>  /*
> @@ -60,7 +64,7 @@ struct mmu_notifier_range {
>  	struct mm_struct *mm;
>  	unsigned long start;
>  	unsigned long end;
> -	enum mmu_notifier_event event;
> +	unsigned event;
>  	bool blockable;
>  };

This is only allocated on the stack, so mixing bitfields with the enum
in the same 4 bytes just to save 4 bytes of RAM isn't of maximum
priority.

A possibly cleaner way to save those 4 bytes without mixing the enum
with bitfields by hand is to add an "unsigned short flags" field, which
will make "event/flags/blockable" fit in the same 8 bytes as before the
patch (bool only needs 1 byte, and the first bitfield can then start
from 0).

Yet another way is to drop blockable and convert it to a flag in
"unsigned int flags".
diff mbox series

Patch

diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index be873c431886..d7a35975c2bd 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -6,6 +6,7 @@ 
 #include <linux/spinlock.h>
 #include <linux/mm_types.h>
 #include <linux/srcu.h>
+#include <linux/log2.h>
 
 struct mmu_notifier;
 struct mmu_notifier_ops;
@@ -38,8 +39,11 @@  enum mmu_notifier_event {
 	MMU_NOTIFY_PROTECTION_VMA,
 	MMU_NOTIFY_PROTECTION_PAGE,
 	MMU_NOTIFY_SOFT_DIRTY,
+	MMU_NOTIFY_EVENT_MAX
 };
 
+#define MMU_NOTIFIER_EVENT_BITS order_base_2(MMU_NOTIFY_EVENT_MAX)
+
 #ifdef CONFIG_MMU_NOTIFIER
 
 /*
@@ -60,7 +64,7 @@  struct mmu_notifier_range {
 	struct mm_struct *mm;
 	unsigned long start;
 	unsigned long end;
-	enum mmu_notifier_event event;
+	unsigned event;
 	bool blockable;
 };
 
@@ -352,7 +356,7 @@  static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 
 
 static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
-					   enum mmu_notifier_event event,
+					   unsigned event,
 					   struct vm_area_struct *vma,
 					   struct mm_struct *mm,
 					   unsigned long start,