
[1/1] mm: introduce vm_flags_reset_once to replace WRITE_ONCE vm_flags updates

Message ID 20230201000116.1333160-1-surenb@google.com (mailing list archive)
State New
Series [1/1] mm: introduce vm_flags_reset_once to replace WRITE_ONCE vm_flags updates

Commit Message

Suren Baghdasaryan Feb. 1, 2023, 12:01 a.m. UTC
Provide vm_flags_reset_once() and replace the vm_flags updates which used
WRITE_ONCE() to prevent compiler optimizations.

Fixes: 0cce31a0aa0e ("mm: replace vma->vm_flags direct modifications with modifier calls")
Reported-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
Notes:
- The patch applies cleanly over mm-unstable
- The SHA in Fixes: line is from mm-unstable, so is... unstable

 include/linux/mm.h | 7 +++++++
 mm/mlock.c         | 4 ++--
 2 files changed, 9 insertions(+), 2 deletions(-)
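
As background for the commit message above, the sketch below is a minimal userspace-only
illustration (not kernel code) of why these stores need WRITE_ONCE(): with plain
assignments, the compiler is free to merge or elide the transient VM_IO store that
mlock_vma_pages_range() relies on, since the final value written is the same either way.
The WRITE_ONCE() stand-in, the flag values, and the trimmed vm_area_struct are
simplifications of the kernel's definitions, shown here only to make the concern concrete.

#include <stdio.h>

typedef unsigned long vm_flags_t;

/* Values mirror the kernel's VM_LOCKED/VM_IO bits, reproduced here for the sketch. */
#define VM_LOCKED 0x00002000UL
#define VM_IO     0x00004000UL

/* Greatly simplified stand-in for the kernel's WRITE_ONCE(): a volatile store. */
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

struct vm_area_struct {
	vm_flags_t vm_flags;
};

/* Volatile store: each write must actually be emitted, in program order. */
static void reset_once(struct vm_area_struct *vma, vm_flags_t flags)
{
	WRITE_ONCE(vma->vm_flags, flags);
}

int main(void)
{
	struct vm_area_struct vma = { .vm_flags = VM_LOCKED };
	vm_flags_t newflags = vma.vm_flags | VM_IO;

	/*
	 * With plain "vma.vm_flags = ..." assignments, the compiler could
	 * collapse the two stores into one, so a concurrent lockless reader
	 * of vm_flags would never observe the transient VM_IO bit.
	 */
	reset_once(&vma, newflags);		/* readers may see VM_IO set   */
	/* ... walk_page_range() runs here in the real mlock code ...       */
	reset_once(&vma, newflags & ~VM_IO);	/* and then see it cleared     */

	printf("final flags: %#lx\n", vma.vm_flags);
	return 0;
}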

Comments

Michal Hocko Feb. 1, 2023, 7:43 a.m. UTC | #1
On Tue 31-01-23 16:01:16, Suren Baghdasaryan wrote:
> Provide vm_flags_reset_once() and replace the vm_flags updates which used
> WRITE_ONCE() to prevent compiler optimizations.
> 
> Fixes: 0cce31a0aa0e ("mm: replace vma->vm_flags direct modifications with modifier calls")
> Reported-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>

This would have been better folded into the vm_flags modification patch
because it would be a more obvious change. Hugh has provided a very nice
comment in mlock_vma_pages_range, but the git blame would be more visible
when the conversion is directly from WRITE_ONCE.

One way or the other
Acked-by: Michal Hocko <mhocko@suse.com>

> ---
> Notes:
> - The patch applies cleanly over mm-unstable
> - The SHA in Fixes: line is from mm-unstable, so is... unstable
> 
>  include/linux/mm.h | 7 +++++++
>  mm/mlock.c         | 4 ++--
>  2 files changed, 9 insertions(+), 2 deletions(-)
> 
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 5bf0ad48faaa..23ce04f6e91e 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -648,6 +648,13 @@ static inline void vm_flags_reset(struct vm_area_struct *vma,
>  	vm_flags_init(vma, flags);
>  }
>  
> +static inline void vm_flags_reset_once(struct vm_area_struct *vma,
> +				       vm_flags_t flags)
> +{
> +	mmap_assert_write_locked(vma->vm_mm);
> +	WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags);
> +}
> +
>  static inline void vm_flags_set(struct vm_area_struct *vma,
>  				vm_flags_t flags)
>  {
> diff --git a/mm/mlock.c b/mm/mlock.c
> index ed49459e343e..617469fce96d 100644
> --- a/mm/mlock.c
> +++ b/mm/mlock.c
> @@ -380,7 +380,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
>  	 */
>  	if (newflags & VM_LOCKED)
>  		newflags |= VM_IO;
> -	vm_flags_reset(vma, newflags);
> +	vm_flags_reset_once(vma, newflags);
>  
>  	lru_add_drain();
>  	walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
> @@ -388,7 +388,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
>  
>  	if (newflags & VM_IO) {
>  		newflags &= ~VM_IO;
> -		vm_flags_reset(vma, newflags);
> +		vm_flags_reset_once(vma, newflags);
>  	}
>  }
>  
> -- 
> 2.39.1.456.gfc5497dd1b-goog

Hyeonggon Yoo Feb. 8, 2023, 1:46 a.m. UTC | #2
On Tue, Jan 31, 2023 at 04:01:16PM -0800, Suren Baghdasaryan wrote:
> Provide vm_flags_reset_once() and replace the vm_flags updates which used
> WRITE_ONCE() to prevent compiler optimizations.
> 
> Fixes: 0cce31a0aa0e ("mm: replace vma->vm_flags direct modifications with modifier calls")
> Reported-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> ---
> Notes:
> - The patch applies cleanly over mm-unstable
> - The SHA in Fixes: line is from mm-unstable, so is... unstable
> 
>  include/linux/mm.h | 7 +++++++
>  mm/mlock.c         | 4 ++--
>  2 files changed, 9 insertions(+), 2 deletions(-)
> 
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 5bf0ad48faaa..23ce04f6e91e 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -648,6 +648,13 @@ static inline void vm_flags_reset(struct vm_area_struct *vma,
>  	vm_flags_init(vma, flags);
>  }
>  
> +static inline void vm_flags_reset_once(struct vm_area_struct *vma,
> +				       vm_flags_t flags)
> +{
> +	mmap_assert_write_locked(vma->vm_mm);
> +	WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags);
> +}
> +
>  static inline void vm_flags_set(struct vm_area_struct *vma,
>  				vm_flags_t flags)
>  {
> diff --git a/mm/mlock.c b/mm/mlock.c
> index ed49459e343e..617469fce96d 100644
> --- a/mm/mlock.c
> +++ b/mm/mlock.c
> @@ -380,7 +380,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
>  	 */
>  	if (newflags & VM_LOCKED)
>  		newflags |= VM_IO;
> -	vm_flags_reset(vma, newflags);
> +	vm_flags_reset_once(vma, newflags);
>  
>  	lru_add_drain();
>  	walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
> @@ -388,7 +388,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
>  
>  	if (newflags & VM_IO) {
>  		newflags &= ~VM_IO;
> -		vm_flags_reset(vma, newflags);
> +		vm_flags_reset_once(vma, newflags);
>  	}
>  }

Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

>  
> -- 
> 2.39.1.456.gfc5497dd1b-goog
>

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5bf0ad48faaa..23ce04f6e91e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -648,6 +648,13 @@ static inline void vm_flags_reset(struct vm_area_struct *vma,
 	vm_flags_init(vma, flags);
 }
 
+static inline void vm_flags_reset_once(struct vm_area_struct *vma,
+				       vm_flags_t flags)
+{
+	mmap_assert_write_locked(vma->vm_mm);
+	WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags);
+}
+
 static inline void vm_flags_set(struct vm_area_struct *vma,
 				vm_flags_t flags)
 {
diff --git a/mm/mlock.c b/mm/mlock.c
index ed49459e343e..617469fce96d 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -380,7 +380,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
 	 */
 	if (newflags & VM_LOCKED)
 		newflags |= VM_IO;
-	vm_flags_reset(vma, newflags);
+	vm_flags_reset_once(vma, newflags);
 
 	lru_add_drain();
 	walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
@@ -388,7 +388,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
 
 	if (newflags & VM_IO) {
 		newflags &= ~VM_IO;
-		vm_flags_reset(vma, newflags);
+		vm_flags_reset_once(vma, newflags);
 	}
 }