
[3/3] tools: fix build error on parameter name omitted

Message ID: 20241025031847.6274-4-richard.weiyang@gmail.com (mailing list archive)
State: New
Series: mm/vma: miss to restore vmi.index on expansion failure

Commit Message

Wei Yang Oct. 25, 2024, 3:18 a.m. UTC
On some older gcc versions, e.g. 8.2.1, an omitted parameter name in a
function definition is treated as an error.

Just add names for the omitted parameters.

Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
CC: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
---
 tools/testing/vma/vma.c          |  2 +-
 tools/testing/vma/vma_internal.h | 82 ++++++++++++++++----------------
 2 files changed, 42 insertions(+), 42 deletions(-)
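
For context, unnamed parameters in function definitions were only added in
C23, so an older gcc such as 8.2.1 rejects the existing stubs outright; that
is the build failure this patch addresses. A minimal sketch of the construct
and of the fix, where "example_stub" is a hypothetical name modelled on the
helpers in tools/testing/vma/vma_internal.h:

/*
 * Pre-patch form, with the parameter name omitted:
 *
 *     static inline void example_stub(struct vm_area_struct *) { }
 *
 * Older gcc treats the omitted name as a hard error when compiling C.
 * Naming the parameter, as below, is accepted by any gcc version.
 */
struct vm_area_struct;

static inline void example_stub(struct vm_area_struct *vma)
{
	(void)vma;	/* only needed if -Wunused-parameter is in effect */
}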

Comments

Lorenzo Stoakes Oct. 25, 2024, 6:50 a.m. UTC | #1
On Fri, Oct 25, 2024 at 03:18:47AM +0000, Wei Yang wrote:
> On some older gcc versions, e.g. 8.2.1, an omitted parameter name in a
> function definition is treated as an error.
>
> Just add names for the omitted parameters.

No, sorry. This is just churn and no bot has an issue with this, and we
don't support ye olde compilers forever doing dumb things.

I'm guessing this is actually a warning but CONFIG_WERROR is turned on.

> [...]

Patch

diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c
index 46e8989a90ae..212b14a52bd3 100644
--- a/tools/testing/vma/vma.c
+++ b/tools/testing/vma/vma.c
@@ -219,7 +219,7 @@ static bool vma_write_started(struct vm_area_struct *vma)
 }
 
 /* Helper function providing a dummy vm_ops->close() method.*/
-static void dummy_close(struct vm_area_struct *)
+static void dummy_close(struct vm_area_struct *vma)
 {
 }
 
diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
index c5b9da034511..b7aa9369796e 100644
--- a/tools/testing/vma/vma_internal.h
+++ b/tools/testing/vma/vma_internal.h
@@ -620,11 +620,11 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 }
 
-static inline void fput(struct file *)
+static inline void fput(struct file *f)
 {
 }
 
-static inline void mpol_put(struct mempolicy *)
+static inline void mpol_put(struct mempolicy *mp)
 {
 }
 
@@ -648,15 +648,15 @@ static inline void lru_add_drain(void)
 {
 }
 
-static inline void tlb_gather_mmu(struct mmu_gather *, struct mm_struct *)
+static inline void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
 {
 }
 
-static inline void update_hiwater_rss(struct mm_struct *)
+static inline void update_hiwater_rss(struct mm_struct *mm)
 {
 }
 
-static inline void update_hiwater_vm(struct mm_struct *)
+static inline void update_hiwater_vm(struct mm_struct *mm)
 {
 }
 
@@ -686,23 +686,23 @@ static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 	(void)mm_wr_locked;
 }
 
-static inline void mapping_unmap_writable(struct address_space *)
+static inline void mapping_unmap_writable(struct address_space *mapping)
 {
 }
 
-static inline void flush_dcache_mmap_lock(struct address_space *)
+static inline void flush_dcache_mmap_lock(struct address_space *mapping)
 {
 }
 
-static inline void tlb_finish_mmu(struct mmu_gather *)
+static inline void tlb_finish_mmu(struct mmu_gather *tlb)
 {
 }
 
-static inline void get_file(struct file *)
+static inline void get_file(struct file *f)
 {
 }
 
-static inline int vma_dup_policy(struct vm_area_struct *, struct vm_area_struct *)
+static inline int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
 {
 	return 0;
 }
@@ -750,31 +750,31 @@ static inline void vm_acct_memory(long pages)
 {
 }
 
-static inline void vma_interval_tree_insert(struct vm_area_struct *,
-					    struct rb_root_cached *)
+static inline void vma_interval_tree_insert(struct vm_area_struct *vma,
+					    struct rb_root_cached *root)
 {
 }
 
-static inline void vma_interval_tree_remove(struct vm_area_struct *,
-					    struct rb_root_cached *)
+static inline void vma_interval_tree_remove(struct vm_area_struct *vma,
+					    struct rb_root_cached *root)
 {
 }
 
-static inline void flush_dcache_mmap_unlock(struct address_space *)
+static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
 {
 }
 
-static inline void anon_vma_interval_tree_insert(struct anon_vma_chain*,
-						 struct rb_root_cached *)
+static inline void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
+						 struct rb_root_cached *root)
 {
 }
 
-static inline void anon_vma_interval_tree_remove(struct anon_vma_chain*,
-						 struct rb_root_cached *)
+static inline void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
+						 struct rb_root_cached *root)
 {
 }
 
-static inline void uprobe_mmap(struct vm_area_struct *)
+static inline void uprobe_mmap(struct vm_area_struct *vma)
 {
 }
 
@@ -786,15 +786,15 @@ static inline void uprobe_munmap(struct vm_area_struct *vma,
 	(void)end;
 }
 
-static inline void i_mmap_lock_write(struct address_space *)
+static inline void i_mmap_lock_write(struct address_space *mapping)
 {
 }
 
-static inline void anon_vma_lock_write(struct anon_vma *)
+static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
 {
 }
 
-static inline void vma_assert_write_locked(struct vm_area_struct *)
+static inline void vma_assert_write_locked(struct vm_area_struct *vma)
 {
 }
 
@@ -804,16 +804,16 @@ static inline void unlink_anon_vmas(struct vm_area_struct *vma)
 	vma->anon_vma->was_unlinked = true;
 }
 
-static inline void anon_vma_unlock_write(struct anon_vma *)
+static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
 {
 }
 
-static inline void i_mmap_unlock_write(struct address_space *)
+static inline void i_mmap_unlock_write(struct address_space *mapping)
 {
 }
 
-static inline void anon_vma_merge(struct vm_area_struct *,
-				  struct vm_area_struct *)
+static inline void anon_vma_merge(struct vm_area_struct *vma,
+				  struct vm_area_struct *next)
 {
 }
 
@@ -830,15 +830,15 @@ static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
 	return 0;
 }
 
-static inline void mmap_write_downgrade(struct mm_struct *)
+static inline void mmap_write_downgrade(struct mm_struct *mm)
 {
 }
 
-static inline void mmap_read_unlock(struct mm_struct *)
+static inline void mmap_read_unlock(struct mm_struct *mm)
 {
 }
 
-static inline void mmap_write_unlock(struct mm_struct *)
+static inline void mmap_write_unlock(struct mm_struct *mm)
 {
 }
 
@@ -862,11 +862,11 @@ static inline void arch_unmap(struct mm_struct *mm,
 	(void)end;
 }
 
-static inline void mmap_assert_locked(struct mm_struct *)
+static inline void mmap_assert_locked(struct mm_struct *mm)
 {
 }
 
-static inline bool mpol_equal(struct mempolicy *, struct mempolicy *)
+static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
 {
 	return true;
 }
@@ -878,44 +878,44 @@ static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
 	(void)vm_flags;
 }
 
-static inline bool mapping_can_writeback(struct address_space *)
+static inline bool mapping_can_writeback(struct address_space *mapping)
 {
 	return true;
 }
 
-static inline bool is_vm_hugetlb_page(struct vm_area_struct *)
+static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
 {
 	return false;
 }
 
-static inline bool vma_soft_dirty_enabled(struct vm_area_struct *)
+static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
 {
 	return false;
 }
 
-static inline bool userfaultfd_wp(struct vm_area_struct *)
+static inline bool userfaultfd_wp(struct vm_area_struct *vma)
 {
 	return false;
 }
 
-static inline void mmap_assert_write_locked(struct mm_struct *)
+static inline void mmap_assert_write_locked(struct mm_struct *mm)
 {
 }
 
-static inline void mutex_lock(struct mutex *)
+static inline void mutex_lock(struct mutex *lock)
 {
 }
 
-static inline void mutex_unlock(struct mutex *)
+static inline void mutex_unlock(struct mutex *lock)
 {
 }
 
-static inline bool mutex_is_locked(struct mutex *)
+static inline bool mutex_is_locked(struct mutex *lock)
 {
 	return true;
 }
 
-static inline bool signal_pending(void *)
+static inline bool signal_pending(void *p)
 {
 	return false;
 }