[13/16] mm: Replace spin_is_locked() with lockdep

Message ID 20181003053902.6910-14-ldr709@gmail.com (mailing list archive)
State New, archived

Commit Message

Lance Roy Oct. 3, 2018, 5:38 a.m. UTC
lockdep_assert_held() is better suited to checking locking requirements,
since it won't get confused when someone else holds the lock. This is
also a step towards possibly removing spin_is_locked().

Signed-off-by: Lance Roy <ldr709@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Jan Kara <jack@suse.cz>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: <linux-mm@kvack.org>
---
 mm/khugepaged.c | 4 ++--
 mm/swap.c       | 3 +--
 2 files changed, 3 insertions(+), 4 deletions(-)
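
For background (an illustrative sketch, not part of the patch): spin_is_locked()
only reports that *some* CPU currently holds the lock, so the old check passes
even when a different task owns it, and on uniprocessor builds without spinlock
debugging it simply returns 0, hence the "NR_CPUS != 1" guard. By contrast,
lockdep_assert_held() warns unless the *current* context holds the lock, and it
compiles away entirely when lockdep is disabled. The hypothetical helper below
(assert_mm_lock_held() does not exist in the tree) shows the two styles side by
side against khugepaged_mm_lock from mm/khugepaged.c:

/* Illustrative only; this helper is not part of the patch or the tree. */
static void assert_mm_lock_held(void)
{
	/* Old style: passes whenever anyone holds the lock; always false on UP. */
	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	/* New style: warns unless *this* context holds the lock (lockdep builds only). */
	lockdep_assert_held(&khugepaged_mm_lock);
}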

Comments

Vlastimil Babka Oct. 3, 2018, 7:37 a.m. UTC | #1
On 10/3/18 7:38 AM, Lance Roy wrote:
> lockdep_assert_held() is better suited to checking locking requirements,
> since it won't get confused when someone else holds the lock. This is
> also a step towards possibly removing spin_is_locked().

Agreed

> Signed-off-by: Lance Roy <ldr709@gmail.com>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
> Cc: Yang Shi <yang.shi@linux.alibaba.com>
> Cc: Matthew Wilcox <mawilcox@microsoft.com>
> Cc: Mel Gorman <mgorman@techsingularity.net>
> Cc: Vlastimil Babka <vbabka@suse.cz>
> Cc: Jan Kara <jack@suse.cz>
> Cc: Shakeel Butt <shakeelb@google.com>
> Cc: <linux-mm@kvack.org>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

Patch

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index a31d740e6cd1..80f12467ccb3 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1225,7 +1225,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 {
 	struct mm_struct *mm = mm_slot->mm;
 
-	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
+	lockdep_assert_held(&khugepaged_mm_lock);
 
 	if (khugepaged_test_exit(mm)) {
 		/* free mm_slot */
@@ -1665,7 +1665,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 	int progress = 0;
 
 	VM_BUG_ON(!pages);
-	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
+	lockdep_assert_held(&khugepaged_mm_lock);
 
 	if (khugepaged_scan.mm_slot)
 		mm_slot = khugepaged_scan.mm_slot;
diff --git a/mm/swap.c b/mm/swap.c
index 26fc9b5f1b6c..c89eb442c0bf 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -824,8 +824,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 	VM_BUG_ON_PAGE(!PageHead(page), page);
 	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
 	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
-	VM_BUG_ON(NR_CPUS != 1 &&
-		  !spin_is_locked(&lruvec_pgdat(lruvec)->lru_lock));
+	lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);
 
 	if (!list)
 		SetPageLRU(page_tail);