
memcg: Optimise relock_page_lruvec functions

Message ID: 20211019125034.2799438-1-willy@infradead.org
State: New
Series: memcg: Optimise relock_page_lruvec functions

Commit Message

Matthew Wilcox Oct. 19, 2021, 12:50 p.m. UTC
Leave interrupts disabled when we change which lru lock is held.
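
For illustration (a sketch of the effect, not taken from the diff
itself): with the old helpers, switching from one lruvec's lock to
another under the irqsave variant briefly re-enabled interrupts if the
caller had entered with them enabled:

	/* before: irq state toggled while switching lru locks */
	unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	lock_page_lruvec_irqsave(page, flags);

	/* after: interrupts stay disabled across the switch */
	unlock_page_lruvec(locked_lruvec);
	lock_page_lruvec(page);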

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/memcontrol.h | 27 ++++++++++++++-------------
 1 file changed, 14 insertions(+), 13 deletions(-)

Comments

Vlastimil Babka Oct. 25, 2021, 10:44 a.m. UTC | #1
On 10/19/21 14:50, Matthew Wilcox (Oracle) wrote:
> Leave interrupts disabled when we change which lru lock is held.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

Assuming lockdep is fine with e.g.:

spin_lock_irqsave(A);
spin_unlock(A);
spin_lock(B);
spin_unlock_irqrestore(B);

(with A and B of same class).
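
For concreteness, that pattern maps onto the helpers roughly like this
(a sketch; the lock/unlock helpers boil down to spin lock/unlock calls
on lruvec->lru_lock):

	/* caller's original lock: */
	spin_lock_irqsave(&lruvec_a->lru_lock, flags);
	/* relock_page_lruvec() switching lruvecs: */
	spin_unlock(&lruvec_a->lru_lock);
	spin_lock(&lruvec_b->lru_lock);
	/* caller's eventual unlock: */
	spin_unlock_irqrestore(&lruvec_b->lru_lock, flags);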

> ---
>  include/linux/memcontrol.h | 27 ++++++++++++++-------------
>  1 file changed, 14 insertions(+), 13 deletions(-)
> 
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 3096c9a0ee01..a6a90b00a22b 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -1524,16 +1524,22 @@ static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec)
>  }
>  
>  /* Don't lock again iff page's lruvec locked */
> -static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
> +static inline struct lruvec *relock_page_lruvec(struct page *page,
>  		struct lruvec *locked_lruvec)
>  {
> -	if (locked_lruvec) {
> -		if (page_matches_lruvec(page, locked_lruvec))
> -			return locked_lruvec;
> +	if (page_matches_lruvec(page, locked_lruvec))
> +		return locked_lruvec;
>  
> -		unlock_page_lruvec_irq(locked_lruvec);
> -	}
> +	unlock_page_lruvec(locked_lruvec);
> +	return lock_page_lruvec(page);
> +}
>  
> +/* Don't lock again iff page's lruvec locked */
> +static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
> +		struct lruvec *locked_lruvec)
> +{
> +	if (locked_lruvec)
> +		return relock_page_lruvec(page, locked_lruvec);
>  	return lock_page_lruvec_irq(page);
>  }
>  
> @@ -1541,13 +1547,8 @@ static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
>  static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
>  		struct lruvec *locked_lruvec, unsigned long *flags)
>  {
> -	if (locked_lruvec) {
> -		if (page_matches_lruvec(page, locked_lruvec))
> -			return locked_lruvec;
> -
> -		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
> -	}
> -
> +	if (locked_lruvec)
> +		return relock_page_lruvec(page, locked_lruvec);
>  	return lock_page_lruvec_irqsave(page, flags);
>  }
>  
>
Peter Zijlstra Oct. 25, 2021, 11:16 a.m. UTC | #2
On Mon, Oct 25, 2021 at 12:44:05PM +0200, Vlastimil Babka wrote:
> On 10/19/21 14:50, Matthew Wilcox (Oracle) wrote:
> > Leave interrupts disabled when we change which lru lock is held.
> > 
> > Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> 
> Acked-by: Vlastimil Babka <vbabka@suse.cz>
> 
> Assuming lockdep is fine with e.g.:
> 
> spin_lock_irqsave(A);
> spin_unlock(A);
> spin_lock(B);
> spin_unlock_irqrestore(B);
> 
> (with A and B of same class).

It's unconditionally okay with that pattern. As far as lockdep
is concerned, there really is no difference vs:

	local_irq_save()
	spin_lock(a)
	spin_unlock(a)
	spin_lock(b)
	spin_unlock(b)
	local_irq_restore()

It's the RT locking primitives that care about the difference :-)
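
For context, a sketch of the caller pattern these helpers serve
(roughly what release_pages() does; details elided):

	struct lruvec *lruvec = NULL;
	unsigned long flags;

	list_for_each_entry(page, &pages, lru) {
		/* takes, keeps, or switches the lru lock as needed */
		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
		/* ... operate on page under its lru_lock, irqs off ... */
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);

With this patch, switching between two lruvecs inside such a loop no
longer toggles the irq state at every boundary.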