mm: mmap_lock: replace get_memcg_path_buf() with on-stack buffer

Message ID ef22d289-eadb-4ed9-863b-fbc922b33d8d@I-love.SAKURA.ne.jp (mailing list archive)
State New
Headers show
Series mm: mmap_lock: replace get_memcg_path_buf() with on-stack buffer | expand

Commit Message

Tetsuo Handa June 21, 2024, 1:08 a.m. UTC
Commit 2b5067a8143e ("mm: mmap_lock: add tracepoints around lock
acquisition") introduced the TRACE_MMAP_LOCK_EVENT() macro, which used
preempt_disable() so that get_mm_memcg_path() could return a percpu
buffer slot exclusively owned by the current context (normal, softirq,
irq or NMI).

Commit 832b50725373 ("mm: mmap_lock: use local locks instead of disabling
preemption") replaced preempt_disable() with local_lock(&memcg_paths.lock),
on the grounds that preempt_disable() must be avoided because
get_mm_memcg_path() might sleep if PREEMPT_RT=y.
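
For reference, the relevant part of the removed machinery looked roughly
as follows; this is condensed from the code deleted in the diff below
(832b50725373 merely substituted local_lock()/local_unlock() for the
original preempt_disable()/preempt_enable()):

  /* One buffer per CPU, carved into one 256-byte slot per context. */
  #define CONTEXT_COUNT 4   /* normal, softirq, irq, NMI */

  #define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                               \
          do {                                                               \
                  const char *memcg_path;                                    \
                  local_lock(&memcg_paths.lock);  /* was preempt_disable() */\
                  memcg_path = get_mm_memcg_path(mm);  /* percpu slot */     \
                  trace_mmap_lock_##type(mm, memcg_path ? memcg_path : "",   \
                                         ##__VA_ARGS__);                     \
                  if (likely(memcg_path != NULL))                            \
                          put_memcg_path_buf();                              \
                  local_unlock(&memcg_paths.lock);  /* was preempt_enable() */\
          } while (0)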

But syzbot started reporting

  inconsistent {HARDIRQ-ON-W} -> {IN-HARDIRQ-W} usage.

and

  inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.

messages, because local_lock() does not disable IRQs: the mmap_lock
tracepoints can also fire from hardirq/softirq context, so lockdep sees
memcg_paths.lock taken both with interrupts enabled and from interrupt
context, and flags the inconsistency.

We could replace local_lock() with local_lock_irqsave() in order to
suppress these messages. But this patch instead replaces the percpu
buffers with an on-stack buffer, because each buffer returned by
get_memcg_path_buf() is only 256 bytes, which is small enough to
allocate on the current thread's kernel stack.
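
With the on-stack buffer, each tracepoint wrapper simply carries the path
buffer in its own stack frame. For illustration, a rough expansion of the
new macro at the start_locking wrapper in mm/mmap_lock.c (simplified, not
the literal preprocessor output):

  void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write)
  {
          char buf[MEMCG_PATH_BUF_SIZE];  /* 256 bytes on this thread's stack */

          get_mm_memcg_path(mm, buf, sizeof(buf));
          trace_mmap_lock_start_locking(mm, buf, write);
  }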

Reported-by: syzbot <syzbot+40905bca570ae6784745@syzkaller.appspotmail.com>
Closes: https://syzkaller.appspot.com/bug?extid=40905bca570ae6784745
Fixes: 832b50725373 ("mm: mmap_lock: use local locks instead of disabling preemption")
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
---
Only compile tested.

 mm/mmap_lock.c | 175 ++++++-------------------------------------------
 1 file changed, 20 insertions(+), 155 deletions(-)

Comments

Axel Rasmussen June 21, 2024, 11:03 p.m. UTC | #1
On Thu, Jun 20, 2024 at 6:08 PM Tetsuo Handa
<penguin-kernel@i-love.sakura.ne.jp> wrote:
>
> Commit 2b5067a8143e ("mm: mmap_lock: add tracepoints around lock
> acquisition") introduced the TRACE_MMAP_LOCK_EVENT() macro, which used
> preempt_disable() so that get_mm_memcg_path() could return a percpu
> buffer slot exclusively owned by the current context (normal, softirq,
> irq or NMI).
>
> Commit 832b50725373 ("mm: mmap_lock: use local locks instead of disabling
> preemption") replaced preempt_disable() with local_lock(&memcg_paths.lock),
> on the grounds that preempt_disable() must be avoided because
> get_mm_memcg_path() might sleep if PREEMPT_RT=y.
>
> But syzbot started reporting
>
>   inconsistent {HARDIRQ-ON-W} -> {IN-HARDIRQ-W} usage.
>
> and
>
>   inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
>
> messages, because local_lock() does not disable IRQs: the mmap_lock
> tracepoints can also fire from hardirq/softirq context, so lockdep sees
> memcg_paths.lock taken both with interrupts enabled and from interrupt
> context, and flags the inconsistency.
>
> We could replace local_lock() with local_lock_irqsave() in order to
> suppress these messages. But this patch instead replaces the percpu
> buffers with an on-stack buffer, because each buffer returned by
> get_memcg_path_buf() is only 256 bytes, which is small enough to
> allocate on the current thread's kernel stack.
>
> Reported-by: syzbot <syzbot+40905bca570ae6784745@syzkaller.appspotmail.com>
> Closes: https://syzkaller.appspot.com/bug?extid=40905bca570ae6784745
> Fixes: 832b50725373 ("mm: mmap_lock: use local locks instead of disabling preemption")
> Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
> ---
> Only compile tested.
>
>  mm/mmap_lock.c | 175 ++++++-------------------------------------------
>  1 file changed, 20 insertions(+), 155 deletions(-)

No objections. Looking back all the way to the first version [1] the
buffers were already percpu, instead of on the stack like this. IOW,
there was no on-list discussion about why this shouldn't go on the
stack. It has been a while, but if memory serves I opted to do it that
way just out of paranoia around putting large buffers on the stack.
But, I agree 256 bytes isn't all that large.

That v1 patch wasn't all that complex, but then again it didn't deal
with various edge cases properly :) so it has grown significantly more
complex over time. Reconsidering the approach seems reasonable now,
given how much code this removes.

This change looks straightforwardly correct to me. You can take:

Reviewed-by: Axel Rasmussen <axelrasmussen@google.com>

[1]: https://lore.kernel.org/all/20200917154258.1a364cdf@gandalf.local.home/T/

>
> diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
> index 1854850b4b89..368b840e7508 100644
> --- a/mm/mmap_lock.c
> +++ b/mm/mmap_lock.c
> @@ -19,14 +19,7 @@ EXPORT_TRACEPOINT_SYMBOL(mmap_lock_released);
>
>  #ifdef CONFIG_MEMCG
>
> -/*
> - * Our various events all share the same buffer (because we don't want or need
> - * to allocate a set of buffers *per event type*), so we need to protect against
> - * concurrent _reg() and _unreg() calls, and count how many _reg() calls have
> - * been made.
> - */
> -static DEFINE_MUTEX(reg_lock);
> -static int reg_refcount; /* Protected by reg_lock. */
> +static atomic_t reg_refcount;
>
>  /*
>   * Size of the buffer for memcg path names. Ignoring stack trace support,
> @@ -34,136 +27,22 @@ static int reg_refcount; /* Protected by reg_lock. */
>   */
>  #define MEMCG_PATH_BUF_SIZE MAX_FILTER_STR_VAL
>
> -/*
> - * How many contexts our trace events might be called in: normal, softirq, irq,
> - * and NMI.
> - */
> -#define CONTEXT_COUNT 4
> -
> -struct memcg_path {
> -       local_lock_t lock;
> -       char __rcu *buf;
> -       local_t buf_idx;
> -};
> -static DEFINE_PER_CPU(struct memcg_path, memcg_paths) = {
> -       .lock = INIT_LOCAL_LOCK(lock),
> -       .buf_idx = LOCAL_INIT(0),
> -};
> -
> -static char **tmp_bufs;
> -
> -/* Called with reg_lock held. */
> -static void free_memcg_path_bufs(void)
> -{
> -       struct memcg_path *memcg_path;
> -       int cpu;
> -       char **old = tmp_bufs;
> -
> -       for_each_possible_cpu(cpu) {
> -               memcg_path = per_cpu_ptr(&memcg_paths, cpu);
> -               *(old++) = rcu_dereference_protected(memcg_path->buf,
> -                       lockdep_is_held(&reg_lock));
> -               rcu_assign_pointer(memcg_path->buf, NULL);
> -       }
> -
> -       /* Wait for inflight memcg_path_buf users to finish. */
> -       synchronize_rcu();
> -
> -       old = tmp_bufs;
> -       for_each_possible_cpu(cpu) {
> -               kfree(*(old++));
> -       }
> -
> -       kfree(tmp_bufs);
> -       tmp_bufs = NULL;
> -}
> -
>  int trace_mmap_lock_reg(void)
>  {
> -       int cpu;
> -       char *new;
> -
> -       mutex_lock(&reg_lock);
> -
> -       /* If the refcount is going 0->1, proceed with allocating buffers. */
> -       if (reg_refcount++)
> -               goto out;
> -
> -       tmp_bufs = kmalloc_array(num_possible_cpus(), sizeof(*tmp_bufs),
> -                                GFP_KERNEL);
> -       if (tmp_bufs == NULL)
> -               goto out_fail;
> -
> -       for_each_possible_cpu(cpu) {
> -               new = kmalloc(MEMCG_PATH_BUF_SIZE * CONTEXT_COUNT, GFP_KERNEL);
> -               if (new == NULL)
> -                       goto out_fail_free;
> -               rcu_assign_pointer(per_cpu_ptr(&memcg_paths, cpu)->buf, new);
> -               /* Don't need to wait for inflights, they'd have gotten NULL. */
> -       }
> -
> -out:
> -       mutex_unlock(&reg_lock);
> +       atomic_inc(&reg_refcount);
>         return 0;
> -
> -out_fail_free:
> -       free_memcg_path_bufs();
> -out_fail:
> -       /* Since we failed, undo the earlier ref increment. */
> -       --reg_refcount;
> -
> -       mutex_unlock(&reg_lock);
> -       return -ENOMEM;
>  }
>
>  void trace_mmap_lock_unreg(void)
>  {
> -       mutex_lock(&reg_lock);
> -
> -       /* If the refcount is going 1->0, proceed with freeing buffers. */
> -       if (--reg_refcount)
> -               goto out;
> -
> -       free_memcg_path_bufs();
> -
> -out:
> -       mutex_unlock(&reg_lock);
> -}
> -
> -static inline char *get_memcg_path_buf(void)
> -{
> -       struct memcg_path *memcg_path = this_cpu_ptr(&memcg_paths);
> -       char *buf;
> -       int idx;
> -
> -       rcu_read_lock();
> -       buf = rcu_dereference(memcg_path->buf);
> -       if (buf == NULL) {
> -               rcu_read_unlock();
> -               return NULL;
> -       }
> -       idx = local_add_return(MEMCG_PATH_BUF_SIZE, &memcg_path->buf_idx) -
> -             MEMCG_PATH_BUF_SIZE;
> -       return &buf[idx];
> +       atomic_dec(&reg_refcount);
>  }
>
> -static inline void put_memcg_path_buf(void)
> -{
> -       local_sub(MEMCG_PATH_BUF_SIZE, &this_cpu_ptr(&memcg_paths)->buf_idx);
> -       rcu_read_unlock();
> -}
> -
> -#define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                                   \
> -       do {                                                                   \
> -               const char *memcg_path;                                        \
> -               local_lock(&memcg_paths.lock);                                 \
> -               memcg_path = get_mm_memcg_path(mm);                            \
> -               trace_mmap_lock_##type(mm,                                     \
> -                                      memcg_path != NULL ? memcg_path : "",   \
> -                                      ##__VA_ARGS__);                         \
> -               if (likely(memcg_path != NULL))                                \
> -                       put_memcg_path_buf();                                  \
> -               local_unlock(&memcg_paths.lock);                               \
> +#define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                    \
> +       do {                                                    \
> +               char buf[MEMCG_PATH_BUF_SIZE];                  \
> +               get_mm_memcg_path(mm, buf, sizeof(buf));        \
> +               trace_mmap_lock_##type(mm, buf, ##__VA_ARGS__); \
>         } while (0)
>
>  #else /* !CONFIG_MEMCG */
> @@ -185,37 +64,23 @@ void trace_mmap_lock_unreg(void)
>  #ifdef CONFIG_TRACING
>  #ifdef CONFIG_MEMCG
>  /*
> - * Write the given mm_struct's memcg path to a percpu buffer, and return a
> - * pointer to it. If the path cannot be determined, or no buffer was available
> - * (because the trace event is being unregistered), NULL is returned.
> - *
> - * Note: buffers are allocated per-cpu to avoid locking, so preemption must be
> - * disabled by the caller before calling us, and re-enabled only after the
> - * caller is done with the pointer.
> - *
> - * The caller must call put_memcg_path_buf() once the buffer is no longer
> - * needed. This must be done while preemption is still disabled.
> + * Write the given mm_struct's memcg path to a buffer. If the path cannot be
> + * determined or the trace event is being unregistered, empty string is written.
>   */
> -static const char *get_mm_memcg_path(struct mm_struct *mm)
> +static void get_mm_memcg_path(struct mm_struct *mm, char *buf, size_t buflen)
>  {
> -       char *buf = NULL;
> -       struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
> +       struct mem_cgroup *memcg;
>
> +       buf[0] = '\0';
> +       /* No need to get path if no trace event is registered. */
> +       if (!atomic_read(&reg_refcount))
> +               return;
> +       memcg = get_mem_cgroup_from_mm(mm);
>         if (memcg == NULL)
> -               goto out;
> -       if (unlikely(memcg->css.cgroup == NULL))
> -               goto out_put;
> -
> -       buf = get_memcg_path_buf();
> -       if (buf == NULL)
> -               goto out_put;
> -
> -       cgroup_path(memcg->css.cgroup, buf, MEMCG_PATH_BUF_SIZE);
> -
> -out_put:
> +               return;
> +       if (memcg->css.cgroup)
> +               cgroup_path(memcg->css.cgroup, buf, buflen);
>         css_put(&memcg->css);
> -out:
> -       return buf;
>  }
>
>  #endif /* CONFIG_MEMCG */
> --
> 2.43.0
>
Tetsuo Handa June 22, 2024, 5:57 a.m. UTC | #2
On 2024/06/22 8:03, Axel Rasmussen wrote:
> No objections. Looking back all the way to the first version [1] the
> buffers were already percpu, instead of on the stack like this. IOW,
> there was no on-list discussion about why this shouldn't go on the
> stack. It has been a while, but if memory serves I opted to do it that
> way just out of paranoia around putting large buffers on the stack.
> But, I agree 256 bytes isn't all that large.
> 
> That v1 patch wasn't all that complex, but then again it didn't deal
> with various edge cases properly :) so it has grown significantly more
> complex over time. Reconsidering the approach seems reasonable now,
> given how much code this removes.
> 
> This change looks straightforwardly correct to me. You can take:
> 
> Reviewed-by: Axel Rasmussen <axelrasmussen@google.com>

Thank you. One question. CONTEXT_COUNT was defined as below.

>> -/*
>> - * How many contexts our trace events might be called in: normal, softirq, irq,
>> - * and NMI.
>> - */
>> -#define CONTEXT_COUNT 4

Is there a possibility that this function (or, more generally, trace events) is called from NMI
context? If so, I worry that the functions called from get_mm_memcg_path() are not NMI-safe.
The original change at
https://lkml.kernel.org/r/3e9b2a54-73d4-48cb-a510-d17984c97a45@I-love.SAKURA.ne.jp was
posted out of concern about NMI safety.
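
If trace events can indeed fire in NMI context, would an early bail-out
along these lines be needed on top of this patch? (Just a hypothetical,
untested sketch of get_mm_memcg_path() with an in_nmi() check added.)

  static void get_mm_memcg_path(struct mm_struct *mm, char *buf, size_t buflen)
  {
          struct mem_cgroup *memcg;

          buf[0] = '\0';
          /* Hypothetical: bail out early, as the calls below may not be NMI-safe. */
          if (in_nmi())
                  return;
          /* No need to get path if no trace event is registered. */
          if (!atomic_read(&reg_refcount))
                  return;
          memcg = get_mem_cgroup_from_mm(mm);
          if (memcg == NULL)
                  return;
          if (memcg->css.cgroup)
                  cgroup_path(memcg->css.cgroup, buf, buflen);
          css_put(&memcg->css);
  }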
Axel Rasmussen June 27, 2024, 9:41 p.m. UTC | #3
On Fri, Jun 21, 2024 at 10:57 PM Tetsuo Handa
<penguin-kernel@i-love.sakura.ne.jp> wrote:
>
> On 2024/06/22 8:03, Axel Rasmussen wrote:
> > No objections. Looking back all the way to the first version [1] the
> > buffers were already percpu, instead of on the stack like this. IOW,
> > there was no on-list discussion about why this shouldn't go on the
> > stack. It has been a while, but if memory serves I opted to do it that
> > way just out of paranoia around putting large buffers on the stack.
> > But, I agree 256 bytes isn't all that large.
> >
> > That v1 patch wasn't all that complex, but then again it didn't deal
> > with various edge cases properly :) so it has grown significantly more
> > complex over time. Reconsidering the approach seems reasonable now,
> > given how much code this removes.
> >
> > This change looks straightforwardly correct to me. You can take:
> >
> > Reviewed-by: Axel Rasmussen <axelrasmussen@google.com>
>
> Thank you. One question. CONTEXT_COUNT was defined as below.
>
> >> -/*
> >> - * How many contexts our trace events might be called in: normal, softirq, irq,
> >> - * and NMI.
> >> - */
> >> -#define CONTEXT_COUNT 4
>
> Is there a possibility that this function (or, more generally, trace events) is called from NMI
> context? If so, I worry that the functions called from get_mm_memcg_path() are not NMI-safe.
> The original change at
> https://lkml.kernel.org/r/3e9b2a54-73d4-48cb-a510-d17984c97a45@I-love.SAKURA.ne.jp was
> posted out of concern about NMI safety.

I think it's unlikely, but I'm not certain; trace events *in general*
can be called from NMI context.

This code was added based on the discussion here [1] from v4 of this
patchset. At the time I don't think we had any certainty that this *was*
called from NMI context; the reasoning was rather that making
CONTEXT_COUNT 4 when it only needed to be 3 in practice wasn't a huge
deal, whereas getting it wrong the other way would be much worse.

[1]: https://lore.kernel.org/r/20201020184746.300555-2-axelrasmussen@google.com

>

Patch

diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
index 1854850b4b89..368b840e7508 100644
--- a/mm/mmap_lock.c
+++ b/mm/mmap_lock.c
@@ -19,14 +19,7 @@  EXPORT_TRACEPOINT_SYMBOL(mmap_lock_released);
 
 #ifdef CONFIG_MEMCG
 
-/*
- * Our various events all share the same buffer (because we don't want or need
- * to allocate a set of buffers *per event type*), so we need to protect against
- * concurrent _reg() and _unreg() calls, and count how many _reg() calls have
- * been made.
- */
-static DEFINE_MUTEX(reg_lock);
-static int reg_refcount; /* Protected by reg_lock. */
+static atomic_t reg_refcount;
 
 /*
  * Size of the buffer for memcg path names. Ignoring stack trace support,
@@ -34,136 +27,22 @@  static int reg_refcount; /* Protected by reg_lock. */
  */
 #define MEMCG_PATH_BUF_SIZE MAX_FILTER_STR_VAL
 
-/*
- * How many contexts our trace events might be called in: normal, softirq, irq,
- * and NMI.
- */
-#define CONTEXT_COUNT 4
-
-struct memcg_path {
-	local_lock_t lock;
-	char __rcu *buf;
-	local_t buf_idx;
-};
-static DEFINE_PER_CPU(struct memcg_path, memcg_paths) = {
-	.lock = INIT_LOCAL_LOCK(lock),
-	.buf_idx = LOCAL_INIT(0),
-};
-
-static char **tmp_bufs;
-
-/* Called with reg_lock held. */
-static void free_memcg_path_bufs(void)
-{
-	struct memcg_path *memcg_path;
-	int cpu;
-	char **old = tmp_bufs;
-
-	for_each_possible_cpu(cpu) {
-		memcg_path = per_cpu_ptr(&memcg_paths, cpu);
-		*(old++) = rcu_dereference_protected(memcg_path->buf,
-			lockdep_is_held(&reg_lock));
-		rcu_assign_pointer(memcg_path->buf, NULL);
-	}
-
-	/* Wait for inflight memcg_path_buf users to finish. */
-	synchronize_rcu();
-
-	old = tmp_bufs;
-	for_each_possible_cpu(cpu) {
-		kfree(*(old++));
-	}
-
-	kfree(tmp_bufs);
-	tmp_bufs = NULL;
-}
-
 int trace_mmap_lock_reg(void)
 {
-	int cpu;
-	char *new;
-
-	mutex_lock(&reg_lock);
-
-	/* If the refcount is going 0->1, proceed with allocating buffers. */
-	if (reg_refcount++)
-		goto out;
-
-	tmp_bufs = kmalloc_array(num_possible_cpus(), sizeof(*tmp_bufs),
-				 GFP_KERNEL);
-	if (tmp_bufs == NULL)
-		goto out_fail;
-
-	for_each_possible_cpu(cpu) {
-		new = kmalloc(MEMCG_PATH_BUF_SIZE * CONTEXT_COUNT, GFP_KERNEL);
-		if (new == NULL)
-			goto out_fail_free;
-		rcu_assign_pointer(per_cpu_ptr(&memcg_paths, cpu)->buf, new);
-		/* Don't need to wait for inflights, they'd have gotten NULL. */
-	}
-
-out:
-	mutex_unlock(&reg_lock);
+	atomic_inc(&reg_refcount);
 	return 0;
-
-out_fail_free:
-	free_memcg_path_bufs();
-out_fail:
-	/* Since we failed, undo the earlier ref increment. */
-	--reg_refcount;
-
-	mutex_unlock(&reg_lock);
-	return -ENOMEM;
 }
 
 void trace_mmap_lock_unreg(void)
 {
-	mutex_lock(&reg_lock);
-
-	/* If the refcount is going 1->0, proceed with freeing buffers. */
-	if (--reg_refcount)
-		goto out;
-
-	free_memcg_path_bufs();
-
-out:
-	mutex_unlock(&reg_lock);
-}
-
-static inline char *get_memcg_path_buf(void)
-{
-	struct memcg_path *memcg_path = this_cpu_ptr(&memcg_paths);
-	char *buf;
-	int idx;
-
-	rcu_read_lock();
-	buf = rcu_dereference(memcg_path->buf);
-	if (buf == NULL) {
-		rcu_read_unlock();
-		return NULL;
-	}
-	idx = local_add_return(MEMCG_PATH_BUF_SIZE, &memcg_path->buf_idx) -
-	      MEMCG_PATH_BUF_SIZE;
-	return &buf[idx];
+	atomic_dec(&reg_refcount);
 }
 
-static inline void put_memcg_path_buf(void)
-{
-	local_sub(MEMCG_PATH_BUF_SIZE, &this_cpu_ptr(&memcg_paths)->buf_idx);
-	rcu_read_unlock();
-}
-
-#define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                                   \
-	do {                                                                   \
-		const char *memcg_path;                                        \
-		local_lock(&memcg_paths.lock);                                 \
-		memcg_path = get_mm_memcg_path(mm);                            \
-		trace_mmap_lock_##type(mm,                                     \
-				       memcg_path != NULL ? memcg_path : "",   \
-				       ##__VA_ARGS__);                         \
-		if (likely(memcg_path != NULL))                                \
-			put_memcg_path_buf();                                  \
-		local_unlock(&memcg_paths.lock);                               \
+#define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                    \
+	do {                                                    \
+		char buf[MEMCG_PATH_BUF_SIZE];                  \
+		get_mm_memcg_path(mm, buf, sizeof(buf));        \
+		trace_mmap_lock_##type(mm, buf, ##__VA_ARGS__); \
 	} while (0)
 
 #else /* !CONFIG_MEMCG */
@@ -185,37 +64,23 @@  void trace_mmap_lock_unreg(void)
 #ifdef CONFIG_TRACING
 #ifdef CONFIG_MEMCG
 /*
- * Write the given mm_struct's memcg path to a percpu buffer, and return a
- * pointer to it. If the path cannot be determined, or no buffer was available
- * (because the trace event is being unregistered), NULL is returned.
- *
- * Note: buffers are allocated per-cpu to avoid locking, so preemption must be
- * disabled by the caller before calling us, and re-enabled only after the
- * caller is done with the pointer.
- *
- * The caller must call put_memcg_path_buf() once the buffer is no longer
- * needed. This must be done while preemption is still disabled.
+ * Write the given mm_struct's memcg path to a buffer. If the path cannot be
+ * determined or the trace event is being unregistered, empty string is written.
  */
-static const char *get_mm_memcg_path(struct mm_struct *mm)
+static void get_mm_memcg_path(struct mm_struct *mm, char *buf, size_t buflen)
 {
-	char *buf = NULL;
-	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
+	struct mem_cgroup *memcg;
 
+	buf[0] = '\0';
+	/* No need to get path if no trace event is registered. */
+	if (!atomic_read(&reg_refcount))
+		return;
+	memcg = get_mem_cgroup_from_mm(mm);
 	if (memcg == NULL)
-		goto out;
-	if (unlikely(memcg->css.cgroup == NULL))
-		goto out_put;
-
-	buf = get_memcg_path_buf();
-	if (buf == NULL)
-		goto out_put;
-
-	cgroup_path(memcg->css.cgroup, buf, MEMCG_PATH_BUF_SIZE);
-
-out_put:
+		return;
+	if (memcg->css.cgroup)
+		cgroup_path(memcg->css.cgroup, buf, buflen);
 	css_put(&memcg->css);
-out:
-	return buf;
 }
 
 #endif /* CONFIG_MEMCG */