[4/4] mm, swap_cgroup: remove global swap cgroup lock

Message ID 20241202184154.19321-5-ryncsn@gmail.com (mailing list archive)
State New
Series mm/swap_cgroup: remove global swap cgroup lock

Commit Message

Kairui Song Dec. 2, 2024, 6:41 p.m. UTC
From: Kairui Song <kasong@tencent.com>

commit e9e58a4ec3b1 ("memcg: avoid use cmpxchg in swap cgroup maintainance")
replaced the cmpxchg/xchg with a global irq spinlock because some archs
don't support 2-byte cmpxchg/xchg. Clearly this doesn't scale well.

And as commented in swap_cgroup.c, this lock is not needed for map
synchronization.

Emulating a 2-byte cmpxchg/xchg with a 4-byte atomic isn't hard, so
implement it to get rid of this lock.

Testing with a 64G brd device, building the kernel with make -j96 in a
1.5G memory cgroup using 4k folios, showed the following improvement
(10 test runs):

Before this series:
Sys time: 10730.08 (stdev 49.030728)
Real time: 171.03 (stdev 0.850355)

After this commit:
Sys time: 9612.24 (stdev 66.310789), -10.42%
Real time: 159.78 (stdev 0.577193), -6.57%

With 64k folios and 2G memcg:
Before this series:
Sys time: 7626.77 (stdev 43.545517)
Real time: 136.22 (stdev 1.265544)

After this commit:
Sys time: 6936.03 (stdev 39.996280), -9.06%
Real time: 129.65 (stdev 0.880039), -4.82%

Sequential swapout of 8G 4k zero folios (24 test runs):
Before this series:
5461409.12 us (stdev 183957.827084)

After this commit:
5420447.26 us (stdev 196419.240317)

Sequential swapin of 8G 4k zero folios (24 test runs):
Before this series:
19736958.916667 us (stdev 189027.246676)

After this commit:
19662182.629630 us (stdev 172717.640614)

Performance is better or at least not worse for all tests above.

Signed-off-by: Kairui Song <kasong@tencent.com>
---
 mm/swap_cgroup.c | 56 +++++++++++++++++++++++++++++++++++-------------
 1 file changed, 41 insertions(+), 15 deletions(-)

Comments

Yosry Ahmed Dec. 2, 2024, 7:28 p.m. UTC | #1
On Mon, Dec 2, 2024 at 10:42 AM Kairui Song <ryncsn@gmail.com> wrote:
>
> From: Kairui Song <kasong@tencent.com>
>
> [...]
>
> diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
> index a76afdc3666a..028f5e6be3f0 100644
> --- a/mm/swap_cgroup.c
> +++ b/mm/swap_cgroup.c
> @@ -5,6 +5,15 @@
>
>  #include <linux/swapops.h> /* depends on mm.h include */
>
> +#define ID_PER_UNIT (sizeof(atomic_t) / sizeof(unsigned short))
> +struct swap_cgroup_unit {
> +       union {
> +               int raw;
> +               atomic_t val;
> +               unsigned short __id[ID_PER_UNIT];
> +       };
> +};

This doubles the size of the per-entry data, right?

Why do we need this? I thought cmpxchg() supports multiple sizes and
will already do the emulation for us.

> [...]
Yosry Ahmed Dec. 2, 2024, 7:37 p.m. UTC | #2
On Mon, Dec 2, 2024 at 10:42 AM Kairui Song <ryncsn@gmail.com> wrote:
>
> [...]
>
> @@ -31,6 +42,24 @@ static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
>   *
>   * TODO: we can push these buffers out to HIGHMEM.
>   */

While you're at it, I think the comment above is quite outdated :)
Yosry Ahmed Dec. 2, 2024, 8:35 p.m. UTC | #3
On Mon, Dec 2, 2024 at 11:28 AM Yosry Ahmed <yosryahmed@google.com> wrote:
>
> On Mon, Dec 2, 2024 at 10:42 AM Kairui Song <ryncsn@gmail.com> wrote:
> >
> > [...]
> >
> > +#define ID_PER_UNIT (sizeof(atomic_t) / sizeof(unsigned short))
> > +struct swap_cgroup_unit {
> > +       union {
> > +               int raw;
> > +               atomic_t val;
> > +               unsigned short __id[ID_PER_UNIT];
> > +       };
> > +};
>
> This doubles the size of the per-entry data, right?

Oh we don't, we just store 2 ids in an int instead of storing each id
individually. But the question below still stands, can't we just use
cmpxchg() directly on the id?

>
> Why do we need this? I thought cmpxchg() supports multiple sizes and
> will already do the emulation for us.
Kairui Song Dec. 3, 2024, 6:20 p.m. UTC | #4
On Tue, Dec 3, 2024 at 4:36 AM Yosry Ahmed <yosryahmed@google.com> wrote:
>
> On Mon, Dec 2, 2024 at 11:28 AM Yosry Ahmed <yosryahmed@google.com> wrote:
> >
> > On Mon, Dec 2, 2024 at 10:42 AM Kairui Song <ryncsn@gmail.com> wrote:
> > >
> > > [...]
> > >
> > > +#define ID_PER_UNIT (sizeof(atomic_t) / sizeof(unsigned short))
> > > +struct swap_cgroup_unit {
> > > +       union {
> > > +               int raw;
> > > +               atomic_t val;
> > > +               unsigned short __id[ID_PER_UNIT];
> > > +       };
> > > +};
> >
> > This doubles the size of the per-entry data, right?
>
> Oh we don't, we just store 2 ids in an int instead of storing each id
> individually. But the question below still stands, can't we just use
> cmpxchg() directly on the id?

Hi Yosry,

Last time I checked, some archs still didn't support 2-byte xchg.
Things may have changed slightly since then, but it seems at least
parisc still doesn't support it. And looking at the code, some archs
still don't support 2-byte cmpxchg today (and I just dropped the
cmpxchg helper for swap_cgroup, so that should be OK). RCU just
dropped its one-byte cmpxchg emulation two months ago in d4e287d7caff,
so that area is changing. Lacking such support is exactly why there
was a global lock previously, so I think the safe move is just to
emulate the operation manually for now?

Yosry Ahmed Dec. 3, 2024, 7:17 p.m. UTC | #5
On Tue, Dec 3, 2024 at 10:20 AM Kairui Song <ryncsn@gmail.com> wrote:
>
> On Tue, Dec 3, 2024 at 4:36 AM Yosry Ahmed <yosryahmed@google.com> wrote:
> >
> > On Mon, Dec 2, 2024 at 11:28 AM Yosry Ahmed <yosryahmed@google.com> wrote:
> > >
> > > On Mon, Dec 2, 2024 at 10:42 AM Kairui Song <ryncsn@gmail.com> wrote:
> > > > [...]
>
> Hi Yosry,
>
> Last time I checked, some archs still didn't support 2-byte xchg.
> Things may have changed slightly since then, but it seems at least
> parisc still doesn't support it. And looking at the code, some archs
> still don't support 2-byte cmpxchg today (and I just dropped the
> cmpxchg helper for swap_cgroup, so that should be OK). RCU just
> dropped its one-byte cmpxchg emulation two months ago in d4e287d7caff,
> so that area is changing. Lacking such support is exactly why there
> was a global lock previously, so I think the safe move is just to
> emulate the operation manually for now?

+Paul E. McKenney

If there's already work to support 2-byte cmpxchg() I'd rather wait
for that. Alternatively, if it's not too difficult, we should
generalize this emulation to something like cmpxchg_emu_u8() and add
the missing arch support. It doesn't feel right to have our own custom
2-byte cmpxchg() emulation here.
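
For illustration, a hypothetical cmpxchg_emu_u16() in the style of
lib/cmpxchg-emu.c might look like the sketch below; the name, the
endian handling, and the assumption that old/new fit in 16 bits are
mine, not an existing API:

/*
 * Hypothetical u16 analog of cmpxchg_emu_u8(); a sketch, not an
 * existing kernel API. Emulates a 2-byte cmpxchg with a 4-byte
 * cmpxchg on the aligned word containing *p.
 */
static unsigned long cmpxchg_emu_u16(volatile u16 *p, unsigned long old,
				     unsigned long new)
{
	u32 *p32 = (u32 *)((unsigned long)p & ~0x3UL);
	/* Bit position of the u16 within the aligned u32, endian-aware. */
	int shift = (((unsigned long)p & 0x2) ^
		     (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ? 0x2 : 0)) * BITS_PER_BYTE;
	u32 mask = 0xffffU << shift;
	u32 old32 = READ_ONCE(*p32), new32;

	do {
		/* Bail out with the current value if the u16 doesn't match. */
		if (((old32 & mask) >> shift) != old)
			return (old32 & mask) >> shift;
		new32 = (old32 & ~mask) | ((u32)new << shift);
	} while (!try_cmpxchg(p32, &old32, new32));

	return old;
}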

Kairui Song Dec. 4, 2024, 5:58 p.m. UTC | #6
On Wed, Dec 4, 2024 at 3:18 AM Yosry Ahmed <yosryahmed@google.com> wrote:
>
> On Tue, Dec 3, 2024 at 10:20 AM Kairui Song <ryncsn@gmail.com> wrote:
> >
> > On Tue, Dec 3, 2024 at 4:36 AM Yosry Ahmed <yosryahmed@google.com> wrote:
> > >
> > > On Mon, Dec 2, 2024 at 11:28 AM Yosry Ahmed <yosryahmed@google.com> wrote:
> > > >
> > > > On Mon, Dec 2, 2024 at 10:42 AM Kairui Song <ryncsn@gmail.com> wrote:
> [...]
>
> +Paul E. McKenney
>
> If there's already work to support 2-byte cmpxchg() I'd rather wait
> for that. Alternatively, if it's not too difficult, we should
> generalize this emulation to something like cmpxchg_emu_u8() and add
> the missing arch support. It doesn't feel right to have our own custom
> 2-byte cmpxchg() emulation here.

Actually here we need a 2-byte xchg, not cmpxchg. I'm not exactly sure
whether any arch is still missing support for that, or whether there is
a plan to support it on all archs?
Yosry Ahmed Dec. 4, 2024, 6:57 p.m. UTC | #7
On Wed, Dec 4, 2024 at 9:58 AM Kairui Song <ryncsn@gmail.com> wrote:
>
> On Wed, Dec 4, 2024 at 3:18 AM Yosry Ahmed <yosryahmed@google.com> wrote:
> >
> > On Tue, Dec 3, 2024 at 10:20 AM Kairui Song <ryncsn@gmail.com> wrote:
> > >
> > > On Tue, Dec 3, 2024 at 4:36 AM Yosry Ahmed <yosryahmed@google.com> wrote:
> > > >
> > > > On Mon, Dec 2, 2024 at 11:28 AM Yosry Ahmed <yosryahmed@google.com> wrote:
> > > > >
> > > > > On Mon, Dec 2, 2024 at 10:42 AM Kairui Song <ryncsn@gmail.com> wrote:
> > [...]
> >
> > +Paul E. McKenney
> >
> > If there's already work to support 2-byte cmpxchg() I'd rather wait
> > for that. Alternatively, if it's not too difficult, we should
> > generalize this emulation to something like cmpxchg_emu_u8() and add
> > the missing arch support. It doesn't feel right to have our own custom
> > 2-byte cmpxchg() emulation here.
>
> Actually here we need a 2-byte xchg, not cmpxchg. I'm not exactly sure
> whether any arch is still missing support for that, or whether there is
> a plan to support it on all archs?

Not sure to be honest.

Taking a step back, with swap_cgroup_cmpxchg() gone, do we still need
the synchronization to begin with? It seems like swap_cgroup_record()
is the only modifier now; could multiple callers be racing for the same
swap slot?
Chris Li Dec. 4, 2024, 7:34 p.m. UTC | #8
On Mon, Dec 2, 2024 at 10:42 AM Kairui Song <ryncsn@gmail.com> wrote:
>
> From: Kairui Song <kasong@tencent.com>
>
> [...]
>
> diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
> index a76afdc3666a..028f5e6be3f0 100644
> --- a/mm/swap_cgroup.c
> +++ b/mm/swap_cgroup.c
> @@ -5,6 +5,15 @@
>
>  #include <linux/swapops.h> /* depends on mm.h include */
>
> +#define ID_PER_UNIT (sizeof(atomic_t) / sizeof(unsigned short))

You might want a compile-time assert that (sizeof(atomic_t) %
sizeof(unsigned short)) is zero. Couldn't hurt.
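
For example, a one-line sketch next to the ID_PER_UNIT definition would
do; static_assert() works at file scope, while BUILD_BUG_ON() would
need to sit inside a function:

static_assert(sizeof(atomic_t) % sizeof(unsigned short) == 0);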

> +struct swap_cgroup_unit {
> +       union {
> +               int raw;
> +               atomic_t val;
> +               unsigned short __id[ID_PER_UNIT];
> +       };
> +};

I suggest just getting rid of this complicated struct/union and using
bit shift and mask to get the u16 out from the atomic_t.

> +
>  static DEFINE_MUTEX(swap_cgroup_mutex);
>
>  struct swap_cgroup {
> @@ -12,8 +21,10 @@ struct swap_cgroup {
>  };
>
>  struct swap_cgroup_ctrl {
> -       unsigned short  *map;
> -       spinlock_t      lock;
> +       union {
> +               struct swap_cgroup_unit *units;
> +               unsigned short *map;

You really shouldn't access the map as an "unsigned short" array, so I
suggest changing the array pointer to "atomic_t".
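
That is, something like this sketch:

struct swap_cgroup_ctrl {
	atomic_t *map;
};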

> +       };
>  };
>
>  static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
> @@ -31,6 +42,24 @@ static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
>   *
>   * TODO: we can push these buffers out to HIGHMEM.
>   */
> +static unsigned short __swap_cgroup_xchg(void *map,
> +                                        pgoff_t offset,
> +                                        unsigned int new_id)
> +{
> +       unsigned int old_id;
> +       struct swap_cgroup_unit *units = map;
> +       struct swap_cgroup_unit *unit = &units[offset / ID_PER_UNIT];
> +       struct swap_cgroup_unit new, old = { .raw = atomic_read(&unit->val) };
> +
> +       do {
> +               new.raw = old.raw;
> +               old_id = old.__id[offset % ID_PER_UNIT];
> +               new.__id[offset % ID_PER_UNIT] = new_id;
> +       } while (!atomic_try_cmpxchg(&unit->val, &old.raw, new.raw));

I suggest just calculating the atomic_t offset (offset / ID_PER_UNIT)
and taking the address of that atomic_t, then using a mask and shift
to construct the new atomic_t value. That is likely to generate better
code: you don't want the compiler emitting memory loads and stores to
construct the temporary new value. I haven't checked the
machine-generated code, but I suspect the compiler is not smart enough
to turn those union accesses into register shifts here, which is what
you really want.
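
A minimal sketch of that shape, assuming the map becomes an atomic_t
array and ids stay 16 bits (the names here are mine, not the patch's):

static unsigned short __swap_cgroup_id_xchg(atomic_t *map, pgoff_t offset,
					    unsigned short new_id)
{
	atomic_t *pos = &map[offset / ID_PER_UNIT];
	unsigned int shift = (offset % ID_PER_UNIT) * BITS_PER_TYPE(unsigned short);
	unsigned int mask = 0xffffU << shift;
	int old = atomic_read(pos), new;

	do {
		/* Rebuild the word in registers: clear the slot, insert the id. */
		new = ((unsigned int)old & ~mask) | ((unsigned int)new_id << shift);
	} while (!atomic_try_cmpxchg(pos, &old, new));

	return ((unsigned int)old & mask) >> shift;
}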

> +
> +       return old_id;
> +}
> +
>  /**
>   * swap_cgroup_record - record mem_cgroup for a set of swap entries
>   * @ent: the first swap entry to be recorded into
> @@ -44,22 +73,19 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
>                                   unsigned int nr_ents)
>  {
>         struct swap_cgroup_ctrl *ctrl;
> -       unsigned short *map;
> -       unsigned short old;
> -       unsigned long flags;
>         pgoff_t offset = swp_offset(ent);
>         pgoff_t end = offset + nr_ents;
> +       unsigned short old, iter;
> +       unsigned short *map;

Make it an atomic_t pointer here as well.

>
>         ctrl = &swap_cgroup_ctrl[swp_type(ent)];
>         map = ctrl->map;
>
> -       spin_lock_irqsave(&ctrl->lock, flags);
> -       old = map[offset];
> +       old = READ_ONCE(map[offset]);

Ah, you shouldn't read the u16 directly. That runs into the endianness
question of how the u16s are arranged within the atomic_t. Do an atomic
read and then shift the bits out, so you avoid the endian problem.
Mixing atomic updates with plain reads from the middle of the atomic
location is a bad idea.
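
The read side would then be a plain atomic read plus a shift, e.g.
(a sketch under the same assumptions as the xchg sketch above):

static unsigned short __swap_cgroup_id_read(atomic_t *map, pgoff_t offset)
{
	unsigned int shift = (offset % ID_PER_UNIT) * BITS_PER_TYPE(unsigned short);

	return ((unsigned int)atomic_read(&map[offset / ID_PER_UNIT]) >> shift) & 0xffff;
}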

Chris

> [...]
Kairui Song Dec. 10, 2024, 7:05 a.m. UTC | #9
On Thu, Dec 5, 2024 at 3:40 AM Chris Li <chrisl@kernel.org> wrote:
>
> On Mon, Dec 2, 2024 at 10:42 AM Kairui Song <ryncsn@gmail.com> wrote:
> >
> > [...]
> >
> > +#define ID_PER_UNIT (sizeof(atomic_t) / sizeof(unsigned short))
>
> You might want a compile-time assert that (sizeof(atomic_t) %
> sizeof(unsigned short)) is zero. Couldn't hurt.
>
> > +struct swap_cgroup_unit {
> > +       union {
> > +               int raw;
> > +               atomic_t val;
> > +               unsigned short __id[ID_PER_UNIT];
> > +       };
> > +};
>
> I suggest just getting rid of this complicated struct/union and using
> bit shift and mask to get the u16 out from the atomic_t.

Good suggestion.

>
> > [...]
>
> I suggest just calculating the atomic_t offset (offset / ID_PER_UNIT)
> and taking the address of that atomic_t, then using a mask and shift
> to construct the new atomic_t value. That is likely to generate better
> code: you don't want the compiler emitting memory loads and stores to
> construct the temporary new value. I haven't checked the
> machine-generated code, but I suspect the compiler is not smart enough
> to turn those union accesses into register shifts here, which is what
> you really want.
>
> > +
> > +       return old_id;
> > +}
> > +
> >  /**
> >   * swap_cgroup_record - record mem_cgroup for a set of swap entries
> >   * @ent: the first swap entry to be recorded into
> > @@ -44,22 +73,19 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
> >                                   unsigned int nr_ents)
> >  {
> >         struct swap_cgroup_ctrl *ctrl;
> > -       unsigned short *map;
> > -       unsigned short old;
> > -       unsigned long flags;
> >         pgoff_t offset = swp_offset(ent);
> >         pgoff_t end = offset + nr_ents;
> > +       unsigned short old, iter;
> > +       unsigned short *map;
>
> Make it an atomic_t pointer here as well.
>
> >
> >         ctrl = &swap_cgroup_ctrl[swp_type(ent)];
> >         map = ctrl->map;
> >
> > -       spin_lock_irqsave(&ctrl->lock, flags);
> > -       old = map[offset];
> > +       old = READ_ONCE(map[offset]);
>
> Ah, you shouldn't read the u16 directly. That runs into the endianness
> question of how the u16s are arranged within the atomic_t. Do an atomic
> read and then shift the bits out, so you avoid the endian problem.
> Mixing atomic updates with plain reads from the middle of the atomic
> location is a bad idea.

Good suggestion; converting the whole map to atomic_t and doing
access/xchg with bit shifts is also OK. Mixing atomic with other types
may indeed lead to misuse.

>
> Chris

Patch

diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index a76afdc3666a..028f5e6be3f0 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -5,6 +5,15 @@ 
 
 #include <linux/swapops.h> /* depends on mm.h include */
 
+#define ID_PER_UNIT (sizeof(atomic_t) / sizeof(unsigned short))
+struct swap_cgroup_unit {
+	union {
+		int raw;
+		atomic_t val;
+		unsigned short __id[ID_PER_UNIT];
+	};
+};
+
 static DEFINE_MUTEX(swap_cgroup_mutex);
 
 struct swap_cgroup {
@@ -12,8 +21,10 @@  struct swap_cgroup {
 };
 
 struct swap_cgroup_ctrl {
-	unsigned short	*map;
-	spinlock_t	lock;
+	union {
+		struct swap_cgroup_unit *units;
+		unsigned short *map;
+	};
 };
 
 static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
@@ -31,6 +42,24 @@  static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
  *
  * TODO: we can push these buffers out to HIGHMEM.
  */
+static unsigned short __swap_cgroup_xchg(void *map,
+					 pgoff_t offset,
+					 unsigned int new_id)
+{
+	unsigned int old_id;
+	struct swap_cgroup_unit *units = map;
+	struct swap_cgroup_unit *unit = &units[offset / ID_PER_UNIT];
+	struct swap_cgroup_unit new, old = { .raw = atomic_read(&unit->val) };
+
+	do {
+		new.raw = old.raw;
+		old_id = old.__id[offset % ID_PER_UNIT];
+		new.__id[offset % ID_PER_UNIT] = new_id;
+	} while (!atomic_try_cmpxchg(&unit->val, &old.raw, new.raw));
+
+	return old_id;
+}
+
 /**
  * swap_cgroup_record - record mem_cgroup for a set of swap entries
  * @ent: the first swap entry to be recorded into
@@ -44,22 +73,19 @@  unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
 				  unsigned int nr_ents)
 {
 	struct swap_cgroup_ctrl *ctrl;
-	unsigned short *map;
-	unsigned short old;
-	unsigned long flags;
 	pgoff_t offset = swp_offset(ent);
 	pgoff_t end = offset + nr_ents;
+	unsigned short old, iter;
+	unsigned short *map;
 
 	ctrl = &swap_cgroup_ctrl[swp_type(ent)];
 	map = ctrl->map;
 
-	spin_lock_irqsave(&ctrl->lock, flags);
-	old = map[offset];
+	old = READ_ONCE(map[offset]);
 	do {
-		VM_BUG_ON(map[offset] != old);
-		map[offset] = id;
+		iter = __swap_cgroup_xchg(map, offset, id);
+		VM_BUG_ON(iter != old);
 	} while (++offset != end);
-	spin_unlock_irqrestore(&ctrl->lock, flags);
 
 	return old;
 }
@@ -85,20 +111,20 @@  unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
 
 int swap_cgroup_swapon(int type, unsigned long max_pages)
 {
-	void *map;
+	struct swap_cgroup_unit *units;
 	struct swap_cgroup_ctrl *ctrl;
 
 	if (mem_cgroup_disabled())
 		return 0;
 
-	map = vzalloc(max_pages * sizeof(unsigned short));
-	if (!map)
+	units = vzalloc(DIV_ROUND_UP(max_pages, ID_PER_UNIT) *
+			sizeof(struct swap_cgroup_unit));
+	if (!units)
 		goto nomem;
 
 	ctrl = &swap_cgroup_ctrl[type];
 	mutex_lock(&swap_cgroup_mutex);
-	ctrl->map = map;
-	spin_lock_init(&ctrl->lock);
+	ctrl->units = units;
 	mutex_unlock(&swap_cgroup_mutex);
 
 	return 0;