| Message ID | 20240130014208.565554-16-hannes@cmpxchg.org (mailing list archive) |
|---|---|
| State | New |
| Series | mm: zswap: cleanups |
From: Johannes Weiner <hannes@cmpxchg.org>

This completes consolidation of the LRU section.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
 mm/zswap.c | 101 ++++++++++++++++++++++++++---------------------------
 1 file changed, 49 insertions(+), 52 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 511bfafc1456..756d4d575efe 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -768,58 +768,6 @@ static inline int entry_to_nid(struct zswap_entry *entry)
 	return page_to_nid(virt_to_page(entry));
 }
 
-void zswap_lruvec_state_init(struct lruvec *lruvec)
-{
-	atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
-}
-
-void zswap_folio_swapin(struct folio *folio)
-{
-	struct lruvec *lruvec;
-
-	if (folio) {
-		lruvec = folio_lruvec(folio);
-		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
-	}
-}
-
-void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
-{
-	struct zswap_pool *pool;
-
-	/* lock out zswap pools list modification */
-	spin_lock(&zswap_pools_lock);
-	list_for_each_entry(pool, &zswap_pools, list) {
-		if (pool->next_shrink == memcg)
-			pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
-	}
-	spin_unlock(&zswap_pools_lock);
-}
-
-/*********************************
-* zswap entry functions
-**********************************/
-static struct kmem_cache *zswap_entry_cache;
-
-static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
-{
-	struct zswap_entry *entry;
-	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
-	if (!entry)
-		return NULL;
-	entry->refcount = 1;
-	RB_CLEAR_NODE(&entry->rbnode);
-	return entry;
-}
-
-static void zswap_entry_cache_free(struct zswap_entry *entry)
-{
-	kmem_cache_free(zswap_entry_cache, entry);
-}
-
-/*********************************
-* lru functions
-**********************************/
 static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
 {
 	atomic_long_t *nr_zswap_protected;
@@ -872,6 +820,55 @@ static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
 	rcu_read_unlock();
 }
 
+void zswap_lruvec_state_init(struct lruvec *lruvec)
+{
+	atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
+}
+
+void zswap_folio_swapin(struct folio *folio)
+{
+	struct lruvec *lruvec;
+
+	if (folio) {
+		lruvec = folio_lruvec(folio);
+		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
+	}
+}
+
+void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
+{
+	struct zswap_pool *pool;
+
+	/* lock out zswap pools list modification */
+	spin_lock(&zswap_pools_lock);
+	list_for_each_entry(pool, &zswap_pools, list) {
+		if (pool->next_shrink == memcg)
+			pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
+	}
+	spin_unlock(&zswap_pools_lock);
+}
+
+/*********************************
+* zswap entry functions
+**********************************/
+static struct kmem_cache *zswap_entry_cache;
+
+static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
+{
+	struct zswap_entry *entry;
+	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
+	if (!entry)
+		return NULL;
+	entry->refcount = 1;
+	RB_CLEAR_NODE(&entry->rbnode);
+	return entry;
+}
+
+static void zswap_entry_cache_free(struct zswap_entry *entry)
+{
+	kmem_cache_free(zswap_entry_cache, entry);
+}
+
 /*********************************
 * rbtree functions
 **********************************/
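For readers outside the kernel tree: the pattern that zswap_lruvec_state_init() and zswap_folio_swapin() implement above is a per-lruvec atomic counter that swapins bump, so the shrinker can distinguish recently faulted-in entries from cold ones. Below is a minimal userspace sketch of that pattern; toy_lruvec, toy_lruvec_init, and toy_swapin are invented names for illustration, and only nr_protected corresponds to the real nr_zswap_protected field.

#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-in for the per-lruvec zswap state. */
struct toy_lruvec {
	atomic_long nr_protected;
};

static void toy_lruvec_init(struct toy_lruvec *lruvec)
{
	/* start with no protected entries, as zswap_lruvec_state_init() does */
	atomic_store(&lruvec->nr_protected, 0);
}

static void toy_swapin(struct toy_lruvec *lruvec)
{
	/* a swapin is evidence of reuse, so protect one more LRU entry */
	atomic_fetch_add(&lruvec->nr_protected, 1);
}

int main(void)
{
	struct toy_lruvec lruvec;

	toy_lruvec_init(&lruvec);
	toy_swapin(&lruvec);
	toy_swapin(&lruvec);
	printf("protected entries: %ld\n", atomic_load(&lruvec.nr_protected));
	return 0;
}

An atomic counter suffices here because swapin paths only increment and the shrinker only reads and decays the value; no heavier locking is needed around the hot path.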
On Mon, Jan 29, 2024 at 5:42 PM Johannes Weiner <hannes@cmpxchg.org> wrote:
>
> This completes consolidation of the LRU section.
>
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

LGTM.
Reviewed-by: Nhat Pham <nphamcs@gmail.com>
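One other pattern in the moved code is worth spelling out: zswap_memcg_offline_cleanup() advances each pool's next_shrink cursor past a dying memcg, under the same lock that guards pool-list modification, so no stale pointer survives the memcg's death. Here is a simplified userspace sketch of that idea; all names (item, cursor, item_offline_cleanup) are invented, and the singly linked ->next step stands in for the real hierarchy-aware mem_cgroup_iter() advance.

#include <pthread.h>
#include <stddef.h>

/* 'item' plays the memcg; 'cursor' plays a pool's next_shrink iterator. */
struct item {
	struct item *next;
};

struct cursor {
	struct cursor *next;
	struct item *pos;	/* analogous to pool->next_shrink */
};

static pthread_mutex_t cursors_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cursor *cursors;	/* analogous to the zswap_pools list */

/* Called before 'dying' is freed, like zswap_memcg_offline_cleanup():
 * walk every cursor under the lock and step any that still point at
 * the dying object past it. */
static void item_offline_cleanup(struct item *dying)
{
	struct cursor *c;

	/* lock out cursor list modification */
	pthread_mutex_lock(&cursors_lock);
	for (c = cursors; c; c = c->next) {
		if (c->pos == dying)
			c->pos = dying->next;	/* step past the dying item */
	}
	pthread_mutex_unlock(&cursors_lock);
}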