@@ -40,6 +40,7 @@
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/kmemleak.h>
+#include "zpdesc.h"
/*
* NCHUNKS_ORDER determines the internal allocation granularity, effectively
@@ -1251,22 +1252,23 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
struct z3fold_header *zhdr;
struct z3fold_pool *pool;
+ struct zpdesc *zpdesc = page_zpdesc(page);
- VM_BUG_ON_PAGE(PageIsolated(page), page);
+ VM_BUG_ON_PAGE(PageIsolated(zpdesc_page(zpdesc)), zpdesc_page(zpdesc));
- if (test_bit(PAGE_HEADLESS, &page->private))
+ if (test_bit(PAGE_HEADLESS, &zpdesc->zppage_flag))
return false;
- zhdr = page_address(page);
+ zhdr = zpdesc_address(zpdesc);
z3fold_page_lock(zhdr);
- if (test_bit(NEEDS_COMPACTING, &page->private) ||
- test_bit(PAGE_STALE, &page->private))
+ if (test_bit(NEEDS_COMPACTING, &zpdesc->zppage_flag) ||
+ test_bit(PAGE_STALE, &zpdesc->zppage_flag))
goto out;
if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
goto out;
- if (test_and_set_bit(PAGE_CLAIMED, &page->private))
+ if (test_and_set_bit(PAGE_CLAIMED, &zpdesc->zppage_flag))
goto out;
pool = zhdr_to_pool(zhdr);
spin_lock(&pool->lock);
new file mode 100644
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* zpdesc.h: zswap.zpool memory descriptor
+ *
+ * Written by Alex Shi (Tencent) <alexs@kernel.org>
+ */
+#ifndef __MM_ZPDESC_H__
+#define __MM_ZPDESC_H__
+
+/*
+ * struct zpdesc - Memory descriptor for z3fold memory
+ * @flags: Page flags, PG_locked for headless z3fold memory
+ * @lru: Indirectly used by page migration
+ * @zppage_flag: z3fold memory flags
+ *
+ * This struct overlays struct page for now. Do not modify without a good
+ * understanding of the issues.
+ */
+struct zpdesc {
+ unsigned long flags;
+ struct list_head lru;
+ unsigned long _zp_pad_1;
+ unsigned long _zp_pad_2;
+ unsigned long zppage_flag;
+};
+#define ZPDESC_MATCH(pg, zp) \
+ static_assert(offsetof(struct page, pg) == offsetof(struct zpdesc, zp))
+
+ZPDESC_MATCH(flags, flags);
+ZPDESC_MATCH(lru, lru);
+ZPDESC_MATCH(private, zppage_flag);
+#undef ZPDESC_MATCH
+static_assert(sizeof(struct zpdesc) <= sizeof(struct page));
+
+#define zpdesc_page(zp) (_Generic((zp), \
+ const struct zpdesc *: (const struct page *)(zp), \
+ struct zpdesc *: (struct page *)(zp)))
+
+#define zpdesc_folio(zp) (_Generic((zp), \
+ const struct zpdesc *: (const struct folio *)(zp), \
+ struct zpdesc *: (struct folio *)(zp)))
+
+#define page_zpdesc(p) (_Generic((p), \
+ const struct page *: (const struct zpdesc *)(p), \
+ struct page *: (struct zpdesc *)(p)))
+
+static inline void *zpdesc_address(const struct zpdesc *zpdesc)
+{
+ return folio_address(zpdesc_folio(zpdesc));
+}
+
+#endif