@@ -138,7 +138,7 @@ const struct movable_operations *page_movable_ops(struct page *page)
VM_BUG_ON(!__PageMovable(page));
return (const struct movable_operations *)
- ((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
+ ((unsigned long)page->___mapping - PAGE_MAPPING_MOVABLE);
}
#ifdef CONFIG_NUMA_BALANCING
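Only the field name changes here: the movable-ops pointer still lives in the (renamed) mapping word with its low-bit tag. A minimal sketch of the writer side, modelled on upstream __SetPageMovable() and assuming the usual PAGE_MAPPING_MOVABLE tag (0x2) and word-aligned struct movable_operations; the helper name is illustrative:

    static void set_movable_ops_sketch(struct page *page,
    				   const struct movable_operations *mops)
    {
    	/* mops is word-aligned, so bits 0-1 of the pointer are free
    	 * for PAGE_MAPPING_* tags; page_movable_ops() masks them off. */
    	VM_BUG_ON_PAGE((unsigned long)mops & PAGE_MAPPING_MOVABLE, page);
    	page->___mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE);
    }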
@@ -2216,17 +2216,6 @@ static inline void *folio_address(const struct folio *folio)
extern pgoff_t __page_file_index(struct page *page);
-/*
- * Return the pagecache index of the passed page. Regular pagecache pages
- * use ->index whereas swapcache pages use swp_offset(->private)
- */
-static inline pgoff_t page_index(struct page *page)
-{
- if (unlikely(PageSwapCache(page)))
- return __page_file_index(page);
- return page->index;
-}
-
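page_index() can go because folio_index() in pagemap.h already gives the same answer at folio granularity, including the swapcache special case. A caller conversion would look roughly like this (the wrapper is hypothetical; folio_index() and page_folio() are existing helpers):

    /* Hypothetical caller; before: off = page_index(page); after: */
    static inline pgoff_t example_index(struct page *page)
    {
    	/* folio_index() returns ->index for pagecache folios and the
    	 * swap offset for swapcache folios, as page_index() did. */
    	return folio_index(page_folio(page));
    }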
/*
* Return true only if the page has been allocated with
* ALLOC_NO_WATERMARKS and the low watermark was not
@@ -103,9 +103,9 @@ struct page {
struct list_head pcp_list;
};
/* See page-flags.h for PAGE_MAPPING_FLAGS */
- struct address_space *mapping;
+ struct address_space *___mapping;
union {
- pgoff_t index; /* Our offset within mapping. */
+ pgoff_t ___index; /* Our offset within mapping. */
unsigned long share; /* share count for fsdax */
};
/**
@@ -361,9 +361,9 @@ struct folio {
static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
FOLIO_MATCH(flags, flags);
FOLIO_MATCH(lru, lru);
-FOLIO_MATCH(mapping, mapping);
+FOLIO_MATCH(___mapping, mapping);
FOLIO_MATCH(compound_head, lru);
-FOLIO_MATCH(index, index);
+FOLIO_MATCH(___index, index);
FOLIO_MATCH(private, private);
FOLIO_MATCH(_mapcount, _mapcount);
FOLIO_MATCH(_refcount, _refcount);
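FOLIO_MATCH() is a compile-time layout check, so any drift between the two structs is caught the moment the rename lands. The first updated line above expands to roughly:

    static_assert(offsetof(struct page, ___mapping) ==
    	      offsetof(struct folio, mapping));
    /* folio->mapping therefore still aliases the renamed page field,
     * and the page<->folio casts remain layout-safe. */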
@@ -449,7 +449,7 @@ struct ptdesc {
TABLE_MATCH(flags, __page_flags);
TABLE_MATCH(compound_head, pt_list);
TABLE_MATCH(compound_head, _pt_pad_1);
-TABLE_MATCH(mapping, __page_mapping);
+TABLE_MATCH(___mapping, __page_mapping);
TABLE_MATCH(rcu_head, pt_rcu_head);
TABLE_MATCH(page_type, __page_type);
TABLE_MATCH(_refcount, _refcount);
@@ -642,11 +642,6 @@ static __always_inline bool folio_mapping_flags(struct folio *folio)
return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}
-static __always_inline int PageMappingFlags(struct page *page)
-{
- return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
-}
-
static __always_inline bool folio_test_anon(struct folio *folio)
{
return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
@@ -665,7 +660,7 @@ static __always_inline bool __folio_test_movable(const struct folio *folio)
static __always_inline int __PageMovable(struct page *page)
{
- return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+ return ((unsigned long)page->___mapping & PAGE_MAPPING_FLAGS) ==
PAGE_MAPPING_MOVABLE;
}
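The equality test (rather than a plain mask) matters because the two tag bits encode four states; summarizing the upstream encoding in page-flags.h:

    /* (unsigned long)___mapping & PAGE_MAPPING_FLAGS:
     *   0x0                        pagecache (or NULL)
     *   0x1 PAGE_MAPPING_ANON      anonymous
     *   0x2 PAGE_MAPPING_MOVABLE   non-LRU movable  <-- this test
     *   0x3 PAGE_MAPPING_KSM       KSM (ANON | MOVABLE)
     * A '&' test would also match KSM pages; '==' does not.
     */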
@@ -865,30 +865,10 @@ static inline struct folio *read_mapping_folio(struct address_space *mapping,
*/
static inline pgoff_t page_to_pgoff(struct page *page)
{
- struct page *head;
-
- if (likely(!PageTransTail(page)))
- return page->index;
-
- head = compound_head(page);
- /*
- * We don't initialize ->index for tail pages: calculate based on
- * head page
- */
- return head->index + page - head;
-}
-
-/*
- * Return byte-offset into filesystem object for page.
- */
-static inline loff_t page_offset(struct page *page)
-{
- return ((loff_t)page->index) << PAGE_SHIFT;
-}
+ struct folio *folio;
-static inline loff_t page_file_offset(struct page *page)
-{
- return ((loff_t)page_index(page)) << PAGE_SHIFT;
+ folio = page_folio(page);
+ return folio->index + folio_page_idx(folio, page);
}
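folio_page_idx() returns the page's position within its folio, so the old head/tail branching collapses into one expression. A worked example, assuming a contiguous memmap where folio_page_idx() reduces to plain pointer arithmetic:

    /* An order-2 folio cached at index 8; p is its third page: */
    /*   folio->index             == 8                       */
    /*   folio_page_idx(folio, p) == p - &folio->page == 2   */
    /*   page_to_pgoff(p)         == 10                      */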
/**
@@ -897,7 +877,7 @@ static inline loff_t page_file_offset(struct page *page)
*/
static inline loff_t folio_pos(struct folio *folio)
{
- return page_offset(&folio->page);
+ return (loff_t)folio->index * PAGE_SIZE;
}
/**
@@ -909,7 +889,7 @@ static inline loff_t folio_pos(struct folio *folio)
*/
static inline loff_t folio_file_pos(struct folio *folio)
{
- return page_file_offset(&folio->page);
+ return (loff_t)folio_index(folio) * PAGE_SIZE;
}
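Both helpers are now simple multiplications; the only difference is that folio_file_pos() goes through folio_index(), so swapcache folios report their swap-file offset. The arithmetic, spelled out:

    /* A folio at pagecache index 3 with 4 KiB pages:        */
    /*   folio_pos(folio) == (loff_t)3 * 4096 == 12288       */
    /* The loff_t cast happens before the multiply, so large
     * indexes don't overflow on 32-bit, where pgoff_t is an
     * unsigned long of only 32 bits. */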
/*
@@ -1464,34 +1444,6 @@ static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
return offset;
}
-/**
- * page_mkwrite_check_truncate - check if page was truncated
- * @page: the page to check
- * @inode: the inode to check the page against
- *
- * Returns the number of bytes in the page up to EOF,
- * or -EFAULT if the page was truncated.
- */
-static inline int page_mkwrite_check_truncate(struct page *page,
- struct inode *inode)
-{
- loff_t size = i_size_read(inode);
- pgoff_t index = size >> PAGE_SHIFT;
- int offset = offset_in_page(size);
-
- if (page->mapping != inode->i_mapping)
- return -EFAULT;
-
- /* page is wholly inside EOF */
- if (page->index < index)
- return PAGE_SIZE;
- /* page is wholly past EOF */
- if (page->index > index || !offset)
- return -EFAULT;
- /* page is partially inside EOF */
- return offset;
-}
-
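Remaining users switch to folio_mkwrite_check_truncate(), which carries the identical EOF logic. A hedged sketch of such a conversion (the fault handler itself is hypothetical; the helpers are existing API):

    static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
    {
    	struct folio *folio = page_folio(vmf->page);
    	struct inode *inode = file_inode(vmf->vma->vm_file);

    	folio_lock(folio);
    	/* Same wholly-inside/past/partial-EOF checks as the
    	 * deleted page helper, done per-folio: */
    	if (folio_mkwrite_check_truncate(folio, inode) < 0) {
    		folio_unlock(folio);
    		return VM_FAULT_NOPAGE;
    	}
    	return VM_FAULT_LOCKED;
    }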
/**
* i_blocks_per_folio - How many blocks fit in this folio.
* @inode: The inode which contains the blocks.
@@ -29,10 +29,6 @@
/********** mm/page_poison.c **********/
#define PAGE_POISON 0xaa
-/********** mm/page_alloc.c ************/
-
-#define TAIL_MAPPING ((void *) 0x400 + POISON_POINTER_DELTA)
-
/********** mm/slab.c **********/
/*
* Magic nums for obj red zoning.
@@ -6147,8 +6147,6 @@ static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
goto unlock;
get_page(vmf->page);
- vmf->page->mapping = vmf->vma->vm_file->f_mapping;
- vmf->page->index = vmf->pgoff;
ret = 0;
unlock:
@@ -637,7 +637,6 @@ static void rb_free_aux_page(struct perf_buffer *rb, int idx)
struct page *page = virt_to_page(rb->aux_pages[idx]);
ClearPagePrivate(page);
- page->mapping = NULL;
__free_page(page);
}
@@ -808,7 +807,6 @@ static void perf_mmap_free_page(void *addr)
{
struct page *page = virt_to_page(addr);
- page->mapping = NULL;
__free_page(page);
}
@@ -75,7 +75,7 @@ static void __dump_page(struct page *page)
* and potentially other situations. (See the page_mapping()
* implementation for what's missing here.)
*/
- unsigned long tmp = (unsigned long)page->mapping;
+ unsigned long tmp = (unsigned long)page->___mapping;
if (tmp & PAGE_MAPPING_ANON)
mapping = NULL;
@@ -2478,11 +2478,8 @@ static void __split_huge_page_tail(struct folio *folio, int tail,
(1L << PG_dirty) |
LRU_GEN_MASK | LRU_REFS_MASK));
- /* ->mapping in first and second tail page is replaced by other uses */
- VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
- page_tail);
- page_tail->mapping = head->mapping;
- page_tail->index = head->index + tail;
+ new_folio->mapping = folio->mapping;
+ new_folio->index = folio->index + tail;
/*
* page->private should not be set in tail pages. Fix up and warn once
@@ -438,7 +438,6 @@ static inline void prep_compound_tail(struct page *head, int tail_idx)
{
struct page *p = head + tail_idx;
- p->mapping = TAIL_MAPPING;
set_compound_head(p, head);
set_page_private(p, 0);
}
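With the poison gone, a tail page is recognised purely by the compound_head encoding that set_compound_head() writes: the head pointer with bit 0 set. This mirrors the upstream inline:

    static __always_inline void set_compound_head(struct page *page,
    					      struct page *head)
    {
    	/* Bit 0 tags the word as a head pointer; PageTail() and
    	 * compound_head() test/strip it. That tag already identifies
    	 * tails, making the TAIL_MAPPING poison of ->mapping redundant. */
    	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
    }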
@@ -215,12 +215,12 @@ gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
*/
static inline int get_pcppage_migratetype(struct page *page)
{
- return page->index;
+ return page->___index;
}
static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
- page->index = migratetype;
+ page->___index = migratetype;
}
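A page sitting on a per-CPU free list has no pagecache identity, so the renamed ___index is dead storage the allocator can borrow for the migratetype. Hedged usage sketch; the call sites shown are illustrative, not the actual hot-path code:

    /* freeing side stashes the type before the page goes on a pcp list: */
    set_pcppage_migratetype(page, MIGRATE_MOVABLE);

    /* the allocation side reads it back when handing the page out: */
    if (get_pcppage_migratetype(page) == MIGRATE_MOVABLE)
    	/* account against the movable freelist */;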
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
@@ -911,7 +911,7 @@ static inline bool page_expected_state(struct page *page,
if (unlikely(atomic_read(&page->_mapcount) != -1))
return false;
- if (unlikely((unsigned long)page->mapping |
+ if (unlikely((unsigned long)page->___mapping |
page_ref_count(page) |
#ifdef CONFIG_MEMCG
page->memcg_data |
@@ -928,7 +928,7 @@ static const char *page_bad_reason(struct page *page, unsigned long flags)
if (unlikely(atomic_read(&page->_mapcount) != -1))
bad_reason = "nonzero mapcount";
- if (unlikely(page->mapping != NULL))
+ if (unlikely(page->___mapping != NULL))
bad_reason = "non-NULL mapping";
if (unlikely(page_ref_count(page) != 0))
bad_reason = "nonzero _refcount";
@@ -981,9 +981,7 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
ret = 0;
goto out;
}
- switch (page - head_page) {
- case 1:
- /* the first tail page: these may be in place of ->mapping */
+ if (page - head_page == 1) {
if (unlikely(folio_entire_mapcount(folio))) {
bad_page(page, "nonzero entire_mapcount");
goto out;
@@ -996,19 +994,6 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
bad_page(page, "nonzero pincount");
goto out;
}
- break;
- case 2:
- /*
- * the second tail page: ->mapping is
- * deferred_list.next -- ignore value.
- */
- break;
- default:
- if (page->mapping != TAIL_MAPPING) {
- bad_page(page, "corrupted mapping in tail page");
- goto out;
- }
- break;
}
if (unlikely(!PageTail(page))) {
bad_page(page, "PageTail not set");
@@ -1020,7 +1005,7 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
}
ret = 0;
out:
- page->mapping = NULL;
+ page->___mapping = NULL;
clear_compound_head(page);
return ret;
}
@@ -1080,8 +1065,10 @@ static __always_inline bool free_pages_prepare(struct page *page,
bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
bool init = want_init_on_free();
bool compound = PageCompound(page);
+ struct folio *folio;
VM_BUG_ON_PAGE(PageTail(page), page);
+ folio = (struct folio *)page;
trace_mm_page_free(page, order);
kmsan_free_page(page, order);
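The bare cast stands in for page_folio() and is safe only because the VM_BUG_ON above has already ruled out tail pages; written with the helper, the same line would be:

    folio = page_folio(page);	/* equivalent here: page is a head page */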
@@ -1121,8 +1108,8 @@ static __always_inline bool free_pages_prepare(struct page *page,
(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
}
}
- if (PageMappingFlags(page))
- page->mapping = NULL;
+ if (folio_mapping_flags(folio))
+ folio->mapping = NULL;
if (memcg_kmem_online() && PageMemcgKmem(page))
__memcg_kmem_uncharge_page(page, order);
if (is_check_pages_enabled()) {
@@ -253,13 +253,13 @@ static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
- page->index = (unsigned long)pcpu;
+ page->___index = (unsigned long)pcpu;
}
/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
- return (struct pcpu_chunk *)page->index;
+ return (struct pcpu_chunk *)page->___index;
}
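percpu plays the same trick as the page allocator: for pages backing a chunk, ___index is free storage for a back-pointer. The round trip, sketched (addr and chunk are illustrative locals):

    /* population: remember the owning chunk on a backing page */
    pcpu_set_page_chunk(virt_to_page(addr), chunk);

    /* free path: recover the chunk from an address inside it */
    struct pcpu_chunk *c = pcpu_get_page_chunk(virt_to_page(addr));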
static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
@@ -38,10 +38,6 @@
/********** mm/page_poison.c **********/
#define PAGE_POISON 0xaa
-/********** mm/page_alloc.c ************/
-
-#define TAIL_MAPPING ((void *) 0x400 + POISON_POINTER_DELTA)
-
/********** mm/slab.c **********/
/*
* Magic nums for obj red zoning.