@@ -764,7 +764,7 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
extern void kvfree(const void *addr);
-static inline int compound_mapcount(struct page *page)
+static inline int compound_mapcount(const struct page *page)
{
VM_BUG_ON_PAGE(!PageCompound(page), page);
page = compound_head(page);
@@ -781,9 +781,9 @@ static inline void page_mapcount_reset(struct page *page)
atomic_set(&(page)->_mapcount, -1);
}
-int __page_mapcount(struct page *page);
+int __page_mapcount(const struct page *page);
-static inline int page_mapcount(struct page *page)
+static inline int page_mapcount(const struct page *page)
{
VM_BUG_ON_PAGE(PageSlab(page), page);
@@ -857,14 +857,14 @@ static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
return compound_page_dtors[page[1].compound_dtor];
}
-static inline unsigned int compound_order(struct page *page)
+static inline unsigned int compound_order(const struct page *page)
{
if (!PageHead(page))
return 0;
return page[1].compound_order;
}
-static inline bool hpage_pincount_available(struct page *page)
+static inline bool hpage_pincount_available(const struct page *page)
{
/*
* Can the page->hpage_pinned_refcount field be used? That field is in
@@ -875,7 +875,7 @@ static inline bool hpage_pincount_available(struct page *page)
return PageCompound(page) && compound_order(page) > 1;
}
-static inline int compound_pincount(struct page *page)
+static inline int compound_pincount(const struct page *page)
{
VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
page = compound_head(page);
@@ -1495,12 +1495,12 @@ void page_address_init(void);
extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
-extern struct address_space *page_mapping(struct page *page);
+extern struct address_space *page_mapping(const struct page *page);
-extern struct address_space *__page_file_mapping(struct page *);
+extern struct address_space *__page_file_mapping(const struct page *);
static inline
-struct address_space *page_file_mapping(struct page *page)
+struct address_space *page_file_mapping(const struct page *page)
{
if (unlikely(PageSwapCache(page)))
return __page_file_mapping(page);
@@ -1508,13 +1508,13 @@ struct address_space *page_file_mapping(struct page *page)
return page->mapping;
}
-extern pgoff_t __page_file_index(struct page *page);
+extern pgoff_t __page_file_index(const struct page *page);
/*
* Return the pagecache index of the passed page. Regular pagecache pages
* use ->index whereas swapcache pages use swp_offset(->private)
*/
-static inline pgoff_t page_index(struct page *page)
+static inline pgoff_t page_index(const struct page *page)
{
if (unlikely(PageSwapCache(page)))
return __page_file_index(page);
@@ -1522,15 +1522,15 @@ static inline pgoff_t page_index(struct page *page)
}
bool page_mapped(struct page *page);
-struct address_space *page_mapping(struct page *page);
-struct address_space *page_mapping_file(struct page *page);
+struct address_space *page_mapping(const struct page *page);
+struct address_space *page_mapping_file(const struct page *page);
/*
* Return true only if the page has been allocated with
* ALLOC_NO_WATERMARKS and the low watermark was not
* met implying that the system is under some pressure.
*/
-static inline bool page_is_pfmemalloc(struct page *page)
+static inline bool page_is_pfmemalloc(const struct page *page)
{
/*
* Page index cannot be this large so this must be
@@ -221,15 +221,8 @@ struct page {
#endif
} _struct_page_alignment;
-static inline atomic_t *compound_mapcount_ptr(struct page *page)
-{
- return &page[1].compound_mapcount;
-}
-
-static inline atomic_t *compound_pincount_ptr(struct page *page)
-{
- return &page[2].hpage_pinned_refcount;
-}
+#define compound_mapcount_ptr(page) (&(page)[1].compound_mapcount)
+#define compound_pincount_ptr(page) (&(page)[2].hpage_pinned_refcount)
/*
* Used for sizing the vmemmap region on some architectures
@@ -9,8 +9,8 @@ struct page;
struct vm_area_struct;
struct mm_struct;
-extern void dump_page(struct page *page, const char *reason);
-extern void __dump_page(struct page *page, const char *reason);
+extern void dump_page(const struct page *page, const char *reason);
+extern void __dump_page(const struct page *page, const char *reason);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
@@ -175,23 +175,20 @@ enum pageflags {
#ifndef __GENERATING_BOUNDS_H
-struct page; /* forward declaration */
+#define compound_head(page) ({ \
+	__typeof__(page) _page = (page); \
+	unsigned long _head = READ_ONCE(_page->compound_head); \
+	if (unlikely(_head & 1)) \
+		_page = (void *)(_head - 1); \
+ _page; \
+})
-static inline struct page *compound_head(struct page *page)
-{
- unsigned long head = READ_ONCE(page->compound_head);
-
- if (unlikely(head & 1))
- return (struct page *) (head - 1);
- return page;
-}
-
-static __always_inline int PageTail(struct page *page)
+static __always_inline int PageTail(const struct page *page)
{
return READ_ONCE(page->compound_head) & 1;
}
-static __always_inline int PageCompound(struct page *page)
+static __always_inline int PageCompound(const struct page *page)
{
return test_bit(PG_head, &page->flags) || PageTail(page);
}
@@ -252,7 +249,7 @@ static inline void page_init_poison(struct page *page, size_t size)
* Macros to create function definitions for page flags
*/
#define TESTPAGEFLAG(uname, lname, policy) \
-static __always_inline int Page##uname(struct page *page) \
+static __always_inline int Page##uname(const struct page *page) \
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }
#define SETPAGEFLAG(uname, lname, policy) \
@@ -385,7 +382,7 @@ PAGEFLAG_FALSE(HighMem)
#endif
#ifdef CONFIG_SWAP
-static __always_inline int PageSwapCache(struct page *page)
+static __always_inline int PageSwapCache(const struct page *page)
{
#ifdef CONFIG_THP_SWAP
page = compound_head(page);
@@ -474,7 +471,7 @@ static __always_inline int PageMappingFlags(struct page *page)
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}
-static __always_inline int PageAnon(struct page *page)
+static __always_inline int PageAnon(const struct page *page)
{
page = compound_head(page);
return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
@@ -493,7 +490,7 @@ static __always_inline int __PageMovable(struct page *page)
* is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
* anon_vma, but to that page's node of the stable tree.
*/
-static __always_inline int PageKsm(struct page *page)
+static __always_inline int PageKsm(const struct page *page)
{
page = compound_head(page);
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
@@ -586,14 +583,14 @@ static inline void ClearPageCompound(struct page *page)
#define PG_head_mask ((1UL << PG_head))
#ifdef CONFIG_HUGETLB_PAGE
-int PageHuge(struct page *page);
-int PageHeadHuge(struct page *page);
-bool page_huge_active(struct page *page);
+int PageHuge(const struct page *page);
+int PageHeadHuge(const struct page *page);
+bool page_huge_active(const struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)
-static inline bool page_huge_active(struct page *page)
+static inline bool page_huge_active(const struct page *page)
{
return 0;
}
@@ -667,7 +664,7 @@ static inline int PageTransCompoundMap(struct page *page)
* and hugetlbfs pages, so it should only be called when it's known
* that hugetlbfs pages aren't involved.
*/
-static inline int PageTransTail(struct page *page)
+static inline int PageTransTail(const struct page *page)
{
return PageTail(page);
}
@@ -685,7 +682,7 @@ static inline int PageTransTail(struct page *page)
*
* See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
*/
-static inline int PageDoubleMap(struct page *page)
+static inline int PageDoubleMap(const struct page *page)
{
return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
}
@@ -14,7 +14,7 @@ extern void __set_page_owner(struct page *page,
extern void __split_page_owner(struct page *page, unsigned int order);
extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
extern void __set_page_owner_migrate_reason(struct page *page, int reason);
-extern void __dump_page_owner(struct page *page);
+extern void __dump_page_owner(const struct page *page);
extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone);
@@ -46,7 +46,7 @@ static inline void set_page_owner_migrate_reason(struct page *page, int reason)
if (static_branch_unlikely(&page_owner_inited))
__set_page_owner_migrate_reason(page, reason);
}
-static inline void dump_page_owner(struct page *page)
+static inline void dump_page_owner(const struct page *page)
{
if (static_branch_unlikely(&page_owner_inited))
__dump_page_owner(page);
@@ -69,7 +69,7 @@ static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
static inline void set_page_owner_migrate_reason(struct page *page, int reason)
{
}
-static inline void dump_page_owner(struct page *page)
+static inline void dump_page_owner(const struct page *page)
{
}
#endif /* CONFIG_PAGE_OWNER */
@@ -62,12 +62,12 @@ static inline void __page_ref_unfreeze(struct page *page, int v)
#endif
-static inline int page_ref_count(struct page *page)
+static inline int page_ref_count(const struct page *page)
{
return atomic_read(&page->_refcount);
}
-static inline int page_count(struct page *page)
+static inline int page_count(const struct page *page)
{
return atomic_read(&compound_head(page)->_refcount);
}
@@ -54,7 +54,7 @@ extern unsigned int pageblock_order;
/* Forward declaration */
struct page;
-unsigned long get_pfnblock_flags_mask(struct page *page,
+unsigned long get_pfnblock_flags_mask(const struct page *page,
unsigned long pfn,
unsigned long end_bitidx,
unsigned long mask);
@@ -401,7 +401,7 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
* Get index of the page with in radix-tree
* (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
*/
-static inline pgoff_t page_to_index(struct page *page)
+static inline pgoff_t page_to_index(const struct page *page)
{
pgoff_t pgoff;
@@ -421,7 +421,7 @@ static inline pgoff_t page_to_index(struct page *page)
* Get the offset in PAGE_SIZE.
* (TODO: hugepage should have ->index in PAGE_SIZE)
*/
-static inline pgoff_t page_to_pgoff(struct page *page)
+static inline pgoff_t page_to_pgoff(const struct page *page)
{
if (unlikely(PageHeadHuge(page)))
return page->index << compound_order(page);
@@ -464,7 +464,7 @@ extern int page_swapcount(struct page *);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
-extern struct swap_info_struct *page_swap_info(struct page *);
+extern struct swap_info_struct *page_swap_info(const struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
@@ -42,9 +42,9 @@ const struct trace_print_flags vmaflag_names[] = {
{0, NULL}
};
-void __dump_page(struct page *page, const char *reason)
+void __dump_page(const struct page *page, const char *reason)
{
- struct page *head = compound_head(page);
+ const struct page *head = compound_head(page);
struct address_space *mapping;
bool page_poisoned = PagePoisoned(page);
bool compound = PageCompound(page);
@@ -140,7 +140,7 @@ void __dump_page(struct page *page, const char *reason)
#endif
}
-void dump_page(struct page *page, const char *reason)
+void dump_page(const struct page *page, const char *reason)
{
__dump_page(page, reason);
dump_page_owner(page);
@@ -1305,7 +1305,7 @@ struct hstate *size_to_hstate(unsigned long size)
*
* This function can be called for tail pages, but never returns true for them.
*/
-bool page_huge_active(struct page *page)
+bool page_huge_active(const struct page *page)
{
VM_BUG_ON_PAGE(!PageHuge(page), page);
return PageHead(page) && PagePrivate(&page[1]);
@@ -1509,7 +1509,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned int order)
* transparent huge pages. See the PageTransHuge() documentation for more
* details.
*/
-int PageHuge(struct page *page)
+int PageHuge(const struct page *page)
{
if (!PageCompound(page))
return 0;
@@ -1523,7 +1523,7 @@ EXPORT_SYMBOL_GPL(PageHuge);
* PageHeadHuge() only returns true for hugetlbfs head page, but not for
* normal or transparent huge pages.
*/
-int PageHeadHuge(struct page *page_head)
+int PageHeadHuge(const struct page *page_head)
{
if (!PageHead(page_head))
return 0;
@@ -446,7 +446,7 @@ static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
#endif
/* Return a pointer to the bitmap storing bits affecting a block of pages */
-static inline unsigned long *get_pageblock_bitmap(struct page *page,
+static inline unsigned long *get_pageblock_bitmap(const struct page *page,
unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
@@ -456,7 +456,7 @@ static inline unsigned long *get_pageblock_bitmap(struct page *page,
#endif /* CONFIG_SPARSEMEM */
}
-static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
+static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
pfn &= (PAGES_PER_SECTION-1);
@@ -476,7 +476,8 @@ static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
*
* Return: pageblock_bits flags
*/
-static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
+static __always_inline
+unsigned long __get_pfnblock_flags_mask(const struct page *page,
unsigned long pfn,
unsigned long end_bitidx,
unsigned long mask)
@@ -495,9 +496,8 @@ static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page
return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}
-unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
- unsigned long end_bitidx,
- unsigned long mask)
+unsigned long get_pfnblock_flags_mask(const struct page *page,
+ unsigned long pfn, unsigned long end_bitidx, unsigned long mask)
{
return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
}
@@ -399,7 +399,7 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
return -ENOMEM;
}
-void __dump_page_owner(struct page *page)
+void __dump_page_owner(const struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_owner *page_owner;
@@ -3489,7 +3489,7 @@ struct swap_info_struct *swp_swap_info(swp_entry_t entry)
return swap_type_to_swap_info(swp_type(entry));
}
-struct swap_info_struct *page_swap_info(struct page *page)
+struct swap_info_struct *page_swap_info(const struct page *page)
{
swp_entry_t entry = { .val = page_private(page) };
return swp_swap_info(entry);
@@ -3498,13 +3498,13 @@ struct swap_info_struct *page_swap_info(struct page *page)
/*
* out-of-line __page_file_ methods to avoid include hell.
*/
-struct address_space *__page_file_mapping(struct page *page)
+struct address_space *__page_file_mapping(const struct page *page)
{
return page_swap_info(page)->swap_file->f_mapping;
}
EXPORT_SYMBOL_GPL(__page_file_mapping);
-pgoff_t __page_file_index(struct page *page)
+pgoff_t __page_file_index(const struct page *page)
{
swp_entry_t swap = { .val = page_private(page) };
return swp_offset(swap);
@@ -655,7 +655,7 @@ struct anon_vma *page_anon_vma(struct page *page)
return __page_rmapping(page);
}
-struct address_space *page_mapping(struct page *page)
+struct address_space *page_mapping(const struct page *page)
{
struct address_space *mapping;
@@ -683,7 +683,7 @@ EXPORT_SYMBOL(page_mapping);
/*
* For file cache pages, return the address_space, otherwise return NULL
*/
-struct address_space *page_mapping_file(struct page *page)
+struct address_space *page_mapping_file(const struct page *page)
{
if (unlikely(PageSwapCache(page)))
return NULL;
@@ -691,7 +691,7 @@ struct address_space *page_mapping_file(struct page *page)
}
/* Slow path of page_mapcount() for compound pages */
-int __page_mapcount(struct page *page)
+int __page_mapcount(const struct page *page)
{
int ret;