@@ -1488,10 +1488,8 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
{
struct btt *btt = bdev->bd_disk->private_data;
int rc;
- unsigned int len;
- len = hpage_nr_pages(page) * PAGE_SIZE;
- rc = btt_do_bvec(btt, NULL, page, len, 0, op, sector);
+ rc = btt_do_bvec(btt, NULL, page, page_size(page), 0, op, sector);
if (rc == 0)
page_endio(page, op_is_write(op), 0);
@@ -224,8 +224,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
struct pmem_device *pmem = bdev->bd_queue->queuedata;
blk_status_t rc;
- rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
- 0, op, sector);
+ rc = pmem_do_bvec(pmem, page, page_size(page), 0, op, sector);
/*
* The ->rw_page interface is subtle and tricky. The core
@@ -226,12 +226,6 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
else
return NULL;
}
-static inline int hpage_nr_pages(struct page *page)
-{
- if (unlikely(PageTransHuge(page)))
- return HPAGE_PMD_NR;
- return 1;
-}
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
@@ -285,8 +279,6 @@ static inline struct list_head *page_deferred_list(struct page *page)
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
-#define hpage_nr_pages(x) 1
-
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
return false;
@@ -47,14 +47,14 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
static __always_inline void add_page_to_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
- update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+ update_lru_size(lruvec, lru, page_zonenum(page), compound_nr(page));
list_add(&page->lru, &lruvec->lists[lru]);
}
static __always_inline void add_page_to_lru_list_tail(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
- update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+ update_lru_size(lruvec, lru, page_zonenum(page), compound_nr(page));
list_add_tail(&page->lru, &lruvec->lists[lru]);
}
@@ -62,7 +62,7 @@ static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
list_del(&page->lru);
- update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
+ update_lru_size(lruvec, lru, page_zonenum(page), -compound_nr(page));
}
/**
@@ -196,7 +196,7 @@ static void unaccount_page_cache_page(struct address_space *mapping,
if (PageHuge(page))
return;
- nr = hpage_nr_pages(page);
+ nr = compound_nr(page);
__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
if (PageSwapBacked(page)) {
@@ -1469,7 +1469,7 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
mod_node_page_state(page_pgdat(head),
NR_ISOLATED_ANON +
page_is_file_cache(head),
- hpage_nr_pages(head));
+ compound_nr(head));
}
}
}
@@ -327,7 +327,7 @@ extern void clear_page_mlock(struct page *page);
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
if (TestClearPageMlocked(page)) {
- int nr_pages = hpage_nr_pages(page);
+ int nr_pages = compound_nr(page);
/* Holding pmd lock, no change in irq context: __mod is safe */
__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
@@ -354,7 +354,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
unsigned long start, end;
start = __vma_address(page, vma);
- end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
+	end = start + page_size(page) - PAGE_SIZE;
/* page should be within @vma mapping range */
VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
@@ -5406,7 +5406,7 @@ static int mem_cgroup_move_account(struct page *page,
struct mem_cgroup *to)
{
unsigned long flags;
- unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
+ unsigned int nr_pages = compound ? compound_nr(page) : 1;
int ret;
bool anon;
@@ -6447,7 +6447,7 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
bool compound)
{
struct mem_cgroup *memcg = NULL;
- unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
+ unsigned int nr_pages = compound ? compound_nr(page) : 1;
int ret = 0;
if (mem_cgroup_disabled())
@@ -6521,7 +6521,7 @@ int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
bool lrucare, bool compound)
{
- unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
+ unsigned int nr_pages = compound ? compound_nr(page) : 1;
VM_BUG_ON_PAGE(!page->mapping, page);
VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
@@ -6565,7 +6565,7 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
bool compound)
{
- unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
+ unsigned int nr_pages = compound ? compound_nr(page) : 1;
if (mem_cgroup_disabled())
return;
@@ -6772,7 +6772,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
/* Force-charge the new page. The old one will be freed soon */
compound = PageTransHuge(newpage);
- nr_pages = compound ? hpage_nr_pages(newpage) : 1;
+ nr_pages = compound ? compound_nr(newpage) : 1;
page_counter_charge(&memcg->memory, nr_pages);
if (do_memsw_account())
@@ -6995,7 +6995,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
* ancestor for the swap instead and transfer the memory+swap charge.
*/
swap_memcg = mem_cgroup_id_get_online(memcg);
- nr_entries = hpage_nr_pages(page);
+ nr_entries = compound_nr(page);
/* Get references for the tail pages, too */
if (nr_entries > 1)
mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
@@ -7041,7 +7041,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
*/
int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
- unsigned int nr_pages = hpage_nr_pages(page);
+ unsigned int nr_pages = compound_nr(page);
struct page_counter *counter;
struct mem_cgroup *memcg;
unsigned short oldid;
@@ -1344,8 +1344,8 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
isolate_huge_page(head, &source);
continue;
} else if (PageTransHuge(page))
- pfn = page_to_pfn(compound_head(page))
- + hpage_nr_pages(page) - 1;
+ pfn = page_to_pfn(compound_head(page)) +
+ compound_nr(page) - 1;
/*
* HWPoison pages have elevated reference counts so the migration would
@@ -978,7 +978,7 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist,
list_add_tail(&head->lru, pagelist);
mod_node_page_state(page_pgdat(head),
NR_ISOLATED_ANON + page_is_file_cache(head),
- hpage_nr_pages(head));
+ compound_nr(head));
} else if (flags & MPOL_MF_STRICT) {
/*
* Non-movable page may reach here. And, there may be
@@ -191,8 +191,9 @@ void putback_movable_pages(struct list_head *l)
unlock_page(page);
put_page(page);
} else {
- mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
- page_is_file_cache(page), -hpage_nr_pages(page));
+ mod_node_page_state(page_pgdat(page),
+ NR_ISOLATED_ANON + page_is_file_cache(page),
+ -compound_nr(page));
putback_lru_page(page);
}
}
@@ -381,7 +382,7 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
*/
expected_count += is_device_private_page(page);
if (mapping)
- expected_count += hpage_nr_pages(page) + page_has_private(page);
+ expected_count += compound_nr(page) + page_has_private(page);
return expected_count;
}
@@ -436,7 +437,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
*/
newpage->index = page->index;
newpage->mapping = page->mapping;
- page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
+ page_ref_add(newpage, compound_nr(page)); /* add cache reference */
if (PageSwapBacked(page)) {
__SetPageSwapBacked(newpage);
if (PageSwapCache(page)) {
@@ -469,7 +470,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
* to one less reference.
* We know this isn't the last reference.
*/
- page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
+ page_ref_unfreeze(page, expected_count - compound_nr(page));
xas_unlock(&xas);
/* Leave irq disabled to prevent preemption while updating stats */
@@ -579,7 +580,7 @@ static void copy_huge_page(struct page *dst, struct page *src)
} else {
/* thp page */
BUG_ON(!PageTransHuge(src));
- nr_pages = hpage_nr_pages(src);
+ nr_pages = compound_nr(src);
}
for (i = 0; i < nr_pages; i++) {
@@ -1215,7 +1216,7 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
*/
if (likely(!__PageMovable(page)))
mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
- page_is_file_cache(page), -hpage_nr_pages(page));
+ page_is_file_cache(page), -compound_nr(page));
}
/*
@@ -1571,7 +1572,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
list_add_tail(&head->lru, pagelist);
mod_node_page_state(page_pgdat(head),
NR_ISOLATED_ANON + page_is_file_cache(head),
- hpage_nr_pages(head));
+ compound_nr(head));
}
out_putpage:
/*
@@ -1912,7 +1913,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
page_lru = page_is_file_cache(page);
mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
- hpage_nr_pages(page));
+ compound_nr(page));
/*
* Isolating the page has taken another reference, so the
@@ -61,8 +61,7 @@ void clear_page_mlock(struct page *page)
if (!TestClearPageMlocked(page))
return;
- mod_zone_page_state(page_zone(page), NR_MLOCK,
- -hpage_nr_pages(page));
+ mod_zone_page_state(page_zone(page), NR_MLOCK, -compound_nr(page));
count_vm_event(UNEVICTABLE_PGCLEARED);
/*
* The previous TestClearPageMlocked() corresponds to the smp_mb()
@@ -95,7 +94,7 @@ void mlock_vma_page(struct page *page)
if (!TestSetPageMlocked(page)) {
mod_zone_page_state(page_zone(page), NR_MLOCK,
- hpage_nr_pages(page));
+ compound_nr(page));
count_vm_event(UNEVICTABLE_PGMLOCKED);
if (!isolate_lru_page(page))
putback_lru_page(page);
@@ -192,7 +191,7 @@ unsigned int munlock_vma_page(struct page *page)
/*
* Serialize with any parallel __split_huge_page_refcount() which
* might otherwise copy PageMlocked to part of the tail pages before
- * we clear it in the head page. It also stabilizes hpage_nr_pages().
+ * we clear it in the head page. It also stabilizes compound_nr().
*/
spin_lock_irq(&pgdat->lru_lock);
@@ -202,7 +201,7 @@ unsigned int munlock_vma_page(struct page *page)
goto unlock_out;
}
- nr_pages = hpage_nr_pages(page);
+ nr_pages = compound_nr(page);
__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
if (__munlock_isolate_lru_page(page, true)) {
@@ -40,7 +40,7 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
bio->bi_end_io = end_io;
- bio_add_page(bio, page, PAGE_SIZE * hpage_nr_pages(page), 0);
+ bio_add_page(bio, page, page_size(page), 0);
}
return bio;
}
@@ -271,7 +271,7 @@ static inline void count_swpout_vm_event(struct page *page)
if (unlikely(PageTransHuge(page)))
count_vm_event(THP_SWPOUT);
#endif
- count_vm_events(PSWPOUT, hpage_nr_pages(page));
+ count_vm_events(PSWPOUT, compound_nr(page));
}
int __swap_writepage(struct page *page, struct writeback_control *wbc,
@@ -57,7 +57,7 @@ static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
unsigned long hpage_pfn = page_to_pfn(hpage);
/* THP can be referenced by any subpage */
- return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
+	return pfn >= hpage_pfn && pfn - hpage_pfn < compound_nr(hpage);
}
/**
@@ -223,7 +223,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
if (pvmw->address >= pvmw->vma->vm_end ||
pvmw->address >=
__vma_address(pvmw->page, pvmw->vma) +
- hpage_nr_pages(pvmw->page) * PAGE_SIZE)
+ page_size(pvmw->page))
return not_found(pvmw);
/* Did we cross page table boundary? */
if (pvmw->address % PMD_SIZE == 0) {
@@ -264,7 +264,7 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
unsigned long start, end;
start = __vma_address(page, vma);
- end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
+	end = start + page_size(page) - PAGE_SIZE;
if (unlikely(end < vma->vm_start || start >= vma->vm_end))
return 0;
@@ -1112,7 +1112,7 @@ void do_page_add_anon_rmap(struct page *page,
}
if (first) {
- int nr = compound ? hpage_nr_pages(page) : 1;
+ int nr = compound ? compound_nr(page) : 1;
/*
* We use the irq-unsafe __{inc|mod}_zone_page_stat because
* these counters are not modified in interrupt context, and
@@ -1150,7 +1150,7 @@ void do_page_add_anon_rmap(struct page *page,
void page_add_new_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address, bool compound)
{
- int nr = compound ? hpage_nr_pages(page) : 1;
+ int nr = compound ? compound_nr(page) : 1;
VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
__SetPageSwapBacked(page);
@@ -1826,7 +1826,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
return;
pgoff_start = page_to_pgoff(page);
- pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
+ pgoff_end = pgoff_start + compound_nr(page) - 1;
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
pgoff_start, pgoff_end) {
struct vm_area_struct *vma = avc->vma;
@@ -1879,7 +1879,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
return;
pgoff_start = page_to_pgoff(page);
- pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
+ pgoff_end = pgoff_start + compound_nr(page) - 1;
if (!locked)
i_mmap_lock_read(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap,
@@ -465,7 +465,7 @@ void lru_cache_add_active_or_unevictable(struct page *page,
* lock is held(spinlock), which implies preemption disabled.
*/
__mod_zone_page_state(page_zone(page), NR_MLOCK,
- hpage_nr_pages(page));
+ compound_nr(page));
count_vm_event(UNEVICTABLE_PGMLOCKED);
}
lru_cache_add(page);
@@ -558,7 +558,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
ClearPageSwapBacked(page);
add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
- __count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
+ __count_vm_events(PGLAZYFREE, compound_nr(page));
count_memcg_page_event(page, PGLAZYFREE);
update_page_reclaim_stat(lruvec, 1, 0);
}
@@ -158,7 +158,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
{
struct address_space *address_space = swap_address_space(entry);
- int i, nr = hpage_nr_pages(page);
+ int i, nr = compound_nr(page);
pgoff_t idx = swp_offset(entry);
XA_STATE(xas, &address_space->i_pages, idx);
@@ -251,7 +251,7 @@ void delete_from_swap_cache(struct page *page)
xa_unlock_irq(&address_space->i_pages);
put_swap_page(page, entry);
- page_ref_sub(page, hpage_nr_pages(page));
+ page_ref_sub(page, compound_nr(page));
}
/*
@@ -1331,7 +1331,7 @@ void put_swap_page(struct page *page, swp_entry_t entry)
unsigned char *map;
unsigned int i, free_entries = 0;
unsigned char val;
- int size = swap_entry_size(hpage_nr_pages(page));
+ int size = swap_entry_size(compound_nr(page));
si = _swap_info_get(entry);
if (!si)
@@ -1901,7 +1901,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
SetPageLRU(page);
lru = page_lru(page);
- nr_pages = hpage_nr_pages(page);
+ nr_pages = compound_nr(page);
update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
list_move(&page->lru, &lruvec->lists[lru]);
@@ -2095,7 +2095,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
if (page_referenced(page, 0, sc->target_mem_cgroup,
&vm_flags)) {
- nr_rotated += hpage_nr_pages(page);
+ nr_rotated += compound_nr(page);
/*
* Identify referenced, file-backed active pages and
* give them one more trip around the active list. So