diff --git a/fs/buffer.c b/fs/buffer.c
@@ -630,7 +630,7 @@ static void __set_page_dirty(struct page *page, struct address_space *mapping,
unsigned long flags;
spin_lock_irqsave(&mapping->tree_lock, flags);
- if (page->mapping) { /* Race with truncate? */
+ if (page_mapping(page)) { /* Race with truncate? */
WARN_ON_ONCE(warn && !PageUptodate(page));
account_page_dirtied(page, mapping);
radix_tree_tag_set(&mapping->page_tree,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
@@ -1223,7 +1223,7 @@ retry_journal:
}
lock_page(page);
- if (page->mapping != mapping) {
+ if (page_mapping(page) != mapping) {
/* The page got truncated from under us */
unlock_page(page);
put_page(page);
@@ -2962,7 +2962,7 @@ retry_journal:
}
lock_page(page);
- if (page->mapping != mapping) {
+ if (page_mapping(page) != mapping) {
/* The page got truncated from under us */
unlock_page(page);
put_page(page);
diff --git a/mm/filemap.c b/mm/filemap.c
@@ -369,7 +369,7 @@ static int __filemap_fdatawait_range(struct address_space *mapping,
struct page *page = pvec.pages[i];
/* until radix tree lookup accepts end_index */
- if (page->index > end)
+ if (page_to_pgoff(page) > end)
continue;
page = compound_head(page);
@@ -1307,12 +1307,12 @@ repeat:
}
/* Has the page been truncated? */
- if (unlikely(page->mapping != mapping)) {
+ if (unlikely(page_mapping(page) != mapping)) {
unlock_page(page);
put_page(page);
goto repeat;
}
- VM_BUG_ON_PAGE(page->index != offset, page);
+ VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
}
if (page && (fgp_flags & FGP_ACCESSED))
@@ -1606,7 +1606,8 @@ repeat:
* otherwise we can get both false positives and false
* negatives, which is just confusing to the caller.
*/
- if (page->mapping == NULL || page_to_pgoff(page) != index) {
+ if (page_mapping(page) == NULL ||
+ page_to_pgoff(page) != index) {
put_page(page);
break;
}
@@ -1907,7 +1908,7 @@ find_page:
if (!trylock_page(page))
goto page_not_up_to_date;
/* Did it get truncated before we got the lock? */
- if (!page->mapping)
+ if (!page_mapping(page))
goto page_not_up_to_date_locked;
if (!mapping->a_ops->is_partially_uptodate(page,
offset, iter->count))
@@ -1987,7 +1988,7 @@ page_not_up_to_date:
page_not_up_to_date_locked:
/* Did it get truncated before we got the lock? */
- if (!page->mapping) {
+ if (!page_mapping(page)) {
unlock_page(page);
put_page(page);
continue;
@@ -2023,7 +2024,7 @@ readpage:
if (unlikely(error))
goto readpage_error;
if (!PageUptodate(page)) {
- if (page->mapping == NULL) {
+ if (page_mapping(page) == NULL) {
/*
* invalidate_mapping_pages got it
*/
@@ -2324,12 +2325,12 @@ retry_find:
}
/* Did it get truncated? */
- if (unlikely(page->mapping != mapping)) {
+ if (unlikely(page_mapping(page) != mapping)) {
unlock_page(page);
put_page(page);
goto retry_find;
}
- VM_BUG_ON_PAGE(page->index != offset, page);
+ VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
/*
* We have a locked page in the page cache, now we need to check
@@ -2505,7 +2506,7 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
sb_start_pagefault(inode->i_sb);
file_update_time(vma->vm_file);
lock_page(page);
- if (page->mapping != inode->i_mapping) {
+ if (page_mapping(page) != inode->i_mapping) {
unlock_page(page);
ret = VM_FAULT_NOPAGE;
goto out;
@@ -2654,7 +2655,7 @@ filler:
lock_page(page);
/* Case c or d, restart the operation */
- if (!page->mapping) {
+ if (!page_mapping(page)) {
unlock_page(page);
put_page(page);
goto repeat;
@@ -3110,12 +3111,13 @@ EXPORT_SYMBOL(generic_file_write_iter);
*/
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
- struct address_space * const mapping = page->mapping;
+ struct address_space * const mapping = page_mapping(page);
BUG_ON(!PageLocked(page));
if (PageWriteback(page))
return 0;
+ page = compound_head(page);
if (mapping && mapping->a_ops->releasepage)
return mapping->a_ops->releasepage(page, gfp_mask);
return try_to_free_buffers(page);
diff --git a/mm/memory.c b/mm/memory.c
@@ -2052,7 +2052,7 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
return ret;
if (unlikely(!(ret & VM_FAULT_LOCKED))) {
lock_page(page);
- if (!page->mapping) {
+ if (!page_mapping(page)) {
unlock_page(page);
return 0; /* retry */
}
@@ -2100,7 +2100,7 @@ static inline int wp_page_reuse(struct fault_env *fe, pte_t orig_pte,
dirtied = set_page_dirty(page);
VM_BUG_ON_PAGE(PageAnon(page), page);
- mapping = page->mapping;
+ mapping = page_mapping(page);
unlock_page(page);
put_page(page);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
@@ -2878,7 +2878,7 @@ EXPORT_SYMBOL(mapping_tagged);
*/
void wait_for_stable_page(struct page *page)
{
- if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host)))
+ if (bdi_cap_stable_pages_required(inode_to_bdi(page_mapping(page)->host)))
wait_on_page_writeback(page);
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);
diff --git a/mm/truncate.c b/mm/truncate.c
@@ -627,6 +627,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
unsigned long flags;
+ page = compound_head(page);
if (page->mapping != mapping)
return 0;
@@ -655,7 +656,7 @@ static int do_launder_page(struct address_space *mapping, struct page *page)
{
if (!PageDirty(page))
return 0;
- if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
+ if (page_mapping(page) != mapping || mapping->a_ops->launder_page == NULL)
return 0;
return mapping->a_ops->launder_page(page);
}
@@ -703,7 +704,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
lock_page(page);
WARN_ON(page_to_pgoff(page) != index);
- if (page->mapping != mapping) {
+ if (page_mapping(page) != mapping) {
unlock_page(page);
continue;
}
With huge pages in page cache we see tail pages in more code paths. This
patch replaces direct access to struct page fields with macros which can
handle tail pages properly.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 fs/buffer.c         |  2 +-
 fs/ext4/inode.c     |  4 ++--
 mm/filemap.c        | 26 ++++++++++++++------------
 mm/memory.c         |  4 ++--
 mm/page-writeback.c |  2 +-
 mm/truncate.c       |  5 +++--
 6 files changed, 23 insertions(+), 20 deletions(-)
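For reference, a rough sketch of why the helpers are safe on tail pages while
direct field access is not: a tail page's ->mapping and ->index fields do not
hold the values callers expect, so the helpers first redirect to the compound
head. This is only an illustration under that assumption; the sketch_* names
are invented for this note, and the real page_mapping()/page_to_pgoff() also
cover swap cache, hugetlbfs and anonymous pages in ways not shown here.

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Illustration only, not the kernel implementation. */
static inline struct address_space *sketch_page_mapping(struct page *page)
{
	page = compound_head(page);	/* tail -> head */
	if (PageAnon(page))		/* anon memory has no file mapping */
		return NULL;
	return page->mapping;
}

static inline pgoff_t sketch_page_to_pgoff(struct page *page)
{
	struct page *head = compound_head(page);

	/* file offset of a subpage: head's index plus position in the compound page */
	return head->index + (page - head);
}

Since any of the converted call sites can now be handed a tail page once huge
pages sit in the page cache, going through these helpers (rather than fixing
up individual callers) keeps the truncate and writeback checks uniform.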