@@ -834,6 +834,7 @@ static int __add_to_page_cache_locked(struct page *page,
 	int huge = PageHuge(page);
 	struct mem_cgroup *memcg;
 	int error;
+	unsigned int nr = 1;
 	void *old;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
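
The hunk above is pure bookkeeping: nr is the number of base-page
slots the page will occupy in the mapping. It stays 1 for regular and
hugetlb pages; the next hunk raises it to hpage_nr_pages(page) for a
transparent huge page, which works out to 1 << compound_order(page).
A minimal userspace model of that relationship; nr_subpages is an
illustrative name, not a kernel helper:

	#include <assert.h>

	/* A compound page of order N spans 1 << N base pages, which is
	 * what hpage_nr_pages() reports for a THP. */
	static unsigned int nr_subpages(unsigned int order)
	{
		return 1u << order;
	}

	int main(void)
	{
		assert(nr_subpages(0) == 1);	/* ordinary base page */
		assert(nr_subpages(9) == 512);	/* PMD-sized 2MiB THP on x86-64 */
		return 0;
	}

So on x86-64 with 4KiB base pages, a single PMD-sized THP insertion
has to account for 512 consecutive slots at once.
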
@@ -845,31 +846,48 @@ static int __add_to_page_cache_locked(struct page *page,
 					      gfp_mask, &memcg, false);
 		if (error)
 			return error;
+		xas_set_order(&xas, offset, thp_order(page));
+		nr = hpage_nr_pages(page);
 	}
 
-	get_page(page);
+	page_ref_add(page, nr);
 	page->mapping = mapping;
 	page->index = offset;
 
 	do {
+		unsigned long exceptional = 0;
+		unsigned int i = 0;
+
 		xas_lock_irq(&xas);
-		old = xas_load(&xas);
-		if (old && !xa_is_value(old))
-			xas_set_err(&xas, -EEXIST);
-		xas_store(&xas, page);
+		xas_for_each_conflict(&xas, old) {
+			if (!xa_is_value(old)) {
+				xas_set_err(&xas, -EEXIST);
+				break;
+			}
+			exceptional++;
+			if (shadowp)
+				*shadowp = old;
+		}
+		xas_create_range(&xas);
 		if (xas_error(&xas))
 			goto unlock;
 
-		if (xa_is_value(old)) {
-			mapping->nrexceptional--;
-			if (shadowp)
-				*shadowp = old;
+next:
+		xas_store(&xas, page);
+		if (++i < nr) {
+			xas_next(&xas);
+			goto next;
 		}
-		mapping->nrpages++;
+		mapping->nrexceptional -= exceptional;
+		mapping->nrpages += nr;
 
 		/* hugetlb pages do not participate in page cache accounting */
-		if (!huge)
-			__inc_node_page_state(page, NR_FILE_PAGES);
+		if (!huge) {
+			__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES,
+					      nr);
+			if (nr > 1)
+				__inc_node_page_state(page, NR_FILE_THPS);
+		}
 unlock:
 		xas_unlock_irq(&xas);
 	} while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
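
This hunk is the heart of the change. Instead of xas_load() on a
single slot, xas_for_each_conflict() walks every entry already present
in the nr-slot range: shadow (workingset) entries are counted in
exceptional and reported through *shadowp, while any real page aborts
the insert with -EEXIST. xas_create_range() then allocates nodes for
the whole range before the next:/xas_next() loop stores the head page
into each slot, and the counters move by the full nr. A rough
userspace sketch of that accounting, with a flat array standing in for
the XArray and bit 0 tagging shadow entries the way kernel value
entries do; add_to_cache, SHADOW and is_shadow are inventions of this
sketch:

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SLOTS 16
	static void *slots[SLOTS];	/* stands in for mapping->i_pages */

	/* Shadow entries carry bit 0, as xa_is_value() tests in the kernel. */
	#define SHADOW(v)	((void *)(((uintptr_t)(v) << 1) | 1))
	static int is_shadow(void *p) { return (uintptr_t)p & 1; }

	/* Claim nr consecutive slots for page: shadows are replaced and
	 * counted, any real page fails the whole insertion, mirroring
	 * the conflict walk and the next:/xas_next() store loop above. */
	static int add_to_cache(void *page, unsigned int index, unsigned int nr,
				unsigned long *nrpages, unsigned long *nrexceptional)
	{
		unsigned long exceptional = 0;
		unsigned int i;

		for (i = 0; i < nr; i++) {
			void *old = slots[index + i];

			if (old && !is_shadow(old))
				return -EEXIST;
			if (old)
				exceptional++;
		}
		for (i = 0; i < nr; i++)
			slots[index + i] = page;
		*nrexceptional -= exceptional;
		*nrpages += nr;
		return 0;
	}

	int main(void)
	{
		unsigned long nrpages = 0, nrexceptional = 2;
		int page;

		slots[1] = SHADOW(7);
		slots[2] = SHADOW(9);
		if (add_to_cache(&page, 0, 4, &nrpages, &nrexceptional))
			return 1;
		printf("nrpages=%lu nrexceptional=%lu\n", nrpages, nrexceptional);
		return 0;
	}

One simplification to note: the kernel loop keeps only the last shadow
entry it sees in *shadowp, and nothing is stored until both the
conflict walk and xas_create_range() have succeeded, so the -EEXIST
path leaves the tree untouched.
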
@@ -886,7 +904,7 @@ static int __add_to_page_cache_locked(struct page *page,
 	/* Leave page->index set: truncation relies upon it */
 	if (!huge)
 		mem_cgroup_cancel_charge(page, memcg, false);
-	put_page(page);
+	page_ref_sub(page, nr);
 	return xas_error(&xas);
 }
 ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO);
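
The final hunk keeps the error path symmetric with the new reference
counting: the insert path took nr references via page_ref_add(), one
per slot the page occupies, so the unwind has to drop the same nr via
page_ref_sub() rather than the single reference put_page() releases.
A toy model of that invariant, assuming C11 atomics; toy_page and
toy_insert are illustrative, not kernel code:

	#include <assert.h>
	#include <stdatomic.h>

	struct toy_page { atomic_int refcount; };

	/* A failed insertion must leave the refcount exactly where it
	 * started: add nr on the way in, sub the same nr on unwind. */
	static int toy_insert(struct toy_page *p, int nr, int fail)
	{
		atomic_fetch_add(&p->refcount, nr);	/* page_ref_add(page, nr) */
		if (fail) {
			atomic_fetch_sub(&p->refcount, nr);	/* page_ref_sub(page, nr) */
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		struct toy_page p = { .refcount = 1 };

		toy_insert(&p, 512, 1);	/* a failed 2MiB THP insert */
		assert(atomic_load(&p.refcount) == 1);
		return 0;
	}

Had the old put_page() survived here, a failed THP insertion would
leak nr - 1 references and the page could never be freed.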