@@ -149,7 +149,7 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages,
 
 	blk_finish_plug(&plug);
 
-	BUG_ON(!list_empty(pages));
+	BUG_ON(pages && !list_empty(pages));
 	BUG_ON(readahead_count(rac));
 
 out:
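
The relaxed assertion above is needed because the THP path added later in this patch drives read_pages() without ever building a ->readpages list, and list_empty() on a NULL pointer would oops. The new call site (from page_cache_readahead_order() below; the comment is mine) looks like:

	/* no ->readpages list was built, so the list pointer is NULL */
	read_pages(rac, NULL, false);
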
@@ -428,13 +428,92 @@ static int try_context_readahead(struct address_space *mapping,
 	return 1;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int ra_alloc_page(struct readahead_control *rac, pgoff_t index,
+		pgoff_t mark, unsigned int order, gfp_t gfp)
+{
+	int err;
+	struct page *page = __page_cache_alloc_order(gfp, order);
+
+	if (!page)
+		return -ENOMEM;
+	if (mark - index < (1UL << order))
+		SetPageReadahead(page);
+	err = add_to_page_cache_lru(page, rac->mapping, index, gfp);
+	if (err)
+		put_page(page);
+	else
+		rac->_nr_pages += 1UL << order;
+	return err;
+}
+
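A note on the mark test in ra_alloc_page(): mark is the page-cache index at which the asynchronous readahead trigger should be placed, and because the subtraction is unsigned, mark - index < (1UL << order) is true exactly when index <= mark < index + (1UL << order), i.e. when the trigger falls inside the (possibly large) page being allocated. A throwaway sketch with made-up values:

	pgoff_t index = 32, mark = 40;	/* hypothetical: trigger wanted at index 40 */
	unsigned int order = 4;		/* this page covers indices [32, 48) */

	if (mark - index < (1UL << order))	/* 40 - 32 = 8 < 16, so true */
		SetPageReadahead(page);		/* the trigger lands inside this page */

If mark is below index, the unsigned difference wraps to a huge value and the test correctly fails.
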
+static bool page_cache_readahead_order(struct readahead_control *rac,
+		struct file_ra_state *ra, unsigned int order)
+{
+	struct address_space *mapping = rac->mapping;
+	unsigned int old_order = order;
+	pgoff_t index = readahead_index(rac);
+	pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
+	pgoff_t mark = index + ra->size - ra->async_size;
+	int err = 0;
+	gfp_t gfp = readahead_gfp_mask(mapping);
+
+	if (!mapping_thp_support(mapping))
+		return false;
+
+	limit = min(limit, index + ra->size - 1);
+
+	/* Grow page size up to PMD size */
+	if (order < HPAGE_PMD_ORDER) {
+		order += 2;
+		if (order > HPAGE_PMD_ORDER)
+			order = HPAGE_PMD_ORDER;
+		while ((1 << order) > ra->size)
+			order--;
+	}
+
+	/* If size is somehow misaligned, fill with order-0 pages */
+	while (!err && index & ((1UL << old_order) - 1))
+		err = ra_alloc_page(rac, index++, mark, 0, gfp);
+
+	while (!err && index & ((1UL << order) - 1)) {
+		err = ra_alloc_page(rac, index, mark, old_order, gfp);
+		index += 1UL << old_order;
+	}
+
+	while (!err && index <= limit) {
+		err = ra_alloc_page(rac, index, mark, order, gfp);
+		index += 1UL << order;
+	}
+
+	if (index > limit) {
+		ra->size += index - limit - 1;
+		ra->async_size += index - limit - 1;
+	}
+
+	read_pages(rac, NULL, false);
+
+	/*
+	 * If there were already pages in the page cache, then we may have
+	 * left some gaps.  Let the regular readahead code take care of this
+	 * situation.
+	 */
+	return !err;
+}
+#else
+static bool page_cache_readahead_order(struct readahead_control *rac,
+		struct file_ra_state *ra, unsigned int order)
+{
+	return false;
+}
+#endif
+
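To see how page_cache_readahead_order() carves the window, here is a small stand-alone user-space sketch (not part of the patch) that mirrors its three allocation loops, ignoring the EOF clamp. HPAGE_PMD_ORDER is assumed to be 9 (4KB pages, 2MB PMDs), ra_alloc_page() is replaced by printf(), and the start index, old order and window size are invented:

	#include <stdio.h>

	#define HPAGE_PMD_ORDER	9	/* assumption: 4KB pages, 2MB PMD */

	int main(void)
	{
		unsigned long index = 21, size = 64;	/* invented window: [21, 84] */
		unsigned long limit = index + size - 1;
		unsigned int old_order = 2, order = old_order;

		/* Grow page size up to PMD size, as in the patch */
		if (order < HPAGE_PMD_ORDER) {
			order += 2;
			if (order > HPAGE_PMD_ORDER)
				order = HPAGE_PMD_ORDER;
			while ((1UL << order) > size)
				order--;
		}

		/* 1: order-0 pages until index is aligned to the old order */
		while (index & ((1UL << old_order) - 1))
			printf("order 0 at %lu\n", index++);

		/* 2: old-order pages until index is aligned to the new order */
		while (index & ((1UL << order) - 1)) {
			printf("order %u at %lu\n", old_order, index);
			index += 1UL << old_order;
		}

		/* 3: new-order pages up to the limit */
		while (index <= limit) {
			printf("order %u at %lu\n", order, index);
			index += 1UL << order;
		}
		return 0;
	}

With these numbers it emits three order-0 pages (21-23), two order-2 pages (24 and 28) and four order-4 pages (32, 48, 64 and 80); index finishes at 96, past the limit of 84, which is why the kernel code then grows ra->size and ra->async_size by the overshoot. Because the order only grows by two per call, successive readahead rounds ramp up gradually toward PMD-sized pages rather than jumping straight to them.
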
 /*
  * A minimal readahead algorithm for trivial sequential/random reads.
  */
 static void ondemand_readahead(struct address_space *mapping,
 		struct file_ra_state *ra, struct file *file,
-		bool hit_readahead_marker, pgoff_t index,
-		unsigned long req_size)
+		struct page *page, pgoff_t index, unsigned long req_size)
 {
 	DEFINE_READAHEAD(rac, file, mapping, index);
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
@@ -473,7 +552,7 @@ static void ondemand_readahead(struct address_space *mapping,
 	 * Query the pagecache for async_size, which normally equals to
 	 * readahead size. Ramp it up and use it as the new readahead size.
 	 */
-	if (hit_readahead_marker) {
+	if (page) {
 		pgoff_t start;
 
 		rcu_read_lock();
@@ -544,6 +623,8 @@ static void ondemand_readahead(struct address_space *mapping,
 	}
 
 	rac._index = ra->start;
+	if (page && page_cache_readahead_order(&rac, ra, thp_order(page)))
+		return;
 	__do_page_cache_readahead(&rac, ra->size, ra->async_size);
 }
 
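The ordering here matters: rac._index must already hold ra->start when page_cache_readahead_order() reads it through readahead_index(). When the helper returns true, every page in the window was allocated and submitted, so the function returns early; when it returns false (no THP support for the mapping, or an allocation or page-cache insertion failed partway), __do_page_cache_readahead() runs as before and fills whatever gaps remain. Condensed, with my comments against the patch's own identifiers:

	rac._index = ra->start;			/* window start for either path */
	if (page && page_cache_readahead_order(&rac, ra, thp_order(page)))
		return;				/* large pages fully submitted */
	/* order-0 fallback; also covers gaps left by a failed THP attempt */
	__do_page_cache_readahead(&rac, ra->size, ra->async_size);
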
@@ -578,7 +659,7 @@ void page_cache_sync_readahead(struct address_space *mapping,
 	}
 
 	/* do read-ahead */
-	ondemand_readahead(mapping, ra, filp, false, index, req_count);
+	ondemand_readahead(mapping, ra, filp, NULL, index, req_count);
 }
 EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
 
@@ -624,7 +705,7 @@ page_cache_async_readahead(struct address_space *mapping,
 		return;
 
 	/* do read-ahead */
-	ondemand_readahead(mapping, ra, filp, true, index, req_count);
+	ondemand_readahead(mapping, ra, filp, page, index, req_count);
 }
 EXPORT_SYMBOL_GPL(page_cache_async_readahead);
 
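
The two wrappers now differ only in what they forward: the sync variant is called on a cache miss and has no page to pass (hence NULL), while the async variant passes the PageReadahead-marked page it was handed, which is what lets ondemand_readahead() seed the THP order from thp_order(page). A sketch of the usual caller pattern, modelled on the generic buffered-read path rather than taken from this patch:

	page = find_get_page(mapping, index);
	if (!page) {
		/* cache miss: nothing to pass, start synchronous readahead */
		page_cache_sync_readahead(mapping, ra, file, index, req_count);
	} else if (PageReadahead(page)) {
		/* marker hit: hand over the page so its order can be reused */
		page_cache_async_readahead(mapping, ra, file, page,
				index, req_count);
	}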