--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -833,11 +833,13 @@ static inline int add_to_page_cache(struct page *page,
* May be NULL if invoked internally by the filesystem.
* @mapping: Readahead this filesystem object.
* @ra: File readahead state. May be NULL.
+ * @gfp_flags: Memory allocation flags to use.
*/
struct readahead_control {
struct file *file;
struct address_space *mapping;
struct file_ra_state *ra;
+ gfp_t gfp_flags;
/* private: use the readahead_* accessors instead */
pgoff_t _index;
unsigned int _nr_pages;
@@ -849,6 +851,7 @@ struct readahead_control {
.file = f, \
.mapping = m, \
.ra = r, \
+ .gfp_flags = readahead_gfp_mask(m), \
._index = i, \
}
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -177,7 +177,6 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
struct address_space *mapping = ractl->mapping;
unsigned long index = readahead_index(ractl);
LIST_HEAD(page_pool);
- gfp_t gfp_mask = readahead_gfp_mask(mapping);
unsigned long i;
/*
@@ -212,14 +211,14 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
continue;
}
- page = __page_cache_alloc(gfp_mask);
+ page = __page_cache_alloc(ractl->gfp_flags);
if (!page)
break;
if (mapping->a_ops->readpages) {
page->index = index + i;
list_add(&page->lru, &page_pool);
} else if (add_to_page_cache_lru(page, mapping, index + i,
- gfp_mask) < 0) {
+ ractl->gfp_flags) < 0) {
put_page(page);
read_pages(ractl, &page_pool, true);
i = ractl->_index + ractl->_nr_pages - index - 1;
@@ -663,7 +662,6 @@ void readahead_expand(struct readahead_control *ractl,
struct address_space *mapping = ractl->mapping;
struct file_ra_state *ra = ractl->ra;
pgoff_t new_index, new_nr_pages;
- gfp_t gfp_mask = readahead_gfp_mask(mapping);
new_index = new_start / PAGE_SIZE;
@@ -675,10 +673,11 @@ void readahead_expand(struct readahead_control *ractl,
if (page && !xa_is_value(page))
return; /* Page apparently present */
- page = __page_cache_alloc(gfp_mask);
+ page = __page_cache_alloc(ractl->gfp_flags);
if (!page)
return;
- if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0) {
+ if (add_to_page_cache_lru(page, mapping, index,
+ ractl->gfp_flags) < 0) {
put_page(page);
return;
}
@@ -698,10 +697,11 @@ void readahead_expand(struct readahead_control *ractl,
if (page && !xa_is_value(page))
return; /* Page apparently present */
- page = __page_cache_alloc(gfp_mask);
+ page = __page_cache_alloc(ractl->gfp_flags);
if (!page)
return;
- if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0) {
+ if (add_to_page_cache_lru(page, mapping, index,
+ ractl->gfp_flags) < 0) {
put_page(page);
return;
}
It is currently possible for an I/O request that specifies IOCB_NOWAIT to
sleep waiting for I/O to complete in order to allocate pages for readahead.
In order to fix that, we need the caller to be able to specify the GFP flags
to use for memory allocation in the rest of the readahead path.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/pagemap.h |  3 +++
 mm/readahead.c          | 16 ++++++++--------
 2 files changed, 11 insertions(+), 8 deletions(-)
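
[Editorial illustration, not part of the patch] A minimal sketch of how a
caller that honours IOCB_NOWAIT might use the new gfp_flags field, assuming
the DEFINE_READAHEAD() and page_cache_ra_unbounded() interfaces touched by the
hunks above. The function my_fs_start_readahead() and its parameters are
hypothetical, and GFP_NOWAIT | __GFP_NOWARN is just one plausible non-blocking
mask, not something this patch mandates.

/*
 * Hypothetical example, not part of this patch: a filesystem read path
 * that narrows the readahead GFP mask for IOCB_NOWAIT requests so that
 * page allocation cannot sleep.  Function and parameter names are made
 * up for illustration.
 */
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>

static void my_fs_start_readahead(struct kiocb *iocb, struct file *file,
				  struct address_space *mapping,
				  pgoff_t index, unsigned long nr_to_read)
{
	/* DEFINE_READAHEAD() now seeds gfp_flags from readahead_gfp_mask(m) */
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);

	/* A non-blocking caller can override the default before starting I/O */
	if (iocb->ki_flags & IOCB_NOWAIT)
		ractl.gfp_flags = GFP_NOWAIT | __GFP_NOWARN;

	page_cache_ra_unbounded(&ractl, nr_to_read, 0);
}

Whether GFP_NOWAIT is the right mask here is a separate policy decision; the
point is only that, with this patch, the allocation flags travel inside the
readahead_control instead of being recomputed from the mapping deep inside
mm/readahead.c.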