
[RFC] mm: readahead: change ra size for random read

Message ID 20191220015827.8904-1-hdanton@sina.com (mailing list archive)
State New, archived
Series [RFC] mm: readahead: change ra size for random read

Commit Message

Hillf Danton Dec. 20, 2019, 1:58 a.m. UTC
Set a smaller ra size for random reads than for contiguous ones. A
read is deemed random if the lower-level device is unable to meet the
read pattern even with its IO capability doubled.

Signed-off-by: Hillf Danton <hdanton@sina.com>
---
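For illustration only, here is a minimal user-space C sketch of the sizing
rule described in the commit message; the helper names and the sample
io_pages/ra_pages values are made up here and are not part of the patch.

#include <stdbool.h>
#include <stdio.h>

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/*
 * A read is deemed random if the leap exceeds twice the larger of
 * io_pages and ra_pages, i.e. the device cannot back it up even with
 * its IO capability doubled.
 */
static bool leap_is_random(unsigned long leap_pages,
			   unsigned long io_pages, unsigned long ra_pages)
{
	return leap_pages > max_ul(io_pages, ra_pages) * 2;
}

/*
 * Random reads get the smaller of io_pages and ra_pages; contiguous
 * reads get the larger, mirroring the min()/max() choice in the patch.
 */
static unsigned long pick_ra_size(bool random,
				  unsigned long io_pages, unsigned long ra_pages)
{
	return random ? min_ul(io_pages, ra_pages) : max_ul(io_pages, ra_pages);
}

int main(void)
{
	unsigned long io_pages = 256, ra_pages = 32;	/* made-up values */
	unsigned long leaps[] = { 8, 600, 2048 };	/* leap in pages   */

	for (size_t i = 0; i < sizeof(leaps) / sizeof(leaps[0]); i++) {
		bool random = leap_is_random(leaps[i], io_pages, ra_pages);

		printf("leap %lu pages -> %s, ra size %lu pages\n",
		       leaps[i], random ? "random" : "contiguous",
		       pick_ra_size(random, io_pages, ra_pages));
	}
	return 0;
}

With these sample values, a leap beyond 512 pages is classified as random
and falls back to the 32-page window, while shorter leaps keep the full
256-page window.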

Patch

--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -388,6 +388,7 @@  ondemand_readahead(struct address_space
 	unsigned long max_pages = ra->ra_pages;
 	unsigned long add_pages;
 	pgoff_t prev_offset;
+	bool random = true;
 
 	/*
 	 * If the request exceeds the readahead window, allow the read to
@@ -399,8 +400,34 @@  ondemand_readahead(struct address_space
 	/*
 	 * start of file
 	 */
-	if (!offset)
-		goto initial_readahead;
+	if (!offset) {
+fill_ra:
+		ra->start = offset;
+		ra->size = random ?
+				min(bdi->io_pages, bdi->ra_pages) :
+				max(bdi->io_pages, bdi->ra_pages);
+
+		ra->async_size = ra->size > req_size ?
+				 ra->size - req_size : ra->size;
+
+		return ra_submit(ra, mapping, filp);
+	} else {
+		unsigned long leap;
+
+		if (offset > ra->start)
+			leap = offset - ra->start;
+		else
+			leap = ra->start - offset;
+
+	/*
+	 * If the leap is too great for the lower-level device to
+	 * back up, nothing but the page cache can help, so feel
+	 * free to shrink the ra window.
+	 */
+		random = leap > max(bdi->io_pages, bdi->ra_pages) * 2;
+
+		goto fill_ra;
+	}
 
 	/*
 	 * It's the expected callback offset, assume sequential access.