Message ID | 20200212041845.25879-15-willy@infradead.org (mailing list archive) |
---|---|
State | New, archived |
Series | Large pages in the page cache |
On Tue, Feb 11, 2020 at 08:18:34PM -0800, Matthew Wilcox wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
>
> Size the uptodate array dynamically. Now that this array is protected
> by a spinlock, we can use bitmap functions to set the bits in this array
> instead of a loop around set_bit().
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  fs/iomap/buffered-io.c | 27 +++++++++------------------
>  1 file changed, 9 insertions(+), 18 deletions(-)
>
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index c551a48e2a81..5e5a6b038fc3 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -22,14 +22,14 @@
>  #include "../internal.h"
>
>  /*
> - * Structure allocated for each page when block size < PAGE_SIZE to track
> + * Structure allocated for each page when block size < page size to track
>   * sub-page uptodate status and I/O completions.
>   */
>  struct iomap_page {
>  	atomic_t		read_count;
>  	atomic_t		write_count;
>  	spinlock_t		uptodate_lock;
> -	DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
> +	unsigned long		uptodate[];
>  };
>
>  static inline struct iomap_page *to_iomap_page(struct page *page)
> @@ -45,15 +45,14 @@ static struct iomap_page *
>  iomap_page_create(struct inode *inode, struct page *page)
>  {
>  	struct iomap_page *iop = to_iomap_page(page);
> +	unsigned int n, nr_blocks = i_blocks_per_page(inode, page);
>
> -	if (iop || i_blocks_per_page(inode, page) <= 1)
> +	if (iop || nr_blocks <= 1)
>  		return iop;
>
> -	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
> -	atomic_set(&iop->read_count, 0);
> -	atomic_set(&iop->write_count, 0);
> +	n = BITS_TO_LONGS(nr_blocks);
> +	iop = kzalloc(struct_size(iop, uptodate, n), GFP_NOFS | __GFP_NOFAIL);

Nit: I don't really think we need the n variable here, we can just
open-code the BITS_TO_LONGS in the struct_size call.

>  	struct inode *inode = page->mapping->host;
>  	unsigned first = off >> inode->i_blkbits;
> -	unsigned last = (off + len - 1) >> inode->i_blkbits;
> -	bool uptodate = true;
> +	unsigned count = len >> inode->i_blkbits;
>  	unsigned long flags;
> -	unsigned int i;
>
>  	spin_lock_irqsave(&iop->uptodate_lock, flags);
> -	for (i = 0; i < i_blocks_per_page(inode, page); i++) {
> -		if (i >= first && i <= last)
> -			set_bit(i, iop->uptodate);
> -		else if (!test_bit(i, iop->uptodate))
> -			uptodate = false;
> -	}
> -
> -	if (uptodate)
> +	bitmap_set(iop->uptodate, first, count);
> +	if (bitmap_full(iop->uptodate, i_blocks_per_page(inode, page)))
>  		SetPageUptodate(page);

Switching to the bitmap helpers might be a worthwhile prep patch that can
go in directly now that we have the uptodate_lock.
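[Editor's note] The nit above amounts to dropping the local n and folding the
BITS_TO_LONGS() conversion straight into the struct_size() call. A minimal
sketch of what the allocation in iomap_page_create() would then look like
(illustration only, not part of the posted patch):

	struct iomap_page *iop = to_iomap_page(page);
	unsigned int nr_blocks = i_blocks_per_page(inode, page);

	if (iop || nr_blocks <= 1)
		return iop;

	/* struct_size() accounts for the struct plus the flexible uptodate[] array */
	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
			GFP_NOFS | __GFP_NOFAIL);
	spin_lock_init(&iop->uptodate_lock);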
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index c551a48e2a81..5e5a6b038fc3 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -22,14 +22,14 @@
 #include "../internal.h"
 
 /*
- * Structure allocated for each page when block size < PAGE_SIZE to track
+ * Structure allocated for each page when block size < page size to track
  * sub-page uptodate status and I/O completions.
  */
 struct iomap_page {
 	atomic_t		read_count;
 	atomic_t		write_count;
 	spinlock_t		uptodate_lock;
-	DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
+	unsigned long		uptodate[];
 };
 
 static inline struct iomap_page *to_iomap_page(struct page *page)
@@ -45,15 +45,14 @@ static struct iomap_page *
 iomap_page_create(struct inode *inode, struct page *page)
 {
 	struct iomap_page *iop = to_iomap_page(page);
+	unsigned int n, nr_blocks = i_blocks_per_page(inode, page);
 
-	if (iop || i_blocks_per_page(inode, page) <= 1)
+	if (iop || nr_blocks <= 1)
 		return iop;
 
-	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
-	atomic_set(&iop->read_count, 0);
-	atomic_set(&iop->write_count, 0);
+	n = BITS_TO_LONGS(nr_blocks);
+	iop = kzalloc(struct_size(iop, uptodate, n), GFP_NOFS | __GFP_NOFAIL);
 	spin_lock_init(&iop->uptodate_lock);
-	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
 
 	/*
 	 * migrate_page_move_mapping() assumes that pages with private data have
@@ -146,20 +145,12 @@ iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
 	struct iomap_page *iop = to_iomap_page(page);
 	struct inode *inode = page->mapping->host;
 	unsigned first = off >> inode->i_blkbits;
-	unsigned last = (off + len - 1) >> inode->i_blkbits;
-	bool uptodate = true;
+	unsigned count = len >> inode->i_blkbits;
 	unsigned long flags;
-	unsigned int i;
 
 	spin_lock_irqsave(&iop->uptodate_lock, flags);
-	for (i = 0; i < i_blocks_per_page(inode, page); i++) {
-		if (i >= first && i <= last)
-			set_bit(i, iop->uptodate);
-		else if (!test_bit(i, iop->uptodate))
-			uptodate = false;
-	}
-
-	if (uptodate)
+	bitmap_set(iop->uptodate, first, count);
+	if (bitmap_full(iop->uptodate, i_blocks_per_page(inode, page)))
 		SetPageUptodate(page);
 	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
 }
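[Editor's note] The prep patch suggested at the end of the review would convert
iomap_iop_set_range_uptodate() to the bitmap helpers while still using the
existing fixed-size DECLARE_BITMAP() array; the exact split was not posted on
the list, but under that assumption the resulting function would be essentially
the new version from the hunk above, standing on its own:

static void
iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned count = len >> inode->i_blkbits;
	unsigned long flags;

	/* uptodate_lock makes the read-modify-write of the bitmap atomic */
	spin_lock_irqsave(&iop->uptodate_lock, flags);
	bitmap_set(iop->uptodate, first, count);
	if (bitmap_full(iop->uptodate, i_blocks_per_page(inode, page)))
		SetPageUptodate(page);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}

Splitting this out first keeps the bitmap_set()/bitmap_full() conversion
independent of the dynamic sizing of uptodate[], so it could be merged directly
now that the uptodate_lock exists.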