@@ -5501,7 +5501,10 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
* Need to round down offset to be aligned with page size boundary
* for page size > block size.
*/
- ioffset = round_down(offset, PAGE_SIZE);
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
+ ioffset = round_down(offset, HPAGE_PMD_SIZE);
+ else
+ ioffset = round_down(offset, PAGE_SIZE);
/*
* Write tail of the last page before removed range since it will get
* removed from the page cache below.
@@ -5650,7 +5653,10 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
* Need to round down to align start offset to page size boundary
* for page size > block size.
*/
- ioffset = round_down(offset, PAGE_SIZE);
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
+ ioffset = round_down(offset, HPAGE_PMD_SIZE);
+ else
+ ioffset = round_down(offset, PAGE_SIZE);
/* Write out all dirty pages */
ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
LLONG_MAX);
@@ -3816,7 +3816,6 @@ void ext4_set_aops(struct inode *inode)
static int __ext4_block_zero_page_range(handle_t *handle,
struct address_space *mapping, loff_t from, loff_t length)
{
- ext4_fsblk_t index = from >> PAGE_SHIFT;
unsigned offset;
unsigned blocksize, pos;
ext4_lblk_t iblock;
@@ -3835,7 +3834,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
blocksize = inode->i_sb->s_blocksize;
- iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
+ iblock = page->index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
__ext4_block_zero_page_range() is adjusted to calculate the starting
iblock correctly for huge pages.

ext4_{collapse,insert}_range() requires page cache invalidation. The
invalidation needs to be aligned to the huge page boundary if huge pages
are possible in the page cache.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 fs/ext4/extents.c | 10 ++++++++--
 fs/ext4/inode.c   |  3 +--
 2 files changed, 9 insertions(+), 4 deletions(-)
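
Note (not part of the patch): a minimal userspace sketch of what the wider
alignment in ext4_{collapse,insert}_range() means for the invalidation start
offset. The 4 KiB / 2 MiB constants are assumptions matching x86-64 defaults,
and round_down_u64() mimics the kernel's round_down() for power-of-two sizes.

/* Illustrative only: invalidation start with and without huge pages. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE      4096ULL          /* assumed base page size */
#define HPAGE_PMD_SIZE (2ULL << 20)     /* assumed PMD-sized huge page */

/* Same result as the kernel's round_down() for power-of-two alignments. */
static uint64_t round_down_u64(uint64_t x, uint64_t align)
{
	return x & ~(align - 1);
}

int main(void)
{
	uint64_t offset = (5ULL << 20) + 12345;   /* arbitrary file offset */

	printf("offset                    = %llu\n",
	       (unsigned long long)offset);
	printf("rounded to PAGE_SIZE      = %llu\n",
	       (unsigned long long)round_down_u64(offset, PAGE_SIZE));
	printf("rounded to HPAGE_PMD_SIZE = %llu\n",
	       (unsigned long long)round_down_u64(offset, HPAGE_PMD_SIZE));
	return 0;
}

With huge pages possible in the page cache, invalidating from the smaller
PAGE_SIZE-aligned offset could leave the head of a huge page in the cache,
which is why the start offset is rounded down to the huge page boundary.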
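
Note (not part of the patch): a sketch of why the starting iblock in
__ext4_block_zero_page_range() is taken from page->index rather than derived
from the byte offset once the locked page may be a compound (huge) page.
Constants are assumptions: 4 KiB pages (PAGE_SHIFT 12), 512 subpages per huge
page, 1 KiB filesystem blocks.

/* Illustrative only: iblock from byte offset vs. from the head page index. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT     12
#define HPAGE_PMD_NR   512           /* subpages per huge page */
#define BLOCKSIZE_BITS 10            /* 1 KiB filesystem blocks */

int main(void)
{
	uint64_t from = (3ULL << 21) + 8192;   /* offset inside a huge page */

	/* Index a caller would compute directly from the byte offset. */
	uint64_t index_from_offset = from >> PAGE_SHIFT;

	/*
	 * A huge page's head page index is aligned to HPAGE_PMD_NR, so the
	 * page actually locked begins earlier than index_from_offset.
	 */
	uint64_t head_page_index =
		index_from_offset & ~(uint64_t)(HPAGE_PMD_NR - 1);

	printf("iblock from byte offset = %llu\n", (unsigned long long)
	       (index_from_offset << (PAGE_SHIFT - BLOCKSIZE_BITS)));
	printf("iblock from page index  = %llu\n", (unsigned long long)
	       (head_page_index << (PAGE_SHIFT - BLOCKSIZE_BITS)));
	return 0;
}

The two values diverge whenever the offset falls past the first subpage of a
huge page, so walking the page's buffers from the offset-derived iblock would
start at the wrong block.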