@@ -914,7 +914,7 @@ bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
}
for (i = 1, count = 1; i < cluster_size; i++, count++) {
- block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
dn->ofs_in_node + i);
/* [COMPR_ADDR, ..., COMPR_ADDR] */
@@ -955,7 +955,7 @@ static int __f2fs_get_cluster_blocks(struct inode *inode,
int count, i;
for (i = 0, count = 0; i < cluster_size; i++) {
- block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
dn->ofs_in_node + i);
if (__is_valid_data_blkaddr(blkaddr))
@@ -1322,7 +1322,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
goto out_unlock_op;
for (i = 0; i < cc->cluster_size; i++) {
- if (data_blkaddr(dn.inode, dn.node_page,
+ if (data_blkaddr(dn.inode, dn.node_folio,
dn.ofs_in_node + i) == NULL_ADDR)
goto out_put_dnode;
}
@@ -1354,7 +1354,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
page_folio(cc->rpages[i + 1])->index, cic);
fio.compressed_page = cc->cpages[i];
- fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
+ fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_folio,
dn.ofs_in_node + i + 1);
/* wait for GCed page writeback via META_MAPPING */
@@ -1887,14 +1887,14 @@ void f2fs_put_folio_dic(struct folio *folio, bool in_task)
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
unsigned int ofs_in_node)
{
- bool compressed = data_blkaddr(dn->inode, dn->node_page,
+ bool compressed = data_blkaddr(dn->inode, dn->node_folio,
ofs_in_node) == COMPRESS_ADDR;
int i = compressed ? 1 : 0;
- block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_folio,
ofs_in_node + i);
for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
- block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ block_t blkaddr = data_blkaddr(dn->inode, dn->node_folio,
ofs_in_node + i);
if (!__is_valid_data_blkaddr(blkaddr))
@@ -1117,7 +1117,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct folio *folio,
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
- __le32 *addr = get_dnode_addr(dn->inode, dn->node_page);
+ __le32 *addr = get_dnode_addr(dn->inode, &dn->node_folio->page);
dn->data_blkaddr = blkaddr;
addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
@@ -1126,14 +1126,14 @@ static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
/*
* Lock ordering for the change of data block address:
* ->data_page
- * ->node_page
+ * ->node_folio
* update block addresses in the node page
*/
void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
- f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
+ f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true);
__set_data_blkaddr(dn, blkaddr);
- if (set_page_dirty(dn->node_page))
+ if (folio_mark_dirty(dn->node_folio))
dn->node_changed = true;
}
@@ -1161,7 +1161,7 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
dn->ofs_in_node, count);
- f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
+ f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true);
for (; count > 0; dn->ofs_in_node++) {
block_t blkaddr = f2fs_data_blkaddr(dn);
@@ -1172,7 +1172,7 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
}
}
- if (set_page_dirty(dn->node_page))
+ if (folio_mark_dirty(dn->node_folio))
dn->node_changed = true;
return 0;
}
@@ -1590,7 +1590,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
start_pgofs = pgofs;
prealloc = 0;
last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(&dn.node_folio->page, inode);
next_block:
blkaddr = f2fs_data_blkaddr(&dn);
@@ -2247,7 +2247,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
for (i = 1; i < cc->cluster_size; i++) {
block_t blkaddr;
- blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
+ blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_folio,
dn.ofs_in_node + i) :
ei.blk + i - 1;
@@ -2281,7 +2281,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
block_t blkaddr;
struct bio_post_read_ctx *ctx;
- blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
+ blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_folio,
dn.ofs_in_node + i + 1) :
ei.blk + i;
@@ -934,7 +934,7 @@ static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type typ
if (!__may_extent_tree(dn->inode, type))
return;
- ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
+ ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(&dn->node_folio->page), dn->inode) +
dn->ofs_in_node;
ei.len = 1;
@@ -995,7 +995,7 @@ struct f2fs_nm_info {
struct dnode_of_data {
struct inode *inode; /* vfs inode pointer */
struct folio *inode_folio; /* its inode folio, NULL is possible */
- struct page *node_page; /* cached direct node page */
+ struct folio *node_folio; /* cached direct node folio */
nid_t nid; /* node id of the direct node block */
unsigned int ofs_in_node; /* data offset in the node page */
bool inode_folio_locked; /* inode folio is locked or not */
@@ -1011,7 +1011,7 @@ static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
memset(dn, 0, sizeof(*dn));
dn->inode = inode;
dn->inode_folio = ifolio;
- dn->node_page = &nfolio->page;
+ dn->node_folio = nfolio;
dn->nid = nid;
}
@@ -2890,11 +2890,11 @@ static inline void f2fs_put_page(struct page *page, int unlock)
static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
- if (dn->node_page)
- f2fs_put_page(dn->node_page, 1);
- if (dn->inode_folio && dn->node_page != &dn->inode_folio->page)
+ if (dn->node_folio)
+ f2fs_folio_put(dn->node_folio, true);
+ if (dn->inode_folio && dn->node_folio != dn->inode_folio)
f2fs_folio_put(dn->inode_folio, false);
- dn->node_page = NULL;
+ dn->node_folio = NULL;
dn->inode_folio = NULL;
}
@@ -3026,14 +3026,14 @@ static inline __le32 *get_dnode_addr(struct inode *inode,
}
static inline block_t data_blkaddr(struct inode *inode,
- struct page *node_page, unsigned int offset)
+ struct folio *node_folio, unsigned int offset)
{
- return le32_to_cpu(*(get_dnode_addr(inode, node_page) + offset));
+ return le32_to_cpu(*(get_dnode_addr(inode, &node_folio->page) + offset));
}
static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
{
- return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
+ return data_blkaddr(dn->inode, dn->node_folio, dn->ofs_in_node);
}
static inline int f2fs_test_bit(unsigned int nr, char *addr)
@@ -404,7 +404,7 @@ static bool __found_offset(struct address_space *mapping,
bool compressed_cluster = false;
if (f2fs_compressed_file(inode)) {
- block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_folio,
ALIGN_DOWN(dn->ofs_in_node, F2FS_I(inode)->i_cluster_size));
compressed_cluster = first_blkaddr == COMPRESS_ADDR;
@@ -474,7 +474,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
}
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(&dn.node_folio->page, inode);
/* find data/hole in dnode block */
for (; dn.ofs_in_node < end_offset;
@@ -625,7 +625,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
block_t blkstart;
int blklen = 0;
- addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;
+ addr = get_dnode_addr(dn->inode, &dn->node_folio->page) + ofs;
blkstart = le32_to_cpu(*addr);
/* Assumption: truncation starts with cluster */
@@ -689,7 +689,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
* once we invalidate valid blkaddr in range [ofs, ofs + count],
* we will invalidate all blkaddr in the whole range.
*/
- fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
+ fofs = f2fs_start_bidx_of_node(ofs_of_node(&dn->node_folio->page),
dn->inode) + ofs;
f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
f2fs_update_age_extent_cache_range(dn, fofs, len);
@@ -796,12 +796,12 @@ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
goto out;
}
- count = ADDRS_PER_PAGE(dn.node_page, inode);
+ count = ADDRS_PER_PAGE(&dn.node_folio->page, inode);
count -= dn.ofs_in_node;
f2fs_bug_on(sbi, count < 0);
- if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
+ if (dn.ofs_in_node || IS_INODE(&dn.node_folio->page)) {
f2fs_truncate_data_blocks_range(&dn, count);
free_from += count;
}
@@ -1202,7 +1202,7 @@ int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
return err;
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(&dn.node_folio->page, inode);
count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
@@ -1297,7 +1297,7 @@ static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
goto next;
}
- done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
+ done = min((pgoff_t)ADDRS_PER_PAGE(&dn.node_folio->page, inode) -
dn.ofs_in_node, len);
for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
*blkaddr = f2fs_data_blkaddr(&dn);
@@ -1386,7 +1386,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
}
ilen = min((pgoff_t)
- ADDRS_PER_PAGE(dn.node_page, dst_inode) -
+ ADDRS_PER_PAGE(&dn.node_folio->page, dst_inode) -
dn.ofs_in_node, len - i);
do {
dn.data_blkaddr = f2fs_data_blkaddr(&dn);
@@ -1676,7 +1676,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
goto out;
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(&dn.node_folio->page, inode);
end = min(pg_end, end_offset - dn.ofs_in_node + index);
ret = f2fs_do_zero_range(&dn, index, end);
@@ -3712,7 +3712,7 @@ static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
int i;
for (i = 0; i < count; i++) {
- blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ blkaddr = data_blkaddr(dn->inode, dn->node_folio,
dn->ofs_in_node + i);
if (!__is_valid_data_blkaddr(blkaddr))
@@ -3830,7 +3830,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
break;
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(&dn.node_folio->page, inode);
count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
count = round_up(count, fi->i_cluster_size);
@@ -3881,7 +3881,7 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
int i;
for (i = 0; i < count; i++) {
- blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ blkaddr = data_blkaddr(dn->inode, dn->node_folio,
dn->ofs_in_node + i);
if (!__is_valid_data_blkaddr(blkaddr))
@@ -3898,7 +3898,7 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
int ret;
for (i = 0; i < cluster_size; i++) {
- blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ blkaddr = data_blkaddr(dn->inode, dn->node_folio,
dn->ofs_in_node + i);
if (i == 0) {
@@ -4008,7 +4008,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
break;
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(&dn.node_folio->page, inode);
count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
count = round_up(count, fi->i_cluster_size);
@@ -4172,7 +4172,7 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
goto out;
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end_offset = ADDRS_PER_PAGE(&dn.node_folio->page, inode);
count = min(end_offset - dn.ofs_in_node, pg_end - index);
for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
struct block_device *cur_bdev;
@@ -1178,7 +1178,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
}
*nofs = ofs_of_node(&node_folio->page);
- source_blkaddr = data_blkaddr(NULL, &node_folio->page, ofs_in_node);
+ source_blkaddr = data_blkaddr(NULL, node_folio, ofs_in_node);
f2fs_folio_put(node_folio, true);
if (source_blkaddr != blkaddr) {
@@ -851,7 +851,7 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
}
dn->nid = nids[level];
dn->ofs_in_node = offset[level];
- dn->node_page = &nfolio[level]->page;
+ dn->node_folio = nfolio[level];
dn->data_blkaddr = f2fs_data_blkaddr(dn);
if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
@@ -872,9 +872,9 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
if (!c_len)
goto out;
- blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node);
+ blkaddr = data_blkaddr(dn->inode, dn->node_folio, ofs_in_node);
if (blkaddr == COMPRESS_ADDR)
- blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ blkaddr = data_blkaddr(dn->inode, dn->node_folio,
ofs_in_node + 1);
f2fs_update_read_extent_tree_range_compressed(dn->inode,
@@ -889,7 +889,7 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
f2fs_folio_put(nfolio[0], false);
release_out:
dn->inode_folio = NULL;
- dn->node_page = NULL;
+ dn->node_folio = NULL;
if (err == -ENOENT) {
dn->cur_level = i;
dn->max_level = level;
@@ -930,16 +930,16 @@ static int truncate_node(struct dnode_of_data *dn)
f2fs_inode_synced(dn->inode);
}
- clear_node_page_dirty(dn->node_page);
+ clear_node_page_dirty(&dn->node_folio->page);
set_sbi_flag(sbi, SBI_IS_DIRTY);
- index = page_folio(dn->node_page)->index;
- f2fs_put_page(dn->node_page, 1);
+ index = dn->node_folio->index;
+ f2fs_folio_put(dn->node_folio, true);
invalidate_mapping_pages(NODE_MAPPING(sbi),
index, index);
- dn->node_page = NULL;
+ dn->node_folio = NULL;
trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
return 0;
@@ -971,7 +971,7 @@ static int truncate_dnode(struct dnode_of_data *dn)
}
/* Make dnode_of_data for parameter */
- dn->node_page = &folio->page;
+ dn->node_folio = folio;
dn->ofs_in_node = 0;
f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
err = truncate_node(dn);
@@ -1043,7 +1043,7 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
if (!ofs) {
/* remove current indirect node */
- dn->node_page = &folio->page;
+ dn->node_folio = folio;
ret = truncate_node(dn);
if (ret)
goto out_err;
@@ -1102,7 +1102,7 @@ static int truncate_partial_nodes(struct dnode_of_data *dn,
}
if (offset[idx + 1] == 0) {
- dn->node_page = &folios[idx]->page;
+ dn->node_folio = folios[idx];
dn->nid = nid[idx];
err = truncate_node(dn);
if (err)
@@ -527,7 +527,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
nid = le32_to_cpu(sum.nid);
ofs_in_node = le16_to_cpu(sum.ofs_in_node);
- max_addrs = ADDRS_PER_PAGE(dn->node_page, dn->inode);
+ max_addrs = ADDRS_PER_PAGE(&dn->node_folio->page, dn->inode);
if (ofs_in_node >= max_addrs) {
f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%lu, nid:%u, max:%u",
ofs_in_node, dn->inode->i_ino, nid, max_addrs);
@@ -539,7 +539,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
tdn.nid = nid;
if (!dn->inode_folio_locked)
folio_lock(dn->inode_folio);
- tdn.node_page = &dn->inode_folio->page;
+ tdn.node_folio = dn->inode_folio;
tdn.ofs_in_node = ofs_in_node;
goto truncate_out;
} else if (dn->nid == nid) {
@@ -662,7 +662,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
goto out;
}
- f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
+ f2fs_folio_wait_writeback(dn.node_folio, NODE, true, true);
err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
if (err)
@@ -670,9 +670,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
f2fs_bug_on(sbi, ni.ino != ino_of_node(&folio->page));
- if (ofs_of_node(dn.node_page) != ofs_of_node(&folio->page)) {
+ if (ofs_of_node(&dn.node_folio->page) != ofs_of_node(&folio->page)) {
f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
- inode->i_ino, ofs_of_node(dn.node_page),
+ inode->i_ino, ofs_of_node(&dn.node_folio->page),
ofs_of_node(&folio->page));
err = -EFSCORRUPTED;
f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
@@ -683,7 +683,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
block_t src, dest;
src = f2fs_data_blkaddr(&dn);
- dest = data_blkaddr(dn.inode, &folio->page, dn.ofs_in_node);
+ dest = data_blkaddr(dn.inode, folio, dn.ofs_in_node);
if (__is_valid_data_blkaddr(src) &&
!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
@@ -758,10 +758,10 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
}
}
- copy_node_footer(dn.node_page, &folio->page);
- fill_node_footer(dn.node_page, dn.nid, ni.ino,
+ copy_node_footer(&dn.node_folio->page, &folio->page);
+ fill_node_footer(&dn.node_folio->page, dn.nid, ni.ino,
ofs_of_node(&folio->page), false);
- set_page_dirty(dn.node_page);
+ folio_mark_dirty(dn.node_folio);
err:
f2fs_put_dnode(&dn);
out:
@@ -334,7 +334,7 @@ static int __f2fs_commit_atomic_write(struct inode *inode)
goto next;
}
- blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, cow_inode),
+ blen = min((pgoff_t)ADDRS_PER_PAGE(&dn.node_folio->page, cow_inode),
len);
index = off;
for (i = 0; i < blen; i++, dn.ofs_in_node++, index++) {
All assignments to this struct member are conversions from a folio, so convert it to be a folio and convert all users.  At the same time, convert data_blkaddr() to take a folio, as all callers now have a folio.  Remove eight calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/f2fs/compress.c     | 14 +++++++-------
 fs/f2fs/data.c         | 18 +++++++++---------
 fs/f2fs/extent_cache.c |  2 +-
 fs/f2fs/f2fs.h         | 18 +++++++++---------
 fs/f2fs/file.c         | 32 ++++++++++++++++----------------
 fs/f2fs/gc.c           |  2 +-
 fs/f2fs/node.c         | 22 +++++++++++-----------
 fs/f2fs/recovery.c     | 18 +++++++++---------
 fs/f2fs/segment.c      |  2 +-
 9 files changed, 64 insertions(+), 64 deletions(-)
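
[Editor's note: the following is a minimal sketch of the generic page -> folio member conversion this patch applies; the struct and function names are hypothetical and not taken from f2fs. It assumes only the standard mm helpers set_page_dirty() and folio_mark_dirty(), and shows why caching a folio lets the compound_head() lookups drop out.]

#include <linux/mm.h>

/* Before: the struct caches a struct page. */
struct example_dnode_old {
	struct page *node_page;		/* cached direct node page */
};

/* After: the struct caches a struct folio. */
struct example_dnode_new {
	struct folio *node_folio;	/* cached direct node folio */
};

/*
 * Page-based helpers such as set_page_dirty() have to resolve the
 * owning folio on every call via page_folio(), i.e. compound_head().
 */
static void example_dirty_old(struct example_dnode_old *dn)
{
	set_page_dirty(dn->node_page);
}

/*
 * With the folio cached in the struct, folio_mark_dirty() is called
 * directly and no compound_head() lookup is needed.
 */
static void example_dirty_new(struct example_dnode_new *dn)
{
	folio_mark_dirty(dn->node_folio);
}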