--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -313,7 +313,8 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
bitmap_file_kick(bitmap);
}
-static void end_bitmap_write(struct buffer_head *bh, int uptodate)
+static void end_bitmap_write(struct address_space *mapping, struct page *page,
+ struct buffer_head *bh, int uptodate)
{
struct bitmap *bitmap = bh->b_private;
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3123,7 +3123,8 @@ int open_ctree(struct super_block *sb,
}
ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
-static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
+static void btrfs_end_buffer_write_sync(struct address_space *mapping,
+ struct page *page, struct buffer_head *bh, int uptodate)
{
if (uptodate) {
set_buffer_uptodate(bh);
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -159,14 +159,16 @@ static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
* Default synchronous end-of-IO handler.. Just mark it up-to-date and
* unlock the buffer. This is what ll_rw_block uses too.
*/
-void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
+void end_buffer_read_sync(struct address_space *mapping, struct page *page,
+ struct buffer_head *bh, int uptodate)
{
__end_buffer_read_notouch(bh, uptodate);
put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);
-void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
+void end_buffer_write_sync(struct address_space *mapping, struct page *page,
+ struct buffer_head *bh, int uptodate)
{
if (uptodate) {
set_buffer_uptodate(bh);
@@ -250,12 +252,12 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
* I/O completion handler for block_read_full_page() - pages
* which come unlocked at the end of I/O.
*/
-static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
+static void end_buffer_async_read(struct address_space *mapping,
+ struct page *page, struct buffer_head *bh, int uptodate)
{
unsigned long flags;
struct buffer_head *first;
struct buffer_head *tmp;
- struct page *page;
int page_uptodate = 1;
BUG_ON(!buffer_async_read(bh));
@@ -311,12 +313,12 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
* Completion handler for block_write_full_page() - pages which are unlocked
* during I/O, and which have PageWriteback cleared upon I/O completion.
*/
-void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+void end_buffer_async_write(struct address_space *mapping, struct page *page,
+ struct buffer_head *bh, int uptodate)
{
unsigned long flags;
struct buffer_head *first;
struct buffer_head *tmp;
- struct page *page;
BUG_ON(!buffer_async_write(bh));
@@ -2311,7 +2313,7 @@ int block_read_full_page(struct inode *inode, struct page *page,
for (i = 0; i < nr; i++) {
bh = arr[i];
if (buffer_uptodate(bh))
- end_buffer_async_read(bh, 1);
+ end_buffer_async_read(inode->i_mapping, page, bh, 1);
else
submit_bh(REQ_OP_READ, 0, bh);
}
@@ -2517,7 +2519,8 @@ EXPORT_SYMBOL(block_page_mkwrite);
* immediately, while under the page lock. So it needs a special end_io
* handler which does not touch the bh after unlocking it.
*/
-static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
+static void end_buffer_read_nobh(struct address_space *mapping,
+ struct page *page, struct buffer_head *bh, int uptodate)
{
__end_buffer_read_notouch(bh, uptodate);
}
@@ -2989,11 +2992,16 @@ EXPORT_SYMBOL(generic_block_bmap);
static void end_bio_bh_io_sync(struct bio *bio)
{
struct buffer_head *bh = bio->bi_private;
+ struct address_space *mapping;
+ struct page *page;
+
+ page = bh->b_page;
+ mapping = fs_page_mapping_get_with_bh(page, bh);
if (unlikely(bio_flagged(bio, BIO_QUIET)))
set_bit(BH_Quiet, &bh->b_state);
- bh->b_end_io(bh, !bio->bi_status);
+ bh->b_end_io(mapping, page, bh, !bio->bi_status);
bio_put(bio);
}
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2389,7 +2389,8 @@ extern void ext4_check_inodes_bitmap(struct super_block *);
extern void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap);
extern int ext4_init_inode_table(struct super_block *sb,
ext4_group_t group, int barrier);
-extern void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate);
+extern void ext4_end_bitmap_read(struct address_space *mapping,
+ struct page *page, struct buffer_head *bh, int uptodate);
/* mballoc.c */
extern const struct file_operations ext4_seq_mb_groups_fops;
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -104,7 +104,8 @@ static int ext4_init_inode_bitmap(struct super_block *sb,
return 0;
}
-void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
+void ext4_end_bitmap_read(struct address_space *mapping, struct page *page,
+ struct buffer_head *bh, int uptodate)
{
if (uptodate) {
set_buffer_uptodate(bh);
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -202,7 +202,7 @@ static void gfs2_meta_read_endio(struct bio *bio)
do {
struct buffer_head *next = bh->b_this_page;
len -= bh->b_size;
- bh->b_end_io(bh, !bio->bi_status);
+ bh->b_end_io(page->mapping, page, bh, !bio->bi_status);
bh = next;
} while (bh && len);
}
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -29,7 +29,8 @@
/*
* IO end handler for temporary buffer_heads handling writes to the journal.
*/
-static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
+static void journal_end_buffer_io_sync(struct address_space *mapping,
+ struct page *page, struct buffer_head *bh, int uptodate)
{
struct buffer_head *orig_bh = bh->b_private;
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -42,6 +42,8 @@
/**
* ntfs_end_buffer_async_read - async io completion for reading attributes
+ * @mapping: address space for the page of buffer head
+ * @page: page the buffer head belongs to
* @bh: buffer head on which io is completed
* @uptodate: whether @bh is now uptodate or not
*
@@ -56,11 +58,11 @@
* record size, and index_block_size_bits, to the log(base 2) of the ntfs
* record size.
*/
-static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
+static void ntfs_end_buffer_async_read(struct address_space *mapping,
+ struct page *page, struct buffer_head *bh, int uptodate)
{
unsigned long flags;
struct buffer_head *first, *tmp;
- struct page *page;
struct inode *vi;
ntfs_inode *ni;
int page_uptodate = 1;
@@ -365,7 +367,8 @@ static int ntfs_read_block(struct page *page)
if (likely(!buffer_uptodate(tbh)))
submit_bh(REQ_OP_READ, 0, tbh);
else
- ntfs_end_buffer_async_read(tbh, 1);
+ ntfs_end_buffer_async_read(page->mapping,
+ page, tbh, 1);
}
return 0;
}
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -617,7 +617,8 @@ static void release_buffer_page(struct buffer_head *bh)
}
}
-static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
+static void reiserfs_end_buffer_io_sync(struct address_space *mapping,
+ struct page *page, struct buffer_head *bh, int uptodate)
{
if (buffer_journaled(bh)) {
reiserfs_warning(NULL, "clm-2084",
@@ -633,7 +634,8 @@ static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
release_buffer_page(bh);
}
-static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate)
+static void reiserfs_end_ordered_io(struct address_space *mapping,
+ struct page *page, struct buffer_head *bh, int uptodate)
{
if (uptodate)
set_buffer_uptodate(bh);
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -49,7 +49,8 @@ enum bh_state_bits {
struct page;
struct buffer_head;
struct address_space;
-typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
+typedef void (bh_end_io_t)(struct address_space *mapping, struct page *page,
+ struct buffer_head *bh, int uptodate);
/*
* Historically, a buffer_head was used to map a single block
@@ -163,9 +164,12 @@ struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
bool retry);
void create_empty_buffers(struct page *, unsigned long,
unsigned long b_state);
-void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
-void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
-void end_buffer_async_write(struct buffer_head *bh, int uptodate);
+void end_buffer_read_sync(struct address_space *mapping, struct page *page,
+ struct buffer_head *bh, int uptodate);
+void end_buffer_write_sync(struct address_space *mapping, struct page *page,
+ struct buffer_head *bh, int uptodate);
+void end_buffer_async_write(struct address_space *mapping, struct page *page,
+ struct buffer_head *bh, int uptodate);
/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);