@@ -827,6 +827,8 @@ cache in your filesystem. The following members are defined:
int (*swap_activate)(struct swap_info_struct *sis, struct file *f, sector_t *span);
int (*swap_deactivate)(struct file *);
int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter);
+ void (*ioerror)(struct address_space *mapping, int direction,
+ loff_t pos, u64 len, int error);
};
``writepage``
@@ -1056,6 +1058,11 @@ cache in your filesystem. The following members are defined:
``swap_rw``
Called to read or write swap pages when SWP_FS_OPS is set.
+``ioerror``
+ Called to notify the filesystem of an IO error encountered during
+ readahead or writeback of the byte range starting at ``pos`` and
+ spanning ``len`` bytes; ``direction`` is READ or WRITE.  This may be
+ called from interrupt context, with no locks guaranteed to be held.
+
The File Object
===============
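For illustration only (not part of this patch): a filesystem could wire up
the new operation roughly as below.  The myfs_* names are hypothetical;
only the ->ioerror prototype above comes from the patch, and the body is
limited to a pr_debug() because the callback may run in interrupt context.

    #include <linux/fs.h>
    #include <linux/printk.h>

    /* Hypothetical example; the myfs_* names are assumptions, not kernel APIs. */
    static void myfs_ioerror(struct address_space *mapping, int direction,
                             loff_t pos, u64 len, int error)
    {
            /* May run in interrupt context: no sleeping, no blocking locks. */
            pr_debug("myfs: %s error %d on inode %lu, pos %lld, len %llu\n",
                     direction == WRITE ? "writeback" : "read", error,
                     mapping->host->i_ino, pos, (unsigned long long)len);
    }

    static const struct address_space_operations myfs_aops = {
            /* ... the filesystem's usual address_space operations ... */
            .ioerror        = myfs_ioerror,
    };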
@@ -284,6 +284,14 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
*lenp = plen;
}
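+/*
+ * Notify the filesystem of an IO error on the byte range [pos, pos + len)
+ * if it has registered an ->ioerror callback.
+ */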
+static inline void iomap_mapping_ioerror(struct address_space *mapping,
+ int direction, loff_t pos, u64 len, int error)
+{
+ if (mapping && mapping->a_ops->ioerror)
+ mapping->a_ops->ioerror(mapping, direction, pos, len,
+ error);
+}
+
static void iomap_finish_folio_read(struct folio *folio, size_t off,
size_t len, int error)
{
@@ -302,6 +310,10 @@ static void iomap_finish_folio_read(struct folio *folio, size_t off,
spin_unlock_irqrestore(&ifs->state_lock, flags);
}
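+ /* Report the failed range to the filesystem before completing the read. */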
+ if (error)
+ iomap_mapping_ioerror(folio->mapping, READ,
+ folio_pos(folio) + off, len, error);
+
if (finished)
folio_end_read(folio, uptodate);
}
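For context, not part of this diff (reproduced from memory, so treat the
exact shape as an assumption): iomap_finish_folio_read() is invoked per
folio from the read bio's completion handler, which is why the
documentation above warns that ->ioerror may be called from interrupt
context.  Roughly:

    static void iomap_read_end_io(struct bio *bio)
    {
            int error = blk_status_to_errno(bio->bi_status);
            struct folio_iter fi;

            bio_for_each_folio_all(fi, bio)
                    iomap_finish_folio_read(fi.folio, fi.offset, fi.length,
                                            error);
            bio_put(bio);
    }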
@@ -670,11 +682,16 @@ static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
{
struct bio_vec bvec;
struct bio bio;
+ int ret;
bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
bio_add_folio_nofail(&bio, folio, plen, poff);
- return submit_bio_wait(&bio);
+ ret = submit_bio_wait(&bio);
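+ /* Synchronous reads for read-modify-write also report failures as READ. */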
+ if (ret)
+ iomap_mapping_ioerror(folio->mapping, READ,
+ folio_pos(folio) + poff, plen, ret);
+ return ret;
}
static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
@@ -1573,6 +1590,11 @@ u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend)
/* walk all folios in bio, ending page IO on them */
bio_for_each_folio_all(fi, bio) {
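+ /* On a failed ioend, report each folio's range before ending writeback. */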
+ if (ioend->io_error)
+ iomap_mapping_ioerror(inode->i_mapping, WRITE,
+ folio_pos(fi.folio) + fi.offset,
+ fi.length, ioend->io_error);
+
iomap_finish_folio_write(inode, fi.folio, fi.length);
folio_count++;
}
@@ -1881,6 +1903,8 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
if (count)
wpc->nr_folios++;
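+ /*
+ * If the mapping failed before any range of this folio was added to an
+ * ioend, writeback completion will never see the error, so report it
+ * here with a zero length.
+ */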
+ if (error && !count)
+ iomap_mapping_ioerror(inode->i_mapping, WRITE, pos, 0, error);
/*
* We can have dirty bits set past end of file in page_mkwrite path
@@ -438,6 +438,10 @@ struct address_space_operations {
sector_t *span);
void (*swap_deactivate)(struct file *file);
int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter);
+
+ /* Notify the filesystem of IO errors during readahead or writeback */
+ void (*ioerror)(struct address_space *mapping, int direction,
+ loff_t pos, u64 len, int error);
};
extern const struct address_space_operations empty_aops;