--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1338,8 +1338,11 @@ xfs_buf_bio_end_io(
 
 	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
 		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
 
-	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
+	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
+		if (bp->b_flags & XBF_RW_HINT)
+			bitmap_copy(bp->b_rd_hint, bio->bi_rd_hint, BLKDEV_MAX_MIRRORS);
 		xfs_buf_ioend_async(bp);
+	}
 	bio_put(bio);
 }
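
For context, a minimal sketch of how a read-retry path might consume the hint captured above. xfs_buf_read_retry() is a hypothetical helper, not part of this patch; bi_rd_hint and BLKDEV_MAX_MIRRORS exist only with the block-layer patches earlier in this series, and xfs_buf_submit() is assumed to return the submission error as in contemporary kernels.

/*
 * Hypothetical sketch, not part of this patch: resubmit a failed read
 * with the bi_rd_hint captured in xfs_buf_bio_end_io() above, so the
 * block layer tries the next mirror rather than the one that just
 * returned bad data.
 */
static int
xfs_buf_read_retry(
	struct xfs_buf	*bp)
{
	ASSERT(bp->b_flags & XBF_READ);

	bp->b_flags |= XBF_RW_HINT;	/* keep b_rd_hint from the last read */
	bp->b_error = 0;
	return xfs_buf_submit(bp);	/* copies b_rd_hint into each bio */
}
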
@@ -1385,6 +1388,7 @@ xfs_buf_ioapply_map(
 	bio->bi_iter.bi_sector = sector;
 	bio->bi_end_io = xfs_buf_bio_end_io;
 	bio->bi_private = bp;
+	bitmap_copy(bio->bi_rd_hint, bp->b_rd_hint, BLKDEV_MAX_MIRRORS);
 	bio_set_op_attrs(bio, op, op_flags);
 
 	for (; size && nr_pages; nr_pages--, page_index++) {
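
The submission side above copies whatever is in b_rd_hint into every bio unconditionally, so the contract falls on the caller to prepare the bitmap. A sketch of the fresh-read half of that contract follows; the helper name is illustrative only.

/*
 * Illustrative only: a fresh read starts with a cleared hint, leaving
 * the block layer free to pick any mirror; the bitmap_copy() added to
 * xfs_buf_ioapply_map() above then carries it into each bio.
 */
static void
xfs_buf_clear_rd_hint(
	struct xfs_buf	*bp)
{
	bitmap_zero(bp->b_rd_hint, BLKDEV_MAX_MIRRORS);
	bp->b_flags &= ~XBF_RW_HINT;
}
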
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -40,6 +40,7 @@ typedef enum {
 #define XBF_SYNCIO	 (1 << 10)/* treat this buffer as synchronous I/O */
 #define XBF_FUA		 (1 << 11)/* force cache write through mode */
 #define XBF_FLUSH	 (1 << 12)/* flush the disk cache before a write */
+#define XBF_RW_HINT	 (1 << 13)/* read/write hint used for alt dev retry */
 
 /* flags used only as arguments to access routines */
 #define XBF_TRYLOCK	 (1 << 16)/* lock requested, but do not wait */
@@ -65,6 +66,7 @@ typedef unsigned int xfs_buf_flags_t;
 	{ XBF_SYNCIO,		"SYNCIO" }, \
 	{ XBF_FUA,		"FUA" }, \
 	{ XBF_FLUSH,		"FLUSH" }, \
+	{ XBF_RW_HINT,		"RW_HINT" }, \
 	{ XBF_TRYLOCK,		"TRYLOCK" },	/* should never be set */\
 	{ XBF_UNMAPPED,		"UNMAPPED" },	/* ditto */\
 	{ _XBF_PAGES,		"PAGES" }, \
@@ -197,6 +199,16 @@ typedef struct xfs_buf {
 	unsigned long		b_first_retry_time; /* in jiffies */
 	int			b_last_error;
 
+	/*
+	 * Bitmap used by the block device for alternate mirror retry.
+	 *
+	 * To retry a read from the next mirror device, resubmit the bio
+	 * with the bi_rd_hint returned by the last read.  Clear the
+	 * bitmap with bitmap_zero() when alternate mirror retry is not
+	 * wanted.
+	 */
+	DECLARE_BITMAP(b_rd_hint, BLKDEV_MAX_MIRRORS);
+
 	const struct xfs_buf_ops	*b_ops;
 	bool			b_alt_retry;	/* toggle alt device retry */
 } xfs_buf_t;
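
Putting the pieces together, a hedged sketch of the loop a verifier-failure path might run over the available mirrors. xfs_buf_clear_rd_hint() and xfs_buf_read_retry() are the hypothetical helpers sketched earlier, and the loop bound simply assumes no more than BLKDEV_MAX_MIRRORS candidates; none of this is part of the patch itself.

/*
 * Hypothetical caller: read a buffer and, on failure, walk the
 * remaining mirrors using the hint handed back through b_rd_hint.
 * Everything here is illustrative, not part of this patch.
 */
static int
xfs_buf_read_mirrors(
	struct xfs_buf	*bp)
{
	int		retries;
	int		error;

	xfs_buf_clear_rd_hint(bp);	/* first read: any mirror will do */
	error = xfs_buf_submit(bp);

	for (retries = 0; error && bp->b_alt_retry &&
			  retries < BLKDEV_MAX_MIRRORS; retries++)
		error = xfs_buf_read_retry(bp);	/* next mirror via b_rd_hint */

	return error;
}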