@@ -70,6 +70,7 @@ typedef struct xfs_buf {
struct xfs_buf_map *b_maps;
struct xfs_buf_map __b_map;
int b_nmaps;
+ struct list_head b_list;
#ifdef XFS_BUF_TRACING
struct list_head b_lock_list;
const char *b_func;
@@ -243,11 +244,14 @@ xfs_buf_get_uncached(struct xfs_buftarg *targ, size_t bblen, int flags)
return bp;
}
+/* Queue a single buffer on a delwri list; writeback is deferred to xfs_buf_delwri_submit(). */
static inline void
xfs_buf_delwri_queue(struct xfs_buf *bp, struct list_head *buffer_list)
{
bp->b_node.cn_count++;
- libxfs_writebuf(bp, 0);
+ list_add_tail(&bp->b_list, buffer_list);
}
+int xfs_buf_delwri_submit(struct list_head *buffer_list);
+
#endif /* __LIBXFS_IO_H__ */
@@ -381,7 +381,6 @@ roundup_64(uint64_t x, uint32_t y)
#define xfs_buf_relse(bp) libxfs_putbuf(bp)
#define xfs_buf_get(devp,blkno,len) (libxfs_getbuf((devp), (blkno), (len)))
#define xfs_bwrite(bp) libxfs_writebuf((bp), 0)
-#define xfs_buf_delwri_submit(bl) (0)
#define xfs_buf_oneshot(bp) ((void) 0)
#define XBRW_READ LIBXFS_BREAD
@@ -1472,3 +1472,28 @@ libxfs_irele(
libxfs_idestroy(ip);
kmem_zone_free(xfs_inode_zone, ip);
}
+
+/*
+ * Write out a buffer list synchronously.
+ *
+ * Removes each buffer from @buffer_list and writes it out, so the list is
+ * consumed by this function; callers that still need to track the buffers
+ * must do so by other means.  Returns 0 on success, otherwise the first
+ * non-zero error from writeback (errors on later buffers are dropped).
+ */
+int
+xfs_buf_delwri_submit(
+ struct list_head *buffer_list)
+{
+ struct xfs_buf *bp, *n;
+ int error = 0, error2;
+
+ list_for_each_entry_safe(bp, n, buffer_list, b_list) {
+ list_del_init(&bp->b_list);
+ error2 = libxfs_writebuf(bp, 0);
+ if (!error)
+ error = error2;
+ }
+
+ return error;
+}