@@ -364,7 +364,7 @@ xfs_ialloc_inode_init(
(j * M_IGEO(mp)->blocks_per_cluster));
error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
mp->m_bsize * M_IGEO(mp)->blocks_per_cluster,
- XBF_UNMAPPED, &fbuf);
+ 0, &fbuf);
if (error)
return error;
@@ -137,7 +137,7 @@ xfs_imap_to_bp(
int error;
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
- imap->im_len, XBF_UNMAPPED, bpp, &xfs_inode_buf_ops);
+ imap->im_len, 0, bpp, &xfs_inode_buf_ops);
if (xfs_metadata_is_sick(error))
xfs_agno_mark_sick(mp, xfs_daddr_to_agno(mp, imap->im_blkno),
XFS_SICK_AG_INODES);
@@ -1558,8 +1558,7 @@ xrep_dinode_core(
/* Read the inode cluster buffer. */
error = xfs_trans_read_buf(sc->mp, sc->tp, sc->mp->m_ddev_targp,
- ri->imap.im_blkno, ri->imap.im_len, XBF_UNMAPPED, &bp,
- NULL);
+ ri->imap.im_blkno, ri->imap.im_len, 0, &bp, NULL);
if (error)
return error;
@@ -203,7 +203,7 @@ _xfs_buf_alloc(
* We don't want certain flags to appear in b_flags unless they are
* specifically set by later operations on the buffer.
*/
- flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
+ flags &= ~(XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
/*
* A new buffer is held and locked by the owner. This ensures that the
@@ -349,9 +349,7 @@ xfs_buf_alloc_kmem(
*
* The third type of buffer is the multi-page buffer. These are always made
* up of single pages so that they can be fed to vm_map_ram() to return a
- * contiguous memory region we can access the data through, or mark it as
- * XBF_UNMAPPED and access the data directly through individual page_address()
- * calls.
+ * contiguous memory region we can access the data through.
*/
static int
xfs_buf_alloc_backing_mem(
@@ -474,8 +472,6 @@ _xfs_buf_map_pages(
if (bp->b_page_count == 1) {
/* A single page buffer is always mappable */
bp->b_addr = page_address(bp->b_pages[0]);
- } else if (flags & XBF_UNMAPPED) {
- bp->b_addr = NULL;
} else {
int retried = 0;
unsigned nofs_flag;
@@ -1411,7 +1407,7 @@ xfs_buf_ioend(
trace_xfs_buf_iodone(bp, _RET_IP_);
if (bp->b_flags & XBF_READ) {
- if (!bp->b_error && bp->b_addr && is_vmalloc_addr(bp->b_addr))
+ if (!bp->b_error && is_vmalloc_addr(bp->b_addr))
invalidate_kernel_vmap_range(bp->b_addr,
xfs_buf_vmap_len(bp));
if (!bp->b_error && bp->b_ops)
@@ -1583,7 +1579,7 @@ xfs_buf_submit_bio(
__bio_add_page(bio, bp->b_pages[p], PAGE_SIZE, 0);
bio->bi_iter.bi_size = size; /* limit to the actual size used */
- if (bp->b_addr && is_vmalloc_addr(bp->b_addr))
+ if (is_vmalloc_addr(bp->b_addr))
flush_kernel_vmap_range(bp->b_addr,
xfs_buf_vmap_len(bp));
}
@@ -1715,52 +1711,6 @@ xfs_buf_submit(
xfs_buf_submit_bio(bp);
}
-void *
-xfs_buf_offset(
- struct xfs_buf *bp,
- size_t offset)
-{
- struct page *page;
-
- if (bp->b_addr)
- return bp->b_addr + offset;
-
- page = bp->b_pages[offset >> PAGE_SHIFT];
- return page_address(page) + (offset & (PAGE_SIZE-1));
-}
-
-void
-xfs_buf_zero(
- struct xfs_buf *bp,
- size_t boff,
- size_t bsize)
-{
- size_t bend;
-
- if (bp->b_addr) {
- memset(bp->b_addr + boff, 0, bsize);
- return;
- }
-
- bend = boff + bsize;
- while (boff < bend) {
- struct page *page;
- int page_index, page_offset, csize;
-
- page_index = boff >> PAGE_SHIFT;
- page_offset = boff & ~PAGE_MASK;
- page = bp->b_pages[page_index];
- csize = min_t(size_t, PAGE_SIZE - page_offset,
- BBTOB(bp->b_length) - boff);
-
- ASSERT((csize + page_offset) <= PAGE_SIZE);
-
- memset(page_address(page) + page_offset, 0, csize);
-
- boff += csize;
- }
-}
-
/*
* Log a message about and stale a buffer that a caller has decided is corrupt.
*
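A note on the removal above: for XBF_UNMAPPED buffers the old xfs_buf_offset()
and xfs_buf_zero() had to translate a byte offset into a page index plus an
in-page offset, because the backing pages were never mapped into one virtual
range. With every buffer mapped, plain pointer arithmetic on b_addr is enough,
which is what the new inline helpers in xfs_buf.h (later in this patch) do.
A minimal standalone sketch of the two lookups follows; this is a userspace
model only, and fake_buf, MODEL_PAGE_SHIFT and MODEL_PAGE_SIZE are invented
names, not kernel interfaces:

#include <stdlib.h>
#include <string.h>

#define MODEL_PAGE_SHIFT	12
#define MODEL_PAGE_SIZE		(1UL << MODEL_PAGE_SHIFT)
#define MODEL_NR_PAGES		4

struct fake_buf {
	char	*pages[MODEL_NR_PAGES];	/* discontiguous backing pages */
	char	*b_addr;		/* contiguous mapping of the data */
};

/* Old style: walk the page array, as the removed xfs_buf_offset() did. */
static char *model_offset_unmapped(struct fake_buf *bp, size_t offset)
{
	char	*page = bp->pages[offset >> MODEL_PAGE_SHIFT];

	return page + (offset & (MODEL_PAGE_SIZE - 1));
}

/* New style: every buffer is mapped, so offsets are simple arithmetic. */
static char *model_offset_mapped(struct fake_buf *bp, size_t offset)
{
	return bp->b_addr + offset;
}

int main(void)
{
	struct fake_buf	bp;
	size_t		off = MODEL_PAGE_SIZE + 100;	/* inside the 2nd page */

	for (int i = 0; i < MODEL_NR_PAGES; i++)
		bp.pages[i] = calloc(1, MODEL_PAGE_SIZE);
	bp.b_addr = calloc(MODEL_NR_PAGES, MODEL_PAGE_SIZE);

	/* Both resolve to "100 bytes into the second page of data". */
	memset(model_offset_unmapped(&bp, off), 0xab, 16);
	memset(model_offset_mapped(&bp, off), 0xab, 16);
	return 0;
}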
@@ -49,7 +49,6 @@ struct xfs_buf;
#define XBF_LIVESCAN (1u << 28)
#define XBF_INCORE (1u << 29)/* lookup only, return if found in cache */
#define XBF_TRYLOCK (1u << 30)/* lock requested, but do not wait */
-#define XBF_UNMAPPED (1u << 31)/* do not map the buffer */
typedef unsigned int xfs_buf_flags_t;
@@ -70,8 +69,7 @@ typedef unsigned int xfs_buf_flags_t;
/* The following interface flags should never be set */ \
{ XBF_LIVESCAN, "LIVESCAN" }, \
{ XBF_INCORE, "INCORE" }, \
- { XBF_TRYLOCK, "TRYLOCK" }, \
- { XBF_UNMAPPED, "UNMAPPED" }
+ { XBF_TRYLOCK, "TRYLOCK" }
/*
* Internal state flags.
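The flag table edited above pairs each buffer flag bit with a printable name
so trace points can decode b_flags; retiring XBF_UNMAPPED therefore also means
dropping its table entry, or traces could keep labelling a bit that no longer
exists (or is later reused). A small standalone sketch of that flag-to-name
table pattern, in the spirit of the kernel's __print_flags(); the MODEL_*
names below are invented for illustration:

#include <stdio.h>

#define MODEL_INCORE	(1u << 29)
#define MODEL_TRYLOCK	(1u << 30)

static const struct {
	unsigned int	flag;
	const char	*name;
} model_flag_names[] = {
	{ MODEL_INCORE,		"INCORE" },
	{ MODEL_TRYLOCK,	"TRYLOCK" },
};

/* Print a symbolic name for every bit set in @flags. */
static void model_print_flags(unsigned int flags)
{
	for (size_t i = 0;
	     i < sizeof(model_flag_names) / sizeof(model_flag_names[0]); i++)
		if (flags & model_flag_names[i].flag)
			printf("%s ", model_flag_names[i].name);
	printf("\n");
}

int main(void)
{
	model_print_flags(MODEL_TRYLOCK | MODEL_INCORE);
	return 0;
}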
@@ -316,12 +314,20 @@ extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
void xfs_buf_ioend_fail(struct xfs_buf *);
-void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)
/* Buffer Utility Routines */
-extern void *xfs_buf_offset(struct xfs_buf *, size_t);
+static inline void *xfs_buf_offset(struct xfs_buf *bp, size_t offset)
+{
+ return bp->b_addr + offset;
+}
+
+static inline void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize)
+{
+ memset(bp->b_addr + boff, 0, bsize);
+}
+
extern void xfs_buf_stale(struct xfs_buf *bp);
/* Delayed Write Buffer Routines */
@@ -70,7 +70,7 @@ xfs_buf_item_straddle(
{
void *first, *last;
- if (bp->b_page_count == 1 || !(bp->b_flags & XBF_UNMAPPED))
+ if (bp->b_page_count == 1)
return false;
first = xfs_buf_offset(bp, offset + (first_bit << XFS_BLF_SHIFT));
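For context on the hunk above: the straddle check compares the address
distance between the first and one-past-last logged 128-byte chunk
(1 << XFS_BLF_SHIFT) with the byte length of the range; when a buffer's pages
are not virtually contiguous, a range that crosses a page boundary makes the
two differ. Since xfs_buf_offset() now always returns b_addr plus the offset,
the distance check should no longer trip, and only the single-page shortcut
remains meaningful. A standalone sketch of the distance check, using invented
names and a deliberately scrambled two-page layout:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE_SHIFT	12
#define MODEL_PAGE_SIZE		(1UL << MODEL_PAGE_SHIFT)
#define MODEL_CHUNK_SHIFT	7	/* mirrors XFS_BLF_SHIFT: 128-byte chunks */

struct fake_buf {
	char	*pages[2];
};

static char *model_offset(struct fake_buf *bp, size_t offset)
{
	return bp->pages[offset >> MODEL_PAGE_SHIFT] +
			(offset & (MODEL_PAGE_SIZE - 1));
}

/* Does the logged chunk range span a gap in the buffer's virtual layout? */
static bool model_straddle(struct fake_buf *bp, unsigned int offset,
		int first_bit, int nbits)
{
	char	*first = model_offset(bp,
			offset + (first_bit << MODEL_CHUNK_SHIFT));
	char	*last = model_offset(bp,
			offset + ((first_bit + nbits) << MODEL_CHUNK_SHIFT));

	return last - first != nbits << MODEL_CHUNK_SHIFT;
}

int main(void)
{
	char		*arena = malloc(2 * MODEL_PAGE_SIZE);
	struct fake_buf	bp = {
		/* pages deliberately out of order: discontiguous data */
		.pages = { arena + MODEL_PAGE_SIZE, arena },
	};

	/* 32 chunks starting at chunk 20 cross the page boundary. */
	printf("crossing range straddles: %d\n", model_straddle(&bp, 0, 20, 32));
	/* 8 chunks starting at chunk 0 stay within one page. */
	printf("in-page range straddles:  %d\n", model_straddle(&bp, 0, 0, 8));
	free(arena);
	return 0;
}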
@@ -1006,7 +1006,6 @@ xlog_recover_buf_commit_pass2(
struct xfs_mount *mp = log->l_mp;
struct xfs_buf *bp;
int error;
- uint buf_flags;
xfs_lsn_t lsn;
/*
@@ -1025,13 +1024,8 @@ xlog_recover_buf_commit_pass2(
}
trace_xfs_log_recover_buf_recover(log, buf_f);
-
- buf_flags = 0;
- if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
- buf_flags |= XBF_UNMAPPED;
-
error = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
- buf_flags, &bp, NULL);
+ 0, &bp, NULL);
if (error)
return error;
@@ -1721,8 +1721,7 @@ xfs_ifree_cluster(
* to mark all the active inodes on the buffer stale.
*/
error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
- mp->m_bsize * igeo->blocks_per_cluster,
- XBF_UNMAPPED, &bp);
+ mp->m_bsize * igeo->blocks_per_cluster, 0, &bp);
if (error)
return error;