@@ -685,53 +685,39 @@ xfs_buf_incore(
* cache hits, as metadata intensive workloads will see 3 orders of magnitude
* more hits than misses.
*/
-struct xfs_buf *
+int
xfs_buf_get_map(
struct xfs_buftarg *target,
struct xfs_buf_map *map,
int nmaps,
- xfs_buf_flags_t flags)
+ xfs_buf_flags_t flags,
+ struct xfs_buf **bpp)
{
struct xfs_buf *bp;
struct xfs_buf *new_bp;
int error = 0;
+ *bpp = NULL;
error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
-
- switch (error) {
- case 0:
- /* cache hit */
+ if (!error)
goto found;
- case -EAGAIN:
- /* cache hit, trylock failure, caller handles failure */
- ASSERT(flags & XBF_TRYLOCK);
- return NULL;
- case -ENOENT:
- /* cache miss, go for insert */
- break;
- case -EFSCORRUPTED:
- default:
- /*
- * None of the higher layers understand failure types
- * yet, so return NULL to signal a fatal lookup error.
- */
- return NULL;
- }
+ if (error != -ENOENT)
+ return error;
error = _xfs_buf_alloc(target, map, nmaps, flags, &new_bp);
if (unlikely(error))
- return NULL;
+ return error;
error = xfs_buf_allocate_memory(new_bp, flags);
if (error) {
xfs_buf_free(new_bp);
- return NULL;
+ return error;
}
error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
if (error) {
xfs_buf_free(new_bp);
- return NULL;
+ return error;
}
if (bp != new_bp)
@@ -744,7 +730,7 @@ xfs_buf_get_map(
xfs_warn(target->bt_mount,
"%s: failed to map pages\n", __func__);
xfs_buf_relse(bp);
- return NULL;
+ return error;
}
}
@@ -757,7 +743,8 @@ xfs_buf_get_map(
XFS_STATS_INC(target->bt_mount, xb_get);
trace_xfs_buf_get(bp, flags, _RET_IP_);
- return bp;
+ *bpp = bp;
+ return 0;
}
STATIC int
@@ -819,13 +806,14 @@ xfs_buf_read_map(
const struct xfs_buf_ops *ops)
{
struct xfs_buf *bp;
+ int error;
flags |= XBF_READ;
*bpp = NULL;
- bp = xfs_buf_get_map(target, map, nmaps, flags);
- if (!bp)
- return -ENOMEM;
+ error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
+ if (error)
+ return error;
trace_xfs_buf_read(bp, flags, _RET_IP_);
@@ -192,9 +192,8 @@ struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
xfs_daddr_t blkno, size_t numblks,
xfs_buf_flags_t flags);
-struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
- struct xfs_buf_map *map, int nmaps,
- xfs_buf_flags_t flags);
+int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
+ int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
const struct xfs_buf_ops *ops);
@@ -208,16 +207,9 @@ xfs_buf_get(
size_t numblks,
struct xfs_buf **bpp)
{
- struct xfs_buf *bp;
DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
- *bpp = NULL;
- bp = xfs_buf_get_map(target, &map, 1, 0);
- if (!bp)
- return -ENOMEM;
-
- *bpp = bp;
- return 0;
+ return xfs_buf_get_map(target, &map, 1, 0, bpp);
}
int xfs_buf_read(struct xfs_buftarg *target, xfs_daddr_t blkno, size_t numblks,
@@ -122,9 +122,14 @@ xfs_trans_get_buf_map(
{
xfs_buf_t *bp;
struct xfs_buf_log_item *bip;
+ int error;
- if (!tp)
- return xfs_buf_get_map(target, map, nmaps, flags);
+ if (!tp) {
+ error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
+ if (error)
+ return NULL;
+ return bp;
+ }
/*
* If we find the buffer in the cache with this transaction
@@ -149,10 +154,9 @@ xfs_trans_get_buf_map(
return bp;
}
- bp = xfs_buf_get_map(target, map, nmaps, flags);
- if (bp == NULL) {
+ error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
+ if (error)
return NULL;
- }
ASSERT(!bp->b_error);