@@ -684,53 +684,49 @@ xfs_buf_incore(
* cache hits, as metadata intensive workloads will see 3 orders of magnitude
* more hits than misses.
*/
-struct xfs_buf *
+int
xfs_buf_get_map(
struct xfs_buftarg *target,
struct xfs_buf_map *map,
int nmaps,
- xfs_buf_flags_t flags)
+ xfs_buf_flags_t flags,
+ struct xfs_buf **bpp)
{
struct xfs_buf *bp;
struct xfs_buf *new_bp;
int error = 0;
error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
-
switch (error) {
case 0:
/* cache hit */
goto found;
- case -EAGAIN:
- /* cache hit, trylock failure, caller handles failure */
- ASSERT(flags & XBF_TRYLOCK);
- return NULL;
case -ENOENT:
/* cache miss, go for insert */
break;
+ case -EAGAIN:
+ /* cache hit, trylock failure, caller handles failure */
+ ASSERT(flags & XBF_TRYLOCK);
+ /* fall through */
case -EFSCORRUPTED:
default:
- /*
- * None of the higher layers understand failure types
- * yet, so return NULL to signal a fatal lookup error.
- */
- return NULL;
+ return error;
}
error = _xfs_buf_alloc(target, map, nmaps, flags, &new_bp);
if (unlikely(error))
- return NULL;
+ return error;
error = xfs_buf_allocate_memory(new_bp, flags);
if (error) {
xfs_buf_free(new_bp);
- return NULL;
+ return error;
}
error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
if (error) {
xfs_buf_free(new_bp);
- return NULL;
+ return error;
}
if (bp != new_bp)
@@ -743,7 +739,7 @@ xfs_buf_get_map(
xfs_warn(target->bt_mount,
"%s: failed to map pages\n", __func__);
xfs_buf_relse(bp);
- return NULL;
+ return error;
}
}
@@ -756,7 +752,8 @@ xfs_buf_get_map(
XFS_STATS_INC(target->bt_mount, xb_get);
trace_xfs_buf_get(bp, flags, _RET_IP_);
- return bp;
+ *bpp = bp;
+ return 0;
}
int
@@ -767,11 +764,12 @@ xfs_buf_get(
struct xfs_buf **bpp)
{
struct xfs_buf *bp;
+ int error;
DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
- bp = xfs_buf_get_map(target, &map, 1, 0);
- if (!bp)
- return -ENOMEM;
+ error = xfs_buf_get_map(target, &map, 1, 0, &bp);
+ if (error)
+ return error;
*bpp = bp;
return 0;
@@ -836,12 +834,13 @@ xfs_buf_read_map(
const struct xfs_buf_ops *ops)
{
struct xfs_buf *bp;
+ int error;
flags |= XBF_READ;
- bp = xfs_buf_get_map(target, map, nmaps, flags);
- if (!bp)
- return -ENOMEM;
+ error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
+ if (error)
+ return error;
trace_xfs_buf_read(bp, flags, _RET_IP_);
@@ -192,9 +192,8 @@ struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
xfs_daddr_t blkno, size_t numblks,
xfs_buf_flags_t flags);
-struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
- struct xfs_buf_map *map, int nmaps,
- xfs_buf_flags_t flags);
+int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
+ int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
const struct xfs_buf_ops *ops);
@@ -122,9 +122,14 @@ xfs_trans_get_buf_map(
{
xfs_buf_t *bp;
struct xfs_buf_log_item *bip;
+ int error;
- if (!tp)
- return xfs_buf_get_map(target, map, nmaps, flags);
+ if (!tp) {
+ error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
+ if (error)
+ return NULL;
+ return bp;
+ }
/*
* If we find the buffer in the cache with this transaction
@@ -149,10 +154,9 @@ xfs_trans_get_buf_map(
return bp;
}
- bp = xfs_buf_get_map(target, map, nmaps, flags);
- if (bp == NULL) {
+ error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
+ if (error)
return NULL;
- }
ASSERT(!bp->b_error);