@@ -43,7 +43,6 @@ xfs_get_aghdr_buf(
if (error)
return error;
- xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
bp->b_bn = blkno;
bp->b_maps[0].bm_bn = blkno;
bp->b_ops = ops;
@@ -22,9 +22,6 @@
static kmem_zone_t *xfs_buf_zone;
-#define xb_to_gfp(flags) \
- ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
-
/*
* Locking orders
*
@@ -354,11 +351,21 @@ xfs_buf_alloc_slab(
static int
xfs_buf_alloc_pages(
struct xfs_buf *bp,
- gfp_t gfp_mask,
+ xfs_buf_flags_t flags,
bool fail_fast)
{
+ gfp_t gfp_mask = __GFP_NOWARN;
int i;
+ if (flags & XBF_READ_AHEAD)
+ gfp_mask |= __GFP_NORETRY;
+ else
+ gfp_mask |= GFP_NOFS;
+
+ /* assure a zeroed buffer for non-read cases */
+ if (!(flags & XBF_READ))
+ gfp_mask |= __GFP_ZERO;
+
ASSERT(bp->b_pages == NULL);
bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
@@ -675,12 +682,7 @@ xfs_buf_get_map(
*/
if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
xfs_buf_alloc_slab(new_bp, flags) < 0) {
- gfp_t gfp_mask = xb_to_gfp(flags);
-
- /* assure a zeroed buffer for non-read cases */
- if (!(flags & XBF_READ))
- gfp_mask |= __GFP_ZERO;
- error = xfs_buf_alloc_pages(new_bp, gfp_mask,
+ error = xfs_buf_alloc_pages(new_bp, flags,
flags & XBF_READ_AHEAD);
if (error)
goto out_free_buf;
@@ -922,7 +924,7 @@ xfs_buf_get_uncached(
if (error)
goto fail;
- error = xfs_buf_alloc_pages(bp, xb_to_gfp(flags), true);
+ error = xfs_buf_alloc_pages(bp, flags, true);
if (error)
goto fail_free_buf;
Lift the buffer zeroing logic from xfs_buf_get_map into xfs_buf_alloc_pages so that it also covers uncached buffers, and remove the now obsolete manual zeroing in the only direct caller of xfs_buf_get_uncached. Signed-off-by: Christoph Hellwig <hch@lst.de> --- fs/xfs/libxfs/xfs_ag.c | 1 - fs/xfs/xfs_buf.c | 24 +++++++++++++----------- 2 files changed, 13 insertions(+), 12 deletions(-)