@@ -1330,7 +1330,7 @@ xfs_btree_get_buf_block(
* Read in the buffer at the given ptr and return the buffer and
* the block pointer within the buffer.
*/
-STATIC int
+int
xfs_btree_read_buf_block(
struct xfs_btree_cur *cur,
const union xfs_btree_ptr *ptr,
@@ -700,6 +700,9 @@ void xfs_btree_set_ptr_null(struct xfs_btree_cur *cur,
int xfs_btree_get_buf_block(struct xfs_btree_cur *cur,
const union xfs_btree_ptr *ptr, struct xfs_btree_block **block,
struct xfs_buf **bpp);
+int xfs_btree_read_buf_block(struct xfs_btree_cur *cur,
+ const union xfs_btree_ptr *ptr, int flags,
+ struct xfs_btree_block **block, struct xfs_buf **bpp);
void xfs_btree_set_sibling(struct xfs_btree_cur *cur,
struct xfs_btree_block *block, const union xfs_btree_ptr *ptr,
int lr);
@@ -333,18 +333,35 @@ xfs_btree_commit_ifakeroot(
/*
* Put a btree block that we're loading onto the ordered list and release it.
* The btree blocks will be written to disk when bulk loading is finished.
+ * If we reach the dirty buffer threshold, flush them to disk before
+ * continuing.
*/
-static void
+static int
xfs_btree_bload_drop_buf(
- struct list_head *buffers_list,
- struct xfs_buf **bpp)
+ struct xfs_btree_bload *bbl,
+ struct list_head *buffers_list,
+ struct xfs_buf **bpp)
{
- if (*bpp == NULL)
- return;
+ struct xfs_buf *bp = *bpp;
+ int error;
- xfs_buf_delwri_queue_here(*bpp, buffers_list);
- xfs_buf_relse(*bpp);
+ if (!bp)
+ return 0;
+
+ xfs_buf_delwri_queue_here(bp, buffers_list);
+ xfs_buf_relse(bp);
*bpp = NULL;
+ bbl->nr_dirty++;
+
+ if (!bbl->max_dirty || bbl->nr_dirty < bbl->max_dirty)
+ return 0;
+
+ error = xfs_buf_delwri_submit(buffers_list);
+ if (error)
+ return error;
+
+ bbl->nr_dirty = 0;
+ return 0;
}
/*
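The heart of this patch: xfs_btree_bload_drop_buf now counts each buffer it queues and, once bbl->max_dirty buffers have accumulated, submits the whole delwri list to disk instead of letting it pin memory for the entire bulk load; a max_dirty of zero preserves the old write-nothing-until-the-end behavior. Below is a minimal userspace analogue of that queue-until-threshold control flow; the names (bulk_load, flush_pending, drop_buf) are hypothetical stand-ins for illustration, not the XFS buffer cache API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the bulk load bookkeeping. */
struct bulk_load {
        unsigned int    nr_dirty;       /* buffers queued so far */
        unsigned int    max_dirty;      /* 0 == never flush early */
};

/* Pretend to write out everything queued so far; 0 on success. */
static int flush_pending(struct bulk_load *bl)
{
        printf("flushing %u dirty buffers\n", bl->nr_dirty);
        return 0;
}

/*
 * Mirror of the drop_buf logic above: queue one buffer, then flush
 * the accumulated set only when the threshold has been reached.
 */
static int drop_buf(struct bulk_load *bl)
{
        int error;

        bl->nr_dirty++;
        if (!bl->max_dirty || bl->nr_dirty < bl->max_dirty)
                return 0;

        error = flush_pending(bl);
        if (error)
                return error;

        bl->nr_dirty = 0;
        return 0;
}

int main(void)
{
        struct bulk_load bl = { .nr_dirty = 0, .max_dirty = 4 };
        int i;

        /* Format ten blocks; a flush fires after the 4th and 8th. */
        for (i = 0; i < 10; i++)
                if (drop_buf(&bl))
                        return EXIT_FAILURE;
        return EXIT_SUCCESS;
}

As in xfs_btree_bload itself, a tail of fewer than max_dirty buffers stays on the list for the caller's final submit.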
@@ -416,7 +433,10 @@ xfs_btree_bload_prep_block(
*/
if (*blockp)
xfs_btree_set_sibling(cur, *blockp, &new_ptr, XFS_BB_RIGHTSIB);
- xfs_btree_bload_drop_buf(buffers_list, bpp);
+
+ ret = xfs_btree_bload_drop_buf(bbl, buffers_list, bpp);
+ if (ret)
+ return ret;
/* Initialize the new btree block. */
xfs_btree_init_block_cur(cur, new_bp, level, nr_this_block);
@@ -480,7 +500,7 @@ xfs_btree_bload_node(
ASSERT(!xfs_btree_ptr_is_null(cur, child_ptr));
- ret = xfs_btree_get_buf_block(cur, child_ptr, &child_block,
+ ret = xfs_btree_read_buf_block(cur, child_ptr, 0, &child_block,
&child_bp);
if (ret)
return ret;
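This call-site change is why the first two hunks exported xfs_btree_read_buf_block: once leaf buffers can be flushed and released partway through the load, xfs_btree_bload_node can no longer assume a child block is still in memory when it copies the child's keys into the new node. A sketch of the distinction between the two helpers, using the signatures declared earlier; error handling and surrounding context are elided:

/* Hands back a buffer without any disk I/O: fine for a block we are
 * about to format ourselves, wrong for one that may already have been
 * written out and reclaimed. */
ret = xfs_btree_get_buf_block(cur, child_ptr, &child_block, &child_bp);

/* Reads the block back in from disk (flags == 0) if it is no longer
 * cached, so the keys copied into the parent are the ones we wrote. */
ret = xfs_btree_read_buf_block(cur, child_ptr, 0, &child_block, &child_bp);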
@@ -759,6 +779,7 @@ xfs_btree_bload(
cur->bc_nlevels = bbl->btree_height;
xfs_btree_set_ptr_null(cur, &child_ptr);
xfs_btree_set_ptr_null(cur, &ptr);
+ bbl->nr_dirty = 0;
xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
&avg_per_block, &blocks, &blocks_with_extra);
@@ -797,7 +818,10 @@ xfs_btree_bload(
xfs_btree_copy_ptrs(cur, &child_ptr, &ptr, 1);
}
total_blocks += blocks;
- xfs_btree_bload_drop_buf(&buffers_list, &bp);
+
+ ret = xfs_btree_bload_drop_buf(bbl, &buffers_list, &bp);
+ if (ret)
+ goto out;
/* Populate the internal btree nodes. */
for (level = 1; level < cur->bc_nlevels; level++) {
@@ -839,7 +863,11 @@ xfs_btree_bload(
xfs_btree_copy_ptrs(cur, &first_ptr, &ptr, 1);
}
total_blocks += blocks;
- xfs_btree_bload_drop_buf(&buffers_list, &bp);
+
+ ret = xfs_btree_bload_drop_buf(bbl, &buffers_list, &bp);
+ if (ret)
+ goto out;
+
xfs_btree_copy_ptrs(cur, &child_ptr, &first_ptr, 1);
}
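Both the leaf pass and this node pass now propagate a flush failure by jumping to the function's out label. That label is not part of this diff; assuming it matches the pre-existing cleanup at the bottom of xfs_btree_bload, it looks roughly like the sketch below, which is what makes the early exit safe: buffers still queued for delayed write are cancelled rather than leaked.

out:
        /* Sketch of the assumed pre-existing error path, not shown in
         * this diff: drop anything still queued for delayed write and
         * release any buffer we are holding. */
        xfs_buf_delwri_cancel(&buffers_list);
        if (bp)
                xfs_buf_relse(bp);
        return ret;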
@@ -115,6 +115,16 @@ struct xfs_btree_bload {
* height of the new btree.
*/
unsigned int btree_height;
+
+ /*
+ * Flush the new btree block buffer list to disk after this many blocks
+ * have been formatted. Zero prohibits writing any buffers until all
+ * blocks have been formatted.
+ */
+ uint16_t max_dirty;
+
+ /* Number of dirty buffers. */
+ uint16_t nr_dirty;
};
int xfs_btree_bload_compute_geometry(struct xfs_btree_cur *cur,
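The two counters sit at the end of struct xfs_btree_bload next to the existing geometry fields. A client sets max_dirty alongside the callbacks (get_records, claim_block, and so on, not shown here) before sizing and loading the tree. A rough sketch of that flow, assuming cur, nr_records, and priv are set up elsewhere and that bbl.nr_blocks worth of space is reserved between the two calls:

struct xfs_btree_bload  bbl = { 0 };
int                     error;

/* The bulk-load callbacks would be filled in here as well. */
bbl.max_dirty = 64;     /* flush after every 64 formatted blocks */

error = xfs_btree_bload_compute_geometry(cur, &bbl, nr_records);
if (error)
        return error;

/* ... reserve bbl.nr_blocks blocks for claim_block to hand out ... */

error = xfs_btree_bload(cur, &bbl, priv);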
@@ -89,6 +89,7 @@ xrep_newbt_init_ag(
xnr->alloc_hint = alloc_hint;
xnr->resv = resv;
INIT_LIST_HEAD(&xnr->resv_list);
+ xnr->bload.max_dirty = XFS_B_TO_FSBT(sc->mp, 256U << 10); /* 256K */
xrep_newbt_estimate_slack(xnr);
}
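Online repair caps the dirty list at 256 KiB worth of new btree blocks. XFS_B_TO_FSBT converts bytes to whole filesystem blocks by shifting right by the superblock's block size log, so with the common 4 KiB block size this works out to (256 << 10) >> 12 = 64 buffers between flushes. A runnable check of that arithmetic, with the macro expanded by hand since the kernel headers are not pulled in here:

#include <stdio.h>

/* Hand-expanded XFS_B_TO_FSBT: bytes to whole fs blocks, truncating. */
static unsigned long long b_to_fsbt(unsigned long long bytes,
                                    unsigned int blocklog)
{
        return bytes >> blocklog;
}

int main(void)
{
        /* 4 KiB blocks (blocklog = 12): 64 buffers per flush batch. */
        printf("max_dirty = %llu\n", b_to_fsbt(256U << 10, 12));
        /* 64 KiB blocks (blocklog = 16): only 4. */
        printf("max_dirty = %llu\n", b_to_fsbt(256U << 10, 16));
        return 0;
}

Even at the 512-byte minimum block size the result is only 512, which is why the uint16_t counters added above are wide enough.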