@@ -148,6 +148,7 @@ int reftable_writer_new(struct reftable_writer **out,
reftable_buf_init(&wp->block_writer_data.last_key);
reftable_buf_init(&wp->last_key);
+ reftable_buf_init(&wp->scratch);
REFTABLE_CALLOC_ARRAY(wp->block, opts.block_size);
if (!wp->block) {
reftable_free(wp);
@@ -180,6 +181,7 @@ static void writer_release(struct reftable_writer *w)
w->block_writer = NULL;
writer_clear_index(w);
reftable_buf_release(&w->last_key);
+ reftable_buf_release(&w->scratch);
}
}
@@ -249,20 +251,19 @@ static int writer_index_hash(struct reftable_writer *w, struct reftable_buf *has
static int writer_add_record(struct reftable_writer *w,
struct reftable_record *rec)
{
- struct reftable_buf key = REFTABLE_BUF_INIT;
int err;
- err = reftable_record_key(rec, &key);
+ err = reftable_record_key(rec, &w->scratch);
if (err < 0)
goto done;
- if (reftable_buf_cmp(&w->last_key, &key) >= 0) {
+ if (reftable_buf_cmp(&w->last_key, &w->scratch) >= 0) {
err = REFTABLE_API_ERROR;
goto done;
}
reftable_buf_reset(&w->last_key);
- err = reftable_buf_add(&w->last_key, key.buf, key.len);
+ err = reftable_buf_add(&w->last_key, w->scratch.buf, w->scratch.len);
if (err < 0)
goto done;
@@ -312,7 +313,6 @@ static int writer_add_record(struct reftable_writer *w,
}
done:
- reftable_buf_release(&key);
return err;
}
@@ -325,7 +325,6 @@ int reftable_writer_add_ref(struct reftable_writer *w,
.ref = *ref
},
};
- struct reftable_buf buf = REFTABLE_BUF_INIT;
int err;
if (!ref->refname ||
@@ -340,24 +339,25 @@ int reftable_writer_add_ref(struct reftable_writer *w,
goto out;
if (!w->opts.skip_index_objects && reftable_ref_record_val1(ref)) {
- err = reftable_buf_add(&buf, (char *)reftable_ref_record_val1(ref),
+ reftable_buf_reset(&w->scratch);
+ err = reftable_buf_add(&w->scratch, (char *)reftable_ref_record_val1(ref),
hash_size(w->opts.hash_id));
if (err < 0)
goto out;
- err = writer_index_hash(w, &buf);
+ err = writer_index_hash(w, &w->scratch);
if (err < 0)
goto out;
}
if (!w->opts.skip_index_objects && reftable_ref_record_val2(ref)) {
- reftable_buf_reset(&buf);
- err = reftable_buf_add(&buf, reftable_ref_record_val2(ref),
+ reftable_buf_reset(&w->scratch);
+ err = reftable_buf_add(&w->scratch, reftable_ref_record_val2(ref),
hash_size(w->opts.hash_id));
if (err < 0)
goto out;
- err = writer_index_hash(w, &buf);
+ err = writer_index_hash(w, &w->scratch);
if (err < 0)
goto out;
}
@@ -365,7 +365,6 @@ int reftable_writer_add_ref(struct reftable_writer *w,
err = 0;
out:
- reftable_buf_release(&buf);
return err;
}
@@ -20,6 +20,8 @@ struct reftable_writer {
void *write_arg;
int pending_padding;
struct reftable_buf last_key;
+ /* Scratch buffer used to avoid allocations. */
+ struct reftable_buf scratch;
/* offset of next block to write. */
uint64_t next;
Both `writer_add_record()` and `reftable_writer_add_ref()` get executed
for every single ref record we're adding to the reftable writer. And as
both functions use a local buffer to write data, the allocations we
have to do here add up during larger transactions.

Refactor the code to use a scratch buffer that is part of the
`reftable_writer` itself such that we can reuse it. This significantly
reduces the number of allocations during large transactions, e.g. when
migrating refs from the "files" backend to the "reftable" backend.
Before this change:

    HEAP SUMMARY:
        in use at exit: 80,048 bytes in 49 blocks
      total heap usage: 5,032,171 allocs, 5,032,122 frees, 418,792,092 bytes allocated

After this change:

    HEAP SUMMARY:
        in use at exit: 80,048 bytes in 49 blocks
      total heap usage: 3,025,864 allocs, 3,025,815 frees, 372,746,291 bytes allocated

This also translates into a small speedup:

    Benchmark 1: migrate files:reftable (refcount = 1000000, revision = HEAD~)
      Time (mean ± σ):     827.2 ms ±  16.5 ms    [User: 689.4 ms, System: 124.9 ms]
      Range (min … max):   809.0 ms … 924.7 ms    50 runs

    Benchmark 2: migrate files:reftable (refcount = 1000000, revision = HEAD)
      Time (mean ± σ):     813.6 ms ±  11.6 ms    [User: 679.0 ms, System: 123.4 ms]
      Range (min … max):   786.7 ms … 833.5 ms    50 runs

    Summary
      migrate files:reftable (refcount = 1000000, revision = HEAD) ran
        1.02 ± 0.02 times faster than migrate files:reftable (refcount = 1000000, revision = HEAD~)

Signed-off-by: Patrick Steinhardt <ps@pks.im>
---
 reftable/writer.c | 23 +++++++++++------------
 reftable/writer.h |  2 ++
 2 files changed, 13 insertions(+), 12 deletions(-)
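
Note for reviewers (not part of the commit message): below is a minimal
sketch of the scratch-buffer pattern this patch applies. It is a
simplified illustration, not the actual reftable code; the
`example_add_old()`/`example_add_new()` names are made up, and only the
`reftable_buf` helpers already visible in the diff are used.

    /* Before: a local buffer is allocated and released on every call. */
    static int example_add_old(struct reftable_writer *w,
                               struct reftable_record *rec)
    {
            struct reftable_buf key = REFTABLE_BUF_INIT;
            int err;

            err = reftable_record_key(rec, &key);
            /* ... use `key` ... */

            /* The backing allocation is thrown away on each call. */
            reftable_buf_release(&key);
            return err;
    }

    /* After: the writer-owned scratch buffer is only reset between uses,
     * so repeated calls can reuse its existing allocation. It is released
     * once, in writer_release(). */
    static int example_add_new(struct reftable_writer *w,
                               struct reftable_record *rec)
    {
            reftable_buf_reset(&w->scratch);
            return reftable_record_key(rec, &w->scratch);
    }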