diff --git a/reftable/basics.h b/reftable/basics.h
@@ -129,6 +129,16 @@ char *reftable_strdup(const char *str);
REFTABLE_REALLOC_ARRAY(x, alloc); \
} \
} while (0)
+
+#define REFTABLE_ALLOC_GROW_OR_NULL(x, nr, alloc) do { \
+	void *reftable_alloc_grow_or_null_orig_ptr = (x); \
+	REFTABLE_ALLOC_GROW((x), (nr), (alloc)); \
+	if (!(x)) { \
+		reftable_free(reftable_alloc_grow_or_null_orig_ptr); \
+		alloc = 0; \
+	} \
+} while (0)
+
#define REFTABLE_FREE_AND_NULL(p) do { reftable_free(p); (p) = NULL; } while (0)
#ifndef REFTABLE_ALLOW_BANNED_ALLOCATORS
diff --git a/reftable/block.c b/reftable/block.c
@@ -53,7 +53,8 @@ static int block_writer_register_restart(struct block_writer *w, int n,
if (2 + 3 * rlen + n > w->block_size - w->next)
return -1;
if (is_restart) {
- REFTABLE_ALLOC_GROW(w->restarts, w->restart_len + 1, w->restart_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(w->restarts, w->restart_len + 1,
+ w->restart_cap);
if (!w->restarts)
return REFTABLE_OUT_OF_MEMORY_ERROR;
w->restarts[w->restart_len++] = w->next;
@@ -176,7 +177,8 @@ int block_writer_finish(struct block_writer *w)
* is guaranteed to return `Z_STREAM_END`.
*/
compressed_len = deflateBound(w->zstream, src_len);
- REFTABLE_ALLOC_GROW(w->compressed, compressed_len, w->compressed_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(w->compressed, compressed_len,
+ w->compressed_cap);
if (!w->compressed) {
ret = REFTABLE_OUT_OF_MEMORY_ERROR;
return ret;
@@ -235,8 +237,8 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
uLong src_len = block->len - block_header_skip;
/* Log blocks specify the *uncompressed* size in their header. */
- REFTABLE_ALLOC_GROW(br->uncompressed_data, sz,
- br->uncompressed_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(br->uncompressed_data, sz,
+ br->uncompressed_cap);
if (!br->uncompressed_data) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto done;
diff --git a/reftable/pq.c b/reftable/pq.c
@@ -49,7 +49,7 @@ int merged_iter_pqueue_add(struct merged_iter_pqueue *pq, const struct pq_entry
{
size_t i = 0;
- REFTABLE_ALLOC_GROW(pq->heap, pq->len + 1, pq->cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(pq->heap, pq->len + 1, pq->cap);
if (!pq->heap)
return REFTABLE_OUT_OF_MEMORY_ERROR;
pq->heap[pq->len++] = *e;
diff --git a/reftable/record.c b/reftable/record.c
@@ -246,8 +246,8 @@ static int reftable_ref_record_copy_from(void *rec, const void *src_rec,
if (src->refname) {
size_t refname_len = strlen(src->refname);
- REFTABLE_ALLOC_GROW(ref->refname, refname_len + 1,
- ref->refname_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(ref->refname, refname_len + 1,
+ ref->refname_cap);
if (!ref->refname) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto out;
@@ -385,7 +385,7 @@ static int reftable_ref_record_decode(void *rec, struct reftable_buf key,
SWAP(r->refname, refname);
SWAP(r->refname_cap, refname_cap);
- REFTABLE_ALLOC_GROW(r->refname, key.len + 1, r->refname_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(r->refname, key.len + 1, r->refname_cap);
if (!r->refname) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto done;
@@ -839,7 +839,7 @@ static int reftable_log_record_decode(void *rec, struct reftable_buf key,
if (key.len <= 9 || key.buf[key.len - 9] != 0)
return REFTABLE_FORMAT_ERROR;
- REFTABLE_ALLOC_GROW(r->refname, key.len - 8, r->refname_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(r->refname, key.len - 8, r->refname_cap);
if (!r->refname) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto done;
@@ -947,8 +947,8 @@ static int reftable_log_record_decode(void *rec, struct reftable_buf key,
}
string_view_consume(&in, n);
- REFTABLE_ALLOC_GROW(r->value.update.message, scratch->len + 1,
- r->value.update.message_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(r->value.update.message, scratch->len + 1,
+ r->value.update.message_cap);
if (!r->value.update.message) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto done;
diff --git a/reftable/stack.c b/reftable/stack.c
@@ -317,7 +317,9 @@ static int reftable_stack_reload_once(struct reftable_stack *st,
* thus need to keep them alive here, which we
* do by bumping their refcount.
*/
- REFTABLE_ALLOC_GROW(reused, reused_len + 1, reused_alloc);
+ REFTABLE_ALLOC_GROW_OR_NULL(reused,
+ reused_len + 1,
+ reused_alloc);
if (!reused) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto done;
@@ -949,8 +951,8 @@ int reftable_addition_add(struct reftable_addition *add,
if (err < 0)
goto done;
- REFTABLE_ALLOC_GROW(add->new_tables, add->new_tables_len + 1,
- add->new_tables_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(add->new_tables, add->new_tables_len + 1,
+ add->new_tables_cap);
if (!add->new_tables) {
err = REFTABLE_OUT_OF_MEMORY_ERROR;
goto done;
diff --git a/reftable/writer.c b/reftable/writer.c
@@ -254,7 +254,8 @@ static int writer_index_hash(struct reftable_writer *w, struct reftable_buf *has
if (key->offset_len > 0 && key->offsets[key->offset_len - 1] == off)
return 0;
- REFTABLE_ALLOC_GROW(key->offsets, key->offset_len + 1, key->offset_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(key->offsets, key->offset_len + 1,
+ key->offset_cap);
if (!key->offsets)
return REFTABLE_OUT_OF_MEMORY_ERROR;
key->offsets[key->offset_len++] = off;
@@ -820,7 +821,7 @@ static int writer_flush_nonempty_block(struct reftable_writer *w)
* Note that this also applies when flushing index blocks, in which
* case we will end up with a multi-level index.
*/
- REFTABLE_ALLOC_GROW(w->index, w->index_len + 1, w->index_cap);
+ REFTABLE_ALLOC_GROW_OR_NULL(w->index, w->index_len + 1, w->index_cap);
if (!w->index)
return REFTABLE_OUT_OF_MEMORY_ERROR;

When realloc(3) fails, it returns NULL and keeps the original allocation
intact. REFTABLE_ALLOC_GROW overwrites both the original pointer and the
allocation count variable in that case, simultaneously leaking the original
allocation and misrepresenting the number of storable items.

parse_names() and reftable_buf_add() avoid leaking by restoring the original
pointer value on failure, but all other callers seem to be OK with losing
the old allocation. Add a new variant of the macro,
REFTABLE_ALLOC_GROW_OR_NULL, which plugs the leak and zeros the allocation
counter. Use it for those callers.

Signed-off-by: René Scharfe <l.s.r@web.de>
---
 reftable/basics.h | 10 ++++++++++
 reftable/block.c  | 10 ++++++----
 reftable/pq.c     |  2 +-
 reftable/record.c | 12 ++++++------
 reftable/stack.c  |  8 +++++---
 reftable/writer.c |  5 +++--
 6 files changed, 31 insertions(+), 16 deletions(-)

--
2.47.1
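
For illustration only, not part of the patch: a minimal caller sketch of the
pattern the converted call sites follow. The struct and function names here
are hypothetical, and the include paths assume the Git source tree layout,
where reftable/basics.h provides the macro and reftable_free() and
reftable/reftable-error.h provides REFTABLE_OUT_OF_MEMORY_ERROR.

#include <stddef.h>
#include <stdint.h>
#include "reftable/basics.h"
#include "reftable/reftable-error.h"

struct example_array {
	uint64_t *items;
	size_t len, cap;
};

/*
 * Append one value, growing the backing array as needed. On realloc
 * failure the macro frees the old buffer, sets arr->items to NULL and
 * resets arr->cap to 0, so checking the pointer is all the caller
 * needs to do.
 */
static int example_append(struct example_array *arr, uint64_t value)
{
	REFTABLE_ALLOC_GROW_OR_NULL(arr->items, arr->len + 1, arr->cap);
	if (!arr->items)
		return REFTABLE_OUT_OF_MEMORY_ERROR;
	arr->items[arr->len++] = value;
	return 0;
}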