
[2/3] mem-pool: use more standard initialization and finalization

Message ID f13a52055cd975d457e0593cbabb70897e78024b.1597374135.git.gitgitgadget@gmail.com (mailing list archive)
State Superseded
Series Extend and add a little more generalization to the mem_pool API

Commit Message

Elijah Newren via GitGitGadget Aug. 14, 2020, 3:02 a.m. UTC
From: Elijah Newren <newren@gmail.com>

A typical memory type, such as strbuf, hashmap, or string_list, can be
stored on the stack or embedded within another structure.  mem_pool
cannot be, because of how mem_pool_init() and mem_pool_discard() are
written.  mem_pool_init() does essentially the following (simplified
for purposes of explanation here):

    void mem_pool_init(struct mem_pool **pool...)
    {
        *pool = xcalloc(1, sizeof(*pool));

It seems weird to require that mem_pools can only be accessed through a
pointer.  It also seems slightly dangerous: unlike strbuf_release() or
strbuf_reset() or string_list_clear(), all of which put the data
structure into a state where it can be re-used after the call,
mem_pool_discard(pool) will leave pool pointing at free'd memory.
read-cache (and split-index) are the only current users of mem_pools,
and they haven't fallen into a use-after-free mistake here, but it seems
likely to be problematic for future users especially since several of
the current callers of mem_pool_init() will only call it when the
mem_pool* is not already allocated (i.e. is NULL).
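
For illustration, a minimal hypothetical caller of the current API,
showing how easily the dangling pointer can be misused:

    struct mem_pool *pool = NULL;
    void *item;

    mem_pool_init(&pool, 0);            /* current API: allocates *pool itself */
    item = mem_pool_alloc(pool, 64);
    mem_pool_discard(pool, 0);          /* current API: also free()s the pool struct */
    item = mem_pool_alloc(pool, 64);    /* use-after-free: pool is left dangling */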

This type of mechanism also prevents finding synchronization
points where one can free existing memory and then resume more
operations.  It would be natural at such points to run something like
    mem_pool_discard(pool...);
and, if necessary,
    mem_pool_init(&pool...);
and then continue using the pool.  However, this fails badly
if several objects had a copy of the value of pool from before these
commands; in such a case, those objects won't get the updated value of
pool that mem_pool_init() overwrites pool with and they'll all instead
be reading and writing from free'd memory.
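
Continuing the sketch above, if other (hypothetical) structures had
cached the value of pool:

    struct user { struct mem_pool *pool; };
    struct user a = { pool };   /* a and b cache the current value of pool */
    struct user b = { pool };

    mem_pool_discard(pool, 0);
    pool = NULL;                /* needed: mem_pool_init() is a no-op if *pool is non-NULL */
    mem_pool_init(&pool, 0);    /* pool now points at a brand-new allocation */

    /* a.pool and b.pool still hold the old, freed address */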

Modify mem_pool_init()/mem_pool_discard() to behave more like
   strbuf_init()/strbuf_release()
or
   string_list_init()/string_list_clear()
In particular: (1) make mem_pool_init() just take a mem_pool* and have
it only worry about allocating struct mp_blocks, not the struct mem_pool
itself, (2) make mem_pool_discard() free the memory that the pool was
responsible for, but leave it in a state where it can be used to
allocate more memory afterward (without the need to call mem_pool_init()
again).
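
With this, a mem_pool can be used much like the other types mentioned
above.  A small sketch of the resulting calling pattern:

    struct mem_pool pool;   /* on the stack, or embedded in another struct */
    void *a, *b;

    mem_pool_init(&pool, 0);
    a = mem_pool_alloc(&pool, 64);
    mem_pool_discard(&pool, 0);      /* frees all blocks; pool itself stays valid */
    b = mem_pool_alloc(&pool, 64);   /* no second mem_pool_init() needed */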

Signed-off-by: Elijah Newren <newren@gmail.com>
---
 mem-pool.c    | 20 +++++++-------------
 mem-pool.h    |  4 ++--
 read-cache.c  | 21 +++++++++++++--------
 split-index.c |  6 ++++--
 4 files changed, 26 insertions(+), 25 deletions(-)

Comments

Junio C Hamano Aug. 14, 2020, 4:38 a.m. UTC | #1
"Elijah Newren via GitGitGadget" <gitgitgadget@gmail.com> writes:

> written.  mem_pool_init() does essentially the following (simplified
> for purposes of explanation here):
>
>     void mem_pool_init(struct mem_pool **pool...)
>     {
>         *pool = xcalloc(1, sizeof(*pool));
>
> It seems weird to require that mem_pools can only be accessed through a
> pointer.

Yup, if the _init() were to also allocate, I would expect it to be
more like

	struct mem_pool *mem_pool_create(...)
	{
		struct mem_pool *pool = xcalloc(1, sizeof(*pool));
		...
		return pool;
	}

It also is OK to let the caller pass an uninitialized region of memory,
which is how we usually arrange _init() to work.  It seems that that
is the approach this patch takes.

> -void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size)
> +void mem_pool_init(struct mem_pool *mem_pool, size_t initial_size)
>  {
> -	struct mem_pool *pool;
> -
> -	if (*mem_pool)
> -		return;
> -
> -	pool = xcalloc(1, sizeof(*pool));
> -
> -	pool->block_alloc = BLOCK_GROWTH_SIZE;
> +	mem_pool->mp_block = NULL;
> +	mem_pool->pool_alloc = 0;
> +	mem_pool->block_alloc = BLOCK_GROWTH_SIZE;
>  
>  	if (initial_size > 0)
> -		mem_pool_alloc_block(pool, initial_size, NULL);
> -
> -	*mem_pool = pool;
> +		mem_pool_alloc_block(mem_pool, initial_size, NULL);

It used to be that this function both knew and took control of all
the bits in *pool memory by using xcalloc().  Any field the function
assigned to of course got explicitly the value the function wanted
it to have, and all other fields were left to 0.

It may still happen to be the case (i.e. the assignments we see in
this function cover all the fields defined), but don't we need some
provision to make sure it will hold true in the future?

Starting it with "memset(pool, 0, sizeof(*pool))" would be one way.
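
That is, something like (just a sketch of the idea, not tested):

	void mem_pool_init(struct mem_pool *pool, size_t initial_size)
	{
		memset(pool, 0, sizeof(*pool));
		pool->block_alloc = BLOCK_GROWTH_SIZE;

		if (initial_size > 0)
			mem_pool_alloc_block(pool, initial_size, NULL);
	}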

You'd standardize to s/mem_pool/pool/ in [3/3]; shouldn't this be
written with pool to begin with, instead of reintroducing mem_pool
that is of different type from the original?

> -	if (!*pool_ptr)
> -		mem_pool_init(pool_ptr, 0);
> +	if (!*pool_ptr) {
> +		*pool_ptr = xmalloc(sizeof(**pool_ptr));
> +		mem_pool_init(*pool_ptr, 0);

This one gives an uninitialized chunk of memory to the _init(); an
example of the caller that the earlier comment may matter.

> +	istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
>  	if (istate->version == 4) {
> -		mem_pool_init(&istate->ce_mem_pool,
> +		mem_pool_init(istate->ce_mem_pool,
>  				estimate_cache_size_from_compressed(istate->cache_nr));
>  	} else {
> -		mem_pool_init(&istate->ce_mem_pool,
> +		mem_pool_init(istate->ce_mem_pool,
>  				estimate_cache_size(mmap_size, istate->cache_nr));
>  	}

Likewise.

Thanks.

Patch

diff --git a/mem-pool.c b/mem-pool.c
index 3a8c54d9df..b7d789823e 100644
--- a/mem-pool.c
+++ b/mem-pool.c
@@ -33,21 +33,14 @@  static struct mp_block *mem_pool_alloc_block(struct mem_pool *mem_pool, size_t b
 	return p;
 }
 
-void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size)
+void mem_pool_init(struct mem_pool *mem_pool, size_t initial_size)
 {
-	struct mem_pool *pool;
-
-	if (*mem_pool)
-		return;
-
-	pool = xcalloc(1, sizeof(*pool));
-
-	pool->block_alloc = BLOCK_GROWTH_SIZE;
+	mem_pool->mp_block = NULL;
+	mem_pool->pool_alloc = 0;
+	mem_pool->block_alloc = BLOCK_GROWTH_SIZE;
 
 	if (initial_size > 0)
-		mem_pool_alloc_block(pool, initial_size, NULL);
-
-	*mem_pool = pool;
+		mem_pool_alloc_block(mem_pool, initial_size, NULL);
 }
 
 void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory)
@@ -66,7 +59,8 @@  void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory)
 		free(block_to_free);
 	}
 
-	free(mem_pool);
+	mem_pool->mp_block = NULL;
+	mem_pool->pool_alloc = 0;
 }
 
 void *mem_pool_alloc(struct mem_pool *mem_pool, size_t len)
diff --git a/mem-pool.h b/mem-pool.h
index fcaa2d462b..30b7a8c03b 100644
--- a/mem-pool.h
+++ b/mem-pool.h
@@ -24,10 +24,10 @@  struct mem_pool {
 /*
  * Initialize mem_pool with specified initial size.
  */
-void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size);
+void mem_pool_init(struct mem_pool *mem_pool, size_t initial_size);
 
 /*
- * Discard a memory pool and free all the memory it is responsible for.
+ * Discard all the memory the memory pool is responsible for.
  */
 void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory);
 
diff --git a/read-cache.c b/read-cache.c
index 8ed1c29b54..fa291cdbee 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -89,8 +89,10 @@  static struct mem_pool *find_mem_pool(struct index_state *istate)
 	else
 		pool_ptr = &istate->ce_mem_pool;
 
-	if (!*pool_ptr)
-		mem_pool_init(pool_ptr, 0);
+	if (!*pool_ptr) {
+		*pool_ptr = xmalloc(sizeof(**pool_ptr));
+		mem_pool_init(*pool_ptr, 0);
+	}
 
 	return *pool_ptr;
 }
@@ -2006,11 +2008,12 @@  static unsigned long load_all_cache_entries(struct index_state *istate,
 {
 	unsigned long consumed;
 
+	istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
 	if (istate->version == 4) {
-		mem_pool_init(&istate->ce_mem_pool,
+		mem_pool_init(istate->ce_mem_pool,
 				estimate_cache_size_from_compressed(istate->cache_nr));
 	} else {
-		mem_pool_init(&istate->ce_mem_pool,
+		mem_pool_init(istate->ce_mem_pool,
 				estimate_cache_size(mmap_size, istate->cache_nr));
 	}
 
@@ -2070,7 +2073,8 @@  static unsigned long load_cache_entries_threaded(struct index_state *istate, con
 	if (istate->name_hash_initialized)
 		BUG("the name hash isn't thread safe");
 
-	mem_pool_init(&istate->ce_mem_pool, 0);
+	istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
+	mem_pool_init(istate->ce_mem_pool, 0);
 
 	/* ensure we have no more threads than we have blocks to process */
 	if (nr_threads > ieot->nr)
@@ -2097,11 +2101,12 @@  static unsigned long load_cache_entries_threaded(struct index_state *istate, con
 		nr = 0;
 		for (j = p->ieot_start; j < p->ieot_start + p->ieot_blocks; j++)
 			nr += p->ieot->entries[j].nr;
+		p->ce_mem_pool = xmalloc(sizeof(*p->ce_mem_pool));
 		if (istate->version == 4) {
-			mem_pool_init(&p->ce_mem_pool,
+			mem_pool_init(p->ce_mem_pool,
 				estimate_cache_size_from_compressed(nr));
 		} else {
-			mem_pool_init(&p->ce_mem_pool,
+			mem_pool_init(p->ce_mem_pool,
 				estimate_cache_size(mmap_size, nr));
 		}
 
@@ -2358,7 +2363,7 @@  int discard_index(struct index_state *istate)
 
 	if (istate->ce_mem_pool) {
 		mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
-		istate->ce_mem_pool = NULL;
+		FREE_AND_NULL(istate->ce_mem_pool);
 	}
 
 	return 0;
diff --git a/split-index.c b/split-index.c
index e6154e4ea9..c0e8ad670d 100644
--- a/split-index.c
+++ b/split-index.c
@@ -79,8 +79,10 @@  void move_cache_to_base_index(struct index_state *istate)
 	if (si->base &&
 		si->base->ce_mem_pool) {
 
-		if (!istate->ce_mem_pool)
-			mem_pool_init(&istate->ce_mem_pool, 0);
+		if (!istate->ce_mem_pool) {
+			istate->ce_mem_pool = xmalloc(sizeof(struct mem_pool));
+			mem_pool_init(istate->ce_mem_pool, 0);
+		}
 
 		mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
 	}