diff mbox series

[2/8] reftable/stack: test compaction with already-locked tables

Message ID 123fb9d80eecbd3690280991e0415cbb718b7202.1722435214.git.ps@pks.im (mailing list archive)
State Superseded
Headers show
Series reftable: improvements and fixes for compaction | expand

Commit Message

Patrick Steinhardt July 31, 2024, 2:14 p.m. UTC
We're lacking test coverage for compacting tables when some of the
tables that we are about to compact are locked. Add two tests that
exercise this, one for auto-compaction and one for full compaction.

Signed-off-by: Patrick Steinhardt <ps@pks.im>
---
 reftable/stack_test.c | 103 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 103 insertions(+)

Comments

Junio C Hamano Aug. 2, 2024, 9:05 p.m. UTC | #1
Patrick Steinhardt <ps@pks.im> writes:

> +static void test_reftable_stack_auto_compaction_with_locked_tables(void)
> +{
> +	struct reftable_write_options opts = {
> +		.disable_auto_compact = 1,
> +	};
> +	struct reftable_stack *st = NULL;
> +	struct strbuf buf = STRBUF_INIT;
> +	char *dir = get_tmp_dir(__LINE__);
> +	int err;
> +
> +	err = reftable_new_stack(&st, dir, &opts);
> +	EXPECT_ERR(err);
> +
> +	for (size_t i = 0; i < 5; i++) {
> +		struct reftable_ref_record ref = {
> +			.update_index = reftable_stack_next_update_index(st),
> +			.value_type = REFTABLE_REF_VAL1,
> +			.value.val1 = { i },
> +		};

As val1 is an array of unsigned char, i cannot reasonably go beyond
255, but that is perfectly fine.  We are preparing 5 original tables
to compact, and that might grow to 17 tables over time, but 255 ought
to be more than enough.

> +
> +		strbuf_reset(&buf);
> +		strbuf_addf(&buf, "refs/heads/branch-%04" PRIuMAX, (uintmax_t) i);

Yet we are prepared to handle i that is beyond any usual integer ;-)

I am tempted to suggest using the bog-standard int for everything
for the sake of consistency within this loop, but it does not matter
all that much in a standalone test program ;-)

> +		ref.refname = buf.buf;
> +
> +		err = reftable_stack_add(st, &write_test_ref, &ref);
> +		EXPECT_ERR(err);
> +	}
> +	EXPECT(st->merged->stack_len == 5);
> +
> +	/*
> +	 * Given that all tables we have written should be roughly the same
> +	 * size, we expect that auto-compaction will want to compact all of the
> +	 * tables. Locking any of the tables will keep it from doing so.
> +	 */
> +	strbuf_reset(&buf);
> +	strbuf_addf(&buf, "%s/%s.lock", dir, st->readers[2]->name);
> +	write_file_buf(buf.buf, "", 0);

OK.  [2] is just a random number pulled out of 0..5?

> +static void test_reftable_stack_compaction_with_locked_tables(void)
> +{
> +	struct reftable_write_options opts = {
> +		.disable_auto_compact = 1,
> +	};
> +	struct reftable_stack *st = NULL;
> +	struct strbuf buf = STRBUF_INIT;
> +	char *dir = get_tmp_dir(__LINE__);
> +	int err;
> +
> +	err = reftable_new_stack(&st, dir, &opts);
> +	EXPECT_ERR(err);
> +
> +	for (size_t i = 0; i < 3; i++) {
> +...
> +	}
> +	EXPECT(st->merged->stack_len == 3);

Hmph, this somehow looks familiar.  The only difference is how many
tables are compacted with which one locked, and whether it is
compact_all() or auto_compact() that triggers the compaction
behaviour, right?

I wonder if we want to factor out the commonality into a shared
function, or it is too much trouble only for two duplicates and we
can worry about it when we were about to add the third one?

Thanks.
Patrick Steinhardt Aug. 5, 2024, 12:11 p.m. UTC | #2
On Fri, Aug 02, 2024 at 02:05:43PM -0700, Junio C Hamano wrote:
> Patrick Steinhardt <ps@pks.im> writes:
> > +		ref.refname = buf.buf;
> > +
> > +		err = reftable_stack_add(st, &write_test_ref, &ref);
> > +		EXPECT_ERR(err);
> > +	}
> > +	EXPECT(st->merged->stack_len == 5);
> > +
> > +	/*
> > +	 * Given that all tables we have written should be roughly the same
> > +	 * size, we expect that auto-compaction will want to compact all of the
> > +	 * tables. Locking any of the tables will keep it from doing so.
> > +	 */
> > +	strbuf_reset(&buf);
> > +	strbuf_addf(&buf, "%s/%s.lock", dir, st->readers[2]->name);
> > +	write_file_buf(buf.buf, "", 0);
> 
> OK.  [2] is just a random number pulled out of 0..5?

Not quite as random. It is picked such that we can demonstrate in a
follow-up patch that auto-compaction knows to pack tables 4 and 5, while
leaving tables 1 to 3 intact. This only becomes important in a follow-up
patch where we change the backing logic.

> > +static void test_reftable_stack_compaction_with_locked_tables(void)
> > +{
> > +	struct reftable_write_options opts = {
> > +		.disable_auto_compact = 1,
> > +	};
> > +	struct reftable_stack *st = NULL;
> > +	struct strbuf buf = STRBUF_INIT;
> > +	char *dir = get_tmp_dir(__LINE__);
> > +	int err;
> > +
> > +	err = reftable_new_stack(&st, dir, &opts);
> > +	EXPECT_ERR(err);
> > +
> > +	for (size_t i = 0; i < 3; i++) {
> > +...
> > +	}
> > +	EXPECT(st->merged->stack_len == 3);
> 
> Hmph, this somehow looks familiar.  The only difference is how many
> tables are compacted with which one locked, and whether it is
> compact_all() or auto_compact() that triggers the compaction
> behaviour, right?
> 
> I wonder if we want to factor out the commonality into a shared
> function, or it is too much trouble only for two duplicates and we
> can worry about it when we were about to add the third one?

I was also briefly thinking the same, but then didn't follow through
with that thought. In fact, there's multiple places in this file where
we populate a stack with N tables. I think it should be easy enough to
pull this into a function indeed.

Patrick
diff mbox series

Patch

diff --git a/reftable/stack_test.c b/reftable/stack_test.c
index e3c11e6a6e..04526c6604 100644
--- a/reftable/stack_test.c
+++ b/reftable/stack_test.c
@@ -863,6 +863,58 @@  static void test_reftable_stack_auto_compaction(void)
 	clear_dir(dir);
 }
 
+static void test_reftable_stack_auto_compaction_with_locked_tables(void)
+{
+	struct reftable_write_options opts = {
+		.disable_auto_compact = 1,
+	};
+	struct reftable_stack *st = NULL;
+	struct strbuf buf = STRBUF_INIT;
+	char *dir = get_tmp_dir(__LINE__);
+	int err;
+
+	err = reftable_new_stack(&st, dir, &opts);
+	EXPECT_ERR(err);
+
+	for (size_t i = 0; i < 5; i++) {
+		struct reftable_ref_record ref = {
+			.update_index = reftable_stack_next_update_index(st),
+			.value_type = REFTABLE_REF_VAL1,
+			.value.val1 = { i },
+		};
+
+		strbuf_reset(&buf);
+		strbuf_addf(&buf, "refs/heads/branch-%04" PRIuMAX, (uintmax_t) i);
+		ref.refname = buf.buf;
+
+		err = reftable_stack_add(st, &write_test_ref, &ref);
+		EXPECT_ERR(err);
+	}
+	EXPECT(st->merged->stack_len == 5);
+
+	/*
+	 * Given that all tables we have written should be roughly the same
+	 * size, we expect that auto-compaction will want to compact all of the
+	 * tables. Locking any of the tables will keep it from doing so.
+	 */
+	strbuf_reset(&buf);
+	strbuf_addf(&buf, "%s/%s.lock", dir, st->readers[2]->name);
+	write_file_buf(buf.buf, "", 0);
+
+	/*
+	 * Ideally, we'd handle the situation where any of the tables is locked
+	 * gracefully. We don't (yet) do this though and thus fail.
+	 */
+	err = reftable_stack_auto_compact(st);
+	EXPECT(err == REFTABLE_LOCK_ERROR);
+	EXPECT(st->stats.failures == 1);
+	EXPECT(st->merged->stack_len == 5);
+
+	reftable_stack_destroy(st);
+	strbuf_release(&buf);
+	clear_dir(dir);
+}
+
 static void test_reftable_stack_add_performs_auto_compaction(void)
 {
 	struct reftable_write_options opts = { 0 };
@@ -911,6 +963,55 @@  static void test_reftable_stack_add_performs_auto_compaction(void)
 	clear_dir(dir);
 }
 
+static void test_reftable_stack_compaction_with_locked_tables(void)
+{
+	struct reftable_write_options opts = {
+		.disable_auto_compact = 1,
+	};
+	struct reftable_stack *st = NULL;
+	struct strbuf buf = STRBUF_INIT;
+	char *dir = get_tmp_dir(__LINE__);
+	int err;
+
+	err = reftable_new_stack(&st, dir, &opts);
+	EXPECT_ERR(err);
+
+	for (size_t i = 0; i < 3; i++) {
+		struct reftable_ref_record ref = {
+			.update_index = reftable_stack_next_update_index(st),
+			.value_type = REFTABLE_REF_VAL1,
+			.value.val1 = { i },
+		};
+
+		strbuf_reset(&buf);
+		strbuf_addf(&buf, "refs/heads/branch-%04" PRIuMAX, (uintmax_t) i);
+		ref.refname = buf.buf;
+
+		err = reftable_stack_add(st, &write_test_ref, &ref);
+		EXPECT_ERR(err);
+	}
+	EXPECT(st->merged->stack_len == 3);
+
+	/* Lock one of the tables that we're about to compact. */
+	strbuf_reset(&buf);
+	strbuf_addf(&buf, "%s/%s.lock", dir, st->readers[1]->name);
+	write_file_buf(buf.buf, "", 0);
+
+	/*
+	 * Compaction is expected to fail given that we were not able to
+	 * compact all tables.
+	 */
+	err = reftable_stack_compact_all(st, NULL);
+	EXPECT(err == REFTABLE_LOCK_ERROR);
+	/* TODO: this is wrong, we should get notified about the failure. */
+	EXPECT(st->stats.failures == 0);
+	EXPECT(st->merged->stack_len == 3);
+
+	reftable_stack_destroy(st);
+	strbuf_release(&buf);
+	clear_dir(dir);
+}
+
 static void test_reftable_stack_compaction_concurrent(void)
 {
 	struct reftable_write_options opts = { 0 };
@@ -1016,9 +1117,11 @@  int stack_test_main(int argc, const char *argv[])
 	RUN_TEST(test_reftable_stack_add);
 	RUN_TEST(test_reftable_stack_add_one);
 	RUN_TEST(test_reftable_stack_auto_compaction);
+	RUN_TEST(test_reftable_stack_auto_compaction_with_locked_tables);
 	RUN_TEST(test_reftable_stack_add_performs_auto_compaction);
 	RUN_TEST(test_reftable_stack_compaction_concurrent);
 	RUN_TEST(test_reftable_stack_compaction_concurrent_clean);
+	RUN_TEST(test_reftable_stack_compaction_with_locked_tables);
 	RUN_TEST(test_reftable_stack_hash_id);
 	RUN_TEST(test_reftable_stack_lock_failure);
 	RUN_TEST(test_reftable_stack_log_normalize);