@@ -75,6 +75,16 @@ static int load_bloom_filter_from_graph(struct commit_graph *g,
check_bloom_offset(g, lex_pos - 1, start_index) < 0)
return 0;
+ if (end_index < start_index) {
+ warning("ignoring decreasing changed-path index offsets"
+ " (%"PRIuMAX" > %"PRIuMAX") for positions"
+ " %"PRIuMAX" and %"PRIuMAX" of %s",
+ (uintmax_t)start_index, (uintmax_t)end_index,
+ (uintmax_t)(lex_pos-1), (uintmax_t)lex_pos,
+ g->filename);
+ return 0;
+ }
+
filter->len = end_index - start_index;
filter->data = (unsigned char *)(g->chunk_bloom_data +
sizeof(unsigned char) * start_index +
@@ -441,4 +441,17 @@ test_expect_success 'Bloom reader notices too-small index chunk' '
test_cmp expect.err err
'
+test_expect_success 'Bloom reader notices out-of-order index offsets' '
+ # we do not know any real offsets, but we can pick
+ # something plausible; we should not get to the point of
+ # actually reading from the bogus offsets anyway.
+ corrupt_graph BIDX 4 0000000c00000005 &&
+ echo "warning: ignoring decreasing changed-path index offsets" \
+ "(12 > 5) for positions 1 and 2 of .git/objects/info/commit-graph" >expect.err &&
+ git -c core.commitGraph=false log -- A/B/file2 >expect.out &&
+ git -c core.commitGraph=true log -- A/B/file2 >out 2>err &&
+ test_cmp expect.out out &&
+ test_cmp expect.err err
+'
+
test_done
The BIDX chunk tells us the offsets at which each commit's Bloom filters
can be found in the BDAT chunk. We compute the length of each filter by
checking the offsets of neighbors and subtracting them.

If the offsets are out of order, then we'll get a negative length, which
we then store as a very large unsigned value. This can cause us to read
out-of-bounds memory, as we access the hash data modulo
"filter->len * BITS_PER_WORD".

We can easily detect this case when loading the individual filters.

Signed-off-by: Jeff King <peff@peff.net>
---
 bloom.c              | 10 ++++++++++
 t/t4216-log-bloom.sh | 13 +++++++++++++
 2 files changed, 23 insertions(+)
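
As a side note for reviewers, here is a minimal standalone sketch of the
unsigned underflow the patch guards against. It is not the bloom.c code
path itself; the uint32_t types and the literal offsets are assumptions
chosen only to mirror the corrupted values the test writes:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* offsets as they might be read from a corrupted BIDX chunk */
            uint32_t start_index = 12;
            uint32_t end_index = 5;

            /*
             * Unsigned subtraction wraps around instead of going negative,
             * so the "length" becomes a huge value (4294967289 here).
             * Indexing the BDAT data with anything derived from it can
             * read far out of bounds.
             */
            uint32_t len = end_index - start_index;
            printf("len = %" PRIu32 "\n", len);

            /* the fix: reject the decreasing pair up front */
            if (end_index < start_index)
                    fprintf(stderr, "ignoring decreasing offsets (%" PRIu32
                            " > %" PRIu32 ")\n", start_index, end_index);

            return 0;
    }

For what it's worth, the test's "corrupt_graph BIDX 4 0000000c00000005"
appears to overwrite two consecutive big-endian 4-byte offsets (12, then 5)
starting at byte 4 of the BIDX chunk, i.e. the entries for positions 1 and
2, which is exactly the decreasing pair the expected warning reports.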