
[v7,14/20] trace-cmd library: Add logic for in-memory decompression

Message ID 20220119082715.245846-15-tz.stoyanov@gmail.com (mailing list archive)
State Superseded
Series Trace file version 7 - compression

Commit Message

Tzvetomir Stoyanov (VMware) Jan. 19, 2022, 8:27 a.m. UTC
There are two approaches to reading compressed trace data:
 - use a temporary file to decompress the entire trace data before reading
 - use in-memory decompression of only the requested trace data chunk
In-memory decompression seems to be more efficient, but selecting which
approach to use depends on the use case.
A compression chunk consists of multiple trace pages, which is why a small
cache of uncompressed chunks is implemented. A chunk stays in the cache
as long as there are pages holding a reference to it.
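
A minimal sketch of that cache lifecycle is shown below. The struct is
reconstructed from how its fields are used in this patch; the helper
names (cache_get_page/cache_put_page) are hypothetical and only
illustrate the reference-count rules of read_zpage() and free_zpage():

struct zchunk_cache {
	struct list_head list;			/* linked in cpu_data->compress.cache */
	struct tracecmd_compress_chunk *chunk;	/* compressed chunk descriptor */
	void *map;				/* uncompressed chunk data */
	int ref;				/* pages currently mapped from this chunk */
};

/* A page mapped from a cached chunk takes a reference ... */
static void *cache_get_page(struct zchunk_cache *cache, off64_t offset, int page_size)
{
	int pindex = (offset - cache->chunk->offset) / page_size;

	cache->ref++;
	return cache->map + (pindex * page_size);
}

/* ... and drops it on free; at ref == 0 the chunk leaves the cache */
static void cache_put_page(struct zchunk_cache *cache)
{
	if (--cache->ref)
		return;
	list_del(&cache->list);
	free(cache->map);
	free(cache);
}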

Signed-off-by: Tzvetomir Stoyanov (VMware) <tz.stoyanov@gmail.com>
---
 lib/trace-cmd/trace-input.c | 110 ++++++++++++++++++++++++++++++++++++
 1 file changed, 110 insertions(+)

Comments

Steven Rostedt Jan. 25, 2022, 6:30 p.m. UTC | #1
On Wed, 19 Jan 2022 10:27:09 +0200
"Tzvetomir Stoyanov (VMware)" <tz.stoyanov@gmail.com> wrote:

> There are two approaches to reading compressed trace data:
>  - use a temporary file to decompress the entire trace data before reading
>  - use in-memory decompression of only the requested trace data chunk
> In-memory decompression seems to be more efficient, but selecting which
> approach to use depends on the use case.
> A compression chunk consists of multiple trace pages, which is why a small
> cache of uncompressed chunks is implemented. A chunk stays in the cache
> as long as there are pages holding a reference to it.
> 
> Signed-off-by: Tzvetomir Stoyanov (VMware) <tz.stoyanov@gmail.com>
> ---
>  lib/trace-cmd/trace-input.c | 110 ++++++++++++++++++++++++++++++++++++
>  1 file changed, 110 insertions(+)
> 
> diff --git a/lib/trace-cmd/trace-input.c b/lib/trace-cmd/trace-input.c
> index 45a87a63..f5241e4b 100644
> --- a/lib/trace-cmd/trace-input.c
> +++ b/lib/trace-cmd/trace-input.c
> @@ -29,6 +29,9 @@
>  
>  #define COMMIT_MASK ((1 << 27) - 1)
>  
> +/* force uncompressing in memory */
> +#define INMEMORY_DECOMPRESS
> +
>  /* for debugging read instead of mmap */
>  static int force_read = 0;
>  
> @@ -1257,6 +1260,105 @@ static void free_page_map(struct page_map *page_map)
>  	free(page_map);
>  }
>  
> +#define CHUNK_CHECK_OFFSET(C, O)	((O) >= (C)->offset && (O) < ((C)->offset + (C)->size))

space

> +static struct tracecmd_compress_chunk *get_zchunk(struct cpu_data *cpu, off64_t offset)
> +{
> +	struct cpu_zdata *cpuz = &cpu->compress;
> +	int min, mid, max;
> +
> +	if (!cpuz->chunks)
> +		return NULL;

space

> +	if (offset > (cpuz->chunks[cpuz->count - 1].offset + cpuz->chunks[cpuz->count - 1].size))
> +		return NULL;
> +
> +	/* check if the requested offset is in the last requested chunk or in the next chunk */
> +	if (CHUNK_CHECK_OFFSET(cpuz->chunks + cpuz->last_chunk, offset))
> +		return cpuz->chunks + cpuz->last_chunk;
> +	cpuz->last_chunk++;
> +	if (cpuz->last_chunk < cpuz->count &&
> +	    CHUNK_CHECK_OFFSET(cpuz->chunks + cpuz->last_chunk, offset))
> +		return cpuz->chunks + cpuz->last_chunk;
> +
> +	/* do a binary search to find the chunk holding the given offset */
> +	min = 0;
> +	max = cpuz->count - 1;
> +	mid = (min + max)/2;
> +	while (min <= max) {
> +		if (offset < cpuz->chunks[mid].offset)
> +			max = mid - 1;
> +		else if (offset > (cpuz->chunks[mid].offset + cpuz->chunks[mid].size))
> +			min = mid + 1;
> +		else
> +			break;
> +		mid = (min + max)/2;
> +	}
> +	cpuz->last_chunk = mid;
> +	return cpuz->chunks + mid;

Instead of open coding the above, what about:


	struct tracecmd_compress_chunk *chunk;
	struct tracecmd_compress_chunk key;

	key.offset = offset;
	chunk = bsearch(&key, cpuz->chunks, cpuz->count, sizeof(*chunk),
			chunk_cmp);

	if (!chunk) /* should never happen */
		return NULL;

	cpuz->last_chunk = chunk - cpuz->chunks;
	return chunk;
}

static int chunk_cmp(const void *A, const void *B)
{
	const struct tracecmd_compress_chunk *a = A;
	const struct tracecmd_compress_chunk *b = B;

	if (CHUNK_CHECK_OFFSET(b, a->offset))
		return 0;

	if (b->offset < a->offset)
		return 1;

	return -1;
}
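
Note that bsearch() calls the comparator as compar(key, member), so with
the key as the first argument the comparator must return a negative
value when the key offset lies below the member chunk and a positive
value when it lies above; the comparator above follows that convention.
A tiny self-contained sketch of this range-lookup pattern, with made-up
names and values that are not part of the patch:

#include <stdio.h>
#include <stdlib.h>

struct range { long offset; long size; };

/* key is the first argument, the array member the second */
static int range_cmp(const void *K, const void *M)
{
	const struct range *key = K;
	const struct range *m = M;

	if (key->offset >= m->offset && key->offset < m->offset + m->size)
		return 0;	/* offset falls inside this member */
	return key->offset < m->offset ? -1 : 1;
}

int main(void)
{
	struct range chunks[] = { {0, 100}, {100, 50}, {150, 200} };
	struct range key = { .offset = 120 };
	struct range *hit = bsearch(&key, chunks, 3, sizeof(*chunks), range_cmp);

	/* prints: offset 120 -> chunk at offset 100 */
	printf("offset 120 -> chunk at offset %ld\n", hit ? hit->offset : -1L);
	return 0;
}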
	
> +}
> +
> +static void free_zpage(struct cpu_data *cpu_data, void *map)
> +{
> +	struct zchunk_cache *cache;
> +
> +	list_for_each_entry(cache, &cpu_data->compress.cache, list) {
> +		if (map >= cache->map && map < (cache->map + cache->chunk->size))
> +			goto found;
> +	}
> +	return;
> +
> +found:
> +	cache->ref--;
> +	if (cache->ref)
> +		return;
> +	list_del(&cache->list);
> +	free(cache->map);
> +	free(cache);
> +}
> +
> +static void *read_zpage(struct tracecmd_input *handle, int cpu, off64_t offset)
> +{
> +	struct cpu_data *cpu_data = &handle->cpu_data[cpu];
> +	struct tracecmd_compress_chunk *chunk;
> +	struct zchunk_cache *cache;
> +	void *map = NULL;
> +	int pindex;
> +	int size;
> +
> +	/* Look in the cache of already loaded chunks */
> +	list_for_each_entry(cache, &cpu_data->compress.cache, list) {
> +		if (CHUNK_CHECK_OFFSET(cache->chunk, offset)) {
> +			cache->ref++;
> +			goto out;
> +		}
> +	}
> +
> +	chunk = get_zchunk(cpu_data, offset);
> +	if (!chunk)
> +		return NULL;

space

> +	size = handle->page_size > chunk->size ? handle->page_size : chunk->size;
> +	map = malloc(size);
> +	if (!map)
> +		return NULL;

space

> +	if (tracecmd_uncompress_chunk(handle->compress, chunk, map) < 0)
> +		goto error;
> +
> +	cache = calloc(1, sizeof(struct zchunk_cache));
> +	if (!cache)
> +		goto error;
> +	cache->ref = 1;
> +	cache->chunk = chunk;
> +	cache->map = map;
> +	list_add(&cache->list, &cpu_data->compress.cache);
> +
> +	/* a chunk can hold multiple pages, get the requested one */
> +out:
> +	pindex = (offset - cache->chunk->offset) / handle->page_size;
> +	return cache->map + (pindex * handle->page_size);
> +error:
> +	free(map);
> +	return NULL;
> +}
> +

-- Steve

>  static void *allocate_page_map(struct tracecmd_input *handle,
>  			       struct page *page, int cpu, off64_t offset)
>  {
> @@ -1268,6 +1370,9 @@ static void *allocate_page_map(struct tracecmd_input *handle,
>  	int ret;
>  	int fd;
>  
> +	if (handle->cpu_compressed && handle->read_zpage)
> +		return read_zpage(handle, cpu, offset);
> +
>  	if (handle->read_page) {
>  		map = malloc(handle->page_size);
>  		if (!map)
> @@ -1410,6 +1515,8 @@ static void __free_page(struct tracecmd_input *handle, struct page *page)
>  
>  	if (handle->read_page)
>  		free(page->map);
> +	else if (handle->read_zpage)
> +		free_zpage(cpu_data, page->map);
>  	else
>  		free_page_map(page->page_map);
>  
> @@ -3954,6 +4061,9 @@ struct tracecmd_input *tracecmd_alloc_fd(int fd, int flags)
>  	/* By default, use usecs, unless told otherwise */
>  	handle->flags |= TRACECMD_FL_IN_USECS;
>  
> +#ifdef INMEMORY_DECOMPRESS
> +	handle->read_zpage = 1;
> +#endif
>  	if (do_read_check(handle, buf, 3))
>  		goto failed_read;
>

Patch

diff --git a/lib/trace-cmd/trace-input.c b/lib/trace-cmd/trace-input.c
index 45a87a63..f5241e4b 100644
--- a/lib/trace-cmd/trace-input.c
+++ b/lib/trace-cmd/trace-input.c
@@ -29,6 +29,9 @@ 
 
 #define COMMIT_MASK ((1 << 27) - 1)
 
+/* force uncompressing in memory */
+#define INMEMORY_DECOMPRESS
+
 /* for debugging read instead of mmap */
 static int force_read = 0;
 
@@ -1257,6 +1260,105 @@ static void free_page_map(struct page_map *page_map)
 	free(page_map);
 }
 
+#define CHUNK_CHECK_OFFSET(C, O)	((O) >= (C)->offset && (O) < ((C)->offset + (C)->size))
+static struct tracecmd_compress_chunk *get_zchunk(struct cpu_data *cpu, off64_t offset)
+{
+	struct cpu_zdata *cpuz = &cpu->compress;
+	int min, mid, max;
+
+	if (!cpuz->chunks)
+		return NULL;
+	if (offset > (cpuz->chunks[cpuz->count - 1].offset + cpuz->chunks[cpuz->count - 1].size))
+		return NULL;
+
+	/* check if the requested offset is in the last requested chunk or in the next chunk */
+	if (CHUNK_CHECK_OFFSET(cpuz->chunks + cpuz->last_chunk, offset))
+		return cpuz->chunks + cpuz->last_chunk;
+	cpuz->last_chunk++;
+	if (cpuz->last_chunk < cpuz->count &&
+	    CHUNK_CHECK_OFFSET(cpuz->chunks + cpuz->last_chunk, offset))
+		return cpuz->chunks + cpuz->last_chunk;
+
+	/* do a binary search to find the chunk holding the given offset */
+	min = 0;
+	max = cpuz->count - 1;
+	mid = (min + max)/2;
+	while (min <= max) {
+		if (offset < cpuz->chunks[mid].offset)
+			max = mid - 1;
+		else if (offset > (cpuz->chunks[mid].offset + cpuz->chunks[mid].size))
+			min = mid + 1;
+		else
+			break;
+		mid = (min + max)/2;
+	}
+	cpuz->last_chunk = mid;
+	return cpuz->chunks + mid;
+}
+
+static void free_zpage(struct cpu_data *cpu_data, void *map)
+{
+	struct zchunk_cache *cache;
+
+	list_for_each_entry(cache, &cpu_data->compress.cache, list) {
+		if (map >= cache->map && map < (cache->map + cache->chunk->size))
+			goto found;
+	}
+	return;
+
+found:
+	cache->ref--;
+	if (cache->ref)
+		return;
+	list_del(&cache->list);
+	free(cache->map);
+	free(cache);
+}
+
+static void *read_zpage(struct tracecmd_input *handle, int cpu, off64_t offset)
+{
+	struct cpu_data *cpu_data = &handle->cpu_data[cpu];
+	struct tracecmd_compress_chunk *chunk;
+	struct zchunk_cache *cache;
+	void *map = NULL;
+	int pindex;
+	int size;
+
+	/* Look in the cache of already loaded chunks */
+	list_for_each_entry(cache, &cpu_data->compress.cache, list) {
+		if (CHUNK_CHECK_OFFSET(cache->chunk, offset)) {
+			cache->ref++;
+			goto out;
+		}
+	}
+
+	chunk = get_zchunk(cpu_data, offset);
+	if (!chunk)
+		return NULL;
+	size = handle->page_size > chunk->size ? handle->page_size : chunk->size;
+	map = malloc(size);
+	if (!map)
+		return NULL;
+	if (tracecmd_uncompress_chunk(handle->compress, chunk, map) < 0)
+		goto error;
+
+	cache = calloc(1, sizeof(struct zchunk_cache));
+	if (!cache)
+		goto error;
+	cache->ref = 1;
+	cache->chunk = chunk;
+	cache->map = map;
+	list_add(&cache->list, &cpu_data->compress.cache);
+
+	/* a chunk can hold multiple pages, get the requested one */
+out:
+	pindex = (offset - cache->chunk->offset) / handle->page_size;
+	return cache->map + (pindex * handle->page_size);
+error:
+	free(map);
+	return NULL;
+}
+
 static void *allocate_page_map(struct tracecmd_input *handle,
 			       struct page *page, int cpu, off64_t offset)
 {
@@ -1268,6 +1370,9 @@ static void *allocate_page_map(struct tracecmd_input *handle,
 	int ret;
 	int fd;
 
+	if (handle->cpu_compressed && handle->read_zpage)
+		return read_zpage(handle, cpu, offset);
+
 	if (handle->read_page) {
 		map = malloc(handle->page_size);
 		if (!map)
@@ -1410,6 +1515,8 @@ static void __free_page(struct tracecmd_input *handle, struct page *page)
 
 	if (handle->read_page)
 		free(page->map);
+	else if (handle->read_zpage)
+		free_zpage(cpu_data, page->map);
 	else
 		free_page_map(page->page_map);
 
@@ -3954,6 +4061,9 @@ struct tracecmd_input *tracecmd_alloc_fd(int fd, int flags)
 	/* By default, use usecs, unless told otherwise */
 	handle->flags |= TRACECMD_FL_IN_USECS;
 
+#ifdef INMEMORY_DECOMPRESS
+	handle->read_zpage = 1;
+#endif
 	if (do_read_check(handle, buf, 3))
 		goto failed_read;