@@ -54,6 +54,24 @@ struct page {
#endif
};
+struct zchunk_cache {
+ struct list_head list;
+ struct tracecmd_compress_chunk *chunk;
+ void *map;
+ int ref;
+};
+
+struct cpu_zdata {
+ /* uncompressed cpu data */
+ int fd;
+ char file[26]; /* strlen(COMPR_TEMP_FILE) + 1, for the terminating NUL */
+ unsigned int count;
+ unsigned int last_chunk;
+ struct list_head cache;
+ struct tracecmd_compress_chunk *chunks;
+};
+
+#define COMPR_TEMP_FILE "/tmp/trace_cpu_dataXXXXXX"
struct cpu_data {
/* the first two never change */
unsigned long long file_offset;
@@ -72,6 +90,7 @@ struct cpu_data {
int page_cnt;
int cpu;
int pipe_fd;
+ struct cpu_zdata compress;
};
struct cpu_file_data {
@@ -150,6 +169,8 @@ struct tracecmd_input {
bool use_trace_clock;
bool read_page;
bool use_pipe;
+ bool read_zpage; /* uncompress pages in memory, do not use tmp files */
+ bool cpu_compressed;
int file_version;
struct cpu_data *cpu_data;
long long ts_offset;
@@ -3300,6 +3321,7 @@ static int init_cpu_data(struct tracecmd_input *handle)
endian = KBUFFER_ENDIAN_LITTLE;
for (cpu = 0; cpu < handle->cpus; cpu++) {
+ handle->cpu_data[cpu].compress.fd = -1;
handle->cpu_data[cpu].kbuf = kbuffer_alloc(long_size, endian);
if (!handle->cpu_data[cpu].kbuf)
goto out_free;
@@ -4041,6 +4063,7 @@ static inline void free_buffer(struct input_buffer_instance *buf)
*/
void tracecmd_close(struct tracecmd_input *handle)
{
+ struct zchunk_cache *cache;
struct file_section *del_sec;
int cpu;
int i;
@@ -4060,17 +4083,31 @@ void tracecmd_close(struct tracecmd_input *handle)
/* The tracecmd_peek_data may have cached a record */
free_next(handle, cpu);
free_page(handle, cpu);
- if (handle->cpu_data && handle->cpu_data[cpu].kbuf) {
- kbuffer_free(handle->cpu_data[cpu].kbuf);
- if (handle->cpu_data[cpu].page_map)
- free_page_map(handle->cpu_data[cpu].page_map);
-
- if (handle->cpu_data[cpu].page_cnt)
- tracecmd_warning("%d pages still allocated on cpu %d%s",
- handle->cpu_data[cpu].page_cnt, cpu,
- show_records(handle->cpu_data[cpu].pages,
- handle->cpu_data[cpu].nr_pages));
- free(handle->cpu_data[cpu].pages);
+ if (handle->cpu_data) {
+ if (handle->cpu_data[cpu].kbuf) {
+ kbuffer_free(handle->cpu_data[cpu].kbuf);
+ if (handle->cpu_data[cpu].page_map)
+ free_page_map(handle->cpu_data[cpu].page_map);
+
+ if (handle->cpu_data[cpu].page_cnt)
+ tracecmd_warning("%d pages still allocated on cpu %d%s",
+ handle->cpu_data[cpu].page_cnt, cpu,
+ show_records(handle->cpu_data[cpu].pages,
+ handle->cpu_data[cpu].nr_pages));
+ free(handle->cpu_data[cpu].pages);
+ }
+ if (handle->cpu_data[cpu].compress.fd >= 0) {
+ close(handle->cpu_data[cpu].compress.fd);
+ unlink(handle->cpu_data[cpu].compress.file);
+ }
+ while (!list_empty(&handle->cpu_data[cpu].compress.cache)) {
+ cache = container_of(handle->cpu_data[cpu].compress.cache.next,
+ struct zchunk_cache, list);
+ list_del(&cache->list);
+ free(cache->map);
+ free(cache);
+ }
+ free(handle->cpu_data[cpu].compress.chunks);
}
}
The CPU trace data is compressed in chunks; each chunk's size is a multiple of the trace page size. The input handler is extended with the necessary structures to control the data decompression. There are two approaches for data decompression, both of which are supported and can be used in different use cases: - in-memory decompression, page by page. - using a temporary file Signed-off-by: Tzvetomir Stoyanov (VMware) <tz.stoyanov@gmail.com> --- lib/trace-cmd/trace-input.c | 59 ++++++++++++++++++++++++++++++------- 1 file changed, 48 insertions(+), 11 deletions(-)