
tests/qtest: Plug memory leaks in qtest_get_machines

Message ID 20230120194435.29796-1-farosas@suse.de (mailing list archive)
State New, archived
Series tests/qtest: Plug memory leaks in qtest_get_machines

Commit Message

Fabiano Rosas Jan. 20, 2023, 7:44 p.m. UTC
These leaks can be avoided:

 759 bytes in 61 blocks are still reachable in loss record 56 of 60
    at 0x4034744: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
    by 0x4A88518: g_malloc (in /usr/lib64/libglib-2.0.so.0.7000.5)
    by 0x4AA313E: g_strdup (in /usr/lib64/libglib-2.0.so.0.7000.5)
    by 0x12083E: qtest_get_machines (libqtest.c:1323)
    by 0x12098C: qtest_cb_for_every_machine (libqtest.c:1348)
    by 0x11556C: main (test-hmp.c:160)

 992 bytes in 1 blocks are still reachable in loss record 57 of 60
    at 0x4034744: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
    by 0x4A88518: g_malloc (in /usr/lib64/libglib-2.0.so.0.7000.5)
    by 0x120725: qtest_get_machines (libqtest.c:1313)
    by 0x12098C: qtest_cb_for_every_machine (libqtest.c:1348)
    by 0x11556C: main (test-hmp.c:160)

Signed-off-by: Fabiano Rosas <farosas@suse.de>
---
 tests/qtest/libqtest.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

Comments

Thomas Huth Jan. 23, 2023, 7:55 a.m. UTC | #1
On 20/01/2023 20.44, Fabiano Rosas wrote:
> These leaks can be avoided:
> 
>   759 bytes in 61 blocks are still reachable in loss record 56 of 60
>      at 0x4034744: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
>      by 0x4A88518: g_malloc (in /usr/lib64/libglib-2.0.so.0.7000.5)
>      by 0x4AA313E: g_strdup (in /usr/lib64/libglib-2.0.so.0.7000.5)
>      by 0x12083E: qtest_get_machines (libqtest.c:1323)
>      by 0x12098C: qtest_cb_for_every_machine (libqtest.c:1348)
>      by 0x11556C: main (test-hmp.c:160)
> 
>   992 bytes in 1 blocks are still reachable in loss record 57 of 60
>      at 0x4034744: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
>      by 0x4A88518: g_malloc (in /usr/lib64/libglib-2.0.so.0.7000.5)
>      by 0x120725: qtest_get_machines (libqtest.c:1313)
>      by 0x12098C: qtest_cb_for_every_machine (libqtest.c:1348)
>      by 0x11556C: main (test-hmp.c:160)
> 
> Signed-off-by: Fabiano Rosas <farosas@suse.de>
> ---
>   tests/qtest/libqtest.c | 14 ++++++++++++++
>   1 file changed, 14 insertions(+)
> 
> diff --git a/tests/qtest/libqtest.c b/tests/qtest/libqtest.c
> index 6b2216cb20..65abac5029 100644
> --- a/tests/qtest/libqtest.c
> +++ b/tests/qtest/libqtest.c
> @@ -1285,6 +1285,18 @@ struct MachInfo {
>       char *alias;
>   };
>   
> +static void qtest_free_machine_info(gpointer data)
> +{
> +    struct MachInfo *machines = data;
> +    int i;
> +
> +    for (i = 0; machines[i].name != NULL; i++) {
> +        g_free((void *)machines[i].name);
> +        g_free((void *)machines[i].alias);

I'd suggest setting .name and .alias to NULL after freeing them, to avoid 
leaving dangling pointers behind.

> +    }
> +    g_free(machines);
> +}
> +
>   /*
>    * Returns an array with pointers to the available machine names.
>    * The terminating entry has the name set to NULL.
> @@ -1336,6 +1348,8 @@ static struct MachInfo *qtest_get_machines(void)
>       qobject_unref(response);
>   
>       memset(&machines[idx], 0, sizeof(struct MachInfo)); /* Terminating entry */
> +    g_test_queue_destroy(qtest_free_machine_info, machines);

So this frees the machines structure...

>       return machines;

... but here it gets returned, too? ... that looks wrong. Did you maybe 
rather want to free it at the end of qtest_cb_for_every_machine() and 
qtest_has_machine()?

  Thomas

>   }
>
Fabiano Rosas Jan. 23, 2023, 1:32 p.m. UTC | #2
Thomas Huth <thuth@redhat.com> writes:

> On 20/01/2023 20.44, Fabiano Rosas wrote:
>> These leaks can be avoided:
>> 
>>   759 bytes in 61 blocks are still reachable in loss record 56 of 60
>>      at 0x4034744: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
>>      by 0x4A88518: g_malloc (in /usr/lib64/libglib-2.0.so.0.7000.5)
>>      by 0x4AA313E: g_strdup (in /usr/lib64/libglib-2.0.so.0.7000.5)
>>      by 0x12083E: qtest_get_machines (libqtest.c:1323)
>>      by 0x12098C: qtest_cb_for_every_machine (libqtest.c:1348)
>>      by 0x11556C: main (test-hmp.c:160)
>> 
>>   992 bytes in 1 blocks are still reachable in loss record 57 of 60
>>      at 0x4034744: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
>>      by 0x4A88518: g_malloc (in /usr/lib64/libglib-2.0.so.0.7000.5)
>>      by 0x120725: qtest_get_machines (libqtest.c:1313)
>>      by 0x12098C: qtest_cb_for_every_machine (libqtest.c:1348)
>>      by 0x11556C: main (test-hmp.c:160)
>> 
>> Signed-off-by: Fabiano Rosas <farosas@suse.de>
>> ---
>>   tests/qtest/libqtest.c | 14 ++++++++++++++
>>   1 file changed, 14 insertions(+)
>> 
>> diff --git a/tests/qtest/libqtest.c b/tests/qtest/libqtest.c
>> index 6b2216cb20..65abac5029 100644
>> --- a/tests/qtest/libqtest.c
>> +++ b/tests/qtest/libqtest.c
>> @@ -1285,6 +1285,18 @@ struct MachInfo {
>>       char *alias;
>>   };
>>   
>> +static void qtest_free_machine_info(gpointer data)
>> +{
>> +    struct MachInfo *machines = data;
>> +    int i;
>> +
>> +    for (i = 0; machines[i].name != NULL; i++) {
>> +        g_free((void *)machines[i].name);
>> +        g_free((void *)machines[i].alias);
>
> I'd suggest setting .name and .alias to NULL after freeing them, to avoid 
> leaving dangling pointers behind.
>
>> +    }
>> +    g_free(machines);
>> +}
>> +
>>   /*
>>    * Returns an array with pointers to the available machine names.
>>    * The terminating entry has the name set to NULL.
>> @@ -1336,6 +1348,8 @@ static struct MachInfo *qtest_get_machines(void)
>>       qobject_unref(response);
>>   
>>       memset(&machines[idx], 0, sizeof(struct MachInfo)); /* Terminating entry */
>> +    g_test_queue_destroy(qtest_free_machine_info, machines);
>
> So this frees the machines structure...
>
>>       return machines;
>
> ... but here it gets returned, too? ... that looks wrong. Did you maybe 
> rather want to free it at the end of qtest_cb_for_every_machine() and 
> qtest_has_machine()?

g_test_queue_destroy will only call qtest_free_machine_info during the
test teardown phase:

#0  qtest_free_machine_info (data=0x555555677870) at ../tests/qtest/libqtest.c:1289
#1  0x00007ffff7b1d9d1 in ?? () from /usr/lib64/libglib-2.0.so.0
#2  0x00007ffff7b1d8b3 in ?? () from /usr/lib64/libglib-2.0.so.0
#3  0x00007ffff7b1d8b3 in ?? () from /usr/lib64/libglib-2.0.so.0
#4  0x00007ffff7b1de82 in g_test_run_suite () from /usr/lib64/libglib-2.0.so.0
#5  0x00007ffff7b1deab in g_test_run () from /usr/lib64/libglib-2.0.so.0
#6  0x0000555555561221 in main (argc=<optimized out>, argv=<optimized out>) at ../tests/qtest/qom-test.c:12

As long as 'machines' is static and not being exposed to the tests, I
think this should be fine.
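
For reference, a minimal standalone GLib sketch (purely illustrative, not
part of this patch; the test name and callback below are made up) of how
g_test_queue_destroy() defers its callback to the next test-case teardown
phase instead of running it immediately:

#include <glib.h>

/* Destroy notify that GLib invokes during the teardown of the test case */
static void free_blob(gpointer data)
{
    g_printerr("teardown: freeing %p\n", data);
    g_free(data);
}

static void test_dummy(void)
{
    gpointer blob = g_malloc(16);

    /* Not freed here; GLib calls free_blob(blob) once this test finishes */
    g_test_queue_destroy(free_blob, blob);
    g_assert_nonnull(blob);
}

int main(int argc, char **argv)
{
    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/example/queue-destroy", test_dummy);
    return g_test_run();
}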
Thomas Huth Jan. 23, 2023, 7:49 p.m. UTC | #3
On 23/01/2023 14.32, Fabiano Rosas wrote:
> Thomas Huth <thuth@redhat.com> writes:
> 
>> On 20/01/2023 20.44, Fabiano Rosas wrote:
>>> These leaks can be avoided:
>>>
>>>    759 bytes in 61 blocks are still reachable in loss record 56 of 60
>>>       at 0x4034744: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
>>>       by 0x4A88518: g_malloc (in /usr/lib64/libglib-2.0.so.0.7000.5)
>>>       by 0x4AA313E: g_strdup (in /usr/lib64/libglib-2.0.so.0.7000.5)
>>>       by 0x12083E: qtest_get_machines (libqtest.c:1323)
>>>       by 0x12098C: qtest_cb_for_every_machine (libqtest.c:1348)
>>>       by 0x11556C: main (test-hmp.c:160)
>>>
>>>    992 bytes in 1 blocks are still reachable in loss record 57 of 60
>>>       at 0x4034744: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
>>>       by 0x4A88518: g_malloc (in /usr/lib64/libglib-2.0.so.0.7000.5)
>>>       by 0x120725: qtest_get_machines (libqtest.c:1313)
>>>       by 0x12098C: qtest_cb_for_every_machine (libqtest.c:1348)
>>>       by 0x11556C: main (test-hmp.c:160)
>>>
>>> Signed-off-by: Fabiano Rosas <farosas@suse.de>
>>> ---
>>>    tests/qtest/libqtest.c | 14 ++++++++++++++
>>>    1 file changed, 14 insertions(+)
>>>
>>> diff --git a/tests/qtest/libqtest.c b/tests/qtest/libqtest.c
>>> index 6b2216cb20..65abac5029 100644
>>> --- a/tests/qtest/libqtest.c
>>> +++ b/tests/qtest/libqtest.c
>>> @@ -1285,6 +1285,18 @@ struct MachInfo {
>>>        char *alias;
>>>    };
>>>    
>>> +static void qtest_free_machine_info(gpointer data)
>>> +{
>>> +    struct MachInfo *machines = data;
>>> +    int i;
>>> +
>>> +    for (i = 0; machines[i].name != NULL; i++) {
>>> +        g_free((void *)machines[i].name);
>>> +        g_free((void *)machines[i].alias);
>>
>> I'd suggest setting .name and .alias to NULL after freeing them, to avoid
>> leaving dangling pointers behind.
>>
>>> +    }
>>> +    g_free(machines);
>>> +}
>>> +
>>>    /*
>>>     * Returns an array with pointers to the available machine names.
>>>     * The terminating entry has the name set to NULL.
>>> @@ -1336,6 +1348,8 @@ static struct MachInfo *qtest_get_machines(void)
>>>        qobject_unref(response);
>>>    
>>>        memset(&machines[idx], 0, sizeof(struct MachInfo)); /* Terminating entry */
>>> +    g_test_queue_destroy(qtest_free_machine_info, machines);
>>
>> So this frees the machines structure...
>>
>>>        return machines;
>>
>> ... but here it gets returned, too? ... that looks wrong. Did you maybe
>> rather want to free it at the end of qtest_cb_for_every_machine() and
>> qtest_has_machine()?
> 
> g_test_queue_destroy will only call qtest_free_machine_info during the
> test teardown phase:
> 
> #0  qtest_free_machine_info (data=0x555555677870) at ../tests/qtest/libqtest.c:1289
> #1  0x00007ffff7b1d9d1 in ?? () from /usr/lib64/libglib-2.0.so.0
> #2  0x00007ffff7b1d8b3 in ?? () from /usr/lib64/libglib-2.0.so.0
> #3  0x00007ffff7b1d8b3 in ?? () from /usr/lib64/libglib-2.0.so.0
> #4  0x00007ffff7b1de82 in g_test_run_suite () from /usr/lib64/libglib-2.0.so.0
> #5  0x00007ffff7b1deab in g_test_run () from /usr/lib64/libglib-2.0.so.0
> #6  0x0000555555561221 in main (argc=<optimized out>, argv=<optimized out>) at ../tests/qtest/qom-test.c:12
> 
> As long as 'machines' is static and not being exposed to the tests, I
> think this should be fine.

Ah, thanks for the explanation, I really got that wrong.

But I think g_test_queue_destroy() might still be the wrong thing to use 
here. I added an fprintf() to your new qtest_free_machine_info() function, and 
it seems to be called once at the end of the very first test already. So if 
anything else calls qtest_get_machines() again after the first test has 
finished, it will crash due to the dangling static machine pointer.

So maybe the static machine pointer should be moved outside of the function 
and then be released from qtest_quit() instead?

(Also, it's valgrind that you used, isn't it? - I wonder why it's 
complaining here at all since the pointer to the memory should still be 
valid at the end?)

  Thomas
Fabiano Rosas Jan. 23, 2023, 9:22 p.m. UTC | #4
Thomas Huth <thuth@redhat.com> writes:

> On 23/01/2023 14.32, Fabiano Rosas wrote:
>> Thomas Huth <thuth@redhat.com> writes:
>> 
>>> On 20/01/2023 20.44, Fabiano Rosas wrote:
>>>> These leaks can be avoided:
>>>>
>>>>    759 bytes in 61 blocks are still reachable in loss record 56 of 60
>>>>       at 0x4034744: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
>>>>       by 0x4A88518: g_malloc (in /usr/lib64/libglib-2.0.so.0.7000.5)
>>>>       by 0x4AA313E: g_strdup (in /usr/lib64/libglib-2.0.so.0.7000.5)
>>>>       by 0x12083E: qtest_get_machines (libqtest.c:1323)
>>>>       by 0x12098C: qtest_cb_for_every_machine (libqtest.c:1348)
>>>>       by 0x11556C: main (test-hmp.c:160)
>>>>
>>>>    992 bytes in 1 blocks are still reachable in loss record 57 of 60
>>>>       at 0x4034744: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
>>>>       by 0x4A88518: g_malloc (in /usr/lib64/libglib-2.0.so.0.7000.5)
>>>>       by 0x120725: qtest_get_machines (libqtest.c:1313)
>>>>       by 0x12098C: qtest_cb_for_every_machine (libqtest.c:1348)
>>>>       by 0x11556C: main (test-hmp.c:160)
>>>>
>>>> Signed-off-by: Fabiano Rosas <farosas@suse.de>
>>>> ---
>>>>    tests/qtest/libqtest.c | 14 ++++++++++++++
>>>>    1 file changed, 14 insertions(+)
>>>>
>>>> diff --git a/tests/qtest/libqtest.c b/tests/qtest/libqtest.c
>>>> index 6b2216cb20..65abac5029 100644
>>>> --- a/tests/qtest/libqtest.c
>>>> +++ b/tests/qtest/libqtest.c
>>>> @@ -1285,6 +1285,18 @@ struct MachInfo {
>>>>        char *alias;
>>>>    };
>>>>    
>>>> +static void qtest_free_machine_info(gpointer data)
>>>> +{
>>>> +    struct MachInfo *machines = data;
>>>> +    int i;
>>>> +
>>>> +    for (i = 0; machines[i].name != NULL; i++) {
>>>> +        g_free((void *)machines[i].name);
>>>> +        g_free((void *)machines[i].alias);
>>>
>>> I'd suggest setting .name and .alias to NULL after freeing them, to avoid
>>> leaving dangling pointers behind.
>>>
>>>> +    }
>>>> +    g_free(machines);
>>>> +}
>>>> +
>>>>    /*
>>>>     * Returns an array with pointers to the available machine names.
>>>>     * The terminating entry has the name set to NULL.
>>>> @@ -1336,6 +1348,8 @@ static struct MachInfo *qtest_get_machines(void)
>>>>        qobject_unref(response);
>>>>    
>>>>        memset(&machines[idx], 0, sizeof(struct MachInfo)); /* Terminating entry */
>>>> +    g_test_queue_destroy(qtest_free_machine_info, machines);
>>>
>>> So this frees the machines structure...
>>>
>>>>        return machines;
>>>
>>> ... but here it gets returned, too? ... that looks wrong. Did you maybe
>>> rather want to free it at the end of qtest_cb_for_every_machine() and
>>> qtest_has_machine()?
>> 
>> g_test_queue_destroy will only call qtest_free_machine_info during the
>> test teardown phase:
>> 
>> #0  qtest_free_machine_info (data=0x555555677870) at ../tests/qtest/libqtest.c:1289
>> #1  0x00007ffff7b1d9d1 in ?? () from /usr/lib64/libglib-2.0.so.0
>> #2  0x00007ffff7b1d8b3 in ?? () from /usr/lib64/libglib-2.0.so.0
>> #3  0x00007ffff7b1d8b3 in ?? () from /usr/lib64/libglib-2.0.so.0
>> #4  0x00007ffff7b1de82 in g_test_run_suite () from /usr/lib64/libglib-2.0.so.0
>> #5  0x00007ffff7b1deab in g_test_run () from /usr/lib64/libglib-2.0.so.0
>> #6  0x0000555555561221 in main (argc=<optimized out>, argv=<optimized out>) at ../tests/qtest/qom-test.c:12
>> 
>> As long as 'machines' is static and not being exposed to the tests, I
>> think this should be fine.
>
> Ah, thanks for the explanation, I really got that wrong.
>
> But I think g_test_queue_destroy() might still be the wrong thing to use 
> here. I added an fprintf() to your new qtest_free_machine_info() function, and 
> it seems to be called once at the end of the very first test already.

Yes, because we currently only use qtest_get_machines to construct the
test paths. So it is only needed before the tests.

The g_test_queue_destroy function is actually attaching the callback to
the first test.

> So if anything else calls qtest_get_machines() again after the first
> test has finished, it will crash due to the dangling static machine
> pointer.

Ah, my reply about it being fine was based on the v2, which sets
'machines' to NULL. So we would go through qtest_get_machines() once more and
register qtest_free_machine_info() a second time: every time we have to
fetch the machines, we register the cleanup callback.

> So maybe the static machine pointer should be moved outside of the function 
> and then be released from qtest_quit() instead?

Hm... let me give it a try. I'm not sure if it works, because we call
qtest_quit() while still using the machines array. Maybe if I move it out
and do the cleanup in qtest_cb_for_every_machine()...
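
A rough sketch of that direction (simplified and hypothetical, not the
actual libqtest.c code; only the field names are taken from the quoted
diff): move the cached array to file scope and free it from an explicit
helper that e.g. qtest_cb_for_every_machine() could call once it is done
iterating:

#include <glib.h>

struct MachInfo {
    char *name;
    char *alias;
};

/* Cache moved out of qtest_get_machines() to file scope */
static struct MachInfo *machines;

static void qtest_free_machine_info(void)
{
    int i;

    if (!machines) {
        return;
    }
    for (i = 0; machines[i].name != NULL; i++) {
        g_free(machines[i].name);
        machines[i].name = NULL;
        g_free(machines[i].alias);
        machines[i].alias = NULL;
    }
    g_free(machines);
    machines = NULL;    /* a later qtest_get_machines() call repopulates it */
}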

> (Also, it's valgrind that you used, isn't it? - I wonder why it's 
> complaining here at all since the pointer to the memory should still be 
> valid at the end?)

valgrind is complaining about the memory not being explicitly freed at
any point. I guess "leak" was not the most precise term to use in the
commit message.
Thomas Huth Jan. 24, 2023, 7:25 a.m. UTC | #5
On 23/01/2023 22.22, Fabiano Rosas wrote:
> Thomas Huth <thuth@redhat.com> writes:
> 
>> On 23/01/2023 14.32, Fabiano Rosas wrote:
>>> Thomas Huth <thuth@redhat.com> writes:
>>>
>>>> On 20/01/2023 20.44, Fabiano Rosas wrote:
>>>>> These leaks can be avoided:
>>>>>
>>>>>     759 bytes in 61 blocks are still reachable in loss record 56 of 60
>>>>>        at 0x4034744: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
>>>>>        by 0x4A88518: g_malloc (in /usr/lib64/libglib-2.0.so.0.7000.5)
>>>>>        by 0x4AA313E: g_strdup (in /usr/lib64/libglib-2.0.so.0.7000.5)
>>>>>        by 0x12083E: qtest_get_machines (libqtest.c:1323)
>>>>>        by 0x12098C: qtest_cb_for_every_machine (libqtest.c:1348)
>>>>>        by 0x11556C: main (test-hmp.c:160)
>>>>>
>>>>>     992 bytes in 1 blocks are still reachable in loss record 57 of 60
>>>>>        at 0x4034744: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
>>>>>        by 0x4A88518: g_malloc (in /usr/lib64/libglib-2.0.so.0.7000.5)
>>>>>        by 0x120725: qtest_get_machines (libqtest.c:1313)
>>>>>        by 0x12098C: qtest_cb_for_every_machine (libqtest.c:1348)
>>>>>        by 0x11556C: main (test-hmp.c:160)
...
>> (Also, it's valgrind that you used, isn't it? - I wonder why it's
>> complaining here at all since the pointer to the memory should still be
>> valid at the end?)
> 
> valgrind is complaining about the memory not being explicitly freed at
> any point. I guess "leak" was not the most precise term to use in the
> commit message.

How did you run valgrind? For me, it does not really complain about the 
non-freed memory here since it is still reachable. The only difference that 
I see is in the summary. Without your patch:

     still reachable: 27,471 bytes in 152 blocks

with your patch:

     still reachable: 25,713 bytes in 88 blocks

... but that IMHO doesn't really hurt, since the memory is not really 
leaked, i.e. the memory usage won't increase during runtime here. So I fail 
to see which problem you're really trying to solve here, could you please 
elaborate?

  Thomas
Fabiano Rosas Jan. 24, 2023, 12:45 p.m. UTC | #6
Thomas Huth <thuth@redhat.com> writes:

> On 23/01/2023 22.22, Fabiano Rosas wrote:
>> Thomas Huth <thuth@redhat.com> writes:
>> 
>>> On 23/01/2023 14.32, Fabiano Rosas wrote:
>>>> Thomas Huth <thuth@redhat.com> writes:
>>>>
>>>>> On 20/01/2023 20.44, Fabiano Rosas wrote:
>>>>>> These leaks can be avoided:
>>>>>>
>>>>>>     759 bytes in 61 blocks are still reachable in loss record 56 of 60
>>>>>>        at 0x4034744: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
>>>>>>        by 0x4A88518: g_malloc (in /usr/lib64/libglib-2.0.so.0.7000.5)
>>>>>>        by 0x4AA313E: g_strdup (in /usr/lib64/libglib-2.0.so.0.7000.5)
>>>>>>        by 0x12083E: qtest_get_machines (libqtest.c:1323)
>>>>>>        by 0x12098C: qtest_cb_for_every_machine (libqtest.c:1348)
>>>>>>        by 0x11556C: main (test-hmp.c:160)
>>>>>>
>>>>>>     992 bytes in 1 blocks are still reachable in loss record 57 of 60
>>>>>>        at 0x4034744: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
>>>>>>        by 0x4A88518: g_malloc (in /usr/lib64/libglib-2.0.so.0.7000.5)
>>>>>>        by 0x120725: qtest_get_machines (libqtest.c:1313)
>>>>>>        by 0x12098C: qtest_cb_for_every_machine (libqtest.c:1348)
>>>>>>        by 0x11556C: main (test-hmp.c:160)
> ...
>>> (Also, it's valgrind that you used, isn't it? - I wonder why it's
>>> complaining here at all since the pointer to the memory should still be
>>> valid at the end?)
>> 
>> valgrind is complaining about the memory not being explicitly freed at
>> any point. I guess "leak" was not the most precise term to use in the
>> commit message.
>
> How did you run valgrind? For me, it does not really complain about the 
> non-freed memory here since it is still reachable. The only difference that 
> I see is in the summary. Without your patch:
>
>      still reachable: 27,471 bytes in 152 blocks
>
> with your patch:
>
>      still reachable: 25,713 bytes in 88 blocks

valgrind --leak-check=full --show-leak-kinds=all

> ... but that IMHO doesn't really hurt, since the memory is not really 
> leaked, i.e. the memory usage won't increase during runtime here. So I fail 
> to see which problem you're really trying to solve here, could you please 
> elaborate?

You're right, it's harmless. We could just not bother with it.
Thomas Huth Jan. 24, 2023, 1:04 p.m. UTC | #7
On 24/01/2023 13.45, Fabiano Rosas wrote:
> Thomas Huth <thuth@redhat.com> writes:
> 
>> On 23/01/2023 22.22, Fabiano Rosas wrote:
>>> Thomas Huth <thuth@redhat.com> writes:
>>>
>>>> On 23/01/2023 14.32, Fabiano Rosas wrote:
>>>>> Thomas Huth <thuth@redhat.com> writes:
>>>>>
>>>>>> On 20/01/2023 20.44, Fabiano Rosas wrote:
>>>>>>> These leaks can be avoided:
>>>>>>>
>>>>>>>      759 bytes in 61 blocks are still reachable in loss record 56 of 60
>>>>>>>         at 0x4034744: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
>>>>>>>         by 0x4A88518: g_malloc (in /usr/lib64/libglib-2.0.so.0.7000.5)
>>>>>>>         by 0x4AA313E: g_strdup (in /usr/lib64/libglib-2.0.so.0.7000.5)
>>>>>>>         by 0x12083E: qtest_get_machines (libqtest.c:1323)
>>>>>>>         by 0x12098C: qtest_cb_for_every_machine (libqtest.c:1348)
>>>>>>>         by 0x11556C: main (test-hmp.c:160)
>>>>>>>
>>>>>>>      992 bytes in 1 blocks are still reachable in loss record 57 of 60
>>>>>>>         at 0x4034744: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
>>>>>>>         by 0x4A88518: g_malloc (in /usr/lib64/libglib-2.0.so.0.7000.5)
>>>>>>>         by 0x120725: qtest_get_machines (libqtest.c:1313)
>>>>>>>         by 0x12098C: qtest_cb_for_every_machine (libqtest.c:1348)
>>>>>>>         by 0x11556C: main (test-hmp.c:160)
>> ...
>>>> (Also, it's valgrind that you used, isn't it? - I wonder why it's
>>>> complaining here at all since the pointer to the memory should still be
>>>> valid at the end?)
>>>
>>> valgrind is complaining about the memory not being explicitly freed at
>>> any point. I guess "leak" was not the most precise term to use in the
>>> commit message.
>>
>> How did you run valgrind? For me, it does not really complain about the
>> non-freed memory here since it is still reachable. The only difference that
>> I see is in the summary. Without your patch:
>>
>>       still reachable: 27,471 bytes in 152 blocks
>>
>> with your patch:
>>
>>       still reachable: 25,713 bytes in 88 blocks
> 
> valgrind --leak-check=full --show-leak-kinds=all

Ok, so that "--show-leak-kinds=all" is the issue here. I think it does not 
make sense to go hunting for each and every "reachable" non-freed memory 
block, since those have not really been leaked at runtime. Maybe it's 
better if you just use "--show-leak-kinds=definite,indirect,possible" instead.

  Thomas

Patch

diff --git a/tests/qtest/libqtest.c b/tests/qtest/libqtest.c
index 6b2216cb20..65abac5029 100644
--- a/tests/qtest/libqtest.c
+++ b/tests/qtest/libqtest.c
@@ -1285,6 +1285,18 @@  struct MachInfo {
     char *alias;
 };
 
+static void qtest_free_machine_info(gpointer data)
+{
+    struct MachInfo *machines = data;
+    int i;
+
+    for (i = 0; machines[i].name != NULL; i++) {
+        g_free((void *)machines[i].name);
+        g_free((void *)machines[i].alias);
+    }
+    g_free(machines);
+}
+
 /*
  * Returns an array with pointers to the available machine names.
  * The terminating entry has the name set to NULL.
@@ -1336,6 +1348,8 @@  static struct MachInfo *qtest_get_machines(void)
     qobject_unref(response);
 
     memset(&machines[idx], 0, sizeof(struct MachInfo)); /* Terminating entry */
+    g_test_queue_destroy(qtest_free_machine_info, machines);
+
     return machines;
 }