| Message ID | 20240226091446.479436-7-pierrick.bouvier@linaro.org |
|---|---|
| State | New, archived |
| Series | TCG Plugin inline operation enhancement |
Hi Pierrick,

On 13:14 Mon 26 Feb , Pierrick Bouvier wrote:
> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
> ---
>  tests/plugin/mem.c | 40 +++++++++++++++++++++++++---------------
>  1 file changed, 25 insertions(+), 15 deletions(-)
>
> diff --git a/tests/plugin/mem.c b/tests/plugin/mem.c
> index 44e91065ba7..d4729f5e015 100644
> --- a/tests/plugin/mem.c
> +++ b/tests/plugin/mem.c
> @@ -16,9 +16,14 @@
>
>  QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
>
> -static uint64_t inline_mem_count;
> -static uint64_t cb_mem_count;
> -static uint64_t io_count;
> +typedef struct {
> +    uint64_t mem_count;
> +    uint64_t io_count;
> +} CPUCount;
> +
> +static struct qemu_plugin_scoreboard *counts;
> +static qemu_plugin_u64 mem_count;
> +static qemu_plugin_u64 io_count;

I see that you merged inline and callback counts into the same variable.

I wonder... For this test, don't you want to keep a plain uint64_t for
callback counts? I have the feeling that this test was made so one can
make sure inline and callback counts match.

Luc

<snip>
Hi Luc,

On 2/27/24 1:35 PM, Luc Michel wrote:
> Hi Pierrick,
>
> On 13:14 Mon 26 Feb , Pierrick Bouvier wrote:
>> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
>> ---
>>  tests/plugin/mem.c | 40 +++++++++++++++++++++++++---------------
>>  1 file changed, 25 insertions(+), 15 deletions(-)
>>
>> diff --git a/tests/plugin/mem.c b/tests/plugin/mem.c
>> index 44e91065ba7..d4729f5e015 100644
>> --- a/tests/plugin/mem.c
>> +++ b/tests/plugin/mem.c
>> @@ -16,9 +16,14 @@
>>
>>  QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
>>
>> -static uint64_t inline_mem_count;
>> -static uint64_t cb_mem_count;
>> -static uint64_t io_count;
>> +typedef struct {
>> +    uint64_t mem_count;
>> +    uint64_t io_count;
>> +} CPUCount;
>> +
>> +static struct qemu_plugin_scoreboard *counts;
>> +static qemu_plugin_u64 mem_count;
>> +static qemu_plugin_u64 io_count;
>
> I see that you merged inline and callback counts into the same variable.
>
> I wonder... For this test, don't you want to keep a plain uint64_t for
> callback counts? I have the feeling that this test was made so one can
> make sure inline and callback counts match.

Indeed, the new API guarantees thread safety (the current inline
implementation was racy), so this plugin now gives the exact same
result whether you use inline ops or callbacks. What's more, the
callback-based approach can be implemented without any lock, as we
count per vCPU. Thus, it's faster and safer.

Regarding the "testing" side, this series introduces a new plugin,
tests/plugin/inline.c, which exercises all those cases in a single
plugin, so the other plugins don't need to offer a way to test this.

Thanks for your review.

> Luc
>
<snip>
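The lock-free, per-vCPU argument above can be sketched in a few lines of C. This fragment is not from the series: the MyCount struct and the two helper functions are made-up names for illustration, while the qemu_plugin_scoreboard_* and qemu_plugin_u64_* calls are the ones the patch itself uses.

#include <stdint.h>
#include <qemu-plugin.h>

/*
 * Hypothetical fragment: each vCPU increments its own scoreboard slot,
 * avoiding the data race of a shared global "counter++". Per-vCPU
 * totals are only folded together at exit via qemu_plugin_u64_sum().
 */
typedef struct {
    uint64_t mem_count;   /* one slot of this struct exists per vCPU */
} MyCount;

static struct qemu_plugin_scoreboard *board;
static qemu_plugin_u64 my_mem_count;

static void init_counts(void)
{
    board = qemu_plugin_scoreboard_new(sizeof(MyCount));
    my_mem_count = qemu_plugin_scoreboard_u64_in_struct(board, MyCount,
                                                        mem_count);
}

static void count_access(unsigned int cpu_index)
{
    /* cpu_index selects this vCPU's own slot: no contention, no lock. */
    qemu_plugin_u64_add(my_mem_count, cpu_index, 1);
}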
On 14:56 Tue 27 Feb , Pierrick Bouvier wrote:
> Hi Luc,
>
> On 2/27/24 1:35 PM, Luc Michel wrote:
> > Hi Pierrick,
> >
> > On 13:14 Mon 26 Feb , Pierrick Bouvier wrote:
> > > Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
> > > ---
<snip>
> > I see that you merged inline and callback counts into the same variable.
> >
> > I wonder... For this test, don't you want to keep a plain uint64_t for
> > callback counts? I have the feeling that this test was made so one can
> > make sure inline and callback counts match.
> >
> Indeed, the new API guarantees thread safety (the current inline
> implementation was racy), so this plugin now gives the exact same
> result whether you use inline ops or callbacks. What's more, the
> callback-based approach can be implemented without any lock, as we
> count per vCPU. Thus, it's faster and safer.
>
> Regarding the "testing" side, this series introduces a new plugin,
> tests/plugin/inline.c, which exercises all those cases in a single
> plugin, so the other plugins don't need to offer a way to test this.

I see! Then:

Reviewed-by: Luc Michel <luc.michel@amd.com>

> Thanks for your review.
<snip>
Luc Michel <luc.michel@amd.com> writes:

> Hi Pierrick,
>
> On 13:14 Mon 26 Feb , Pierrick Bouvier wrote:
>> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
>> ---
>>  tests/plugin/mem.c | 40 +++++++++++++++++++++++++---------------
>>  1 file changed, 25 insertions(+), 15 deletions(-)
>>
>> diff --git a/tests/plugin/mem.c b/tests/plugin/mem.c
>> index 44e91065ba7..d4729f5e015 100644
>> --- a/tests/plugin/mem.c
>> +++ b/tests/plugin/mem.c
>> @@ -16,9 +16,14 @@
>>
>>  QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
>>
>> -static uint64_t inline_mem_count;
>> -static uint64_t cb_mem_count;
>> -static uint64_t io_count;
>> +typedef struct {
>> +    uint64_t mem_count;
>> +    uint64_t io_count;
>> +} CPUCount;
>> +
>> +static struct qemu_plugin_scoreboard *counts;
>> +static qemu_plugin_u64 mem_count;
>> +static qemu_plugin_u64 io_count;
>
> I see that you merged inline and callback counts into the same variable.
>
> I wonder... For this test, don't you want to keep a plain uint64_t for
> callback counts? I have the feeling that this test was made so one can
> make sure inline and callback counts match.

Indeed the problem now is double counting:

➜ ./qemu-hppa -plugin ./tests/plugin/libmem.so,inline=true -d plugin ./tests/tcg/hppa-linux-user/sha512
1..10
ok 1 - do_test(&tests[i])
ok 2 - do_test(&tests[i])
ok 3 - do_test(&tests[i])
ok 4 - do_test(&tests[i])
ok 5 - do_test(&tests[i])
ok 6 - do_test(&tests[i])
ok 7 - do_test(&tests[i])
ok 8 - do_test(&tests[i])
ok 9 - do_test(&tests[i])
ok 10 - do_test(&tests[i])
mem accesses: 262917
On 2/29/24 2:08 AM, Alex Bennée wrote:
> Luc Michel <luc.michel@amd.com> writes:
>
>> Hi Pierrick,
>>
>> On 13:14 Mon 26 Feb , Pierrick Bouvier wrote:
<snip>
>
> Indeed the problem now is double counting:
>
> ➜ ./qemu-hppa -plugin ./tests/plugin/libmem.so,inline=true -d plugin ./tests/tcg/hppa-linux-user/sha512
<snip>
> mem accesses: 262917

My bad. Other plugins enable only inline when both are supplied, so I
missed this here.
I added an explicit error when the user enables callback and inline at
the same time on this plugin.
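The explicit error Pierrick mentions is not quoted anywhere in this thread; under the assumption that it follows the existing argument-handling shape of mem.c (do_inline/do_callback flags, fprintf on bad options, non-zero return from qemu_plugin_install on failure), it could look like this sketch, not the actual v6 change:

/* Hypothetical guard in qemu_plugin_install(), after argument parsing:
 * refuse the combination where both instrumentation paths would add to
 * the same scoreboard slot and double the reported total. */
if (do_inline && do_callback) {
    fprintf(stderr,
            "can't enable inline and callback counting at the same time\n");
    return -1;
}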
Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:

> On 2/29/24 2:08 AM, Alex Bennée wrote:
>> Luc Michel <luc.michel@amd.com> writes:
>>
>>> Hi Pierrick,
>>>
<snip>
>>
>
> My bad. Other plugins enable only inline when both are supplied, so I
> missed this here.
> I added an explicit error when the user enables callback and inline at
> the same time on this plugin.

We could default to inline unless we need the extra features of
callbacks?
On 2/29/24 11:08 AM, Alex Bennée wrote:
> Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:
>
>> On 2/29/24 2:08 AM, Alex Bennée wrote:
>>> Luc Michel <luc.michel@amd.com> writes:
>>>
>>>> Hi Pierrick,
>>>>
> <snip>
>>>
>>
>> My bad. Other plugins enable only inline when both are supplied, so I
>> missed this here.
>> I added an explicit error when the user enables callback and inline at
>> the same time on this plugin.
>
> We could default to inline unless we need the extra features of
> callbacks?

This remark applies to all plugins in general. Now that we have safe
and correct inline operations, callbacks are not needed in some cases.

Would it be acceptable to you to delay this "cleanup" of existing
plugins until after soft freeze, so we can focus on merging the current
API changes?
Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:

> On 2/29/24 11:08 AM, Alex Bennée wrote:
>> Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:
>>
>>> On 2/29/24 2:08 AM, Alex Bennée wrote:
>>>> Luc Michel <luc.michel@amd.com> writes:
>>>>
>>>>> Hi Pierrick,
>>>>>
>> <snip>
>>>>
>>>
>>> My bad. Other plugins enable only inline when both are supplied, so I
>>> missed this here.
>>> I added an explicit error when the user enables callback and inline at
>>> the same time on this plugin.
>> We could default to inline unless we need the extra features of
>> callbacks?
>>
>
> This remark applies to all plugins in general. Now that we have safe
> and correct inline operations, callbacks are not needed in some cases.
>
> Would it be acceptable to you to delay this "cleanup" of existing
> plugins until after soft freeze, so we can focus on merging the current
> API changes?

As long as we fix the double reporting bug, sure.
On 2/29/24 5:46 PM, Alex Bennée wrote:
> Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:
>
>> On 2/29/24 11:08 AM, Alex Bennée wrote:
>>> Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:
>>>
>>>> On 2/29/24 2:08 AM, Alex Bennée wrote:
>>>>> Luc Michel <luc.michel@amd.com> writes:
>>>>>
>>>>>> Hi Pierrick,
>>>>>>
>>> <snip>
>>>>>
>>>>
>>>> My bad. Other plugins enable only inline when both are supplied, so I
>>>> missed this here.
>>>> I added an explicit error when the user enables callback and inline at
>>>> the same time on this plugin.
>>> We could default to inline unless we need the extra features of
>>> callbacks?
>>>
>>
>> This remark applies to all plugins in general. Now that we have safe
>> and correct inline operations, callbacks are not needed in some cases.
>>
>> Would it be acceptable to you to delay this "cleanup" of existing
>> plugins until after soft freeze, so we can focus on merging the current
>> API changes?
>
> As long as we fix the double reporting bug, sure.

Yes, fixed in v6.
diff --git a/tests/plugin/mem.c b/tests/plugin/mem.c
index 44e91065ba7..d4729f5e015 100644
--- a/tests/plugin/mem.c
+++ b/tests/plugin/mem.c
@@ -16,9 +16,14 @@

 QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

-static uint64_t inline_mem_count;
-static uint64_t cb_mem_count;
-static uint64_t io_count;
+typedef struct {
+    uint64_t mem_count;
+    uint64_t io_count;
+} CPUCount;
+
+static struct qemu_plugin_scoreboard *counts;
+static qemu_plugin_u64 mem_count;
+static qemu_plugin_u64 io_count;
 static bool do_inline, do_callback;
 static bool do_haddr;
 static enum qemu_plugin_mem_rw rw = QEMU_PLUGIN_MEM_RW;
@@ -27,16 +32,16 @@ static void plugin_exit(qemu_plugin_id_t id, void *p)
 {
     g_autoptr(GString) out = g_string_new("");

-    if (do_inline) {
-        g_string_printf(out, "inline mem accesses: %" PRIu64 "\n", inline_mem_count);
-    }
-    if (do_callback) {
-        g_string_append_printf(out, "callback mem accesses: %" PRIu64 "\n", cb_mem_count);
+    if (do_inline || do_callback) {
+        g_string_printf(out, "mem accesses: %" PRIu64 "\n",
+                        qemu_plugin_u64_sum(mem_count));
     }
     if (do_haddr) {
-        g_string_append_printf(out, "io accesses: %" PRIu64 "\n", io_count);
+        g_string_append_printf(out, "io accesses: %" PRIu64 "\n",
+                               qemu_plugin_u64_sum(io_count));
     }
     qemu_plugin_outs(out->str);
+    qemu_plugin_scoreboard_free(counts);
 }

 static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo,
@@ -46,12 +51,12 @@ static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo,
         struct qemu_plugin_hwaddr *hwaddr;
         hwaddr = qemu_plugin_get_hwaddr(meminfo, vaddr);
         if (qemu_plugin_hwaddr_is_io(hwaddr)) {
-            io_count++;
+            qemu_plugin_u64_add(io_count, cpu_index, 1);
         } else {
-            cb_mem_count++;
+            qemu_plugin_u64_add(mem_count, cpu_index, 1);
         }
     } else {
-        cb_mem_count++;
+        qemu_plugin_u64_add(mem_count, cpu_index, 1);
     }
 }

@@ -64,9 +69,10 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
         struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);

         if (do_inline) {
-            qemu_plugin_register_vcpu_mem_inline(insn, rw,
-                                                 QEMU_PLUGIN_INLINE_ADD_U64,
-                                                 &inline_mem_count, 1);
+            qemu_plugin_register_vcpu_mem_inline_per_vcpu(
+                insn, rw,
+                QEMU_PLUGIN_INLINE_ADD_U64,
+                mem_count, 1);
         }
         if (do_callback) {
             qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem,
@@ -117,6 +123,10 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
         }
     }

+    counts = qemu_plugin_scoreboard_new(sizeof(CPUCount));
+    mem_count = qemu_plugin_scoreboard_u64_in_struct(
+        counts, CPUCount, mem_count);
+    io_count = qemu_plugin_scoreboard_u64_in_struct(counts, CPUCount, io_count);
     qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
     qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
     return 0;
Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
---
 tests/plugin/mem.c | 40 +++++++++++++++++++++++++---------------
 1 file changed, 25 insertions(+), 15 deletions(-)