Message ID | 20230530083526.2174430-9-fei2.wu@intel.com (mailing list archive) |
---|---|
State | New, archived |
Series | TCG code quality tracking |
On 5/30/23 01:35, Fei Wu wrote:
> +static void do_dump_tbs_info(int total, int sort_by)
> +{
> + id = 1;
> + GList *i;
> + int count = total;
> +
> + g_list_free(last_search);
> + last_search = NULL;
> +
> + qht_iter(&tb_ctx.tb_stats, collect_tb_stats, NULL);
> +
> + last_search = g_list_sort_with_data(last_search, inverse_sort_tbs,
> + &sort_by);
> +

Why are you sorting on a list and not an array?
Intuitively, sorting a list of 1 million elements seems like the wrong
choice.

Why did you put all the comparisons in one inverse_sort_tbs function,
and require examining sort_by? Better would be N sorting functions
where sort_by is evaluated once before starting the sort.


> +++ b/disas/disas.c
> @@ -8,6 +8,8 @@
> #include "hw/core/cpu.h"
> #include "exec/memory.h"
>
> +#include "qemu/log-for-trace.h"
>
> /* Filled in by elfload.c. Simplistic, but will do for now. */
> struct syminfo *syminfos = NULL;
>
> @@ -199,6 +201,24 @@ static void initialize_debug_host(CPUDebug *s)
> #endif
> }
>
> +static int
> +__attribute__((format(printf, 2, 3)))
> +fprintf_log(FILE *a, const char *b, ...)
> +{
> + va_list ap;
> + va_start(ap, b);
> +
> + if (!to_string) {
> + vfprintf(a, b, ap);
> + } else {
> + qemu_vlog(b, ap);
> + }
> +
> + va_end(ap);
> +
> + return 1;
> +}
> +

No need for this either. Global variable being checked on each
callback, instead of selecting the proper callback earlier -- preferably
without the global variable.

Did you really need something different than monitor_disas? You almost
certainly want to read physical memory and not virtual anyway.

> +void qemu_log_to_monitor(bool enable)
> +{
> + to_monitor = enable;
> +}
> +
> +void qemu_log_to_string(bool enable, GString *s)
> +{
> + to_string = enable;
> + string = s;
> +}

What are these for, and why do you need them?
Why would to_string ever be anything other than (string != NULL)?
Why are you using such very generic names for global variables?


r~
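For illustration, a minimal sketch of the shape being asked for here, assuming the TBStatistics fields and SORT_BY_* values from the patch; the helper names (cmp_hotness, cmp_spills, collect_tb_stats_array, sort_tbs) are made up for the example, the SORT_BY_HG comparator is omitted, and note that g_ptr_array_sort() hands the comparator pointers to the array slots, hence the double dereference:

    /* Collect into a flat array once, then sort with a comparator chosen up front. */
    static gint cmp_hotness(gconstpointer a, gconstpointer b)
    {
        const TBStatistics *t1 = *(TBStatistics * const *) a;
        const TBStatistics *t2 = *(TBStatistics * const *) b;
        unsigned long c1 = stat_per_translation(t1, executions.normal);
        unsigned long c2 = stat_per_translation(t2, executions.normal);
        return c1 < c2 ? 1 : c1 == c2 ? 0 : -1;
    }

    static gint cmp_spills(gconstpointer a, gconstpointer b)
    {
        const TBStatistics *t1 = *(TBStatistics * const *) a;
        const TBStatistics *t2 = *(TBStatistics * const *) b;
        unsigned long c1 = stat_per_translation(t1, code.spills);
        unsigned long c2 = stat_per_translation(t2, code.spills);
        return c1 < c2 ? 1 : c1 == c2 ? 0 : -1;
    }

    static void collect_tb_stats_array(void *p, uint32_t hash, void *userp)
    {
        g_ptr_array_add((GPtrArray *) userp, p);
    }

    static void sort_tbs(GPtrArray *array, int sort_by)
    {
        /* sort_by is examined exactly once, before the sort begins */
        GCompareFunc cmp = sort_by == SORT_BY_SPILLS ? cmp_spills : cmp_hotness;
        g_ptr_array_sort(array, cmp);
    }

    /* usage: */
    GPtrArray *array = g_ptr_array_new();
    qht_iter(&tb_ctx.tb_stats, collect_tb_stats_array, array);
    sort_tbs(array, sort_by);

Sorting a flat GPtrArray this way also avoids walking a million-element linked list.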
On 6/1/2023 10:40 AM, Richard Henderson wrote:
> On 5/30/23 01:35, Fei Wu wrote:
>> +static void do_dump_tbs_info(int total, int sort_by)
>> +{
>> + id = 1;
>> + GList *i;
>> + int count = total;
>> +
>> + g_list_free(last_search);
>> + last_search = NULL;
>> +
>> + qht_iter(&tb_ctx.tb_stats, collect_tb_stats, NULL);
>> +
>> + last_search = g_list_sort_with_data(last_search, inverse_sort_tbs,
>> + &sort_by);
>> +
>
> Why are you sorting on a list and not an array?
> Intuitively, sorting a list of 1 million elements seems like the wrong
> choice.
>
> Why did you put all the comparisons in one inverse_sort_tbs function,
> and require examining sort_by? Better would be N sorting functions
> where sort_by is evaluated once before starting the sort.
>
>
>> +++ b/disas/disas.c
>> @@ -8,6 +8,8 @@
>> #include "hw/core/cpu.h"
>> #include "exec/memory.h"
>>
>> +#include "qemu/log-for-trace.h"
>>
>> /* Filled in by elfload.c. Simplistic, but will do for now. */
>> struct syminfo *syminfos = NULL;
>>
>> @@ -199,6 +201,24 @@ static void initialize_debug_host(CPUDebug *s)
>> #endif
>> }
>>
>> +static int
>> +__attribute__((format(printf, 2, 3)))
>> +fprintf_log(FILE *a, const char *b, ...)
>> +{
>> + va_list ap;
>> + va_start(ap, b);
>> +
>> + if (!to_string) {
>> + vfprintf(a, b, ap);
>> + } else {
>> + qemu_vlog(b, ap);
>> + }
>> +
>> + va_end(ap);
>> +
>> + return 1;
>> +}
>> +
>
> No need for this either. Global variable being checked on each
> callback, instead of selecting the proper callback earlier -- preferably
> without the global variable.
>
> Did you really need something different than monitor_disas? You almost
> certainly want to read physical memory and not virtual anyway.
>
This makes me question the necessity of 'info tb': the primary extra info
it adds is the guest instructions, which can be obtained from 'x/i' w/o
calling tb_gen_code.

(qemu) info tb 1
------------------------------

TB id:1 | phys:0x79bc86 virt:0xffffffff8059bc86 flags:0x01024001 0 inv/1
| exec:56962444/0 guest inst cov:0.61%
| trans:1 ints: g:8 op:27 op_opt:22 spills:0
| h/g (host bytes / guest insts): 26.000000
| time to gen at 2.4GHz => code:747.08(ns) IR:477.92(ns)

----------------
IN:
Priv: 1; Virt: 0

0xffffffff8059bc86: 00000013 nop
0xffffffff8059bc8a: 00000013 nop
0xffffffff8059bc8e: 00000013 nop
0xffffffff8059bc92: 00000013 nop
0xffffffff8059bc96: 1141 addi sp,sp,-16
0xffffffff8059bc98: e422 sd s0,8(sp)
0xffffffff8059bc9a: 0800 addi s0,sp,16
0xffffffff8059bc9c: c0102573 rdtime a0
------------------------------

(qemu) x/8i 0xffffffff8059bc86
x/8i 0xffffffff8059bc86
0xffffffff8059bc86: 00000013 nop
0xffffffff8059bc8a: 00000013 nop
0xffffffff8059bc8e: 00000013 nop
0xffffffff8059bc92: 00000013 nop
0xffffffff8059bc96: 1141 addi sp,sp,-16
0xffffffff8059bc98: e422 sd s0,8(sp)
0xffffffff8059bc9a: 0800 addi s0,sp,16
0xffffffff8059bc9c: c0102573 rdtime a0


Thanks,
Fei.

>> +void qemu_log_to_monitor(bool enable)
>> +{
>> + to_monitor = enable;
>> +}
>> +
>> +void qemu_log_to_string(bool enable, GString *s)
>> +{
>> + to_string = enable;
>> + string = s;
>> +}
>
> What are these for, and why do you need them?
> Why would to_string ever be anything other than (string != NULL)?
> Why are you using such very generic names for global variables?
>
>
> r~
On 6/1/2023 8:12 PM, Wu, Fei wrote:
> On 6/1/2023 10:40 AM, Richard Henderson wrote:
>> On 5/30/23 01:35, Fei Wu wrote:
>>> +static void do_dump_tbs_info(int total, int sort_by)
>>> +{
>>> + id = 1;
>>> + GList *i;
>>> + int count = total;
>>> +
>>> + g_list_free(last_search);
>>> + last_search = NULL;
>>> +
>>> + qht_iter(&tb_ctx.tb_stats, collect_tb_stats, NULL);
>>> +
>>> + last_search = g_list_sort_with_data(last_search, inverse_sort_tbs,
>>> + &sort_by);
>>> +
>>
>> Why are you sorting on a list and not an array?
>> Intuitively, sorting a list of 1 million elements seems like the wrong
>> choice.
>>
>> Why did you put all the comparisons in one inverse_sort_tbs function,
>> and require examining sort_by? Better would be N sorting functions
>> where sort_by is evaluated once before starting the sort.
>>
>>
>>> +++ b/disas/disas.c
>>> @@ -8,6 +8,8 @@
>>> #include "hw/core/cpu.h"
>>> #include "exec/memory.h"
>>>
>>> +#include "qemu/log-for-trace.h"
>>> +
>>> /* Filled in by elfload.c. Simplistic, but will do for now. */
>>> struct syminfo *syminfos = NULL;
>>>
>>> @@ -199,6 +201,24 @@ static void initialize_debug_host(CPUDebug *s)
>>> #endif
>>> }
>>>
>>> +static int
>>> +__attribute__((format(printf, 2, 3)))
>>> +fprintf_log(FILE *a, const char *b, ...)
>>> +{
>>> + va_list ap;
>>> + va_start(ap, b);
>>> +
>>> + if (!to_string) {
>>> + vfprintf(a, b, ap);
>>> + } else {
>>> + qemu_vlog(b, ap);
>>> + }
>>> +
>>> + va_end(ap);
>>> +
>>> + return 1;
>>> +}
>>> +
>>
>> No need for this either. Global variable being checked on each
>> callback, instead of selecting the proper callback earlier -- preferably
>> without the global variable.
>>
>> Did you really need something different than monitor_disas? You almost
>> certainly want to read physical memory and not virtual anyway.
>>
'info tb' has an extra flag to select dumping out_asm, in_asm and op, so it
covers more use cases than monitor_disas. I think it's nice to have but not
strictly required; usually we get a general idea from the IN ops plus the
other counters such as g/op/... in the output.

Right now I am inclined to switch to monitor_disas: I found that re-generating
the code causes a guest kernel panic for certain virtual addresses, and it
happens with the original v10 too.

------------------------------

TB id:1998 | phys:0x9c5f5ce2 virt:0x00ffffff8ff8ece2 flags:0x00024018 0 inv/1
| exec:68/0 guest inst cov:0.00%
| trans:1 ints: g:1 op:17 op_opt:16 spills:0
| h/g (host bytes / guest insts): 129.000000
| time to gen at 2.4GHz => code:1960.42(ns) IR:915.83(ns)

could not gen code for this TB (no longer mapped?)
------------------------------
[ 98.213363] Unable to handle kernel paging request at virtual address 00ffffff8ff8ece2
[ 98.219153] Oops [#1]
[ 98.219299] Modules linked in: binfmt_misc nls_ascii nls_cp437 vfat fat cfg80211 rfkill drm fuse drm_panel_orientation_quirks configfs i2c_core ip_tables x_tables autofs4 ext4 crc32c_generic crc16 mbcache jbd2 virtio_net net_failover virtio_blk failover virtio_mmio virtio virtio_ring
[ 98.220832] CPU: 1 PID: 0 Comm: swapper/1 Not tainted 6.2.0+ #1
[ 98.221122] Hardware name: riscv-virtio,qemu (DT)
[ 98.221330] epc : arch_cpu_idle+0x1e/0x28
[ 98.221985] ra : default_idle_call+0x3c/0xb8
[ 98.222150] epc : ffffffff80004398 ra : ffffffff808a0bc2 sp : ff2000000068bf40
[ 98.222339] gp : ffffffff815688e8 tp : ff6000007ffcc240 t0 : ff2000000064bad8
[ 98.222562] t1 : 0000000000000001 t2 : ffffffff80e02ed0 s0 : ff2000000068bf50
[ 98.222780] s1 : 0000000000000001 a0 : 00000000000154ac a1 : ff6000027d0f3000
[ 98.223001] a2 : ff6000007ffcc240 a3 : 0000000000000001 a4 : ff6000027d0f3000
[ 98.223217] a5 : ff600001fdd5d500 a6 : 4000000000000000 a7 : 0000000000000000
[ 98.223427] s2 : ffffffff8156a220 s3 : ffffffff8156a418 s4 : ffffffff80c6ba18
[ 98.223636] s5 : ffffffff815a9588 s6 : 0000000000000000 s7 : 0000000000000000
[ 98.223853] s8 : fffffffffffffffe s9 : 000000008003d6a8 s10: 0000000000000000
[ 98.224065] s11: 0000000000000000 t3 : ffffffff8156a438 t4 : 0000000000000000
[ 98.224272] t5 : 0000000000000000 t6 : 0000000000000000
[ 98.224436] status: 0000000200000100 badaddr: 00ffffff8ff8ece2 cause: 000000000000000c
[ 98.224778] [<ffffffff80004398>] arch_cpu_idle+0x1e/0x28
[ 98.226393] ---[ end trace 0000000000000000 ]---
[ 98.226770] Kernel panic - not syncing: Attempted to kill the idle task!
[ 98.227188] SMP: stopping secondary CPUs
[ 98.227980] ---[ end Kernel panic - not syncing: Attempted to kill the idle task! ]---

Thanks,
Fei.

> This makes me question the necessity of 'info tb': the primary extra info
> it adds is the guest instructions, which can be obtained from 'x/i' w/o
> calling tb_gen_code.
>
> (qemu) info tb 1
> ------------------------------
>
> TB id:1 | phys:0x79bc86 virt:0xffffffff8059bc86 flags:0x01024001 0 inv/1
> | exec:56962444/0 guest inst cov:0.61%
> | trans:1 ints: g:8 op:27 op_opt:22 spills:0
> | h/g (host bytes / guest insts): 26.000000
> | time to gen at 2.4GHz => code:747.08(ns) IR:477.92(ns)
>
> ----------------
> IN:
> Priv: 1; Virt: 0
>
> 0xffffffff8059bc86: 00000013 nop
> 0xffffffff8059bc8a: 00000013 nop
> 0xffffffff8059bc8e: 00000013 nop
> 0xffffffff8059bc92: 00000013 nop
> 0xffffffff8059bc96: 1141 addi sp,sp,-16
> 0xffffffff8059bc98: e422 sd s0,8(sp)
> 0xffffffff8059bc9a: 0800 addi s0,sp,16
> 0xffffffff8059bc9c: c0102573 rdtime a0
> ------------------------------
>
> (qemu) x/8i 0xffffffff8059bc86
> x/8i 0xffffffff8059bc86
> 0xffffffff8059bc86: 00000013 nop
> 0xffffffff8059bc8a: 00000013 nop
> 0xffffffff8059bc8e: 00000013 nop
> 0xffffffff8059bc92: 00000013 nop
> 0xffffffff8059bc96: 1141 addi sp,sp,-16
> 0xffffffff8059bc98: e422 sd s0,8(sp)
> 0xffffffff8059bc9a: 0800 addi s0,sp,16
> 0xffffffff8059bc9c: c0102573 rdtime a0
>
>
> Thanks,
> Fei.
>
>>> +void qemu_log_to_monitor(bool enable)
>>> +{
>>> + to_monitor = enable;
>>> +}
>>> +
>>> +void qemu_log_to_string(bool enable, GString *s)
>>> +{
>>> + to_string = enable;
>>> + string = s;
>>> +}
>>
>> What are these for, and why do you need them?
>> Why would to_string ever be anything other than (string != NULL)?
>> Why are you using such very generic names for global variables?
>>
>>
>> r~
>
On 6/1/2023 10:40 AM, Richard Henderson wrote:
>> +static int
>> +__attribute__((format(printf, 2, 3)))
>> +fprintf_log(FILE *a, const char *b, ...)
>> +{
>> + va_list ap;
>> + va_start(ap, b);
>> +
>> + if (!to_string) {
>> + vfprintf(a, b, ap);
>> + } else {
>> + qemu_vlog(b, ap);
>> + }
>> +
>> + va_end(ap);
>> +
>> + return 1;
>> +}
>> +
>
> No need for this either. Global variable being checked on each
> callback, instead of selecting the proper callback earlier -- preferably
> without the global variable.
>
> Did you really need something different than monitor_disas? You almost
> certainly want to read physical memory and not virtual anyway.
>
Yes, it's usually okay for kernel address, but cannot re-gen the code
for userspace virtual address (guest kernel panic on riscv guest). I
tried monitor_disas() but it failed to disas too:
monitor_disas(mon, mon_get_cpu(mon), tbs->phys_pc, num_inst, true);

How to use this function correctly?

Thanks,
Fei.

>> +void qemu_log_to_monitor(bool enable)
>> +{
>> + to_monitor = enable;
>> +}
>> +
>> +void qemu_log_to_string(bool enable, GString *s)
>> +{
>> + to_string = enable;
>> + string = s;
>> +}
>
> What are these for, and why do you need them?
> Why would to_string ever be anything other than (string != NULL)?
> Why are you using such very generic names for global variables?
>
>
> r~
On 6/7/2023 8:49 PM, Wu, Fei wrote:
> On 6/1/2023 10:40 AM, Richard Henderson wrote:
>>> +static int
>>> +__attribute__((format(printf, 2, 3)))
>>> +fprintf_log(FILE *a, const char *b, ...)
>>> +{
>>> + va_list ap;
>>> + va_start(ap, b);
>>> +
>>> + if (!to_string) {
>>> + vfprintf(a, b, ap);
>>> + } else {
>>> + qemu_vlog(b, ap);
>>> + }
>>> +
>>> + va_end(ap);
>>> +
>>> + return 1;
>>> +}
>>> +
>>
>> No need for this either. Global variable being checked on each
>> callback, instead of selecting the proper callback earlier -- preferably
>> without the global variable.
>>
>> Did you really need something different than monitor_disas? You almost
>> certainly want to read physical memory and not virtual anyway.
>>
> Yes, it's usually okay for kernel address, but cannot re-gen the code
> for userspace virtual address (guest kernel panic on riscv guest). I
> tried monitor_disas() but it failed to disas too:
> monitor_disas(mon, mon_get_cpu(mon), tbs->phys_pc, num_inst, true);
>
> How to use this function correctly?
>
'phys_pc' in tbs is returned by get_page_addr_code_hostp(), which is not
guest phys address actually, but ram_addr_t instead, so it's always
wrong for monitor_disas. After some dirty changes, tbs can record the
guest pa. Now we can disas both kernel and user space code. But still,
no code is regenerated, disas in 'info tb' is just a convenient way to 'xp'.

Is there any existing function to convert ram_addr_t to guest pa?

(qemu) info tb-list 2
TB id:0 | phys:0x11b97762e virt:0x00aaaaaab76c262e flags:0x00024010 0 inv/1
| exec:4447539979/0 guest inst cov:69.06%
| trans:1 ints: g:8 op:28 op_opt:23 spills:0
| h/g (host bytes / guest insts): 37.000000

TB id:1 | phys:0x8063474e virt:0xffffffff8043474e flags:0x01024001 0 inv/1
| exec:131719290/0 guest inst cov:2.38%
| trans:1 ints: g:9 op:37 op_opt:35 spills:0
| h/g (host bytes / guest insts): 51.555557

(qemu) info tb 0
------------------------------

TB id:0 | phys:0x11b97762e virt:0x00aaaaaab76c262e flags:0x00024010 0 inv/1
| exec:5841751800/0 guest inst cov:69.06%
| trans:1 ints: g:8 op:28 op_opt:23 spills:0
| h/g (host bytes / guest insts): 37.000000

0x11b97762e: 00002797 auipc a5,8192 # 0x11b97962e
0x11b977632: a2278793 addi a5,a5,-1502
0x11b977636: 639c ld a5,0(a5)
0x11b977638: 00178713 addi a4,a5,1
0x11b97763c: 00002797 auipc a5,8192 # 0x11b97963c
0x11b977640: a1478793 addi a5,a5,-1516
0x11b977644: e398 sd a4,0(a5)
0x11b977646: b7e5 j -24 # 0x11b97762e
------------------------------

(qemu) xp/8i 0x11b97762e
0x11b97762e: 00002797 auipc a5,8192 # 0x11b97962e
0x11b977632: a2278793 addi a5,a5,-1502
0x11b977636: 639c ld a5,0(a5)
0x11b977638: 00178713 addi a4,a5,1
0x11b97763c: 00002797 auipc a5,8192 # 0x11b97963c
0x11b977640: a1478793 addi a5,a5,-1516
0x11b977644: e398 sd a4,0(a5)
0x11b977646: b7e5 j -24 # 0x11b97762e
(qemu)

(qemu) info tb 1
------------------------------

TB id:1 | phys:0x8063474e virt:0xffffffff8043474e flags:0x01024001 0 inv/1
| exec:131719290/0 guest inst cov:2.38%
| trans:1 ints: g:9 op:37 op_opt:35 spills:0
| h/g (host bytes / guest insts): 51.555557

0x8063474e: 00194a83 lbu s5,1(s2)
0x80634752: 00094803 lbu a6,0(s2)
0x80634756: 0b09 addi s6,s6,2
0x80634758: 008a9a9b slliw s5,s5,8
0x8063475c: 01586833 or a6,a6,s5
0x80634760: ff0b1f23 sh a6,-2(s6)
0x80634764: 1c7d addi s8,s8,-1
0x80634766: 0909 addi s2,s2,2
0x80634768: fe0c13e3 bnez s8,-26 # 0x8063474e
------------------------------

(qemu) xp/9i 0x8063474e
0x8063474e: 00194a83 lbu s5,1(s2)
0x80634752: 00094803 lbu a6,0(s2)
0x80634756: 0b09 addi s6,s6,2
0x80634758: 008a9a9b slliw s5,s5,8
0x8063475c: 01586833 or a6,a6,s5
0x80634760: ff0b1f23 sh a6,-2(s6)
0x80634764: 1c7d addi s8,s8,-1
0x80634766: 0909 addi s2,s2,2
0x80634768: fe0c13e3 bnez s8,-26 # 0x8063474e

Thanks,
Fei.

> Thanks,
> Fei.
>
>>> +void qemu_log_to_monitor(bool enable)
>>> +{
>>> + to_monitor = enable;
>>> +}
>>> +
>>> +void qemu_log_to_string(bool enable, GString *s)
>>> +{
>>> + to_string = enable;
>>> + string = s;
>>> +}
>>
>> What are these for, and why do you need them?
>> Why would to_string ever be anything other than (string != NULL)?
>> Why are you using such very generic names for global variables?
>>
>>
>> r~
>
On Thu, 8 Jun 2023 at 08:44, Wu, Fei <fei2.wu@intel.com> wrote:
>
> On 6/7/2023 8:49 PM, Wu, Fei wrote:
> > On 6/1/2023 10:40 AM, Richard Henderson wrote:
> >> Did you really need something different than monitor_disas? You almost
> >> certainly want to read physical memory and not virtual anyway.
> >>
> > Yes, it's usually okay for kernel address, but cannot re-gen the code
> > for userspace virtual address (guest kernel panic on riscv guest). I
> > tried monitor_disas() but it failed to disas too:
> > monitor_disas(mon, mon_get_cpu(mon), tbs->phys_pc, num_inst, true);
> >
> > How to use this function correctly?
> >
> 'phys_pc' in tbs is returned by get_page_addr_code_hostp(), which is not
> guest phys address actually, but ram_addr_t instead, so it's always
> wrong for monitor_disas. After some dirty changes, tbs can record the
> guest pa. Now we can disas both kernel and user space code. But still,
> no code is regenerated, disas in 'info tb' is just a convenient way to 'xp'.
>
> Is there any existing function to convert ram_addr_t to guest pa?

Such a function would not be well-defined, because a block of RAM
as specified by a ram_addr_t could be present at (aliased to) multiple
guest physical addresses, or even currently not mapped to any guest
physical address at all. And it could be present at different physical
addresses for different vCPUs.

thanks
-- PMM
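To make the aliasing point concrete, here is a rough sketch of how a board model can make one block of RAM visible at two guest physical addresses via a memory region alias; the names, size and addresses below are invented for illustration and are not taken from any particular board:

    /* One RAM block, mapped at 0x4000_0000 and aliased again at 0x8000_0000. */
    MemoryRegion *ram = g_new0(MemoryRegion, 1);
    MemoryRegion *alias = g_new0(MemoryRegion, 1);

    memory_region_init_ram(ram, NULL, "demo.ram", 256 * MiB, &error_fatal);
    memory_region_add_subregion(get_system_memory(), 0x40000000, ram);

    memory_region_init_alias(alias, NULL, "demo.ram-alias", ram, 0, 256 * MiB);
    memory_region_add_subregion(get_system_memory(), 0x80000000, alias);

    /*
     * Any ram_addr_t inside "demo.ram" now corresponds to two guest physical
     * addresses at once, so a ram_addr_t -> gpa function has no single answer.
     */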
* Peter Maydell (peter.maydell@linaro.org) wrote:
> On Thu, 8 Jun 2023 at 08:44, Wu, Fei <fei2.wu@intel.com> wrote:
> >
> > On 6/7/2023 8:49 PM, Wu, Fei wrote:
> > > On 6/1/2023 10:40 AM, Richard Henderson wrote:
> > >> Did you really need something different than monitor_disas? You almost
> > >> certainly want to read physical memory and not virtual anyway.
> > >>
> > > Yes, it's usually okay for kernel address, but cannot re-gen the code
> > > for userspace virtual address (guest kernel panic on riscv guest). I
> > > tried monitor_disas() but it failed to disas too:
> > > monitor_disas(mon, mon_get_cpu(mon), tbs->phys_pc, num_inst, true);
> > >
> > > How to use this function correctly?
> > >
> > 'phys_pc' in tbs is returned by get_page_addr_code_hostp(), which is not
> > guest phys address actually, but ram_addr_t instead, so it's always
> > wrong for monitor_disas. After some dirty changes, tbs can record the
> > guest pa. Now we can disas both kernel and user space code. But still,
> > no code is regenerated, disas in 'info tb' is just a convenient way to 'xp'.
> >
> > Is there any existing function to convert ram_addr_t to guest pa?
>
> Such a function would not be well-defined, because a block of RAM
> as specified by a ram_addr_t could be present at (aliased to) multiple
> guest physical addresses, or even currently not mapped to any guest
> physical address at all. And it could be present at different physical
> addresses for different vCPUs.

True, but isn't there a similar mechanism for when an MCE happens
in the host memory?

Dave

> thanks
> -- PMM
On Thu, 8 Jun 2023 at 13:06, Dr. David Alan Gilbert <dave@treblig.org> wrote:
>
> * Peter Maydell (peter.maydell@linaro.org) wrote:
> > On Thu, 8 Jun 2023 at 08:44, Wu, Fei <fei2.wu@intel.com> wrote:
> > >
> > > On 6/7/2023 8:49 PM, Wu, Fei wrote:
> > > > On 6/1/2023 10:40 AM, Richard Henderson wrote:
> > > >> Did you really need something different than monitor_disas? You almost
> > > >> certainly want to read physical memory and not virtual anyway.
> > > >>
> > > > Yes, it's usually okay for kernel address, but cannot re-gen the code
> > > > for userspace virtual address (guest kernel panic on riscv guest). I
> > > > tried monitor_disas() but it failed to disas too:
> > > > monitor_disas(mon, mon_get_cpu(mon), tbs->phys_pc, num_inst, true);
> > > >
> > > > How to use this function correctly?
> > >
> > > 'phys_pc' in tbs is returned by get_page_addr_code_hostp(), which is not
> > > guest phys address actually, but ram_addr_t instead, so it's always
> > > wrong for monitor_disas. After some dirty changes, tbs can record the
> > > guest pa. Now we can disas both kernel and user space code. But still,
> > > no code is regenerated, disas in 'info tb' is just a convenient way to 'xp'.
> > >
> > > Is there any existing function to convert ram_addr_t to guest pa?
> >
> > Such a function would not be well-defined, because a block of RAM
> > as specified by a ram_addr_t could be present at (aliased to) multiple
> > guest physical addresses, or even currently not mapped to any guest
> > physical address at all. And it could be present at different physical
> > addresses for different vCPUs.
>
> True, but isn't there a similar mechanism for when an MCE happens
> in the host memory?

That goes via kvm_physical_memory_addr_from_host(), which looks up
the host address in the KVMSlot array (and just picks the first hit).
It won't work for TCG.

thanks
-- PMM
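For reference, the KVM-only reverse lookup mentioned above is shaped roughly like the sketch below (usage assumed from the x86 MCE handling path; host_ptr stands in for the host virtual address in question, and as noted there is no equivalent for TCG, so this is illustration rather than a suggested fix):

    /*
     * Ask KVM's memslot table which guest physical address backs a host
     * virtual address. Returns non-zero when a slot is found, and simply
     * picks the first matching slot if the RAM is aliased.
     */
    hwaddr gpa;
    if (kvm_enabled() &&
        kvm_physical_memory_addr_from_host(kvm_state, host_ptr, &gpa)) {
        /* gpa is one guest physical address backed by host_ptr */
    } else {
        /* no mapping found, or not running under KVM (e.g. TCG) */
    }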
On 6/8/2023 5:23 PM, Peter Maydell wrote:
> On Thu, 8 Jun 2023 at 08:44, Wu, Fei <fei2.wu@intel.com> wrote:
>>
>> On 6/7/2023 8:49 PM, Wu, Fei wrote:
>>> On 6/1/2023 10:40 AM, Richard Henderson wrote:
>>>> Did you really need something different than monitor_disas? You almost
>>>> certainly want to read physical memory and not virtual anyway.
>>>>
>>> Yes, it's usually okay for kernel address, but cannot re-gen the code
>>> for userspace virtual address (guest kernel panic on riscv guest). I
>>> tried monitor_disas() but it failed to disas too:
>>> monitor_disas(mon, mon_get_cpu(mon), tbs->phys_pc, num_inst, true);
>>>
>>> How to use this function correctly?
>>>
>> 'phys_pc' in tbs is returned by get_page_addr_code_hostp(), which is not
>> guest phys address actually, but ram_addr_t instead, so it's always
>> wrong for monitor_disas. After some dirty changes, tbs can record the
>> guest pa. Now we can disas both kernel and user space code. But still,
>> no code is regenerated, disas in 'info tb' is just a convenient way to 'xp'.
>>
>> Is there any existing function to convert ram_addr_t to guest pa?
>
> Such a function would not be well-defined, because a block of RAM
> as specified by a ram_addr_t could be present at (aliased to) multiple
> guest physical addresses, or even currently not mapped to any guest
> physical address at all. And it could be present at different physical
> addresses for different vCPUs.
>
Thank you, Peter. What's the scenario for the last one, different
physical addresses for different vCPUs?

For this specific case, I found I don't have to convert ram_addr_t to
gpa; the real requirement is converting ram_addr_t to hva, and this one
seems well defined using qemu_map_ram_ptr(0, ram_addr)?

Thanks,
Fei.

> thanks
> -- PMM
On Fri, 9 Jun 2023 at 15:32, Wu, Fei <fei2.wu@intel.com> wrote:
>
> On 6/8/2023 5:23 PM, Peter Maydell wrote:
> > On Thu, 8 Jun 2023 at 08:44, Wu, Fei <fei2.wu@intel.com> wrote:
> >> Is there any existing function to convert ram_addr_t to guest pa?
> >
> > Such a function would not be well-defined, because a block of RAM
> > as specified by a ram_addr_t could be present at (aliased to) multiple
> > guest physical addresses, or even currently not mapped to any guest
> > physical address at all. And it could be present at different physical
> > addresses for different vCPUs.
> >
> Thank you, Peter. What's the scenario for the last one, different
> physical addresses for different vCPUs?

You can have a board with two different CPUs, where one of them
has a RAM MemoryRegion that it puts at address A in its address
space, and the other puts it at address B. This is most likely
with 'heterogeneous' configurations where you have an application
processor A and a board-support processor B. (I don't know if
we actually have any board models like that currently, but it's
logically possible.)

> For this specific case, I found I don't have to convert ram_addr_t to
> gpa; the real requirement is converting ram_addr_t to hva, and this one
> seems well defined using qemu_map_ram_ptr(0, ram_addr)?

That does work for ram_addr_t to hva, but
note the warning in the comment above that function:
you need to be in an RCU critical section to avoid some other
thread coming along and de-allocating the RAM under your feet.

(Also it's tempting to remove that support for passing in 0
as the RAMBlock, because every other use in the codebase seems
to pass in a RAMBlock pointer.)

thanks
-- PMM
On 6/9/2023 11:51 PM, Peter Maydell wrote:
> On Fri, 9 Jun 2023 at 15:32, Wu, Fei <fei2.wu@intel.com> wrote:
>>
>> On 6/8/2023 5:23 PM, Peter Maydell wrote:
>>> On Thu, 8 Jun 2023 at 08:44, Wu, Fei <fei2.wu@intel.com> wrote:
>>>> Is there any existing function to convert ram_addr_t to guest pa?
>>>
>>> Such a function would not be well-defined, because a block of RAM
>>> as specified by a ram_addr_t could be present at (aliased to) multiple
>>> guest physical addresses, or even currently not mapped to any guest
>>> physical address at all. And it could be present at different physical
>>> addresses for different vCPUs.
>>>
>> Thank you, Peter. What's the scenario for the last one, different
>> physical addresses for different vCPUs?
>
> You can have a board with two different CPUs, where one of them
> has a RAM MemoryRegion that it puts at address A in its address
> space, and the other puts it at address B. This is most likely
> with 'heterogeneous' configurations where you have an application
> processor A and a board-support processor B. (I don't know if
> we actually have any board models like that currently, but it's
> logically possible.)
>
>> For this specific case, I found I don't have to convert ram_addr_t to
>> gpa; the real requirement is converting ram_addr_t to hva, and this one
>> seems well defined using qemu_map_ram_ptr(0, ram_addr)?
>
> That does work for ram_addr_t to hva, but
> note the warning in the comment above that function:
> you need to be in an RCU critical section to avoid some other
> thread coming along and de-allocating the RAM under your feet.
>
> (Also it's tempting to remove that support for passing in 0
> as the RAMBlock, because every other use in the codebase seems
> to pass in a RAMBlock pointer.)
>
Got it, thank you very much.

Fei.

> thanks
> -- PMM
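A minimal sketch of what Peter's two suggestions could look like in the tb-stats code, assuming a RAMBlock pointer is recorded alongside the ram_addr_t; the function name disas_ram_addr and its block/len parameters are hypothetical:

    static void disas_ram_addr(RAMBlock *block, ram_addr_t addr, size_t len)
    {
        /* Hold the RCU read lock so the RAMBlock cannot go away under us. */
        RCU_READ_LOCK_GUARD();

        void *hva = qemu_map_ram_ptr(block, addr);

        /*
         * ... read or disassemble up to len bytes from hva here,
         * while still inside the RCU critical section ...
         */
    }

Passing the actual RAMBlock instead of 0/NULL also sidesteps the block lookup that qemu_map_ram_ptr() otherwise has to do from the address alone.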
diff --git a/accel/tcg/monitor.c b/accel/tcg/monitor.c index 2e00f10267..a1780c5920 100644 --- a/accel/tcg/monitor.c +++ b/accel/tcg/monitor.c @@ -8,6 +8,7 @@ #include "qemu/osdep.h" #include "qemu/accel.h" +#include "qemu/log.h" #include "qapi/error.h" #include "qapi/type-helpers.h" #include "qapi/qapi-commands-machine.h" @@ -18,6 +19,7 @@ #include "sysemu/cpu-timers.h" #include "sysemu/tcg.h" #include "exec/tb-stats.h" +#include "tb-context.h" #include "internal.h" @@ -132,6 +134,58 @@ void hmp_tbstats(Monitor *mon, const QDict *qdict) } +void hmp_info_tblist(Monitor *mon, const QDict *qdict) +{ + int number_int; + const char *sortedby_str = NULL; + if (!tcg_enabled()) { + error_report("TB information is only available with accel=tcg"); + return; + } + if (!tb_ctx.tb_stats.map) { + error_report("no TB information recorded"); + return; + } + + number_int = qdict_get_try_int(qdict, "number", 10); + sortedby_str = qdict_get_try_str(qdict, "sortedby"); + + int sortedby = SORT_BY_HOTNESS; + if (sortedby_str == NULL || strcmp(sortedby_str, "hotness") == 0) { + sortedby = SORT_BY_HOTNESS; + } else if (strcmp(sortedby_str, "hg") == 0) { + sortedby = SORT_BY_HG; + } else if (strcmp(sortedby_str, "spills") == 0) { + sortedby = SORT_BY_SPILLS; + } else { + error_report("valid sort options are: hotness hg spills"); + return; + } + + dump_tbs_info(number_int, sortedby, true); +} + +void hmp_info_tb(Monitor *mon, const QDict *qdict) +{ + const int id = qdict_get_int(qdict, "id"); + const char *flags = qdict_get_try_str(qdict, "flags"); + int mask; + + if (!tcg_enabled()) { + error_report("TB information is only available with accel=tcg"); + return; + } + + mask = flags ? qemu_str_to_log_mask(flags) : CPU_LOG_TB_IN_ASM; + + if (!mask) { + error_report("Unable to parse log flags, see 'help log'"); + return; + } + + dump_tb_info(id, mask, true); +} + HumanReadableText *qmp_x_query_profile(Error **errp) { g_autoptr(GString) buf = g_string_new(""); diff --git a/accel/tcg/tb-stats.c b/accel/tcg/tb-stats.c index 139f049ffc..4b358cb421 100644 --- a/accel/tcg/tb-stats.c +++ b/accel/tcg/tb-stats.c @@ -11,14 +11,18 @@ #include "disas/disas.h" #include "exec/exec-all.h" #include "tcg/tcg.h" +#include "qapi/error.h" #include "qemu/qemu-print.h" #include "qemu/timer.h" +#include "qemu/log.h" #include "exec/tb-stats.h" #include "exec/tb-flush.h" #include "tb-context.h" +#include "internal.h" + /* TBStatistic collection controls */ enum TBStatsStatus { TB_STATS_DISABLED = 0, @@ -31,8 +35,23 @@ static uint32_t default_tbstats_flag; /* only accessed in safe work */ static GList *last_search; +static int id = 1; /* display_id increment counter */ uint64_t dev_time; +static TBStatistics *get_tbstats_by_id(int id) +{ + GList *iter; + + for (iter = last_search; iter; iter = g_list_next(iter)) { + TBStatistics *tbs = iter->data; + if (tbs && tbs->display_id == id) { + return tbs; + break; + } + } + return NULL; +} + struct jit_profile_info { uint64_t translations; uint64_t aborted; @@ -294,6 +313,309 @@ void init_tb_stats_htable(void) } } +static void collect_tb_stats(void *p, uint32_t hash, void *userp) +{ + last_search = g_list_prepend(last_search, p); +} + +static void count_invalid_tbs(gpointer data, gpointer user_data) +{ + TranslationBlock *tb = (TranslationBlock *) data; + unsigned *counter = (unsigned *) user_data; + if (tb->cflags & CF_INVALID) { + *counter = *counter + 1; + } +} + +static int dump_tb_header(TBStatistics *tbs) +{ + unsigned g = stat_per_translation(tbs, code.num_guest_inst); + unsigned ops = 
stat_per_translation(tbs, code.num_tcg_ops); + unsigned ops_opt = stat_per_translation(tbs, code.num_tcg_ops_opt); + unsigned spills = stat_per_translation(tbs, code.spills); + unsigned h = stat_per_translation(tbs, code.out_len); + unsigned act = tbs->tbs->len; + unsigned invalid = 0; + + float guest_host_prop = g ? ((float) h / g) : 0; + + g_ptr_array_foreach(tbs->tbs, &count_invalid_tbs, &invalid); + + qemu_log("TB id:%d | phys:0x"TB_PAGE_ADDR_FMT" virt:0x"TARGET_FMT_lx + " flags:0x%08x %d inv/%d\n", + tbs->display_id, tbs->phys_pc, tbs->pc, tbs->flags, + invalid, act); + + if (tbs_stats_enabled(tbs, TB_EXEC_STATS)) { + qemu_log("\t| exec:%lu/%lu guest inst cov:%.2f%%\n", + tbs->executions.normal, + tbs->executions.atomic, tbs->executions.coverage / 100.0f); + } + + if (tbs_stats_enabled(tbs, TB_JIT_STATS)) { + qemu_log("\t| trans:%lu ints: g:%u op:%u op_opt:%u spills:%d" + "\n\t| h/g (host bytes / guest insts): %f\n", + tbs->translations.total, g, ops, ops_opt, spills, guest_host_prop); + } + + if (tbs_stats_enabled(tbs, TB_JIT_TIME)) { + qemu_log("\t| time to gen at 2.4GHz => code:%0.2lf(ns) IR:%0.2lf(ns)\n", + tbs->gen_times.code / 2.4, tbs->gen_times.ir / 2.4); + } + + qemu_log("\n"); + + return act - invalid; +} + +static gint +inverse_sort_tbs(gconstpointer p1, gconstpointer p2, gpointer psort_by) +{ + const TBStatistics *tbs1 = (TBStatistics *) p1; + const TBStatistics *tbs2 = (TBStatistics *) p2; + int sort_by = *((enum SortBy *) psort_by); + unsigned long c1 = 0; + unsigned long c2 = 0; + + if (sort_by == SORT_BY_SPILLS) { + c1 = stat_per_translation(tbs1, code.spills); + c2 = stat_per_translation(tbs2, code.spills); + } else if (sort_by == SORT_BY_HOTNESS) { + c1 = stat_per_translation(tbs1, executions.normal); + c2 = stat_per_translation(tbs2, executions.normal); + } else if (sort_by == SORT_BY_HG) { + if (tbs1->code.num_guest_inst == 0) { + return -1; + } + if (tbs2->code.num_guest_inst == 0) { + return 1; + } + + c1 = tbs1->code.out_len / tbs1->code.num_guest_inst; + c2 = tbs2->code.out_len / tbs2->code.num_guest_inst; + } + return c1 < c2 ? 1 : c1 == c2 ? 
0 : -1; +} + +static void dump_last_search_headers(int count) +{ + if (!last_search) { + qemu_log("No data collected yet\n"); + return; + } + + GList *l = last_search; + while (l != NULL && count--) { + TBStatistics *tbs = (TBStatistics *) l->data; + GList *next = l->next; + dump_tb_header(tbs); + l = next; + } +} + +static uint64_t calculate_last_search_coverages(void) +{ + uint64_t total_exec_count = 0; + GList *i; + + /* Compute total execution count for all tbs */ + for (i = last_search; i; i = i->next) { + TBStatistics *tbs = (TBStatistics *) i->data; + total_exec_count += + (tbs->executions.atomic + tbs->executions.normal) + * tbs->code.num_guest_inst; + } + + for (i = last_search; i; i = i->next) { + TBStatistics *tbs = (TBStatistics *) i->data; + uint64_t tb_total_execs = + (tbs->executions.atomic + tbs->executions.normal) + * tbs->code.num_guest_inst; + tbs->executions.coverage = + (10000 * tb_total_execs) / (total_exec_count + 1); + } + + return total_exec_count; +} + +static void do_dump_tbs_info(int total, int sort_by) +{ + id = 1; + GList *i; + int count = total; + + g_list_free(last_search); + last_search = NULL; + + qht_iter(&tb_ctx.tb_stats, collect_tb_stats, NULL); + + last_search = g_list_sort_with_data(last_search, inverse_sort_tbs, + &sort_by); + + if (!last_search) { + qemu_printf("No data collected yet!\n"); + return; + } + + calculate_last_search_coverages(); + + for (i = last_search; i && count--; i = i->next) { + TBStatistics *tbs = (TBStatistics *) i->data; + tbs->display_id = id++; + } + + /* free the unused bits */ + if (i) { + if (i->next) { + i->next->prev = NULL; + } + g_list_free(i->next); + i->next = NULL; + } + + dump_last_search_headers(total); +} + +struct tbs_dump_info { + int count; + int sort_by; +}; + +static void do_dump_tbs_info_safe(CPUState *cpu, run_on_cpu_data tbdi) +{ + struct tbs_dump_info *info = tbdi.host_ptr; + qemu_log_to_monitor(true); + do_dump_tbs_info(info->count, info->sort_by); + qemu_log_to_monitor(false); + g_free(info); +} + +/* + * When we dump_tbs_info on a live system via the HMP we want to + * ensure the system is quiessent before we start outputting stuff. + * Otherwise we could pollute the output with other logging output. + */ + +void dump_tbs_info(int count, int sort_by, bool use_monitor) +{ + if (use_monitor) { + struct tbs_dump_info *tbdi = g_new(struct tbs_dump_info, 1); + tbdi->count = count; + tbdi->sort_by = sort_by; + async_safe_run_on_cpu(first_cpu, do_dump_tbs_info_safe, + RUN_ON_CPU_HOST_PTR(tbdi)); + } else { + do_dump_tbs_info(count, sort_by); + } +} + +/* + * We cannot always re-generate the code even if we know there are + * valid translations still in the cache. The reason being the guest + * may have un-mapped the page code. In real life this would be + * un-reachable as the jump cache is cleared and the following QHT + * lookup will do a get_page_addr_code() and fault. + * + * TODO: can we do this safely? 
We need to + * a) somehow recover the mmu_idx for this translation + * b) probe MMU_INST_FETCH to know it will succeed + */ +static GString *get_code_string(TBStatistics *tbs, int log_flags) +{ + int old_log_flags = qemu_loglevel; + + CPUState *cpu = first_cpu; + uint32_t cflags = curr_cflags(cpu); + TranslationBlock *tb = NULL; + + GString *code_s = g_string_new(NULL); + qemu_log_to_string(true, code_s); + + Error *err = NULL; + if (!qemu_set_log(log_flags, &err)) { + g_string_append_printf(code_s, "%s", error_get_pretty(err)); + error_free(err); + } + + if (sigsetjmp(cpu->jmp_env, 0) == 0) { + mmap_lock(); + tb = tb_gen_code(cpu, tbs->pc, tbs->cs_base, tbs->flags, cflags); + tb_phys_invalidate(tb, -1); + mmap_unlock(); + } else { + /* + * The mmap_lock is dropped by tb_gen_code if it runs out of + * memory. + */ + qemu_log("\ncould not gen code for this TB (no longer mapped?)\n"); + assert_no_pages_locked(); + } + + qemu_set_log(old_log_flags, &err); + qemu_log_to_string(false, NULL); + + return code_s; +} + +static void do_tb_dump_with_statistics(TBStatistics *tbs, int log_flags) +{ + g_autoptr(GString) code_s = NULL; + + qemu_log("\n------------------------------\n\n"); + + if (dump_tb_header(tbs) > 0) { + code_s = get_code_string(tbs, log_flags); + } else { + code_s = g_string_new("cannot re-translate non-active translation"); + } + qemu_log("%s", code_s->str); + qemu_log("------------------------------\n"); +} + +struct tb_dump_info { + int id; + int log_flags; + bool use_monitor; +}; + +static void do_dump_tb_info_safe(CPUState *cpu, run_on_cpu_data info) +{ + struct tb_dump_info *tbdi = (struct tb_dump_info *) info.host_ptr; + + if (!last_search) { + qemu_log("no search on record\n"); + return; + } + + qemu_log_to_monitor(tbdi->use_monitor); + + TBStatistics *tbs = get_tbstats_by_id(tbdi->id); + if (tbs) { + do_tb_dump_with_statistics(tbs, tbdi->log_flags); + } else { + qemu_log("no TB statitics found with id %d\n", tbdi->id); + } + + qemu_log_to_monitor(false); + + g_free(tbdi); +} + +void dump_tb_info(int id, int log_mask, bool use_monitor) +{ + struct tb_dump_info *tbdi = g_new(struct tb_dump_info, 1); + + tbdi->id = id; + tbdi->log_flags = log_mask; + tbdi->use_monitor = use_monitor; + + async_safe_run_on_cpu(first_cpu, do_dump_tb_info_safe, + RUN_ON_CPU_HOST_PTR(tbdi)); + + /* tbdi free'd by do_dump_tb_info_safe */ +} + + void enable_collect_tb_stats(void) { tcg_collect_tb_stats = TB_STATS_RUNNING; diff --git a/disas/disas.c b/disas/disas.c index 0d2d06c2ec..0f31733646 100644 --- a/disas/disas.c +++ b/disas/disas.c @@ -8,6 +8,8 @@ #include "hw/core/cpu.h" #include "exec/memory.h" +#include "qemu/log-for-trace.h" + /* Filled in by elfload.c. Simplistic, but will do for now. */ struct syminfo *syminfos = NULL; @@ -199,6 +201,24 @@ static void initialize_debug_host(CPUDebug *s) #endif } +static int +__attribute__((format(printf, 2, 3))) +fprintf_log(FILE *a, const char *b, ...) +{ + va_list ap; + va_start(ap, b); + + if (!to_string) { + vfprintf(a, b, ap); + } else { + qemu_vlog(b, ap); + } + + va_end(ap); + + return 1; +} + /* Disassemble this for me please... (debugging). 
*/ void target_disas(FILE *out, CPUState *cpu, uint64_t code, size_t size) { @@ -207,7 +227,7 @@ void target_disas(FILE *out, CPUState *cpu, uint64_t code, size_t size) CPUDebug s; disas_initialize_debug_target(&s, cpu); - s.info.fprintf_func = fprintf; + s.info.fprintf_func = fprintf_log; s.info.stream = out; s.info.buffer_vma = code; s.info.buffer_length = size; @@ -221,9 +241,9 @@ void target_disas(FILE *out, CPUState *cpu, uint64_t code, size_t size) } for (pc = code; size > 0; pc += count, size -= count) { - fprintf(out, "0x%08" PRIx64 ": ", pc); + fprintf_log(out, "0x%08" PRIx64 ": ", pc); count = s.info.print_insn(pc, &s.info); - fprintf(out, "\n"); + fprintf_log(out, "\n"); if (count < 0) { break; } diff --git a/hmp-commands-info.hx b/hmp-commands-info.hx index 47d63d26db..01d9658aea 100644 --- a/hmp-commands-info.hx +++ b/hmp-commands-info.hx @@ -262,6 +262,22 @@ ERST .params = "", .help = "show dynamic compiler info", }, + { + .name = "tb-list", + .args_type = "number:i?,sortedby:s?", + .params = "[number sortedby]", + .help = "show a [number] translated blocks sorted by [sortedby]" + "sortedby opts: hotness hg spills", + .cmd = hmp_info_tblist, + }, + { + .name = "tb", + .args_type = "id:i,flags:s?", + .params = "id [flag1,flag2,...]", + .help = "show information about one translated block by id." + "dump flags can be used to set dump code level: out_asm in_asm op", + .cmd = hmp_info_tb, + }, #endif SRST diff --git a/include/exec/tb-stats.h b/include/exec/tb-stats.h index 30b788f7b2..f2a6b09da9 100644 --- a/include/exec/tb-stats.h +++ b/include/exec/tb-stats.h @@ -36,8 +36,11 @@ enum SortBy { SORT_BY_HOTNESS, SORT_BY_HG /* Host/Guest */, SORT_BY_SPILLS }; enum TbstatsCmd { START, PAUSE, STOP, FILTER }; +#define tbs_stats_enabled(tbs, JIT_STATS) \ + (tbs && (tbs->stats_enabled & JIT_STATS)) + #define tb_stats_enabled(tb, JIT_STATS) \ - (tb && tb->tb_stats && (tb->tb_stats->stats_enabled & JIT_STATS)) + (tb && tb->tb_stats && tbs_stats_enabled(tb->tb_stats, JIT_STATS)) #define stat_per_translation(stat, name) \ (stat->translations.total ? 
stat->name / stat->translations.total : 0) @@ -66,6 +69,8 @@ struct TBStatistics { struct { unsigned long normal; unsigned long atomic; + /* filled only when dumping x% cover set */ + uint16_t coverage; } executions; /* JIT Stats - protected by lock */ @@ -88,7 +93,6 @@ struct TBStatistics { struct { unsigned long total; - unsigned long uncached; unsigned long spanning; } translations; @@ -108,6 +112,9 @@ struct TBStatistics { uint64_t la; uint64_t code; } gen_times; + + /* HMP information - used for referring to previous search */ + int display_id; }; bool tb_stats_cmp(const void *ap, const void *bp); @@ -132,4 +139,26 @@ void do_hmp_tbstats_safe(CPUState *cpu, run_on_cpu_data icmd); */ void tbstats_reset_tbs(void); +/** + * dump_tbs_info: report the hottest blocks + * + * @count: the limit of hotblocks + * @sort_by: property in which the dump will be sorted + * @use_monitor: redirect output to monitor + * + * Report the hottest blocks to either the log or monitor + */ +void dump_tbs_info(int count, int sort_by, bool use_monitor); + +/** + * dump_tb_info: dump information about one TB + * + * @id: the display id of the block (from previous search) + * @mask: the temporary logging mask + * @Use_monitor: redirect output to monitor + * + * Re-run a translation of a block at addr for the purposes of debug output + */ +void dump_tb_info(int id, int log_mask, bool use_monitor); + #endif diff --git a/include/monitor/hmp.h b/include/monitor/hmp.h index 2e7f141754..acdd6f1561 100644 --- a/include/monitor/hmp.h +++ b/include/monitor/hmp.h @@ -182,5 +182,7 @@ void hmp_boot_set(Monitor *mon, const QDict *qdict); void hmp_info_mtree(Monitor *mon, const QDict *qdict); void hmp_info_cryptodev(Monitor *mon, const QDict *qdict); void hmp_tbstats(Monitor *mon, const QDict *qdict); +void hmp_info_tblist(Monitor *mon, const QDict *qdict); +void hmp_info_tb(Monitor *mon, const QDict *qdict); #endif diff --git a/include/qemu/log-for-trace.h b/include/qemu/log-for-trace.h index d47c9cd446..5d0afc56c7 100644 --- a/include/qemu/log-for-trace.h +++ b/include/qemu/log-for-trace.h @@ -20,6 +20,9 @@ /* Private global variable, don't use */ extern int qemu_loglevel; +extern bool to_string; + +extern int32_t max_num_hot_tbs_to_dump; #define LOG_TRACE (1 << 15) @@ -30,6 +33,7 @@ static inline bool qemu_loglevel_mask(int mask) } /* main logging function */ -void G_GNUC_PRINTF(1, 2) qemu_log(const char *fmt, ...); +int G_GNUC_PRINTF(1, 2) qemu_log(const char *fmt, ...); +int G_GNUC_PRINTF(1, 0) qemu_vlog(const char *fmt, va_list va); #endif diff --git a/include/qemu/log.h b/include/qemu/log.h index 6f3b8091cd..26b33151f3 100644 --- a/include/qemu/log.h +++ b/include/qemu/log.h @@ -85,6 +85,8 @@ extern const QEMULogItem qemu_log_items[]; bool qemu_set_log(int log_flags, Error **errp); bool qemu_set_log_filename(const char *filename, Error **errp); bool qemu_set_log_filename_flags(const char *name, int flags, Error **errp); +void qemu_log_to_monitor(bool enable); +void qemu_log_to_string(bool enable, GString *s); void qemu_set_dfilter_ranges(const char *ranges, Error **errp); bool qemu_log_in_addr_range(uint64_t addr); int qemu_str_to_log_mask(const char *str); diff --git a/util/log.c b/util/log.c index 7ae471d97c..6477eb5a5f 100644 --- a/util/log.c +++ b/util/log.c @@ -50,6 +50,8 @@ int qemu_loglevel; static bool log_per_thread; static GArray *debug_regions; int32_t max_num_hot_tbs_to_dump; +static bool to_monitor; +bool to_string; /* Returns true if qemu_log() will really write somewhere. 
*/ bool qemu_log_enabled(void) @@ -146,19 +148,6 @@ void qemu_log_unlock(FILE *logfile) } } -void qemu_log(const char *fmt, ...) -{ - FILE *f = qemu_log_trylock(); - if (f) { - va_list ap; - - va_start(ap, fmt); - vfprintf(f, fmt, ap); - va_end(ap); - qemu_log_unlock(f); - } -} - static void __attribute__((__constructor__)) startup(void) { qemu_mutex_init(&global_mutex); @@ -206,6 +195,55 @@ valid_filename_template(const char *filename, bool per_thread, Error **errp) return filename ? vft_strdup : vft_stderr; } +GString *string; + +int qemu_vlog(const char *fmt, va_list va) +{ + int ret = 0; + + if (to_string && string) { + g_string_append_vprintf(string, fmt, va); + } else if (to_monitor) { + ret = qemu_vprintf(fmt, va); + } else { + FILE *f = qemu_log_trylock(); + if (f) { + ret = vfprintf(f, fmt, va); + } + qemu_log_unlock(f); + } + + /* Don't pass back error results. */ + if (ret < 0) { + ret = 0; + } + return ret; +} + +/* Return the number of characters emitted. */ +int qemu_log(const char *fmt, ...) +{ + int ret = 0; + va_list ap; + + va_start(ap, fmt); + ret = qemu_vlog(fmt, ap); + va_end(ap); + + return ret; +} + +void qemu_log_to_monitor(bool enable) +{ + to_monitor = enable; +} + +void qemu_log_to_string(bool enable, GString *s) +{ + to_string = enable; + string = s; +} + /* enable or disable low levels log */ static bool qemu_set_log_internal(const char *filename, bool changed_name, int log_flags, Error **errp) @@ -523,6 +561,7 @@ int qemu_str_to_log_mask(const char *str) trace_enable_events((*tmp) + 6); mask |= LOG_TRACE; #endif +#ifdef CONFIG_TCG } else if (g_str_has_prefix(*tmp, "tb_stats")) { mask |= CPU_LOG_TB_STATS; set_default_tbstats_flag(TB_ALL_STATS); @@ -553,6 +592,7 @@ int qemu_str_to_log_mask(const char *str) } set_default_tbstats_flag(flags); } +#endif } else { for (item = qemu_log_items; item->mask != 0; item++) { if (g_str_equal(*tmp, item->name)) {
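For reference, based on the hmp-commands-info.hx entries added above and the examples earlier in the thread, monitor usage of the two new commands would look roughly like this (the numbers are illustrative):

    (qemu) info tb-list 5 hotness      <- five hottest blocks (hotness is the default sort)
    (qemu) info tb-list 5 spills       <- five blocks with the most register spills
    (qemu) info tb 2 in_asm,op         <- dump guest asm and TCG ops for TB id 2 from the last listing

The id argument refers to the display_id assigned by the most recent "info tb-list", and the flags argument accepts the same names as the "log" command (out_asm, in_asm, op), defaulting to in_asm when omitted.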