Message ID | 20181123091729.29921-14-luc.michel@greensocs.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | gdbstub: support for the multiprocess extension | expand |
On 23/11/18 10:17, Luc Michel wrote: > When a new connection is established, we set the first process to be > attached, and the others detached. The first CPU of the first process > is selected as the current CPU. > > Signed-off-by: Luc Michel <luc.michel@greensocs.com> > Reviewed-by: Alistair Francis <alistair.francis@wdc.com> > Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com> Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> > --- > gdbstub.c | 20 +++++++++++++++----- > 1 file changed, 15 insertions(+), 5 deletions(-) > > diff --git a/gdbstub.c b/gdbstub.c > index 69471ea914..6518324d46 100644 > --- a/gdbstub.c > +++ b/gdbstub.c > @@ -2249,13 +2249,14 @@ static bool gdb_accept(void) > close(fd); > return false; > } > > s = g_malloc0(sizeof(GDBState)); > - s->c_cpu = first_cpu; > - s->g_cpu = first_cpu; > create_unique_process(s); > + s->processes[0].attached = true; > + s->c_cpu = gdb_first_attached_cpu(s); > + s->g_cpu = s->c_cpu; > s->fd = fd; > gdb_has_xml = false; > > gdbserver_state = s; > return true; > @@ -2337,12 +2338,23 @@ static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size) > } > } > > static void gdb_chr_event(void *opaque, int event) > { > + int i; > + GDBState *s = (GDBState *) opaque; > + > switch (event) { > case CHR_EVENT_OPENED: > + /* Start with first process attached, others detached */ > + for (i = 0; i < s->process_num; i++) { > + s->processes[i].attached = !i; > + } > + > + s->c_cpu = gdb_first_attached_cpu(s); > + s->g_cpu = s->c_cpu; > + > vm_stop(RUN_STATE_PAUSED); > gdb_has_xml = false; > break; > default: > break; > @@ -2528,19 +2540,17 @@ int gdbserver_start(const char *device) > mon_chr = s->mon_chr; > cleanup_processes(s); > memset(s, 0, sizeof(GDBState)); > s->mon_chr = mon_chr; > } > - s->c_cpu = first_cpu; > - s->g_cpu = first_cpu; > > create_processes(s); > > if (chr) { > qemu_chr_fe_init(&s->chr, chr, &error_abort); > qemu_chr_fe_set_handlers(&s->chr, gdb_chr_can_receive, gdb_chr_receive, > - gdb_chr_event, NULL, NULL, NULL, true); > + gdb_chr_event, NULL, s, NULL, true); > } > s->state = chr ? RS_IDLE : RS_INACTIVE; > s->mon_chr = mon_chr; > s->current_syscall_cb = NULL; > >
diff --git a/gdbstub.c b/gdbstub.c index 69471ea914..6518324d46 100644 --- a/gdbstub.c +++ b/gdbstub.c @@ -2249,13 +2249,14 @@ static bool gdb_accept(void) close(fd); return false; } s = g_malloc0(sizeof(GDBState)); - s->c_cpu = first_cpu; - s->g_cpu = first_cpu; create_unique_process(s); + s->processes[0].attached = true; + s->c_cpu = gdb_first_attached_cpu(s); + s->g_cpu = s->c_cpu; s->fd = fd; gdb_has_xml = false; gdbserver_state = s; return true; @@ -2337,12 +2338,23 @@ static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size) } } static void gdb_chr_event(void *opaque, int event) { + int i; + GDBState *s = (GDBState *) opaque; + switch (event) { case CHR_EVENT_OPENED: + /* Start with first process attached, others detached */ + for (i = 0; i < s->process_num; i++) { + s->processes[i].attached = !i; + } + + s->c_cpu = gdb_first_attached_cpu(s); + s->g_cpu = s->c_cpu; + vm_stop(RUN_STATE_PAUSED); gdb_has_xml = false; break; default: break; @@ -2528,19 +2540,17 @@ int gdbserver_start(const char *device) mon_chr = s->mon_chr; cleanup_processes(s); memset(s, 0, sizeof(GDBState)); s->mon_chr = mon_chr; } - s->c_cpu = first_cpu; - s->g_cpu = first_cpu; create_processes(s); if (chr) { qemu_chr_fe_init(&s->chr, chr, &error_abort); qemu_chr_fe_set_handlers(&s->chr, gdb_chr_can_receive, gdb_chr_receive, - gdb_chr_event, NULL, NULL, NULL, true); + gdb_chr_event, NULL, s, NULL, true); } s->state = chr ? RS_IDLE : RS_INACTIVE; s->mon_chr = mon_chr; s->current_syscall_cb = NULL;