Message ID | 20191212202430.1079725-4-stefanb@linux.vnet.ibm.com (mailing list archive)
---|---
State | New, archived
Series | Add vTPM emulator support for ppc64 platform
On Thu, Dec 12, 2019 at 03:24:28PM -0500, Stefan Berger wrote:
> Extend the tpm_spapr frontend with VM suspend and resume support.
>
> Signed-off-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
>
> diff --git a/hw/tpm/tpm_spapr.c b/hw/tpm/tpm_spapr.c
> index c4a67e2403..8f5a142bd4 100644
> --- a/hw/tpm/tpm_spapr.c
> +++ b/hw/tpm/tpm_spapr.c
> @@ -87,6 +87,8 @@ typedef struct {
>      TPMVersion be_tpm_version;
>
>      size_t be_buffer_size;
> +
> +    bool deliver_response; /* whether to deliver response after VM resume */
>  } SPAPRvTPMState;
>
>  static void tpm_spapr_show_buffer(const unsigned char *buffer,
> @@ -256,6 +258,12 @@ static void tpm_spapr_request_completed(TPMIf *ti, int ret)
>      uint32_t len;
>      int rc;
>
> +    if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {

I'm trying to figure out the circumstances in which
request_completed() would get called before post_load on the
destination.

> +        /* defer delivery of response until .post_load */
> +        s->deliver_response |= true;

|= is a bitwise OR which is not what you want, although it will
*probably* work in practice.  Better to just use
    s->deliver_response = true;

> +        return;
> +    }
> +
>      s->state = SPAPR_VTPM_STATE_COMPLETION;
>
>      /* a max. of be_buffer_size bytes can be transported */
> @@ -316,6 +324,7 @@ static void tpm_spapr_reset(SpaprVioDevice *dev)
>      SPAPRvTPMState *s = VIO_SPAPR_VTPM(dev);
>
>      s->state = SPAPR_VTPM_STATE_NONE;
> +    s->deliver_response = false;
>
>      s->be_tpm_version = tpm_backend_get_tpm_version(s->be_driver);
>      tpm_spapr_update_deviceclass(dev);
> @@ -339,9 +348,53 @@ static enum TPMVersion tpm_spapr_get_version(TPMIf *ti)
>      return tpm_backend_get_tpm_version(s->be_driver);
>  }
>
> +/* persistent state handling */
> +
> +static int tpm_spapr_pre_save(void *opaque)
> +{
> +    SPAPRvTPMState *s = opaque;
> +
> +    s->deliver_response |= tpm_backend_finish_sync(s->be_driver);

Same problem here.

> +    trace_tpm_spapr_pre_save(s->deliver_response);
> +    /*
> +     * we cannot deliver the results to the VM since DMA would touch VM memory
> +     */
> +
> +    return 0;
> +}
> +
> +static int tpm_spapr_post_load(void *opaque, int version_id)
> +{
> +    SPAPRvTPMState *s = opaque;
> +
> +    if (s->deliver_response) {
> +        trace_tpm_spapr_post_load();
> +        /* deliver the results to the VM via DMA */
> +        tpm_spapr_request_completed(TPM_IF(s), 0);
> +        s->deliver_response = false;
> +    }
> +
> +    return 0;
> +}
> +
>  static const VMStateDescription vmstate_spapr_vtpm = {
>      .name = "tpm-spapr",
> -    .unmigratable = 1,
> +    .version_id = 1,
> +    .minimum_version_id = 0,
> +    .minimum_version_id_old = 0,
> +    .pre_save = tpm_spapr_pre_save,
> +    .post_load = tpm_spapr_post_load,
> +    .fields = (VMStateField[]) {
> +        VMSTATE_SPAPR_VIO(vdev, SPAPRvTPMState),
> +
> +        VMSTATE_UINT8(state, SPAPRvTPMState),
> +        VMSTATE_BUFFER(buffer, SPAPRvTPMState),

Transferring the whole 4kiB buffer unconditionally when it mostly
won't have anything useful in it doesn't seem like a great idea.

> +        /* remember DMA address */
> +        VMSTATE_UINT32(crq.s.data, SPAPRvTPMState),
> +        VMSTATE_BOOL(deliver_response, SPAPRvTPMState),
> +        VMSTATE_END_OF_LIST(),
> +    }
>  };
>
>  static Property tpm_spapr_properties[] = {
> diff --git a/hw/tpm/trace-events b/hw/tpm/trace-events
> index 6278a39618..d109661b96 100644
> --- a/hw/tpm/trace-events
> +++ b/hw/tpm/trace-events
> @@ -67,3 +67,5 @@ tpm_spapr_do_crq_get_version(uint32_t version) "response: version %u"
>  tpm_spapr_do_crq_prepare_to_suspend(void) "response: preparing to suspend"
>  tpm_spapr_do_crq_unknown_msg_type(uint8_t type) "Unknown message type 0x%02x"
>  tpm_spapr_do_crq_unknown_crq(uint8_t raw1, uint8_t raw2) "unknown CRQ 0x%02x 0x%02x ..."
> +tpm_spapr_pre_save(bool v) "TPM response to deliver after resume: %d"
> +tpm_spapr_post_load(void) "Delivering TPM response after resume"
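[For reference, the plain-assignment form suggested above would read as follows in
the completion handler; this is only a sketch of the suggested change, not code
taken from the posted series:]

    if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
        /* defer delivery of response until .post_load */
        s->deliver_response = true;   /* plain store instead of |= */
        return;
    }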
On 12/13/19 12:39 AM, David Gibson wrote:
> On Thu, Dec 12, 2019 at 03:24:28PM -0500, Stefan Berger wrote:
>> Extend the tpm_spapr frontend with VM suspend and resume support.
>>
>> Signed-off-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
>>
>> diff --git a/hw/tpm/tpm_spapr.c b/hw/tpm/tpm_spapr.c
>> index c4a67e2403..8f5a142bd4 100644
>> --- a/hw/tpm/tpm_spapr.c
>> +++ b/hw/tpm/tpm_spapr.c
>> @@ -87,6 +87,8 @@ typedef struct {
>>      TPMVersion be_tpm_version;
>>
>>      size_t be_buffer_size;
>> +
>> +    bool deliver_response; /* whether to deliver response after VM resume */
>>  } SPAPRvTPMState;
>>
>>  static void tpm_spapr_show_buffer(const unsigned char *buffer,
>> @@ -256,6 +258,12 @@ static void tpm_spapr_request_completed(TPMIf *ti, int ret)
>>      uint32_t len;
>>      int rc;
>>
>> +    if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
> I'm trying to figure out the circumstances in which
> request_completed() would get called before post_load on the
> destination.

This is on the source side where we must not deliver the response in
case the devices are now suspending but defer the delivery to after the
resume.

>
>> +        /* defer delivery of response until .post_load */
>> +        s->deliver_response |= true;
> |= is a bitwise OR which is not what you want, although it will
> *probably* work in practice.  Better to just use
>     s->deliver_response = true;
>
>> +        return;
>> +    }
>> +
>>      s->state = SPAPR_VTPM_STATE_COMPLETION;
>>
>>      /* a max. of be_buffer_size bytes can be transported */
>> @@ -316,6 +324,7 @@ static void tpm_spapr_reset(SpaprVioDevice *dev)
>>      SPAPRvTPMState *s = VIO_SPAPR_VTPM(dev);
>>
>>      s->state = SPAPR_VTPM_STATE_NONE;
>> +    s->deliver_response = false;
>>
>>      s->be_tpm_version = tpm_backend_get_tpm_version(s->be_driver);
>>      tpm_spapr_update_deviceclass(dev);
>> @@ -339,9 +348,53 @@ static enum TPMVersion tpm_spapr_get_version(TPMIf *ti)
>>      return tpm_backend_get_tpm_version(s->be_driver);
>>  }
>>
>> +/* persistent state handling */
>> +
>> +static int tpm_spapr_pre_save(void *opaque)
>> +{
>> +    SPAPRvTPMState *s = opaque;
>> +
>> +    s->deliver_response |= tpm_backend_finish_sync(s->be_driver);
> Same problem here.
>
>> +    trace_tpm_spapr_pre_save(s->deliver_response);
>> +    /*
>> +     * we cannot deliver the results to the VM since DMA would touch VM memory
>> +     */
>> +
>> +    return 0;
>> +}
>> +
>> +static int tpm_spapr_post_load(void *opaque, int version_id)
>> +{
>> +    SPAPRvTPMState *s = opaque;
>> +
>> +    if (s->deliver_response) {
>> +        trace_tpm_spapr_post_load();
>> +        /* deliver the results to the VM via DMA */
>> +        tpm_spapr_request_completed(TPM_IF(s), 0);
>> +        s->deliver_response = false;
>> +    }
>> +
>> +    return 0;
>> +}
>> +
>>  static const VMStateDescription vmstate_spapr_vtpm = {
>>      .name = "tpm-spapr",
>> -    .unmigratable = 1,
>> +    .version_id = 1,
>> +    .minimum_version_id = 0,
>> +    .minimum_version_id_old = 0,
>> +    .pre_save = tpm_spapr_pre_save,
>> +    .post_load = tpm_spapr_post_load,
>> +    .fields = (VMStateField[]) {
>> +        VMSTATE_SPAPR_VIO(vdev, SPAPRvTPMState),
>> +
>> +        VMSTATE_UINT8(state, SPAPRvTPMState),
>> +        VMSTATE_BUFFER(buffer, SPAPRvTPMState),
> Transferring the whole 4kiB buffer unconditionally when it mostly
> won't have anything useful in it doesn't seem like a great idea.

It's really only needed in case of a 'delayed response'. So, yeah, we
could transfer data in only that case then.

>
>> +        /* remember DMA address */
>> +        VMSTATE_UINT32(crq.s.data, SPAPRvTPMState),
>> +        VMSTATE_BOOL(deliver_response, SPAPRvTPMState),
>> +        VMSTATE_END_OF_LIST(),
>> +    }
>>  };
>>
>>  static Property tpm_spapr_properties[] = {
>> diff --git a/hw/tpm/trace-events b/hw/tpm/trace-events
>> index 6278a39618..d109661b96 100644
>> --- a/hw/tpm/trace-events
>> +++ b/hw/tpm/trace-events
>> @@ -67,3 +67,5 @@ tpm_spapr_do_crq_get_version(uint32_t version) "response: version %u"
>>  tpm_spapr_do_crq_prepare_to_suspend(void) "response: preparing to suspend"
>>  tpm_spapr_do_crq_unknown_msg_type(uint8_t type) "Unknown message type 0x%02x"
>>  tpm_spapr_do_crq_unknown_crq(uint8_t raw1, uint8_t raw2) "unknown CRQ 0x%02x 0x%02x ..."
>> +tpm_spapr_pre_save(bool v) "TPM response to deliver after resume: %d"
>> +tpm_spapr_post_load(void) "Delivering TPM response after resume"
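[One way to transfer the buffer only in the 'delayed response' case, sketched here
purely as an illustration and not taken from the series, is to move the 4 KiB
buffer into a VMState subsection whose .needed callback returns deliver_response,
so the buffer only goes on the wire when a deferred delivery is actually pending.
The field names and the fixed-size VMSTATE_BUFFER macro simply mirror the patch
quoted above, and the sketch assumes the declarations from hw/tpm/tpm_spapr.c:]

    /* sketch only: relies on SPAPRvTPMState and the handlers shown above */
    static bool tpm_spapr_vtpm_buffer_needed(void *opaque)
    {
        SPAPRvTPMState *s = opaque;

        /* ship the response buffer only when a deferred delivery is pending */
        return s->deliver_response;
    }

    static const VMStateDescription vmstate_spapr_vtpm_buffer = {
        .name = "tpm-spapr/buffer",
        .version_id = 1,
        .minimum_version_id = 1,
        .needed = tpm_spapr_vtpm_buffer_needed,
        .fields = (VMStateField[]) {
            VMSTATE_BUFFER(buffer, SPAPRvTPMState),
            VMSTATE_END_OF_LIST(),
        }
    };

    static const VMStateDescription vmstate_spapr_vtpm = {
        .name = "tpm-spapr",
        .version_id = 1,
        .minimum_version_id = 1,
        .pre_save = tpm_spapr_pre_save,
        .post_load = tpm_spapr_post_load,
        .fields = (VMStateField[]) {
            VMSTATE_SPAPR_VIO(vdev, SPAPRvTPMState),
            VMSTATE_UINT8(state, SPAPRvTPMState),
            /* remember DMA address */
            VMSTATE_UINT32(crq.s.data, SPAPRvTPMState),
            VMSTATE_BOOL(deliver_response, SPAPRvTPMState),
            VMSTATE_END_OF_LIST(),
        },
        .subsections = (const VMStateDescription*[]) {
            &vmstate_spapr_vtpm_buffer,
            NULL
        }
    };

[On the destination, an absent subsection simply leaves the buffer untouched,
which is harmless because post_load only DMAs it back to the guest when
deliver_response was migrated as true.]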
On Fri, Dec 13, 2019 at 07:46:44AM -0500, Stefan Berger wrote:
> On 12/13/19 12:39 AM, David Gibson wrote:
> > On Thu, Dec 12, 2019 at 03:24:28PM -0500, Stefan Berger wrote:
> > > Extend the tpm_spapr frontend with VM suspend and resume support.
> > > 
> > > Signed-off-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
> > > 
> > > diff --git a/hw/tpm/tpm_spapr.c b/hw/tpm/tpm_spapr.c
> > > index c4a67e2403..8f5a142bd4 100644
> > > --- a/hw/tpm/tpm_spapr.c
> > > +++ b/hw/tpm/tpm_spapr.c
> > > @@ -87,6 +87,8 @@ typedef struct {
> > >      TPMVersion be_tpm_version;
> > >      size_t be_buffer_size;
> > > +
> > > +    bool deliver_response; /* whether to deliver response after VM resume */
> > >  } SPAPRvTPMState;
> > >  static void tpm_spapr_show_buffer(const unsigned char *buffer,
> > > @@ -256,6 +258,12 @@ static void tpm_spapr_request_completed(TPMIf *ti, int ret)
> > >      uint32_t len;
> > >      int rc;
> > > +    if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
> > I'm trying to figure out the circumstances in which
> > request_completed() would get called before post_load on the
> > destination.
> 
> This is on the source side where we must not deliver the response in case
> the devices are now suspending but defer the delivery to after the
> resume.

Ah, I see.  But in that case, AFAICT this means we've received the
completion when we're in the last stages of migration, which means
it's entirely possible we've already transferred the vtpm device's
state.  So there's no guarantee that either the deliver_response
change here, or the response buffer will actually make it to the
other side.

> > > +        /* defer delivery of response until .post_load */
> > > +        s->deliver_response |= true;
> > |= is a bitwise OR which is not what you want, although it will
> > *probably* work in practice.  Better to just use
> >     s->deliver_response = true;
> > 
> > > +        return;
> > > +    }
> > > +
> > >      s->state = SPAPR_VTPM_STATE_COMPLETION;
> > >      /* a max. of be_buffer_size bytes can be transported */
> > > @@ -316,6 +324,7 @@ static void tpm_spapr_reset(SpaprVioDevice *dev)
> > >      SPAPRvTPMState *s = VIO_SPAPR_VTPM(dev);
> > >      s->state = SPAPR_VTPM_STATE_NONE;
> > > +    s->deliver_response = false;
> > >      s->be_tpm_version = tpm_backend_get_tpm_version(s->be_driver);
> > >      tpm_spapr_update_deviceclass(dev);
> > > @@ -339,9 +348,53 @@ static enum TPMVersion tpm_spapr_get_version(TPMIf *ti)
> > >      return tpm_backend_get_tpm_version(s->be_driver);
> > >  }
> > > +/* persistent state handling */
> > > +
> > > +static int tpm_spapr_pre_save(void *opaque)
> > > +{
> > > +    SPAPRvTPMState *s = opaque;
> > > +
> > > +    s->deliver_response |= tpm_backend_finish_sync(s->be_driver);
> > Same problem here.
> > 
> > > +    trace_tpm_spapr_pre_save(s->deliver_response);
> > > +    /*
> > > +     * we cannot deliver the results to the VM since DMA would touch VM memory
> > > +     */
> > > +
> > > +    return 0;
> > > +}
> > > +
> > > +static int tpm_spapr_post_load(void *opaque, int version_id)
> > > +{
> > > +    SPAPRvTPMState *s = opaque;
> > > +
> > > +    if (s->deliver_response) {
> > > +        trace_tpm_spapr_post_load();
> > > +        /* deliver the results to the VM via DMA */
> > > +        tpm_spapr_request_completed(TPM_IF(s), 0);
> > > +        s->deliver_response = false;
> > > +    }
> > > +
> > > +    return 0;
> > > +}
> > > +
> > >  static const VMStateDescription vmstate_spapr_vtpm = {
> > >      .name = "tpm-spapr",
> > > -    .unmigratable = 1,
> > > +    .version_id = 1,
> > > +    .minimum_version_id = 0,
> > > +    .minimum_version_id_old = 0,
> > > +    .pre_save = tpm_spapr_pre_save,
> > > +    .post_load = tpm_spapr_post_load,
> > > +    .fields = (VMStateField[]) {
> > > +        VMSTATE_SPAPR_VIO(vdev, SPAPRvTPMState),
> > > +
> > > +        VMSTATE_UINT8(state, SPAPRvTPMState),
> > > +        VMSTATE_BUFFER(buffer, SPAPRvTPMState),
> > Transferring the whole 4kiB buffer unconditionally when it mostly
> > won't have anything useful in it doesn't seem like a great idea.
> 
> It's really only needed in case of a 'delayed response'. So, yeah, we could
> transfer data in only that case then.
> 
> > 
> > > +        /* remember DMA address */
> > > +        VMSTATE_UINT32(crq.s.data, SPAPRvTPMState),
> > > +        VMSTATE_BOOL(deliver_response, SPAPRvTPMState),
> > > +        VMSTATE_END_OF_LIST(),
> > > +    }
> > >  };
> > >  static Property tpm_spapr_properties[] = {
> > > diff --git a/hw/tpm/trace-events b/hw/tpm/trace-events
> > > index 6278a39618..d109661b96 100644
> > > --- a/hw/tpm/trace-events
> > > +++ b/hw/tpm/trace-events
> > > @@ -67,3 +67,5 @@ tpm_spapr_do_crq_get_version(uint32_t version) "response: version %u"
> > >  tpm_spapr_do_crq_prepare_to_suspend(void) "response: preparing to suspend"
> > >  tpm_spapr_do_crq_unknown_msg_type(uint8_t type) "Unknown message type 0x%02x"
> > >  tpm_spapr_do_crq_unknown_crq(uint8_t raw1, uint8_t raw2) "unknown CRQ 0x%02x 0x%02x ..."
> > > +tpm_spapr_pre_save(bool v) "TPM response to deliver after resume: %d"
> > > +tpm_spapr_post_load(void) "Delivering TPM response after resume"
> > 
diff --git a/hw/tpm/tpm_spapr.c b/hw/tpm/tpm_spapr.c
index c4a67e2403..8f5a142bd4 100644
--- a/hw/tpm/tpm_spapr.c
+++ b/hw/tpm/tpm_spapr.c
@@ -87,6 +87,8 @@ typedef struct {
     TPMVersion be_tpm_version;
 
     size_t be_buffer_size;
+
+    bool deliver_response; /* whether to deliver response after VM resume */
 } SPAPRvTPMState;
 
 static void tpm_spapr_show_buffer(const unsigned char *buffer,
@@ -256,6 +258,12 @@ static void tpm_spapr_request_completed(TPMIf *ti, int ret)
     uint32_t len;
     int rc;
 
+    if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
+        /* defer delivery of response until .post_load */
+        s->deliver_response |= true;
+        return;
+    }
+
     s->state = SPAPR_VTPM_STATE_COMPLETION;
 
     /* a max. of be_buffer_size bytes can be transported */
@@ -316,6 +324,7 @@ static void tpm_spapr_reset(SpaprVioDevice *dev)
     SPAPRvTPMState *s = VIO_SPAPR_VTPM(dev);
 
     s->state = SPAPR_VTPM_STATE_NONE;
+    s->deliver_response = false;
 
     s->be_tpm_version = tpm_backend_get_tpm_version(s->be_driver);
     tpm_spapr_update_deviceclass(dev);
@@ -339,9 +348,53 @@ static enum TPMVersion tpm_spapr_get_version(TPMIf *ti)
     return tpm_backend_get_tpm_version(s->be_driver);
 }
 
+/* persistent state handling */
+
+static int tpm_spapr_pre_save(void *opaque)
+{
+    SPAPRvTPMState *s = opaque;
+
+    s->deliver_response |= tpm_backend_finish_sync(s->be_driver);
+
+    trace_tpm_spapr_pre_save(s->deliver_response);
+    /*
+     * we cannot deliver the results to the VM since DMA would touch VM memory
+     */
+
+    return 0;
+}
+
+static int tpm_spapr_post_load(void *opaque, int version_id)
+{
+    SPAPRvTPMState *s = opaque;
+
+    if (s->deliver_response) {
+        trace_tpm_spapr_post_load();
+        /* deliver the results to the VM via DMA */
+        tpm_spapr_request_completed(TPM_IF(s), 0);
+        s->deliver_response = false;
+    }
+
+    return 0;
+}
+
 static const VMStateDescription vmstate_spapr_vtpm = {
     .name = "tpm-spapr",
-    .unmigratable = 1,
+    .version_id = 1,
+    .minimum_version_id = 0,
+    .minimum_version_id_old = 0,
+    .pre_save = tpm_spapr_pre_save,
+    .post_load = tpm_spapr_post_load,
+    .fields = (VMStateField[]) {
+        VMSTATE_SPAPR_VIO(vdev, SPAPRvTPMState),
+
+        VMSTATE_UINT8(state, SPAPRvTPMState),
+        VMSTATE_BUFFER(buffer, SPAPRvTPMState),
+        /* remember DMA address */
+        VMSTATE_UINT32(crq.s.data, SPAPRvTPMState),
+        VMSTATE_BOOL(deliver_response, SPAPRvTPMState),
+        VMSTATE_END_OF_LIST(),
+    }
 };
 
 static Property tpm_spapr_properties[] = {
diff --git a/hw/tpm/trace-events b/hw/tpm/trace-events
index 6278a39618..d109661b96 100644
--- a/hw/tpm/trace-events
+++ b/hw/tpm/trace-events
@@ -67,3 +67,5 @@ tpm_spapr_do_crq_get_version(uint32_t version) "response: version %u"
 tpm_spapr_do_crq_prepare_to_suspend(void) "response: preparing to suspend"
 tpm_spapr_do_crq_unknown_msg_type(uint8_t type) "Unknown message type 0x%02x"
 tpm_spapr_do_crq_unknown_crq(uint8_t raw1, uint8_t raw2) "unknown CRQ 0x%02x 0x%02x ..."
+tpm_spapr_pre_save(bool v) "TPM response to deliver after resume: %d"
+tpm_spapr_post_load(void) "Delivering TPM response after resume"
Extend the tpm_spapr frontend with VM suspend and resume support.

Signed-off-by: Stefan Berger <stefanb@linux.vnet.ibm.com>