
[4/4] migration: Make sure that we don't call write() in case of error

Message ID 20191218050439.5989-5-quintela@redhat.com (mailing list archive)
State New, archived
Series Fix multifd + cancel + multifd

Commit Message

Juan Quintela Dec. 18, 2019, 5:04 a.m. UTC
If we are exiting due to an error/finish/..., just don't touch the channel
with even a single I/O operation.

Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 migration/ram.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)
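
For readers less familiar with the multifd code, the idea can be shown with a
small standalone sketch.  This is an illustration only, not QEMU code: the
names (send_state, send_pages, channel_write) are hypothetical, and C11
atomics stand in for QEMU's atomic_* helpers.  A shared "exiting" flag is
checked before any channel I/O, so once an error or cancellation has been
flagged, no further write is attempted.

#include <stdatomic.h>
#include <stddef.h>

/* Hypothetical send state; not the QEMU structure. */
struct send_state {
    atomic_int exiting;   /* 0 = running, 1 = shutting down */
};

/* Stand-in for a real channel write; always "succeeds" here. */
static int channel_write(const void *buf, size_t len)
{
    (void)buf;
    (void)len;
    return 0;
}

static int send_pages(struct send_state *s, const void *buf, size_t len)
{
    /* Bail out before touching the channel if we are already exiting. */
    if (atomic_load(&s->exiting)) {
        return -1;
    }
    return channel_write(buf, len);
}

int main(void)
{
    struct send_state s = { .exiting = 0 };

    send_pages(&s, "page", 4);          /* flag clear: I/O goes ahead */
    atomic_store(&s.exiting, 1);        /* an error path flags shutdown */
    return send_pages(&s, "page", 4) == -1 ? 0 : 1;   /* refused, no I/O */
}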

Comments

Dr. David Alan Gilbert Dec. 18, 2019, 4:33 p.m. UTC | #1
* Juan Quintela (quintela@redhat.com) wrote:
> If we are exiting due to an error/finish/..., just don't touch the channel
> with even a single I/O operation.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  migration/ram.c | 19 +++++++++++++++++++
>  1 file changed, 19 insertions(+)
> 
> diff --git a/migration/ram.c b/migration/ram.c
> index 4b44578e57..909ef6d237 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -1601,6 +1601,12 @@ struct {
>      QemuSemaphore channels_ready;
>      /* multifd ops */
>      MultiFDMethods *ops;
> +    /*
> +     * Have we already run the terminate-threads code?  There is a race
> +     * where an error can arrive while we are already exiting.
> +     * Accessed with atomic operations.  Only valid values are 0 and 1.
> +     */
> +    int exiting;
>  } *multifd_send_state;
>  
>  /*
> @@ -1629,6 +1635,10 @@ static int multifd_send_pages(RAMState *rs)
>      MultiFDPages_t *pages = multifd_send_state->pages;
>      uint64_t transferred;
>  
> +    if (atomic_read(&multifd_send_state->exiting)) {
> +        return -1;
> +    }
> +
>      qemu_sem_wait(&multifd_send_state->channels_ready);
>      for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
>          p = &multifd_send_state->params[i];
> @@ -1710,6 +1720,10 @@ static void multifd_send_terminate_threads(Error *err)
>          }
>      }
>  
> +    if (atomic_xchg(&multifd_send_state->exiting, 1)) {
> +        return;
> +    }

That could do with a comment on it;  I think what you're saying is
'don't do send_terminate_threads twice'

With a comment,


Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

>      for (i = 0; i < migrate_multifd_channels(); i++) {
>          MultiFDSendParams *p = &multifd_send_state->params[i];
>  
> @@ -1824,6 +1838,10 @@ static void *multifd_send_thread(void *opaque)
>  
>      while (true) {
>          qemu_sem_wait(&p->sem);
> +
> +        if (atomic_read(&multifd_send_state->exiting)) {
> +            break;
> +        }
>          qemu_mutex_lock(&p->mutex);
>  
>          if (p->pending_job) {
> @@ -1938,6 +1956,7 @@ int multifd_save_setup(Error **errp)
>      multifd_send_state->pages = multifd_pages_init(page_count);
>      qemu_sem_init(&multifd_send_state->channels_ready, 0);
>      multifd_send_state->ops = multifd_ops[migrate_multifd_method()];
> +    atomic_set(&multifd_send_state->exiting, 0);
>  
>      for (i = 0; i < thread_count; i++) {
>          MultiFDSendParams *p = &multifd_send_state->params[i];
> -- 
> 2.23.0
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
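
The guard Dave is referring to is the atomic_xchg() on ->exiting: the exchange
returns the previous value, so only the first caller sees 0 and carries on,
and every later caller (for example a second error reported while we are
already exiting) returns immediately.  As an illustration only (a standalone
C11 sketch with hypothetical names, atomic_exchange() standing in for QEMU's
atomic_xchg()), the requested comment might read roughly like this:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int exiting;   /* plays the role of multifd_send_state->exiting */

/* Hypothetical stand-in for multifd_send_terminate_threads(). */
static void terminate_threads(void)
{
    /*
     * Make sure the termination code only runs once: atomic_exchange()
     * returns the old value, so the first caller sees 0 and continues,
     * while any concurrent or later caller sees 1 and bails out.
     */
    if (atomic_exchange(&exiting, 1)) {
        return;
    }
    printf("terminating channels (runs exactly once)\n");
}

int main(void)
{
    terminate_threads();
    terminate_threads();   /* second call is a no-op */
    return 0;
}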

Patch

diff --git a/migration/ram.c b/migration/ram.c
index 4b44578e57..909ef6d237 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1601,6 +1601,12 @@  struct {
     QemuSemaphore channels_ready;
     /* multifd ops */
     MultiFDMethods *ops;
+    /*
+     * Have we already run the terminate-threads code?  There is a race
+     * where an error can arrive while we are already exiting.
+     * Accessed with atomic operations.  Only valid values are 0 and 1.
+     */
+    int exiting;
 } *multifd_send_state;
 
 /*
@@ -1629,6 +1635,10 @@  static int multifd_send_pages(RAMState *rs)
     MultiFDPages_t *pages = multifd_send_state->pages;
     uint64_t transferred;
 
+    if (atomic_read(&multifd_send_state->exiting)) {
+        return -1;
+    }
+
     qemu_sem_wait(&multifd_send_state->channels_ready);
     for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
         p = &multifd_send_state->params[i];
@@ -1710,6 +1720,10 @@  static void multifd_send_terminate_threads(Error *err)
         }
     }
 
+    if (atomic_xchg(&multifd_send_state->exiting, 1)) {
+        return;
+    }
+
     for (i = 0; i < migrate_multifd_channels(); i++) {
         MultiFDSendParams *p = &multifd_send_state->params[i];
 
@@ -1824,6 +1838,10 @@  static void *multifd_send_thread(void *opaque)
 
     while (true) {
         qemu_sem_wait(&p->sem);
+
+        if (atomic_read(&multifd_send_state->exiting)) {
+            break;
+        }
         qemu_mutex_lock(&p->mutex);
 
         if (p->pending_job) {
@@ -1938,6 +1956,7 @@  int multifd_save_setup(Error **errp)
     multifd_send_state->pages = multifd_pages_init(page_count);
     qemu_sem_init(&multifd_send_state->channels_ready, 0);
     multifd_send_state->ops = multifd_ops[migrate_multifd_method()];
+    atomic_set(&multifd_send_state->exiting, 0);
 
     for (i = 0; i < thread_count; i++) {
         MultiFDSendParams *p = &multifd_send_state->params[i];