
[1/1] migration: fix use-after-free in loadvm_postcopy_handle_run_bh

Message ID 1457537708-8622-1-git-send-email-den@openvz.org (mailing list archive)
State New, archived

Commit Message

Denis V. Lunev March 9, 2016, 3:35 p.m. UTC
MigrationState is destroyed before we can get to the bottom half.

Signed-off-by: Denis V. Lunev <den@openvz.org>
CC: Juan Quintela <quintela@redhat.com>
CC: Amit Shah <amit.shah@redhat.com>
CC: Dr. David Alan Gilbert <dgilbert@redhat.com>
---
Dave, are the tests you mentioned available publicly? I'd rather
run them in advance next time.

 migration/savevm.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)
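
The fix follows a simple ownership rule: a scheduled bottom half should only
touch memory whose lifetime it controls, so the patch hands the BH a small
opaque of its own (HandleRunBhData) that the handler frees once it has deleted
the BH. Below is a minimal, standalone C sketch of that pattern with no QEMU
dependencies; Deferred, RunCtx and defer() are illustrative stand-ins, not
QEMU APIs.

#include <stdio.h>
#include <stdlib.h>

typedef void (*DeferredFn)(void *opaque);

/* Stand-in for QEMUBH: a callback plus its opaque, fired later. */
typedef struct {
    DeferredFn fn;
    void *opaque;
} Deferred;

/* Context owned by the callback itself (stand-in for HandleRunBhData). */
typedef struct {
    Deferred *self;               /* analogous to data->bh */
} RunCtx;

static Deferred *defer(DeferredFn fn, void *opaque)
{
    Deferred *d = malloc(sizeof(*d));
    d->fn = fn;
    d->opaque = opaque;
    return d;
}

static void run_cb(void *opaque)
{
    RunCtx *ctx = opaque;
    printf("deferred work runs without touching any freed state\n");
    free(ctx->self);              /* analogous to qemu_bh_delete(data->bh) */
    free(ctx);                    /* analogous to g_free(data) */
}

int main(void)
{
    /* Schedule: the pending callback references only memory it owns. */
    RunCtx *ctx = malloc(sizeof(*ctx));
    ctx->self = defer(run_cb, ctx);

    /* The larger state object (MigrationIncomingState in the patch) could
     * be destroyed at this point without invalidating the pending work. */

    ctx->self->fn(ctx->self->opaque);   /* later: the event loop runs the BH */
    return 0;
}

In the patch itself the same shape appears as HandleRunBhData:
loadvm_postcopy_handle_run() allocates it with g_new(), and
loadvm_postcopy_handle_run_bh() deletes the BH and g_free()s the struct, so
nothing in the BH path needs the migration state to still be alive.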

Comments

Dr. David Alan Gilbert March 9, 2016, 6:54 p.m. UTC | #1
* Denis V. Lunev (den@openvz.org) wrote:
> MigrationState is destroyed before we can get to the bottom half.
> 
> Signed-off-by: Denis V. Lunev <den@openvz.org>
> CC: Juan Quintela <quintela@redhat.com>
> CC: Amit Shah <amit.shah@redhat.com>
> CC: Dr. David Alan Gilbert <dgilbert@redhat.com>

Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

(I was about to post a similar fix, although I'd used
a static QEMUBH * in loadvm_postcopy_handle_run.)

This one does work (and I checked that the binary is actually the
version I'm running!)
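
For comparison, the alternative mentioned here would presumably look something
like the fragment below. This is only a rough sketch, not Dave's actual code,
and it is not compilable on its own (it assumes the QEMU-internal QEMUBH /
qemu_bh_* API used in the patch); the name postcopy_run_bh is made up.

static QEMUBH *postcopy_run_bh;           /* hypothetical file-scope static */

static void loadvm_postcopy_handle_run_bh(void *opaque)
{
    /* ... invalidate caches, start or pause the VM, as in the patch ... */
    qemu_bh_delete(postcopy_run_bh);
    postcopy_run_bh = NULL;
}

static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
{
    /* ... postcopy state check, as in the patch ... */
    postcopy_run_bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, NULL);
    qemu_bh_schedule(postcopy_run_bh);
    return 0;
}

Either way the BH stops depending on the migration state's lifetime; the
merged per-call HandleRunBhData avoids a file-scope global and would also
tolerate more than one pending BH, at the cost of a small allocation.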

> ---
> Dave, are the tests you mentioned available publicly? I'd rather
> run them in advance next time.

Not yet; I've got two test sets that both need cleaning up:
   1) A test harness that boots a full VM, runs a heavy stress test, etc.
      - I can clean that up and put it somewhere, but it has a rather hacky
      set of waits for login prompts; my intention is to write a proper
      qemu test script in the next month or two.

   2) A very hacky autotest/virt-test setup that is good at finding races
      (it runs no load and adds a random delay before the postcopy phase
      starts); unfortunately virt-test is deprecated and I've not had time
      to look at its replacement (avocado) yet, but my intention is to do
      that.

I tend to run both of them in a loop; 2) in particular is really good
at finding race conditions, such as what happens if the listen thread
finishes at just the same time as something else.

Dave

> 
>  migration/savevm.c | 16 ++++++++++++----
>  1 file changed, 12 insertions(+), 4 deletions(-)
> 
> diff --git a/migration/savevm.c b/migration/savevm.c
> index 96e7db5..384e872 100644
> --- a/migration/savevm.c
> +++ b/migration/savevm.c
> @@ -1501,10 +1501,15 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
>      return 0;
>  }
>  
> +
> +typedef struct {
> +    QEMUBH *bh;
> +} HandleRunBhData;
> +
>  static void loadvm_postcopy_handle_run_bh(void *opaque)
>  {
>      Error *local_err = NULL;
> -    MigrationIncomingState *mis = opaque;
> +    HandleRunBhData *data = opaque;
>  
>      /* TODO we should move all of this lot into postcopy_ram.c or a shared code
>       * in migration.c
> @@ -1532,13 +1537,15 @@ static void loadvm_postcopy_handle_run_bh(void *opaque)
>          runstate_set(RUN_STATE_PAUSED);
>      }
>  
> -    qemu_bh_delete(mis->bh);
> +    qemu_bh_delete(data->bh);
> +    g_free(data);
>  }
>  
>  /* After all discards we can start running and asking for pages */
>  static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
>  {
>      PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_RUNNING);
> +    HandleRunBhData *data;
>  
>      trace_loadvm_postcopy_handle_run();
>      if (ps != POSTCOPY_INCOMING_LISTENING) {
> @@ -1546,8 +1553,9 @@ static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
>          return -1;
>      }
>  
> -    mis->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, NULL);
> -    qemu_bh_schedule(mis->bh);
> +    data = g_new(HandleRunBhData, 1);
> +    data->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, data);
> +    qemu_bh_schedule(data->bh);
>  
>      /* We need to finish reading the stream from the package
>       * and also stop reading anything more from the stream that loaded the
> -- 
> 2.1.4
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK

Patch

diff --git a/migration/savevm.c b/migration/savevm.c
index 96e7db5..384e872 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1501,10 +1501,15 @@  static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
     return 0;
 }
 
+
+typedef struct {
+    QEMUBH *bh;
+} HandleRunBhData;
+
 static void loadvm_postcopy_handle_run_bh(void *opaque)
 {
     Error *local_err = NULL;
-    MigrationIncomingState *mis = opaque;
+    HandleRunBhData *data = opaque;
 
     /* TODO we should move all of this lot into postcopy_ram.c or a shared code
      * in migration.c
@@ -1532,13 +1537,15 @@  static void loadvm_postcopy_handle_run_bh(void *opaque)
         runstate_set(RUN_STATE_PAUSED);
     }
 
-    qemu_bh_delete(mis->bh);
+    qemu_bh_delete(data->bh);
+    g_free(data);
 }
 
 /* After all discards we can start running and asking for pages */
 static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
 {
     PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_RUNNING);
+    HandleRunBhData *data;
 
     trace_loadvm_postcopy_handle_run();
     if (ps != POSTCOPY_INCOMING_LISTENING) {
@@ -1546,8 +1553,9 @@  static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
         return -1;
     }
 
-    mis->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, NULL);
-    qemu_bh_schedule(mis->bh);
+    data = g_new(HandleRunBhData, 1);
+    data->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, data);
+    qemu_bh_schedule(data->bh);
 
     /* We need to finish reading the stream from the package
      * and also stop reading anything more from the stream that loaded the