
fixup! block-migration: acquire AioContext as necessary

Message ID 1455894643-16280-1-git-send-email-pbonzini@redhat.com (mailing list archive)
State New, archived

Commit Message

Paolo Bonzini Feb. 19, 2016, 3:10 p.m. UTC
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 migration/block.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

Comments

Fam Zheng Feb. 23, 2016, 5:39 a.m. UTC | #1
On Fri, 02/19 16:10, Paolo Bonzini wrote:
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  migration/block.c | 14 ++++++++++----
>  1 file changed, 10 insertions(+), 4 deletions(-)
> 
> diff --git a/migration/block.c b/migration/block.c
> index 3bfa908..eea4788 100644
> --- a/migration/block.c
> +++ b/migration/block.c
> @@ -54,6 +54,7 @@ typedef struct BlkMigDevState {
>      int shared_base;
>      int64_t total_sectors;
>      QSIMPLEQ_ENTRY(BlkMigDevState) entry;
> +    Error *blocker;
>  
>      /* Only used by migration thread.  Does not need a lock.  */
>      int bulk_completed;
> @@ -68,9 +69,10 @@ typedef struct BlkMigDevState {
>      /* Protected by block migration lock.  */
>      int64_t completed_sectors;
>  
> -    /* Protected by iothread lock / AioContext.  */
> +    /* During migration this is protected by iothread lock / AioContext.
> +     * Allocation and free happen during setup and cleanup respectively.
> +     */
>      BdrvDirtyBitmap *dirty_bitmap;
> -    Error *blocker;
>  } BlkMigDevState;
>  
>  typedef struct BlkMigBlock {
> @@ -339,9 +341,10 @@ static int set_dirty_tracking(void)
>      int ret;
>  
>      QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
> -        /* Creating/dropping dirty bitmaps only requires the big QEMU lock.  */
> +        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
>          bmds->dirty_bitmap = bdrv_create_dirty_bitmap(bmds->bs, BLOCK_SIZE,
>                                                        NULL, NULL);
> +        aio_context_release(bdrv_get_aio_context(bmds->bs));
>          if (!bmds->dirty_bitmap) {
>              ret = -errno;
>              goto fail;
> @@ -352,7 +355,9 @@ static int set_dirty_tracking(void)
>  fail:
>      QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
>          if (bmds->dirty_bitmap) {
> +            aio_context_acquire(bdrv_get_aio_context(bmds->bs));
>              bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
> +            aio_context_release(bdrv_get_aio_context(bmds->bs));
>          }
>      }
>      return ret;
> @@ -365,8 +370,9 @@ static void unset_dirty_tracking(void)
>      BlkMigDevState *bmds;
>  
>      QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
> -        /* Creating/dropping dirty bitmaps only requires the big QEMU lock.  */
> +        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
>          bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
> +        aio_context_release(bdrv_get_aio_context(bmds->bs));
>      }
>  }
>  
> -- 
> 2.5.0
> 

Reviewed-by: Fam Zheng <famz@redhat.com>

BTW, could you also explain the blk_mig_lock() question (*) I had?

*: https://lists.gnu.org/archive/html/qemu-devel/2016-02/msg03317.html

Fam
Paolo Bonzini Feb. 23, 2016, 9:42 a.m. UTC | #2
On 23/02/2016 06:39, Fam Zheng wrote:
> BTW, could you also explain the blk_mig_lock() question (*) I had?
> 
> *: https://lists.gnu.org/archive/html/qemu-devel/2016-02/msg03317.html

Sorry, missed that:

> 
>> @@ -597,21 +627,28 @@ static void block_migration_cleanup(void *opaque)
>>  {
>>      BlkMigDevState *bmds;
>>      BlkMigBlock *blk;
>> +    AioContext *ctx;
>>  
>>      bdrv_drain_all();
>>  
>>      unset_dirty_tracking();
>>  
>> -    blk_mig_lock();
> 
> Why is it okay to skip the blk_mig_lock() for block_mig_state.bmds_list?

The bmds_list is not protected by blk_mig_lock:

    /* Written during setup phase.  Can be read without a lock.  */
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

block_migration_cleanup is called from qemu_savevm_state_cleanup, when
there can be no concurrency between the migration thread and the I/O
thread.  In fact the call of qemu_savevm_state_cleanup might as well be
moved out of the migration thread.

Paolo
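
To make these locking rules concrete, the shape block_migration_cleanup()
takes with this series applied is roughly the following.  This is a sketch
reconstructed from the discussion rather than a hunk quoted from the patch,
so the exact statements and their ordering are illustrative; the point is
that bmds_list is walked without blk_mig_lock(), per-device teardown happens
under that device's AioContext, and only blk_list still takes the block
migration lock:

    static void block_migration_cleanup(void *opaque)
    {
        BlkMigDevState *bmds;
        BlkMigBlock *blk;
        AioContext *ctx;

        bdrv_drain_all();

        unset_dirty_tracking();

        /* bmds_list is only written during setup, and cleanup runs after
         * the migration thread has finished, so no blk_mig_lock() here. */
        while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
            QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
            bdrv_op_unblock_all(bmds->bs, bmds->blocker);
            error_free(bmds->blocker);

            /* Per-device resources are released under the device's
             * AioContext. */
            ctx = bdrv_get_aio_context(bmds->bs);
            aio_context_acquire(ctx);
            bdrv_unref(bmds->bs);
            aio_context_release(ctx);

            g_free(bmds);
        }

        /* blk_list, by contrast, is still protected by the block
         * migration lock. */
        blk_mig_lock();
        while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
            QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
            g_free(blk->buf);
            g_free(blk);
        }
        blk_mig_unlock();
    }
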
Fam Zheng Feb. 23, 2016, 12:56 p.m. UTC | #3
On Tue, 02/23 10:42, Paolo Bonzini wrote:
> 
> 
> On 23/02/2016 06:39, Fam Zheng wrote:
> > BTW, could you also explain the blk_mig_lock() question (*) I had?
> > 
> > *: https://lists.gnu.org/archive/html/qemu-devel/2016-02/msg03317.html
> 
> Sorry, missed that:
> 
> > 
> >> @@ -597,21 +627,28 @@ static void block_migration_cleanup(void *opaque)
> >>  {
> >>      BlkMigDevState *bmds;
> >>      BlkMigBlock *blk;
> >> +    AioContext *ctx;
> >>  
> >>      bdrv_drain_all();
> >>  
> >>      unset_dirty_tracking();
> >>  
> >> -    blk_mig_lock();
> > 
> > Why is it okay to skip the blk_mig_lock() for block_mig_state.bmds_list?
> 
> The bmds_list is not protected by blk_mig_lock:
> 
>     /* Written during setup phase.  Can be read without a lock.  */
>     int blk_enable;
>     int shared_base;
>     QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
>     int64_t total_sector_sum;
>     bool zero_blocks;
> 
> block_migration_cleanup is called from qemu_savevm_state_cleanup, when
> there can be no concurrency between the migration thread and the I/O
> thread.  In fact the call of qemu_savevm_state_cleanup might as well be
> moved out of the migration thread.

Thanks!

Fam

Patch

diff --git a/migration/block.c b/migration/block.c
index 3bfa908..eea4788 100644
--- a/migration/block.c
+++ b/migration/block.c
@@ -54,6 +54,7 @@ typedef struct BlkMigDevState {
     int shared_base;
     int64_t total_sectors;
     QSIMPLEQ_ENTRY(BlkMigDevState) entry;
+    Error *blocker;
 
     /* Only used by migration thread.  Does not need a lock.  */
     int bulk_completed;
@@ -68,9 +69,10 @@ typedef struct BlkMigDevState {
     /* Protected by block migration lock.  */
     int64_t completed_sectors;
 
-    /* Protected by iothread lock / AioContext.  */
+    /* During migration this is protected by iothread lock / AioContext.
+     * Allocation and free happen during setup and cleanup respectively.
+     */
     BdrvDirtyBitmap *dirty_bitmap;
-    Error *blocker;
 } BlkMigDevState;
 
 typedef struct BlkMigBlock {
@@ -339,9 +341,10 @@ static int set_dirty_tracking(void)
     int ret;
 
     QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        /* Creating/dropping dirty bitmaps only requires the big QEMU lock.  */
+        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
         bmds->dirty_bitmap = bdrv_create_dirty_bitmap(bmds->bs, BLOCK_SIZE,
                                                       NULL, NULL);
+        aio_context_release(bdrv_get_aio_context(bmds->bs));
         if (!bmds->dirty_bitmap) {
             ret = -errno;
             goto fail;
@@ -352,7 +355,9 @@ static int set_dirty_tracking(void)
 fail:
     QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
         if (bmds->dirty_bitmap) {
+            aio_context_acquire(bdrv_get_aio_context(bmds->bs));
             bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
+            aio_context_release(bdrv_get_aio_context(bmds->bs));
         }
     }
     return ret;
@@ -365,8 +370,9 @@ static void unset_dirty_tracking(void)
     BlkMigDevState *bmds;
 
     QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        /* Creating/dropping dirty bitmaps only requires the big QEMU lock.  */
+        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
         bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
+        aio_context_release(bdrv_get_aio_context(bmds->bs));
     }
 }
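
The struct reshuffle at the top of the patch (moving Error *blocker out of
the "iothread lock / AioContext" group and next to the fields written during
setup) follows the same lifecycle argument as the new dirty_bitmap comment:
the blocker is only touched during setup and cleanup, where the migration
thread is not running.  A minimal sketch of that lifecycle, with hypothetical
helper names standing in for the real call sites in migration/block.c:

    /* Setup phase: runs before the migration thread starts using
     * bmds_list, so the blocker can be installed without any lock.
     * (Hypothetical helper; the real code does this while building
     * the bmds_list.) */
    static void blk_mig_block_device(BlkMigDevState *bmds,
                                     BlockDriverState *bs)
    {
        error_setg(&bmds->blocker, "block device is in use by migration");
        bdrv_op_block_all(bs, bmds->blocker);
    }

    /* Cleanup phase: runs after the migration thread has finished, so
     * the blocker can be removed and freed without any lock either. */
    static void blk_mig_unblock_device(BlkMigDevState *bmds)
    {
        bdrv_op_unblock_all(bmds->bs, bmds->blocker);
        error_free(bmds->blocker);
        bmds->blocker = NULL;
    }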