
[V1,01/32] savevm: add vmstate handler iterators

Message ID 1596122076-341293-2-git-send-email-steven.sistare@oracle.com (mailing list archive)
State New, archived
Series Live Update

Commit Message

Steven Sistare July 30, 2020, 3:14 p.m. UTC
Provide the SAVEVM_FOREACH and SAVEVM_FORALL macros for looping over the
savevm state handlers.  The former will filter handlers based on the
current operation once the later patch "savevm: VM handlers mode mask"
adds that check; the latter always loops over all handlers.

No functional change.

Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
---
 migration/savevm.c | 57 ++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 38 insertions(+), 19 deletions(-)

Comments

Dr. David Alan Gilbert Sept. 11, 2020, 4:24 p.m. UTC | #1
Apologies for taking a while to get around to this, 

* Steve Sistare (steven.sistare@oracle.com) wrote:
> Provide the SAVEVM_FOREACH and SAVEVM_FORALL macros for looping over the
> savevm state handlers.  The former will filter handlers based on the
> current operation once the later patch "savevm: VM handlers mode mask"
> adds that check; the latter always loops over all handlers.
> 
> No functional change.
> 
> Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
> ---
>  migration/savevm.c | 57 ++++++++++++++++++++++++++++++++++++------------------
>  1 file changed, 38 insertions(+), 19 deletions(-)
> 
> diff --git a/migration/savevm.c b/migration/savevm.c
> index 45c9dd9..a07fcad 100644
> --- a/migration/savevm.c
> +++ b/migration/savevm.c
> @@ -266,6 +266,25 @@ static SaveState savevm_state = {
>      .global_section_id = 0,
>  };
>  
> +/*
> + * The FOREACH macros will filter handlers based on the current operation when
> + * additional conditions are added in a subsequent patch.
> + */
> +
> +#define SAVEVM_FOREACH(se, entry)                                    \
> +    QTAILQ_FOREACH(se, &savevm_state.handlers, entry)                \
> +
> +#define SAVEVM_FOREACH_SAFE(se, entry, new_se)                       \
> +    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se)   \
> +
> +/* The FORALL macros unconditionally loop over all handlers. */
> +
> +#define SAVEVM_FORALL(se, entry)                                     \
> +    QTAILQ_FOREACH(se, &savevm_state.handlers, entry)
> +
> +#define SAVEVM_FORALL_SAFE(se, entry, new_se)                        \
> +    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se)
> +

OK, can I ask you to merge this with the next patch but to spin it the
other way, so that we have:

  SAVEVM_FOR(se, entry, mask)

and the places you use SAVEVM_FORALL_SAFE would become

  SAVEVM_FOR(se, entry, VMS_MODE_ALL)

I'm thinking at some point in the future we could merge a bunch of the
other flag checks in there.

Dave


>  static bool should_validate_capability(int capability)
>  {
>      assert(capability >= 0 && capability < MIGRATION_CAPABILITY__MAX);
> @@ -673,7 +692,7 @@ static uint32_t calculate_new_instance_id(const char *idstr)
>      SaveStateEntry *se;
>      uint32_t instance_id = 0;
>  
> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> +    SAVEVM_FORALL(se, entry) {
>          if (strcmp(idstr, se->idstr) == 0
>              && instance_id <= se->instance_id) {
>              instance_id = se->instance_id + 1;
> @@ -689,7 +708,7 @@ static int calculate_compat_instance_id(const char *idstr)
>      SaveStateEntry *se;
>      int instance_id = 0;
>  
> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> +    SAVEVM_FORALL(se, entry) {
>          if (!se->compat) {
>              continue;
>          }
> @@ -803,7 +822,7 @@ void unregister_savevm(VMStateIf *obj, const char *idstr, void *opaque)
>      }
>      pstrcat(id, sizeof(id), idstr);
>  
> -    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) {
> +    SAVEVM_FORALL_SAFE(se, entry, new_se) {
>          if (strcmp(se->idstr, id) == 0 && se->opaque == opaque) {
>              savevm_state_handler_remove(se);
>              g_free(se->compat);
> @@ -867,7 +886,7 @@ void vmstate_unregister(VMStateIf *obj, const VMStateDescription *vmsd,
>  {
>      SaveStateEntry *se, *new_se;
>  
> -    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) {
> +    SAVEVM_FORALL_SAFE(se, entry, new_se) {
>          if (se->vmsd == vmsd && se->opaque == opaque) {
>              savevm_state_handler_remove(se);
>              g_free(se->compat);
> @@ -1119,7 +1138,7 @@ bool qemu_savevm_state_blocked(Error **errp)
>  {
>      SaveStateEntry *se;
>  
> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> +    SAVEVM_FORALL(se, entry) {
>          if (se->vmsd && se->vmsd->unmigratable) {
>              error_setg(errp, "State blocked by non-migratable device '%s'",
>                         se->idstr);
> @@ -1145,7 +1164,7 @@ bool qemu_savevm_state_guest_unplug_pending(void)
>  {
>      SaveStateEntry *se;
>  
> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> +    SAVEVM_FOREACH(se, entry) {
>          if (se->vmsd && se->vmsd->dev_unplug_pending &&
>              se->vmsd->dev_unplug_pending(se->opaque)) {
>              return true;
> @@ -1162,7 +1181,7 @@ void qemu_savevm_state_setup(QEMUFile *f)
>      int ret;
>  
>      trace_savevm_state_setup();
> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> +    SAVEVM_FOREACH(se, entry) {
>          if (!se->ops || !se->ops->save_setup) {
>              continue;
>          }
> @@ -1193,7 +1212,7 @@ int qemu_savevm_state_resume_prepare(MigrationState *s)
>  
>      trace_savevm_state_resume_prepare();
>  
> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> +    SAVEVM_FOREACH(se, entry) {
>          if (!se->ops || !se->ops->resume_prepare) {
>              continue;
>          }
> @@ -1223,7 +1242,7 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
>      int ret = 1;
>  
>      trace_savevm_state_iterate();
> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> +    SAVEVM_FOREACH(se, entry) {
>          if (!se->ops || !se->ops->save_live_iterate) {
>              continue;
>          }
> @@ -1291,7 +1310,7 @@ void qemu_savevm_state_complete_postcopy(QEMUFile *f)
>      SaveStateEntry *se;
>      int ret;
>  
> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> +    SAVEVM_FOREACH(se, entry) {
>          if (!se->ops || !se->ops->save_live_complete_postcopy) {
>              continue;
>          }
> @@ -1324,7 +1343,7 @@ int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy)
>      SaveStateEntry *se;
>      int ret;
>  
> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> +    SAVEVM_FOREACH(se, entry) {
>          if (!se->ops ||
>              (in_postcopy && se->ops->has_postcopy &&
>               se->ops->has_postcopy(se->opaque)) ||
> @@ -1366,7 +1385,7 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
>      vmdesc = qjson_new();
>      json_prop_int(vmdesc, "page_size", qemu_target_page_size());
>      json_start_array(vmdesc, "devices");
> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> +    SAVEVM_FOREACH(se, entry) {
>  
>          if ((!se->ops || !se->ops->save_state) && !se->vmsd) {
>              continue;
> @@ -1476,7 +1495,7 @@ void qemu_savevm_state_pending(QEMUFile *f, uint64_t threshold_size,
>      *res_postcopy_only = 0;
>  
>  
> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> +    SAVEVM_FOREACH(se, entry) {
>          if (!se->ops || !se->ops->save_live_pending) {
>              continue;
>          }
> @@ -1501,7 +1520,7 @@ void qemu_savevm_state_cleanup(void)
>      }
>  
>      trace_savevm_state_cleanup();
> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> +    SAVEVM_FOREACH(se, entry) {
>          if (se->ops && se->ops->save_cleanup) {
>              se->ops->save_cleanup(se->opaque);
>          }
> @@ -1580,7 +1599,7 @@ int qemu_save_device_state(QEMUFile *f)
>      }
>      cpu_synchronize_all_states();
>  
> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> +    SAVEVM_FOREACH(se, entry) {
>          int ret;
>  
>          if (se->is_ram) {
> @@ -1612,7 +1631,7 @@ static SaveStateEntry *find_se(const char *idstr, uint32_t instance_id)
>  {
>      SaveStateEntry *se;
>  
> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> +    SAVEVM_FORALL(se, entry) {
>          if (!strcmp(se->idstr, idstr) &&
>              (instance_id == se->instance_id ||
>               instance_id == se->alias_id))
> @@ -2334,7 +2353,7 @@ qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
>      }
>  
>      trace_qemu_loadvm_state_section_partend(section_id);
> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> +    SAVEVM_FOREACH(se, entry) {
>          if (se->load_section_id == section_id) {
>              break;
>          }
> @@ -2400,7 +2419,7 @@ static int qemu_loadvm_state_setup(QEMUFile *f)
>      int ret;
>  
>      trace_loadvm_state_setup();
> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> +    SAVEVM_FOREACH(se, entry) {
>          if (!se->ops || !se->ops->load_setup) {
>              continue;
>          }
> @@ -2425,7 +2444,7 @@ void qemu_loadvm_state_cleanup(void)
>      SaveStateEntry *se;
>  
>      trace_loadvm_state_cleanup();
> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> +    SAVEVM_FOREACH(se, entry) {
>          if (se->ops && se->ops->load_cleanup) {
>              se->ops->load_cleanup(se->opaque);
>          }
> -- 
> 1.8.3.1
>
Steven Sistare Sept. 24, 2020, 9:43 p.m. UTC | #2
On 9/11/2020 12:24 PM, Dr. David Alan Gilbert wrote:
> Apologies for taking a while to get around to this, 
> 
> * Steve Sistare (steven.sistare@oracle.com) wrote:
>> Provide the SAVEVM_FOREACH and SAVEVM_FORALL macros for looping over the
>> savevm state handlers.  The former will filter handlers based on the
>> current operation once the later patch "savevm: VM handlers mode mask"
>> adds that check; the latter always loops over all handlers.
>>
>> No functional change.
>>
>> Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
>> ---
>>  migration/savevm.c | 57 ++++++++++++++++++++++++++++++++++++------------------
>>  1 file changed, 38 insertions(+), 19 deletions(-)
>>
>> diff --git a/migration/savevm.c b/migration/savevm.c
>> index 45c9dd9..a07fcad 100644
>> --- a/migration/savevm.c
>> +++ b/migration/savevm.c
>> @@ -266,6 +266,25 @@ static SaveState savevm_state = {
>>      .global_section_id = 0,
>>  };
>>  
>> +/*
>> + * The FOREACH macros will filter handlers based on the current operation when
>> + * additional conditions are added in a subsequent patch.
>> + */
>> +
>> +#define SAVEVM_FOREACH(se, entry)                                    \
>> +    QTAILQ_FOREACH(se, &savevm_state.handlers, entry)                \
>> +
>> +#define SAVEVM_FOREACH_SAFE(se, entry, new_se)                       \
>> +    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se)   \
>> +
>> +/* The FORALL macros unconditionally loop over all handlers. */
>> +
>> +#define SAVEVM_FORALL(se, entry)                                     \
>> +    QTAILQ_FOREACH(se, &savevm_state.handlers, entry)
>> +
>> +#define SAVEVM_FORALL_SAFE(se, entry, new_se)                        \
>> +    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se)
>> +
> 
> OK, can I ask you to merge this with the next patch but to spin it the
> other way, so that we have:
> 
>   SAVEVM_FOR(se, entry, mask)
> 
> and the places you use SAVEVM_FORALL_SAFE would become
> 
>   SAVEVM_FOR(se, entry, VMS_MODE_ALL)
> 
> I'm thinking at some point in the future we could merge a bunch of the
> other flag checks in there.

Sure.  Is this what you have in mind?

#define SAVEVM_FOR(se, entry, mask)                    \
    QTAILQ_FOREACH(se, &savevm_state.handlers, entry)  \
        if (savevm_state.mode & mask)

#define SAVEVM_FOR_SAFE(se, entry, new_se, mask)                    \
    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se)  \
        if (savevm_state.mode & mask)

Callers:
  SAVEVM_FOR(se, entry, mode_mask(se))
  SAVEVM_FOR(se, entry, VMS_MODE_ALL)
  SAVEVM_FOR_SAFE(se, entry, new_se, mode_mask(se))
  SAVEVM_FOR_SAFE(se, entry, new_se, VMS_MODE_ALL)
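
A minimal, self-contained sketch of the pattern above, in case it helps to
see it expanded.  It uses the plain <sys/queue.h> TAILQ macros in place of
QEMU's QTAILQ, and the mode values, the savevm_state.mode field and the
mode_mask() helper are illustrative assumptions from this discussion, not
existing QEMU API:

#include <stdio.h>
#include <sys/queue.h>

enum { VMS_MODE_NORMAL = 1, VMS_MODE_CPR = 2, VMS_MODE_ALL = ~0 };

typedef struct SaveStateEntry {
    const char *idstr;
    int modes;                              /* modes this handler supports */
    TAILQ_ENTRY(SaveStateEntry) entry;
} SaveStateEntry;

static struct {
    TAILQ_HEAD(, SaveStateEntry) handlers;
    int mode;                               /* mode of the current operation */
} savevm_state;

/* Trailing 'if': the caller's { ... } body only runs for matching handlers. */
#define SAVEVM_FOR(se, entry, mask)                      \
    TAILQ_FOREACH(se, &savevm_state.handlers, entry)     \
        if (savevm_state.mode & (mask))

#define mode_mask(se) ((se)->modes)         /* assumed per-handler mask accessor */

int main(void)
{
    SaveStateEntry ram = { .idstr = "ram",
                           .modes = VMS_MODE_NORMAL | VMS_MODE_CPR };
    SaveStateEntry dev = { .idstr = "legacy-dev",
                           .modes = VMS_MODE_NORMAL };
    SaveStateEntry *se;

    TAILQ_INIT(&savevm_state.handlers);
    TAILQ_INSERT_TAIL(&savevm_state.handlers, &ram, entry);
    TAILQ_INSERT_TAIL(&savevm_state.handlers, &dev, entry);
    savevm_state.mode = VMS_MODE_CPR;       /* pretend a cpr-like operation */

    SAVEVM_FOR(se, entry, mode_mask(se)) {  /* filtered: prints only "ram" */
        printf("filtered: %s\n", se->idstr);
    }
    SAVEVM_FOR(se, entry, VMS_MODE_ALL) {   /* unfiltered: prints both handlers */
        printf("all: %s\n", se->idstr);
    }
    return 0;
}

Because the filter is a trailing 'if', existing call sites keep their loop
bodies unchanged; only the macro name and the extra mask argument differ,
and SAVEVM_FOR_SAFE would wrap QTAILQ_FOREACH_SAFE the same way.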

- Steve

>>  static bool should_validate_capability(int capability)
>>  {
>>      assert(capability >= 0 && capability < MIGRATION_CAPABILITY__MAX);
>> @@ -673,7 +692,7 @@ static uint32_t calculate_new_instance_id(const char *idstr)
>>      SaveStateEntry *se;
>>      uint32_t instance_id = 0;
>>  
>> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
>> +    SAVEVM_FORALL(se, entry) {
>>          if (strcmp(idstr, se->idstr) == 0
>>              && instance_id <= se->instance_id) {
>>              instance_id = se->instance_id + 1;
>> @@ -689,7 +708,7 @@ static int calculate_compat_instance_id(const char *idstr)
>>      SaveStateEntry *se;
>>      int instance_id = 0;
>>  
>> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
>> +    SAVEVM_FORALL(se, entry) {
>>          if (!se->compat) {
>>              continue;
>>          }
>> @@ -803,7 +822,7 @@ void unregister_savevm(VMStateIf *obj, const char *idstr, void *opaque)
>>      }
>>      pstrcat(id, sizeof(id), idstr);
>>  
>> -    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) {
>> +    SAVEVM_FORALL_SAFE(se, entry, new_se) {
>>          if (strcmp(se->idstr, id) == 0 && se->opaque == opaque) {
>>              savevm_state_handler_remove(se);
>>              g_free(se->compat);
>> @@ -867,7 +886,7 @@ void vmstate_unregister(VMStateIf *obj, const VMStateDescription *vmsd,
>>  {
>>      SaveStateEntry *se, *new_se;
>>  
>> -    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) {
>> +    SAVEVM_FORALL_SAFE(se, entry, new_se) {
>>          if (se->vmsd == vmsd && se->opaque == opaque) {
>>              savevm_state_handler_remove(se);
>>              g_free(se->compat);
>> @@ -1119,7 +1138,7 @@ bool qemu_savevm_state_blocked(Error **errp)
>>  {
>>      SaveStateEntry *se;
>>  
>> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
>> +    SAVEVM_FORALL(se, entry) {
>>          if (se->vmsd && se->vmsd->unmigratable) {
>>              error_setg(errp, "State blocked by non-migratable device '%s'",
>>                         se->idstr);
>> @@ -1145,7 +1164,7 @@ bool qemu_savevm_state_guest_unplug_pending(void)
>>  {
>>      SaveStateEntry *se;
>>  
>> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
>> +    SAVEVM_FOREACH(se, entry) {
>>          if (se->vmsd && se->vmsd->dev_unplug_pending &&
>>              se->vmsd->dev_unplug_pending(se->opaque)) {
>>              return true;
>> @@ -1162,7 +1181,7 @@ void qemu_savevm_state_setup(QEMUFile *f)
>>      int ret;
>>  
>>      trace_savevm_state_setup();
>> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
>> +    SAVEVM_FOREACH(se, entry) {
>>          if (!se->ops || !se->ops->save_setup) {
>>              continue;
>>          }
>> @@ -1193,7 +1212,7 @@ int qemu_savevm_state_resume_prepare(MigrationState *s)
>>  
>>      trace_savevm_state_resume_prepare();
>>  
>> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
>> +    SAVEVM_FOREACH(se, entry) {
>>          if (!se->ops || !se->ops->resume_prepare) {
>>              continue;
>>          }
>> @@ -1223,7 +1242,7 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
>>      int ret = 1;
>>  
>>      trace_savevm_state_iterate();
>> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
>> +    SAVEVM_FOREACH(se, entry) {
>>          if (!se->ops || !se->ops->save_live_iterate) {
>>              continue;
>>          }
>> @@ -1291,7 +1310,7 @@ void qemu_savevm_state_complete_postcopy(QEMUFile *f)
>>      SaveStateEntry *se;
>>      int ret;
>>  
>> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
>> +    SAVEVM_FOREACH(se, entry) {
>>          if (!se->ops || !se->ops->save_live_complete_postcopy) {
>>              continue;
>>          }
>> @@ -1324,7 +1343,7 @@ int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy)
>>      SaveStateEntry *se;
>>      int ret;
>>  
>> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
>> +    SAVEVM_FOREACH(se, entry) {
>>          if (!se->ops ||
>>              (in_postcopy && se->ops->has_postcopy &&
>>               se->ops->has_postcopy(se->opaque)) ||
>> @@ -1366,7 +1385,7 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
>>      vmdesc = qjson_new();
>>      json_prop_int(vmdesc, "page_size", qemu_target_page_size());
>>      json_start_array(vmdesc, "devices");
>> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
>> +    SAVEVM_FOREACH(se, entry) {
>>  
>>          if ((!se->ops || !se->ops->save_state) && !se->vmsd) {
>>              continue;
>> @@ -1476,7 +1495,7 @@ void qemu_savevm_state_pending(QEMUFile *f, uint64_t threshold_size,
>>      *res_postcopy_only = 0;
>>  
>>  
>> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
>> +    SAVEVM_FOREACH(se, entry) {
>>          if (!se->ops || !se->ops->save_live_pending) {
>>              continue;
>>          }
>> @@ -1501,7 +1520,7 @@ void qemu_savevm_state_cleanup(void)
>>      }
>>  
>>      trace_savevm_state_cleanup();
>> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
>> +    SAVEVM_FOREACH(se, entry) {
>>          if (se->ops && se->ops->save_cleanup) {
>>              se->ops->save_cleanup(se->opaque);
>>          }
>> @@ -1580,7 +1599,7 @@ int qemu_save_device_state(QEMUFile *f)
>>      }
>>      cpu_synchronize_all_states();
>>  
>> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
>> +    SAVEVM_FOREACH(se, entry) {
>>          int ret;
>>  
>>          if (se->is_ram) {
>> @@ -1612,7 +1631,7 @@ static SaveStateEntry *find_se(const char *idstr, uint32_t instance_id)
>>  {
>>      SaveStateEntry *se;
>>  
>> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
>> +    SAVEVM_FORALL(se, entry) {
>>          if (!strcmp(se->idstr, idstr) &&
>>              (instance_id == se->instance_id ||
>>               instance_id == se->alias_id))
>> @@ -2334,7 +2353,7 @@ qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
>>      }
>>  
>>      trace_qemu_loadvm_state_section_partend(section_id);
>> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
>> +    SAVEVM_FOREACH(se, entry) {
>>          if (se->load_section_id == section_id) {
>>              break;
>>          }
>> @@ -2400,7 +2419,7 @@ static int qemu_loadvm_state_setup(QEMUFile *f)
>>      int ret;
>>  
>>      trace_loadvm_state_setup();
>> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
>> +    SAVEVM_FOREACH(se, entry) {
>>          if (!se->ops || !se->ops->load_setup) {
>>              continue;
>>          }
>> @@ -2425,7 +2444,7 @@ void qemu_loadvm_state_cleanup(void)
>>      SaveStateEntry *se;
>>  
>>      trace_loadvm_state_cleanup();
>> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
>> +    SAVEVM_FOREACH(se, entry) {
>>          if (se->ops && se->ops->load_cleanup) {
>>              se->ops->load_cleanup(se->opaque);
>>          }
>> -- 
>> 1.8.3.1
>>
Dr. David Alan Gilbert Sept. 25, 2020, 9:07 a.m. UTC | #3
* Steven Sistare (steven.sistare@oracle.com) wrote:
> On 9/11/2020 12:24 PM, Dr. David Alan Gilbert wrote:
> > Apologies for taking a while to get around to this, 
> > 
> > * Steve Sistare (steven.sistare@oracle.com) wrote:
> >> Provide the SAVEVM_FOREACH and SAVEVM_FORALL macros for looping over the
> >> savevm state handlers.  The former will filter handlers based on the
> >> current operation once the later patch "savevm: VM handlers mode mask"
> >> adds that check; the latter always loops over all handlers.
> >>
> >> No functional change.
> >>
> >> Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
> >> ---
> >>  migration/savevm.c | 57 ++++++++++++++++++++++++++++++++++++------------------
> >>  1 file changed, 38 insertions(+), 19 deletions(-)
> >>
> >> diff --git a/migration/savevm.c b/migration/savevm.c
> >> index 45c9dd9..a07fcad 100644
> >> --- a/migration/savevm.c
> >> +++ b/migration/savevm.c
> >> @@ -266,6 +266,25 @@ static SaveState savevm_state = {
> >>      .global_section_id = 0,
> >>  };
> >>  
> >> +/*
> >> + * The FOREACH macros will filter handlers based on the current operation when
> >> + * additional conditions are added in a subsequent patch.
> >> + */
> >> +
> >> +#define SAVEVM_FOREACH(se, entry)                                    \
> >> +    QTAILQ_FOREACH(se, &savevm_state.handlers, entry)                \
> >> +
> >> +#define SAVEVM_FOREACH_SAFE(se, entry, new_se)                       \
> >> +    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se)   \
> >> +
> >> +/* The FORALL macros unconditionally loop over all handlers. */
> >> +
> >> +#define SAVEVM_FORALL(se, entry)                                     \
> >> +    QTAILQ_FOREACH(se, &savevm_state.handlers, entry)
> >> +
> >> +#define SAVEVM_FORALL_SAFE(se, entry, new_se)                        \
> >> +    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se)
> >> +
> > 
> > OK, can I ask you to merge this with the next patch but to spin it the
> > other way, so that we have:
> > 
> >   SAVEVM_FOR(se, entry, mask)
> > 
> > and the places you use SAVEVM_FORALL_SAFE would become
> > 
> >   SAVEVM_FOR(se, entry, VMS_MODE_ALL)
> > 
> > I'm thinking at some point in the future we could merge a bunch of the
> > other flag checks in there.
> 
> Sure.  Is this what you have in mind?
> 
> #define SAVEVM_FOR(se, entry, mask)                    \
>     QTAILQ_FOREACH(se, &savevm_state.handlers, entry)  \
>         if (savevm_state.mode & mask)
> 
> #define SAVEVM_FOR_SAFE(se, entry, new_se, mask)                    \
>     QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se)  \
>         if (savevm_state.mode & mask)
> 
> Callers:
>   SAVEVM_FOR(se, entry, mode_mask(se))
>   SAVEVM_FOR(se, entry, VMS_MODE_ALL)
>   SAVEVM_FOR_SAFE(se, entry, new_se, mode_mask(se))
>   SAVEVM_FOR_SAFE(se, entry, new_se, VMS_MODE_ALL)

Yeh that looks about OK.

Dave

> - Steve
> 
> >>  static bool should_validate_capability(int capability)
> >>  {
> >>      assert(capability >= 0 && capability < MIGRATION_CAPABILITY__MAX);
> >> @@ -673,7 +692,7 @@ static uint32_t calculate_new_instance_id(const char *idstr)
> >>      SaveStateEntry *se;
> >>      uint32_t instance_id = 0;
> >>  
> >> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> >> +    SAVEVM_FORALL(se, entry) {
> >>          if (strcmp(idstr, se->idstr) == 0
> >>              && instance_id <= se->instance_id) {
> >>              instance_id = se->instance_id + 1;
> >> @@ -689,7 +708,7 @@ static int calculate_compat_instance_id(const char *idstr)
> >>      SaveStateEntry *se;
> >>      int instance_id = 0;
> >>  
> >> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> >> +    SAVEVM_FORALL(se, entry) {
> >>          if (!se->compat) {
> >>              continue;
> >>          }
> >> @@ -803,7 +822,7 @@ void unregister_savevm(VMStateIf *obj, const char *idstr, void *opaque)
> >>      }
> >>      pstrcat(id, sizeof(id), idstr);
> >>  
> >> -    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) {
> >> +    SAVEVM_FORALL_SAFE(se, entry, new_se) {
> >>          if (strcmp(se->idstr, id) == 0 && se->opaque == opaque) {
> >>              savevm_state_handler_remove(se);
> >>              g_free(se->compat);
> >> @@ -867,7 +886,7 @@ void vmstate_unregister(VMStateIf *obj, const VMStateDescription *vmsd,
> >>  {
> >>      SaveStateEntry *se, *new_se;
> >>  
> >> -    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) {
> >> +    SAVEVM_FORALL_SAFE(se, entry, new_se) {
> >>          if (se->vmsd == vmsd && se->opaque == opaque) {
> >>              savevm_state_handler_remove(se);
> >>              g_free(se->compat);
> >> @@ -1119,7 +1138,7 @@ bool qemu_savevm_state_blocked(Error **errp)
> >>  {
> >>      SaveStateEntry *se;
> >>  
> >> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> >> +    SAVEVM_FORALL(se, entry) {
> >>          if (se->vmsd && se->vmsd->unmigratable) {
> >>              error_setg(errp, "State blocked by non-migratable device '%s'",
> >>                         se->idstr);
> >> @@ -1145,7 +1164,7 @@ bool qemu_savevm_state_guest_unplug_pending(void)
> >>  {
> >>      SaveStateEntry *se;
> >>  
> >> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> >> +    SAVEVM_FOREACH(se, entry) {
> >>          if (se->vmsd && se->vmsd->dev_unplug_pending &&
> >>              se->vmsd->dev_unplug_pending(se->opaque)) {
> >>              return true;
> >> @@ -1162,7 +1181,7 @@ void qemu_savevm_state_setup(QEMUFile *f)
> >>      int ret;
> >>  
> >>      trace_savevm_state_setup();
> >> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> >> +    SAVEVM_FOREACH(se, entry) {
> >>          if (!se->ops || !se->ops->save_setup) {
> >>              continue;
> >>          }
> >> @@ -1193,7 +1212,7 @@ int qemu_savevm_state_resume_prepare(MigrationState *s)
> >>  
> >>      trace_savevm_state_resume_prepare();
> >>  
> >> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> >> +    SAVEVM_FOREACH(se, entry) {
> >>          if (!se->ops || !se->ops->resume_prepare) {
> >>              continue;
> >>          }
> >> @@ -1223,7 +1242,7 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
> >>      int ret = 1;
> >>  
> >>      trace_savevm_state_iterate();
> >> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> >> +    SAVEVM_FOREACH(se, entry) {
> >>          if (!se->ops || !se->ops->save_live_iterate) {
> >>              continue;
> >>          }
> >> @@ -1291,7 +1310,7 @@ void qemu_savevm_state_complete_postcopy(QEMUFile *f)
> >>      SaveStateEntry *se;
> >>      int ret;
> >>  
> >> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> >> +    SAVEVM_FOREACH(se, entry) {
> >>          if (!se->ops || !se->ops->save_live_complete_postcopy) {
> >>              continue;
> >>          }
> >> @@ -1324,7 +1343,7 @@ int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy)
> >>      SaveStateEntry *se;
> >>      int ret;
> >>  
> >> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> >> +    SAVEVM_FOREACH(se, entry) {
> >>          if (!se->ops ||
> >>              (in_postcopy && se->ops->has_postcopy &&
> >>               se->ops->has_postcopy(se->opaque)) ||
> >> @@ -1366,7 +1385,7 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
> >>      vmdesc = qjson_new();
> >>      json_prop_int(vmdesc, "page_size", qemu_target_page_size());
> >>      json_start_array(vmdesc, "devices");
> >> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> >> +    SAVEVM_FOREACH(se, entry) {
> >>  
> >>          if ((!se->ops || !se->ops->save_state) && !se->vmsd) {
> >>              continue;
> >> @@ -1476,7 +1495,7 @@ void qemu_savevm_state_pending(QEMUFile *f, uint64_t threshold_size,
> >>      *res_postcopy_only = 0;
> >>  
> >>  
> >> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> >> +    SAVEVM_FOREACH(se, entry) {
> >>          if (!se->ops || !se->ops->save_live_pending) {
> >>              continue;
> >>          }
> >> @@ -1501,7 +1520,7 @@ void qemu_savevm_state_cleanup(void)
> >>      }
> >>  
> >>      trace_savevm_state_cleanup();
> >> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> >> +    SAVEVM_FOREACH(se, entry) {
> >>          if (se->ops && se->ops->save_cleanup) {
> >>              se->ops->save_cleanup(se->opaque);
> >>          }
> >> @@ -1580,7 +1599,7 @@ int qemu_save_device_state(QEMUFile *f)
> >>      }
> >>      cpu_synchronize_all_states();
> >>  
> >> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> >> +    SAVEVM_FOREACH(se, entry) {
> >>          int ret;
> >>  
> >>          if (se->is_ram) {
> >> @@ -1612,7 +1631,7 @@ static SaveStateEntry *find_se(const char *idstr, uint32_t instance_id)
> >>  {
> >>      SaveStateEntry *se;
> >>  
> >> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> >> +    SAVEVM_FORALL(se, entry) {
> >>          if (!strcmp(se->idstr, idstr) &&
> >>              (instance_id == se->instance_id ||
> >>               instance_id == se->alias_id))
> >> @@ -2334,7 +2353,7 @@ qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
> >>      }
> >>  
> >>      trace_qemu_loadvm_state_section_partend(section_id);
> >> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> >> +    SAVEVM_FOREACH(se, entry) {
> >>          if (se->load_section_id == section_id) {
> >>              break;
> >>          }
> >> @@ -2400,7 +2419,7 @@ static int qemu_loadvm_state_setup(QEMUFile *f)
> >>      int ret;
> >>  
> >>      trace_loadvm_state_setup();
> >> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> >> +    SAVEVM_FOREACH(se, entry) {
> >>          if (!se->ops || !se->ops->load_setup) {
> >>              continue;
> >>          }
> >> @@ -2425,7 +2444,7 @@ void qemu_loadvm_state_cleanup(void)
> >>      SaveStateEntry *se;
> >>  
> >>      trace_loadvm_state_cleanup();
> >> -    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> >> +    SAVEVM_FOREACH(se, entry) {
> >>          if (se->ops && se->ops->load_cleanup) {
> >>              se->ops->load_cleanup(se->opaque);
> >>          }
> >> -- 
> >> 1.8.3.1
> >>
>

Patch

diff --git a/migration/savevm.c b/migration/savevm.c
index 45c9dd9..a07fcad 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -266,6 +266,25 @@  static SaveState savevm_state = {
     .global_section_id = 0,
 };
 
+/*
+ * The FOREACH macros will filter handlers based on the current operation when
+ * additional conditions are added in a subsequent patch.
+ */
+
+#define SAVEVM_FOREACH(se, entry)                                    \
+    QTAILQ_FOREACH(se, &savevm_state.handlers, entry)                \
+
+#define SAVEVM_FOREACH_SAFE(se, entry, new_se)                       \
+    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se)   \
+
+/* The FORALL macros unconditionally loop over all handlers. */
+
+#define SAVEVM_FORALL(se, entry)                                     \
+    QTAILQ_FOREACH(se, &savevm_state.handlers, entry)
+
+#define SAVEVM_FORALL_SAFE(se, entry, new_se)                        \
+    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se)
+
 static bool should_validate_capability(int capability)
 {
     assert(capability >= 0 && capability < MIGRATION_CAPABILITY__MAX);
@@ -673,7 +692,7 @@  static uint32_t calculate_new_instance_id(const char *idstr)
     SaveStateEntry *se;
     uint32_t instance_id = 0;
 
-    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+    SAVEVM_FORALL(se, entry) {
         if (strcmp(idstr, se->idstr) == 0
             && instance_id <= se->instance_id) {
             instance_id = se->instance_id + 1;
@@ -689,7 +708,7 @@  static int calculate_compat_instance_id(const char *idstr)
     SaveStateEntry *se;
     int instance_id = 0;
 
-    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+    SAVEVM_FORALL(se, entry) {
         if (!se->compat) {
             continue;
         }
@@ -803,7 +822,7 @@  void unregister_savevm(VMStateIf *obj, const char *idstr, void *opaque)
     }
     pstrcat(id, sizeof(id), idstr);
 
-    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) {
+    SAVEVM_FORALL_SAFE(se, entry, new_se) {
         if (strcmp(se->idstr, id) == 0 && se->opaque == opaque) {
             savevm_state_handler_remove(se);
             g_free(se->compat);
@@ -867,7 +886,7 @@  void vmstate_unregister(VMStateIf *obj, const VMStateDescription *vmsd,
 {
     SaveStateEntry *se, *new_se;
 
-    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) {
+    SAVEVM_FORALL_SAFE(se, entry, new_se) {
         if (se->vmsd == vmsd && se->opaque == opaque) {
             savevm_state_handler_remove(se);
             g_free(se->compat);
@@ -1119,7 +1138,7 @@  bool qemu_savevm_state_blocked(Error **errp)
 {
     SaveStateEntry *se;
 
-    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+    SAVEVM_FORALL(se, entry) {
         if (se->vmsd && se->vmsd->unmigratable) {
             error_setg(errp, "State blocked by non-migratable device '%s'",
                        se->idstr);
@@ -1145,7 +1164,7 @@  bool qemu_savevm_state_guest_unplug_pending(void)
 {
     SaveStateEntry *se;
 
-    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+    SAVEVM_FOREACH(se, entry) {
         if (se->vmsd && se->vmsd->dev_unplug_pending &&
             se->vmsd->dev_unplug_pending(se->opaque)) {
             return true;
@@ -1162,7 +1181,7 @@  void qemu_savevm_state_setup(QEMUFile *f)
     int ret;
 
     trace_savevm_state_setup();
-    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+    SAVEVM_FOREACH(se, entry) {
         if (!se->ops || !se->ops->save_setup) {
             continue;
         }
@@ -1193,7 +1212,7 @@  int qemu_savevm_state_resume_prepare(MigrationState *s)
 
     trace_savevm_state_resume_prepare();
 
-    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+    SAVEVM_FOREACH(se, entry) {
         if (!se->ops || !se->ops->resume_prepare) {
             continue;
         }
@@ -1223,7 +1242,7 @@  int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
     int ret = 1;
 
     trace_savevm_state_iterate();
-    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+    SAVEVM_FOREACH(se, entry) {
         if (!se->ops || !se->ops->save_live_iterate) {
             continue;
         }
@@ -1291,7 +1310,7 @@  void qemu_savevm_state_complete_postcopy(QEMUFile *f)
     SaveStateEntry *se;
     int ret;
 
-    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+    SAVEVM_FOREACH(se, entry) {
         if (!se->ops || !se->ops->save_live_complete_postcopy) {
             continue;
         }
@@ -1324,7 +1343,7 @@  int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy)
     SaveStateEntry *se;
     int ret;
 
-    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+    SAVEVM_FOREACH(se, entry) {
         if (!se->ops ||
             (in_postcopy && se->ops->has_postcopy &&
              se->ops->has_postcopy(se->opaque)) ||
@@ -1366,7 +1385,7 @@  int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
     vmdesc = qjson_new();
     json_prop_int(vmdesc, "page_size", qemu_target_page_size());
     json_start_array(vmdesc, "devices");
-    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+    SAVEVM_FOREACH(se, entry) {
 
         if ((!se->ops || !se->ops->save_state) && !se->vmsd) {
             continue;
@@ -1476,7 +1495,7 @@  void qemu_savevm_state_pending(QEMUFile *f, uint64_t threshold_size,
     *res_postcopy_only = 0;
 
 
-    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+    SAVEVM_FOREACH(se, entry) {
         if (!se->ops || !se->ops->save_live_pending) {
             continue;
         }
@@ -1501,7 +1520,7 @@  void qemu_savevm_state_cleanup(void)
     }
 
     trace_savevm_state_cleanup();
-    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+    SAVEVM_FOREACH(se, entry) {
         if (se->ops && se->ops->save_cleanup) {
             se->ops->save_cleanup(se->opaque);
         }
@@ -1580,7 +1599,7 @@  int qemu_save_device_state(QEMUFile *f)
     }
     cpu_synchronize_all_states();
 
-    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+    SAVEVM_FOREACH(se, entry) {
         int ret;
 
         if (se->is_ram) {
@@ -1612,7 +1631,7 @@  static SaveStateEntry *find_se(const char *idstr, uint32_t instance_id)
 {
     SaveStateEntry *se;
 
-    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+    SAVEVM_FORALL(se, entry) {
         if (!strcmp(se->idstr, idstr) &&
             (instance_id == se->instance_id ||
              instance_id == se->alias_id))
@@ -2334,7 +2353,7 @@  qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
     }
 
     trace_qemu_loadvm_state_section_partend(section_id);
-    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+    SAVEVM_FOREACH(se, entry) {
         if (se->load_section_id == section_id) {
             break;
         }
@@ -2400,7 +2419,7 @@  static int qemu_loadvm_state_setup(QEMUFile *f)
     int ret;
 
     trace_loadvm_state_setup();
-    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+    SAVEVM_FOREACH(se, entry) {
         if (!se->ops || !se->ops->load_setup) {
             continue;
         }
@@ -2425,7 +2444,7 @@  void qemu_loadvm_state_cleanup(void)
     SaveStateEntry *se;
 
     trace_loadvm_state_cleanup();
-    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+    SAVEVM_FOREACH(se, entry) {
         if (se->ops && se->ops->load_cleanup) {
             se->ops->load_cleanup(se->opaque);
         }