
[v9,5/5] x86/ioreq server: Synchronously reset outstanding p2m_ioreq_server entries when an ioreq server unmaps.

Message ID 1490064773-26751-6-git-send-email-yu.c.zhang@linux.intel.com (mailing list archive)
State New, archived

Commit Message

Yu Zhang March 21, 2017, 2:52 a.m. UTC
After an ioreq server has unmapped, the remaining p2m_ioreq_server
entries need to be reset back to p2m_ram_rw. This patch does this
synchronously by iterating the p2m table.

The synchronous resetting is necessary because we need to guarantee
the p2m table is clean before another ioreq server is mapped. And
since sweeping the p2m table could be time consuming, it is done
with hypercall continuation.

Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
---
Cc: Paul Durrant <paul.durrant@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: George Dunlap <george.dunlap@eu.citrix.com>

changes in v2: 
  - According to comments from Jan and Andrew: do not use the
    HVMOP-type hypercall continuation method. Instead, add an
    opaque field in xen_dm_op_map_mem_type_to_ioreq_server to
    store the gfn.
  - According to comments from Jan: change the routine's comments
    and the parameter names of p2m_finish_type_change().

changes in v1: 
  - This patch is split out from patch 4 of the previous version.
  - According to comments from Jan: update gfn_start when using
    hypercall continuation to reset the p2m type.
  - According to comments from Jan: use min() to compare gfn_end
    and the max mapped pfn in p2m_finish_type_change().
---
 xen/arch/x86/hvm/dm.c     | 41 ++++++++++++++++++++++++++++++++++++++---
 xen/arch/x86/mm/p2m.c     | 29 +++++++++++++++++++++++++++++
 xen/include/asm-x86/p2m.h |  7 +++++++
 3 files changed, 74 insertions(+), 3 deletions(-)
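
For readers skimming the thread, the gist of the mechanism is as follows. This is a condensed, illustrative sketch of the dm.c hunk in the patch below, not the literal code: the DMOP's opaque field records the next gfn to process, const_op is cleared so the updated op is copied back to the caller's buffer, and -ERESTART requests a hypercall continuation.

/* Condensed sketch of the continuation flow (illustrative only): */
unsigned long first_gfn = data->opaque;      /* zero on the first invocation */

const_op = false;                            /* opaque changes, so copy the op back */

if ( first_gfn == 0 )                        /* the actual (un)map happens only once */
    rc = hvm_map_mem_type_to_ioreq_server(d, data->id, data->type, data->flags);

/* After an unmap (flags == 0), sweep stale p2m_ioreq_server entries in batches. */
while ( read_atomic(&p2m->ioreq.entry_count) && first_gfn <= p2m->max_mapped_pfn )
{
    p2m_finish_type_change(d, first_gfn, first_gfn + 0xff,
                           p2m_ioreq_server, p2m_ram_rw);
    first_gfn += 0x100;

    if ( first_gfn <= p2m->max_mapped_pfn && hypercall_preempt_check() )
    {
        data->opaque = first_gfn;            /* resume point for the continuation */
        rc = -ERESTART;
        break;
    }
}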

Comments

Paul Durrant March 21, 2017, 10 a.m. UTC | #1
> -----Original Message-----
> From: Yu Zhang [mailto:yu.c.zhang@linux.intel.com]
> Sent: 21 March 2017 02:53
> To: xen-devel@lists.xen.org
> Cc: zhiyuan.lv@intel.com; Paul Durrant <Paul.Durrant@citrix.com>; Jan
> Beulich <jbeulich@suse.com>; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; George Dunlap
> <George.Dunlap@citrix.com>
> Subject: [PATCH v9 5/5] x86/ioreq server: Synchronously reset outstanding
> p2m_ioreq_server entries when an ioreq server unmaps.
> 
> After an ioreq server has unmapped, the remaining p2m_ioreq_server
> entries need to be reset back to p2m_ram_rw. This patch does this
> synchronously by iterating the p2m table.
> 
> The synchronous resetting is necessary because we need to guarantee
> the p2m table is clean before another ioreq server is mapped. And
> since the sweeping of p2m table could be time consuming, it is done
> with hypercall continuation.
> 
> Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
> ---
> Cc: Paul Durrant <paul.durrant@citrix.com>
> Cc: Jan Beulich <jbeulich@suse.com>
> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> Cc: George Dunlap <george.dunlap@eu.citrix.com>
> 
> changes in v2:
>   - According to comments from Jan and Andrew: do not use the
>     HVMOP type hypercall continuation method. Instead, adding
>     an opaque in xen_dm_op_map_mem_type_to_ioreq_server to
>     store the gfn.
>   - According to comments from Jan: change routine's comments
>     and name of parameters of p2m_finish_type_change().
> 
> changes in v1:
>   - This patch is splitted from patch 4 of last version.
>   - According to comments from Jan: update the gfn_start for
>     when use hypercall continuation to reset the p2m type.
>   - According to comments from Jan: use min() to compare gfn_end
>     and max mapped pfn in p2m_finish_type_change()
> ---
>  xen/arch/x86/hvm/dm.c     | 41
> ++++++++++++++++++++++++++++++++++++++---
>  xen/arch/x86/mm/p2m.c     | 29 +++++++++++++++++++++++++++++
>  xen/include/asm-x86/p2m.h |  7 +++++++
>  3 files changed, 74 insertions(+), 3 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
> index 3f9484d..a24d0f8 100644
> --- a/xen/arch/x86/hvm/dm.c
> +++ b/xen/arch/x86/hvm/dm.c
> @@ -385,16 +385,51 @@ static int dm_op(domid_t domid,
> 
>      case XEN_DMOP_map_mem_type_to_ioreq_server:
>      {
> -        const struct xen_dm_op_map_mem_type_to_ioreq_server *data =
> +        struct xen_dm_op_map_mem_type_to_ioreq_server *data =
>              &op.u.map_mem_type_to_ioreq_server;
> +        unsigned long first_gfn = data->opaque;
> +        unsigned long last_gfn;
> +
> +        const_op = false;
> 
>          rc = -EOPNOTSUPP;
>          /* Only support for HAP enabled hvm. */
>          if ( !hap_enabled(d) )
>              break;
> 
> -        rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
> -                                              data->type, data->flags);
> +        if ( first_gfn == 0 )
> +            rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
> +                                                  data->type, data->flags);
> +        /*
> +         * Iterate p2m table when an ioreq server unmaps from
> p2m_ioreq_server,
> +         * and reset the remaining p2m_ioreq_server entries back to
> p2m_ram_rw.
> +         */
> +        if ( (first_gfn > 0) || (data->flags == 0 && rc == 0) )
> +        {
> +            struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +
> +            while ( read_atomic(&p2m->ioreq.entry_count) &&
> +                    first_gfn <= p2m->max_mapped_pfn )
> +            {
> +                /* Iterate p2m table for 256 gfns each time. */
> +                last_gfn = first_gfn + 0xff;
> +

Might be worth a comment here to say that p2m_finish_type_change() limits last_gfn appropriately, because it kind of looks wrong to be blindly calling it with first_gfn + 0xff. Or perhaps, rather than passing last_gfn, pass a 'max_nr' parameter of 256 instead. Then you can drop last_gfn altogether. If you prefer the parameters as they are, then at least limit the scope of last_gfn to this while loop.
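
For illustration, the suggested interface might end up looking something like this. This is only a sketch based on the function body in the patch, with last_gfn replaced by a batch limit; the name max_nr and the batch size of 256 are the suggestion, not code from the series:

/* Sketch: change up to max_nr gfns starting at first_gfn from ot to nt. */
void p2m_finish_type_change(struct domain *d,
                            unsigned long first_gfn, unsigned long max_nr,
                            p2m_type_t ot, p2m_type_t nt)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    unsigned long gfn = first_gfn;
    /* The clamping now clearly lives inside the function, not in the caller. */
    unsigned long last_gfn = min(first_gfn + max_nr - 1, p2m->max_mapped_pfn);
    p2m_type_t t;

    ASSERT(ot != nt);
    ASSERT(p2m_is_changeable(ot) && p2m_is_changeable(nt));

    p2m_lock(p2m);

    while ( gfn <= last_gfn )
    {
        get_gfn_query_unlocked(d, gfn, &t);

        if ( t == ot )
            p2m_change_type_one(d, gfn, t, nt);

        gfn++;
    }

    p2m_unlock(p2m);
}

/* The dm.c caller then reads naturally as "a batch of 256": */
p2m_finish_type_change(d, first_gfn, 256, p2m_ioreq_server, p2m_ram_rw);
first_gfn += 256;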

> +                p2m_finish_type_change(d, first_gfn, last_gfn,
> +                                       p2m_ioreq_server, p2m_ram_rw);
> +
> +                first_gfn = last_gfn + 1;
> +
> +                /* Check for continuation if it's not the last iteration. */
> +                if ( first_gfn <= p2m->max_mapped_pfn &&
> +                     hypercall_preempt_check() )
> +                {
> +                    rc = -ERESTART;
> +                    data->opaque = first_gfn;
> +                    break;
> +                }
> +            }
> +        }
> +
>          break;
>      }
> 
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index e3e54f1..0a2f276 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1038,6 +1038,35 @@ void p2m_change_type_range(struct domain *d,
>      p2m_unlock(p2m);
>  }
> 
> +/* Synchronously modify the p2m type for a range of gfns from ot to nt. */
> +void p2m_finish_type_change(struct domain *d,

As I said above, consider a 'max_nr' parameter here rather than last_gfn.

  Paul

> +                            unsigned long first_gfn, unsigned long last_gfn,
> +                            p2m_type_t ot, p2m_type_t nt)
> +{
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +    p2m_type_t t;
> +    unsigned long gfn = first_gfn;
> +
> +    ASSERT(first_gfn <= last_gfn);
> +    ASSERT(ot != nt);
> +    ASSERT(p2m_is_changeable(ot) && p2m_is_changeable(nt));
> +
> +    p2m_lock(p2m);
> +
> +    last_gfn = min(last_gfn, p2m->max_mapped_pfn);
> +    while ( gfn <= last_gfn )
> +    {
> +        get_gfn_query_unlocked(d, gfn, &t);
> +
> +        if ( t == ot )
> +            p2m_change_type_one(d, gfn, t, nt);
> +
> +        gfn++;
> +    }
> +
> +    p2m_unlock(p2m);
> +}
> +
>  /*
>   * Returns:
>   *    0              for success
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
> index 395f125..3d665e8 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -611,6 +611,13 @@ void p2m_change_type_range(struct domain *d,
>  int p2m_change_type_one(struct domain *d, unsigned long gfn,
>                          p2m_type_t ot, p2m_type_t nt);
> 
> +/* Synchronously change the p2m type for a range of gfns:
> + * [first_gfn ... last_gfn]. */
> +void p2m_finish_type_change(struct domain *d,
> +                            unsigned long first_gfn,
> +                            unsigned long last_gfn,
> +                            p2m_type_t ot, p2m_type_t nt);
> +
>  /* Report a change affecting memory types. */
>  void p2m_memory_type_changed(struct domain *d);
> 
> --
> 1.9.1
Yu Zhang March 21, 2017, 11:15 a.m. UTC | #2
On 3/21/2017 6:00 PM, Paul Durrant wrote:
>> -----Original Message-----
>> From: Yu Zhang [mailto:yu.c.zhang@linux.intel.com]
>> Sent: 21 March 2017 02:53
>> To: xen-devel@lists.xen.org
>> Cc: zhiyuan.lv@intel.com; Paul Durrant <Paul.Durrant@citrix.com>; Jan
>> Beulich <jbeulich@suse.com>; Andrew Cooper
>> <Andrew.Cooper3@citrix.com>; George Dunlap
>> <George.Dunlap@citrix.com>
>> Subject: [PATCH v9 5/5] x86/ioreq server: Synchronously reset outstanding
>> p2m_ioreq_server entries when an ioreq server unmaps.
>>
>> After an ioreq server has unmapped, the remaining p2m_ioreq_server
>> entries need to be reset back to p2m_ram_rw. This patch does this
>> synchronously by iterating the p2m table.
>>
>> The synchronous resetting is necessary because we need to guarantee
>> the p2m table is clean before another ioreq server is mapped. And
>> since the sweeping of p2m table could be time consuming, it is done
>> with hypercall continuation.
>>
>> Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
>> ---
>> Cc: Paul Durrant <paul.durrant@citrix.com>
>> Cc: Jan Beulich <jbeulich@suse.com>
>> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
>> Cc: George Dunlap <george.dunlap@eu.citrix.com>
>>
>> changes in v2:
>>    - According to comments from Jan and Andrew: do not use the
>>      HVMOP type hypercall continuation method. Instead, adding
>>      an opaque in xen_dm_op_map_mem_type_to_ioreq_server to
>>      store the gfn.
>>    - According to comments from Jan: change routine's comments
>>      and name of parameters of p2m_finish_type_change().
>>
>> changes in v1:
>>    - This patch is splitted from patch 4 of last version.
>>    - According to comments from Jan: update the gfn_start for
>>      when use hypercall continuation to reset the p2m type.
>>    - According to comments from Jan: use min() to compare gfn_end
>>      and max mapped pfn in p2m_finish_type_change()
>> ---
>>   xen/arch/x86/hvm/dm.c     | 41
>> ++++++++++++++++++++++++++++++++++++++---
>>   xen/arch/x86/mm/p2m.c     | 29 +++++++++++++++++++++++++++++
>>   xen/include/asm-x86/p2m.h |  7 +++++++
>>   3 files changed, 74 insertions(+), 3 deletions(-)
>>
>> diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
>> index 3f9484d..a24d0f8 100644
>> --- a/xen/arch/x86/hvm/dm.c
>> +++ b/xen/arch/x86/hvm/dm.c
>> @@ -385,16 +385,51 @@ static int dm_op(domid_t domid,
>>
>>       case XEN_DMOP_map_mem_type_to_ioreq_server:
>>       {
>> -        const struct xen_dm_op_map_mem_type_to_ioreq_server *data =
>> +        struct xen_dm_op_map_mem_type_to_ioreq_server *data =
>>               &op.u.map_mem_type_to_ioreq_server;
>> +        unsigned long first_gfn = data->opaque;
>> +        unsigned long last_gfn;
>> +
>> +        const_op = false;
>>
>>           rc = -EOPNOTSUPP;
>>           /* Only support for HAP enabled hvm. */
>>           if ( !hap_enabled(d) )
>>               break;
>>
>> -        rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
>> -                                              data->type, data->flags);
>> +        if ( first_gfn == 0 )
>> +            rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
>> +                                                  data->type, data->flags);
>> +        /*
>> +         * Iterate p2m table when an ioreq server unmaps from
>> p2m_ioreq_server,
>> +         * and reset the remaining p2m_ioreq_server entries back to
>> p2m_ram_rw.
>> +         */
>> +        if ( (first_gfn > 0) || (data->flags == 0 && rc == 0) )
>> +        {
>> +            struct p2m_domain *p2m = p2m_get_hostp2m(d);
>> +
>> +            while ( read_atomic(&p2m->ioreq.entry_count) &&
>> +                    first_gfn <= p2m->max_mapped_pfn )
>> +            {
>> +                /* Iterate p2m table for 256 gfns each time. */
>> +                last_gfn = first_gfn + 0xff;
>> +
> Might be worth a comment here to sat that p2m_finish_type_change() limits last_gfn appropriately because it kind of looks wrong to be blindly calling it with first_gfn + 0xff. Or perhaps, rather than passing last_gfn, pass a 'max_nr' parameter of 256 instead. Then you can drop last_gfn altogether. If you prefer the parameters as they are then at least limit the scope of last_gfn to this while loop.
Thanks for your comments, Paul. :)
Well, setting last_gfn to first_gfn + 0xff does look a bit awkward. But why
would using a 'max_nr' with a magic number, say 256, look better? Are there
any other benefits? :-)

Yu
>
>> +                p2m_finish_type_change(d, first_gfn, last_gfn,
>> +                                       p2m_ioreq_server, p2m_ram_rw);
>> +
>> +                first_gfn = last_gfn + 1;
>> +
>> +                /* Check for continuation if it's not the last iteration. */
>> +                if ( first_gfn <= p2m->max_mapped_pfn &&
>> +                     hypercall_preempt_check() )
>> +                {
>> +                    rc = -ERESTART;
>> +                    data->opaque = first_gfn;
>> +                    break;
>> +                }
>> +            }
>> +        }
>> +
>>           break;
>>       }
>>
>> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
>> index e3e54f1..0a2f276 100644
>> --- a/xen/arch/x86/mm/p2m.c
>> +++ b/xen/arch/x86/mm/p2m.c
>> @@ -1038,6 +1038,35 @@ void p2m_change_type_range(struct domain *d,
>>       p2m_unlock(p2m);
>>   }
>>
>> +/* Synchronously modify the p2m type for a range of gfns from ot to nt. */
>> +void p2m_finish_type_change(struct domain *d,
> As I said above, consider a 'max_nr' parameter here rather than last_gfn.
>
>    Paul
>
>> +                            unsigned long first_gfn, unsigned long last_gfn,
>> +                            p2m_type_t ot, p2m_type_t nt)
>> +{
>> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
>> +    p2m_type_t t;
>> +    unsigned long gfn = first_gfn;
>> +
>> +    ASSERT(first_gfn <= last_gfn);
>> +    ASSERT(ot != nt);
>> +    ASSERT(p2m_is_changeable(ot) && p2m_is_changeable(nt));
>> +
>> +    p2m_lock(p2m);
>> +
>> +    last_gfn = min(last_gfn, p2m->max_mapped_pfn);
>> +    while ( gfn <= last_gfn )
>> +    {
>> +        get_gfn_query_unlocked(d, gfn, &t);
>> +
>> +        if ( t == ot )
>> +            p2m_change_type_one(d, gfn, t, nt);
>> +
>> +        gfn++;
>> +    }
>> +
>> +    p2m_unlock(p2m);
>> +}
>> +
>>   /*
>>    * Returns:
>>    *    0              for success
>> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
>> index 395f125..3d665e8 100644
>> --- a/xen/include/asm-x86/p2m.h
>> +++ b/xen/include/asm-x86/p2m.h
>> @@ -611,6 +611,13 @@ void p2m_change_type_range(struct domain *d,
>>   int p2m_change_type_one(struct domain *d, unsigned long gfn,
>>                           p2m_type_t ot, p2m_type_t nt);
>>
>> +/* Synchronously change the p2m type for a range of gfns:
>> + * [first_gfn ... last_gfn]. */
>> +void p2m_finish_type_change(struct domain *d,
>> +                            unsigned long first_gfn,
>> +                            unsigned long last_gfn,
>> +                            p2m_type_t ot, p2m_type_t nt);
>> +
>>   /* Report a change affecting memory types. */
>>   void p2m_memory_type_changed(struct domain *d);
>>
>> --
>> 1.9.1
>
Paul Durrant March 21, 2017, 1:49 p.m. UTC | #3
> -----Original Message-----
[snip]
> >> +        if ( (first_gfn > 0) || (data->flags == 0 && rc == 0) )
> >> +        {
> >> +            struct p2m_domain *p2m = p2m_get_hostp2m(d);
> >> +
> >> +            while ( read_atomic(&p2m->ioreq.entry_count) &&
> >> +                    first_gfn <= p2m->max_mapped_pfn )
> >> +            {
> >> +                /* Iterate p2m table for 256 gfns each time. */
> >> +                last_gfn = first_gfn + 0xff;
> >> +
> > Might be worth a comment here to sat that p2m_finish_type_change()
> limits last_gfn appropriately because it kind of looks wrong to be blindly
> calling it with first_gfn + 0xff. Or perhaps, rather than passing last_gfn, pass a
> 'max_nr' parameter of 256 instead. Then you can drop last_gfn altogether. If
> you prefer the parameters as they are then at least limit the scope of
> last_gfn to this while loop.
> Thanks for your comments, Paul. :)
> Well, setting last_gfn with first_gfn+0xff looks a bit awkward. But why
> using a 'max_nr' with a magic number, say 256, looks better? Or any
> other benefits? :-)
> 

Well, to my eyes, calling it max_nr in the function would make it clear that it's a limit rather than a definite count, and passing 256 in the call would make it clear that it is the chosen batch size.

Does that make sense?

  Paul
Yu Zhang March 21, 2017, 2:14 p.m. UTC | #4
On 3/21/2017 9:49 PM, Paul Durrant wrote:
>> -----Original Message-----
> [snip]
>>>> +        if ( (first_gfn > 0) || (data->flags == 0 && rc == 0) )
>>>> +        {
>>>> +            struct p2m_domain *p2m = p2m_get_hostp2m(d);
>>>> +
>>>> +            while ( read_atomic(&p2m->ioreq.entry_count) &&
>>>> +                    first_gfn <= p2m->max_mapped_pfn )
>>>> +            {
>>>> +                /* Iterate p2m table for 256 gfns each time. */
>>>> +                last_gfn = first_gfn + 0xff;
>>>> +
>>> Might be worth a comment here to sat that p2m_finish_type_change()
>> limits last_gfn appropriately because it kind of looks wrong to be blindly
>> calling it with first_gfn + 0xff. Or perhaps, rather than passing last_gfn, pass a
>> 'max_nr' parameter of 256 instead. Then you can drop last_gfn altogether. If
>> you prefer the parameters as they are then at least limit the scope of
>> last_gfn to this while loop.
>> Thanks for your comments, Paul. :)
>> Well, setting last_gfn with first_gfn+0xff looks a bit awkward. But why
>> using a 'max_nr' with a magic number, say 256, looks better? Or any
>> other benefits? :-)
>>
> Well, to my eyes calling it max_nr in the function would make it clear it's a limit rather than a definite count and then passing 256 in the call would make it clear that it is the chosen batch size.
>
> Does that make sense?

Sounds reasonable. Thanks! :-)
Yu
>    Paul
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> https://lists.xen.org/xen-devel
Tian, Kevin March 22, 2017, 8:28 a.m. UTC | #5
> From: Yu Zhang
> Sent: Tuesday, March 21, 2017 10:53 AM
> 
> After an ioreq server has unmapped, the remaining p2m_ioreq_server
> entries need to be reset back to p2m_ram_rw. This patch does this
> synchronously by iterating the p2m table.
> 
> The synchronous resetting is necessary because we need to guarantee the
> p2m table is clean before another ioreq server is mapped. And since the
> sweeping of p2m table could be time consuming, it is done with hypercall
> continuation.
> 
> Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
> ---
> Cc: Paul Durrant <paul.durrant@citrix.com>
> Cc: Jan Beulich <jbeulich@suse.com>
> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> Cc: George Dunlap <george.dunlap@eu.citrix.com>
> 
> changes in v2:
>   - According to comments from Jan and Andrew: do not use the
>     HVMOP type hypercall continuation method. Instead, adding
>     an opaque in xen_dm_op_map_mem_type_to_ioreq_server to
>     store the gfn.
>   - According to comments from Jan: change routine's comments
>     and name of parameters of p2m_finish_type_change().
> 
> changes in v1:
>   - This patch is splitted from patch 4 of last version.
>   - According to comments from Jan: update the gfn_start for
>     when use hypercall continuation to reset the p2m type.
>   - According to comments from Jan: use min() to compare gfn_end
>     and max mapped pfn in p2m_finish_type_change()
> ---
>  xen/arch/x86/hvm/dm.c     | 41
> ++++++++++++++++++++++++++++++++++++++---
>  xen/arch/x86/mm/p2m.c     | 29 +++++++++++++++++++++++++++++
>  xen/include/asm-x86/p2m.h |  7 +++++++
>  3 files changed, 74 insertions(+), 3 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c index
> 3f9484d..a24d0f8 100644
> --- a/xen/arch/x86/hvm/dm.c
> +++ b/xen/arch/x86/hvm/dm.c
> @@ -385,16 +385,51 @@ static int dm_op(domid_t domid,
> 
>      case XEN_DMOP_map_mem_type_to_ioreq_server:
>      {
> -        const struct xen_dm_op_map_mem_type_to_ioreq_server *data =
> +        struct xen_dm_op_map_mem_type_to_ioreq_server *data =
>              &op.u.map_mem_type_to_ioreq_server;
> +        unsigned long first_gfn = data->opaque;
> +        unsigned long last_gfn;
> +
> +        const_op = false;
> 
>          rc = -EOPNOTSUPP;
>          /* Only support for HAP enabled hvm. */
>          if ( !hap_enabled(d) )
>              break;
> 
> -        rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
> -                                              data->type, data->flags);
> +        if ( first_gfn == 0 )
> +            rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
> +                                                  data->type, data->flags);
> +        /*
> +         * Iterate p2m table when an ioreq server unmaps from
> p2m_ioreq_server,
> +         * and reset the remaining p2m_ioreq_server entries back to
> p2m_ram_rw.
> +         */

Can you elaborate on how the device model is expected to use this
new extension, i.e. how it decides first_gfn?

> +        if ( (first_gfn > 0) || (data->flags == 0 && rc == 0) )
> +        {
> +            struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +
> +            while ( read_atomic(&p2m->ioreq.entry_count) &&
> +                    first_gfn <= p2m->max_mapped_pfn )
> +            {
> +                /* Iterate p2m table for 256 gfns each time. */
> +                last_gfn = first_gfn + 0xff;
> +
> +                p2m_finish_type_change(d, first_gfn, last_gfn,
> +                                       p2m_ioreq_server, p2m_ram_rw);
> +
> +                first_gfn = last_gfn + 1;
> +
> +                /* Check for continuation if it's not the last iteration. */
> +                if ( first_gfn <= p2m->max_mapped_pfn &&
> +                     hypercall_preempt_check() )
> +                {
> +                    rc = -ERESTART;
> +                    data->opaque = first_gfn;
> +                    break;
> +                }
> +            }
> +        }
> +
>          break;
>      }
> 
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c index
> e3e54f1..0a2f276 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1038,6 +1038,35 @@ void p2m_change_type_range(struct domain *d,
>      p2m_unlock(p2m);
>  }
> 
> +/* Synchronously modify the p2m type for a range of gfns from ot to nt.
> +*/ void p2m_finish_type_change(struct domain *d,
> +                            unsigned long first_gfn, unsigned long last_gfn,
> +                            p2m_type_t ot, p2m_type_t nt) {
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +    p2m_type_t t;
> +    unsigned long gfn = first_gfn;
> +
> +    ASSERT(first_gfn <= last_gfn);
> +    ASSERT(ot != nt);
> +    ASSERT(p2m_is_changeable(ot) && p2m_is_changeable(nt));
> +
> +    p2m_lock(p2m);
> +
> +    last_gfn = min(last_gfn, p2m->max_mapped_pfn);
> +    while ( gfn <= last_gfn )
> +    {
> +        get_gfn_query_unlocked(d, gfn, &t);
> +
> +        if ( t == ot )
> +            p2m_change_type_one(d, gfn, t, nt);
> +
> +        gfn++;
> +    }
> +
> +    p2m_unlock(p2m);
> +}
> +
>  /*
>   * Returns:
>   *    0              for success
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h index
> 395f125..3d665e8 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -611,6 +611,13 @@ void p2m_change_type_range(struct domain *d,
> int p2m_change_type_one(struct domain *d, unsigned long gfn,
>                          p2m_type_t ot, p2m_type_t nt);
> 
> +/* Synchronously change the p2m type for a range of gfns:
> + * [first_gfn ... last_gfn]. */
> +void p2m_finish_type_change(struct domain *d,
> +                            unsigned long first_gfn,
> +                            unsigned long last_gfn,
> +                            p2m_type_t ot, p2m_type_t nt);
> +
>  /* Report a change affecting memory types. */  void
> p2m_memory_type_changed(struct domain *d);
> 
> --
> 1.9.1
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> https://lists.xen.org/xen-devel
Jan Beulich March 22, 2017, 8:54 a.m. UTC | #6
>>> On 22.03.17 at 09:28, <kevin.tian@intel.com> wrote:
>>  From: Yu Zhang
>> Sent: Tuesday, March 21, 2017 10:53 AM
>> --- a/xen/arch/x86/hvm/dm.c
>> +++ b/xen/arch/x86/hvm/dm.c
>> @@ -385,16 +385,51 @@ static int dm_op(domid_t domid,
>> 
>>      case XEN_DMOP_map_mem_type_to_ioreq_server:
>>      {
>> -        const struct xen_dm_op_map_mem_type_to_ioreq_server *data =
>> +        struct xen_dm_op_map_mem_type_to_ioreq_server *data =
>>              &op.u.map_mem_type_to_ioreq_server;
>> +        unsigned long first_gfn = data->opaque;
>> +        unsigned long last_gfn;
>> +
>> +        const_op = false;
>> 
>>          rc = -EOPNOTSUPP;
>>          /* Only support for HAP enabled hvm. */
>>          if ( !hap_enabled(d) )
>>              break;
>> 
>> -        rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
>> -                                              data->type, data->flags);
>> +        if ( first_gfn == 0 )
>> +            rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
>> +                                                  data->type, data->flags);
>> +        /*
>> +         * Iterate p2m table when an ioreq server unmaps from
>> p2m_ioreq_server,
>> +         * and reset the remaining p2m_ioreq_server entries back to
>> p2m_ram_rw.
>> +         */
> 
> can you elaborate how device model is expected to use this
> new extension, i.e. on deciding first_gfn?

The device model doesn't decide anything here (hence the field's
name being "opaque"); it simply has to pass zero for correct
operation.

Jan
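
(For context, the device model never sees the opaque field at all. Assuming the libxendevicemodel wrapper that goes with the DMOP interface, an unmap request from the emulator's side is simply the call below; the continuation bookkeeping is entirely internal to Xen:)

/* Sketch of the device-model side (assumes the libxendevicemodel wrapper). */
int rc = xendevicemodel_map_mem_type_to_ioreq_server(dmod, domid, ioservid,
                                                     HVMMEM_ioreq_server,
                                                     0 /* flags == 0: unmap */);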
Tian, Kevin March 22, 2017, 9:02 a.m. UTC | #7
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: Wednesday, March 22, 2017 4:54 PM
> 
> >>> On 22.03.17 at 09:28, <kevin.tian@intel.com> wrote:
> >>  From: Yu Zhang
> >> Sent: Tuesday, March 21, 2017 10:53 AM
> >> --- a/xen/arch/x86/hvm/dm.c
> >> +++ b/xen/arch/x86/hvm/dm.c
> >> @@ -385,16 +385,51 @@ static int dm_op(domid_t domid,
> >>
> >>      case XEN_DMOP_map_mem_type_to_ioreq_server:
> >>      {
> >> -        const struct xen_dm_op_map_mem_type_to_ioreq_server *data =
> >> +        struct xen_dm_op_map_mem_type_to_ioreq_server *data =
> >>              &op.u.map_mem_type_to_ioreq_server;
> >> +        unsigned long first_gfn = data->opaque;
> >> +        unsigned long last_gfn;
> >> +
> >> +        const_op = false;
> >>
> >>          rc = -EOPNOTSUPP;
> >>          /* Only support for HAP enabled hvm. */
> >>          if ( !hap_enabled(d) )
> >>              break;
> >>
> >> -        rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
> >> -                                              data->type, data->flags);
> >> +        if ( first_gfn == 0 )
> >> +            rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
> >> +                                                  data->type, data->flags);
> >> +        /*
> >> +         * Iterate p2m table when an ioreq server unmaps from
> >> p2m_ioreq_server,
> >> +         * and reset the remaining p2m_ioreq_server entries back to
> >> p2m_ram_rw.
> >> +         */
> >
> > can you elaborate how device model is expected to use this new
> > extension, i.e. on deciding first_gfn?
> 
> The device model doesn't decide anything here (hence the field's name being
> "opaque"), it simply has to pass zero for correct operation.
> 

Got it. It's for hypercall continuation. :-)
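
(As a side note on the mechanics, as far as I understand them: because the handler clears const_op, the generic dm_op() exit path copies the updated op, including data->opaque, back to the caller's buffer, and -ERESTART makes it create a hypercall continuation, so the next pass resumes at the stored gfn. Roughly, with the copy-back helper name below being purely hypothetical:)

/* Rough sketch of the generic dm_op() exit path (simplified, names partly assumed): */
if ( !const_op && copy_dm_op_back_to_guest(&op) )   /* hypothetical helper name */
    rc = -EFAULT;

if ( rc == -ERESTART )
    rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih",
                                       domid, nr_bufs, bufs);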
Jan Beulich March 22, 2017, 2:39 p.m. UTC | #8
>>> On 21.03.17 at 03:52, <yu.c.zhang@linux.intel.com> wrote:
> --- a/xen/arch/x86/hvm/dm.c
> +++ b/xen/arch/x86/hvm/dm.c
> @@ -385,16 +385,51 @@ static int dm_op(domid_t domid,
>  
>      case XEN_DMOP_map_mem_type_to_ioreq_server:
>      {
> -        const struct xen_dm_op_map_mem_type_to_ioreq_server *data =
> +        struct xen_dm_op_map_mem_type_to_ioreq_server *data =
>              &op.u.map_mem_type_to_ioreq_server;
> +        unsigned long first_gfn = data->opaque;
> +        unsigned long last_gfn;
> +
> +        const_op = false;
>  
>          rc = -EOPNOTSUPP;
>          /* Only support for HAP enabled hvm. */
>          if ( !hap_enabled(d) )
>              break;
>  
> -        rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
> -                                              data->type, data->flags);
> +        if ( first_gfn == 0 )
> +            rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
> +                                                  data->type, data->flags);
> +        /*
> +         * Iterate p2m table when an ioreq server unmaps from p2m_ioreq_server,
> +         * and reset the remaining p2m_ioreq_server entries back to p2m_ram_rw.
> +         */
> +        if ( (first_gfn > 0) || (data->flags == 0 && rc == 0) )

Instead of putting the rc check on the right side, please do

        if ( rc == 0 && (first_gfn > 0 || data->flags == 0) )

That'll require setting rc to zero in an else to the previous if(),
but that's needed anyway afaics in order to not return
-EOPNOTSUPP once no further continuation is necessary.

I further wonder why the if() here needs to look at first_gfn at
all - data->flags is supposed to remain at zero for continuations
(unless we have a misbehaving caller, in which case it'll harm
the guest only afaict). It seems to me, however, that this may
have been discussed once already, a long time ago. I'm sorry
for not remembering the outcome, if so.

> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1038,6 +1038,35 @@ void p2m_change_type_range(struct domain *d,
>      p2m_unlock(p2m);
>  }
>  
> +/* Synchronously modify the p2m type for a range of gfns from ot to nt. */
> +void p2m_finish_type_change(struct domain *d,
> +                            unsigned long first_gfn, unsigned long last_gfn,

I think we'd prefer new functions to properly use gfn_t.

Jan
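
(To illustrate what that would mean for the declaration; this is only a sketch of the suggestion, not code from the series:)

/* Hypothetical gfn_t-typed form of the new declaration (sketch only): */
void p2m_finish_type_change(struct domain *d,
                            gfn_t first_gfn, gfn_t last_gfn,
                            p2m_type_t ot, p2m_type_t nt);

/* Inside the function the raw frame number is extracted with gfn_x(),
 * e.g.: unsigned long gfn = gfn_x(first_gfn); */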
Yu Zhang March 23, 2017, 3:23 a.m. UTC | #9
On 3/22/2017 10:39 PM, Jan Beulich wrote:
>>>> On 21.03.17 at 03:52, <yu.c.zhang@linux.intel.com> wrote:
>> --- a/xen/arch/x86/hvm/dm.c
>> +++ b/xen/arch/x86/hvm/dm.c
>> @@ -385,16 +385,51 @@ static int dm_op(domid_t domid,
>>   
>>       case XEN_DMOP_map_mem_type_to_ioreq_server:
>>       {
>> -        const struct xen_dm_op_map_mem_type_to_ioreq_server *data =
>> +        struct xen_dm_op_map_mem_type_to_ioreq_server *data =
>>               &op.u.map_mem_type_to_ioreq_server;
>> +        unsigned long first_gfn = data->opaque;
>> +        unsigned long last_gfn;
>> +
>> +        const_op = false;
>>   
>>           rc = -EOPNOTSUPP;
>>           /* Only support for HAP enabled hvm. */
>>           if ( !hap_enabled(d) )
>>               break;
>>   
>> -        rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
>> -                                              data->type, data->flags);
>> +        if ( first_gfn == 0 )
>> +            rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
>> +                                                  data->type, data->flags);
>> +        /*
>> +         * Iterate p2m table when an ioreq server unmaps from p2m_ioreq_server,
>> +         * and reset the remaining p2m_ioreq_server entries back to p2m_ram_rw.
>> +         */
>> +        if ( (first_gfn > 0) || (data->flags == 0 && rc == 0) )
> Instead of putting the rc check on the right side, please do
>
>          if ( rc == 0 && (first_gfn > 0) || data->flags == 0) )
>
> That'll require setting rc to zero in an else to the previous if(),
> but that's needed anyway afaics in order to not return
> -EOPNOTSUPP once no further continuation is necessary.
>
> I further wonder why the if() here needs to look at first_gfn at
> all - data->flags is supposed to remain at zero for continuations
> (unless we have a misbehaving caller, in which case it'll harm
> the guest only afaict). It seems to me, however, that this may
> have been discussed once already, a long time ago. I'm sorry
> for not remembering the outcome, if so.

We have not discussed this. Our previous discussion was about the if
condition before calling hvm_map_mem_type_to_ioreq_server(). :-)

Maybe the above code should be changed to:
@@ -400,11 +400,14 @@ static int dm_op(domid_t domid,
          if ( first_gfn == 0 )
              rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
                                                    data->type, data->flags);
+       else
+           rc = 0;
+
          /*
           * Iterate p2m table when an ioreq server unmaps from p2m_ioreq_server,
           * and reset the remaining p2m_ioreq_server entries back to p2m_ram_rw.
           */
-        if ( (first_gfn > 0) || (data->flags == 0 && rc == 0) )
+        if ( data->flags == 0 && rc == 0 )
          {
              struct p2m_domain *p2m = p2m_get_hostp2m(d);

>> --- a/xen/arch/x86/mm/p2m.c
>> +++ b/xen/arch/x86/mm/p2m.c
>> @@ -1038,6 +1038,35 @@ void p2m_change_type_range(struct domain *d,
>>       p2m_unlock(p2m);
>>   }
>>   
>> +/* Synchronously modify the p2m type for a range of gfns from ot to nt. */
>> +void p2m_finish_type_change(struct domain *d,
>> +                            unsigned long first_gfn, unsigned long last_gfn,
> I think we'd prefer new functions to properly use gfn_t.
Sorry? I do not get it.
Paul suggested we replace last_gfn with max_nr, which sounds reasonable
to me. I guess you mean something else?

Thanks
Yu

> Jan
>
>
Jan Beulich March 23, 2017, 9:02 a.m. UTC | #10
>>> On 23.03.17 at 04:23, <yu.c.zhang@linux.intel.com> wrote:

> 
> On 3/22/2017 10:39 PM, Jan Beulich wrote:
>>>>> On 21.03.17 at 03:52, <yu.c.zhang@linux.intel.com> wrote:
>>> --- a/xen/arch/x86/hvm/dm.c
>>> +++ b/xen/arch/x86/hvm/dm.c
>>> @@ -385,16 +385,51 @@ static int dm_op(domid_t domid,
>>>   
>>>       case XEN_DMOP_map_mem_type_to_ioreq_server:
>>>       {
>>> -        const struct xen_dm_op_map_mem_type_to_ioreq_server *data =
>>> +        struct xen_dm_op_map_mem_type_to_ioreq_server *data =
>>>               &op.u.map_mem_type_to_ioreq_server;
>>> +        unsigned long first_gfn = data->opaque;
>>> +        unsigned long last_gfn;
>>> +
>>> +        const_op = false;
>>>   
>>>           rc = -EOPNOTSUPP;
>>>           /* Only support for HAP enabled hvm. */
>>>           if ( !hap_enabled(d) )
>>>               break;
>>>   
>>> -        rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
>>> -                                              data->type, data->flags);
>>> +        if ( first_gfn == 0 )
>>> +            rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
>>> +                                                  data->type, data->flags);
>>> +        /*
>>> +         * Iterate p2m table when an ioreq server unmaps from 
> p2m_ioreq_server,
>>> +         * and reset the remaining p2m_ioreq_server entries back to 
> p2m_ram_rw.
>>> +         */
>>> +        if ( (first_gfn > 0) || (data->flags == 0 && rc == 0) )
>> Instead of putting the rc check on the right side, please do
>>
>>          if ( rc == 0 && (first_gfn > 0) || data->flags == 0) )
>>
>> That'll require setting rc to zero in an else to the previous if(),
>> but that's needed anyway afaics in order to not return
>> -EOPNOTSUPP once no further continuation is necessary.
>>
>> I further wonder why the if() here needs to look at first_gfn at
>> all - data->flags is supposed to remain at zero for continuations
>> (unless we have a misbehaving caller, in which case it'll harm
>> the guest only afaict). It seems to me, however, that this may
>> have been discussed once already, a long time ago. I'm sorry
>> for not remembering the outcome, if so.
> 
> We have not discussed this. Our previous discussion is about the if 
> condition before
> calling hvm_map_mem_type_to_ioreq_server(). :-)
> 
> Maybe above code should be changed to:
> @@ -400,11 +400,14 @@ static int dm_op(domid_t domid,
>           if ( first_gfn == 0 )
>               rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
>                                                     data->type, 
> data->flags);
> +       else
> +           rc = 0;
> +
>           /*
>            * Iterate p2m table when an ioreq server unmaps from p2m_ioreq_server,
>            * and reset the remaining p2m_ioreq_server entries back to p2m_ram_rw.
>            */
> -        if ( (first_gfn > 0) || (data->flags == 0 && rc == 0) )
> +        if ( data->flags == 0 && rc == 0 )
>           {
>               struct p2m_domain *p2m = p2m_get_hostp2m(d);

Yes, that's what I was trying to hint at.

>>> --- a/xen/arch/x86/mm/p2m.c
>>> +++ b/xen/arch/x86/mm/p2m.c
>>> @@ -1038,6 +1038,35 @@ void p2m_change_type_range(struct domain *d,
>>>       p2m_unlock(p2m);
>>>   }
>>>   
>>> +/* Synchronously modify the p2m type for a range of gfns from ot to nt. */
>>> +void p2m_finish_type_change(struct domain *d,
>>> +                            unsigned long first_gfn, unsigned long last_gfn,
>> I think we'd prefer new functions to properly use gfn_t.
> Sorry? I do not get it.
> Paul suggested we replace last_gfn with max_nr, which sounds reasonable 
> to me. Guess you mean
> something else?

Indeed - even with Paul's suggestion, first_gfn would remain as a
parameter, and it should be of type gfn_t.

Jan
Yu Zhang March 24, 2017, 9:05 a.m. UTC | #11
On 3/23/2017 5:02 PM, Jan Beulich wrote:
>>>> On 23.03.17 at 04:23, <yu.c.zhang@linux.intel.com> wrote:
>> On 3/22/2017 10:39 PM, Jan Beulich wrote:
>>>>>> On 21.03.17 at 03:52, <yu.c.zhang@linux.intel.com> wrote:
>>>> --- a/xen/arch/x86/hvm/dm.c
>>>> +++ b/xen/arch/x86/hvm/dm.c
>>>> @@ -385,16 +385,51 @@ static int dm_op(domid_t domid,
>>>>    
>>>>        case XEN_DMOP_map_mem_type_to_ioreq_server:
>>>>        {
>>>> -        const struct xen_dm_op_map_mem_type_to_ioreq_server *data =
>>>> +        struct xen_dm_op_map_mem_type_to_ioreq_server *data =
>>>>                &op.u.map_mem_type_to_ioreq_server;
>>>> +        unsigned long first_gfn = data->opaque;
>>>> +        unsigned long last_gfn;
>>>> +
>>>> +        const_op = false;
>>>>    
>>>>            rc = -EOPNOTSUPP;
>>>>            /* Only support for HAP enabled hvm. */
>>>>            if ( !hap_enabled(d) )
>>>>                break;
>>>>    
>>>> -        rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
>>>> -                                              data->type, data->flags);
>>>> +        if ( first_gfn == 0 )
>>>> +            rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
>>>> +                                                  data->type, data->flags);
>>>> +        /*
>>>> +         * Iterate p2m table when an ioreq server unmaps from
>> p2m_ioreq_server,
>>>> +         * and reset the remaining p2m_ioreq_server entries back to
>> p2m_ram_rw.
>>>> +         */
>>>> +        if ( (first_gfn > 0) || (data->flags == 0 && rc == 0) )
>>> Instead of putting the rc check on the right side, please do
>>>
>>>           if ( rc == 0 && (first_gfn > 0) || data->flags == 0) )
>>>
>>> That'll require setting rc to zero in an else to the previous if(),
>>> but that's needed anyway afaics in order to not return
>>> -EOPNOTSUPP once no further continuation is necessary.
>>>
>>> I further wonder why the if() here needs to look at first_gfn at
>>> all - data->flags is supposed to remain at zero for continuations
>>> (unless we have a misbehaving caller, in which case it'll harm
>>> the guest only afaict). It seems to me, however, that this may
>>> have been discussed once already, a long time ago. I'm sorry
>>> for not remembering the outcome, if so.
>> We have not discussed this. Our previous discussion is about the if
>> condition before
>> calling hvm_map_mem_type_to_ioreq_server(). :-)
>>
>> Maybe above code should be changed to:
>> @@ -400,11 +400,14 @@ static int dm_op(domid_t domid,
>>            if ( first_gfn == 0 )
>>                rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
>>                                                      data->type,
>> data->flags);
>> +       else
>> +           rc = 0;
>> +
>>            /*
>>             * Iterate p2m table when an ioreq server unmaps from p2m_ioreq_server,
>>             * and reset the remaining p2m_ioreq_server entries back to p2m_ram_rw.
>>             */
>> -        if ( (first_gfn > 0) || (data->flags == 0 && rc == 0) )
>> +        if ( data->flags == 0 && rc == 0 )
>>            {
>>                struct p2m_domain *p2m = p2m_get_hostp2m(d);
> Yes, that's what I was trying to hint at.

Great. Thanks.
>>>> --- a/xen/arch/x86/mm/p2m.c
>>>> +++ b/xen/arch/x86/mm/p2m.c
>>>> @@ -1038,6 +1038,35 @@ void p2m_change_type_range(struct domain *d,
>>>>        p2m_unlock(p2m);
>>>>    }
>>>>    
>>>> +/* Synchronously modify the p2m type for a range of gfns from ot to nt. */
>>>> +void p2m_finish_type_change(struct domain *d,
>>>> +                            unsigned long first_gfn, unsigned long last_gfn,
>>> I think we'd prefer new functions to properly use gfn_t.
>> Sorry? I do not get it.
>> Paul suggested we replace last_gfn with max_nr, which sounds reasonable
>> to me. Guess you mean
>> something else?
> Indeed - even with Paul's suggestion, first_gfn would remain as a
> parameter, and it should be of type gfn_t.

Oh, I see. You mean changing the type of first_gfn to gfn_t.

Thanks
Yu
> Jan
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> https://lists.xen.org/xen-devel

Patch

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 3f9484d..a24d0f8 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -385,16 +385,51 @@  static int dm_op(domid_t domid,
 
     case XEN_DMOP_map_mem_type_to_ioreq_server:
     {
-        const struct xen_dm_op_map_mem_type_to_ioreq_server *data =
+        struct xen_dm_op_map_mem_type_to_ioreq_server *data =
             &op.u.map_mem_type_to_ioreq_server;
+        unsigned long first_gfn = data->opaque;
+        unsigned long last_gfn;
+
+        const_op = false;
 
         rc = -EOPNOTSUPP;
         /* Only support for HAP enabled hvm. */
         if ( !hap_enabled(d) )
             break;
 
-        rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
-                                              data->type, data->flags);
+        if ( first_gfn == 0 )
+            rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
+                                                  data->type, data->flags);
+        /*
+         * Iterate p2m table when an ioreq server unmaps from p2m_ioreq_server,
+         * and reset the remaining p2m_ioreq_server entries back to p2m_ram_rw.
+         */
+        if ( (first_gfn > 0) || (data->flags == 0 && rc == 0) )
+        {
+            struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+            while ( read_atomic(&p2m->ioreq.entry_count) &&
+                    first_gfn <= p2m->max_mapped_pfn )
+            {
+                /* Iterate p2m table for 256 gfns each time. */
+                last_gfn = first_gfn + 0xff;
+
+                p2m_finish_type_change(d, first_gfn, last_gfn,
+                                       p2m_ioreq_server, p2m_ram_rw);
+
+                first_gfn = last_gfn + 1;
+
+                /* Check for continuation if it's not the last iteration. */
+                if ( first_gfn <= p2m->max_mapped_pfn &&
+                     hypercall_preempt_check() )
+                {
+                    rc = -ERESTART;
+                    data->opaque = first_gfn;
+                    break;
+                }
+            }
+        }
+
         break;
     }
 
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index e3e54f1..0a2f276 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1038,6 +1038,35 @@  void p2m_change_type_range(struct domain *d,
     p2m_unlock(p2m);
 }
 
+/* Synchronously modify the p2m type for a range of gfns from ot to nt. */
+void p2m_finish_type_change(struct domain *d,
+                            unsigned long first_gfn, unsigned long last_gfn,
+                            p2m_type_t ot, p2m_type_t nt)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    p2m_type_t t;
+    unsigned long gfn = first_gfn;
+
+    ASSERT(first_gfn <= last_gfn);
+    ASSERT(ot != nt);
+    ASSERT(p2m_is_changeable(ot) && p2m_is_changeable(nt));
+
+    p2m_lock(p2m);
+
+    last_gfn = min(last_gfn, p2m->max_mapped_pfn);
+    while ( gfn <= last_gfn )
+    {
+        get_gfn_query_unlocked(d, gfn, &t);
+
+        if ( t == ot )
+            p2m_change_type_one(d, gfn, t, nt);
+
+        gfn++;
+    }
+
+    p2m_unlock(p2m);
+}
+
 /*
  * Returns:
  *    0              for success
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 395f125..3d665e8 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -611,6 +611,13 @@  void p2m_change_type_range(struct domain *d,
 int p2m_change_type_one(struct domain *d, unsigned long gfn,
                         p2m_type_t ot, p2m_type_t nt);
 
+/* Synchronously change the p2m type for a range of gfns:
+ * [first_gfn ... last_gfn]. */
+void p2m_finish_type_change(struct domain *d,
+                            unsigned long first_gfn,
+                            unsigned long last_gfn,
+                            p2m_type_t ot, p2m_type_t nt);
+
 /* Report a change affecting memory types. */
 void p2m_memory_type_changed(struct domain *d);