
[V4,2/4] x86/altp2m: Add hypercall to set a range of sve bits

Message ID 20191217151144.9781-2-aisaila@bitdefender.com (mailing list archive)
State Superseded
Series [V4,1/4] x86/mm: Add array_index_nospec to guest provided index values

Commit Message

Alexandru Stefan ISAILA Dec. 17, 2019, 3:12 p.m. UTC
By default the sve bits are not set.
This patch adds a new hypercall, xc_altp2m_set_supress_ve_multi(),
to set a range of sve bits.
The core function, p2m_set_suppress_ve_multi(), does not break on the
first error; it makes a best effort to set the bits across the given
range. A check for continuation is made so that large ranges can be
preempted.
The gfn of the first error is stored in
xen_hvm_altp2m_suppress_ve_multi.first_error and the error code is
stored in xen_hvm_altp2m_suppress_ve_multi.first_error_code.
If no error occurred, both values will be 0.
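
For illustration, a minimal sketch of how a caller might use the new
wrapper (the domain id, view id and gfn range are made-up example
values; error handling abbreviated):

    #include <xenctrl.h>

    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    uint32_t domid = 1;        /* example domain id */
    xen_pfn_t error_gfn = 0;
    uint32_t error_code = 0;

    /* Set the suppress #VE bit on gfns 0x1000-0x1fff of altp2m view 1. */
    int rc = xc_altp2m_set_supress_ve_multi(xch, domid, 1, 0x1000, 0x1fff,
                                            true, &error_gfn, &error_code);
    if ( rc < 0 )
        ; /* the hypercall itself failed */
    else if ( error_code )
        ; /* best-effort run hit at least one error, the first at error_gfn */

    xc_interface_close(xch);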

Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
---
CC: Ian Jackson <ian.jackson@eu.citrix.com>
CC: Wei Liu <wl@xen.org>
CC: Andrew Cooper <andrew.cooper3@citrix.com>
CC: George Dunlap <George.Dunlap@eu.citrix.com>
CC: Jan Beulich <jbeulich@suse.com>
CC: Julien Grall <julien@xen.org>
CC: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
CC: Stefano Stabellini <sstabellini@kernel.org>
CC: "Roger Pau Monné" <roger.pau@citrix.com>
CC: George Dunlap <george.dunlap@eu.citrix.com>
CC: Razvan Cojocaru <rcojocaru@bitdefender.com>
CC: Tamas K Lengyel <tamas@tklengyel.com>
CC: Petre Pircalabu <ppircalabu@bitdefender.com>
---
Changes since V3:
	- Update commit message
	- Check rc and __copy_to_guest() in the same if
	- Fix style issue
	- Fix comment typo
	- Init p2m with host_p2m
	- Use array_index_nospec() in altp2m_p2m[] and altp2m_eptp[]
	- Drop opaque
	- Use pad2 to return first error code
	- Update first_gfn
	- Stop the range loop at cpuid->extd.maxphysaddr.
---
 tools/libxc/include/xenctrl.h   |  4 +++
 tools/libxc/xc_altp2m.c         | 33 +++++++++++++++++
 xen/arch/x86/hvm/hvm.c          | 15 ++++++++
 xen/arch/x86/mm/p2m.c           | 64 +++++++++++++++++++++++++++++++++
 xen/include/public/hvm/hvm_op.h | 13 +++++++
 xen/include/xen/mem_access.h    |  3 ++
 6 files changed, 132 insertions(+)

Comments

Jan Beulich Dec. 17, 2019, 5 p.m. UTC | #1
On 17.12.2019 16:12, Alexandru Stefan ISAILA wrote:
> @@ -4711,6 +4712,20 @@ static int do_altp2m_op(
>          }
>          break;
>  
> +    case HVMOP_altp2m_set_suppress_ve_multi:
> +        if ( a.u.suppress_ve_multi.pad1 ||
> +             a.u.suppress_ve_multi.first_error_code ||
> +             a.u.suppress_ve_multi.first_error ||
> +             a.u.suppress_ve_multi.first_gfn > a.u.suppress_ve_multi.last_gfn )
> +            rc = -EINVAL;

An error having occurred doesn't prevent scheduling of a
continuation. When you come back here, you'll then return
-EINVAL instead of continuing the prior operation.
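
One possible way to cope with that - just a sketch, not necessarily
what a later revision will do - is to leave the two output fields out
of the input check and rely on the callee only recording the first
error:

    case HVMOP_altp2m_set_suppress_ve_multi:
        if ( a.u.suppress_ve_multi.pad1 ||
             a.u.suppress_ve_multi.first_gfn > a.u.suppress_ve_multi.last_gfn )
            rc = -EINVAL;
        else
        {
            /*
             * first_error / first_error_code deliberately not checked here,
             * so a restarted continuation that has already recorded an
             * error isn't refused; p2m_set_suppress_ve_multi() only ever
             * records the first error.
             */
            rc = p2m_set_suppress_ve_multi(d, &a.u.suppress_ve_multi);
            if ( (!rc || rc == -ERESTART) && __copy_to_guest(arg, &a, 1) )
                rc = -EFAULT;
        }
        break;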

> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -3064,6 +3064,70 @@ out:
>      return rc;
>  }
>  
> +/*
> + * Set/clear the #VE suppress bit for multiple pages.  Only available on VMX.
> + */
> +int p2m_set_suppress_ve_multi(struct domain *d,
> +                              struct xen_hvm_altp2m_suppress_ve_multi *sve)
> +{
> +    struct p2m_domain *host_p2m = p2m_get_hostp2m(d);
> +    struct p2m_domain *ap2m = NULL;
> +    struct p2m_domain *p2m = host_p2m;
> +    uint64_t start = sve->first_gfn;
> +    int rc = 0;
> +    uint64_t max_phys_addr = (1UL << d->arch.cpuid->extd.maxphysaddr) - 1;
> +
> +    if ( sve->view > 0 )
> +    {
> +        if ( sve->view >= MAX_ALTP2M ||
> +             d->arch.altp2m_eptp[array_index_nospec(sve->view, MAX_EPTP)] ==
> +             mfn_x(INVALID_MFN) )
> +            return -EINVAL;
> +
> +        p2m = ap2m = d->arch.altp2m_p2m[array_index_nospec(sve->view,
> +                                                           MAX_ALTP2M)];
> +    }
> +
> +    p2m_lock(host_p2m);
> +
> +    if ( ap2m )
> +        p2m_lock(ap2m);
> +
> +    while ( sve->last_gfn >= start && start < max_phys_addr )

Why don't you clip ->last_gfn ahead of the loop, saving one
comparison per iteration?

> +    {
> +        p2m_access_t a;
> +        p2m_type_t t;
> +        mfn_t mfn;
> +        int err = 0;
> +
> +        if ( altp2m_get_effective_entry(p2m, _gfn(start), &mfn, &t, &a, AP2MGET_query) )
> +            a = p2m->default_access;
> +
> +        if ( (err = p2m->set_entry(p2m, _gfn(start), mfn, PAGE_ORDER_4K, t, a,
> +                                   sve->suppress_ve)) && !sve->first_error )
> +        {
> +            sve->first_error = start; /* Save the gfn of the first error */
> +            sve->first_error_code = err; /* Save the first error code */
> +        }

What if the first error occurs on GFN 0? I guess you want to check
->first_error_code against zero in the condition.
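
I.e. something like (just a sketch of the adjusted condition, which
keys off the error code - always non-zero on failure - instead of the
gfn):

    if ( (err = p2m->set_entry(p2m, _gfn(start), mfn, PAGE_ORDER_4K, t, a,
                               sve->suppress_ve)) && !sve->first_error_code )
    {
        sve->first_error = start;      /* Save the gfn of the first error. */
        sve->first_error_code = err;   /* Save the first error code. */
    }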

> --- a/xen/include/public/hvm/hvm_op.h
> +++ b/xen/include/public/hvm/hvm_op.h
> @@ -46,6 +46,16 @@ struct xen_hvm_altp2m_suppress_ve {
>      uint64_t gfn;
>  };
>  
> +struct xen_hvm_altp2m_suppress_ve_multi {
> +    uint16_t view;
> +    uint8_t suppress_ve; /* Boolean type. */
> +    uint8_t pad1;
> +    uint32_t first_error_code; /* Must be set to 0 . */

int32_t perhaps, since error codes are negative?
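
I.e. (sketch only, layout otherwise unchanged):

    struct xen_hvm_altp2m_suppress_ve_multi {
        uint16_t view;
        uint8_t suppress_ve;       /* Boolean type. */
        uint8_t pad1;
        int32_t first_error_code;  /* Must be set to 0; returns -errno. */
        uint64_t first_gfn;        /* Value will be updated. */
        uint64_t last_gfn;
        uint64_t first_error;      /* Gfn of the first error. Must be set to 0. */
    };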

Jan
Alexandru Stefan ISAILA Dec. 18, 2019, 8:13 a.m. UTC | #2
>> +/*
>> + * Set/clear the #VE suppress bit for multiple pages.  Only available on VMX.
>> + */
>> +int p2m_set_suppress_ve_multi(struct domain *d,
>> +                              struct xen_hvm_altp2m_suppress_ve_multi *sve)
>> +{
>> +    struct p2m_domain *host_p2m = p2m_get_hostp2m(d);
>> +    struct p2m_domain *ap2m = NULL;
>> +    struct p2m_domain *p2m = host_p2m;
>> +    uint64_t start = sve->first_gfn;
>> +    int rc = 0;
>> +    uint64_t max_phys_addr = (1UL << d->arch.cpuid->extd.maxphysaddr) - 1;
>> +
>> +    if ( sve->view > 0 )
>> +    {
>> +        if ( sve->view >= MAX_ALTP2M ||
>> +             d->arch.altp2m_eptp[array_index_nospec(sve->view, MAX_EPTP)] ==
>> +             mfn_x(INVALID_MFN) )
>> +            return -EINVAL;
>> +
>> +        p2m = ap2m = d->arch.altp2m_p2m[array_index_nospec(sve->view,
>> +                                                           MAX_ALTP2M)];
>> +    }
>> +
>> +    p2m_lock(host_p2m);
>> +
>> +    if ( ap2m )
>> +        p2m_lock(ap2m);
>> +
>> +    while ( sve->last_gfn >= start && start < max_phys_addr )
> 
> Why don't you clip ->last_gfn ahead of the loop, saving one
> comparison per iteration?

I've done this so it will have fewer lines but sure, I can have the 
->last_gfn check before the loop.

> 
>> +    {
>> +        p2m_access_t a;
>> +        p2m_type_t t;
>> +        mfn_t mfn;
>> +        int err = 0;
>> +
>> +        if ( altp2m_get_effective_entry(p2m, _gfn(start), &mfn, &t, &a, AP2MGET_query) )
>> +            a = p2m->default_access;
>> +
>> +        if ( (err = p2m->set_entry(p2m, _gfn(start), mfn, PAGE_ORDER_4K, t, a,
>> +                                   sve->suppress_ve)) && !sve->first_error )
>> +        {
>> +            sve->first_error = start; /* Save the gfn of the first error */
>> +            sve->first_error_code = err; /* Save the first error code */
>> +        }
> 
> What if the first error occurs on GFN 0? I guess you want to check
> ->first_error_code against zero in the condition.

That is right, I will change to check ->first_error_code instead of 
->first_error.

Thanks,
Alex
Alexandru Stefan ISAILA Dec. 18, 2019, 8:45 a.m. UTC | #3
On 18.12.2019 10:13, Alexandru Stefan ISAILA wrote:
> 
>>> +/*
>>> + * Set/clear the #VE suppress bit for multiple pages.  Only available on VMX.
>>> + */
>>> +int p2m_set_suppress_ve_multi(struct domain *d,
>>> +                              struct xen_hvm_altp2m_suppress_ve_multi *sve)
>>> +{
>>> +    struct p2m_domain *host_p2m = p2m_get_hostp2m(d);
>>> +    struct p2m_domain *ap2m = NULL;
>>> +    struct p2m_domain *p2m = host_p2m;
>>> +    uint64_t start = sve->first_gfn;
>>> +    int rc = 0;
>>> +    uint64_t max_phys_addr = (1UL << d->arch.cpuid->extd.maxphysaddr) - 1;
>>> +
>>> +    if ( sve->view > 0 )
>>> +    {
>>> +        if ( sve->view >= MAX_ALTP2M ||
>>> +             d->arch.altp2m_eptp[array_index_nospec(sve->view, MAX_EPTP)] ==
>>> +             mfn_x(INVALID_MFN) )
>>> +            return -EINVAL;
>>> +
>>> +        p2m = ap2m = d->arch.altp2m_p2m[array_index_nospec(sve->view,
>>> +                                                           MAX_ALTP2M)];
>>> +    }
>>> +
>>> +    p2m_lock(host_p2m);
>>> +
>>> +    if ( ap2m )
>>> +        p2m_lock(ap2m);
>>> +
>>> +    while ( sve->last_gfn >= start && start < max_phys_addr )
>>
>> Why don't you clip ->last_gfn ahead of the loop, saving one
>> comparison per iteration?
> 
> I've done this so it will have fewer lines but sure, I can have the
> ->last_gfn check before the loop.
> 

Wouldn't there be an issue if start goes over ->last_gfn and there is no 
break for preemption? Then the loop will run until max_phys_addr.

Alex
Jan Beulich Dec. 18, 2019, 10:18 a.m. UTC | #4
On 18.12.2019 09:45, Alexandru Stefan ISAILA wrote:
> 
> 
> On 18.12.2019 10:13, Alexandru Stefan ISAILA wrote:
>>
>>>> +/*
>>>> + * Set/clear the #VE suppress bit for multiple pages.  Only available on VMX.
>>>> + */
>>>> +int p2m_set_suppress_ve_multi(struct domain *d,
>>>> +                              struct xen_hvm_altp2m_suppress_ve_multi *sve)
>>>> +{
>>>> +    struct p2m_domain *host_p2m = p2m_get_hostp2m(d);
>>>> +    struct p2m_domain *ap2m = NULL;
>>>> +    struct p2m_domain *p2m = host_p2m;
>>>> +    uint64_t start = sve->first_gfn;
>>>> +    int rc = 0;
>>>> +    uint64_t max_phys_addr = (1UL << d->arch.cpuid->extd.maxphysaddr) - 1;
>>>> +
>>>> +    if ( sve->view > 0 )
>>>> +    {
>>>> +        if ( sve->view >= MAX_ALTP2M ||
>>>> +             d->arch.altp2m_eptp[array_index_nospec(sve->view, MAX_EPTP)] ==
>>>> +             mfn_x(INVALID_MFN) )
>>>> +            return -EINVAL;
>>>> +
>>>> +        p2m = ap2m = d->arch.altp2m_p2m[array_index_nospec(sve->view,
>>>> +                                                           MAX_ALTP2M)];
>>>> +    }
>>>> +
>>>> +    p2m_lock(host_p2m);
>>>> +
>>>> +    if ( ap2m )
>>>> +        p2m_lock(ap2m);
>>>> +
>>>> +    while ( sve->last_gfn >= start && start < max_phys_addr )
>>>
>>> Why don't you clip ->last_gfn ahead of the loop, saving one
>>> comparison per iteration?
>>
>> I've done this so it will have fewer lines but sure, I can have the
>> ->last_gfn check before the loop.
> 
> Wouldn't there be an issue if start goes over ->last_gfn and there is no 
> break for preemption? Then the loop will run until max_phys_addr.

I'm not sure I understand. My guess is a misunderstanding - I'm
asking to replace the right side of the &&, and it looks like you
understood me to mean the left side. Note how I said "clip" in
my earlier reply, meaning for you to update ->last_gfn ahead of the
loop if it's above (1UL << d->arch.cpuid->extd.maxphysaddr) - 1.
Perhaps this could even be done in the caller together with (and
possibly ahead of) the other sanity checking of incoming values.

Jan
Alexandru Stefan ISAILA Dec. 18, 2019, 10:32 a.m. UTC | #5
On 18.12.2019 12:18, Jan Beulich wrote:
> On 18.12.2019 09:45, Alexandru Stefan ISAILA wrote:
>>
>>
>> On 18.12.2019 10:13, Alexandru Stefan ISAILA wrote:
>>>
>>>>> +/*
>>>>> + * Set/clear the #VE suppress bit for multiple pages.  Only available on VMX.
>>>>> + */
>>>>> +int p2m_set_suppress_ve_multi(struct domain *d,
>>>>> +                              struct xen_hvm_altp2m_suppress_ve_multi *sve)
>>>>> +{
>>>>> +    struct p2m_domain *host_p2m = p2m_get_hostp2m(d);
>>>>> +    struct p2m_domain *ap2m = NULL;
>>>>> +    struct p2m_domain *p2m = host_p2m;
>>>>> +    uint64_t start = sve->first_gfn;
>>>>> +    int rc = 0;
>>>>> +    uint64_t max_phys_addr = (1UL << d->arch.cpuid->extd.maxphysaddr) - 1;
>>>>> +
>>>>> +    if ( sve->view > 0 )
>>>>> +    {
>>>>> +        if ( sve->view >= MAX_ALTP2M ||
>>>>> +             d->arch.altp2m_eptp[array_index_nospec(sve->view, MAX_EPTP)] ==
>>>>> +             mfn_x(INVALID_MFN) )
>>>>> +            return -EINVAL;
>>>>> +
>>>>> +        p2m = ap2m = d->arch.altp2m_p2m[array_index_nospec(sve->view,
>>>>> +                                                           MAX_ALTP2M)];
>>>>> +    }
>>>>> +
>>>>> +    p2m_lock(host_p2m);
>>>>> +
>>>>> +    if ( ap2m )
>>>>> +        p2m_lock(ap2m);
>>>>> +
>>>>> +    while ( sve->last_gfn >= start && start < max_phys_addr )
>>>>
>>>> Why don't you clip ->last_gfn ahead of the loop, saving one
>>>> comparison per iteration?
>>>
>>> I've done this so it will have fewer lines but sure, I can have the
>>> ->last_gfn check before the loop.
>>
>> Wouldn't there be an issue if start goes over ->last_gfn and there is no
>> break for preemption? Then the loop will run until max_phys_addr.
> 
> I'm not sure I understand. My guess is a misunderstanding - I'm
> asking to replace the right side of the &&, and it looks like you
> understood me to mean the left side. Note how I said "clip" in
> my earlier reply, meaning for you to update ->last_gfn ahead of the
> loop if it's above (1UL << d->arch.cpuid->extd.maxphysaddr) - 1.
> Perhaps this could even be done in the caller together with (and
> possibly ahead of) the other sanity checking of incoming values.
> 

Then I could have "sve->last_gfn = min(sve->last_gfn, max_phys_addr)"
and then drop the "start < max_phys_addr" check from the while loop.
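
Roughly (a sketch, assuming Xen's min() on the two uint64_t values):

    /* Clip the range once, so the loop only compares against last_gfn. */
    sve->last_gfn = min(sve->last_gfn, max_phys_addr);

    while ( sve->last_gfn >= start )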

Alex

Patch

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index f4431687b3..2ace8ea80e 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1923,6 +1923,10 @@  int xc_altp2m_switch_to_view(xc_interface *handle, uint32_t domid,
                              uint16_t view_id);
 int xc_altp2m_set_suppress_ve(xc_interface *handle, uint32_t domid,
                               uint16_t view_id, xen_pfn_t gfn, bool sve);
+int xc_altp2m_set_supress_ve_multi(xc_interface *handle, uint32_t domid,
+                                   uint16_t view_id, xen_pfn_t first_gfn,
+                                   xen_pfn_t last_gfn, bool sve,
+                                   xen_pfn_t *error_gfn, uint32_t *error_code);
 int xc_altp2m_get_suppress_ve(xc_interface *handle, uint32_t domid,
                               uint16_t view_id, xen_pfn_t gfn, bool *sve);
 int xc_altp2m_set_mem_access(xc_interface *handle, uint32_t domid,
diff --git a/tools/libxc/xc_altp2m.c b/tools/libxc/xc_altp2m.c
index 09dad0355e..9f7e8315b3 100644
--- a/tools/libxc/xc_altp2m.c
+++ b/tools/libxc/xc_altp2m.c
@@ -234,6 +234,39 @@  int xc_altp2m_set_suppress_ve(xc_interface *handle, uint32_t domid,
     return rc;
 }
 
+int xc_altp2m_set_supress_ve_multi(xc_interface *handle, uint32_t domid,
+                                   uint16_t view_id, xen_pfn_t first_gfn,
+                                   xen_pfn_t last_gfn, bool sve,
+                                   xen_pfn_t *error_gfn, uint32_t *error_code)
+{
+    int rc;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+    arg->cmd = HVMOP_altp2m_set_suppress_ve_multi;
+    arg->domain = domid;
+    arg->u.suppress_ve_multi.view = view_id;
+    arg->u.suppress_ve_multi.first_gfn = first_gfn;
+    arg->u.suppress_ve_multi.last_gfn = last_gfn;
+    arg->u.suppress_ve_multi.suppress_ve = sve;
+
+    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+
+    if ( arg->u.suppress_ve_multi.first_error )
+    {
+        *error_gfn = arg->u.suppress_ve_multi.first_error;
+        *error_code = arg->u.suppress_ve_multi.first_error_code;
+    }
+
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
 int xc_altp2m_set_mem_access(xc_interface *handle, uint32_t domid,
                              uint16_t view_id, xen_pfn_t gfn,
                              xenmem_access_t access)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 47573f71b8..a129049d6b 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4553,6 +4553,7 @@  static int do_altp2m_op(
     case HVMOP_altp2m_destroy_p2m:
     case HVMOP_altp2m_switch_p2m:
     case HVMOP_altp2m_set_suppress_ve:
+    case HVMOP_altp2m_set_suppress_ve_multi:
     case HVMOP_altp2m_get_suppress_ve:
     case HVMOP_altp2m_set_mem_access:
     case HVMOP_altp2m_set_mem_access_multi:
@@ -4711,6 +4712,20 @@  static int do_altp2m_op(
         }
         break;
 
+    case HVMOP_altp2m_set_suppress_ve_multi:
+        if ( a.u.suppress_ve_multi.pad1 ||
+             a.u.suppress_ve_multi.first_error_code ||
+             a.u.suppress_ve_multi.first_error ||
+             a.u.suppress_ve_multi.first_gfn > a.u.suppress_ve_multi.last_gfn )
+            rc = -EINVAL;
+        else
+        {
+            rc = p2m_set_suppress_ve_multi(d, &a.u.suppress_ve_multi);
+            if ( (!rc || rc == -ERESTART) && __copy_to_guest(arg, &a, 1) )
+                rc = -EFAULT;
+        }
+        break;
+
     case HVMOP_altp2m_get_suppress_ve:
         if ( a.u.suppress_ve.pad1 || a.u.suppress_ve.pad2 )
             rc = -EINVAL;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 7e7f4f1a7c..253cab3458 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -3064,6 +3064,70 @@  out:
     return rc;
 }
 
+/*
+ * Set/clear the #VE suppress bit for multiple pages.  Only available on VMX.
+ */
+int p2m_set_suppress_ve_multi(struct domain *d,
+                              struct xen_hvm_altp2m_suppress_ve_multi *sve)
+{
+    struct p2m_domain *host_p2m = p2m_get_hostp2m(d);
+    struct p2m_domain *ap2m = NULL;
+    struct p2m_domain *p2m = host_p2m;
+    uint64_t start = sve->first_gfn;
+    int rc = 0;
+    uint64_t max_phys_addr = (1UL << d->arch.cpuid->extd.maxphysaddr) - 1;
+
+    if ( sve->view > 0 )
+    {
+        if ( sve->view >= MAX_ALTP2M ||
+             d->arch.altp2m_eptp[array_index_nospec(sve->view, MAX_EPTP)] ==
+             mfn_x(INVALID_MFN) )
+            return -EINVAL;
+
+        p2m = ap2m = d->arch.altp2m_p2m[array_index_nospec(sve->view,
+                                                           MAX_ALTP2M)];
+    }
+
+    p2m_lock(host_p2m);
+
+    if ( ap2m )
+        p2m_lock(ap2m);
+
+    while ( sve->last_gfn >= start && start < max_phys_addr )
+    {
+        p2m_access_t a;
+        p2m_type_t t;
+        mfn_t mfn;
+        int err = 0;
+
+        if ( altp2m_get_effective_entry(p2m, _gfn(start), &mfn, &t, &a, AP2MGET_query) )
+            a = p2m->default_access;
+
+        if ( (err = p2m->set_entry(p2m, _gfn(start), mfn, PAGE_ORDER_4K, t, a,
+                                   sve->suppress_ve)) && !sve->first_error )
+        {
+            sve->first_error = start; /* Save the gfn of the first error */
+            sve->first_error_code = err; /* Save the first error code */
+        }
+
+        /* Check for continuation if it's not the last iteration. */
+        if ( sve->last_gfn >= ++start && hypercall_preempt_check() )
+        {
+            rc = -ERESTART;
+            break;
+        }
+    }
+
+    sve->first_gfn = start;
+
+    if ( ap2m )
+        p2m_unlock(ap2m);
+
+    p2m_unlock(host_p2m);
+
+    return rc;
+}
+
 int p2m_get_suppress_ve(struct domain *d, gfn_t gfn, bool *suppress_ve,
                         unsigned int altp2m_idx)
 {
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 353f8034d9..49965d256c 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -46,6 +46,16 @@  struct xen_hvm_altp2m_suppress_ve {
     uint64_t gfn;
 };
 
+struct xen_hvm_altp2m_suppress_ve_multi {
+    uint16_t view;
+    uint8_t suppress_ve; /* Boolean type. */
+    uint8_t pad1;
+    uint32_t first_error_code; /* Must be set to 0 . */
+    uint64_t first_gfn; /* Value will be updated */
+    uint64_t last_gfn;
+    uint64_t first_error; /* Gfn of the first error. Must be set to 0. */
+};
+
 #if __XEN_INTERFACE_VERSION__ < 0x00040900
 
 /* Set the logical level of one of a domain's PCI INTx wires. */
@@ -339,6 +349,8 @@  struct xen_hvm_altp2m_op {
 #define HVMOP_altp2m_vcpu_disable_notify  13
 /* Get the active vcpu p2m index */
 #define HVMOP_altp2m_get_p2m_idx          14
+/* Set the "Supress #VE" bit for a range of pages */
+#define HVMOP_altp2m_set_suppress_ve_multi 15
     domid_t domain;
     uint16_t pad1;
     uint32_t pad2;
@@ -353,6 +365,7 @@  struct xen_hvm_altp2m_op {
         struct xen_hvm_altp2m_change_gfn           change_gfn;
         struct xen_hvm_altp2m_set_mem_access_multi set_mem_access_multi;
         struct xen_hvm_altp2m_suppress_ve          suppress_ve;
+        struct xen_hvm_altp2m_suppress_ve_multi    suppress_ve_multi;
         struct xen_hvm_altp2m_vcpu_disable_notify  disable_notify;
         struct xen_hvm_altp2m_get_vcpu_p2m_idx     get_vcpu_p2m_idx;
         uint8_t pad[64];
diff --git a/xen/include/xen/mem_access.h b/xen/include/xen/mem_access.h
index e4d24502e0..00e594a0ad 100644
--- a/xen/include/xen/mem_access.h
+++ b/xen/include/xen/mem_access.h
@@ -75,6 +75,9 @@  long p2m_set_mem_access_multi(struct domain *d,
 int p2m_set_suppress_ve(struct domain *d, gfn_t gfn, bool suppress_ve,
                         unsigned int altp2m_idx);
 
+int p2m_set_suppress_ve_multi(struct domain *d,
+                              struct xen_hvm_altp2m_suppress_ve_multi *suppress_ve);
+
 int p2m_get_suppress_ve(struct domain *d, gfn_t gfn, bool *suppress_ve,
                         unsigned int altp2m_idx);