[8/9] vm_event: Add vm_event_ng interface

Message ID 3ec19ed5425a62ecbc524e44c4bba86d5fe41762.1559224640.git.ppircalabu@bitdefender.com
State Superseded
Series Per vcpu vm_event channels

Commit Message

Petre Ovidiu PIRCALABU May 30, 2019, 2:18 p.m. UTC
In high throughput introspection scenarios where lots of monitor
vm_events are generated, the ring buffer can fill up before the monitor
application gets a chance to handle all the requests, thus blocking
other vcpus, which have to wait for a slot to become available.

This patch adds support for a different mechanism to handle synchronous
vm_event requests / responses. As each synchronous request pauses the
vcpu until the corresponding response is handled, the request can be
stored in a slotted memory buffer (one slot per vcpu) shared between the
hypervisor and the controlling domain.
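
In short, the handshake over each vcpu's slot works as follows (a
summary of the code below; the monitor side describes the expected
client behaviour, not code from this series):

  hypervisor:  slot->u.req = *req;          /* vcpu stays paused */
               slot->state = STATE_VM_EVENT_SLOT_SUBMIT;
               notify_via_xen_event_channel(d, slot->port);

  monitor:     read slot->u.req, write slot->u.rsp;
               slot->state = STATE_VM_EVENT_SLOT_FINISH;
               notify Xen over the bound event channel;

  hypervisor:  *rsp = slot->u.rsp;
               slot->state = STATE_VM_EVENT_SLOT_IDLE;
               handle the response and unpause the vcpu.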

Signed-off-by: Petre Pircalabu <ppircalabu@bitdefender.com>
---
 tools/libxc/include/xenctrl.h |   6 +
 tools/libxc/xc_monitor.c      |  15 ++
 tools/libxc/xc_private.h      |   8 +
 tools/libxc/xc_vm_event.c     |  53 +++++
 xen/arch/x86/mm.c             |   5 +
 xen/common/Makefile           |   1 +
 xen/common/domctl.c           |   7 +
 xen/common/vm_event.c         |  94 ++++-----
 xen/common/vm_event_ng.c      | 449 ++++++++++++++++++++++++++++++++++++++++++
 xen/include/public/domctl.h   |  20 ++
 xen/include/public/memory.h   |   2 +
 xen/include/public/vm_event.h |  16 ++
 xen/include/xen/vm_event.h    |  10 +
 13 files changed, 642 insertions(+), 44 deletions(-)
 create mode 100644 xen/common/vm_event_ng.c

Comments

Andrew Cooper June 4, 2019, 2:43 p.m. UTC | #1
On 30/05/2019 15:18, Petre Pircalabu wrote:
> In high throughput introspection scenarios where lots of monitor
> vm_events are generated, the ring buffer can fill up before the monitor
> application gets a chance to handle all the requests thus blocking
> other vcpus which will have to wait for a slot to become available.
>
> This patch adds support for a different mechanism to handle synchronous
> vm_event requests / responses. As each synchronous request pauses the
> vcpu until the corresponding response is handled, it can be stored in
> a slotted memory buffer (one per vcpu) shared between the hypervisor and
> the controlling domain.
>
> Signed-off-by: Petre Pircalabu <ppircalabu@bitdefender.com>

There are a number of concerns here.

First and foremost, why is a new domctl being added?  Surely this should
just be a "type of ring access" parameter to event_enable?  Everything
else in the vm_event set of APIs should be unchanged as a result of the
interface differences.

Or am I missing something?

> diff --git a/xen/common/vm_event_ng.c b/xen/common/vm_event_ng.c
> new file mode 100644
> index 0000000..17ae33c
> --- /dev/null
> +++ b/xen/common/vm_event_ng.c
> <snip>
>
> +static int vm_event_channels_alloc_buffer(struct vm_event_channels_domain *impl)
> +{
> +    int i, rc = -ENOMEM;
> +
> +    for ( i = 0; i < impl->nr_frames; i++ )
> +    {
> +        struct page_info *page = alloc_domheap_page(impl->ved.d, 0);

This creates pages which are reference-able (in principle) by the guest,
and are bounded by d->max_pages.

Both of these are properties of the existing interface which we'd prefer
to remove.

> +        if ( !page )
> +            goto err;
> +
> +        if ( !get_page_and_type(page, impl->ved.d, PGT_writable_page) )
> +        {
> +            rc = -ENODATA;
> +            goto err;
> +        }
> +
> +        impl->mfn[i] = page_to_mfn(page);
> +    }
> +
> +    impl->slots = (struct vm_event_slot *)vmap(impl->mfn, impl->nr_frames);

You appear to have opencoded vmalloc() here.  Is there any reason not to
use that?

> +    if ( !impl->slots )
> +        goto err;
> +
> +    for ( i = 0; i < impl->nr_frames; i++ )
> +        clear_page((void*)impl->slots + i * PAGE_SIZE);
> +
> +    return 0;
> +
> +err:
> +    while ( --i >= 0 )
> +    {
> +        struct page_info *page = mfn_to_page(impl->mfn[i]);
> +
> +        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
> +            put_page(page);
> +        put_page_and_type(page);
> +    }
> +
> +    return rc;
> +}
> +
> +static void vm_event_channels_free_buffer(struct vm_event_channels_domain *impl)
> +{
> +    int i;
> +
> +    ASSERT(impl);
> +
> +    if ( !impl->slots )
> +        return;
> +
> +    vunmap(impl->slots);
> +
> +    for ( i = 0; i < impl->nr_frames; i++ )
> +    {
> +        struct page_info *page = mfn_to_page(impl->mfn[i]);
> +
> +        ASSERT(page);

mfn_to_page() is going to explode before this ASSERT() does.

> +        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
> +            put_page(page);
> +        put_page_and_type(page);
> +    }
> +}
> +
> +static int vm_event_channels_create(
> +    struct domain *d,
> +    struct xen_domctl_vm_event_ng_op *vec,
> +    struct vm_event_domain **_ved,
> +    int pause_flag,
> +    xen_event_channel_notification_t notification_fn)
> +{
> +    int rc, i;
> +    unsigned int nr_frames = PFN_UP(d->max_vcpus * sizeof(struct vm_event_slot));
> +    struct vm_event_channels_domain *impl;
> +
> +    if ( *_ved )
> +        return -EBUSY;
> +
> +    impl = _xzalloc(sizeof(struct vm_event_channels_domain) +
> +                           nr_frames * sizeof(mfn_t),
> +                    __alignof__(struct vm_event_channels_domain));
> +    if ( unlikely(!impl) )
> +        return -ENOMEM;
> +
> +    spin_lock_init(&impl->ved.lock);
> +    spin_lock(&impl->ved.lock);
> +
> +    impl->nr_frames = nr_frames;
> +    impl->ved.d = d;
> +    impl->ved.ops = &vm_event_channels_ops;
> +
> +    rc = vm_event_init_domain(d);
> +    if ( rc < 0 )
> +        goto err;
> +
> +    rc = vm_event_channels_alloc_buffer(impl);
> +    if ( rc )
> +        goto err;
> +
> +    for ( i = 0; i < d->max_vcpus; i++ )
> +    {
> +        rc = alloc_unbound_xen_event_channel(d, i, current->domain->domain_id,
> +                                             notification_fn);
> +        if ( rc < 0 )
> +            goto err;
> +
> +        impl->slots[i].port = rc;
> +        impl->slots[i].state = STATE_VM_EVENT_SLOT_IDLE;
> +    }
> +
> +    impl->enabled = false;
> +
> +    spin_unlock(&impl->ved.lock);
> +    *_ved = &impl->ved;
> +    return 0;
> +
> +err:
> +    spin_unlock(&impl->ved.lock);
> +    XFREE(impl);

You don't free the event channels on error.

Please make the destructor idempotent and call it from here.
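
Perhaps something along these lines (a sketch only; treating a port of
0 as "never allocated" is an assumption about the intended semantics):

/* Idempotent teardown, usable from both the error path above and
 * vm_event_channels_destroy(). */
static void vm_event_channels_teardown(struct vm_event_channels_domain *impl)
{
    unsigned int i;

    if ( !impl || !impl->slots )
        return;

    for ( i = 0; i < impl->ved.d->max_vcpus; i++ )
        if ( impl->slots[i].port )
            evtchn_close(impl->ved.d, impl->slots[i].port, 0);

    vm_event_channels_free_buffer(impl);
    impl->slots = NULL; /* makes a second call a no-op */
}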

> +    return rc;
> +}
> +
> <snip>
> +int vm_event_ng_domctl(struct domain *d, struct xen_domctl_vm_event_ng_op *vec,
> +                       XEN_GUEST_HANDLE_PARAM(void) u_domctl)
> +{
> +    int rc;
> +
> +    if ( vec->op == XEN_VM_EVENT_NG_GET_VERSION )
> +    {
> +        vec->u.version = VM_EVENT_INTERFACE_VERSION;
> +        return 0;
> +    }
> +
> +    if ( unlikely(d == NULL) )
> +        return -ESRCH;
> +
> +    rc = xsm_vm_event_control(XSM_PRIV, d, vec->type, vec->op);
> +    if ( rc )
> +        return rc;
> +
> +    if ( unlikely(d == current->domain) ) /* no domain_pause() */
> +    {
> +        gdprintk(XENLOG_INFO, "Tried to do a memory event op on itself.\n");
> +        return -EINVAL;
> +    }
> +
> +    if ( unlikely(d->is_dying) )
> +    {
> +        gdprintk(XENLOG_INFO, "Ignoring memory event op on dying domain %u\n",
> +                 d->domain_id);
> +        return 0;
> +    }
> +
> +    if ( unlikely(d->vcpu == NULL) || unlikely(d->vcpu[0] == NULL) )
> +    {
> +        gdprintk(XENLOG_INFO,
> +                 "Memory event op on a domain (%u) with no vcpus\n",
> +                 d->domain_id);
> +        return -EINVAL;
> +    }
> +
> +    switch ( vec->type )
> +    {
> +    case XEN_VM_EVENT_TYPE_MONITOR:
> +    {
> +        rc = -EINVAL;
> +
> +        switch ( vec->op )
> +        {
> +        case XEN_VM_EVENT_NG_CREATE:
> +            /* domain_pause() not required here, see XSA-99 */
> +            rc = arch_monitor_init_domain(d);
> +            if ( rc )
> +                break;
> +            rc = vm_event_channels_create(d, vec, &d->vm_event_monitor,
> +                                     _VPF_mem_access, monitor_notification);
> +            break;
> +
> +        case XEN_VM_EVENT_NG_DESTROY:
> +            if ( !vm_event_check(d->vm_event_monitor) )
> +                break;
> +            domain_pause(d);
> +            rc = vm_event_channels_destroy(&d->vm_event_monitor);
> +            arch_monitor_cleanup_domain(d);
> +            domain_unpause(d);
> +            break;
> +
> +        case XEN_VM_EVENT_NG_SET_STATE:
> +            if ( !vm_event_check(d->vm_event_monitor) )
> +                break;
> +            domain_pause(d);
> +            to_channels(d->vm_event_monitor)->enabled = !!vec->u.enabled;
> +            domain_unpause(d);
> +            rc = 0;
> +            break;
> +
> +        default:
> +            rc = -ENOSYS;
> +        }
> +        break;
> +    }
> +
> +#ifdef CONFIG_HAS_MEM_PAGING
> +    case XEN_VM_EVENT_TYPE_PAGING:
> +#endif
> +
> +#ifdef CONFIG_HAS_MEM_SHARING
> +    case XEN_VM_EVENT_TYPE_SHARING:
> +#endif

These are unnecessary, as they don't deviate from the default.

~Andrew

> +
> +    default:
> +        rc = -ENOSYS;
> +    }
> +
> +    return rc;
> +}
>
Petre Ovidiu PIRCALABU June 5, 2019, 5:01 p.m. UTC | #2
On Tue, 2019-06-04 at 15:43 +0100, Andrew Cooper wrote:
> On 30/05/2019 15:18, Petre Pircalabu wrote:
> > 
> > Signed-off-by: Petre Pircalabu <ppircalabu@bitdefender.com>
> 
> There are a number of concerns here.
> 
> First and foremost, why is a new domctl being added?  Surely this
> should
> just be a "type of ring access" parameter to event_enable? 
> Everything
> else in the vm_event set of APIs should be unchanged as a result of
> the
> interface differences.
> 
> Or am I missing something?
> 
I've used different domctls in order to completely separate the new
interface from the old one.
One thing I don't really like about the old vm_event interface is that
the "create" and "start" operations are handled in the same call
(XEN_VM_EVENT_ENABLE).
These calls should be separated in the new interface because the client
needs to perform its own initialization (mapping the resource and event
channel binding) between "create" and "start".
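
For example, the client-side sequence we have in mind is roughly the
following (just a sketch: error handling is elided and it assumes the
standard libxenforeignmemory / libxenevtchn interfaces):

#include <sys/mman.h>
#include <xenctrl.h>
#include <xenforeignmemory.h>
#include <xenevtchn.h>
#include <xen/vm_event.h>   /* struct vm_event_slot, slot states */

/* nr_frames must match PFN_UP(max_vcpus * sizeof(struct vm_event_slot)),
 * i.e. what the hypervisor computed at "create" time. */
static struct vm_event_slot *
monitor_ng_init(xc_interface *xch, xenforeignmemory_handle *fmem,
                xenevtchn_handle *xce, uint32_t domid,
                unsigned int nr_vcpus, unsigned int nr_frames)
{
    struct vm_event_slot *slots = NULL;
    xenforeignmemory_resource_handle *res;
    unsigned int i;

    /* "create": Xen allocates the slot buffer and the per-vcpu ports. */
    xc_monitor_ng_create(xch, domid);

    /* client initialization: map the slot buffer ... */
    res = xenforeignmemory_map_resource(fmem, domid, XENMEM_resource_vm_event,
                                        XEN_VM_EVENT_TYPE_MONITOR /* id */,
                                        0 /* frame */, nr_frames,
                                        (void **)&slots,
                                        PROT_READ | PROT_WRITE, 0);
    (void)res; /* kept for the eventual unmap */

    /* ... and bind the per-vcpu event channels advertised in the slots. */
    for ( i = 0; i < nr_vcpus; i++ )
        xenevtchn_bind_interdomain(xce, domid, slots[i].port);

    /* "start": only now does Xen begin filling the slots. */
    xc_monitor_ng_set_state(xch, domid, true);

    return slots;
}

The event loop then waits on the event channel fd, consumes
slots[vcpu].u.req when state == STATE_VM_EVENT_SLOT_SUBMIT, writes
u.rsp, sets STATE_VM_EVENT_SLOT_FINISH and notifies the port back.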

> > diff --git a/xen/common/vm_event_ng.c b/xen/common/vm_event_ng.c
> > new file mode 100644
> > index 0000000..17ae33c
> > --- /dev/null
> > +++ b/xen/common/vm_event_ng.c
> > <snip>
> > 
> > +static int vm_event_channels_alloc_buffer(struct
> > vm_event_channels_domain *impl)
> > +{
> > +    int i, rc = -ENOMEM;
> > +
> > +    for ( i = 0; i < impl->nr_frames; i++ )
> > +    {
> > +        struct page_info *page = alloc_domheap_page(impl->ved.d,
> > 0);
> 
> This creates pages which are reference-able (in principle) by the
> guest,
> and are bounded by d->max_pages.
> 
> Both of these are properties of the existing interface which we'd
> prefer
> to remove.
The allocation mechanism is similar to the one used by ioreq (the
main difference is the number of pages).
> 
> > +        if ( !page )
> > +            goto err;
> > +
> > +        if ( !get_page_and_type(page, impl->ved.d,
> > PGT_writable_page) )
> > +        {
> > +            rc = -ENODATA;
> > +            goto err;
> > +        }
> > +
> > +        impl->mfn[i] = page_to_mfn(page);
> > +    }
> > +
> > +    impl->slots = (struct vm_event_slot *)vmap(impl->mfn, impl-
> > >nr_frames);
> 
> You appear to have opencoded vmalloc() here.  Is there any reason not
> to
> use that?
> 

The problem with vmalloc is that if the pages are not assigned to a
specific domain, the remapping fails in the monitor domain.
e.g.:
...
(XEN) mm.c:1015:d0v2 pg_owner d1 l1e_owner d0, but real_pg_owner d-1
(XEN) mm.c:1091:d0v7 Error getting mfn 5fbf53 (pfn ffffffffffffffff)
from L1 entry 80000005fbf53227 for l1e_owner d0, pg_owner d1

> > +err:
> > +    spin_unlock(&impl->ved.lock);
> > +    XFREE(impl);
> 
> You don't free the event channels on error.
> 
> Please make the destructor idempotent and call it from here.
> 
> > 
> > +#ifdef CONFIG_HAS_MEM_PAGING
> > +    case XEN_VM_EVENT_TYPE_PAGING:
> > +#endif
> > +
> > +#ifdef CONFIG_HAS_MEM_SHARING
> > +    case XEN_VM_EVENT_TYPE_SHARING:
> > +#endif
> 
> These are unnecessary, as they don't deviate from the default.
> 
> ~Andrew
> 
> > 
I will correct these in the next patchset iteration.

Many thanks for your support,
Petre
Jan Beulich June 6, 2019, 8:37 a.m. UTC | #3
>>> On 05.06.19 at 19:01, <ppircalabu@bitdefender.com> wrote:
> On Tue, 2019-06-04 at 15:43 +0100, Andrew Cooper wrote:
>> On 30/05/2019 15:18, Petre Pircalabu wrote:
>> > +static int vm_event_channels_alloc_buffer(struct
>> > vm_event_channels_domain *impl)
>> > +{
>> > +    int i, rc = -ENOMEM;
>> > +
>> > +    for ( i = 0; i < impl->nr_frames; i++ )
>> > +    {
>> > +        struct page_info *page = alloc_domheap_page(impl->ved.d,
>> > 0);
>> 
>> This creates pages which are reference-able (in principle) by the
>> guest,
>> and are bounded by d->max_pages.

Not by an HVM one, because they can't reference pages by MFN.
Or else, as Petre implies, the ioreq approach would be wrong, too.

>> Both of these are properties of the existing interface which we'd
>> prefer
>> to remove.
> The allocation mechanism is similar to the one used by ioreq (the
> main difference is the number of pages).

Question is whether you want to use the "caller owned" variant
here. I haven't thought through whether this would actually
be better, so it's merely a remark.

>> > +        if ( !page )
>> > +            goto err;
>> > +
>> > +        if ( !get_page_and_type(page, impl->ved.d,
>> > PGT_writable_page) )
>> > +        {
>> > +            rc = -ENODATA;
>> > +            goto err;
>> > +        }
>> > +
>> > +        impl->mfn[i] = page_to_mfn(page);
>> > +    }
>> > +
>> > +    impl->slots = (struct vm_event_slot *)vmap(impl->mfn, impl-
>> > >nr_frames);
>> 
>> You appear to have opencoded vmalloc() here.  Is there any reason not
>> to
>> use that?
>> 
> 
> The problem with vmalloc is that if the pages are not assigned to a
> specific domain the remapping fails in the monitor domain.
> e.g.:
> ...
> (XEN) mm.c:1015:d0v2 pg_owner d1 l1e_owner d0, but real_pg_owner d-1
> (XEN) mm.c:1091:d0v7 Error getting mfn 5fbf53 (pfn ffffffffffffffff)
> from L1 entry 80000005fbf53227 for l1e_owner d0, pg_owner d1

In which case maybe use vmalloc() and then assign_pages()?

Jan
Petre Ovidiu PIRCALABU June 6, 2019, 1:48 p.m. UTC | #4
On Thu, 2019-06-06 at 02:37 -0600, Jan Beulich wrote:
> > > > On 05.06.19 at 19:01, <ppircalabu@bitdefender.com> wrote:
> > 
> > On Tue, 2019-06-04 at 15:43 +0100, Andrew Cooper wrote:
> > > On 30/05/2019 15:18, Petre Pircalabu wrote:
> > > > +static int vm_event_channels_alloc_buffer(struct
> > > > vm_event_channels_domain *impl)
> > > > +{
> > > > +    int i, rc = -ENOMEM;
> > > > +
> > > > +    for ( i = 0; i < impl->nr_frames; i++ )
> > > > +    {
> > > > +        struct page_info *page = alloc_domheap_page(impl-
> > > > >ved.d,
> > > > 0);
> > > 
> > > This creates pages which are reference-able (in principle) by the
> > > guest,
> > > and are bounded by d->max_pages.
> 
> Not by a HVM one, because they can't reference pages by MFN.
> Or else, as Petre implies, the ioreq approach would be wrong, too.
> 
> > > Both of these are properties of the existing interface which we'd
> > > prefer
> > > to remove.
> > 
> > The allocation mechanism is similar to the one used by ioreq (the
> > main difference is the number of pages).
> 
> Question is whether here you want to use the "caller owned"
> variant. I haven't thought through whether this would actually
> be better, so it's merely a remark.
> 
The XENMEM_rsrc_acq_caller_owned flag can only be used when the calling
domain is the hardware domain. Unfortunately for us, this is a severe
limitation, as we're running the monitor client from a separate DomU.

From xen/common/memory.c:
....
/*
 * FIXME: Until foreign pages inserted into the P2M are properly
 *        reference counted, it is unsafe to allow mapping of
 *        non-caller-owned resource pages unless the caller is
 *        the hardware domain.
 */
 if ( !(xmar.flags & XENMEM_rsrc_acq_caller_owned) &&
      !is_hardware_domain(currd) )
     return -EACCES;
...
> > > > +        if ( !page )
> > > > +            goto err;
> > > > +
> > > > +        if ( !get_page_and_type(page, impl->ved.d,
> > > > PGT_writable_page) )
> > > > +        {
> > > > +            rc = -ENODATA;
> > > > +            goto err;
> > > > +        }
> > > > +
> > > > +        impl->mfn[i] = page_to_mfn(page);
> > > > +    }
> > > > +
> > > > +    impl->slots = (struct vm_event_slot *)vmap(impl->mfn,
> > > > impl-
> > > > > nr_frames);
> > > 
> > > You appear to have opencoded vmalloc() here.  Is there any reason
> > > not
> > > to
> > > use that?
> > > 
> > 
> > The problem with vmalloc is that if the pages are not assigned to a
> > specific domain the remapping fails in the monitor domain.
> > e.g.:
> > ...
> > (XEN) mm.c:1015:d0v2 pg_owner d1 l1e_owner d0, but real_pg_owner d-
> > 1
> > (XEN) mm.c:1091:d0v7 Error getting mfn 5fbf53 (pfn
> > ffffffffffffffff)
> > from L1 entry 80000005fbf53227 for l1e_owner d0, pg_owner d1
> 
> In which case maybe use vmalloc() and then assign_pages()?
> Jan
Unfortunately I wasn't able to make it work:
I replaced the buffer allocation with this code:
....
    impl->slots = vzalloc(impl->nr_frames * PAGE_SIZE);
    if ( !impl->slots )
        return -ENOMEM;

    for ( i = 0; i < impl->nr_frames; i++ )
    {
        impl->mfn[i] = vmap_to_mfn(impl->slots + i * PAGE_SIZE);
        if ( assign_pages(current->domain, mfn_to_page(impl->mfn[i]),
0, 0/*MEMF_no_refcount*/ ) )
        {
            printk("%s: assign_pages returned error\n", __func__);
        }
    }
...
And the error is similar to the one without assign_pages:
....
(XEN) mm.c:1015:d0v4 pg_owner d1 l1e_owner d0, but real_pg_owner d0
(XEN) mm.c:1091:d0v4 Error getting mfn 60deaf (pfn ffffffffffffffff)
from L1 entry 800000060deaf227 for l1e_owner d0, pg_owner d1

Am I missing something?

Many thanks,
Petre
Jan Beulich June 6, 2019, 2:16 p.m. UTC | #5
>>> On 06.06.19 at 15:48, <ppircalabu@bitdefender.com> wrote:
> On Thu, 2019-06-06 at 02:37 -0600, Jan Beulich wrote:
>> In which case maybe use vmalloc() and then assign_pages()?
>> Jan
> Unfortunately I wasn't able to make it work:
> I replaced the buffer allocation with this code:
> ....
>     impl->slots = vzalloc(impl->nr_frames * PAGE_SIZE);
>     if ( !impl->slots )
>         return -ENOMEM;
> 
>     for ( i = 0; i < impl->nr_frames; i++ )
>     {
>         impl->mfn[i] = vmap_to_mfn(impl->slots + i * PAGE_SIZE);
>         if ( assign_pages(current->domain, mfn_to_page(impl->mfn[i]),
> 0, 0/*MEMF_no_refcount*/ ) )

Who is current->domain at this point?

>         {
>             printk("%s: assign_pages returned error\n", __func__);
>         }
>     }
> ...
> And the error is similar with the one without assign_pages:
> ....
> (XEN) mm.c:1015:d0v4 pg_owner d1 l1e_owner d0, but real_pg_owner d0
> (XEN) mm.c:1091:d0v4 Error getting mfn 60deaf (pfn ffffffffffffffff)
> from L1 entry 800000060deaf227 for l1e_owner d0, pg_owner d1

This looks to be an attempt by Dom0 to map a page into its own
page tables which it believes to be owned by Dom1. This would
imply that current->domain above is also Dom1. I would instead
have expected this to be a Dom0-owned page, in which case you
shouldn't specify Dom1 to be the supposed owner of it in the
mapping request.

Jan
Patch

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 943b933..c36b623 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1993,6 +1993,7 @@  int xc_get_mem_access(xc_interface *xch, uint32_t domain_id,
  * Returns the VM_EVENT_INTERFACE version.
  */
 int xc_vm_event_get_version(xc_interface *xch);
+int xc_vm_event_ng_get_version(xc_interface *xch);
 
 /***
  * Monitor control operations.
@@ -2007,6 +2008,11 @@  int xc_vm_event_get_version(xc_interface *xch);
 void *xc_monitor_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
 int xc_monitor_disable(xc_interface *xch, uint32_t domain_id);
 int xc_monitor_resume(xc_interface *xch, uint32_t domain_id);
+
+/* Monitor NG interface */
+int xc_monitor_ng_create(xc_interface *xch, uint32_t domain_id);
+int xc_monitor_ng_destroy(xc_interface *xch, uint32_t domain_id);
+int xc_monitor_ng_set_state(xc_interface *xch, uint32_t domain_id, bool enabled);
 /*
  * Get a bitmap of supported monitor events in the form
  * (1 << XEN_DOMCTL_MONITOR_EVENT_*).
diff --git a/tools/libxc/xc_monitor.c b/tools/libxc/xc_monitor.c
index 718fe8b..4c7ef2b 100644
--- a/tools/libxc/xc_monitor.c
+++ b/tools/libxc/xc_monitor.c
@@ -265,6 +265,21 @@  int xc_monitor_emul_unimplemented(xc_interface *xch, uint32_t domain_id,
     return do_domctl(xch, &domctl);
 }
 
+int xc_monitor_ng_create(xc_interface *xch, uint32_t domain_id)
+{
+    return xc_vm_event_ng_create(xch, domain_id, XEN_VM_EVENT_TYPE_MONITOR);
+}
+
+int xc_monitor_ng_destroy(xc_interface *xch, uint32_t domain_id)
+{
+    return xc_vm_event_ng_destroy(xch, domain_id, XEN_VM_EVENT_TYPE_MONITOR);
+}
+
+int xc_monitor_ng_set_state(xc_interface *xch, uint32_t domain_id, bool enabled)
+{
+    return xc_vm_event_ng_set_state(xch, domain_id, XEN_VM_EVENT_TYPE_MONITOR, enabled);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h
index 482451c..1904a1e 100644
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -420,6 +420,14 @@  int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
 void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int type,
                          uint32_t *port);
 
+/**
+ * VM_EVENT NG operations. Internal use only.
+ */
+int xc_vm_event_ng_create(xc_interface *xch, uint32_t domain_id, int type);
+int xc_vm_event_ng_destroy(xc_interface *xch, uint32_t domain_id, int type);
+int xc_vm_event_ng_set_state(xc_interface *xch, uint32_t domain_id, int type, bool enabled);
+
+
 int do_dm_op(xc_interface *xch, uint32_t domid, unsigned int nr_bufs, ...);
 
 #endif /* __XC_PRIVATE_H__ */
diff --git a/tools/libxc/xc_vm_event.c b/tools/libxc/xc_vm_event.c
index 3b1018b..07243a6 100644
--- a/tools/libxc/xc_vm_event.c
+++ b/tools/libxc/xc_vm_event.c
@@ -154,6 +154,59 @@  int xc_vm_event_get_version(xc_interface *xch)
     return rc;
 }
 
+int xc_vm_event_ng_get_version(xc_interface *xch)
+{
+    DECLARE_DOMCTL;
+    int rc;
+
+    domctl.cmd = XEN_DOMCTL_vm_event_ng_op;
+    domctl.domain = DOMID_INVALID;
+    domctl.u.vm_event_op.op = XEN_VM_EVENT_NG_GET_VERSION;
+    domctl.u.vm_event_op.type = XEN_VM_EVENT_TYPE_MONITOR;
+
+    rc = do_domctl(xch, &domctl);
+    if ( !rc )
+        rc = domctl.u.vm_event_ng_op.u.version;
+    return rc;
+}
+
+int xc_vm_event_ng_create(xc_interface *xch, uint32_t domain_id, int type)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_vm_event_ng_op;
+    domctl.domain = domain_id;
+    domctl.u.vm_event_ng_op.op = XEN_VM_EVENT_NG_CREATE;
+    domctl.u.vm_event_ng_op.type = type;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_vm_event_ng_destroy(xc_interface *xch, uint32_t domain_id, int type)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_vm_event_ng_op;
+    domctl.domain = domain_id;
+    domctl.u.vm_event_ng_op.op = XEN_VM_EVENT_NG_DESTROY;
+    domctl.u.vm_event_ng_op.type = type;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_vm_event_ng_set_state(xc_interface *xch, uint32_t domain_id, int type, bool enabled)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_vm_event_ng_op;
+    domctl.domain = domain_id;
+    domctl.u.vm_event_ng_op.op = XEN_VM_EVENT_NG_SET_STATE;
+    domctl.u.vm_event_ng_op.type = type;
+    domctl.u.vm_event_ng_op.u.enabled = enabled;
+
+    return do_domctl(xch, &domctl);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 2f620d9..030b5bd 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -119,6 +119,7 @@ 
 #include <xen/efi.h>
 #include <xen/grant_table.h>
 #include <xen/hypercall.h>
+#include <xen/vm_event.h>
 #include <asm/paging.h>
 #include <asm/shadow.h>
 #include <asm/page.h>
@@ -4584,6 +4585,10 @@  int arch_acquire_resource(struct domain *d, unsigned int type,
     }
 #endif
 
+    case XENMEM_resource_vm_event:
+        rc = vm_event_ng_get_frames(d, id, frame, nr_frames, mfn_list);
+        break;
+
     default:
         rc = -EOPNOTSUPP;
         break;
diff --git a/xen/common/Makefile b/xen/common/Makefile
index 33d03b8..8cb33e2 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -59,6 +59,7 @@  obj-y += trace.o
 obj-y += version.o
 obj-y += virtual_region.o
 obj-y += vm_event.o
+obj-y += vm_event_ng.o
 obj-y += vmap.o
 obj-y += vsprintf.o
 obj-y += wait.o
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index bade9a6..23f6e56 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -393,6 +393,7 @@  long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
     {
     case XEN_DOMCTL_test_assign_device:
     case XEN_DOMCTL_vm_event_op:
+    case XEN_DOMCTL_vm_event_ng_op:
         if ( op->domain == DOMID_INVALID )
         {
     case XEN_DOMCTL_createdomain:
@@ -1023,6 +1024,12 @@  long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         copyback = 1;
         break;
 
+    case XEN_DOMCTL_vm_event_ng_op:
+        ret = vm_event_ng_domctl(d, &op->u.vm_event_ng_op,
+                                 guest_handle_cast(u_domctl, void));
+        copyback = 1;
+        break;
+
 #ifdef CONFIG_MEM_ACCESS
     case XEN_DOMCTL_set_access_required:
         if ( unlikely(current->domain == d) ) /* no domain_pause() */
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 1d85f3e..e94fe3c 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -380,6 +380,51 @@  static int vm_event_ring_get_response(struct vm_event_ring_domain *impl,
     return 1;
 }
 
+void vm_event_handle_response(struct domain *d, struct vcpu *v,
+                              vm_event_response_t *rsp)
+{
+    /* Check flags which apply only when the vCPU is paused */
+    if ( atomic_read(&v->vm_event_pause_count) )
+    {
+#ifdef CONFIG_HAS_MEM_PAGING
+        if ( rsp->reason == VM_EVENT_REASON_MEM_PAGING )
+            p2m_mem_paging_resume(d, rsp);
+#endif
+
+        /*
+         * Check emulation flags in the arch-specific handler only, as it
+         * has to set arch-specific flags when supported, and to avoid
+         * bitmask overhead when it isn't supported.
+         */
+        vm_event_emulate_check(v, rsp);
+
+        /*
+         * Check in arch-specific handler to avoid bitmask overhead when
+         * not supported.
+         */
+        vm_event_register_write_resume(v, rsp);
+
+        /*
+         * Check in arch-specific handler to avoid bitmask overhead when
+         * not supported.
+         */
+        vm_event_toggle_singlestep(d, v, rsp);
+
+        /* Check for altp2m switch */
+        if ( rsp->flags & VM_EVENT_FLAG_ALTERNATE_P2M )
+            p2m_altp2m_check(v, rsp->altp2m_idx);
+
+        if ( rsp->flags & VM_EVENT_FLAG_SET_REGISTERS )
+            vm_event_set_registers(v, rsp);
+
+        if ( rsp->flags & VM_EVENT_FLAG_GET_NEXT_INTERRUPT )
+            vm_event_monitor_next_interrupt(v);
+
+        if ( rsp->flags & VM_EVENT_FLAG_VCPU_PAUSED )
+            vm_event_vcpu_unpause(v);
+    }
+}
+
 /*
  * Pull all responses from the given ring and unpause the corresponding vCPU
  * if required. Based on the response type, here we can also call custom
@@ -427,47 +472,7 @@  static int vm_event_ring_resume(struct vm_event_ring_domain *impl)
          * In some cases the response type needs extra handling, so here
          * we call the appropriate handlers.
          */
-
-        /* Check flags which apply only when the vCPU is paused */
-        if ( atomic_read(&v->vm_event_pause_count) )
-        {
-#ifdef CONFIG_HAS_MEM_PAGING
-            if ( rsp.reason == VM_EVENT_REASON_MEM_PAGING )
-                p2m_mem_paging_resume(impl->ved.d, &rsp);
-#endif
-
-            /*
-             * Check emulation flags in the arch-specific handler only, as it
-             * has to set arch-specific flags when supported, and to avoid
-             * bitmask overhead when it isn't supported.
-             */
-            vm_event_emulate_check(v, &rsp);
-
-            /*
-             * Check in arch-specific handler to avoid bitmask overhead when
-             * not supported.
-             */
-            vm_event_register_write_resume(v, &rsp);
-
-            /*
-             * Check in arch-specific handler to avoid bitmask overhead when
-             * not supported.
-             */
-            vm_event_toggle_singlestep(impl->ved.d, v, &rsp);
-
-            /* Check for altp2m switch */
-            if ( rsp.flags & VM_EVENT_FLAG_ALTERNATE_P2M )
-                p2m_altp2m_check(v, rsp.altp2m_idx);
-
-            if ( rsp.flags & VM_EVENT_FLAG_SET_REGISTERS )
-                vm_event_set_registers(v, &rsp);
-
-            if ( rsp.flags & VM_EVENT_FLAG_GET_NEXT_INTERRUPT )
-                vm_event_monitor_next_interrupt(v);
-
-            if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
-                vm_event_vcpu_unpause(v);
-        }
+        vm_event_handle_response(impl->ved.d, v, &rsp);
     }
 
     return 0;
@@ -709,9 +714,10 @@  int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
             rc = arch_monitor_init_domain(d);
             if ( rc )
                 break;
-            rc = vm_event_ring_enable(d, vec, &d->vm_event_monitor, _VPF_mem_access,
-                                 HVM_PARAM_MONITOR_RING_PFN,
-                                 monitor_notification);
+            rc = vm_event_ring_enable(d, vec, &d->vm_event_monitor,
+                                      _VPF_mem_access,
+                                      HVM_PARAM_MONITOR_RING_PFN,
+                                      monitor_notification);
             break;
 
         case XEN_VM_EVENT_DISABLE:
diff --git a/xen/common/vm_event_ng.c b/xen/common/vm_event_ng.c
new file mode 100644
index 0000000..17ae33c
--- /dev/null
+++ b/xen/common/vm_event_ng.c
@@ -0,0 +1,449 @@ 
+/******************************************************************************
+ * vm_event_ng.c
+ *
+ * VM event support (new generation).
+ *
+ * Copyright (c) 2019, Bitdefender S.R.L.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <xen/sched.h>
+#include <xen/event.h>
+#include <xen/vm_event.h>
+#include <xen/vmap.h>
+#include <asm/monitor.h>
+#include <asm/vm_event.h>
+#include <xsm/xsm.h>
+
+#define to_channels(_ved) container_of((_ved), \
+                                        struct vm_event_channels_domain, ved)
+
+#define VM_EVENT_CHANNELS_ENABLED       1
+
+struct vm_event_channels_domain
+{
+    /* VM event domain */
+    struct vm_event_domain ved;
+    /* shared channels buffer */
+    struct vm_event_slot *slots;
+    /* the buffer size (number of frames) */
+    unsigned int nr_frames;
+    /* state */
+    bool enabled;
+    /* buffer's mfn list */
+    mfn_t mfn[0];
+};
+
+static const struct vm_event_ops vm_event_channels_ops;
+
+static int vm_event_channels_alloc_buffer(struct vm_event_channels_domain *impl)
+{
+    int i, rc = -ENOMEM;
+
+    for ( i = 0; i < impl->nr_frames; i++ )
+    {
+        struct page_info *page = alloc_domheap_page(impl->ved.d, 0);
+        if ( !page )
+            goto err;
+
+        if ( !get_page_and_type(page, impl->ved.d, PGT_writable_page) )
+        {
+            rc = -ENODATA;
+            goto err;
+        }
+
+        impl->mfn[i] = page_to_mfn(page);
+    }
+
+    impl->slots = (struct vm_event_slot *)vmap(impl->mfn, impl->nr_frames);
+    if ( !impl->slots )
+        goto err;
+
+    for ( i = 0; i < impl->nr_frames; i++ )
+        clear_page((void*)impl->slots + i * PAGE_SIZE);
+
+    return 0;
+
+err:
+    while ( --i >= 0 )
+    {
+        struct page_info *page = mfn_to_page(impl->mfn[i]);
+
+        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+            put_page(page);
+        put_page_and_type(page);
+    }
+
+    return rc;
+}
+
+static void vm_event_channels_free_buffer(struct vm_event_channels_domain *impl)
+{
+    int i;
+
+    ASSERT(impl);
+
+    if ( !impl->slots )
+        return;
+
+    vunmap(impl->slots);
+
+    for ( i = 0; i < impl->nr_frames; i++ )
+    {
+        struct page_info *page = mfn_to_page(impl->mfn[i]);
+
+        ASSERT(page);
+        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+            put_page(page);
+        put_page_and_type(page);
+    }
+}
+
+static int vm_event_channels_create(
+    struct domain *d,
+    struct xen_domctl_vm_event_ng_op *vec,
+    struct vm_event_domain **_ved,
+    int pause_flag,
+    xen_event_channel_notification_t notification_fn)
+{
+    int rc, i;
+    unsigned int nr_frames = PFN_UP(d->max_vcpus * sizeof(struct vm_event_slot));
+    struct vm_event_channels_domain *impl;
+
+    if ( *_ved )
+        return -EBUSY;
+
+    impl = _xzalloc(sizeof(struct vm_event_channels_domain) +
+                           nr_frames * sizeof(mfn_t),
+                    __alignof__(struct vm_event_channels_domain));
+    if ( unlikely(!impl) )
+        return -ENOMEM;
+
+    spin_lock_init(&impl->ved.lock);
+    spin_lock(&impl->ved.lock);
+
+    impl->nr_frames = nr_frames;
+    impl->ved.d = d;
+    impl->ved.ops = &vm_event_channels_ops;
+
+    rc = vm_event_init_domain(d);
+    if ( rc < 0 )
+        goto err;
+
+    rc = vm_event_channels_alloc_buffer(impl);
+    if ( rc )
+        goto err;
+
+    for ( i = 0; i < d->max_vcpus; i++ )
+    {
+        rc = alloc_unbound_xen_event_channel(d, i, current->domain->domain_id,
+                                             notification_fn);
+        if ( rc < 0 )
+            goto err;
+
+        impl->slots[i].port = rc;
+        impl->slots[i].state = STATE_VM_EVENT_SLOT_IDLE;
+    }
+
+    impl->enabled = false;
+
+    spin_unlock(&impl->ved.lock);
+    *_ved = &impl->ved;
+    return 0;
+
+err:
+    spin_unlock(&impl->ved.lock);
+    XFREE(impl);
+    return rc;
+}
+
+static int vm_event_channels_destroy(struct vm_event_domain **_ved)
+{
+    struct vcpu *v;
+    struct vm_event_channels_domain *impl = to_channels(*_ved);
+    int i;
+
+    spin_lock(&(*_ved)->lock);
+
+    for_each_vcpu( (*_ved)->d, v )
+    {
+        if ( atomic_read(&v->vm_event_pause_count) )
+            vm_event_vcpu_unpause(v);
+    }
+
+    for ( i = 0; i < (*_ved)->d->max_vcpus; i++ )
+        evtchn_close((*_ved)->d, impl->slots[i].port, 0);
+
+    vm_event_channels_free_buffer(impl);
+    spin_unlock(&(*_ved)->lock);
+    XFREE(*_ved);
+
+    return 0;
+}
+
+static bool vm_event_channels_check(struct vm_event_domain *ved)
+{
+    return to_channels(ved)->slots != NULL;
+}
+
+static void vm_event_channels_cleanup(struct vm_event_domain **_ved)
+{
+    vm_event_channels_destroy(_ved);
+}
+
+static int vm_event_channels_claim_slot(struct vm_event_domain *ved,
+                                        bool allow_sleep)
+{
+    return 0;
+}
+
+static void vm_event_channels_cancel_slot(struct vm_event_domain *ved)
+{
+}
+
+static void vm_event_channels_put_request(struct vm_event_domain *ved,
+                                          vm_event_request_t *req)
+{
+    struct vm_event_channels_domain *impl = to_channels(ved);
+    struct vm_event_slot *slot;
+
+    /* exit if the vm_event_domain was not specifically enabled */
+    if ( !impl->enabled )
+        return;
+
+    ASSERT( req->vcpu_id >= 0 && req->vcpu_id < ved->d->max_vcpus );
+
+    slot = &impl->slots[req->vcpu_id];
+
+    if ( current->domain != ved->d )
+    {
+        req->flags |= VM_EVENT_FLAG_FOREIGN;
+#ifndef NDEBUG
+        if ( !(req->flags & VM_EVENT_FLAG_VCPU_PAUSED) )
+            gdprintk(XENLOG_G_WARNING, "d%dv%d was not paused.\n",
+                     ved->d->domain_id, req->vcpu_id);
+#endif
+    }
+
+    req->version = VM_EVENT_INTERFACE_VERSION;
+
+    spin_lock(&impl->ved.lock);
+    if ( slot->state != STATE_VM_EVENT_SLOT_IDLE )
+    {
+        gdprintk(XENLOG_G_WARNING, "The VM event slot for d%dv%d is not IDLE.\n",
+                 impl->ved.d->domain_id, req->vcpu_id);
+        spin_unlock(&impl->ved.lock);
+        return;
+    }
+
+    slot->u.req = *req;
+    slot->state = STATE_VM_EVENT_SLOT_SUBMIT;
+    spin_unlock(&impl->ved.lock);
+    notify_via_xen_event_channel(impl->ved.d, slot->port);
+}
+
+static int vm_event_channels_get_response(struct vm_event_channels_domain *impl,
+                                          struct vcpu *v, vm_event_response_t *rsp)
+{
+    struct vm_event_slot *slot = &impl->slots[v->vcpu_id];
+
+    ASSERT( slot != NULL );
+    spin_lock(&impl->ved.lock);
+
+    if ( slot->state != STATE_VM_EVENT_SLOT_FINISH )
+    {
+        gdprintk(XENLOG_G_WARNING, "The VM event slot state for d%dv%d is invalid.\n",
+                 impl->ved.d->domain_id, v->vcpu_id);
+        spin_unlock(&impl->ved.lock);
+        return -1;
+    }
+
+    *rsp = slot->u.rsp;
+    slot->state = STATE_VM_EVENT_SLOT_IDLE;
+
+    spin_unlock(&impl->ved.lock);
+    return 0;
+}
+
+static int vm_event_channels_resume(struct vm_event_channels_domain *impl,
+                                    struct vcpu *v)
+{
+    vm_event_response_t rsp;
+
+    if ( unlikely(!impl || !vm_event_check(&impl->ved)) )
+         return -ENODEV;
+
+    ASSERT(impl->ved.d != current->domain);
+
+    if ( vm_event_channels_get_response(impl, v, &rsp) ||
+         rsp.version != VM_EVENT_INTERFACE_VERSION ||
+         rsp.vcpu_id != v->vcpu_id )
+        return -1;
+
+    vm_event_handle_response(impl->ved.d, v, &rsp);
+
+    return 0;
+}
+
+/* Registered with Xen-bound event channel for incoming notifications. */
+static void monitor_notification(struct vcpu *v, unsigned int port)
+{
+    vm_event_channels_resume(to_channels(v->domain->vm_event_monitor), v);
+}
+
+int vm_event_ng_domctl(struct domain *d, struct xen_domctl_vm_event_ng_op *vec,
+                       XEN_GUEST_HANDLE_PARAM(void) u_domctl)
+{
+    int rc;
+
+    if ( vec->op == XEN_VM_EVENT_NG_GET_VERSION )
+    {
+        vec->u.version = VM_EVENT_INTERFACE_VERSION;
+        return 0;
+    }
+
+    if ( unlikely(d == NULL) )
+        return -ESRCH;
+
+    rc = xsm_vm_event_control(XSM_PRIV, d, vec->type, vec->op);
+    if ( rc )
+        return rc;
+
+    if ( unlikely(d == current->domain) ) /* no domain_pause() */
+    {
+        gdprintk(XENLOG_INFO, "Tried to do a memory event op on itself.\n");
+        return -EINVAL;
+    }
+
+    if ( unlikely(d->is_dying) )
+    {
+        gdprintk(XENLOG_INFO, "Ignoring memory event op on dying domain %u\n",
+                 d->domain_id);
+        return 0;
+    }
+
+    if ( unlikely(d->vcpu == NULL) || unlikely(d->vcpu[0] == NULL) )
+    {
+        gdprintk(XENLOG_INFO,
+                 "Memory event op on a domain (%u) with no vcpus\n",
+                 d->domain_id);
+        return -EINVAL;
+    }
+
+    switch ( vec->type )
+    {
+    case XEN_VM_EVENT_TYPE_MONITOR:
+    {
+        rc = -EINVAL;
+
+        switch ( vec->op )
+        {
+        case XEN_VM_EVENT_NG_CREATE:
+            /* domain_pause() not required here, see XSA-99 */
+            rc = arch_monitor_init_domain(d);
+            if ( rc )
+                break;
+            rc = vm_event_channels_create(d, vec, &d->vm_event_monitor,
+                                     _VPF_mem_access, monitor_notification);
+            break;
+
+        case XEN_VM_EVENT_NG_DESTROY:
+            if ( !vm_event_check(d->vm_event_monitor) )
+                break;
+            domain_pause(d);
+            rc = vm_event_channels_destroy(&d->vm_event_monitor);
+            arch_monitor_cleanup_domain(d);
+            domain_unpause(d);
+            break;
+
+        case XEN_VM_EVENT_NG_SET_STATE:
+            if ( !vm_event_check(d->vm_event_monitor) )
+                break;
+            domain_pause(d);
+            to_channels(d->vm_event_monitor)->enabled = !!vec->u.enabled;
+            domain_unpause(d);
+            rc = 0;
+            break;
+
+        default:
+            rc = -ENOSYS;
+        }
+        break;
+    }
+
+#ifdef CONFIG_HAS_MEM_PAGING
+    case XEN_VM_EVENT_TYPE_PAGING:
+#endif
+
+#ifdef CONFIG_HAS_MEM_SHARING
+    case XEN_VM_EVENT_TYPE_SHARING:
+#endif
+
+    default:
+        rc = -ENOSYS;
+    }
+
+    return rc;
+}
+
+int vm_event_ng_get_frames(struct domain *d, unsigned int id,
+                           unsigned long frame, unsigned int nr_frames,
+                           xen_pfn_t mfn_list[])
+{
+    struct vm_event_domain *ved;
+    int i;
+
+    switch ( id )
+    {
+    case XEN_VM_EVENT_TYPE_MONITOR:
+        ved = d->vm_event_monitor;
+        break;
+
+    default:
+        return -ENOSYS;
+    }
+
+    if ( !vm_event_check(ved) )
+        return -EINVAL;
+
+    if ( frame != 0 || nr_frames != to_channels(ved)->nr_frames )
+        return -EINVAL;
+
+    spin_lock(&ved->lock);
+
+    for ( i = 0; i < to_channels(ved)->nr_frames; i++ )
+        mfn_list[i] = mfn_x(to_channels(ved)->mfn[i]);
+
+    spin_unlock(&ved->lock);
+    return 0;
+}
+
+static const struct vm_event_ops vm_event_channels_ops = {
+    .check = vm_event_channels_check,
+    .cleanup = vm_event_channels_cleanup,
+    .claim_slot = vm_event_channels_claim_slot,
+    .cancel_slot = vm_event_channels_cancel_slot,
+    .put_request = vm_event_channels_put_request
+};
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 19281fa..ff8b680 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -792,6 +792,24 @@  struct xen_domctl_vm_event_op {
 };
 
 /*
+ * XEN_DOMCTL_vm_event_ng_op.
+ * Next Generation vm_event operations.
+ */
+#define XEN_VM_EVENT_NG_CREATE            0
+#define XEN_VM_EVENT_NG_DESTROY           1
+#define XEN_VM_EVENT_NG_SET_STATE         2
+#define XEN_VM_EVENT_NG_GET_VERSION       3
+
+struct xen_domctl_vm_event_ng_op {
+    uint32_t        op;             /* XEN_VM_EVENT_NG_* */
+    uint32_t        type;           /* XEN_VM_EVENT_TYPE_* */
+    union {
+        uint32_t version;           /* OUT: version number */
+        uint8_t  enabled;           /* IN: state */
+    } u;
+};
+
+/*
  * Memory sharing operations
  */
 /* XEN_DOMCTL_mem_sharing_op.
@@ -1142,6 +1160,7 @@  struct xen_domctl {
 /* #define XEN_DOMCTL_set_gnttab_limits          80 - Moved into XEN_DOMCTL_createdomain */
 #define XEN_DOMCTL_vuart_op                      81
 #define XEN_DOMCTL_get_cpu_policy                82
+#define XEN_DOMCTL_vm_event_ng_op                83
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -1183,6 +1202,7 @@  struct xen_domctl {
         struct xen_domctl_subscribe         subscribe;
         struct xen_domctl_debug_op          debug_op;
         struct xen_domctl_vm_event_op       vm_event_op;
+        struct xen_domctl_vm_event_ng_op    vm_event_ng_op;
         struct xen_domctl_mem_sharing_op    mem_sharing_op;
 #if defined(__i386__) || defined(__x86_64__)
         struct xen_domctl_cpuid             cpuid;
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 68ddadb..2e8912e 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -612,6 +612,7 @@  struct xen_mem_acquire_resource {
 
 #define XENMEM_resource_ioreq_server 0
 #define XENMEM_resource_grant_table 1
+#define XENMEM_resource_vm_event 2
 
     /*
      * IN - a type-specific resource identifier, which must be zero
@@ -619,6 +620,7 @@  struct xen_mem_acquire_resource {
      *
      * type == XENMEM_resource_ioreq_server -> id == ioreq server id
      * type == XENMEM_resource_grant_table -> id defined below
+     * type == XENMEM_resource_vm_event -> id == vm_event type
      */
     uint32_t id;
 
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index c48bc21..2f2160b 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -421,6 +421,22 @@  typedef struct vm_event_st {
 
 DEFINE_RING_TYPES(vm_event, vm_event_request_t, vm_event_response_t);
 
+/* VM Event slot state */
+#define STATE_VM_EVENT_SLOT_IDLE     0 /* the slot data is invalid */
+#define STATE_VM_EVENT_SLOT_SUBMIT   1 /* a request was submitted */
+#define STATE_VM_EVENT_SLOT_FINISH   2 /* a response was issued */
+
+struct vm_event_slot
+{
+    uint32_t port;      /* evtchn for notifications to/from helper */
+    uint32_t state:4;
+    uint32_t pad:28;
+    union {
+        vm_event_request_t req;
+        vm_event_response_t rsp;
+    } u;
+};
+
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 #endif /* _XEN_PUBLIC_VM_EVENT_H */
 
diff --git a/xen/include/xen/vm_event.h b/xen/include/xen/vm_event.h
index 15c15e6..df0aafc 100644
--- a/xen/include/xen/vm_event.h
+++ b/xen/include/xen/vm_event.h
@@ -110,6 +110,13 @@  static inline void vm_event_put_request(struct vm_event_domain *ved,
 int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
                     XEN_GUEST_HANDLE_PARAM(void) u_domctl);
 
+int vm_event_ng_domctl(struct domain *d, struct xen_domctl_vm_event_ng_op *vec,
+                       XEN_GUEST_HANDLE_PARAM(void) u_domctl);
+
+int vm_event_ng_get_frames(struct domain *d, unsigned int id,
+                           unsigned long frame, unsigned int nr_frames,
+                           xen_pfn_t mfn_list[]);
+
 void vm_event_vcpu_pause(struct vcpu *v);
 void vm_event_vcpu_unpause(struct vcpu *v);
 
@@ -118,6 +125,9 @@  void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp);
 
 void vm_event_monitor_next_interrupt(struct vcpu *v);
 
+void vm_event_handle_response(struct domain *d, struct vcpu *v,
+                              vm_event_response_t *rsp);
+
 #endif /* __VM_EVENT_H__ */
 
 /*