[v2,3/5] xen/domctl, tools: Introduce a new domctl to get guest memory map

Message ID 20240308015435.4044339-4-xin.wang2@amd.com (mailing list archive)
State New
Series DOMCTL-based guest magic regions allocation for dom0less

Commit Message

Henry Wang March 8, 2024, 1:54 a.m. UTC
There are some use cases where the toolstack needs to know the guest
memory map. For example, the toolstack helper application
"init-dom0less" needs to know the guest magic page regions for 1:1
direct-mapped dom0less DomUs to allocate magic pages.

To address such needs, add a XEN_DOMCTL_get_mem_map hypercall and
related data structures to query the hypervisor for the guest memory
map. The guest memory map is recorded in the domain structure;
currently only the guest magic page region is recorded in it. The
guest magic page region is initialized at domain creation time to the
layout described in the public header, and it is updated for 1:1
dom0less DomUs (see the following commit) to avoid conflicts with RAM.

Take the opportunity to drop an unnecessary empty line to keep the
coding style consistent in the file.

Reported-by: Alec Kwapis <alec.kwapis@medtronic.com>
Signed-off-by: Henry Wang <xin.wang2@amd.com>
---
v2:
- New patch
RFC: I think the newly introduced "struct xen_domctl_mem_map" is
largely a duplicate of "struct xen_memory_map". Any comments on
reusing "struct xen_memory_map" for simplicity?
---
 tools/include/xenctrl.h           |  4 ++++
 tools/libs/ctrl/xc_domain.c       | 32 +++++++++++++++++++++++++++++++
 xen/arch/arm/domain.c             |  6 ++++++
 xen/arch/arm/domctl.c             | 19 +++++++++++++++++-
 xen/arch/arm/include/asm/domain.h |  8 ++++++++
 xen/include/public/arch-arm.h     |  4 ++++
 xen/include/public/domctl.h       | 21 ++++++++++++++++++++
 7 files changed, 93 insertions(+), 1 deletion(-)
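
For illustration, a minimal sketch of how a toolstack helper such as init-dom0less might call the new libxc wrapper; the names come from this patch, while the loop and the error handling are assumptions, not code from the series:

```c
#include <inttypes.h>
#include <stdio.h>
#include <xenctrl.h>

/* Hypothetical helper: print every region of a domain's memory map. */
static int dump_guest_mem_map(xc_interface *xch, uint32_t domid)
{
    struct xen_mem_region regions[XEN_MAX_MEM_REGIONS];
    uint32_t nr_regions = XEN_MAX_MEM_REGIONS; /* IN: buffer capacity */
    int rc = xc_get_domain_mem_map(xch, domid, regions, &nr_regions);

    if ( rc )
        return rc;

    /* OUT: nr_regions now holds the number of regions actually returned. */
    for ( uint32_t i = 0; i < nr_regions; i++ )
        printf("region %" PRIu32 ": type %u [%#" PRIx64 ", %#" PRIx64 ")\n",
               i, regions[i].type, regions[i].start,
               regions[i].start + regions[i].size);

    return 0;
}
```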

Comments

Michal Orzel March 11, 2024, 9:10 a.m. UTC | #1
Hi Henry,

On 08/03/2024 02:54, Henry Wang wrote:
> There are some use cases where the toolstack needs to know the guest
> memory map. For example, the toolstack helper application
> "init-dom0less" needs to know the guest magic page regions for 1:1
> direct-mapped dom0less DomUs to allocate magic pages.
> 
> To address such needs, add a XEN_DOMCTL_get_mem_map hypercall and
> related data structures to query the hypervisor for the guest memory
> map. The guest memory map is recorded in the domain structure;
> currently only the guest magic page region is recorded in it. The
> guest magic page region is initialized at domain creation time to the
> layout described in the public header, and it is updated for 1:1
> dom0less DomUs (see the following commit) to avoid conflicts with RAM.
> 
> Take the opportunity to drop an unnecessary empty line to keep the
> coding style consistent in the file.
> 
> Reported-by: Alec Kwapis <alec.kwapis@medtronic.com>
> Signed-off-by: Henry Wang <xin.wang2@amd.com>
> ---
> v2:
> - New patch
> RFC: I think the newly introduced "struct xen_domctl_mem_map" is
> largely a duplicate of "struct xen_memory_map". Any comments on
> reusing "struct xen_memory_map" for simplicity?
> ---
>  tools/include/xenctrl.h           |  4 ++++
>  tools/libs/ctrl/xc_domain.c       | 32 +++++++++++++++++++++++++++++++
>  xen/arch/arm/domain.c             |  6 ++++++
>  xen/arch/arm/domctl.c             | 19 +++++++++++++++++-
>  xen/arch/arm/include/asm/domain.h |  8 ++++++++
>  xen/include/public/arch-arm.h     |  4 ++++
>  xen/include/public/domctl.h       | 21 ++++++++++++++++++++
>  7 files changed, 93 insertions(+), 1 deletion(-)
> 
> diff --git a/tools/include/xenctrl.h b/tools/include/xenctrl.h
> index 2ef8b4e054..b25e9772a2 100644
> --- a/tools/include/xenctrl.h
> +++ b/tools/include/xenctrl.h
> @@ -1195,6 +1195,10 @@ int xc_domain_setmaxmem(xc_interface *xch,
>                          uint32_t domid,
>                          uint64_t max_memkb);
>  
> +int xc_get_domain_mem_map(xc_interface *xch, uint32_t domid,
> +                          struct xen_mem_region mem_regions[],
> +                          uint32_t *nr_regions);
> +
>  int xc_domain_set_memmap_limit(xc_interface *xch,
>                                 uint32_t domid,
>                                 unsigned long map_limitkb);
> diff --git a/tools/libs/ctrl/xc_domain.c b/tools/libs/ctrl/xc_domain.c
> index f2d9d14b4d..64b46bdfb4 100644
> --- a/tools/libs/ctrl/xc_domain.c
> +++ b/tools/libs/ctrl/xc_domain.c
> @@ -697,6 +697,38 @@ int xc_domain_setmaxmem(xc_interface *xch,
>      return do_domctl(xch, &domctl);
>  }
>  
> +int xc_get_domain_mem_map(xc_interface *xch, uint32_t domid,
> +                          struct xen_mem_region mem_regions[],
> +                          uint32_t *nr_regions)
> +{
> +    int rc;
> +    struct xen_domctl domctl = {
> +        .cmd         = XEN_DOMCTL_get_mem_map,
> +        .domain      = domid,
> +        .u.mem_map = {
> +            .nr_mem_regions = *nr_regions,
> +        },
> +    };
> +
> +    DECLARE_HYPERCALL_BOUNCE(mem_regions,
> +                             sizeof(xen_mem_region_t) * (*nr_regions),
> +                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
> +
> +    if ( !mem_regions || xc_hypercall_bounce_pre(xch, mem_regions) ||
> +         (*nr_regions) < 1 )
> +        return -1;
> +
> +    set_xen_guest_handle(domctl.u.mem_map.buffer, mem_regions);
> +
> +    rc = do_domctl(xch, &domctl);
> +
> +    xc_hypercall_bounce_post(xch, mem_regions);
> +
> +    *nr_regions = domctl.u.mem_map.nr_mem_regions;
> +
> +    return rc;
> +}
> +
>  #if defined(__i386__) || defined(__x86_64__)
>  int xc_domain_set_memory_map(xc_interface *xch,
>                                 uint32_t domid,
> diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
> index 5e7a7f3e7e..54f3601ab0 100644
> --- a/xen/arch/arm/domain.c
> +++ b/xen/arch/arm/domain.c
> @@ -696,6 +696,7 @@ int arch_domain_create(struct domain *d,
>  {
>      unsigned int count = 0;
>      int rc;
> +    struct mem_map_domain *mem_map = &d->arch.mem_map;
>  
>      BUILD_BUG_ON(GUEST_MAX_VCPUS < MAX_VIRT_CPUS);
>  
> @@ -785,6 +786,11 @@ int arch_domain_create(struct domain *d,
>      d->arch.sve_vl = config->arch.sve_vl;
>  #endif
>  
> +    mem_map->regions[mem_map->nr_mem_regions].start = GUEST_MAGIC_BASE;
You don't check for exceeding the maximum number of regions. Is the expectation that nr_mem_regions
is 0 at this stage? Maybe add an ASSERT here.

> +    mem_map->regions[mem_map->nr_mem_regions].size = GUEST_MAGIC_SIZE;
> +    mem_map->regions[mem_map->nr_mem_regions].type = GUEST_MEM_REGION_MAGIC;
> +    mem_map->nr_mem_regions++;
> +
>      return 0;
>  
>  fail:
> diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
> index ad56efb0f5..92024bcaa0 100644
> --- a/xen/arch/arm/domctl.c
> +++ b/xen/arch/arm/domctl.c
> @@ -148,7 +148,6 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
>  
>          return 0;
>      }
> -
>      case XEN_DOMCTL_vuart_op:
>      {
>          int rc;
> @@ -176,6 +175,24 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
>  
>          return rc;
>      }
> +    case XEN_DOMCTL_get_mem_map:
> +    {
> +        int rc;
Without initialization, what will be the rc value on success?

> +        /*
> +         * Cap the number of regions to the minimum value between toolstack and
> +         * hypervisor to avoid overflowing the buffer.
> +         */
> +        uint32_t nr_regions = min(d->arch.mem_map.nr_mem_regions,
> +                                  domctl->u.mem_map.nr_mem_regions);
> +
> +        if ( copy_to_guest(domctl->u.mem_map.buffer,
> +                           d->arch.mem_map.regions,
> +                           nr_regions) ||
> +             __copy_to_guest(u_domctl, domctl, 1) )
In domctl.h, you wrote that nr_regions is IN/OUT but you don't seem to write back the actual number
of regions.

> +            rc = -EFAULT;
> +
> +        return rc;
> +    }
>      default:
>          return subarch_do_domctl(domctl, d, u_domctl);
>      }
> diff --git a/xen/arch/arm/include/asm/domain.h b/xen/arch/arm/include/asm/domain.h
> index f1d72c6e48..a559a9e499 100644
> --- a/xen/arch/arm/include/asm/domain.h
> +++ b/xen/arch/arm/include/asm/domain.h
> @@ -10,6 +10,7 @@
>  #include <asm/gic.h>
>  #include <asm/vgic.h>
>  #include <asm/vpl011.h>
> +#include <public/domctl.h>
>  #include <public/hvm/params.h>
>  
>  struct hvm_domain
> @@ -59,6 +60,11 @@ struct paging_domain {
>      unsigned long p2m_total_pages;
>  };
>  
> +struct mem_map_domain {
> +    unsigned int nr_mem_regions;
> +    struct xen_mem_region regions[XEN_MAX_MEM_REGIONS];
> +};
> +
>  struct arch_domain
>  {
>  #ifdef CONFIG_ARM_64
> @@ -77,6 +83,8 @@ struct arch_domain
>  
>      struct paging_domain paging;
>  
> +    struct mem_map_domain mem_map;
> +
>      struct vmmio vmmio;
>  
>      /* Continuable domain_relinquish_resources(). */
> diff --git a/xen/include/public/arch-arm.h b/xen/include/public/arch-arm.h
> index a25e87dbda..a06eaf2dab 100644
> --- a/xen/include/public/arch-arm.h
> +++ b/xen/include/public/arch-arm.h
> @@ -420,6 +420,10 @@ typedef uint64_t xen_callback_t;
>   * should instead use the FDT.
>   */
>  
> +/* Guest memory region types */
> +#define GUEST_MEM_REGION_DEFAULT    0
What's the purpose of this default type? It seems unused.

> +#define GUEST_MEM_REGION_MAGIC      1
> +
>  /* Physical Address Space */
>  
>  /* Virtio MMIO mappings */
> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
> index a33f9ec32b..77bf999651 100644
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -946,6 +946,25 @@ struct xen_domctl_paging_mempool {
>      uint64_aligned_t size; /* Size in bytes. */
>  };
>  
> +#define XEN_MAX_MEM_REGIONS 1
The max number of regions can differ between arches. How are you going to handle it?

~Michal
Henry Wang March 11, 2024, 9:46 a.m. UTC | #2
Hi Michal,

On 3/11/2024 5:10 PM, Michal Orzel wrote:
> Hi Henry,
>
> diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
> index 5e7a7f3e7e..54f3601ab0 100644
> --- a/xen/arch/arm/domain.c
> +++ b/xen/arch/arm/domain.c
> @@ -696,6 +696,7 @@ int arch_domain_create(struct domain *d,
>   {
>       unsigned int count = 0;
>       int rc;
> +    struct mem_map_domain *mem_map = &d->arch.mem_map;
>   
>       BUILD_BUG_ON(GUEST_MAX_VCPUS < MAX_VIRT_CPUS);
>   
> @@ -785,6 +786,11 @@ int arch_domain_create(struct domain *d,
>       d->arch.sve_vl = config->arch.sve_vl;
>   #endif
>   
> +    mem_map->regions[mem_map->nr_mem_regions].start = GUEST_MAGIC_BASE;
> You don't check for exceeding the maximum number of regions. Is the expectation that nr_mem_regions
> is 0 at this stage? Maybe add an ASSERT here.

Sure, I will add the checking.
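
One possible shape for that guard (an illustrative sketch, not code from the series), assuming an ASSERT is acceptable at domain creation time; an explicit error return would be the alternative:

```c
/* Sketch: guard the fixed-size regions[] array before appending. */
ASSERT(mem_map->nr_mem_regions < XEN_MAX_MEM_REGIONS);

mem_map->regions[mem_map->nr_mem_regions].start = GUEST_MAGIC_BASE;
mem_map->regions[mem_map->nr_mem_regions].size = GUEST_MAGIC_SIZE;
mem_map->regions[mem_map->nr_mem_regions].type = GUEST_MEM_REGION_MAGIC;
mem_map->nr_mem_regions++;
```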

>> +    mem_map->regions[mem_map->nr_mem_regions].size = GUEST_MAGIC_SIZE;
>> +    mem_map->regions[mem_map->nr_mem_regions].type = GUEST_MEM_REGION_MAGIC;
>> +    mem_map->nr_mem_regions++;
>> +
>>       return 0;
>>   
>>   fail:
>> diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
>> index ad56efb0f5..92024bcaa0 100644
>> --- a/xen/arch/arm/domctl.c
>> +++ b/xen/arch/arm/domctl.c
>> @@ -148,7 +148,6 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
>>   
>>           return 0;
>>       }
>> -
>>       case XEN_DOMCTL_vuart_op:
>>       {
>>           int rc;
>> @@ -176,6 +175,24 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
>>   
>>           return rc;
>>       }
>> +    case XEN_DOMCTL_get_mem_map:
>> +    {
>> +        int rc;
> Without initialization, what will be the rc value on success?

Thanks for catching this (and the copy-back issue below). I made a silly
mistake here and didn't catch it, as I also missed checking the rc on the
toolstack side... I will fix both sides.

>> +        /*
>> +         * Cap the number of regions to the minimum value between toolstack and
>> +         * hypervisor to avoid overflowing the buffer.
>> +         */
>> +        uint32_t nr_regions = min(d->arch.mem_map.nr_mem_regions,
>> +                                  domctl->u.mem_map.nr_mem_regions);
>> +
>> +        if ( copy_to_guest(domctl->u.mem_map.buffer,
>> +                           d->arch.mem_map.regions,
>> +                           nr_regions) ||
>> +             __copy_to_guest(u_domctl, domctl, 1) )
> In domctl.h, you wrote that nr_regions is IN/OUT but you don't seem to write back the actual number
> of regions.

Thanks. Added "domctl->u.mem_map.nr_mem_regions = nr_regions;" locally.
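
With both fixes folded in (rc initialized, capped count written back), the handler would look roughly like this; a sketch of the agreed changes, not the actual v3 code:

```c
case XEN_DOMCTL_get_mem_map:
{
    int rc = 0; /* fix: the success path now returns 0 */
    /*
     * Cap the number of regions to the minimum value between toolstack
     * and hypervisor to avoid overflowing the buffer.
     */
    uint32_t nr_regions = min(d->arch.mem_map.nr_mem_regions,
                              domctl->u.mem_map.nr_mem_regions);

    /* fix: nr_mem_regions is IN/OUT, so report the actual count back. */
    domctl->u.mem_map.nr_mem_regions = nr_regions;

    if ( copy_to_guest(domctl->u.mem_map.buffer,
                       d->arch.mem_map.regions,
                       nr_regions) ||
         __copy_to_guest(u_domctl, domctl, 1) )
        rc = -EFAULT;

    return rc;
}
```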

>> +/* Guest memory region types */
>> +#define GUEST_MEM_REGION_DEFAULT    0
> What's the purpose of this default type? It seems unused.

I added it because struct arch_domain (or rather struct domain) is
zalloc-ed, so the type field in struct xen_mem_region defaults to 0.
Without a reserved default type, a real region type could (mistakenly)
be defined as 0, and zeroed entries would then be indistinguishable
from it.
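
In concrete terms (an illustrative sketch of the rationale above):

```c
/* A zalloc-ed, never-initialized entry naturally reads as: */
struct xen_mem_region empty = {
    .start = 0,
    .size  = 0,
    .type  = GUEST_MEM_REGION_DEFAULT, /* 0, deliberately not a real type */
};
```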
>> +#define GUEST_MEM_REGION_MAGIC      1
>> +
>>   /* Physical Address Space */
>>   
>>   /* Virtio MMIO mappings */
>> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
>> index a33f9ec32b..77bf999651 100644
>> --- a/xen/include/public/domctl.h
>> +++ b/xen/include/public/domctl.h
>> @@ -946,6 +946,25 @@ struct xen_domctl_paging_mempool {
>>       uint64_aligned_t size; /* Size in bytes. */
>>   };
>>   
>> +#define XEN_MAX_MEM_REGIONS 1
> The max number of regions can differ between arches. How are you going to handle it?

I think we can add
```
#ifndef XEN_MAX_MEM_REGIONS
#define XEN_MAX_MEM_REGIONS 1
#endif
```
here and define the arch-specific XEN_MAX_MEM_REGIONS in
public/arch-*.h. I will fix this in v3.

Kind regards,
Henry

> ~Michal
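
The override side of the fallback pattern described above could then be a one-line addition in the arch header; a sketch only, and the value 2 is a placeholder, not taken from the series:

```c
/* Hypothetical override in xen/include/public/arch-arm.h,
 * defined before the fallback in domctl.h is evaluated. */
#define XEN_MAX_MEM_REGIONS 2
```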
Jan Beulich March 11, 2024, 4:58 p.m. UTC | #3
On 08.03.2024 02:54, Henry Wang wrote:
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -946,6 +946,25 @@ struct xen_domctl_paging_mempool {
>      uint64_aligned_t size; /* Size in bytes. */
>  };
>  
> +#define XEN_MAX_MEM_REGIONS 1
> +
> +struct xen_mem_region {
> +    uint64_t start;
> +    uint64_t size;

uint64_aligned_t?

> +    unsigned int type;

uint32_t and explicit padding (incl checking thereof) please.

> +};
> +typedef struct xen_mem_region xen_mem_region_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_mem_region_t);
> +
> +struct xen_domctl_mem_map {
> +    /* IN & OUT */
> +    uint32_t nr_mem_regions;
> +    /* OUT */
> +    XEN_GUEST_HANDLE(xen_mem_region_t) buffer;

XEN_GUEST_HANDLE_64() and explicit padding (+checking) again please.

Jan
Henry Wang March 12, 2024, 3:06 a.m. UTC | #4
Hi Jan,

On 3/12/2024 12:58 AM, Jan Beulich wrote:
> On 08.03.2024 02:54, Henry Wang wrote:
>> --- a/xen/include/public/domctl.h
>> +++ b/xen/include/public/domctl.h
>> @@ -946,6 +946,25 @@ struct xen_domctl_paging_mempool {
>>       uint64_aligned_t size; /* Size in bytes. */
>>   };
>>   
>> +#define XEN_MAX_MEM_REGIONS 1
>> +
>> +struct xen_mem_region {
>> +    uint64_t start;
>> +    uint64_t size;
> uint64_aligned_t?

Yes this makes great sense, thanks for catching it here and ...

>> +    unsigned int type;
> uint32_t and explicit padding (incl checking thereof) please.

...here and ...

>> +};
>> +typedef struct xen_mem_region xen_mem_region_t;
>> +DEFINE_XEN_GUEST_HANDLE(xen_mem_region_t);
>> +
>> +struct xen_domctl_mem_map {
>> +    /* IN & OUT */
>> +    uint32_t nr_mem_regions;
>> +    /* OUT */
>> +    XEN_GUEST_HANDLE(xen_mem_region_t) buffer;
> XEN_GUEST_HANDLE_64() and explicit padding (+checking) again please.

...here. I will update the patch accordingly and add the padding + 
checking in v3.

Kind regards,
Henry

> Jan
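
Taken together, Jan's comments suggest struct shapes roughly like the following for v3; this is only a sketch, and the padding field names and the "must be zero" checking are assumptions:

```c
struct xen_mem_region {
    uint64_aligned_t start;
    uint64_aligned_t size;
    uint32_t type;
    uint32_t pad;    /* assumed name; must be zero, checked by Xen */
};
typedef struct xen_mem_region xen_mem_region_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_region_t);

struct xen_domctl_mem_map {
    /* IN & OUT */
    uint32_t nr_mem_regions;
    uint32_t pad;    /* assumed name; must be zero */
    /* OUT */
    XEN_GUEST_HANDLE_64(xen_mem_region_t) buffer;
};
```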

Patch

diff --git a/tools/include/xenctrl.h b/tools/include/xenctrl.h
index 2ef8b4e054..b25e9772a2 100644
--- a/tools/include/xenctrl.h
+++ b/tools/include/xenctrl.h
@@ -1195,6 +1195,10 @@  int xc_domain_setmaxmem(xc_interface *xch,
                         uint32_t domid,
                         uint64_t max_memkb);
 
+int xc_get_domain_mem_map(xc_interface *xch, uint32_t domid,
+                          struct xen_mem_region mem_regions[],
+                          uint32_t *nr_regions);
+
 int xc_domain_set_memmap_limit(xc_interface *xch,
                                uint32_t domid,
                                unsigned long map_limitkb);
diff --git a/tools/libs/ctrl/xc_domain.c b/tools/libs/ctrl/xc_domain.c
index f2d9d14b4d..64b46bdfb4 100644
--- a/tools/libs/ctrl/xc_domain.c
+++ b/tools/libs/ctrl/xc_domain.c
@@ -697,6 +697,38 @@  int xc_domain_setmaxmem(xc_interface *xch,
     return do_domctl(xch, &domctl);
 }
 
+int xc_get_domain_mem_map(xc_interface *xch, uint32_t domid,
+                          struct xen_mem_region mem_regions[],
+                          uint32_t *nr_regions)
+{
+    int rc;
+    struct xen_domctl domctl = {
+        .cmd         = XEN_DOMCTL_get_mem_map,
+        .domain      = domid,
+        .u.mem_map = {
+            .nr_mem_regions = *nr_regions,
+        },
+    };
+
+    DECLARE_HYPERCALL_BOUNCE(mem_regions,
+                             sizeof(xen_mem_region_t) * (*nr_regions),
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( !mem_regions || xc_hypercall_bounce_pre(xch, mem_regions) ||
+         (*nr_regions) < 1 )
+        return -1;
+
+    set_xen_guest_handle(domctl.u.mem_map.buffer, mem_regions);
+
+    rc = do_domctl(xch, &domctl);
+
+    xc_hypercall_bounce_post(xch, mem_regions);
+
+    *nr_regions = domctl.u.mem_map.nr_mem_regions;
+
+    return rc;
+}
+
 #if defined(__i386__) || defined(__x86_64__)
 int xc_domain_set_memory_map(xc_interface *xch,
                                uint32_t domid,
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 5e7a7f3e7e..54f3601ab0 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -696,6 +696,7 @@  int arch_domain_create(struct domain *d,
 {
     unsigned int count = 0;
     int rc;
+    struct mem_map_domain *mem_map = &d->arch.mem_map;
 
     BUILD_BUG_ON(GUEST_MAX_VCPUS < MAX_VIRT_CPUS);
 
@@ -785,6 +786,11 @@  int arch_domain_create(struct domain *d,
     d->arch.sve_vl = config->arch.sve_vl;
 #endif
 
+    mem_map->regions[mem_map->nr_mem_regions].start = GUEST_MAGIC_BASE;
+    mem_map->regions[mem_map->nr_mem_regions].size = GUEST_MAGIC_SIZE;
+    mem_map->regions[mem_map->nr_mem_regions].type = GUEST_MEM_REGION_MAGIC;
+    mem_map->nr_mem_regions++;
+
     return 0;
 
 fail:
diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
index ad56efb0f5..92024bcaa0 100644
--- a/xen/arch/arm/domctl.c
+++ b/xen/arch/arm/domctl.c
@@ -148,7 +148,6 @@  long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
 
         return 0;
     }
-
     case XEN_DOMCTL_vuart_op:
     {
         int rc;
@@ -176,6 +175,24 @@  long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
 
         return rc;
     }
+    case XEN_DOMCTL_get_mem_map:
+    {
+        int rc;
+        /*
+         * Cap the number of regions to the minimum value between toolstack and
+         * hypervisor to avoid overflowing the buffer.
+         */
+        uint32_t nr_regions = min(d->arch.mem_map.nr_mem_regions,
+                                  domctl->u.mem_map.nr_mem_regions);
+
+        if ( copy_to_guest(domctl->u.mem_map.buffer,
+                           d->arch.mem_map.regions,
+                           nr_regions) ||
+             __copy_to_guest(u_domctl, domctl, 1) )
+            rc = -EFAULT;
+
+        return rc;
+    }
     default:
         return subarch_do_domctl(domctl, d, u_domctl);
     }
diff --git a/xen/arch/arm/include/asm/domain.h b/xen/arch/arm/include/asm/domain.h
index f1d72c6e48..a559a9e499 100644
--- a/xen/arch/arm/include/asm/domain.h
+++ b/xen/arch/arm/include/asm/domain.h
@@ -10,6 +10,7 @@ 
 #include <asm/gic.h>
 #include <asm/vgic.h>
 #include <asm/vpl011.h>
+#include <public/domctl.h>
 #include <public/hvm/params.h>
 
 struct hvm_domain
@@ -59,6 +60,11 @@  struct paging_domain {
     unsigned long p2m_total_pages;
 };
 
+struct mem_map_domain {
+    unsigned int nr_mem_regions;
+    struct xen_mem_region regions[XEN_MAX_MEM_REGIONS];
+};
+
 struct arch_domain
 {
 #ifdef CONFIG_ARM_64
@@ -77,6 +83,8 @@  struct arch_domain
 
     struct paging_domain paging;
 
+    struct mem_map_domain mem_map;
+
     struct vmmio vmmio;
 
     /* Continuable domain_relinquish_resources(). */
diff --git a/xen/include/public/arch-arm.h b/xen/include/public/arch-arm.h
index a25e87dbda..a06eaf2dab 100644
--- a/xen/include/public/arch-arm.h
+++ b/xen/include/public/arch-arm.h
@@ -420,6 +420,10 @@  typedef uint64_t xen_callback_t;
  * should instead use the FDT.
  */
 
+/* Guest memory region types */
+#define GUEST_MEM_REGION_DEFAULT    0
+#define GUEST_MEM_REGION_MAGIC      1
+
 /* Physical Address Space */
 
 /* Virtio MMIO mappings */
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index a33f9ec32b..77bf999651 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -946,6 +946,25 @@  struct xen_domctl_paging_mempool {
     uint64_aligned_t size; /* Size in bytes. */
 };
 
+#define XEN_MAX_MEM_REGIONS 1
+
+struct xen_mem_region {
+    uint64_t start;
+    uint64_t size;
+    unsigned int type;
+};
+typedef struct xen_mem_region xen_mem_region_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_region_t);
+
+struct xen_domctl_mem_map {
+    /* IN & OUT */
+    uint32_t nr_mem_regions;
+    /* OUT */
+    XEN_GUEST_HANDLE(xen_mem_region_t) buffer;
+};
+typedef struct xen_domctl_mem_map xen_domctl_mem_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_map_t);
+
 #if defined(__i386__) || defined(__x86_64__)
 struct xen_domctl_vcpu_msr {
     uint32_t         index;
@@ -1277,6 +1296,7 @@  struct xen_domctl {
 #define XEN_DOMCTL_vmtrace_op                    84
 #define XEN_DOMCTL_get_paging_mempool_size       85
 #define XEN_DOMCTL_set_paging_mempool_size       86
+#define XEN_DOMCTL_get_mem_map                   87
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -1339,6 +1359,7 @@  struct xen_domctl {
         struct xen_domctl_vuart_op          vuart_op;
         struct xen_domctl_vmtrace_op        vmtrace_op;
         struct xen_domctl_paging_mempool    paging_mempool;
+        struct xen_domctl_mem_map           mem_map;
         uint8_t                             pad[128];
     } u;
 };