@@ -4736,15 +4736,18 @@ static int _handle_iomem_range(unsigned long s, unsigned long e,
XEN_GUEST_HANDLE_PARAM(e820entry_t) buffer_param;
XEN_GUEST_HANDLE(e820entry_t) buffer;
- if ( ctxt->n + 1 >= ctxt->map.nr_entries )
- return -E2BIG;
- ent.addr = (uint64_t)ctxt->s << PAGE_SHIFT;
- ent.size = (uint64_t)(s - ctxt->s) << PAGE_SHIFT;
- ent.type = E820_RESERVED;
- buffer_param = guest_handle_cast(ctxt->map.buffer, e820entry_t);
- buffer = guest_handle_from_param(buffer_param, e820entry_t);
- if ( __copy_to_guest_offset(buffer, ctxt->n, &ent, 1) )
- return -EFAULT;
+ if ( !guest_handle_is_null(ctxt->map.buffer) )
+ {
+ if ( ctxt->n + 1 >= ctxt->map.nr_entries )
+ return -E2BIG;
+ ent.addr = (uint64_t)ctxt->s << PAGE_SHIFT;
+ ent.size = (uint64_t)(s - ctxt->s) << PAGE_SHIFT;
+ ent.type = E820_RESERVED;
+ buffer_param = guest_handle_cast(ctxt->map.buffer, e820entry_t);
+ buffer = guest_handle_from_param(buffer_param, e820entry_t);
+ if ( __copy_to_guest_offset(buffer, ctxt->n, &ent, 1) )
+ return -EFAULT;
+ }
ctxt->n++;
}
ctxt->s = e + 1;
@@ -4978,6 +4981,7 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
XEN_GUEST_HANDLE(e820entry_t) buffer;
XEN_GUEST_HANDLE_PARAM(e820entry_t) buffer_param;
unsigned int i;
+ bool store;
rc = xsm_machine_memory_map(XSM_PRIV);
if ( rc )
@@ -4986,9 +4990,10 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
if ( copy_from_guest(&ctxt.map, arg, 1) )
return -EFAULT;
+ store = !guest_handle_is_null(ctxt.map.buffer);
buffer_param = guest_handle_cast(ctxt.map.buffer, e820entry_t);
buffer = guest_handle_from_param(buffer_param, e820entry_t);
- if ( !guest_handle_okay(buffer, ctxt.map.nr_entries) )
+ if ( store && !guest_handle_okay(buffer, ctxt.map.nr_entries) )
return -EFAULT;
for ( i = 0, ctxt.n = 0, ctxt.s = 0; i < e820.nr_map; ++i, ++ctxt.n )
@@ -5005,13 +5010,16 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
if ( rc )
break;
}
- if ( ctxt.map.nr_entries <= ctxt.n + 1 )
+ if ( store )
{
- rc = -E2BIG;
- break;
+ if ( ctxt.map.nr_entries <= ctxt.n + 1 )
+ {
+ rc = -E2BIG;
+ break;
+ }
+ if ( __copy_to_guest_offset(buffer, ctxt.n, e820.map + i, 1) )
+ return -EFAULT;
}
- if ( __copy_to_guest_offset(buffer, ctxt.n, e820.map + i, 1) )
- return -EFAULT;
ctxt.s = PFN_UP(e820.map[i].addr + e820.map[i].size);
}
@@ -341,6 +341,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
* XENMEM_memory_map.
* In case of a buffer not capable to hold all entries of the physical
* memory map -E2BIG is returned and the buffer is filled completely.
+ * Specifying the buffer as NULL will return the number of entries
+ * required to store the complete memory map.
* arg == addr of xen_memory_map_t.
*/
#define XENMEM_machine_memory_map 10
Today there is no way for a domain to obtain the number of entries of the machine memory map returned by the XENMEM_machine_memory_map hypercall. Modify the interface to return just the required number of map entries in case the buffer is specified as NULL. Signed-off-by: Juergen Gross <jgross@suse.com> --- xen/arch/x86/mm.c | 38 +++++++++++++++++++++++--------------- xen/include/public/memory.h | 2 ++ 2 files changed, 25 insertions(+), 15 deletions(-)