@@ -2334,6 +2334,21 @@ int xc_memshr_add_to_physmap(xc_interface *xch,
domid_t client_domain,
unsigned long client_gfn);
+/* Allows a range of memory of a client domain to be deduplicated. Using
+ * this function is equivalent to calling xc_memshr_nominate_gfn for each gfn
+ * in the two domains followed by xc_memshr_share_gfns.
+ *
+ * May fail with -EINVAL if the source and client domains have different
+ * memory sizes or if memory sharing is not enabled on either of the domains.
+ * May also fail with -ENOMEM if there isn't enough memory available to store
+ * the sharing metadata before deduplication can happen.
+ */
+int xc_memshr_range_share(xc_interface *xch,
+ domid_t source_domain,
+ domid_t client_domain,
+ uint64_t first_gfn,
+ uint64_t last_gfn);
+
/* Debug calls: return the number of pages referencing the shared frame backing
* the input argument. Should be one or greater.
*
@@ -181,6 +181,25 @@ int xc_memshr_add_to_physmap(xc_interface *xch,
return xc_memshr_memop(xch, source_domain, &mso);
}
+/*
+ * Thin libxc wrapper around XENMEM_sharing_op_range_share: asks the
+ * hypervisor to deduplicate every gfn in [first_gfn, last_gfn] between
+ * the source and client domains.
+ *
+ * The memset() below zeroes the whole op structure, so the _pad fields
+ * and the opaque continuation field start out as 0, which the hypervisor
+ * side requires on the first invocation.
+ *
+ * Returns the result of the memop hypercall: 0 on success, negative on
+ * failure (errno set by xc_memshr_memop).
+ */
+int xc_memshr_range_share(xc_interface *xch,
+ domid_t source_domain,
+ domid_t client_domain,
+ uint64_t first_gfn,
+ uint64_t last_gfn)
+{
+ xen_mem_sharing_op_t mso;
+
+ memset(&mso, 0, sizeof(mso));
+
+ mso.op = XENMEM_sharing_op_range_share;
+
+ mso.u.range.client_domain = client_domain;
+ mso.u.range.first_gfn = first_gfn;
+ mso.u.range.last_gfn = last_gfn;
+
+ return xc_memshr_memop(xch, source_domain, &mso);
+}
+
int xc_memshr_domain_resume(xc_interface *xch,
domid_t domid)
{
@@ -24,6 +24,8 @@ static int usage(const char* prog)
printf(" nominate <domid> <gfn> - Nominate a page for sharing.\n");
printf(" share <domid> <gfn> <handle> <source> <source-gfn> <source-handle>\n");
printf(" - Share two pages.\n");
+ printf(" range <source-domid> <destination-domid> <first-gfn> <last-gfn>\n");
+ printf(" - Share pages between domains in a range.\n");
printf(" unshare <domid> <gfn> - Unshare a page by grabbing a writable map.\n");
printf(" add-to-physmap <domid> <gfn> <source> <source-gfn> <source-handle>\n");
printf(" - Populate a page in a domain with a shared page.\n");
@@ -180,6 +182,26 @@ int main(int argc, const char** argv)
}
printf("Audit returned %d errors.\n", rc);
}
+ else if( !strcasecmp(cmd, "range") )
+ {
+ domid_t sdomid, cdomid;
+ int rc;
+ uint64_t first_gfn, last_gfn;
+
+ if ( argc != 6 )
+ return usage(argv[0]);
+ sdomid = strtol(argv[2], NULL, 0);
+ cdomid = strtol(argv[3], NULL, 0);
+ first_gfn = strtoul(argv[4], NULL, 0);
+ last_gfn = strtoul(argv[5], NULL, 0);
+
+ rc = xc_memshr_range_share(xch, sdomid, cdomid, first_gfn, last_gfn);
+ if ( rc < 0 )
+ {
+ printf("error executing xc_memshr_range_share: %s\n", strerror(errno));
+ return rc;
+ }
+ }
return 0;
}
@@ -1324,6 +1324,58 @@ int relinquish_shared_pages(struct domain *d)
return rc;
}
+/*
+ * Deduplicate the gfn range [range->first_gfn, range->last_gfn] between
+ * source domain d and client domain cd by nominating each gfn in both
+ * domains and then sharing the resulting pair.  The memop handler has
+ * already verified that sharing is enabled, that both domains are paused,
+ * and that the range is within both domains' maximum gpfn.
+ *
+ * range->opaque carries the hypercall continuation point: 0 on first
+ * entry, otherwise the next gfn to process.
+ *
+ * Returns 0 on completion, 1 when preempted (range->opaque then holds the
+ * gfn to resume from), or a negative errno on fatal error (-ENOMEM).
+ */
+static int range_share(struct domain *d, struct domain *cd,
+ struct mem_sharing_op_range *range)
+{
+ int rc = 0;
+ shr_handle_t sh, ch;
+ /* Resume from the saved continuation point, or start at first_gfn. */
+ unsigned long start = range->opaque ?: range->first_gfn;
+
+ while ( range->last_gfn >= start )
+ {
+ /*
+ * We only break out if we run out of memory as individual pages may
+ * legitimately be unsharable and we just want to skip over those.
+ */
+ rc = mem_sharing_nominate_page(d, start, 0, &sh);
+ if ( rc == -ENOMEM )
+ break;
+
+ if ( !rc )
+ {
+ rc = mem_sharing_nominate_page(cd, start, 0, &ch);
+ if ( rc == -ENOMEM )
+ break;
+
+ if ( !rc )
+ {
+ /* If we get here this should be guaranteed to succeed. */
+ rc = mem_sharing_share_pages(d, start, sh,
+ cd, start, ch);
+ ASSERT(!rc);
+ }
+ }
+
+ /* Check for continuation if it's not the last iteration. */
+ if ( range->last_gfn >= ++start && hypercall_preempt_check() )
+ {
+ rc = 1;
+ break;
+ }
+ }
+
+ /* Record the next gfn so a continuation can resume where we left off. */
+ range->opaque = start;
+
+ /*
+ * The last page may fail with -EINVAL, and for range sharing we don't
+ * care about that.
+ */
+ if ( range->last_gfn < start && rc == -EINVAL )
+ rc = 0;
+
+ return rc;
+}
+
int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
{
int rc;
@@ -1498,6 +1550,96 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
}
break;
+ case XENMEM_sharing_op_range_share:
+ {
+ unsigned long max_sgfn, max_cgfn;
+ struct domain *cd;
+
+ rc = -EINVAL;
+ if ( mso.u.range._pad[0] || mso.u.range._pad[1] ||
+ mso.u.range._pad[2] )
+ goto out;
+
+ /*
+ * We use opaque for the hypercall continuation value.
+ * Ideally the user sets this to 0 in the beginning but
+ * there is no good way of enforcing that here, so we just check
+ * that it's at least in range.
+ */
+ if ( mso.u.range.opaque &&
+ (mso.u.range.opaque < mso.u.range.first_gfn ||
+ mso.u.range.opaque > mso.u.range.last_gfn) )
+ goto out;
+
+ if ( !mem_sharing_enabled(d) )
+ goto out;
+
+ rc = rcu_lock_live_remote_domain_by_id(mso.u.range.client_domain,
+ &cd);
+ if ( rc )
+ goto out;
+
+ /*
+ * We reuse XENMEM_sharing_op_share XSM check here as this is
+ * essentially the same concept repeated over multiple pages.
+ */
+ rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd,
+ XENMEM_sharing_op_share);
+ if ( rc )
+ {
+ rcu_unlock_domain(cd);
+ goto out;
+ }
+
+ if ( !mem_sharing_enabled(cd) )
+ {
+ rcu_unlock_domain(cd);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Sanity check only; the caller is expected to keep both domains
+ * paused for the duration of this op.
+ */
+ if ( !atomic_read(&d->pause_count) ||
+ !atomic_read(&cd->pause_count) )
+ {
+ rcu_unlock_domain(cd);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ max_sgfn = domain_get_maximum_gpfn(d);
+ max_cgfn = domain_get_maximum_gpfn(cd);
+
+ if ( max_sgfn < mso.u.range.first_gfn ||
+ max_sgfn < mso.u.range.last_gfn ||
+ max_cgfn < mso.u.range.first_gfn ||
+ max_cgfn < mso.u.range.last_gfn )
+ {
+ rcu_unlock_domain(cd);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = range_share(d, cd, &mso.u.range);
+ rcu_unlock_domain(cd);
+
+ if ( rc > 0 )
+ {
+ if ( __copy_to_guest(arg, &mso, 1) )
+ rc = -EFAULT;
+ else
+ rc = hypercall_create_continuation(__HYPERVISOR_memory_op,
+ "lh", XENMEM_sharing_op,
+ arg);
+ }
+ else
+ mso.u.range.opaque = 0;
+ }
+ break;
+
case XENMEM_sharing_op_debug_gfn:
{
unsigned long gfn = mso.u.debug.u.gfn;
@@ -465,6 +465,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
#define XENMEM_sharing_op_debug_gref 5
#define XENMEM_sharing_op_add_physmap 6
#define XENMEM_sharing_op_audit 7
+#define XENMEM_sharing_op_range_share 8
#define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10)
#define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9)
@@ -500,7 +501,14 @@ struct xen_mem_sharing_op {
uint64_aligned_t client_gfn; /* IN: the client gfn */
uint64_aligned_t client_handle; /* IN: handle to the client page */
domid_t client_domain; /* IN: the client domain id */
- } share;
+ } share;
+ struct mem_sharing_op_range { /* OP_RANGE_SHARE */
+ uint64_aligned_t first_gfn; /* IN: the first gfn */
+ uint64_aligned_t last_gfn; /* IN: the last gfn */
+ uint64_aligned_t opaque; /* Must be set to 0 */
+ domid_t client_domain; /* IN: the client domain id */
+ uint16_t _pad[3]; /* Must be set to 0 */
+ } range;
struct mem_sharing_op_debug { /* OP_DEBUG_xxx */
union {
uint64_aligned_t gfn; /* IN: gfn to debug */