@@ -1627,7 +1627,7 @@ int xc_hvm_track_dirty_vram(
* Notify that some pages got modified by the Device Model
*/
int xc_hvm_modified_memory(
- xc_interface *xch, domid_t dom, uint64_t first_pfn, uint64_t nr);
+ xc_interface *xch, domid_t dom, uint64_t first_pfn, uint32_t nr);
/*
* Set a range of memory to a specific type.
@@ -573,29 +573,20 @@ int xc_hvm_track_dirty_vram(
}
int xc_hvm_modified_memory(
- xc_interface *xch, domid_t dom, uint64_t first_pfn, uint64_t nr)
+ xc_interface *xch, domid_t dom, uint64_t first_pfn, uint32_t nr)
{
- DECLARE_HYPERCALL_BUFFER(struct xen_hvm_modified_memory, arg);
- int rc;
-
- arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
- if ( arg == NULL )
- {
- PERROR("Could not allocate memory for xc_hvm_modified_memory hypercall");
- return -1;
- }
+ struct xen_dm_op op;
+ struct xen_dm_op_modified_memory *data;
- arg->domid = dom;
- arg->first_pfn = first_pfn;
- arg->nr = nr;
+ memset(&op, 0, sizeof(op));
- rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
- HVMOP_modified_memory,
- HYPERCALL_BUFFER_AS_ARG(arg));
+ op.op = XEN_DMOP_modified_memory;
+ data = &op.u.modified_memory;
- xc_hypercall_buffer_free(xch, arg);
+ data->first_pfn = first_pfn;
+ data->nr = nr;
- return rc;
+ return do_dm_op(xch, dom, 1, &op, sizeof(op));
}
int xc_hvm_set_mem_type(
@@ -14,6 +14,7 @@
* this program; If not, see <http://www.gnu.org/licenses/>.
*/
+#include <xen/event.h>
#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <xen/sched.h>
@@ -107,6 +108,59 @@ static int set_isa_irq_level(struct domain *d, uint8_t isa_irq,
return 0;
}
+static int modified_memory(struct domain *d, xen_pfn_t *first_pfn,
+ unsigned int *nr)
+{
+ xen_pfn_t last_pfn = *first_pfn + *nr - 1;
+ unsigned int iter = 0;
+ int rc = 0;
+
+ if ( (*first_pfn > last_pfn) ||
+ (last_pfn > domain_get_maximum_gpfn(d)) )
+ return -EINVAL;
+
+ if ( !paging_mode_log_dirty(d) )
+ return 0;
+
+ while ( iter < *nr )
+ {
+ unsigned long pfn = *first_pfn + iter;
+ struct page_info *page;
+
+ page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
+ if ( page )
+ {
+ mfn_t gmfn = _mfn(page_to_mfn(page));
+
+ paging_mark_dirty(d, gmfn);
+ /*
+             * These are most probably not page tables any more;
+             * don't take a long time and don't die either.
+ */
+ sh_remove_shadows(d, gmfn, 1, 0);
+ put_page(page);
+ }
+
+ iter++;
+
+ /*
+         * Check for continuation every 256th iteration, provided
+         * this is not the last iteration.
+ */
+ if ( (iter < *nr) && ((iter & 0xff) == 0) &&
+ hypercall_preempt_check() )
+ {
+ *first_pfn += iter;
+ *nr -= iter;
+
+ rc = -ERESTART;
+ break;
+ }
+ }
+
+ return rc;
+}
+
static int dm_op(domid_t domid,
unsigned int nr_bufs,
xen_dm_op_buf_t bufs[])
@@ -268,12 +322,25 @@ static int dm_op(domid_t domid,
break;
}
+ case XEN_DMOP_modified_memory:
+ {
+ struct xen_dm_op_modified_memory *data =
+ &op.u.modified_memory;
+
+ rc = -EINVAL;
+ if ( data->pad )
+ break;
+
+ rc = modified_memory(d, &data->first_pfn, &data->nr);
+ break;
+ }
+
default:
rc = -EOPNOTSUPP;
break;
}
- if ( !rc &&
+ if ( (!rc || rc == -ERESTART) &&
!copy_buf_to_guest(bufs, nr_bufs, 0, &op, sizeof(op)) )
rc = -EFAULT;
@@ -291,6 +358,7 @@ int compat_dm_op(domid_t domid,
{
struct xen_dm_op_buf nat[MAX_NR_BUFS];
unsigned int i;
+ int rc;
if ( nr_bufs > MAX_NR_BUFS )
return -EINVAL;
@@ -310,7 +378,13 @@ int compat_dm_op(domid_t domid,
#undef XLAT_dm_op_buf_HNDL_h
}
- return dm_op(domid, nr_bufs, nat);
+ rc = dm_op(domid, nr_bufs, nat);
+
+ if ( rc == -ERESTART )
+ rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih",
+ domid, nr_bufs, bufs);
+
+ return rc;
}
long do_dm_op(domid_t domid,
@@ -318,6 +392,7 @@ long do_dm_op(domid_t domid,
XEN_GUEST_HANDLE_PARAM(xen_dm_op_buf_t) bufs)
{
struct xen_dm_op_buf nat[MAX_NR_BUFS];
+ int rc;
if ( nr_bufs > MAX_NR_BUFS )
return -EINVAL;
@@ -325,7 +400,13 @@ long do_dm_op(domid_t domid,
if ( copy_from_guest_offset(nat, bufs, 0, nr_bufs) )
return -EFAULT;
- return dm_op(domid, nr_bufs, nat);
+ rc = dm_op(domid, nr_bufs, nat);
+
+ if ( rc == -ERESTART )
+ rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih",
+ domid, nr_bufs, bufs);
+
+ return rc;
}
/*
@@ -4965,7 +4965,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
default:
mask = ~0UL;
break;
- case HVMOP_modified_memory:
case HVMOP_set_mem_type:
mask = HVMOP_op_mask;
break;
@@ -4998,65 +4997,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
rc = guest_handle_is_null(arg) ? hvmop_flush_tlb_all() : -EINVAL;
break;
- case HVMOP_modified_memory:
- {
- struct xen_hvm_modified_memory a;
- struct domain *d;
-
- if ( copy_from_guest(&a, arg, 1) )
- return -EFAULT;
-
- rc = rcu_lock_remote_domain_by_id(a.domid, &d);
- if ( rc != 0 )
- return rc;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto modmem_fail;
-
- rc = xsm_hvm_control(XSM_DM_PRIV, d, op);
- if ( rc )
- goto modmem_fail;
-
- rc = -EINVAL;
- if ( a.nr < start_iter ||
- ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
- ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
- goto modmem_fail;
-
- rc = 0;
- if ( !paging_mode_log_dirty(d) )
- goto modmem_fail;
-
- while ( a.nr > start_iter )
- {
- unsigned long pfn = a.first_pfn + start_iter;
- struct page_info *page;
-
- page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
- if ( page )
- {
- paging_mark_dirty(d, _mfn(page_to_mfn(page)));
- /* These are most probably not page tables any more */
- /* don't take a long time and don't die either */
- sh_remove_shadows(d, _mfn(page_to_mfn(page)), 1, 0);
- put_page(page);
- }
-
- /* Check for continuation if it's not the last interation */
- if ( a.nr > ++start_iter && !(start_iter & HVMOP_op_mask) &&
- hypercall_preempt_check() )
- {
- rc = -ERESTART;
- break;
- }
- }
-
- modmem_fail:
- rcu_unlock_domain(d);
- break;
- }
-
case HVMOP_get_mem_type:
rc = hvmop_get_mem_type(
guest_handle_cast(arg, xen_hvm_get_mem_type_t));
@@ -232,6 +232,24 @@ struct xen_dm_op_set_pci_link_route {
uint8_t isa_irq;
};
+/*
+ * XEN_DMOP_modified_memory: Notify that a set of pages were modified by
+ * an emulator.
+ *
+ * NOTE: In the event of a continuation, @first_pfn is set to the pfn
+ *       of the first page in the remaining set and @nr is reduced to
+ *       the number of pages remaining.
+ */
+#define XEN_DMOP_modified_memory 11
+
+struct xen_dm_op_modified_memory {
+ /* IN - number of contiguous pages modified */
+ uint32_t nr;
+ uint32_t pad;
+ /* IN - first pfn modified */
+ uint64_aligned_t first_pfn;
+};
+
struct xen_dm_op {
uint32_t op;
uint32_t pad;
@@ -246,6 +264,7 @@ struct xen_dm_op {
struct xen_dm_op_set_pci_intx_level set_pci_intx_level;
struct xen_dm_op_set_isa_irq_level set_isa_irq_level;
struct xen_dm_op_set_pci_link_route set_pci_link_route;
+ struct xen_dm_op_modified_memory modified_memory;
} u;
};
@@ -99,19 +99,6 @@ typedef enum {
/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
-/* Notify that some pages got modified by the Device Model. */
-#define HVMOP_modified_memory 7
-struct xen_hvm_modified_memory {
- /* Domain to be updated. */
- domid_t domid;
- /* Number of pages. */
- uint32_t nr;
- /* First pfn. */
- uint64_aligned_t first_pfn;
-};
-typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
-
#define HVMOP_set_mem_type 8
/* Notify that a region of memory is to be treated in a specific way. */
struct xen_hvm_set_mem_type {
@@ -262,7 +262,7 @@ class hvm
bind_irq
# XEN_DOMCTL_pin_mem_cacheattr
cacheattr
-# HVMOP_modified_memory, HVMOP_get_mem_type, HVMOP_set_mem_type,
+# HVMOP_get_mem_type, HVMOP_set_mem_type,
# HVMOP_set_mem_access, HVMOP_get_mem_access, HVMOP_pagetable_dying,
# HVMOP_inject_trap
hvmctl