Message ID | 20171023090509.4338-4-ross.lagerwall@citrix.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
> -----Original Message----- > From: Xen-devel [mailto:xen-devel-bounces@lists.xen.org] On Behalf Of > Ross Lagerwall > Sent: 23 October 2017 10:05 > To: xen-devel@lists.xen.org > Cc: Stefano Stabellini <sstabellini@kernel.org>; Wei Liu > <wei.liu2@citrix.com>; Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>; > George Dunlap <George.Dunlap@citrix.com>; Andrew Cooper > <Andrew.Cooper3@citrix.com>; Ian Jackson <Ian.Jackson@citrix.com>; Tim > (Xen.org) <tim@xen.org>; Ross Lagerwall <ross.lagerwall@citrix.com>; Jan > Beulich <jbeulich@suse.com> > Subject: [Xen-devel] [PATCH v2 3/5] xen: Provide > XEN_DMOP_pin_memory_cacheattr > > Provide XEN_DMOP_pin_memory_cacheattr to allow a deprivileged QEMU > to > pin the caching type of RAM after moving the VRAM. It is equivalent to > XEN_DOMCTL_pin_memory_cacheattr. > > Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com> Reviewed-by: Paul Durrant <paul.durrant@citrix.com> > --- > > Changed in v2: > * Check pad is 0. > > xen/arch/x86/hvm/dm.c | 18 ++++++++++++++++++ > xen/include/public/hvm/dm_op.h | 14 ++++++++++++++ > xen/include/xlat.lst | 1 + > 3 files changed, 33 insertions(+) > > diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c > index 0027567..42d02cc 100644 > --- a/xen/arch/x86/hvm/dm.c > +++ b/xen/arch/x86/hvm/dm.c > @@ -21,6 +21,7 @@ > > #include <asm/hap.h> > #include <asm/hvm/ioreq.h> > +#include <asm/hvm/cacheattr.h> > #include <asm/shadow.h> > > #include <xsm/xsm.h> > @@ -670,6 +671,22 @@ static int dm_op(const struct dmop_args *op_args) > break; > } > > + case XEN_DMOP_pin_memory_cacheattr: > + { > + const struct xen_dm_op_pin_memory_cacheattr *data = > + &op.u.pin_memory_cacheattr; > + > + if ( data->pad ) > + { > + rc = -EINVAL; > + break; > + } > + > + rc = hvm_set_mem_pinned_cacheattr(d, data->start, data->end, > + data->type); > + break; > + } > + > default: > rc = -EOPNOTSUPP; > break; > @@ -700,6 +717,7 @@ CHECK_dm_op_inject_event; > CHECK_dm_op_inject_msi; > CHECK_dm_op_remote_shutdown; > 
CHECK_dm_op_add_to_physmap; > +CHECK_dm_op_pin_memory_cacheattr; > > int compat_dm_op(domid_t domid, > unsigned int nr_bufs, > diff --git a/xen/include/public/hvm/dm_op.h > b/xen/include/public/hvm/dm_op.h > index f685110..f9c86b8 100644 > --- a/xen/include/public/hvm/dm_op.h > +++ b/xen/include/public/hvm/dm_op.h > @@ -384,6 +384,19 @@ struct xen_dm_op_add_to_physmap { > uint64_aligned_t gpfn; /* Starting GPFN where the GMFNs should appear. > */ > }; > > +/* > + * XEN_DMOP_pin_memory_cacheattr : Pin caching type of RAM space. > + * Identical to XEN_DOMCTL_pin_mem_cacheattr. > + */ > +#define XEN_DMOP_pin_memory_cacheattr 18 > + > +struct xen_dm_op_pin_memory_cacheattr { > + uint64_aligned_t start; /* Start gfn. */ > + uint64_aligned_t end; /* End gfn. */ > + uint32_t type; /* XEN_DOMCTL_MEM_CACHEATTR_* */ > + uint32_t pad; > +}; > + > struct xen_dm_op { > uint32_t op; > uint32_t pad; > @@ -406,6 +419,7 @@ struct xen_dm_op { > map_mem_type_to_ioreq_server; > struct xen_dm_op_remote_shutdown remote_shutdown; > struct xen_dm_op_add_to_physmap add_to_physmap; > + struct xen_dm_op_pin_memory_cacheattr pin_memory_cacheattr; > } u; > }; > > diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst > index d40bac6..fffb308 100644 > --- a/xen/include/xlat.lst > +++ b/xen/include/xlat.lst > @@ -65,6 +65,7 @@ > ? dm_op_inject_msi hvm/dm_op.h > ? dm_op_ioreq_server_range hvm/dm_op.h > ? dm_op_modified_memory hvm/dm_op.h > +? dm_op_pin_memory_cacheattr hvm/dm_op.h > ? dm_op_remote_shutdown hvm/dm_op.h > ? dm_op_set_ioreq_server_state hvm/dm_op.h > ? dm_op_set_isa_irq_level hvm/dm_op.h > -- > 2.9.5 > > > _______________________________________________ > Xen-devel mailing list > Xen-devel@lists.xen.org > https://lists.xen.org/xen-devel
>>> On 23.10.17 at 11:05, <ross.lagerwall@citrix.com> wrote: > --- a/xen/arch/x86/hvm/dm.c > +++ b/xen/arch/x86/hvm/dm.c > @@ -21,6 +21,7 @@ > > #include <asm/hap.h> > #include <asm/hvm/ioreq.h> > +#include <asm/hvm/cacheattr.h> > #include <asm/shadow.h> With this addition moved up a line to result in a properly sorted set Reviewed-by: Jan Beulich <jbeulich@suse.com> However, the series should be extended by a patch removing the no longer needed domctl (wiring the libxc function through to the libxendevicemodel one if necessary), at once converting the XEN_DOMCTL_MEM_CACHEATTR_* values to ones with names suitable for use with this new interface (compatibility defines in domctl.h would need to be retained until both qemu-s have had their patches applied). Jan
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c index 0027567..42d02cc 100644 --- a/xen/arch/x86/hvm/dm.c +++ b/xen/arch/x86/hvm/dm.c @@ -21,6 +21,7 @@ #include <asm/hap.h> #include <asm/hvm/ioreq.h> +#include <asm/hvm/cacheattr.h> #include <asm/shadow.h> #include <xsm/xsm.h> @@ -670,6 +671,22 @@ static int dm_op(const struct dmop_args *op_args) break; } + case XEN_DMOP_pin_memory_cacheattr: + { + const struct xen_dm_op_pin_memory_cacheattr *data = + &op.u.pin_memory_cacheattr; + + if ( data->pad ) + { + rc = -EINVAL; + break; + } + + rc = hvm_set_mem_pinned_cacheattr(d, data->start, data->end, + data->type); + break; + } + default: rc = -EOPNOTSUPP; break; @@ -700,6 +717,7 @@ CHECK_dm_op_inject_event; CHECK_dm_op_inject_msi; CHECK_dm_op_remote_shutdown; CHECK_dm_op_add_to_physmap; +CHECK_dm_op_pin_memory_cacheattr; int compat_dm_op(domid_t domid, unsigned int nr_bufs, diff --git a/xen/include/public/hvm/dm_op.h b/xen/include/public/hvm/dm_op.h index f685110..f9c86b8 100644 --- a/xen/include/public/hvm/dm_op.h +++ b/xen/include/public/hvm/dm_op.h @@ -384,6 +384,19 @@ struct xen_dm_op_add_to_physmap { uint64_aligned_t gpfn; /* Starting GPFN where the GMFNs should appear. */ }; +/* + * XEN_DMOP_pin_memory_cacheattr : Pin caching type of RAM space. + * Identical to XEN_DOMCTL_pin_mem_cacheattr. + */ +#define XEN_DMOP_pin_memory_cacheattr 18 + +struct xen_dm_op_pin_memory_cacheattr { + uint64_aligned_t start; /* Start gfn. */ + uint64_aligned_t end; /* End gfn. 
*/ + uint32_t type; /* XEN_DOMCTL_MEM_CACHEATTR_* */ + uint32_t pad; +}; + struct xen_dm_op { uint32_t op; uint32_t pad; @@ -406,6 +419,7 @@ struct xen_dm_op { map_mem_type_to_ioreq_server; struct xen_dm_op_remote_shutdown remote_shutdown; struct xen_dm_op_add_to_physmap add_to_physmap; + struct xen_dm_op_pin_memory_cacheattr pin_memory_cacheattr; } u; }; diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst index d40bac6..fffb308 100644 --- a/xen/include/xlat.lst +++ b/xen/include/xlat.lst @@ -65,6 +65,7 @@ ? dm_op_inject_msi hvm/dm_op.h ? dm_op_ioreq_server_range hvm/dm_op.h ? dm_op_modified_memory hvm/dm_op.h +? dm_op_pin_memory_cacheattr hvm/dm_op.h ? dm_op_remote_shutdown hvm/dm_op.h ? dm_op_set_ioreq_server_state hvm/dm_op.h ? dm_op_set_isa_irq_level hvm/dm_op.h
Provide XEN_DMOP_pin_memory_cacheattr to allow a deprivileged QEMU to pin the caching type of RAM after moving the VRAM. It is equivalent to XEN_DOMCTL_pin_mem_cacheattr. Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com> --- Changed in v2: * Check pad is 0. xen/arch/x86/hvm/dm.c | 18 ++++++++++++++++++ xen/include/public/hvm/dm_op.h | 14 ++++++++++++++ xen/include/xlat.lst | 1 + 3 files changed, 33 insertions(+)