| Message ID | 20180425112415.12327-2-pagupta@redhat.com (mailing list archive) |
|---|---|
| State | New, archived |
On Wed, Apr 25, 2018 at 4:24 AM, Pankaj Gupta <pagupta@redhat.com> wrote:
> This patch adds virtio-pmem driver for KVM
> guest.

Minor nit, please expand your changelog line wrapping to 72 columns.

>
> Guest reads the persistent memory range
> information from Qemu over VIRTIO and registers
> it on nvdimm_bus. It also creates a nd_region
> object with the persistent memory range
> information so that existing 'nvdimm/pmem'
> driver can reserve this into system memory map.
> This way 'virtio-pmem' driver uses existing
> functionality of pmem driver to register persistent
> memory compatible for DAX capable filesystems.

We need some additional enabling to disable MAP_SYNC for this
configuration. In other words, if fsync() is required then we must
disable the MAP_SYNC optimization. I think this should be a struct
dax_device property looked up at mmap time in each MAP_SYNC capable
->mmap() file operation implementation.
[ adding Jeff directly since he has also been looking at
infrastructure to track when MAP_SYNC should be disabled ]

On Wed, Apr 25, 2018 at 7:21 AM, Dan Williams <dan.j.williams@intel.com> wrote:
> On Wed, Apr 25, 2018 at 4:24 AM, Pankaj Gupta <pagupta@redhat.com> wrote:
>> This patch adds virtio-pmem driver for KVM
>> guest.
>
> Minor nit, please expand your changelog line wrapping to 72 columns.
>
>>
>> Guest reads the persistent memory range
>> information from Qemu over VIRTIO and registers
>> it on nvdimm_bus. It also creates a nd_region
>> object with the persistent memory range
>> information so that existing 'nvdimm/pmem'
>> driver can reserve this into system memory map.
>> This way 'virtio-pmem' driver uses existing
>> functionality of pmem driver to register persistent
>> memory compatible for DAX capable filesystems.
>
> We need some additional enabling to disable MAP_SYNC for this
> configuration. In other words, if fsync() is required then we must
> disable the MAP_SYNC optimization. I think this should be a struct
> dax_device property looked up at mmap time in each MAP_SYNC capable
> ->mmap() file operation implementation.
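To make the suggestion concrete, here is a minimal sketch of such a dax_device property check at mmap() time; dax_synchronous() and daxdev_mapping_supported() are hypothetical names used only for illustration, not APIs that exist in this thread's timeframe:

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>

/*
 * Illustrative only: refuse MAP_SYNC when the backing dax_device still
 * needs an explicit flush (as a virtio-pmem backed region would).
 */
static bool daxdev_mapping_supported(struct vm_area_struct *vma,
                                     struct dax_device *dax_dev)
{
        if (!(vma->vm_flags & VM_SYNC))
                return true;
        if (!IS_DAX(file_inode(vma->vm_file)))
                return false;
        /* a virtio-pmem provider would leave this property cleared */
        return dax_synchronous(dax_dev);
}

Each MAP_SYNC-capable ->mmap() implementation would call a helper like this early and fail the mapping (for example with -EOPNOTSUPP) when it returns false.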
On Wed, Apr 25, 2018 at 04:54:13PM +0530, Pankaj Gupta wrote:
> diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
> index 6d5c3b2..5ebd049 100644
> --- a/include/uapi/linux/virtio_ids.h
> +++ b/include/uapi/linux/virtio_ids.h
> @@ -43,5 +43,6 @@
>  #define VIRTIO_ID_INPUT 18 /* virtio input */
>  #define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
>  #define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
> +#define VIRTIO_ID_PMEM 21 /* virtio pmem */
>
>  #endif /* _LINUX_VIRTIO_IDS_H */

Please register the device id with virtio TC.

> diff --git a/include/uapi/linux/virtio_pmem.h b/include/uapi/linux/virtio_pmem.h
> new file mode 100644
> index 0000000..2ec27cb
> --- /dev/null
> +++ b/include/uapi/linux/virtio_pmem.h
> @@ -0,0 +1,58 @@
> +/* Virtio pmem Driver
> + *
> + * Discovers persitent memory range information
> + * from host and provides a virtio based flushing
> + * interface.
> + */
> +
> +#ifndef _LINUX_VIRTIO_PMEM_H
> +#define _LINUX_VIRTIO_PMEM_H
> +
> +#include <linux/types.h>
> +#include <linux/virtio_types.h>
> +#include <linux/virtio_ids.h>
> +#include <linux/virtio_config.h>
> +#include <linux/virtio_ring.h>
> +
> +
> +struct virtio_pmem_config {
> +

don't add empty lines pls.

> +        uint64_t start;
> +        uint64_t size;

Used LE fields for everything.

> +};
> +
> +struct virtio_pmem {
> +
> +        struct virtio_device *vdev;
> +        struct virtqueue *req_vq;
> +
> +        uint64_t start;
> +        uint64_t size;
> +} __packed;

This does not belong in uapi, and should not be packed either.

> +
> +static struct virtio_device_id id_table[] = {
> +        { VIRTIO_ID_PMEM, VIRTIO_DEV_ANY_ID },
> +        { 0 },
> +};
> +
> +void virtio_pmem_flush(struct device *dev)
> +{
> +        struct scatterlist sg;
> +        struct virtio_device *vdev = dev_to_virtio(dev->parent->parent);
> +        struct virtio_pmem *vpmem = vdev->priv;
> +        char *buf = "FLUSH";
> +        int err;
> +
> +        sg_init_one(&sg, buf, sizeof(buf));
> +
> +        err = virtqueue_add_outbuf(vpmem->req_vq, &sg, 1, buf, GFP_KERNEL);
> +
> +        if (err) {
> +                dev_err(&vdev->dev, "failed to send command to virtio pmem device\n");
> +                return;
> +        }
> +
> +        virtqueue_kick(vpmem->req_vq);
> +};

this doesn't belong in uapi.

> +
> +#endif
> --
> 2.9.3
> > On Wed, Apr 25, 2018 at 04:54:13PM +0530, Pankaj Gupta wrote: > > diff --git a/include/uapi/linux/virtio_ids.h > > b/include/uapi/linux/virtio_ids.h > > index 6d5c3b2..5ebd049 100644 > > --- a/include/uapi/linux/virtio_ids.h > > +++ b/include/uapi/linux/virtio_ids.h > > @@ -43,5 +43,6 @@ > > #define VIRTIO_ID_INPUT 18 /* virtio input */ > > #define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */ > > #define VIRTIO_ID_CRYPTO 20 /* virtio crypto */ > > +#define VIRTIO_ID_PMEM 21 /* virtio pmem */ > > > > #endif /* _LINUX_VIRTIO_IDS_H */ > > Please register the device id with virtio TC. O.K Will create virtio spec and follow the procedure. > > > > diff --git a/include/uapi/linux/virtio_pmem.h > > b/include/uapi/linux/virtio_pmem.h > > new file mode 100644 > > index 0000000..2ec27cb > > --- /dev/null > > +++ b/include/uapi/linux/virtio_pmem.h > > @@ -0,0 +1,58 @@ > > +/* Virtio pmem Driver > > + * > > + * Discovers persitent memory range information > > + * from host and provides a virtio based flushing > > + * interface. > > + */ > > + > > +#ifndef _LINUX_VIRTIO_PMEM_H > > +#define _LINUX_VIRTIO_PMEM_H > > + > > +#include <linux/types.h> > > +#include <linux/virtio_types.h> > > +#include <linux/virtio_ids.h> > > +#include <linux/virtio_config.h> > > +#include <linux/virtio_ring.h> > > + > > + > > +struct virtio_pmem_config { > > + > > don't add empty lines pls. o.k > > > + uint64_t start; > > + uint64_t size; > > Used LE fields for everything. o.k > > > +}; > > + > > +struct virtio_pmem { > > + > > + struct virtio_device *vdev; > > + struct virtqueue *req_vq; > > + > > + uint64_t start; > > + uint64_t size; > > +} __packed; > > This does not belong in uapi, and should not be packed either. o.k > > > + > > +static struct virtio_device_id id_table[] = { > > + { VIRTIO_ID_PMEM, VIRTIO_DEV_ANY_ID }, > > + { 0 }, > > +}; > > + > > +void virtio_pmem_flush(struct device *dev) > > +{ > > + struct scatterlist sg; > > + struct virtio_device *vdev = dev_to_virtio(dev->parent->parent); > > + struct virtio_pmem *vpmem = vdev->priv; > > + char *buf = "FLUSH"; > > + int err; > > + > > + sg_init_one(&sg, buf, sizeof(buf)); > > + > > + err = virtqueue_add_outbuf(vpmem->req_vq, &sg, 1, buf, GFP_KERNEL); > > + > > + if (err) { > > + dev_err(&vdev->dev, "failed to send command to virtio pmem device\n"); > > + return; > > + } > > + > > + virtqueue_kick(vpmem->req_vq); > > +}; > > this doesn't belong in uapi. o.k. Thanks, Pankaj
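Taking the review comments above together, the uapi header would shrink to roughly the following sketch: only virtio spec structures, with little-endian config fields. This is illustrative, not the final header:

/* include/uapi/linux/virtio_pmem.h (illustrative sketch only) */
#ifndef _UAPI_LINUX_VIRTIO_PMEM_H
#define _UAPI_LINUX_VIRTIO_PMEM_H

#include <linux/types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>

/* Device config space: the guest-physical range backed by the host file */
struct virtio_pmem_config {
        __le64 start;
        __le64 size;
};

#endif /* _UAPI_LINUX_VIRTIO_PMEM_H */

struct virtio_pmem, the id_table and virtio_pmem_flush() would then move into drivers/virtio/virtio_pmem.c or a kernel-internal header.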
Dan Williams <dan.j.williams@intel.com> writes:

> [ adding Jeff directly since he has also been looking at
> infrastructure to track when MAP_SYNC should be disabled ]
>
> On Wed, Apr 25, 2018 at 7:21 AM, Dan Williams <dan.j.williams@intel.com> wrote:
>> On Wed, Apr 25, 2018 at 4:24 AM, Pankaj Gupta <pagupta@redhat.com> wrote:
>>> This patch adds virtio-pmem driver for KVM
>>> guest.
>>
>> Minor nit, please expand your changelog line wrapping to 72 columns.
>>
>>>
>>> Guest reads the persistent memory range
>>> information from Qemu over VIRTIO and registers
>>> it on nvdimm_bus. It also creates a nd_region
>>> object with the persistent memory range
>>> information so that existing 'nvdimm/pmem'
>>> driver can reserve this into system memory map.
>>> This way 'virtio-pmem' driver uses existing
>>> functionality of pmem driver to register persistent
>>> memory compatible for DAX capable filesystems.
>>
>> We need some additional enabling to disable MAP_SYNC for this

enable to disable... I like it! ;-)

>> configuration. In other words, if fsync() is required then we must
>> disable the MAP_SYNC optimization. I think this should be a struct
>> dax_device property looked up at mmap time in each MAP_SYNC capable
>> ->mmap() file operation implementation.

Ideally, qemu (seabios?) would advertise a platform capabilities
sub-table that doesn't fill in the flush bits.

-Jeff
On Wed, Apr 25, 2018 at 04:54:13PM +0530, Pankaj Gupta wrote: > This patch adds virtio-pmem driver for KVM > guest. > > Guest reads the persistent memory range > information from Qemu over VIRTIO and registers > it on nvdimm_bus. It also creates a nd_region > object with the persistent memory range > information so that existing 'nvdimm/pmem' > driver can reserve this into system memory map. > This way 'virtio-pmem' driver uses existing > functionality of pmem driver to register persistent > memory compatible for DAX capable filesystems. > > This also provides function to perform guest flush > over VIRTIO from 'pmem' driver when userspace > performs flush on DAX memory range. > > Signed-off-by: Pankaj Gupta <pagupta@redhat.com> > --- > drivers/virtio/Kconfig | 12 ++++ > drivers/virtio/Makefile | 1 + > drivers/virtio/virtio_pmem.c | 118 +++++++++++++++++++++++++++++++++++++++ > include/linux/libnvdimm.h | 4 ++ > include/uapi/linux/virtio_ids.h | 1 + > include/uapi/linux/virtio_pmem.h | 58 +++++++++++++++++++ > 6 files changed, 194 insertions(+) > create mode 100644 drivers/virtio/virtio_pmem.c > create mode 100644 include/uapi/linux/virtio_pmem.h > > diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig > index 3589764..879335d 100644 > --- a/drivers/virtio/Kconfig > +++ b/drivers/virtio/Kconfig > @@ -42,6 +42,18 @@ config VIRTIO_PCI_LEGACY > > If unsure, say Y. > > +config VIRTIO_PMEM > + tristate "Virtio pmem driver" > + depends on VIRTIO > + help > + This driver adds persistent memory range to nd_region and registers > + with nvdimm bus. NVDIMM 'pmem' driver later allocates a persistent > + memory range on the memory information added by this driver. In addition > + to this, 'virtio-pmem' driver also provides a paravirt flushing interface > + from guest to host. > + > + If unsure, say M. > + > config VIRTIO_BALLOON > tristate "Virtio balloon driver" > depends on VIRTIO > diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile > index 3a2b5c5..cbe91c6 100644 > --- a/drivers/virtio/Makefile > +++ b/drivers/virtio/Makefile > @@ -6,3 +6,4 @@ virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o > virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o > obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o > obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o > +obj-$(CONFIG_VIRTIO_PMEM) += virtio_pmem.o > diff --git a/drivers/virtio/virtio_pmem.c b/drivers/virtio/virtio_pmem.c > new file mode 100644 > index 0000000..0906d2d > --- /dev/null > +++ b/drivers/virtio/virtio_pmem.c > @@ -0,0 +1,118 @@ SPDX license line? See Documentation/process/license-rules.rst. > +/* Virtio pmem Driver > + * > + * Discovers persitent memory range information s/persitent/persistent/ > + * from host and provides a virtio based flushing > + * interface. > + */ > + > +#include <linux/virtio.h> > +#include <linux/swap.h> > +#include <linux/workqueue.h> > +#include <linux/delay.h> > +#include <linux/slab.h> > +#include <linux/module.h> > +#include <linux/oom.h> > +#include <linux/wait.h> > +#include <linux/magic.h> > +#include <linux/virtio_pmem.h> > +#include <linux/libnvdimm.h> Are all these headers really needed? delay.h? oom.h? 
> + > +static int init_vq(struct virtio_pmem *vpmem) > +{ > + struct virtqueue *vq; > + > + /* single vq */ > + vpmem->req_vq = vq = virtio_find_single_vq(vpmem->vdev, > + NULL, "flush_queue"); > + > + if (IS_ERR(vq)) > + return PTR_ERR(vq); > + > + return 0; > +}; > + > +static int virtio_pmem_probe(struct virtio_device *vdev) > +{ > + int err = 0; > + struct resource res; > + struct virtio_pmem *vpmem; > + struct nvdimm_bus *nvdimm_bus; > + struct nd_region_desc ndr_desc; > + int nid = dev_to_node(&vdev->dev); > + static struct nvdimm_bus_descriptor nd_desc; > + > + if (!vdev->config->get) { > + dev_err(&vdev->dev, "%s failure: config disabled\n", > + __func__); > + return -EINVAL; > + } > + > + vdev->priv = vpmem = devm_kzalloc(&vdev->dev, sizeof(*vpmem), > + GFP_KERNEL); > + if (!vpmem) { > + err = -ENOMEM; > + goto out; > + } > + > + vpmem->vdev = vdev; > + err = init_vq(vpmem); > + if (err) > + goto out; > + > + virtio_cread(vpmem->vdev, struct virtio_pmem_config, > + start, &vpmem->start); > + virtio_cread(vpmem->vdev, struct virtio_pmem_config, > + size, &vpmem->size); > + > + res.start = vpmem->start; > + res.end = vpmem->start + vpmem->size-1; > + > + memset(&nd_desc, 0, sizeof(nd_desc)); > + nd_desc.provider_name = "virtio-pmem"; > + nd_desc.module = THIS_MODULE; > + nvdimm_bus = nvdimm_bus_register(&vdev->dev, &nd_desc); > + > + if (!nvdimm_bus) > + goto out_nd; > + dev_set_drvdata(&vdev->dev, nvdimm_bus); > + > + memset(&ndr_desc, 0, sizeof(ndr_desc)); > + ndr_desc.res = &res; > + ndr_desc.numa_node = nid; > + set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); > + set_bit(ND_REGION_VIRTIO, &ndr_desc.flags); > + > + if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc)) > + goto out_nd; > + > + virtio_device_ready(vdev); > + return 0; > + > +out_nd: > + nvdimm_bus_unregister(nvdimm_bus); > +out: > + dev_err(&vdev->dev, "failed to register virtio pmem memory\n"); > + vdev->config->del_vqs(vdev); > + return err; > +} > + > +static void virtio_pmem_remove(struct virtio_device *vdev) > +{ > + struct nvdimm_bus *nvdimm_bus = dev_get_drvdata(&vdev->dev); > + > + nvdimm_bus_unregister(nvdimm_bus); > + vdev->config->del_vqs(vdev); > +} > + > +static struct virtio_driver virtio_pmem_driver = { > + .driver.name = KBUILD_MODNAME, > + .driver.owner = THIS_MODULE, > + .id_table = id_table, > + .probe = virtio_pmem_probe, > + .remove = virtio_pmem_remove, > +}; > + > +module_virtio_driver(virtio_pmem_driver); > +MODULE_DEVICE_TABLE(virtio, id_table); > +MODULE_DESCRIPTION("Virtio pmem driver"); > +MODULE_LICENSE("GPL"); > diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h > index 097072c..b1b7f14 100644 > --- a/include/linux/libnvdimm.h > +++ b/include/linux/libnvdimm.h > @@ -58,6 +58,10 @@ enum { > * (ADR) > */ > ND_REGION_PERSIST_MEMCTRL = 2, > + /* > + * region flag indicating to use VIRTIO flush interface for pmem > + */ > + ND_REGION_VIRTIO = 3, Can you add a generic flush callback to libnvdimm instead? That way virtio and other drivers can hook in without hardcoding knowledge of these drivers into libnvdimm. 
> > /* mark newly adjusted resources as requiring a label update */ > DPA_RESOURCE_ADJUSTED = 1 << 0, > diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h > index 6d5c3b2..5ebd049 100644 > --- a/include/uapi/linux/virtio_ids.h > +++ b/include/uapi/linux/virtio_ids.h > @@ -43,5 +43,6 @@ > #define VIRTIO_ID_INPUT 18 /* virtio input */ > #define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */ > #define VIRTIO_ID_CRYPTO 20 /* virtio crypto */ > +#define VIRTIO_ID_PMEM 21 /* virtio pmem */ > > #endif /* _LINUX_VIRTIO_IDS_H */ > diff --git a/include/uapi/linux/virtio_pmem.h b/include/uapi/linux/virtio_pmem.h > new file mode 100644 > index 0000000..2ec27cb > --- /dev/null > +++ b/include/uapi/linux/virtio_pmem.h > @@ -0,0 +1,58 @@ > +/* Virtio pmem Driver > + * > + * Discovers persitent memory range information s/persitent/persistent/ > + * from host and provides a virtio based flushing > + * interface. > + */ > + > +#ifndef _LINUX_VIRTIO_PMEM_H > +#define _LINUX_VIRTIO_PMEM_H > + > +#include <linux/types.h> > +#include <linux/virtio_types.h> > +#include <linux/virtio_ids.h> > +#include <linux/virtio_config.h> > +#include <linux/virtio_ring.h> > + > + > +struct virtio_pmem_config { > + > + uint64_t start; > + uint64_t size; > +}; > + > +struct virtio_pmem { > + > + struct virtio_device *vdev; > + struct virtqueue *req_vq; > + > + uint64_t start; > + uint64_t size; > +} __packed; This is a userspace API header file, it should contain definitions that userspace programs need. struct virtio_pmem is a kernel-internal struct that should not be in the uapi headers. Only define virtio spec structs in this header file (e.g. config space, request structs, etc). > +static struct virtio_device_id id_table[] = { > + { VIRTIO_ID_PMEM, VIRTIO_DEV_ANY_ID }, > + { 0 }, > +}; Why is static variable in the header file? > + > +void virtio_pmem_flush(struct device *dev) This only implements flush command submission, not completion. Maybe the next patch will implement that but it's a little strange to only see half of the flush operation. Please put the whole flush operation in one patch so it can be reviewed easily. At this point I don't know if you've forgotten to implement wait for completion. > +{ Why is this function body in the header file? > + struct scatterlist sg; > + struct virtio_device *vdev = dev_to_virtio(dev->parent->parent); > + struct virtio_pmem *vpmem = vdev->priv; > + char *buf = "FLUSH"; I'm surprised this compiles without a warning. String literals should be constant but the char pointer isn't constant. > + int err; > + > + sg_init_one(&sg, buf, sizeof(buf)); > + > + err = virtqueue_add_outbuf(vpmem->req_vq, &sg, 1, buf, GFP_KERNEL); > + > + if (err) { > + dev_err(&vdev->dev, "failed to send command to virtio pmem device\n"); > + return; > + } > + > + virtqueue_kick(vpmem->req_vq); Is any locking necessary? Two CPUs must not invoke virtio_pmem_flush() at the same time. Not sure if anything guarantees this, maybe you're relying on libnvdimm but I haven't checked. > +}; > + > +#endif > -- > 2.9.3 > >
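To illustrate the missing wait-for-completion Stefan points out above, one possible shape is a per-request completion that the virtqueue callback signals once the host has acknowledged the flush; the structure and names below are assumptions for illustration, not code from this series:

#include <linux/virtio.h>
#include <linux/completion.h>

/* Hypothetical per-request state; a pointer to it is the virtqueue token. */
struct virtio_pmem_request {
        struct completion host_ack;
        char name[8];                   /* payload handed to the host */
};

/* Virtqueue callback, registered instead of NULL in init_vq(). */
static void virtio_pmem_host_ack(struct virtqueue *vq)
{
        struct virtio_pmem_request *req;
        unsigned int len;

        /* needs the same lock that serializes virtqueue_add_outbuf() */
        while ((req = virtqueue_get_buf(vq, &len)) != NULL)
                complete(&req->host_ack);
}

The submission path would then init_completion(&req->host_ack), pass req as the buffer token, kick the queue, and wait_for_completion(&req->host_ack) before returning to the fsync() caller.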
> > This patch adds virtio-pmem driver for KVM > > guest. > > > > Guest reads the persistent memory range > > information from Qemu over VIRTIO and registers > > it on nvdimm_bus. It also creates a nd_region > > object with the persistent memory range > > information so that existing 'nvdimm/pmem' > > driver can reserve this into system memory map. > > This way 'virtio-pmem' driver uses existing > > functionality of pmem driver to register persistent > > memory compatible for DAX capable filesystems. > > > > This also provides function to perform guest flush > > over VIRTIO from 'pmem' driver when userspace > > performs flush on DAX memory range. > > > > Signed-off-by: Pankaj Gupta <pagupta@redhat.com> > > --- > > drivers/virtio/Kconfig | 12 ++++ > > drivers/virtio/Makefile | 1 + > > drivers/virtio/virtio_pmem.c | 118 > > +++++++++++++++++++++++++++++++++++++++ > > include/linux/libnvdimm.h | 4 ++ > > include/uapi/linux/virtio_ids.h | 1 + > > include/uapi/linux/virtio_pmem.h | 58 +++++++++++++++++++ > > 6 files changed, 194 insertions(+) > > create mode 100644 drivers/virtio/virtio_pmem.c > > create mode 100644 include/uapi/linux/virtio_pmem.h > > > > diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig > > index 3589764..879335d 100644 > > --- a/drivers/virtio/Kconfig > > +++ b/drivers/virtio/Kconfig > > @@ -42,6 +42,18 @@ config VIRTIO_PCI_LEGACY > > > > If unsure, say Y. > > > > +config VIRTIO_PMEM > > + tristate "Virtio pmem driver" > > + depends on VIRTIO > > + help > > + This driver adds persistent memory range to nd_region and registers > > + with nvdimm bus. NVDIMM 'pmem' driver later allocates a persistent > > + memory range on the memory information added by this driver. In addition > > + to this, 'virtio-pmem' driver also provides a paravirt flushing > > interface > > + from guest to host. > > + > > + If unsure, say M. > > + > > config VIRTIO_BALLOON > > tristate "Virtio balloon driver" > > depends on VIRTIO > > diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile > > index 3a2b5c5..cbe91c6 100644 > > --- a/drivers/virtio/Makefile > > +++ b/drivers/virtio/Makefile > > @@ -6,3 +6,4 @@ virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o > > virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o > > obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o > > obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o > > +obj-$(CONFIG_VIRTIO_PMEM) += virtio_pmem.o > > diff --git a/drivers/virtio/virtio_pmem.c b/drivers/virtio/virtio_pmem.c > > new file mode 100644 > > index 0000000..0906d2d > > --- /dev/null > > +++ b/drivers/virtio/virtio_pmem.c > > @@ -0,0 +1,118 @@ > > SPDX license line? See Documentation/process/license-rules.rst. o.k. > > > +/* Virtio pmem Driver > > + * > > + * Discovers persitent memory range information > > s/persitent/persistent/ > > > + * from host and provides a virtio based flushing > > + * interface. > > + */ > > + > > +#include <linux/virtio.h> > > +#include <linux/swap.h> > > +#include <linux/workqueue.h> > > +#include <linux/delay.h> > > +#include <linux/slab.h> > > +#include <linux/module.h> > > +#include <linux/oom.h> > > +#include <linux/wait.h> > > +#include <linux/magic.h> > > +#include <linux/virtio_pmem.h> > > +#include <linux/libnvdimm.h> > > Are all these headers really needed? delay.h? oom.h? Will remove not required ones. There are from previous RFC where used *memremap* and other mm & block includes. 
> > > + > > +static int init_vq(struct virtio_pmem *vpmem) > > +{ > > + struct virtqueue *vq; > > + > > + /* single vq */ > > + vpmem->req_vq = vq = virtio_find_single_vq(vpmem->vdev, > > + NULL, "flush_queue"); > > + > > + if (IS_ERR(vq)) > > + return PTR_ERR(vq); > > + > > + return 0; > > +}; > > + > > +static int virtio_pmem_probe(struct virtio_device *vdev) > > +{ > > + int err = 0; > > + struct resource res; > > + struct virtio_pmem *vpmem; > > + struct nvdimm_bus *nvdimm_bus; > > + struct nd_region_desc ndr_desc; > > + int nid = dev_to_node(&vdev->dev); > > + static struct nvdimm_bus_descriptor nd_desc; > > + > > + if (!vdev->config->get) { > > + dev_err(&vdev->dev, "%s failure: config disabled\n", > > + __func__); > > + return -EINVAL; > > + } > > + > > + vdev->priv = vpmem = devm_kzalloc(&vdev->dev, sizeof(*vpmem), > > + GFP_KERNEL); > > + if (!vpmem) { > > + err = -ENOMEM; > > + goto out; > > + } > > + > > + vpmem->vdev = vdev; > > + err = init_vq(vpmem); > > + if (err) > > + goto out; > > + > > + virtio_cread(vpmem->vdev, struct virtio_pmem_config, > > + start, &vpmem->start); > > + virtio_cread(vpmem->vdev, struct virtio_pmem_config, > > + size, &vpmem->size); > > + > > + res.start = vpmem->start; > > + res.end = vpmem->start + vpmem->size-1; > > + > > + memset(&nd_desc, 0, sizeof(nd_desc)); > > + nd_desc.provider_name = "virtio-pmem"; > > + nd_desc.module = THIS_MODULE; > > + nvdimm_bus = nvdimm_bus_register(&vdev->dev, &nd_desc); > > + > > + if (!nvdimm_bus) > > + goto out_nd; > > + dev_set_drvdata(&vdev->dev, nvdimm_bus); > > + > > + memset(&ndr_desc, 0, sizeof(ndr_desc)); > > + ndr_desc.res = &res; > > + ndr_desc.numa_node = nid; > > + set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); > > + set_bit(ND_REGION_VIRTIO, &ndr_desc.flags); > > + > > + if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc)) > > + goto out_nd; > > + > > + virtio_device_ready(vdev); > > + return 0; > > + > > +out_nd: > > + nvdimm_bus_unregister(nvdimm_bus); > > +out: > > + dev_err(&vdev->dev, "failed to register virtio pmem memory\n"); > > + vdev->config->del_vqs(vdev); > > + return err; > > +} > > + > > +static void virtio_pmem_remove(struct virtio_device *vdev) > > +{ > > + struct nvdimm_bus *nvdimm_bus = dev_get_drvdata(&vdev->dev); > > + > > + nvdimm_bus_unregister(nvdimm_bus); > > + vdev->config->del_vqs(vdev); > > +} > > + > > +static struct virtio_driver virtio_pmem_driver = { > > + .driver.name = KBUILD_MODNAME, > > + .driver.owner = THIS_MODULE, > > + .id_table = id_table, > > + .probe = virtio_pmem_probe, > > + .remove = virtio_pmem_remove, > > +}; > > + > > +module_virtio_driver(virtio_pmem_driver); > > +MODULE_DEVICE_TABLE(virtio, id_table); > > +MODULE_DESCRIPTION("Virtio pmem driver"); > > +MODULE_LICENSE("GPL"); > > diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h > > index 097072c..b1b7f14 100644 > > --- a/include/linux/libnvdimm.h > > +++ b/include/linux/libnvdimm.h > > @@ -58,6 +58,10 @@ enum { > > * (ADR) > > */ > > ND_REGION_PERSIST_MEMCTRL = 2, > > + /* > > + * region flag indicating to use VIRTIO flush interface for pmem > > + */ > > + ND_REGION_VIRTIO = 3, > > Can you add a generic flush callback to libnvdimm instead? That way > virtio and other drivers can hook in without hardcoding knowledge of > these drivers into libnvdimm. Sure! Working on this. Same suggestion by Dan. 
> > > > > /* mark newly adjusted resources as requiring a label update */ > > DPA_RESOURCE_ADJUSTED = 1 << 0, > > diff --git a/include/uapi/linux/virtio_ids.h > > b/include/uapi/linux/virtio_ids.h > > index 6d5c3b2..5ebd049 100644 > > --- a/include/uapi/linux/virtio_ids.h > > +++ b/include/uapi/linux/virtio_ids.h > > @@ -43,5 +43,6 @@ > > #define VIRTIO_ID_INPUT 18 /* virtio input */ > > #define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */ > > #define VIRTIO_ID_CRYPTO 20 /* virtio crypto */ > > +#define VIRTIO_ID_PMEM 21 /* virtio pmem */ > > > > #endif /* _LINUX_VIRTIO_IDS_H */ > > diff --git a/include/uapi/linux/virtio_pmem.h > > b/include/uapi/linux/virtio_pmem.h > > new file mode 100644 > > index 0000000..2ec27cb > > --- /dev/null > > +++ b/include/uapi/linux/virtio_pmem.h > > @@ -0,0 +1,58 @@ > > +/* Virtio pmem Driver > > + * > > + * Discovers persitent memory range information > > s/persitent/persistent/ > > > + * from host and provides a virtio based flushing > > + * interface. > > + */ > > + > > +#ifndef _LINUX_VIRTIO_PMEM_H > > +#define _LINUX_VIRTIO_PMEM_H > > + > > +#include <linux/types.h> > > +#include <linux/virtio_types.h> > > +#include <linux/virtio_ids.h> > > +#include <linux/virtio_config.h> > > +#include <linux/virtio_ring.h> > > + > > + > > +struct virtio_pmem_config { > > + > > + uint64_t start; > > + uint64_t size; > > +}; > > + > > +struct virtio_pmem { > > + > > + struct virtio_device *vdev; > > + struct virtqueue *req_vq; > > + > > + uint64_t start; > > + uint64_t size; > > +} __packed; > > This is a userspace API header file, it should contain definitions that > userspace programs need. struct virtio_pmem is a kernel-internal struct > that should not be in the uapi headers. > > Only define virtio spec structs in this header file (e.g. config space, > request structs, etc). o.k > > > +static struct virtio_device_id id_table[] = { > > + { VIRTIO_ID_PMEM, VIRTIO_DEV_ANY_ID }, > > + { 0 }, > > +}; > > Why is static variable in the header file? mistake :) > > > + > > +void virtio_pmem_flush(struct device *dev) > > This only implements flush command submission, not completion. Maybe > the next patch will implement that but it's a little strange to only see > half of the flush operation. > > Please put the whole flush operation in one patch so it can be reviewed > easily. At this point I don't know if you've forgotten to implement > wait for completion. > > > +{ > > Why is this function body in the header file? Because I was trying to use it from pmem module without loading virtio_pmem driver or load it dynamically. I think adding flush function in 'nd_region' struct and set it as per region type looks better solution. Suggested by Dan & you. > > > + struct scatterlist sg; > > + struct virtio_device *vdev = dev_to_virtio(dev->parent->parent); > > + struct virtio_pmem *vpmem = vdev->priv; > > + char *buf = "FLUSH"; > > I'm surprised this compiles without a warning. String literals should > be constant but the char pointer isn't constant. Point taken. > > > + int err; > > + > > + sg_init_one(&sg, buf, sizeof(buf)); > > + > > + err = virtqueue_add_outbuf(vpmem->req_vq, &sg, 1, buf, GFP_KERNEL); > > + > > + if (err) { > > + dev_err(&vdev->dev, "failed to send command to virtio pmem device\n"); > > + return; > > + } > > + > > + virtqueue_kick(vpmem->req_vq); > > Is any locking necessary? Two CPUs must not invoke virtio_pmem_flush() > at the same time. Not sure if anything guarantees this, maybe you're > relying on libnvdimm but I haven't checked. 
I thought about it to some extent, and wanted to go ahead with simple version first:

- I think file 'inode -> locking' sill is there for request on single file.
- For multiple files, our aim is to just flush the backend block image.
- Even there is collision for virt queue read/write entry it should just trigger a Qemu fsync.
  We just want most recent flush to assure guest writes are synced properly.

Important point here: We are doing entire block fsync for guest virtual disk.

>
> > +};
> > +
> > +#endif
> > --
> > 2.9.3
> >
>
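For context on the generic libnvdimm flush hook suggested above (and which Pankaj agrees to pursue), a rough sketch of how the pmem sync path could prefer a provider callback over the built-in flush; nd_region_provider_flush() is a hypothetical accessor, not an existing libnvdimm interface:

#include <linux/libnvdimm.h>

static void pmem_region_flush(struct nd_region *nd_region)
{
        /* hypothetical: callback supplied through nd_region_desc at probe */
        void (*flush)(struct nd_region *) = nd_region_provider_flush(nd_region);

        if (flush)
                flush(nd_region);               /* e.g. virtio_pmem_flush() */
        else
                nvdimm_flush(nd_region);        /* existing ADR/WPQ flush path */
}

With such a hook, virtio-pmem's probe would only have to set the callback in its nd_region_desc, and the pmem driver would not need to know about virtio at all.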
>
> Dan Williams <dan.j.williams@intel.com> writes:
>
> > [ adding Jeff directly since he has also been looking at
> > infrastructure to track when MAP_SYNC should be disabled ]
> >
> > On Wed, Apr 25, 2018 at 7:21 AM, Dan Williams <dan.j.williams@intel.com>
> > wrote:
> >> On Wed, Apr 25, 2018 at 4:24 AM, Pankaj Gupta <pagupta@redhat.com> wrote:
> >>> This patch adds virtio-pmem driver for KVM
> >>> guest.
> >>
> >> Minor nit, please expand your changelog line wrapping to 72 columns.
> >>
> >>>
> >>> Guest reads the persistent memory range
> >>> information from Qemu over VIRTIO and registers
> >>> it on nvdimm_bus. It also creates a nd_region
> >>> object with the persistent memory range
> >>> information so that existing 'nvdimm/pmem'
> >>> driver can reserve this into system memory map.
> >>> This way 'virtio-pmem' driver uses existing
> >>> functionality of pmem driver to register persistent
> >>> memory compatible for DAX capable filesystems.
> >>
> >> We need some additional enabling to disable MAP_SYNC for this
>
> enable to disable... I like it! ;-)
>
> >> configuration. In other words, if fsync() is required then we must
> >> disable the MAP_SYNC optimization. I think this should be a struct
> >> dax_device property looked up at mmap time in each MAP_SYNC capable
> >> ->mmap() file operation implementation.

I understand you mean we want to disable 'MAP_SYNC' optimization as we are
relying on additional fsync. You mean if we add a property/flag in
dax_device struct and its set, disable 'MAP_SYNC' accordingly during mmap
time for corresponding filesystems?

>
> Ideally, qemu (seabios?) would advertise a platform capabilities
> sub-table that doesn't fill in the flush bits.

Could you please elaborate on this, how its related to disabling
MAP_SYNC? We are not doing entire nvdimm device emulation.

>
> -Jeff
>
Pankaj Gupta <pagupta@redhat.com> writes:

>> Ideally, qemu (seabios?) would advertise a platform capabilities
>> sub-table that doesn't fill in the flush bits.
>
> Could you please elaborate on this, how its related to disabling
> MAP_SYNC? We are not doing entire nvdimm device emulation.

My mistake. If you're not providing an NFIT, then you can ignore this
comment.

I'll have a closer look at your patches next week.

-Jeff
On Thu, Apr 26, 2018 at 11:44:59AM -0400, Pankaj Gupta wrote:
> > > +        int err;
> > > +
> > > +        sg_init_one(&sg, buf, sizeof(buf));
> > > +
> > > +        err = virtqueue_add_outbuf(vpmem->req_vq, &sg, 1, buf, GFP_KERNEL);
> > > +
> > > +        if (err) {
> > > +                dev_err(&vdev->dev, "failed to send command to virtio pmem device\n");
> > > +                return;
> > > +        }
> > > +
> > > +        virtqueue_kick(vpmem->req_vq);
> >
> > Is any locking necessary? Two CPUs must not invoke virtio_pmem_flush()
> > at the same time. Not sure if anything guarantees this, maybe you're
> > relying on libnvdimm but I haven't checked.
>
> I thought about it to some extent, and wanted to go ahead with simple version first:
>
> - I think file 'inode -> locking' sill is there for request on single file.
> - For multiple files, our aim is to just flush the backend block image.
> - Even there is collision for virt queue read/write entry it should just trigger a Qemu fsync.
> We just want most recent flush to assure guest writes are synced properly.
>
> Important point here: We are doing entire block fsync for guest virtual disk.

I don't understand your answer. Is locking necessary or not?

From the virtqueue_add_outbuf() documentation:

 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).

Stefan
> > > > +        int err;
> > > > +
> > > > +        sg_init_one(&sg, buf, sizeof(buf));
> > > > +
> > > > +        err = virtqueue_add_outbuf(vpmem->req_vq, &sg, 1, buf, GFP_KERNEL);
> > > > +
> > > > +        if (err) {
> > > > +                dev_err(&vdev->dev, "failed to send command to virtio pmem
> > > > device\n");
> > > > +                return;
> > > > +        }
> > > > +
> > > > +        virtqueue_kick(vpmem->req_vq);
> > >
> > > Is any locking necessary? Two CPUs must not invoke virtio_pmem_flush()
> > > at the same time. Not sure if anything guarantees this, maybe you're
> > > relying on libnvdimm but I haven't checked.
> >
> > I thought about it to some extent, and wanted to go ahead with simple
> > version first:
> >
> > - I think file 'inode -> locking' sill is there for request on single file.
> > - For multiple files, our aim is to just flush the backend block image.
> > - Even there is collision for virt queue read/write entry it should just
> > trigger a Qemu fsync.
> > We just want most recent flush to assure guest writes are synced
> > properly.
> >
> > Important point here: We are doing entire block fsync for guest virtual
> > disk.
>
> I don't understand your answer. Is locking necessary or not?

It will be required with other changes.

>
> From the virtqueue_add_outbuf() documentation:
>
>  * Caller must ensure we don't call this with other virtqueue operations
>  * at the same time (except where noted).

Yes, I also saw it. But thought if can avoid it with current functionality. :)

Thanks,
Pankaj
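A minimal sketch of the serialization the virtqueue documentation asks for, using a per-device spinlock around virtqueue_add_outbuf() and virtqueue_kick(); the pmem_lock member and the allocated payload are assumptions for illustration, not code from this series:

#include <linux/virtio.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

/* Hypothetical device state with a lock guarding all req_vq operations. */
struct virtio_pmem {
        struct virtio_device *vdev;
        struct virtqueue *req_vq;
        spinlock_t pmem_lock;           /* initialized in probe */
        u64 start;
        u64 size;
};

static int virtio_pmem_flush(struct virtio_pmem *vpmem)
{
        struct scatterlist sg;
        unsigned long flags;
        void *buf;
        int err;

        /* kmalloc'd payload rather than a string literal */
        buf = kzalloc(8, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        sg_init_one(&sg, buf, 8);

        spin_lock_irqsave(&vpmem->pmem_lock, flags);
        err = virtqueue_add_outbuf(vpmem->req_vq, &sg, 1, buf, GFP_ATOMIC);
        if (!err)
                virtqueue_kick(vpmem->req_vq);
        spin_unlock_irqrestore(&vpmem->pmem_lock, flags);

        if (err) {
                kfree(buf);
                return err;
        }
        /* the completion callback (see the earlier sketch) would free buf */
        return 0;
}

The same lock would also have to cover virtqueue_get_buf() in the completion callback, since that is another operation on req_vq.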
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 3589764..879335d 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -42,6 +42,18 @@ config VIRTIO_PCI_LEGACY If unsure, say Y. +config VIRTIO_PMEM + tristate "Virtio pmem driver" + depends on VIRTIO + help + This driver adds persistent memory range to nd_region and registers + with nvdimm bus. NVDIMM 'pmem' driver later allocates a persistent + memory range on the memory information added by this driver. In addition + to this, 'virtio-pmem' driver also provides a paravirt flushing interface + from guest to host. + + If unsure, say M. + config VIRTIO_BALLOON tristate "Virtio balloon driver" depends on VIRTIO diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile index 3a2b5c5..cbe91c6 100644 --- a/drivers/virtio/Makefile +++ b/drivers/virtio/Makefile @@ -6,3 +6,4 @@ virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o +obj-$(CONFIG_VIRTIO_PMEM) += virtio_pmem.o diff --git a/drivers/virtio/virtio_pmem.c b/drivers/virtio/virtio_pmem.c new file mode 100644 index 0000000..0906d2d --- /dev/null +++ b/drivers/virtio/virtio_pmem.c @@ -0,0 +1,118 @@ +/* Virtio pmem Driver + * + * Discovers persitent memory range information + * from host and provides a virtio based flushing + * interface. + */ + +#include <linux/virtio.h> +#include <linux/swap.h> +#include <linux/workqueue.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/oom.h> +#include <linux/wait.h> +#include <linux/magic.h> +#include <linux/virtio_pmem.h> +#include <linux/libnvdimm.h> + +static int init_vq(struct virtio_pmem *vpmem) +{ + struct virtqueue *vq; + + /* single vq */ + vpmem->req_vq = vq = virtio_find_single_vq(vpmem->vdev, + NULL, "flush_queue"); + + if (IS_ERR(vq)) + return PTR_ERR(vq); + + return 0; +}; + +static int virtio_pmem_probe(struct virtio_device *vdev) +{ + int err = 0; + struct resource res; + struct virtio_pmem *vpmem; + struct nvdimm_bus *nvdimm_bus; + struct nd_region_desc ndr_desc; + int nid = dev_to_node(&vdev->dev); + static struct nvdimm_bus_descriptor nd_desc; + + if (!vdev->config->get) { + dev_err(&vdev->dev, "%s failure: config disabled\n", + __func__); + return -EINVAL; + } + + vdev->priv = vpmem = devm_kzalloc(&vdev->dev, sizeof(*vpmem), + GFP_KERNEL); + if (!vpmem) { + err = -ENOMEM; + goto out; + } + + vpmem->vdev = vdev; + err = init_vq(vpmem); + if (err) + goto out; + + virtio_cread(vpmem->vdev, struct virtio_pmem_config, + start, &vpmem->start); + virtio_cread(vpmem->vdev, struct virtio_pmem_config, + size, &vpmem->size); + + res.start = vpmem->start; + res.end = vpmem->start + vpmem->size-1; + + memset(&nd_desc, 0, sizeof(nd_desc)); + nd_desc.provider_name = "virtio-pmem"; + nd_desc.module = THIS_MODULE; + nvdimm_bus = nvdimm_bus_register(&vdev->dev, &nd_desc); + + if (!nvdimm_bus) + goto out_nd; + dev_set_drvdata(&vdev->dev, nvdimm_bus); + + memset(&ndr_desc, 0, sizeof(ndr_desc)); + ndr_desc.res = &res; + ndr_desc.numa_node = nid; + set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); + set_bit(ND_REGION_VIRTIO, &ndr_desc.flags); + + if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc)) + goto out_nd; + + virtio_device_ready(vdev); + return 0; + +out_nd: + nvdimm_bus_unregister(nvdimm_bus); +out: + dev_err(&vdev->dev, "failed to register virtio pmem memory\n"); + vdev->config->del_vqs(vdev); + return err; +} + 
+static void virtio_pmem_remove(struct virtio_device *vdev) +{ + struct nvdimm_bus *nvdimm_bus = dev_get_drvdata(&vdev->dev); + + nvdimm_bus_unregister(nvdimm_bus); + vdev->config->del_vqs(vdev); +} + +static struct virtio_driver virtio_pmem_driver = { + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virtio_pmem_probe, + .remove = virtio_pmem_remove, +}; + +module_virtio_driver(virtio_pmem_driver); +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("Virtio pmem driver"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 097072c..b1b7f14 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -58,6 +58,10 @@ enum { * (ADR) */ ND_REGION_PERSIST_MEMCTRL = 2, + /* + * region flag indicating to use VIRTIO flush interface for pmem + */ + ND_REGION_VIRTIO = 3, /* mark newly adjusted resources as requiring a label update */ DPA_RESOURCE_ADJUSTED = 1 << 0, diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h index 6d5c3b2..5ebd049 100644 --- a/include/uapi/linux/virtio_ids.h +++ b/include/uapi/linux/virtio_ids.h @@ -43,5 +43,6 @@ #define VIRTIO_ID_INPUT 18 /* virtio input */ #define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */ #define VIRTIO_ID_CRYPTO 20 /* virtio crypto */ +#define VIRTIO_ID_PMEM 21 /* virtio pmem */ #endif /* _LINUX_VIRTIO_IDS_H */ diff --git a/include/uapi/linux/virtio_pmem.h b/include/uapi/linux/virtio_pmem.h new file mode 100644 index 0000000..2ec27cb --- /dev/null +++ b/include/uapi/linux/virtio_pmem.h @@ -0,0 +1,58 @@ +/* Virtio pmem Driver + * + * Discovers persitent memory range information + * from host and provides a virtio based flushing + * interface. + */ + +#ifndef _LINUX_VIRTIO_PMEM_H +#define _LINUX_VIRTIO_PMEM_H + +#include <linux/types.h> +#include <linux/virtio_types.h> +#include <linux/virtio_ids.h> +#include <linux/virtio_config.h> +#include <linux/virtio_ring.h> + + +struct virtio_pmem_config { + + uint64_t start; + uint64_t size; +}; + +struct virtio_pmem { + + struct virtio_device *vdev; + struct virtqueue *req_vq; + + uint64_t start; + uint64_t size; +} __packed; + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_PMEM, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +void virtio_pmem_flush(struct device *dev) +{ + struct scatterlist sg; + struct virtio_device *vdev = dev_to_virtio(dev->parent->parent); + struct virtio_pmem *vpmem = vdev->priv; + char *buf = "FLUSH"; + int err; + + sg_init_one(&sg, buf, sizeof(buf)); + + err = virtqueue_add_outbuf(vpmem->req_vq, &sg, 1, buf, GFP_KERNEL); + + if (err) { + dev_err(&vdev->dev, "failed to send command to virtio pmem device\n"); + return; + } + + virtqueue_kick(vpmem->req_vq); +}; + +#endif
This patch adds virtio-pmem driver for KVM
guest.

Guest reads the persistent memory range
information from Qemu over VIRTIO and registers
it on nvdimm_bus. It also creates a nd_region
object with the persistent memory range
information so that existing 'nvdimm/pmem'
driver can reserve this into system memory map.
This way 'virtio-pmem' driver uses existing
functionality of pmem driver to register persistent
memory compatible for DAX capable filesystems.

This also provides function to perform guest flush
over VIRTIO from 'pmem' driver when userspace
performs flush on DAX memory range.

Signed-off-by: Pankaj Gupta <pagupta@redhat.com>
---
 drivers/virtio/Kconfig           |  12 ++++
 drivers/virtio/Makefile          |   1 +
 drivers/virtio/virtio_pmem.c     | 118 +++++++++++++++++++++++++++++++++++++++
 include/linux/libnvdimm.h        |   4 ++
 include/uapi/linux/virtio_ids.h  |   1 +
 include/uapi/linux/virtio_pmem.h |  58 +++++++++++++++++++
 6 files changed, 194 insertions(+)
 create mode 100644 drivers/virtio/virtio_pmem.c
 create mode 100644 include/uapi/linux/virtio_pmem.h