@@ -8,6 +8,8 @@
#include <linux/kmemleak.h>
#include <linux/sched/task.h>
+#include <xen/xen.h>
+
#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/init.h>
@@ -1065,3 +1067,16 @@ unsigned long max_swapfile_size(void)
return pages;
}
#endif
+
+#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
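+/*
+ * Returning true makes the virtio core require VIRTIO_F_ACCESS_PLATFORM from
+ * devices and route all virtio buffer addresses through the DMA API, which
+ * is what lets Xen guests use grant mappings instead of exposing all of
+ * their memory to the backend.
+ */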
+int arch_has_restricted_virtio_memory_access(void)
+{
+ if (IS_ENABLED(CONFIG_XEN_PV_VIRTIO) && xen_pv_domain())
+ return 1;
+ if (IS_ENABLED(CONFIG_XEN_HVM_VIRTIO_GRANT) && xen_hvm_domain())
+ return 1;
+
+ return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
+}
+EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);
+#endif
@@ -77,8 +77,3 @@ void __init mem_encrypt_init(void)
print_mem_encrypt_feature_info();
}
-int arch_has_restricted_virtio_memory_access(void)
-{
- return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
-}
-EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);
@@ -92,3 +92,12 @@ config XEN_DOM0
select X86_X2APIC if XEN_PVH && X86_64
help
Support running as a Xen Dom0 guest.
+
+config XEN_PV_VIRTIO
+ bool "Xen virtio support for PV guests"
+ depends on XEN_VIRTIO && XEN_PV
+ default y
+ help
+ Support virtio devices when running as a Xen paravirtualized (PV)
+ guest. This requires support on the backend side (qemu or kernel,
+ depending on the virtio device types used).
@@ -335,4 +335,24 @@ config XEN_UNPOPULATED_ALLOC
having to balloon out RAM regions in order to obtain physical memory
space to create such mappings.
+config XEN_VIRTIO
+ bool "Xen virtio support"
+ default n
+ depends on VIRTIO && DMA_OPS
+ select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
+ help
+ Enable virtio support when running as a Xen guest. Depending on the
+ guest type, this will require special support on the backend side
+ (qemu or kernel, depending on the virtio device types used).
+
+config XEN_HVM_VIRTIO_GRANT
+ bool "Require virtio devices in fully virtualized guests to use grant mappings"
+ depends on XEN_VIRTIO && X86_64
+ default y
+ help
+ Require virtio devices in fully virtualized guests to use grant
+ mappings. This avoids having to give the backend the right to map
+ all of the guest's memory, but needs support on the backend side
+ (qemu or kernel, depending on the virtio device types used).
+
endmenu
@@ -39,3 +39,4 @@ xen-gntalloc-y := gntalloc.o
xen-privcmd-y := privcmd.o privcmd-buf.o
obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF) += xen-front-pgdir-shbuf.o
obj-$(CONFIG_XEN_UNPOPULATED_ALLOC) += unpopulated-alloc.o
+obj-$(CONFIG_XEN_VIRTIO) += xen-virtio.o
new file mode 100644
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/******************************************************************************
+ * Xen virtio driver - enables using virtio devices in Xen guests.
+ *
+ * Copyright (c) 2021, Juergen Gross <jgross@suse.com>
+ */
+
+#include <linux/module.h>
+#include <linux/dma-map-ops.h>
+#include <linux/pci.h>
+#include <linux/pfn.h>
+#include <linux/virtio_config.h>
+#include <xen/xen.h>
+#include <xen/grant_table.h>
+
+#define XEN_GRANT_ADDR_OFF 0x8000000000000000ULL
+
+static inline dma_addr_t grant_to_dma(grant_ref_t grant)
+{
+ return XEN_GRANT_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
+}
+
+static inline grant_ref_t dma_to_grant(dma_addr_t dma)
+{
+ return (grant_ref_t)((dma & ~XEN_GRANT_ADDR_OFF) >> PAGE_SHIFT);
+}
+
+/*
+ * DMA ops for Xen virtio frontends.
+ *
+ * Used to act as a kind of software IOMMU for Xen guests by using grants as
+ * DMA addresses.
+ * Such a DMA address is formed by using the grant reference as a frame
+ * number and setting the highest address bit (this bit is for the backend
+ * to be able to distinguish it from e.g. an MMIO address).
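+ * For example, with 4 KiB pages (PAGE_SHIFT == 12) grant reference 5 is
+ * encoded as the DMA address 0x8000000000005000.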
+ *
+ * Note that for now we hard-wire dom0 to be the backend domain. In order to
+ * support any domain as a backend we'd need to add a way to communicate the
+ * domid of this backend, e.g. via Xenstore or via the PCI device's config
+ * space.
+ */
+static void *xen_virtio_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs)
+{
+ unsigned int n_pages = PFN_UP(size);
+ unsigned int i;
+ unsigned long pfn;
+ grant_ref_t grant;
+ void *ret;
+
+ ret = (void *)__get_free_pages(gfp, get_order(size));
+ if (!ret)
+ return NULL;
+
+ pfn = virt_to_pfn(ret);
+
+ if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
+ free_pages((unsigned long)ret, get_order(size));
+ return NULL;
+ }
+
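+ /* Grant the backend (hard-wired to dom0 for now) access to each page. */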
+ for (i = 0; i < n_pages; i++) {
+ gnttab_grant_foreign_access_ref(grant + i, 0,
+ pfn_to_gfn(pfn + i), 0);
+ }
+
+ *dma_handle = grant_to_dma(grant);
+
+ return ret;
+}
+
+static void xen_virtio_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ unsigned int n_pages = PFN_UP(size);
+ unsigned int i;
+ grant_ref_t grant;
+
+ grant = dma_to_grant(dma_handle);
+
+ for (i = 0; i < n_pages; i++)
+ gnttab_end_foreign_access_ref(grant + i);
+
+ gnttab_free_grant_reference_seq(grant, n_pages);
+
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+
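+/* Not expected to be used for virtio devices: warn and fail if called. */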
+static struct page *xen_virtio_dma_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle,
+ enum dma_data_direction dir,
+ gfp_t gfp)
+{
+ WARN_ONCE(1, "xen_virtio_dma_alloc_pages size %zu\n", size);
+ return NULL;
+}
+
+static void xen_virtio_dma_free_pages(struct device *dev, size_t size,
+ struct page *vaddr, dma_addr_t dma_handle,
+ enum dma_data_direction dir)
+{
+ WARN_ONCE(1, "xen_virtio_dma_free_pages size %zu\n", size);
+}
+
+static dma_addr_t xen_virtio_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ grant_ref_t grant;
+
+ if (gnttab_alloc_grant_references(1, &grant))
+ return DMA_MAPPING_ERROR;
+
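+ /* Grant read-only access if data is only transferred to the device. */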
+ gnttab_grant_foreign_access_ref(grant, 0, xen_page_to_gfn(page),
+ dir == DMA_TO_DEVICE);
+
+ return grant_to_dma(grant) + offset;
+}
+
+static void xen_virtio_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ grant_ref_t grant;
+
+ grant = dma_to_grant(dma_handle);
+
+ gnttab_end_foreign_access_ref(grant);
+
+ gnttab_free_grant_reference(grant);
+}
+
+static int xen_virtio_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ WARN_ONCE(1, "xen_virtio_dma_map_sg nents %d\n", nents);
+ return -EINVAL;
+}
+
+static void xen_virtio_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ WARN_ONCE(1, "xen_virtio_dma_unmap_sg nents %d\n", nents);
+}
+
+static int xen_virtio_dma_dma_supported(struct device *dev, u64 mask)
+{
+ return 1;
+}
+
+static const struct dma_map_ops xen_virtio_dma_ops = {
+ .alloc = xen_virtio_dma_alloc,
+ .free = xen_virtio_dma_free,
+ .alloc_pages = xen_virtio_dma_alloc_pages,
+ .free_pages = xen_virtio_dma_free_pages,
+ .mmap = dma_common_mmap,
+ .get_sgtable = dma_common_get_sgtable,
+ .map_page = xen_virtio_dma_map_page,
+ .unmap_page = xen_virtio_dma_unmap_page,
+ .map_sg = xen_virtio_dma_map_sg,
+ .unmap_sg = xen_virtio_dma_unmap_sg,
+ .dma_supported = xen_virtio_dma_dma_supported,
+};
+
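+/*
+ * Install the grant based DMA ops above for a device. Meant to be called
+ * for virtio devices of Xen guests which have to use grants for their
+ * virtio ring and buffer memory.
+ */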
+void xen_virtio_setup_dma_ops(struct device *dev)
+{
+ dev->dma_ops = &xen_virtio_dma_ops;
+}
+EXPORT_SYMBOL_GPL(xen_virtio_setup_dma_ops);
+
+MODULE_DESCRIPTION("Xen virtio support driver");
+MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
+MODULE_LICENSE("GPL");
@@ -221,4 +221,12 @@ static inline void xen_preemptible_hcall_end(void) { }
#endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
+#ifdef CONFIG_XEN_VIRTIO
+void xen_virtio_setup_dma_ops(struct device *dev);
+#else
+static inline void xen_virtio_setup_dma_ops(struct device *dev)
+{
+}
+#endif /* CONFIG_XEN_VIRTIO */
+
#endif /* INCLUDE_XEN_OPS_H */