Message ID | 20210422081508.3942748-6-tientzu@chromium.org (mailing list archive)
---|---
State | Superseded |
Delegated to: | Bjorn Helgaas |
Series | Restricted DMA
On 22/04/2021 09:14, Claire Chang wrote:
> Add the initialization function to create restricted DMA pools from
> matching reserved-memory nodes.
>
> Signed-off-by: Claire Chang <tientzu@chromium.org>
> ---
>  include/linux/device.h  |  4 +++
>  include/linux/swiotlb.h |  3 +-
>  kernel/dma/swiotlb.c    | 80 +++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 86 insertions(+), 1 deletion(-)
[...]
> +#ifdef CONFIG_DEBUG_FS
> +	if (!io_tlb_default_mem->debugfs)
> +		io_tlb_default_mem->debugfs =
> +			debugfs_create_dir("swiotlb", NULL);

At this point it's possible for io_tlb_default_mem to be NULL, leading
to a splat.

But even then, if it's not NULL and we have the situation where
debugfs==NULL, then the debugfs_create_dir() here will cause a
subsequent attempt in swiotlb_create_debugfs() to fail (directory
already exists), leading to mem->debugfs being assigned an error value.
I suspect the creation of the debugfs directory needs to be separated
from io_tlb_default_mem being set.

Other than that, I gave this series a go with our prototype of Arm's
Confidential Compute Architecture[1] - since the majority of the
guest's memory is protected from the host, the restricted DMA pool
allows (only) a small area to be shared with the host.

After fixing (well, hacking round) the above, it all seems to be
working fine with virtio drivers.

Thanks,

Steve

[1]
https://www.arm.com/why-arm/architecture/security-features/arm-confidential-compute-architecture
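As an aside, a standalone sketch of the second failure mode described above, assuming a roughly v5.12-era kernel where debugfs_create_dir() reports failures as ERR_PTR values rather than NULL; the function name here is purely illustrative and not part of the series:

	#include <linux/debugfs.h>
	#include <linux/err.h>
	#include <linux/printk.h>

	static void swiotlb_debugfs_dup_demo(void)
	{
		/* First creation of the directory succeeds... */
		struct dentry *first = debugfs_create_dir("swiotlb", NULL);

		/* ...but a second creation of the same name fails, and the
		 * failure comes back as an ERR_PTR (typically -EEXIST),
		 * not NULL, so a naive "is it NULL?" check will not catch it.
		 */
		struct dentry *second = debugfs_create_dir("swiotlb", NULL);

		if (IS_ERR(second))
			pr_warn("duplicate swiotlb debugfs dir: %ld\n",
				PTR_ERR(second));

		debugfs_remove(first);
	}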
On Fri, Apr 23, 2021 at 7:34 PM Steven Price <steven.price@arm.com> wrote:
>
> On 22/04/2021 09:14, Claire Chang wrote:
> > Add the initialization function to create restricted DMA pools from
> > matching reserved-memory nodes.
[...]
> > +#ifdef CONFIG_DEBUG_FS
> > +	if (!io_tlb_default_mem->debugfs)
> > +		io_tlb_default_mem->debugfs =
> > +			debugfs_create_dir("swiotlb", NULL);
>
> At this point it's possible for io_tlb_default_mem to be NULL, leading
> to a splat.

Thanks for pointing this out.

> But even then, if it's not NULL and we have the situation where
> debugfs==NULL, then the debugfs_create_dir() here will cause a
> subsequent attempt in swiotlb_create_debugfs() to fail (directory
> already exists), leading to mem->debugfs being assigned an error value.
> I suspect the creation of the debugfs directory needs to be separated
> from io_tlb_default_mem being set.

The debugfs creation should move into the if (!mem) {...} above to
avoid duplication.
I think having a separate struct dentry pointer for the default debugfs
should be enough?

	if (!debugfs)
		debugfs = debugfs_create_dir("swiotlb", NULL);
	swiotlb_create_debugfs(mem, rmem->name, debugfs);

> Other than that, I gave this series a go with our prototype of Arm's
> Confidential Compute Architecture[1] - since the majority of the
> guest's memory is protected from the host, the restricted DMA pool
> allows (only) a small area to be shared with the host.
>
> After fixing (well, hacking round) the above, it all seems to be
> working fine with virtio drivers.
>
> Thanks,
>
> Steve
>
> [1]
> https://www.arm.com/why-arm/architecture/security-features/arm-confidential-compute-architecture
On 26/04/2021 17:37, Claire Chang wrote:
> On Fri, Apr 23, 2021 at 7:34 PM Steven Price <steven.price@arm.com> wrote:
[...]
>>
>> But even then, if it's not NULL and we have the situation where
>> debugfs==NULL, then the debugfs_create_dir() here will cause a
>> subsequent attempt in swiotlb_create_debugfs() to fail (directory
>> already exists), leading to mem->debugfs being assigned an error value.
>> I suspect the creation of the debugfs directory needs to be separated
>> from io_tlb_default_mem being set.
>
> The debugfs creation should move into the if (!mem) {...} above to
> avoid duplication.
> I think having a separate struct dentry pointer for the default debugfs
> should be enough?
>
> 	if (!debugfs)
> 		debugfs = debugfs_create_dir("swiotlb", NULL);
> 	swiotlb_create_debugfs(mem, rmem->name, debugfs);

Yes, that looks like a good solution to me. Although I'd name the
variable something a bit more descriptive than just "debugfs", e.g.
"debugfs_dir" or "debugfs_root".

Thanks,

Steve
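To make the agreed direction concrete, a minimal sketch of how the debugfs handling in rmem_swiotlb_device_init() could look with a file-scope dentry, following Claire's snippet and Steven's suggested "debugfs_dir" name. It assumes the swiotlb_create_debugfs(mem, name, dir) helper referenced in the discussion and elides the CONFIG_ARM check from the posted patch for brevity; this is an illustration in the context of kernel/dma/swiotlb.c, not the code of a later revision.

	#ifdef CONFIG_DEBUG_FS
	static struct dentry *debugfs_dir;
	#endif

	static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
					    struct device *dev)
	{
		struct io_tlb_mem *mem = rmem->priv;
		unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;

		if (dev->dma_io_tlb_mem)
			return 0;

		/* The first device attached to the pool initializes it. */
		if (!mem) {
			mem = kzalloc(struct_size(mem, slots, nslabs), GFP_KERNEL);
			if (!mem)
				return -ENOMEM;

			swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
			rmem->priv = mem;

	#ifdef CONFIG_DEBUG_FS
			/* Create the shared top-level directory once, without
			 * touching io_tlb_default_mem (which may be NULL), then
			 * hang this pool's entries under it.
			 */
			if (!debugfs_dir)
				debugfs_dir = debugfs_create_dir("swiotlb", NULL);
			swiotlb_create_debugfs(mem, rmem->name, debugfs_dir);
	#endif
		}

		dev->dma_io_tlb_mem = mem;
		return 0;
	}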
diff --git a/include/linux/device.h b/include/linux/device.h
index 38a2071cf776..4987608ea4ff 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -416,6 +416,7 @@ struct dev_links_info {
  * @dma_pools:	Dma pools (if dma'ble device).
  * @dma_mem:	Internal for coherent mem override.
  * @cma_area:	Contiguous memory area for dma allocations
+ * @dma_io_tlb_mem: Internal for swiotlb io_tlb_mem override.
  * @archdata:	For arch-specific additions.
  * @of_node:	Associated device tree node.
  * @fwnode:	Associated device node supplied by platform firmware.
@@ -521,6 +522,9 @@ struct device {
 #ifdef CONFIG_DMA_CMA
 	struct cma *cma_area;		/* contiguous memory area for dma
 					   allocations */
+#endif
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+	struct io_tlb_mem *dma_io_tlb_mem;
 #endif
 	/* arch specific additions */
 	struct dev_archdata	archdata;
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 216854a5e513..03ad6e3b4056 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -72,7 +72,8 @@ extern enum swiotlb_force swiotlb_force;
  *		range check to see if the memory was in fact allocated by this
  *		API.
  * @nslabs:	The number of IO TLB blocks (in groups of 64) between @start and
- *		@end. This is command line adjustable via setup_io_tlb_npages.
+ *		@end. For default swiotlb, this is command line adjustable via
+ *		setup_io_tlb_npages.
  * @used:	The number of used IO TLB block.
  * @list:	The free list describing the number of free entries available
  *		from each index.
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 57a9adb920bf..ffbb8724e06c 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -39,6 +39,13 @@
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #endif
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/slab.h>
+#endif
 
 #include <asm/io.h>
 #include <asm/dma.h>
@@ -681,3 +688,76 @@ static int __init swiotlb_create_default_debugfs(void)
 late_initcall(swiotlb_create_default_debugfs);
 
 #endif
+
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
+				    struct device *dev)
+{
+	struct io_tlb_mem *mem = rmem->priv;
+	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;
+
+	if (dev->dma_io_tlb_mem)
+		return 0;
+
+	/* Since multiple devices can share the same pool, the private data,
+	 * io_tlb_mem struct, will be initialized by the first device attached
+	 * to it.
+	 */
+	if (!mem) {
+		mem = kzalloc(struct_size(mem, slots, nslabs), GFP_KERNEL);
+		if (!mem)
+			return -ENOMEM;
+#ifdef CONFIG_ARM
+		if (!PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
+			kfree(mem);
+			return -EINVAL;
+		}
+#endif /* CONFIG_ARM */
+		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
+
+		rmem->priv = mem;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	if (!io_tlb_default_mem->debugfs)
+		io_tlb_default_mem->debugfs =
+			debugfs_create_dir("swiotlb", NULL);
+
+	swiotlb_create_debugfs(mem, rmem->name, io_tlb_default_mem->debugfs);
+#endif /* CONFIG_DEBUG_FS */
+
+	dev->dma_io_tlb_mem = mem;
+
+	return 0;
+}
+
+static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
+					struct device *dev)
+{
+	if (dev)
+		dev->dma_io_tlb_mem = NULL;
+}
+
+static const struct reserved_mem_ops rmem_swiotlb_ops = {
+	.device_init = rmem_swiotlb_device_init,
+	.device_release = rmem_swiotlb_device_release,
+};
+
+static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
+{
+	unsigned long node = rmem->fdt_node;
+
+	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
+	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
+	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
+	    of_get_flat_dt_prop(node, "no-map", NULL))
+		return -EINVAL;
+
+	rmem->ops = &rmem_swiotlb_ops;
+	pr_info("Reserved memory: created device swiotlb memory pool at %pa, size %ld MiB\n",
+		&rmem->base, (unsigned long)rmem->size / SZ_1M);
+	return 0;
+}
+
+RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
+#endif /* CONFIG_DMA_RESTRICTED_POOL */
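For context, a hypothetical consumer sketch (not part of this patch, and not necessarily how the rest of the series wires devices to their pool): a driver whose device-tree node carries a memory-region phandle pointing at a "restricted-dma-pool" node could attach through the generic reserved-memory API, which ends up invoking rmem_swiotlb_device_init() above. The example_probe() name is illustrative only.

	#include <linux/of_reserved_mem.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		int ret;

		/* Resolves the first memory-region phandle of this device's
		 * node and runs the matching reserved_mem_ops->device_init()
		 * hook, i.e. rmem_swiotlb_device_init() for a
		 * restricted-dma-pool node.
		 */
		ret = of_reserved_mem_device_init(&pdev->dev);
		if (ret)
			return ret;

		/* pdev->dev.dma_io_tlb_mem now points at the restricted pool,
		 * so swiotlb bouncing for this device can use it instead of
		 * the default io_tlb_mem.
		 */
		return 0;
	}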
Add the initialization function to create restricted DMA pools from
matching reserved-memory nodes.

Signed-off-by: Claire Chang <tientzu@chromium.org>
---
 include/linux/device.h  |  4 +++
 include/linux/swiotlb.h |  3 +-
 kernel/dma/swiotlb.c    | 80 +++++++++++++++++++++++++++++++++++++++++
 3 files changed, 86 insertions(+), 1 deletion(-)