Message ID | 20220110223201.31024-8-alex.sierra@amd.com (mailing list archive) |
---|---|
State | New, archived |
Series | Add MEMORY_DEVICE_COHERENT for coherent device memory mapping |

Thanks for splitting the coherent devices into separate device nodes. Couple
of comments below.

On Tuesday, 11 January 2022 9:31:58 AM AEDT Alex Sierra wrote:
> In order to configure device coherent in test_hmm, two module parameters
> should be passed, which correspond to the SP start address of each
> device (2) spm_addr_dev0 & spm_addr_dev1. If no parameters are passed,
> private device type is configured.
>
> Signed-off-by: Alex Sierra <alex.sierra@amd.com>
> ---
>  lib/test_hmm.c      | 74 +++++++++++++++++++++++++++++++--------------
>  lib/test_hmm_uapi.h |  1 +
>  2 files changed, 53 insertions(+), 22 deletions(-)
>
> diff --git a/lib/test_hmm.c b/lib/test_hmm.c
> index 97e48164d56a..9edeff52302e 100644
> --- a/lib/test_hmm.c
> +++ b/lib/test_hmm.c
> @@ -34,6 +34,16 @@
>  #define DEVMEM_CHUNK_SIZE		(256 * 1024 * 1024U)
>  #define DEVMEM_CHUNKS_RESERVE		16
>
> +static unsigned long spm_addr_dev0;
> +module_param(spm_addr_dev0, long, 0644);
> +MODULE_PARM_DESC(spm_addr_dev0,
> +		"Specify start address for SPM (special purpose memory) used for device 0. By setting this Coherent device type will be used. Make sure spm_addr_dev1 is set too");

It would be useful if you could mention the required size for this region
(ie. DEVMEM_CHUNK_SIZE).

> +
> +static unsigned long spm_addr_dev1;
> +module_param(spm_addr_dev1, long, 0644);
> +MODULE_PARM_DESC(spm_addr_dev1,
> +		"Specify start address for SPM (special purpose memory) used for device 1. By setting this Coherent device type will be used. Make sure spm_addr_dev0 is set too");
> +
>  static const struct dev_pagemap_ops dmirror_devmem_ops;
>  static const struct mmu_interval_notifier_ops dmirror_min_ops;
>  static dev_t dmirror_dev;
> @@ -452,29 +462,44 @@ static int dmirror_write(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd)
>  	return ret;
>  }
>
> -static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
> +static int dmirror_allocate_chunk(struct dmirror_device *mdevice,
>  				   struct page **ppage)
>  {
>  	struct dmirror_chunk *devmem;
> -	struct resource *res;
> +	struct resource *res = NULL;
>  	unsigned long pfn;
>  	unsigned long pfn_first;
>  	unsigned long pfn_last;
>  	void *ptr;
> +	int ret = -ENOMEM;
>
>  	devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
>  	if (!devmem)
> -		return false;
> +		return ret;
>
> -	res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
> -				      "hmm_dmirror");
> -	if (IS_ERR(res))
> +	switch (mdevice->zone_device_type) {
> +	case HMM_DMIRROR_MEMORY_DEVICE_PRIVATE:
> +		res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
> +					      "hmm_dmirror");
> +		if (IS_ERR_OR_NULL(res))
> +			goto err_devmem;
> +		devmem->pagemap.range.start = res->start;
> +		devmem->pagemap.range.end = res->end;
> +		devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
> +		break;
> +	case HMM_DMIRROR_MEMORY_DEVICE_COHERENT:
> +		devmem->pagemap.range.start = (MINOR(mdevice->cdevice.dev) - 2) ?
> +							spm_addr_dev0 :
> +							spm_addr_dev1;
> +		devmem->pagemap.range.end = devmem->pagemap.range.start +
> +					    DEVMEM_CHUNK_SIZE - 1;
> +		devmem->pagemap.type = MEMORY_DEVICE_COHERENT;
> +		break;
> +	default:
> +		ret = -EINVAL;
>  		goto err_devmem;
> +	}
>
> -	mdevice->zone_device_type = HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;

What initialises mdevice->zone_device_type now? It looks like it needs to get
initialised in hmm_dmirror_init(), which would be easier to do in the previous
patch rather than adding it here in the first place.
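
For example, something along these lines in hmm_dmirror_init() would keep the
type selection out of the allocation path. This is only an untested sketch,
assuming the four-device layout this series introduces (minors 0/1 private,
minors 2/3 coherent) and the existing dmirror_devices[] array; exact placement
is of course up to you:

	/* Sketch only: select the ZONE_DEVICE type once at init time.
	 * Devices 0/1 stay device-private; devices 2/3 become device-coherent
	 * when both SPM start addresses were supplied as module parameters.
	 */
	dmirror_devices[0].zone_device_type = HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
	dmirror_devices[1].zone_device_type = HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
	if (spm_addr_dev0 && spm_addr_dev1) {
		dmirror_devices[2].zone_device_type =
					HMM_DMIRROR_MEMORY_DEVICE_COHERENT;
		dmirror_devices[3].zone_device_type =
					HMM_DMIRROR_MEMORY_DEVICE_COHERENT;
	}
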
> -	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
> -	devmem->pagemap.range.start = res->start;
> -	devmem->pagemap.range.end = res->end;
>  	devmem->pagemap.nr_range = 1;
>  	devmem->pagemap.ops = &dmirror_devmem_ops;
>  	devmem->pagemap.owner = mdevice;
> @@ -495,10 +520,14 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
>  		mdevice->devmem_capacity = new_capacity;
>  		mdevice->devmem_chunks = new_chunks;
>  	}
> -
>  	ptr = memremap_pages(&devmem->pagemap, numa_node_id());
> -	if (IS_ERR(ptr))
> +	if (IS_ERR_OR_NULL(ptr)) {
> +		if (ptr)
> +			ret = PTR_ERR(ptr);
> +		else
> +			ret = -EFAULT;
>  		goto err_release;
> +	}
>
>  	devmem->mdevice = mdevice;
>  	pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT;
> @@ -527,15 +556,17 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
>  	}
>  	spin_unlock(&mdevice->lock);
>
> -	return true;
> +	return 0;
>
>  err_release:
>  	mutex_unlock(&mdevice->devmem_lock);
> -	release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range));
> +	if (res && devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
> +		release_mem_region(devmem->pagemap.range.start,
> +				   range_len(&devmem->pagemap.range));
>  err_devmem:
>  	kfree(devmem);
>
> -	return false;
> +	return ret;
>  }
>
>  static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
> @@ -560,7 +591,7 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
>  		spin_unlock(&mdevice->lock);
>  	} else {
>  		spin_unlock(&mdevice->lock);
> -		if (!dmirror_allocate_chunk(mdevice, &dpage))
> +		if (dmirror_allocate_chunk(mdevice, &dpage))
>  			goto error;
>  	}
>
> @@ -1220,10 +1251,8 @@ static int dmirror_device_init(struct dmirror_device *mdevice, int id)
>  	if (ret)
>  		return ret;
>
> -	/* Build a list of free ZONE_DEVICE private struct pages */
> -	dmirror_allocate_chunk(mdevice, NULL);
> -
> -	return 0;
> +	/* Build a list of free ZONE_DEVICE struct pages */
> +	return dmirror_allocate_chunk(mdevice, NULL);
>  }
>
>  static void dmirror_device_remove(struct dmirror_device *mdevice)
> @@ -1236,8 +1265,9 @@ static void dmirror_device_remove(struct dmirror_device *mdevice)
>  			mdevice->devmem_chunks[i];
>
>  		memunmap_pages(&devmem->pagemap);
> -		release_mem_region(devmem->pagemap.range.start,
> -				   range_len(&devmem->pagemap.range));
> +		if (devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
> +			release_mem_region(devmem->pagemap.range.start,
> +					   range_len(&devmem->pagemap.range));
>  		kfree(devmem);
>  	}
>  	kfree(mdevice->devmem_chunks);
> diff --git a/lib/test_hmm_uapi.h b/lib/test_hmm_uapi.h
> index 17f842f1aa02..625f3690d086 100644
> --- a/lib/test_hmm_uapi.h
> +++ b/lib/test_hmm_uapi.h
> @@ -68,6 +68,7 @@ enum {
>  enum {
>  	/* 0 is reserved to catch uninitialized type fields */
>  	HMM_DMIRROR_MEMORY_DEVICE_PRIVATE = 1,
> +	HMM_DMIRROR_MEMORY_DEVICE_COHERENT,
>  };
>
>  #endif /* _LIB_TEST_HMM_UAPI_H */
>
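
Coming back to the earlier comment about documenting the required region size:
something like the following wording for the parameter description would be
enough. Just a sketch of the text (the same would apply to spm_addr_dev1),
adjust as you see fit:

/* Sketch: same description as in the patch, plus the minimum size hint. */
MODULE_PARM_DESC(spm_addr_dev0,
		"Specify start address for SPM (special purpose memory) used for device 0. By setting this Coherent device type will be used. Make sure spm_addr_dev1 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");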