diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -147,14 +147,14 @@ static struct sg_table *dup_sg_table(struct sg_table *table)
 	if (!new_table)
 		return ERR_PTR(-ENOMEM);
 
-	ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
+	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
 	if (ret) {
 		kfree(new_table);
 		return ERR_PTR(-ENOMEM);
 	}
 
 	new_sg = new_table->sgl;
-	for_each_sg(table->sgl, sg, table->nents, i) {
+	for_each_sg(table->sgl, sg, table->orig_nents, i) {
 		memcpy(new_sg, sg, sizeof(*sg));
 		new_sg->dma_address = 0;
 		new_sg = sg_next(new_sg);
@@ -227,8 +227,9 @@ static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
 
 	table = a->table;
 
-	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
-			direction))
+	table->nents = dma_map_sg(attachment->dev, table->sgl,
+				  table->orig_nents, direction);
+	if (!table->nents)
 		return ERR_PTR(-ENOMEM);
 
 	return table;
@@ -238,7 +239,7 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
 			      struct sg_table *table,
 			      enum dma_data_direction direction)
 {
-	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
+	dma_unmap_sg(attachment->dev, table->sgl, table->orig_nents, direction);
 }
 
 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
@@ -297,7 +298,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 
 	mutex_lock(&buffer->lock);
 	list_for_each_entry(a, &buffer->attachments, list) {
-		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
+		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->orig_nents,
 				    direction);
 	}
 
@@ -320,8 +321,8 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 
 	mutex_lock(&buffer->lock);
 	list_for_each_entry(a, &buffer->attachments, list) {
-		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
-				       direction);
+		dma_sync_sg_for_device(a->dev, a->table->sgl,
+				       a->table->orig_nents, direction);
 	}
 	mutex_unlock(&buffer->lock);
 
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -38,7 +38,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
 	else
 		pgprot = pgprot_writecombine(PAGE_KERNEL);
 
-	for_each_sg(table->sgl, sg, table->nents, i) {
+	for_each_sg(table->sgl, sg, table->orig_nents, i) {
 		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
 		struct page *page = sg_page(sg);
 
@@ -71,7 +71,7 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
 	int i;
 	int ret;
 
-	for_each_sg(table->sgl, sg, table->nents, i) {
+	for_each_sg(table->sgl, sg, table->orig_nents, i) {
 		struct page *page = sg_page(sg);
 		unsigned long remainder = vma->vm_end - addr;
 		unsigned long len = sg->length;
@@ -142,7 +142,7 @@ int ion_heap_buffer_zero(struct ion_buffer *buffer)
 	else
 		pgprot = pgprot_writecombine(PAGE_KERNEL);
 
-	return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
+	return ion_heap_sglist_zero(table->sgl, table->orig_nents, pgprot);
 }
 
 int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -162,7 +162,7 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
 	if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
 		ion_heap_buffer_zero(buffer);
 
-	for_each_sg(table->sgl, sg, table->nents, i)
+	for_each_sg(table->sgl, sg, table->orig_nents, i)
 		free_buffer_page(sys_heap, buffer, sg_page(sg));
 	sg_free_table(table);
 	kfree(table);
The Documentation/DMA-API-HOWTO.txt states that dma_map_sg() returns the
number of entries created in the DMA address space. However, the subsequent
calls to dma_sync_sg_for_{device,cpu}() and dma_unmap_sg() must be called
with the original number of entries passed to dma_map_sg(). sg_table->nents,
in turn, holds the result of the dma_map_sg() call, as stated in
include/linux/scatterlist.h. Adapt the code to obey those rules.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
---
For more information, see the '[PATCH v2 00/21] DRM: fix struct sg_table
nents vs. orig_nents misuse' thread:
https://lkml.org/lkml/2020/5/4/373
---
 drivers/staging/android/ion/ion.c             | 17 +++++++++--------
 drivers/staging/android/ion/ion_heap.c        |  6 +++---
 drivers/staging/android/ion/ion_system_heap.c |  2 +-
 3 files changed, 13 insertions(+), 12 deletions(-)
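To make the rule concrete, below is a minimal sketch (not part of the patch)
of the map/sync/unmap pattern the commit message describes. The my_dev_*
helper names and the DMA_BIDIRECTIONAL direction are illustrative
assumptions; only the DMA-API calls themselves are real.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: map a driver-owned sg_table for DMA. */
static int my_dev_map(struct device *dev, struct sg_table *table)
{
	/*
	 * dma_map_sg() takes the CPU-side entry count (orig_nents) and
	 * returns the number of entries created in the DMA address space;
	 * an IOMMU may coalesce entries, so the result can be smaller.
	 * Store that result in nents, as scatterlist.h expects.
	 */
	table->nents = dma_map_sg(dev, table->sgl, table->orig_nents,
				  DMA_BIDIRECTIONAL);
	if (!table->nents)
		return -ENOMEM;
	return 0;
}

/* Hypothetical helper: hand the buffer back to the CPU and unmap it. */
static void my_dev_sync_and_unmap(struct device *dev, struct sg_table *table)
{
	/*
	 * The sync and unmap calls must be given the same count that was
	 * passed to dma_map_sg(), i.e. orig_nents, never the (possibly
	 * smaller) nents value it returned.
	 */
	dma_sync_sg_for_cpu(dev, table->sgl, table->orig_nents,
			    DMA_BIDIRECTIONAL);
	dma_unmap_sg(dev, table->sgl, table->orig_nents, DMA_BIDIRECTIONAL);
}

Note that walking the scatterlist page by page, as the ion_heap.c hunks do,
is a CPU-side operation, so for_each_sg() is likewise bounded by orig_nents;
only code that consumes sg_dma_address()/sg_dma_len() should iterate over
the nents entries produced by dma_map_sg().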