The Documentation/DMA-API-HOWTO.txt states that dma_map_sg returns the
number of entries created in the DMA address space. However, the
subsequent calls to dma_sync_sg_for_{device,cpu} and dma_unmap_sg must be
called with the original number of entries passed to dma_map_sg. The
sg_table->nents field in turn holds the result of the dma_map_sg call, as
stated in include/linux/scatterlist.h. Adapt the code to obey those rules.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
---
 drivers/gpu/drm/drm_cache.c            | 2 +-
 drivers/gpu/drm/drm_gem_shmem_helper.c | 7 ++++---
 drivers/gpu/drm/drm_prime.c            | 9 +++++----
 3 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -127,7 +127,7 @@ static void drm_cache_flush_clflush(struct page *pages[],
struct sg_page_iter sg_iter;

mb(); /*CLFLUSH is ordered only by using memory barriers*/
- for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+ for_each_sg_page(st->sgl, &sg_iter, st->orig_nents, 0)
drm_clflush_page(sg_page_iter_page(&sg_iter));
mb(); /*Make sure that all cache line entry is flushed*/

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -118,7 +118,7 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj)
} else {
if (shmem->sgt) {
dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
- shmem->sgt->nents, DMA_BIDIRECTIONAL);
+ shmem->sgt->orig_nents, DMA_BIDIRECTIONAL);
sg_free_table(shmem->sgt);
kfree(shmem->sgt);
}
@@ -396,7 +396,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
- shmem->sgt->nents, DMA_BIDIRECTIONAL);
+ shmem->sgt->orig_nents, DMA_BIDIRECTIONAL);
sg_free_table(shmem->sgt);
kfree(shmem->sgt);
shmem->sgt = NULL;
@@ -623,7 +623,8 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
goto err_put_pages;
}
/* Map the pages for use by the h/w. */
- dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+ sgt->nents = dma_map_sg(obj->dev->dev, sgt->sgl, sgt->orig_nents,
+ DMA_BIDIRECTIONAL);

shmem->sgt = sgt;

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -626,8 +626,9 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
else
sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

- if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
- DMA_ATTR_SKIP_CPU_SYNC)) {
+ sgt->nents = dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->orig_nents,
+ dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (!sgt->nents) {
sg_free_table(sgt);
kfree(sgt);
sgt = ERR_PTR(-ENOMEM);
@@ -652,7 +653,7 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
if (!sgt)
return;

- dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+ dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->orig_nents, dir,
DMA_ATTR_SKIP_CPU_SYNC);
sg_free_table(sgt);
kfree(sgt);
@@ -975,7 +976,7 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
*/
page_index = 0;
dma_index = 0;
- for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, count) {
page_len = sg->length;
page = sg_page(sg);
dma_len = sg_dma_len(sg);
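For readers less familiar with the scatterlist rules this patch enforces, a
minimal sketch of the intended calling pattern follows. The helper names
(sketch_map_for_dma() and friends) and their device/sg_table parameters are
hypothetical, for illustration only; the dma_* calls and the sg_table fields
are the kernel API discussed in the commit message.

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/*
	 * nents vs. orig_nents, as described above:
	 *   - sgt->orig_nents is the number of entries filled in on the
	 *     CPU side (set up by sg_alloc_table() and friends);
	 *   - dma_map_sg() consumes orig_nents entries and returns the
	 *     number of DMA segments actually created (an IOMMU may
	 *     coalesce entries); that return value is what
	 *     sg_table->nents is meant to hold;
	 *   - dma_sync_sg_for_{device,cpu}() and dma_unmap_sg() must be
	 *     given the original entry count, never the dma_map_sg()
	 *     result.
	 */
	static int sketch_map_for_dma(struct device *dev, struct sg_table *sgt)
	{
		sgt->nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents,
					DMA_BIDIRECTIONAL);
		if (!sgt->nents)	/* dma_map_sg() returns 0 on failure */
			return -ENOMEM;
		return 0;
	}

	static void sketch_cpu_access(struct device *dev, struct sg_table *sgt)
	{
		/* Both ownership transfers take the original count too. */
		dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents,
				    DMA_BIDIRECTIONAL);
		/* ... CPU reads/writes the buffer here ... */
		dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents,
				       DMA_BIDIRECTIONAL);
	}

	static void sketch_unmap_for_dma(struct device *dev, struct sg_table *sgt)
	{
		/* Again orig_nents, not the value dma_map_sg() returned. */
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
	}

Any loop that walks the CPU pages of the table (e.g. for_each_sg_page(), as
in the drm_cache.c hunk) is likewise bounded by orig_nents, while loops over
sg_dma_address()/sg_dma_len() of the mapped segments use the count returned
by dma_map_sg().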