@@ -95,13 +95,31 @@ static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
{
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+ struct scatterlist *sgl;
unsigned long pfn;
+ int i;
if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- if (!buf->pages)
+ if (!buf->sgt)
return -EINTR;
- pfn = page_to_pfn(buf->pages[page_offset++]);
+ sgl = buf->sgt->sgl;
+ for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
+ if (!sgl) {
+ DRM_ERROR("invalid SG table\n");
+ return -EINTR;
+ }
+ if (page_offset < (sgl->length >> PAGE_SHIFT))
+ break;
+ page_offset -= (sgl->length >> PAGE_SHIFT);
+ }
+
+ if (i >= buf->sgt->nents) {
+ DRM_ERROR("invalid page offset\n");
+ return -EINVAL;
+ }
+
+ pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
} else
pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
This patch fixes the problem of mapping gem objects which are non-contiguous DMA buffers. These buffers are described using an SG table and SG lists. Each valid SG list points to a single page or a group of pages which are physically contiguous. The current implementation just maps the first page of each SG list and leaves the other pages unmapped, leading to a crash. The given solution finds the page struct for the faulting page by parsing the SG table and maps it. This patch is based on branch exynos-drm-next-iommu at git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git v1: 1) Instead of mapping the whole section, map a single page. Signed-off-by: Rahul Sharma <rahul.sharma@samsung.com> --- drivers/gpu/drm/exynos/exynos_drm_gem.c | 22 ++++++++++++++++++++-- 1 files changed, 20 insertions(+), 2 deletions(-)