Message ID | 20191218125645.9211-4-kraxel@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Series | drm/virtio: fix mmap page attributes |
On Wed, Dec 18, 2019 at 4:56 AM Gerd Hoffmann <kraxel@redhat.com> wrote:
>
> With shmem helpers allowing to update pgprot caching flags via
> drm_gem_shmem_object.map_cached we can just use that and ditch
> our own implementations of mmap() and vmap().
>
> We also don't need a special case for imported objects, any map
> requests are handled by the exporter not udl.
>
> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
> ---
>  drivers/gpu/drm/udl/udl_gem.c | 62 ++---------------------------------
>  1 file changed, 3 insertions(+), 59 deletions(-)
>
> diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
> index b6e26f98aa0a..7e3a88b25b6b 100644
> --- a/drivers/gpu/drm/udl/udl_gem.c
> +++ b/drivers/gpu/drm/udl/udl_gem.c
> @@ -17,72 +17,15 @@
>   * GEM object funcs
>   */
>
> -static int udl_gem_object_mmap(struct drm_gem_object *obj,
> -                               struct vm_area_struct *vma)
> -{
> -        int ret;
> -
> -        ret = drm_gem_shmem_mmap(obj, vma);
> -        if (ret)
> -                return ret;
> -
> -        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
> -        if (obj->import_attach)
> -                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
> -        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
> -
> -        return 0;
> -}
> -
> -static void *udl_gem_object_vmap(struct drm_gem_object *obj)
> -{
> -        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> -        int ret;
> -
> -        ret = mutex_lock_interruptible(&shmem->vmap_lock);
> -        if (ret)
> -                return ERR_PTR(ret);
> -
> -        if (shmem->vmap_use_count++ > 0)
> -                goto out;
> -
> -        ret = drm_gem_shmem_get_pages(shmem);
> -        if (ret)
> -                goto err_zero_use;
> -
> -        if (obj->import_attach)
> -                shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
> -        else
> -                shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
> -                                    VM_MAP, PAGE_KERNEL);
> -
> -        if (!shmem->vaddr) {
> -                DRM_DEBUG_KMS("Failed to vmap pages\n");
> -                ret = -ENOMEM;
> -                goto err_put_pages;
> -        }
> -
> -out:
> -        mutex_unlock(&shmem->vmap_lock);
> -        return shmem->vaddr;
> -
> -err_put_pages:
> -        drm_gem_shmem_put_pages(shmem);
> -err_zero_use:
> -        shmem->vmap_use_count = 0;
> -        mutex_unlock(&shmem->vmap_lock);
> -        return ERR_PTR(ret);
> -}
> -
>  static const struct drm_gem_object_funcs udl_gem_object_funcs = {
>          .free = drm_gem_shmem_free_object,
>          .print_info = drm_gem_shmem_print_info,
>          .pin = drm_gem_shmem_pin,
>          .unpin = drm_gem_shmem_unpin,
>          .get_sg_table = drm_gem_shmem_get_sg_table,
> -        .vmap = udl_gem_object_vmap,
> +        .vmap = drm_gem_shmem_vmap,
>          .vunmap = drm_gem_shmem_vunmap,
> -        .mmap = udl_gem_object_mmap,
> +        .mmap = drm_gem_shmem_mmap,
>  };

It looks like we can just use the default ops (drm_gem_shmem_funcs). With
that, this series is Reviewed-by: Chia-I Wu <olvaffe@gmail.com>.

>
>  /*
> @@ -101,6 +44,7 @@ struct drm_gem_object *udl_driver_gem_create_object(struct drm_device *dev,
>
>          obj = &shmem->base;
>          obj->funcs = &udl_gem_object_funcs;
> +        shmem->map_cached = true;
>
>          return obj;
>  }
> --
> 2.18.1
>
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
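To make the review suggestion above concrete: if the shmem core really does fall back to the default drm_gem_shmem_funcs whenever a driver leaves obj->funcs NULL (which is what the comment about "the default ops" relies on), udl could drop its private funcs table entirely and keep only the caching flag. The sketch below is illustrative, not part of the posted patch; the allocation half of the function is assumed here, since the diff only shows its tail.

```c
/*
 * Hypothetical variant of udl_driver_gem_create_object() following the
 * review comment: no udl-local drm_gem_object_funcs table at all.
 * Sketch only -- assumes the shmem helper core installs its default
 * drm_gem_shmem_funcs when obj->funcs is left NULL.
 */
#include <linux/slab.h>
#include <drm/drm_device.h>
#include <drm/drm_gem_shmem_helper.h>

struct drm_gem_object *udl_driver_gem_create_object(struct drm_device *dev,
                                                    size_t size)
{
        struct drm_gem_shmem_object *shmem;

        shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
        if (!shmem)
                return NULL;

        /* obj->funcs stays NULL: rely on the shmem helpers' default ops. */
        shmem->map_cached = true;

        return &shmem->base;
}
```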
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index b6e26f98aa0a..7e3a88b25b6b 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -17,72 +17,15 @@
  * GEM object funcs
  */
 
-static int udl_gem_object_mmap(struct drm_gem_object *obj,
-                               struct vm_area_struct *vma)
-{
-        int ret;
-
-        ret = drm_gem_shmem_mmap(obj, vma);
-        if (ret)
-                return ret;
-
-        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-        if (obj->import_attach)
-                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
-
-        return 0;
-}
-
-static void *udl_gem_object_vmap(struct drm_gem_object *obj)
-{
-        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
-        int ret;
-
-        ret = mutex_lock_interruptible(&shmem->vmap_lock);
-        if (ret)
-                return ERR_PTR(ret);
-
-        if (shmem->vmap_use_count++ > 0)
-                goto out;
-
-        ret = drm_gem_shmem_get_pages(shmem);
-        if (ret)
-                goto err_zero_use;
-
-        if (obj->import_attach)
-                shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
-        else
-                shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
-                                    VM_MAP, PAGE_KERNEL);
-
-        if (!shmem->vaddr) {
-                DRM_DEBUG_KMS("Failed to vmap pages\n");
-                ret = -ENOMEM;
-                goto err_put_pages;
-        }
-
-out:
-        mutex_unlock(&shmem->vmap_lock);
-        return shmem->vaddr;
-
-err_put_pages:
-        drm_gem_shmem_put_pages(shmem);
-err_zero_use:
-        shmem->vmap_use_count = 0;
-        mutex_unlock(&shmem->vmap_lock);
-        return ERR_PTR(ret);
-}
-
 static const struct drm_gem_object_funcs udl_gem_object_funcs = {
         .free = drm_gem_shmem_free_object,
         .print_info = drm_gem_shmem_print_info,
         .pin = drm_gem_shmem_pin,
         .unpin = drm_gem_shmem_unpin,
         .get_sg_table = drm_gem_shmem_get_sg_table,
-        .vmap = udl_gem_object_vmap,
+        .vmap = drm_gem_shmem_vmap,
         .vunmap = drm_gem_shmem_vunmap,
-        .mmap = udl_gem_object_mmap,
+        .mmap = drm_gem_shmem_mmap,
 };
 
 /*
@@ -101,6 +44,7 @@ struct drm_gem_object *udl_driver_gem_create_object(struct drm_device *dev,
 
         obj = &shmem->base;
         obj->funcs = &udl_gem_object_funcs;
+        shmem->map_cached = true;
 
         return obj;
 }
With shmem helpers allowing to update pgprot caching flags via
drm_gem_shmem_object.map_cached we can just use that and ditch
our own implementations of mmap() and vmap().

We also don't need a special case for imported objects, any map
requests are handled by the exporter not udl.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 drivers/gpu/drm/udl/udl_gem.c | 62 ++---------------------------------
 1 file changed, 3 insertions(+), 59 deletions(-)
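For context on why the driver-private mmap() can go away: with the map_cached flag introduced earlier in this series, the page-protection decision is meant to live in the generic drm_gem_shmem_mmap() helper itself rather than in each driver. The fragment below is a rough paraphrase of that logic, mirroring the pgprot calls removed from udl above; the wrapper name shmem_apply_page_prot() is made up for illustration and the code is not copied from drm_gem_shmem_helper.c.

```c
#include <linux/mm.h>
#include <drm/drm_gem_shmem_helper.h>

/*
 * Rough paraphrase of the pgprot handling the shmem mmap helper is
 * expected to perform once drm_gem_shmem_object.map_cached exists.
 * Helper name is hypothetical; see the real drm_gem_shmem_mmap().
 */
static void shmem_apply_page_prot(struct drm_gem_shmem_object *shmem,
                                  struct vm_area_struct *vma)
{
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

        /* Write-combine only when the driver did not ask for cached maps. */
        if (!shmem->map_cached)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        /* Clear the memory-encryption bit where that applies (SME/SEV). */
        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
}
```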