
drm/fbdev-dma: Only install deferred I/O if necessary

Message ID 20240904093140.15203-1-tzimmermann@suse.de (mailing list archive)
State New, archived
Series drm/fbdev-dma: Only install deferred I/O if necessary

Commit Message

Thomas Zimmermann Sept. 4, 2024, 9:31 a.m. UTC
Deferred I/O requires struct page for framebuffer memory, which is
not guaranteed for all DMA ranges. We thus only install deferred I/O
if we have a framebuffer that requires it.

A reported bug affected the ipu-v3 and pl111 drivers, which have video
memory in either Normal or HighMem zones

[    0.000000] Zone ranges:
[    0.000000]   Normal   [mem 0x0000000010000000-0x000000003fffffff]
[    0.000000]   HighMem  [mem 0x0000000040000000-0x000000004fffffff]

where deferred I/O only works correctly with HighMem. See the Closes
tags for bug reports.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Fixes: 808a40b69468 ("drm/fbdev-dma: Implement damage handling and deferred I/O")
Reported-by: Alexander Stein <alexander.stein@ew.tq-group.com>
Closes: https://lore.kernel.org/all/23636953.6Emhk5qWAg@steina-w/
Reported-by: Linus Walleij <linus.walleij@linaro.org>
Closes: https://lore.kernel.org/dri-devel/CACRpkdb+hb9AGavbWpY-=uQQ0apY9en_tWJioPKf_fAbXMP4Hg@mail.gmail.com/
Tested-by: Alexander Stein <alexander.stein@ew.tq-group.com>
Cc: Thomas Zimmermann <tzimmermann@suse.de>
Cc: Javier Martinez Canillas <javierm@redhat.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Maxime Ripard <mripard@kernel.org>
---
 drivers/gpu/drm/drm_fbdev_dma.c | 71 ++++++++++++++++++++++++---------
 1 file changed, 52 insertions(+), 19 deletions(-)

Comments

Simona Vetter Sept. 4, 2024, 9:59 a.m. UTC | #1
On Wed, Sep 04, 2024 at 11:31:23AM +0200, Thomas Zimmermann wrote:
> Deferred I/O requires struct page for framebuffer memory, which is
> not guaranteed for all DMA ranges. We thus only install deferred I/O
> if we have a framebuffer that requires it.
> 
> A reported bug affected the ipu-v3 and pl111 drivers, which have video
> memory in either Normal or HighMem zones
> 
> [    0.000000] Zone ranges:
> [    0.000000]   Normal   [mem 0x0000000010000000-0x000000003fffffff]
> [    0.000000]   HighMem  [mem 0x0000000040000000-0x000000004fffffff]
> 
> where deferred I/O only works correctly with HighMem. See the Closes
> tags for bug reports.
> 
> Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
> Fixes: 808a40b69468 ("drm/fbdev-dma: Implement damage handling and deferred I/O")
> Reported-by: Alexander Stein <alexander.stein@ew.tq-group.com>
> Closes: https://lore.kernel.org/all/23636953.6Emhk5qWAg@steina-w/
> Reported-by: Linus Walleij <linus.walleij@linaro.org>
> Closes: https://lore.kernel.org/dri-devel/CACRpkdb+hb9AGavbWpY-=uQQ0apY9en_tWJioPKf_fAbXMP4Hg@mail.gmail.com/
> Tested-by: Alexander Stein <alexander.stein@ew.tq-group.com>
> Cc: Thomas Zimmermann <tzimmermann@suse.de>
> Cc: Javier Martinez Canillas <javierm@redhat.com>
> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
> Cc: Maxime Ripard <mripard@kernel.org>

I guess yet another reason we should look into vma-interception using
mkwrite and read-only ptes, but that's a lot of typing, and I think this
should work in the interim at least.
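
For illustration, a rough sketch of that mkwrite approach (hypothetical
code, not part of this patch): map the framebuffer pages read-only, take
a write fault once per page, record the damage, and write-protect the
PTEs again after the flush to re-arm the fault.

	/* Hypothetical sketch of mkwrite-based dirty tracking. */
	static vm_fault_t fbdev_vma_mkwrite(struct vm_fault *vmf)
	{
		struct fb_info *info = vmf->vma->vm_private_data;

		/* record damage for the faulting page (elided) ... */

		/* ... and kick the flush worker */
		schedule_delayed_work(&info->deferred_work, HZ / 20);

		return 0;	/* let the write proceed */
	}

	static const struct vm_operations_struct fbdev_vm_ops = {
		.page_mkwrite = fbdev_vma_mkwrite,
	};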

> ---
>  drivers/gpu/drm/drm_fbdev_dma.c | 71 ++++++++++++++++++++++++---------
>  1 file changed, 52 insertions(+), 19 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
> index 7ef5a48c8029..455dc48d17a7 100644
> --- a/drivers/gpu/drm/drm_fbdev_dma.c
> +++ b/drivers/gpu/drm/drm_fbdev_dma.c
> @@ -36,20 +36,11 @@ static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
>  	return 0;
>  }
>  
> -FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
> -				   drm_fb_helper_damage_range,
> -				   drm_fb_helper_damage_area);
> -
>  static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
>  {
>  	struct drm_fb_helper *fb_helper = info->par;
> -	struct drm_framebuffer *fb = fb_helper->fb;
> -	struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
>  
> -	if (!dma->map_noncoherent)
> -		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
> -
> -	return fb_deferred_io_mmap(info, vma);
> +	return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
>  }
>  
>  static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
> @@ -70,13 +61,40 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
>  }
>  
>  static const struct fb_ops drm_fbdev_dma_fb_ops = {
> +	.owner = THIS_MODULE,
> +	.fb_open = drm_fbdev_dma_fb_open,
> +	.fb_release = drm_fbdev_dma_fb_release,
> +	__FB_DEFAULT_DMAMEM_OPS_RDWR,
> +	DRM_FB_HELPER_DEFAULT_OPS,
> +	__FB_DEFAULT_DMAMEM_OPS_DRAW,
> +	.fb_mmap = drm_fbdev_dma_fb_mmap,
> +	.fb_destroy = drm_fbdev_dma_fb_destroy,
> +};
> +
> +FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
> +				   drm_fb_helper_damage_range,
> +				   drm_fb_helper_damage_area);
> +
> +static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
> +{
> +	struct drm_fb_helper *fb_helper = info->par;
> +	struct drm_framebuffer *fb = fb_helper->fb;
> +	struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
> +
> +	if (!dma->map_noncoherent)
> +		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
> +
> +	return fb_deferred_io_mmap(info, vma);
> +}
> +
> +static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
>  	.owner = THIS_MODULE,
>  	.fb_open = drm_fbdev_dma_fb_open,
>  	.fb_release = drm_fbdev_dma_fb_release,
>  	__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
>  	DRM_FB_HELPER_DEFAULT_OPS,
>  	__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
> -	.fb_mmap = drm_fbdev_dma_fb_mmap,
> +	.fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
>  	.fb_destroy = drm_fbdev_dma_fb_destroy,
>  };
>  
> @@ -89,6 +107,7 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
>  {
>  	struct drm_client_dev *client = &fb_helper->client;
>  	struct drm_device *dev = fb_helper->dev;
> +	bool use_deferred_io = false;
>  	struct drm_client_buffer *buffer;
>  	struct drm_gem_dma_object *dma_obj;
>  	struct drm_framebuffer *fb;
> @@ -111,6 +130,15 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
>  
>  	fb = buffer->fb;
>  
> +	/*
> +	 * Deferred I/O requires struct page for framebuffer memory,
> +	 * which is not guaranteed for all DMA ranges. We thus only
> +	 * install deferred I/O if we have a framebuffer that requires
> +	 * it.
> +	 */
> +	if (fb->funcs->dirty)
> +		use_deferred_io = true;
> +
>  	ret = drm_client_buffer_vmap(buffer, &map);
>  	if (ret) {
>  		goto err_drm_client_buffer_delete;
> @@ -130,7 +158,10 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
>  
>  	drm_fb_helper_fill_info(info, fb_helper, sizes);
>  
> -	info->fbops = &drm_fbdev_dma_fb_ops;
> +	if (use_deferred_io)
> +		info->fbops = &drm_fbdev_dma_deferred_fb_ops;
> +	else
> +		info->fbops = &drm_fbdev_dma_fb_ops;
>  
>  	/* screen */
>  	info->flags |= FBINFO_VIRTFB; /* system memory */
> @@ -145,13 +176,15 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
>  	info->fix.smem_len = info->screen_size;
>  
>  	/* deferred I/O */
> -	fb_helper->fbdefio.delay = HZ / 20;
> -	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
> -
> -	info->fbdefio = &fb_helper->fbdefio;
> -	ret = fb_deferred_io_init(info);
> -	if (ret)
> -		goto err_drm_fb_helper_release_info;
> +	if (use_deferred_io) {

I think a check here that roughly matches what fb_deferred_io_get_page
does would be good. Specifically this part

	if (is_vmalloc_addr(screen_buffer + offs))
		page = vmalloc_to_page(screen_buffer + offs);
	else if (info->fix.smem_start)
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

So maybe something like:

	if (!is_vmalloc_addr(screen_buffer))
		if (WARN_ON(!pfn_to_page(...)))
			use_deferred_io = false;

With maybe a comment that we assume buffers aren't a hilarious mix?
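
Spelled out a bit more, the probe-time check could look roughly like
this (a sketch only; pfn_valid() stands in for the pfn_to_page() test,
and the whole buffer is assumed to be uniformly backed):

	/* Sketch; assumes the buffer isn't a mix of backing types. */
	if (!is_vmalloc_addr(info->screen_buffer)) {
		unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;

		if (WARN_ON(!pfn_valid(pfn)))
			use_deferred_io = false;
	}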

I worry about drivers that a) are on very special platforms where our dma
memory might not be page backed and b) use manual upload like over i2c or
spi. That sounds rather like an embedded use-case combo ...

With something like that added:

Reviewed-by: Simona Vetter <simona.vetter@ffwll.ch>

Cheers, Sima
> +		fb_helper->fbdefio.delay = HZ / 20;
> +		fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
> +
> +		info->fbdefio = &fb_helper->fbdefio;
> +		ret = fb_deferred_io_init(info);
> +		if (ret)
> +			goto err_drm_fb_helper_release_info;
> +	}
>  
>  	return 0;
>  
> -- 
> 2.46.0
>
Thomas Zimmermann Sept. 4, 2024, 11:16 a.m. UTC | #2
Hi

On 04.09.24 11:59, Simona Vetter wrote:
> On Wed, Sep 04, 2024 at 11:31:23AM +0200, Thomas Zimmermann wrote:
>> Deferred I/O requires struct page for framebuffer memory, which is
>> not guaranteed for all DMA ranges. We thus only install deferred I/O
>> if we have a framebuffer that requires it.
>>
>> A reported bug affected the ipu-v3 and pl111 drivers, which have video
>> memory in either Normal or HighMem zones
>>
>> [    0.000000] Zone ranges:
>> [    0.000000]   Normal   [mem 0x0000000010000000-0x000000003fffffff]
>> [    0.000000]   HighMem  [mem 0x0000000040000000-0x000000004fffffff]
>>
>> where deferred I/O only works correctly with HighMem. See the Closes
>> tags for bug reports.
>>
>> Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
>> Fixes: 808a40b69468 ("drm/fbdev-dma: Implement damage handling and deferred I/O")
>> Reported-by: Alexander Stein <alexander.stein@ew.tq-group.com>
>> Closes: https://lore.kernel.org/all/23636953.6Emhk5qWAg@steina-w/
>> Reported-by: Linus Walleij <linus.walleij@linaro.org>
>> Closes: https://lore.kernel.org/dri-devel/CACRpkdb+hb9AGavbWpY-=uQQ0apY9en_tWJioPKf_fAbXMP4Hg@mail.gmail.com/
>> Tested-by: Alexander Stein <alexander.stein@ew.tq-group.com>
>> Cc: Thomas Zimmermann <tzimmermann@suse.de>
>> Cc: Javier Martinez Canillas <javierm@redhat.com>
>> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
>> Cc: Maxime Ripard <mripard@kernel.org>
> I guess yet another reason we should look into vma-interception using
> mkwrite and read-only ptes, but that's a lot of typing, and I think this
> should work in the interim at least.
>
>> ---
>>   drivers/gpu/drm/drm_fbdev_dma.c | 71 ++++++++++++++++++++++++---------
>>   1 file changed, 52 insertions(+), 19 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
>> index 7ef5a48c8029..455dc48d17a7 100644
>> --- a/drivers/gpu/drm/drm_fbdev_dma.c
>> +++ b/drivers/gpu/drm/drm_fbdev_dma.c
>> @@ -36,20 +36,11 @@ static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
>>   	return 0;
>>   }
>>   
>> -FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
>> -				   drm_fb_helper_damage_range,
>> -				   drm_fb_helper_damage_area);
>> -
>>   static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
>>   {
>>   	struct drm_fb_helper *fb_helper = info->par;
>> -	struct drm_framebuffer *fb = fb_helper->fb;
>> -	struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
>>   
>> -	if (!dma->map_noncoherent)
>> -		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
>> -
>> -	return fb_deferred_io_mmap(info, vma);
>> +	return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
>>   }
>>   
>>   static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
>> @@ -70,13 +61,40 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
>>   }
>>   
>>   static const struct fb_ops drm_fbdev_dma_fb_ops = {
>> +	.owner = THIS_MODULE,
>> +	.fb_open = drm_fbdev_dma_fb_open,
>> +	.fb_release = drm_fbdev_dma_fb_release,
>> +	__FB_DEFAULT_DMAMEM_OPS_RDWR,
>> +	DRM_FB_HELPER_DEFAULT_OPS,
>> +	__FB_DEFAULT_DMAMEM_OPS_DRAW,
>> +	.fb_mmap = drm_fbdev_dma_fb_mmap,
>> +	.fb_destroy = drm_fbdev_dma_fb_destroy,
>> +};
>> +
>> +FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
>> +				   drm_fb_helper_damage_range,
>> +				   drm_fb_helper_damage_area);
>> +
>> +static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
>> +{
>> +	struct drm_fb_helper *fb_helper = info->par;
>> +	struct drm_framebuffer *fb = fb_helper->fb;
>> +	struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
>> +
>> +	if (!dma->map_noncoherent)
>> +		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
>> +
>> +	return fb_deferred_io_mmap(info, vma);
>> +}
>> +
>> +static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
>>   	.owner = THIS_MODULE,
>>   	.fb_open = drm_fbdev_dma_fb_open,
>>   	.fb_release = drm_fbdev_dma_fb_release,
>>   	__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
>>   	DRM_FB_HELPER_DEFAULT_OPS,
>>   	__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
>> -	.fb_mmap = drm_fbdev_dma_fb_mmap,
>> +	.fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
>>   	.fb_destroy = drm_fbdev_dma_fb_destroy,
>>   };
>>   
>> @@ -89,6 +107,7 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
>>   {
>>   	struct drm_client_dev *client = &fb_helper->client;
>>   	struct drm_device *dev = fb_helper->dev;
>> +	bool use_deferred_io = false;
>>   	struct drm_client_buffer *buffer;
>>   	struct drm_gem_dma_object *dma_obj;
>>   	struct drm_framebuffer *fb;
>> @@ -111,6 +130,15 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
>>   
>>   	fb = buffer->fb;
>>   
>> +	/*
>> +	 * Deferred I/O requires struct page for framebuffer memory,
>> +	 * which is not guaranteed for all DMA ranges. We thus only
>> +	 * install deferred I/O if we have a framebuffer that requires
>> +	 * it.
>> +	 */
>> +	if (fb->funcs->dirty)
>> +		use_deferred_io = true;
>> +
>>   	ret = drm_client_buffer_vmap(buffer, &map);
>>   	if (ret) {
>>   		goto err_drm_client_buffer_delete;
>> @@ -130,7 +158,10 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
>>   
>>   	drm_fb_helper_fill_info(info, fb_helper, sizes);
>>   
>> -	info->fbops = &drm_fbdev_dma_fb_ops;
>> +	if (use_deferred_io)
>> +		info->fbops = &drm_fbdev_dma_deferred_fb_ops;
>> +	else
>> +		info->fbops = &drm_fbdev_dma_fb_ops;
>>   
>>   	/* screen */
>>   	info->flags |= FBINFO_VIRTFB; /* system memory */
>> @@ -145,13 +176,15 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
>>   	info->fix.smem_len = info->screen_size;
>>   
>>   	/* deferred I/O */
>> -	fb_helper->fbdefio.delay = HZ / 20;
>> -	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
>> -
>> -	info->fbdefio = &fb_helper->fbdefio;
>> -	ret = fb_deferred_io_init(info);
>> -	if (ret)
>> -		goto err_drm_fb_helper_release_info;
>> +	if (use_deferred_io) {
> I think a check here that roughly matches what fb_deferred_io_get_page
> does would be good. Specifically this part
>
> 	if (is_vmalloc_addr(screen_buffer + offs))
> 		page = vmalloc_to_page(screen_buffer + offs);
> 	else if (info->fix.smem_start)
> 		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);
>
> So maybe something like:
>
> 	if (!is_vmalloc_addr(screen_buffer))
> 		if (WARN_ON(!pfn_to_page(...)))
> 			use_deferred_io = false;
>
> With maybe a comment that we assume buffers aren't a hilarious mix?

Ok.

>
> I worry about drivers that a) are on very special platforms where our dma
> memory might not be page backed and b) use manual upload like over i2c or
> spi. That sounds rather like an embedded use-case combo ...

The change here is just a quick fix; luckily we already have a proper
solution. You've probably seen the rework of the client setup in [1].
That series adds struct drm_driver.fbdev_probe, which will then
implement fbdev buffer creation. In the next iteration, I plan to
provide one implementation for DMA BOs with deferred I/O and another
without, so each driver can select the one it requires. In the worst
case, a driver can provide its own implementation.

[1] https://patchwork.freedesktop.org/series/137391/
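
To sketch the idea (hypothetical names, the series is still in flux): a
driver with manual-upload hardware would then select the deferred-I/O
variant through its struct drm_driver, roughly like

	/* Hypothetical sketch; the final helper names in [1] may differ. */
	static const struct drm_driver foo_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
		/* manual-upload hardware: needs deferred I/O */
		.fbdev_probe = drm_fbdev_dma_driver_fbdev_probe_deferred,
		/* ... */
	};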

>
> With something like that added:
>
> Reviewed-by: Simona Vetter <simona.vetter@ffwll.ch>

Thanks for reviewing.

Best regards
Thomas

>
> Cheers, Sima
>> +		fb_helper->fbdefio.delay = HZ / 20;
>> +		fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
>> +
>> +		info->fbdefio = &fb_helper->fbdefio;
>> +		ret = fb_deferred_io_init(info);
>> +		if (ret)
>> +			goto err_drm_fb_helper_release_info;
>> +	}
>>   
>>   	return 0;
>>   
>> -- 
>> 2.46.0
>>
Linus Walleij Sept. 4, 2024, 12:25 p.m. UTC | #3
Hi Thomas,

quick response as always! :)

On Wed, Sep 4, 2024 at 11:31 AM Thomas Zimmermann <tzimmermann@suse.de> wrote:

> Deferred I/O requires struct page for framebuffer memory, which is
> not guaranteed for all DMA ranges. We thus only install deferred I/O
> if we have a framebuffer that requires it.
>
> A reported bug affected the ipu-v3 and pl111 drivers, which have video
> memory in either Normal or HighMem zones
>
> [    0.000000] Zone ranges:
> [    0.000000]   Normal   [mem 0x0000000010000000-0x000000003fffffff]
> [    0.000000]   HighMem  [mem 0x0000000040000000-0x000000004fffffff]
>
> where deferred I/O only works correctly with HighMem. See the Closes
> tags for bug reports.
>
> Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
> Fixes: 808a40b69468 ("drm/fbdev-dma: Implement damage handling and deferred I/O")
> Reported-by: Alexander Stein <alexander.stein@ew.tq-group.com>
> Closes: https://lore.kernel.org/all/23636953.6Emhk5qWAg@steina-w/
> Reported-by: Linus Walleij <linus.walleij@linaro.org>
> Closes: https://lore.kernel.org/dri-devel/CACRpkdb+hb9AGavbWpY-=uQQ0apY9en_tWJioPKf_fAbXMP4Hg@mail.gmail.com/
> Tested-by: Alexander Stein <alexander.stein@ew.tq-group.com>
> Cc: Thomas Zimmermann <tzimmermann@suse.de>
> Cc: Javier Martinez Canillas <javierm@redhat.com>
> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
> Cc: Maxime Ripard <mripard@kernel.org>

It works like a charm and LGTM.
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>

Yours,
Linus Walleij

Patch

diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index 7ef5a48c8029..455dc48d17a7 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -36,20 +36,11 @@ static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
 	return 0;
 }
 
-FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
-				   drm_fb_helper_damage_range,
-				   drm_fb_helper_damage_area);
-
 static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
 	struct drm_fb_helper *fb_helper = info->par;
-	struct drm_framebuffer *fb = fb_helper->fb;
-	struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
 
-	if (!dma->map_noncoherent)
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
-	return fb_deferred_io_mmap(info, vma);
+	return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
 }
 
 static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
@@ -70,13 +61,40 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
 }
 
 static const struct fb_ops drm_fbdev_dma_fb_ops = {
+	.owner = THIS_MODULE,
+	.fb_open = drm_fbdev_dma_fb_open,
+	.fb_release = drm_fbdev_dma_fb_release,
+	__FB_DEFAULT_DMAMEM_OPS_RDWR,
+	DRM_FB_HELPER_DEFAULT_OPS,
+	__FB_DEFAULT_DMAMEM_OPS_DRAW,
+	.fb_mmap = drm_fbdev_dma_fb_mmap,
+	.fb_destroy = drm_fbdev_dma_fb_destroy,
+};
+
+FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
+				   drm_fb_helper_damage_range,
+				   drm_fb_helper_damage_area);
+
+static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_framebuffer *fb = fb_helper->fb;
+	struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
+
+	if (!dma->map_noncoherent)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	return fb_deferred_io_mmap(info, vma);
+}
+
+static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
 	.owner = THIS_MODULE,
 	.fb_open = drm_fbdev_dma_fb_open,
 	.fb_release = drm_fbdev_dma_fb_release,
 	__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
 	DRM_FB_HELPER_DEFAULT_OPS,
 	__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
-	.fb_mmap = drm_fbdev_dma_fb_mmap,
+	.fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
 	.fb_destroy = drm_fbdev_dma_fb_destroy,
 };
 
@@ -89,6 +107,7 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
 {
 	struct drm_client_dev *client = &fb_helper->client;
 	struct drm_device *dev = fb_helper->dev;
+	bool use_deferred_io = false;
 	struct drm_client_buffer *buffer;
 	struct drm_gem_dma_object *dma_obj;
 	struct drm_framebuffer *fb;
@@ -111,6 +130,15 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
 
 	fb = buffer->fb;
 
+	/*
+	 * Deferred I/O requires struct page for framebuffer memory,
+	 * which is not guaranteed for all DMA ranges. We thus only
+	 * install deferred I/O if we have a framebuffer that requires
+	 * it.
+	 */
+	if (fb->funcs->dirty)
+		use_deferred_io = true;
+
 	ret = drm_client_buffer_vmap(buffer, &map);
 	if (ret) {
 		goto err_drm_client_buffer_delete;
@@ -130,7 +158,10 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
 
 	drm_fb_helper_fill_info(info, fb_helper, sizes);
 
-	info->fbops = &drm_fbdev_dma_fb_ops;
+	if (use_deferred_io)
+		info->fbops = &drm_fbdev_dma_deferred_fb_ops;
+	else
+		info->fbops = &drm_fbdev_dma_fb_ops;
 
 	/* screen */
 	info->flags |= FBINFO_VIRTFB; /* system memory */
@@ -145,13 +176,15 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
 	info->fix.smem_len = info->screen_size;
 
 	/* deferred I/O */
-	fb_helper->fbdefio.delay = HZ / 20;
-	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
-
-	info->fbdefio = &fb_helper->fbdefio;
-	ret = fb_deferred_io_init(info);
-	if (ret)
-		goto err_drm_fb_helper_release_info;
+	if (use_deferred_io) {
+		fb_helper->fbdefio.delay = HZ / 20;
+		fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+		info->fbdefio = &fb_helper->fbdefio;
+		ret = fb_deferred_io_init(info);
+		if (ret)
+			goto err_drm_fb_helper_release_info;
+	}
 
 	return 0;