[5/9] drm/fb-helper: Separate deferred I/O from shadow buffers

Message ID 20220303205839.28484-6-tzimmermann@suse.de (mailing list archive)
State Handled Elsewhere
Series drm: Support GEM SHMEM fbdev without shadow FB

Commit Message

Thomas Zimmermann March 3, 2022, 8:58 p.m. UTC
DRM drivers will be able to handle deferred I/O by themselves, so a
driver can use deferred I/O without an intermediate shadow buffer.

Prepare fbdev emulation by separating shadow buffers and deferred
I/O from each other.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
---
 drivers/gpu/drm/drm_fb_helper.c | 35 ++++++++++++++++++++++++++++-----
 1 file changed, 30 insertions(+), 5 deletions(-)
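
For context, fbdev deferred I/O is configured by attaching a struct
fb_deferred_io to the fb_info and calling fb_deferred_io_init(); the patch
below moves that setup so it is keyed to deferred-I/O usage rather than to
the shadow buffer alone. A minimal sketch of the generic wiring, independent
of this patch (my_deferred_io, my_defio and my_setup_deferred_io are
placeholder names, and the callback signature is the one fbdev used around
this kernel version):

#include <linux/fb.h>
#include <linux/jiffies.h>

/*
 * Placeholder callback: invoked once per deferral interval with the list
 * of pages userspace touched since the previous invocation.
 */
static void my_deferred_io(struct fb_info *info, struct list_head *pagelist)
{
	/* walk pagelist and copy/flush the dirty framebuffer regions */
}

static struct fb_deferred_io my_defio = {
	.delay		= HZ / 20,	/* batch writes for roughly 50 ms */
	.deferred_io	= my_deferred_io,
};

static void my_setup_deferred_io(struct fb_info *info)
{
	info->fbdefio = &my_defio;
	fb_deferred_io_init(info);	/* undone with fb_deferred_io_cleanup() */
}

The delay exists so the callback sees a batch of touched pages per interval
instead of being run for every single write to the mmap'ed framebuffer.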

Comments

Javier Martinez Canillas March 8, 2022, 5:24 p.m. UTC | #1
On 3/3/22 21:58, Thomas Zimmermann wrote:
> DRM drivers will be able to handle deferred I/O by themselves, so a
> driver can use deferred I/O without an intermediate shadow buffer.
> 
> Prepare fbdev emulation by separating shadow buffers and deferred
> I/O from each other.
> 
> Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
> ---
Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>

Patch

diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index dd1d72d58b35..660ec5038c4e 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -660,6 +660,19 @@  static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
 	       fb->funcs->dirty;
 }
 
+static bool drm_fbdev_use_deferred_io(struct drm_fb_helper *fb_helper)
+{
+	struct drm_framebuffer *fb = fb_helper->fb;
+
+	/*
+	 * Any driver with damage handling requires deferred I/O to
+	 * keep track of the updated screen areas. Drivers with shadow
+	 * buffers need deferred I/O to forward screen updates to the
+	 * buffer object.
+	 */
+	return fb->funcs->dirty || drm_fbdev_use_shadow_fb(fb_helper);
+}
+
 static void drm_fb_helper_damage(struct fb_info *info, u32 x, u32 y,
 				 u32 width, u32 height)
 {
@@ -667,7 +680,7 @@  static void drm_fb_helper_damage(struct fb_info *info, u32 x, u32 y,
 	struct drm_clip_rect *clip = &helper->damage_clip;
 	unsigned long flags;
 
-	if (!drm_fbdev_use_shadow_fb(helper))
+	if (!drm_fbdev_use_deferred_io(helper))
 		return;
 
 	spin_lock_irqsave(&helper->damage_lock, flags);
@@ -2119,8 +2132,16 @@  static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	struct drm_fb_helper *fb_helper = info->par;
 
 	if (drm_fbdev_use_shadow_fb(fb_helper))
+		/*
+		 * Drivers with a shadow buffer use fbdev's deferred I/O
+		 * implementation.
+		 */
 		return fb_deferred_io_mmap(info, vma);
 	else if (fb_helper->dev->driver->gem_prime_mmap)
+		/*
+		 * Mapped directly or through driver-side deferred I/O;
+		 * drivers without a shadow buffer handle mmap themselves.
+		 */
 		return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
 	else
 		return -ENODEV;
@@ -2131,7 +2152,9 @@  static bool drm_fbdev_use_iomem(struct fb_info *info)
 	struct drm_fb_helper *fb_helper = info->par;
 	struct drm_client_buffer *buffer = fb_helper->buffer;
 
-	return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem;
+	if (drm_fbdev_use_shadow_fb(fb_helper))
+		return false;
+	return buffer->map.is_iomem;
 }
 
 static ssize_t fb_read_screen_base(struct fb_info *info, char __user *buf, size_t count,
@@ -2396,9 +2419,6 @@  static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
 		if (!fbi->screen_buffer)
 			return -ENOMEM;
 		fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
-
-		fbi->fbdefio = &drm_fbdev_defio;
-		fb_deferred_io_init(fbi);
 	} else {
 		/* buffer is mapped for HW framebuffer */
 		ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
@@ -2424,6 +2444,11 @@  static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
 #endif
 	}
 
+	if (drm_fbdev_use_deferred_io(fb_helper)) {
+		fbi->fbdefio = &drm_fbdev_defio;
+		fb_deferred_io_init(fbi);
+	}
+
 	return 0;
 }
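
For reference, the hunks above only show the last line of
drm_fbdev_use_shadow_fb(). Around this kernel version the helper reads
roughly as follows (reconstructed from memory, so treat the body as an
approximation rather than a verbatim quote):

static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_framebuffer *fb = fb_helper->fb;

	/* shadow buffer if the driver prefers one or has a dirty callback */
	return dev->mode_config.prefer_shadow_fbdev ||
	       dev->mode_config.prefer_shadow ||
	       fb->funcs->dirty;
}

At this point in the series a dirty callback still implies a shadow buffer,
so behaviour does not change yet; the new drm_fbdev_use_deferred_io() helper
prepares for later patches that let such drivers drop the shadow buffer
while keeping deferred I/O for damage tracking and gem_prime_mmap for
userspace mappings.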