Message ID | 20230712224424.30158-3-dongwon.kim@intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | drm/virtio: synchronous guest framebuffer update | expand |
... > +static struct > +drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane) > +{ > + struct virtio_gpu_plane_state *new; > + > + if (WARN_ON(!plane->state)) > + return NULL; When plane->state can be NULL? > + new = kzalloc(sizeof(*new), GFP_KERNEL); > + if (!new) > + return NULL; > + > + __drm_atomic_helper_plane_duplicate_state(plane, &new->base); > + > + return &new->base; > +} > + > +static void virtio_gpu_plane_destroy_state(struct drm_plane *plane, > + struct drm_plane_state *state) > +{ > + __drm_atomic_helper_plane_destroy_state(state); > + kfree(to_virtio_gpu_plane_state(state)); > +} > + > static const struct drm_plane_funcs virtio_gpu_plane_funcs = { > .update_plane = drm_atomic_helper_update_plane, > .disable_plane = drm_atomic_helper_disable_plane, > .reset = drm_atomic_helper_plane_reset, > - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, > - .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, > + .atomic_duplicate_state = virtio_gpu_plane_duplicate_state, > + .atomic_destroy_state = virtio_gpu_plane_destroy_state, Similar to the other email, please see how container_of() works. There is no need change .atomic_destroy_state ... 
> @@ -237,41 +262,29 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane, > struct drm_device *dev = plane->dev; > struct virtio_gpu_device *vgdev = dev->dev_private; > struct virtio_gpu_framebuffer *vgfb; > + struct virtio_gpu_plane_state *vgplane_st; > struct virtio_gpu_object *bo; > > if (!new_state->fb) > return 0; > > vgfb = to_virtio_gpu_framebuffer(new_state->fb); > + vgplane_st = to_virtio_gpu_plane_state(new_state); > bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); > if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob)) > return 0; > > - if (bo->dumb && (plane->state->fb != new_state->fb)) { > - vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, > + if (bo->dumb) { Why "&& (plane->state->fb != new_state->fb)" disappeared? > + vgplane_st->fence = virtio_gpu_fence_alloc(vgdev, > + vgdev->fence_drv.context, > 0); > - if (!vgfb->fence) > + if (!vgplane_st->fence) > return -ENOMEM; > } > > return 0; > } > > -static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane, > - struct drm_plane_state *state) > -{ > - struct virtio_gpu_framebuffer *vgfb; > - > - if (!state->fb) > - return; > - > - vgfb = to_virtio_gpu_framebuffer(state->fb); > - if (vgfb->fence) { > - dma_fence_put(&vgfb->fence->f); > - vgfb->fence = NULL; > - } > -} How come that virtio_gpu_plane_cleanup_fb() isn't needed anymore? You created fence in prepare_fb(), you must release it in cleanup_fb() if fence still presents.
On 8/17/2023 7:21 PM, Dmitry Osipenko wrote: > ... >> +static struct >> +drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane) >> +{ >> + struct virtio_gpu_plane_state *new; >> + >> + if (WARN_ON(!plane->state)) >> + return NULL; > When plane->state can be NULL? Honestly this error check is from another drm driver. I am not sure if there is *any* case where this will hit. But wouldn't it safe to make sure this is not NULL here? >> + new = kzalloc(sizeof(*new), GFP_KERNEL); >> + if (!new) >> + return NULL; >> + >> + __drm_atomic_helper_plane_duplicate_state(plane, &new->base); >> + >> + return &new->base; >> +} >> + >> +static void virtio_gpu_plane_destroy_state(struct drm_plane *plane, >> + struct drm_plane_state *state) >> +{ >> + __drm_atomic_helper_plane_destroy_state(state); >> + kfree(to_virtio_gpu_plane_state(state)); >> +} >> + >> static const struct drm_plane_funcs virtio_gpu_plane_funcs = { >> .update_plane = drm_atomic_helper_update_plane, >> .disable_plane = drm_atomic_helper_disable_plane, >> .reset = drm_atomic_helper_plane_reset, >> - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, >> - .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, >> + .atomic_duplicate_state = virtio_gpu_plane_duplicate_state, >> + .atomic_destroy_state = virtio_gpu_plane_destroy_state, > Similar to the other email, please see how container_of() works. There > is no need change .atomic_destroy_state Thanks again for pointing this out. I will fix this. > > ... 
>> @@ -237,41 +262,29 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane, >> struct drm_device *dev = plane->dev; >> struct virtio_gpu_device *vgdev = dev->dev_private; >> struct virtio_gpu_framebuffer *vgfb; >> + struct virtio_gpu_plane_state *vgplane_st; >> struct virtio_gpu_object *bo; >> >> if (!new_state->fb) >> return 0; >> >> vgfb = to_virtio_gpu_framebuffer(new_state->fb); >> + vgplane_st = to_virtio_gpu_plane_state(new_state); >> bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); >> if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob)) >> return 0; >> >> - if (bo->dumb && (plane->state->fb != new_state->fb)) { >> - vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, >> + if (bo->dumb) { > Why "&& (plane->state->fb != new_state->fb)" disappeared? Because same FB could be flushed again and fence is needed for synchronous operation in such case. > >> + vgplane_st->fence = virtio_gpu_fence_alloc(vgdev, >> + vgdev->fence_drv.context, >> 0); >> - if (!vgfb->fence) >> + if (!vgplane_st->fence) >> return -ENOMEM; >> } >> >> return 0; >> } >> >> -static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane, >> - struct drm_plane_state *state) >> -{ >> - struct virtio_gpu_framebuffer *vgfb; >> - >> - if (!state->fb) >> - return; >> - >> - vgfb = to_virtio_gpu_framebuffer(state->fb); >> - if (vgfb->fence) { >> - dma_fence_put(&vgfb->fence->f); >> - vgfb->fence = NULL; >> - } >> -} > How come that virtio_gpu_plane_cleanup_fb() isn't needed anymore? You > created fence in prepare_fb(), you must release it in cleanup_fb() if > fence still presents. This fence is put during virt-gpu dequeuing process (the response is received) and eventually released after resource flush is done. virtio_gpu_notify(vgdev); dma_fence_wait_timeout(&vgplane_st->fence->f, true, msecs_to_jiffies(50)); dma_fence_put(&vgplane_st->fence->f); But even so, I guess what you said makes sense ("if fence still presents"). I will check this again. >
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 4126c384286b..61fd37f95fbd 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -191,6 +191,13 @@ struct virtio_gpu_framebuffer { #define to_virtio_gpu_framebuffer(x) \ container_of(x, struct virtio_gpu_framebuffer, base) +struct virtio_gpu_plane_state { + struct drm_plane_state base; + struct virtio_gpu_fence *fence; +}; +#define to_virtio_gpu_plane_state(x) \ + container_of(x, struct virtio_gpu_plane_state, base) + struct virtio_gpu_queue { struct virtqueue *vq; spinlock_t qlock; diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index a2e045f3a000..a063f06ab6c5 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -66,12 +66,36 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc) return format; } +static struct +drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane) +{ + struct virtio_gpu_plane_state *new; + + if (WARN_ON(!plane->state)) + return NULL; + + new = kzalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return NULL; + + __drm_atomic_helper_plane_duplicate_state(plane, &new->base); + + return &new->base; +} + +static void virtio_gpu_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + __drm_atomic_helper_plane_destroy_state(state); + kfree(to_virtio_gpu_plane_state(state)); +} + static const struct drm_plane_funcs virtio_gpu_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .reset = drm_atomic_helper_plane_reset, - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .atomic_duplicate_state = virtio_gpu_plane_duplicate_state, + .atomic_destroy_state = virtio_gpu_plane_destroy_state, }; static int virtio_gpu_plane_atomic_check(struct 
drm_plane *plane, @@ -128,11 +152,13 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane, struct drm_device *dev = plane->dev; struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_framebuffer *vgfb; + struct virtio_gpu_plane_state *vgplane_st; struct virtio_gpu_object *bo; vgfb = to_virtio_gpu_framebuffer(plane->state->fb); + vgplane_st = to_virtio_gpu_plane_state(plane->state); bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); - if (vgfb->fence) { + if (vgplane_st->fence) { struct virtio_gpu_object_array *objs; objs = virtio_gpu_array_alloc(1); @@ -141,13 +167,12 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane, virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]); virtio_gpu_array_lock_resv(objs); virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y, - width, height, objs, vgfb->fence); + width, height, objs, + vgplane_st->fence); virtio_gpu_notify(vgdev); - - dma_fence_wait_timeout(&vgfb->fence->f, true, + dma_fence_wait_timeout(&vgplane_st->fence->f, true, msecs_to_jiffies(50)); - dma_fence_put(&vgfb->fence->f); - vgfb->fence = NULL; + dma_fence_put(&vgplane_st->fence->f); } else { virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y, width, height, NULL, NULL); @@ -237,41 +262,29 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane, struct drm_device *dev = plane->dev; struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_framebuffer *vgfb; + struct virtio_gpu_plane_state *vgplane_st; struct virtio_gpu_object *bo; if (!new_state->fb) return 0; vgfb = to_virtio_gpu_framebuffer(new_state->fb); + vgplane_st = to_virtio_gpu_plane_state(new_state); bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob)) return 0; - if (bo->dumb && (plane->state->fb != new_state->fb)) { - vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, + if (bo->dumb) { + vgplane_st->fence = virtio_gpu_fence_alloc(vgdev, + 
vgdev->fence_drv.context, 0); - if (!vgfb->fence) + if (!vgplane_st->fence) return -ENOMEM; } return 0; } -static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *state) -{ - struct virtio_gpu_framebuffer *vgfb; - - if (!state->fb) - return; - - vgfb = to_virtio_gpu_framebuffer(state->fb); - if (vgfb->fence) { - dma_fence_put(&vgfb->fence->f); - vgfb->fence = NULL; - } -} - static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, struct drm_atomic_state *state) { @@ -281,6 +294,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_output *output = NULL; struct virtio_gpu_framebuffer *vgfb; + struct virtio_gpu_plane_state *vgplane_st; struct virtio_gpu_object *bo = NULL; uint32_t handle; @@ -293,6 +307,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, if (plane->state->fb) { vgfb = to_virtio_gpu_framebuffer(plane->state->fb); + vgplane_st = to_virtio_gpu_plane_state(plane->state); bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); handle = bo->hw_res_handle; } else { @@ -312,11 +327,10 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, (vgdev, 0, plane->state->crtc_w, plane->state->crtc_h, - 0, 0, objs, vgfb->fence); + 0, 0, objs, vgplane_st->fence); virtio_gpu_notify(vgdev); - dma_fence_wait(&vgfb->fence->f, true); - dma_fence_put(&vgfb->fence->f); - vgfb->fence = NULL; + dma_fence_wait(&vgplane_st->fence->f, true); + dma_fence_put(&vgplane_st->fence->f); } if (plane->state->fb != old_state->fb) { @@ -351,14 +365,12 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = { .prepare_fb = virtio_gpu_plane_prepare_fb, - .cleanup_fb = virtio_gpu_plane_cleanup_fb, .atomic_check = virtio_gpu_plane_atomic_check, .atomic_update = virtio_gpu_primary_plane_update, }; static const struct drm_plane_helper_funcs 
virtio_gpu_cursor_helper_funcs = { .prepare_fb = virtio_gpu_plane_prepare_fb, - .cleanup_fb = virtio_gpu_plane_cleanup_fb, .atomic_check = virtio_gpu_plane_atomic_check, .atomic_update = virtio_gpu_cursor_plane_update, };
Having a fence linked to a virtio_gpu_framebuffer in the plane update sequence would cause a conflict when several planes reference the same framebuffer, especially when those planes are updated concurrently (e.g. an Xorg screen covering multiple displays configured for an extended mode). So it is better for the fence to be created for every plane update event and then linked to the plane state, since each plane update comes with a new plane state obj. The plane state for virtio-gpu, "struct virtio_gpu_plane_state", is added for this. This structure represents drm_plane_state and it contains the reference to virtio_gpu_fence, which was previously in "struct virtio_gpu_framebuffer". "virtio_gpu_plane_duplicate_state" and "virtio_gpu_plane_destroy_state" were added as well to manage virtio_gpu_plane_state. Several drm helpers were slightly modified accordingly to use the fence in the new plane state structure. virtio_gpu_plane_cleanup_fb was completely removed as none of the code in the function is required anymore. Also, the condition for adding the fence, (plane->state->fb != new_state->fb), was removed so that the FB update is synchronous even when the same FB is flushed again consecutively. Cc: Gerd Hoffmann <kraxel@redhat.com> Cc: Vivek Kasireddy <vivek.kasireddy@intel.com> Signed-off-by: Dongwon Kim <dongwon.kim@intel.com> --- drivers/gpu/drm/virtio/virtgpu_drv.h | 7 +++ drivers/gpu/drm/virtio/virtgpu_plane.c | 76 +++++++++++++++----------- 2 files changed, 51 insertions(+), 32 deletions(-)