Message ID | 20190702141903.1131-16-kraxel@redhat.com (mailing list archive)
---|---
State | New, archived
Series | drm/virtio: switch from ttm to gem shmem helpers.

On Tue, Jul 2, 2019 at 7:19 AM Gerd Hoffmann <kraxel@redhat.com> wrote:
>
> Switch to the virtio_gpu_array_* helper workflow.
>
> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
> ---
>  drivers/gpu/drm/virtio/virtgpu_drv.h   |  2 +-
>  drivers/gpu/drm/virtio/virtgpu_ioctl.c | 43 ++++++++++++--------------
>  drivers/gpu/drm/virtio/virtgpu_vq.c    |  5 ++-
>  3 files changed, 25 insertions(+), 25 deletions(-)
>
> diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
> index 4df760ba018e..b1f63a21abb6 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_drv.h
> +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
> @@ -308,10 +308,10 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
>                                           struct virtio_gpu_object_array *objs,
>                                           struct virtio_gpu_fence *fence);
>  void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
> -                                        struct virtio_gpu_object *bo,
>                                          uint32_t ctx_id,
>                                          uint64_t offset, uint32_t level,
>                                          struct virtio_gpu_box *box,
> +                                        struct virtio_gpu_object_array *objs,
>                                          struct virtio_gpu_fence *fence);
>  void
>  virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
> diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
> index 56182abdbf36..b220918df6a1 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
> @@ -341,47 +341,44 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
>         struct virtio_gpu_device *vgdev = dev->dev_private;
>         struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
>         struct drm_virtgpu_3d_transfer_to_host *args = data;
> -       struct drm_gem_object *gobj = NULL;
> -       struct virtio_gpu_object *qobj = NULL;
> +       struct virtio_gpu_object_array *objs;
>         struct virtio_gpu_fence *fence;
>         struct virtio_gpu_box box;
>         int ret;
>         u32 offset = args->offset;
>
> -       gobj = drm_gem_object_lookup(file, args->bo_handle);
> -       if (gobj == NULL)
> +       objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
> +       if (objs == NULL)
>                 return -ENOENT;
>
> -       qobj = gem_to_virtio_gpu_obj(gobj);
> -
> -       ret = virtio_gpu_object_reserve(qobj);
> -       if (ret)
> -               goto out;
> -
>         convert_to_hw_box(&box, &args->box);
>         if (!vgdev->has_virgl_3d) {
>                 virtio_gpu_cmd_transfer_to_host_2d
> -                       (vgdev, qobj, offset,
> +                       (vgdev, gem_to_virtio_gpu_obj(objs->objs[0]), offset,
>                          box.w, box.h, box.x, box.y, NULL);
> +               virtio_gpu_array_put_free(objs);
Don't we need this in non-3D case as well?
>         } else {
> +               ret = virtio_gpu_array_lock_resv(objs);
> +               if (ret != 0)
> +                       goto err_put_free;
> +
> +               ret = -ENOMEM;
>                 fence = virtio_gpu_fence_alloc(vgdev);
> -               if (!fence) {
> -                       ret = -ENOMEM;
> -                       goto out_unres;
> -               }
> +               if (!fence)
> +                       goto err_unlock;
> +
>                 virtio_gpu_cmd_transfer_to_host_3d
> -                       (vgdev, qobj,
> +                       (vgdev,
>                          vfpriv ? vfpriv->ctx_id : 0, offset,
> -                        args->level, &box, fence);
> -               reservation_object_add_excl_fence(qobj->base.base.resv,
> -                                                 &fence->f);
> +                        args->level, &box, objs, fence);
>                 dma_fence_put(&fence->f);
>         }
> +       return 0;
>
> -out_unres:
> -       virtio_gpu_object_unreserve(qobj);
> -out:
> -       drm_gem_object_put_unlocked(gobj);
> +err_unlock:
> +       virtio_gpu_array_unlock_resv(objs);
> +err_put_free:
> +       virtio_gpu_array_put_free(objs);
>         return ret;
>  }
>
> diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
> index bef7036f4508..1c0097de419a 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_vq.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
> @@ -899,12 +899,13 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
>  }
>
>  void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
> -                                       struct virtio_gpu_object *bo,
>                                         uint32_t ctx_id,
>                                         uint64_t offset, uint32_t level,
>                                         struct virtio_gpu_box *box,
> +                                       struct virtio_gpu_object_array *objs,
>                                         struct virtio_gpu_fence *fence)
>  {
> +       struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
>         struct virtio_gpu_transfer_host_3d *cmd_p;
>         struct virtio_gpu_vbuffer *vbuf;
>         bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
> @@ -917,6 +918,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
>         cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
>         memset(cmd_p, 0, sizeof(*cmd_p));
>
> +       vbuf->objs = objs;
> +
>         cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
>         cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
>         cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
> --
> 2.18.1
>

Hi,

> >         convert_to_hw_box(&box, &args->box);
> >         if (!vgdev->has_virgl_3d) {
> >                 virtio_gpu_cmd_transfer_to_host_2d
> > -                       (vgdev, qobj, offset,
> > +                       (vgdev, gem_to_virtio_gpu_obj(objs->objs[0]), offset,
> >                          box.w, box.h, box.x, box.y, NULL);
> > +               virtio_gpu_array_put_free(objs);
> Don't we need this in non-3D case as well?

No, ...

> >                 virtio_gpu_cmd_transfer_to_host_3d
> > -                       (vgdev, qobj,
> > +                       (vgdev,
> >                          vfpriv ? vfpriv->ctx_id : 0, offset,
> > -                        args->level, &box, fence);
> > -               reservation_object_add_excl_fence(qobj->base.base.resv,
> > -                                                 &fence->f);
> > +                        args->level, &box, objs, fence);

... 3d case passes the objs list to virtio_gpu_cmd_transfer_to_host_3d,
so it gets added to the vbuf and released when the command is finished.

cheers,
  Gerd

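For context, the lifetime handling described above works roughly like this: the ioctl looks up the objects, attaches the array to the vbuf carrying the command, and the references are dropped only after the host has processed that command. Below is a minimal sketch of the completion side using the helpers visible in this series (`vbuf->objs`, `virtio_gpu_array_put_free()`); the function name and the exact place it would be called from are assumptions, not the actual virtgpu_vq.c code.

```c
/* Hypothetical sketch of retiring a command buffer: once the response
 * for this vbuf has been handled, drop the object references that kept
 * the backing storage alive while the host was still using it. */
static void virtio_gpu_vbuf_retire(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->objs) {
                /* puts each gem object and frees the array itself */
                virtio_gpu_array_put_free(vbuf->objs);
                vbuf->objs = NULL;
        }
}
```
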
On Thu, Jul 4, 2019 at 4:51 AM Gerd Hoffmann <kraxel@redhat.com> wrote:
>
> Hi,
>
> > >         convert_to_hw_box(&box, &args->box);
> > >         if (!vgdev->has_virgl_3d) {
> > >                 virtio_gpu_cmd_transfer_to_host_2d
> > > -                       (vgdev, qobj, offset,
> > > +                       (vgdev, gem_to_virtio_gpu_obj(objs->objs[0]), offset,
> > >                          box.w, box.h, box.x, box.y, NULL);
> > > +               virtio_gpu_array_put_free(objs);
> > Don't we need this in non-3D case as well?
>
> No, ...
>
> > >                 virtio_gpu_cmd_transfer_to_host_3d
> > > -                       (vgdev, qobj,
> > > +                       (vgdev,
> > >                          vfpriv ? vfpriv->ctx_id : 0, offset,
> > > -                        args->level, &box, fence);
> > > -               reservation_object_add_excl_fence(qobj->base.base.resv,
> > > -                                                 &fence->f);
> > > +                        args->level, &box, objs, fence);
>
> ... 3d case passes the objs list to virtio_gpu_cmd_transfer_to_host_3d,
> so it gets added to the vbuf and released when the command is finished.
Why doesn't this apply to virtio_gpu_cmd_transfer_to_host_2d?

When the object array was introduced, it was said that the object array
was there to keep the objects alive until the vbuf using the objects is
retired. That sounded applicable to any vbuf that uses objects.

>
> cheers,
>   Gerd
>

On Thu, Jul 04, 2019 at 12:08:14PM -0700, Chia-I Wu wrote:
> On Thu, Jul 4, 2019 at 4:51 AM Gerd Hoffmann <kraxel@redhat.com> wrote:
> >
> > Hi,
> >
> > > >         convert_to_hw_box(&box, &args->box);
> > > >         if (!vgdev->has_virgl_3d) {
> > > >                 virtio_gpu_cmd_transfer_to_host_2d
> > > > -                       (vgdev, qobj, offset,
> > > > +                       (vgdev, gem_to_virtio_gpu_obj(objs->objs[0]), offset,
> > > >                          box.w, box.h, box.x, box.y, NULL);
> > > > +               virtio_gpu_array_put_free(objs);
> > > Don't we need this in non-3D case as well?
> >
> > No, ...
> >
> > > >                 virtio_gpu_cmd_transfer_to_host_3d
> > > > -                       (vgdev, qobj,
> > > > +                       (vgdev,
> > > >                          vfpriv ? vfpriv->ctx_id : 0, offset,
> > > > -                        args->level, &box, fence);
> > > > -               reservation_object_add_excl_fence(qobj->base.base.resv,
> > > > -                                                 &fence->f);
> > > > +                        args->level, &box, objs, fence);
> >
> > ... 3d case passes the objs list to virtio_gpu_cmd_transfer_to_host_3d,
> > so it gets added to the vbuf and released when the command is finished.
> Why doesn't this apply to virtio_gpu_cmd_transfer_to_host_2d?

Hmm, yes, makes sense to handle both the same way.

With virgl=off qemu processes the commands from the guest
synchronously, so it'll work fine as-is. So you can't hit
the theoretical race window in practice. But depending
on that host-side implementation detail isn't a good idea
indeed.

cheers,
  Gerd

> > > ... 3d case passes the objs list to virtio_gpu_cmd_transfer_to_host_3d,
> > > so it gets added to the vbuf and released when the command is finished.
> > Why doesn't this apply to virtio_gpu_cmd_transfer_to_host_2d?
>
> Hmm, yes, makes sense to handle both the same way.
>
> With virgl=off qemu processes the commands from the guest
> synchronously, so it'll work fine as-is. So you can't hit
> the theoretical race window in practice. But depending
> on that host-side implementation detail isn't a good idea
> indeed.

Branch with incremental fixes:
  https://git.kraxel.org/cgit/linux/log/?h=drm-virtio-kill-ttm

I'll be offline three weeks now, will resume this when I'm back.

cheers,
  Gerd

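The incremental fix on that branch presumably handles the 2D path the same way as the 3D path: instead of dropping the object array immediately in the ioctl, the array is passed down to virtio_gpu_cmd_transfer_to_host_2d so it is attached to the vbuf and released when the command completes. A minimal sketch of the ioctl side, assuming the 2D helper gains an objs parameter (that signature change is an assumption here, not taken from this patch):

```c
        convert_to_hw_box(&box, &args->box);
        if (!vgdev->has_virgl_3d) {
                /* hand the array to the command; it rides on the vbuf and
                 * is put/freed when the command is retired, matching the
                 * 3d path */
                virtio_gpu_cmd_transfer_to_host_2d
                        (vgdev, offset, box.w, box.h, box.x, box.y,
                         objs, NULL);
                return 0;
        }
```
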
Switch to the virtio_gpu_array_* helper workflow.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 drivers/gpu/drm/virtio/virtgpu_drv.h   |  2 +-
 drivers/gpu/drm/virtio/virtgpu_ioctl.c | 43 ++++++++++++--------------
 drivers/gpu/drm/virtio/virtgpu_vq.c    |  5 ++-
 3 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 4df760ba018e..b1f63a21abb6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -308,10 +308,10 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                           struct virtio_gpu_object_array *objs,
                                           struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
-                                        struct virtio_gpu_object *bo,
                                         uint32_t ctx_id,
                                         uint64_t offset, uint32_t level,
                                         struct virtio_gpu_box *box,
+                                        struct virtio_gpu_object_array *objs,
                                         struct virtio_gpu_fence *fence);
 void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 56182abdbf36..b220918df6a1 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -341,47 +341,44 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_to_host *args = data;
-       struct drm_gem_object *gobj = NULL;
-       struct virtio_gpu_object *qobj = NULL;
+       struct virtio_gpu_object_array *objs;
        struct virtio_gpu_fence *fence;
        struct virtio_gpu_box box;
        int ret;
        u32 offset = args->offset;

-       gobj = drm_gem_object_lookup(file, args->bo_handle);
-       if (gobj == NULL)
+       objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
+       if (objs == NULL)
                return -ENOENT;

-       qobj = gem_to_virtio_gpu_obj(gobj);
-
-       ret = virtio_gpu_object_reserve(qobj);
-       if (ret)
-               goto out;
-
        convert_to_hw_box(&box, &args->box);
        if (!vgdev->has_virgl_3d) {
                virtio_gpu_cmd_transfer_to_host_2d
-                       (vgdev, qobj, offset,
+                       (vgdev, gem_to_virtio_gpu_obj(objs->objs[0]), offset,
                         box.w, box.h, box.x, box.y, NULL);
+               virtio_gpu_array_put_free(objs);
        } else {
+               ret = virtio_gpu_array_lock_resv(objs);
+               if (ret != 0)
+                       goto err_put_free;
+
+               ret = -ENOMEM;
                fence = virtio_gpu_fence_alloc(vgdev);
-               if (!fence) {
-                       ret = -ENOMEM;
-                       goto out_unres;
-               }
+               if (!fence)
+                       goto err_unlock;
+
                virtio_gpu_cmd_transfer_to_host_3d
-                       (vgdev, qobj,
+                       (vgdev,
                         vfpriv ? vfpriv->ctx_id : 0, offset,
-                        args->level, &box, fence);
-               reservation_object_add_excl_fence(qobj->base.base.resv,
-                                                 &fence->f);
+                        args->level, &box, objs, fence);
                dma_fence_put(&fence->f);
        }
+       return 0;

-out_unres:
-       virtio_gpu_object_unreserve(qobj);
-out:
-       drm_gem_object_put_unlocked(gobj);
+err_unlock:
+       virtio_gpu_array_unlock_resv(objs);
+err_put_free:
+       virtio_gpu_array_put_free(objs);
        return ret;
 }

diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index bef7036f4508..1c0097de419a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -899,12 +899,13 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 }

 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
-                                       struct virtio_gpu_object *bo,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct virtio_gpu_box *box,
+                                       struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
 {
+       struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
@@ -917,6 +918,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

+       vbuf->objs = objs;
+
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);