@@ -80,6 +80,9 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
drm_gem_free_mmap_offset(&vram->base.base.base);
drm_gem_object_release(&vram->base.base.base);
kfree(vram);
+ } else {
+ drm_gem_object_release(&bo->base.base);
+ kfree(bo);
}
}
@@ -184,6 +184,81 @@ int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
return 0;
}
+static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+ struct dma_buf_attachment *attach = obj->import_attach;
+
+ if (attach) {
+ virtio_gpu_detach_object_fenced(bo);
+
+ if (bo->sgt)
+ dma_buf_unmap_attachment_unlocked(attach, bo->sgt,
+ DMA_BIDIRECTIONAL);
+
+ dma_buf_detach(attach->dmabuf, attach);
+ dma_buf_put(attach->dmabuf);
+ }
+
+ if (bo->created) {
+ virtio_gpu_cmd_unref_resource(vgdev, bo);
+ virtio_gpu_notify(vgdev);
+ return;
+ }
+ virtio_gpu_cleanup_object(bo);
+}
+
+static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
+ struct virtio_gpu_object *bo,
+ struct dma_buf_attachment *attach)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_object_params params = { 0 };
+ struct dma_resv *resv = attach->dmabuf->resv;
+ struct virtio_gpu_mem_entry *ents = NULL;
+ unsigned int nents;
+ int ret;
+
+ ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
+ if (ret) {
+ virtgpu_dma_buf_free_obj(&bo->base.base);
+ return ret;
+ }
+
+ dma_resv_lock(resv, NULL);
+
+ ret = dma_buf_pin(attach);
+ if (ret)
+ goto err_pin;
+
+ ret = virtgpu_dma_buf_import_sgt(&ents, &nents, bo, attach);
+ if (ret)
+ goto err_import;
+
+ params.blob = true;
+ params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
+ params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
+ params.size = attach->dmabuf->size;
+
+ virtio_gpu_cmd_resource_create_blob(vgdev, bo, &params,
+ ents, nents);
+ bo->guest_blob = true;
+ bo->attached = true;
+
+ dma_buf_unpin(attach);
+ dma_resv_unlock(resv);
+
+ return 0;
+
+err_import:
+ dma_buf_unpin(attach);
+err_pin:
+ dma_resv_unlock(resv);
+ virtgpu_dma_buf_free_obj(&bo->base.base);
+ return ret;
+}
+
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *buf)
{
The imported object can be considered a guest blob resource; therefore,
we use the create_blob cmd while creating it. These helpers are used in
the next patch, which does the actual import.

Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Cc: Rob Clark <robdclark@gmail.com>
Cc: Gurchetan Singh <gurchetansingh@chromium.org>
Cc: Chia-I Wu <olvaffe@gmail.com>
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
---
 drivers/gpu/drm/virtio/virtgpu_object.c |  3 +
 drivers/gpu/drm/virtio/virtgpu_prime.c  | 75 +++++++++++++++++++++++++
 2 files changed, 78 insertions(+)
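For context, here is a minimal sketch of how the follow-up import patch might
wire these two helpers up. It is illustrative only and not taken from the
actual series: the funcs table name (virtgpu_dmabuf_gem_funcs), the reduced
error handling, and the omission of the existing same-device reuse and
non-blob fallback paths in virtgpu_gem_prime_import() are all assumptions.

/*
 * Illustrative sketch (not part of this patch): one possible way the
 * follow-up import patch could use the two helpers above. The funcs
 * table name and the simplified flow are assumptions.
 */
static const struct drm_gem_object_funcs virtgpu_dmabuf_gem_funcs = {
	.free = virtgpu_dma_buf_free_obj,
};

struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
						struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct virtio_gpu_object *bo;
	struct drm_gem_object *obj;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	obj = &bo->base.base;
	obj->funcs = &virtgpu_dmabuf_gem_funcs;
	drm_gem_private_object_init(dev, obj, buf->size);

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach)) {
		drm_gem_object_release(obj);
		kfree(bo);
		return ERR_CAST(attach);
	}

	get_dma_buf(buf);
	obj->import_attach = attach;

	/*
	 * Allocates a resource id, maps the attachment and sends the
	 * create_blob cmd; on failure it already tears the object down
	 * via virtgpu_dma_buf_free_obj(), so ERR_PTR() is enough here.
	 */
	ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
	if (ret < 0)
		return ERR_PTR(ret);

	return obj;
}

Because virtgpu_dma_buf_init_obj() calls virtgpu_dma_buf_free_obj() on its own
error paths, a caller along these lines needs no separate unwind ladder after
the attachment has been recorded in the GEM object.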