--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -89,6 +89,8 @@ struct virtio_gpu_object {
uint32_t hw_res_handle;
bool dumb;
bool created;
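+ /* true when the backing pages are detached from the host resource */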
+ bool detached;
bool host3d_blob, guest_blob;
uint32_t blob_mem, blob_flags;
@@ -313,6 +314,8 @@ void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs);
void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs);
void virtio_gpu_array_put_free_work(struct work_struct *work);
+int virtio_gpu_array_prepare(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs);
int virtio_gpu_gem_pin(struct virtio_gpu_object *bo);
void virtio_gpu_gem_unpin(struct virtio_gpu_object *bo);
@@ -458,6 +461,10 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
+int virtio_gpu_reattach_shmem_object_locked(struct virtio_gpu_object *bo);
+
+int virtio_gpu_reattach_shmem_object(struct virtio_gpu_object *bo);
+
int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
uint32_t *resid);
/* virtgpu_prime.c */
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -295,6 +295,30 @@ void virtio_gpu_array_put_free_work(struct work_struct *work)
spin_unlock(&vgdev->obj_free_lock);
}
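+/*
+ * Re-attach the backing pages of any detached shmem BO in the array.
+ * The BO reservations are expected to be held by the caller.
+ */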
+int virtio_gpu_array_prepare(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs)
+{
+ struct virtio_gpu_object *bo;
+ int ret = 0;
+ u32 i;
+
+ for (i = 0; i < objs->nents; i++) {
+ bo = gem_to_virtio_gpu_obj(objs->objs[i]);
+
+ if (virtio_gpu_is_shmem(bo) && bo->detached) {
+ ret = virtio_gpu_reattach_shmem_object_locked(bo);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
int virtio_gpu_gem_pin(struct virtio_gpu_object *bo)
{
int err;
@@ -303,6 +323,13 @@ int virtio_gpu_gem_pin(struct virtio_gpu_object *bo)
err = drm_gem_shmem_pin(&bo->base);
if (err)
return err;
+
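+ /* a detached BO must be re-attached so the host sees the pinned pages */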
+ err = virtio_gpu_reattach_shmem_object(bo);
+ if (err) {
+ drm_gem_shmem_unpin(&bo->base);
+ return err;
+ }
}
return 0;
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -246,6 +246,11 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (ret != 0)
goto err_put_free;
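+ /* the BO pages must be attached on the host before transferring */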
+ ret = virtio_gpu_array_prepare(vgdev, objs);
+ if (ret)
+ goto err_unlock;
+
fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
if (!fence) {
ret = -ENOMEM;
@@ -288,11 +292,26 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
goto err_put_free;
}
+ ret = virtio_gpu_array_lock_resv(objs);
+ if (ret != 0)
+ goto err_put_free;
+
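+ /* re-attach the pages of a detached BO before queueing the transfer */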
+ ret = virtio_gpu_array_prepare(vgdev, objs);
+ if (ret)
+ goto err_unlock;
+
+ fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
if (!vgdev->has_virgl_3d) {
virtio_gpu_cmd_transfer_to_host_2d
(vgdev, offset,
args->box.w, args->box.h, args->box.x, args->box.y,
- objs, NULL);
+ objs, fence);
} else {
virtio_gpu_create_context(dev, file);
@@ -301,23 +319,14 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
- goto err_put_free;
+ dma_fence_put(&fence->f);
+ goto err_unlock;
}
- ret = virtio_gpu_array_lock_resv(objs);
- if (ret != 0)
- goto err_put_free;
-
- ret = -ENOMEM;
- fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
- 0);
- if (!fence)
- goto err_unlock;
-
virtio_gpu_cmd_transfer_to_host_3d
(vgdev,
vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
args->stride, args->layer_stride, &args->box, objs,
fence);
- dma_fence_put(&fence->f);
}
+ dma_fence_put(&fence->f);
virtio_gpu_notify(vgdev);
return 0;
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -142,10 +142,14 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
struct sg_table *pages;
int si;
- pages = drm_gem_shmem_get_pages_sgt(&bo->base);
+ pages = drm_gem_shmem_get_pages_sgt_locked(&bo->base);
if (IS_ERR(pages))
return PTR_ERR(pages);
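+ /* with a NULL ents the caller only wants the pages to be allocated */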
+ if (!ents)
+ return 0;
+
if (use_dma_api)
*nents = pages->nents;
else
@@ -176,6 +179,44 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
return 0;
}
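+/*
+ * Rebuild the mem entries of a detached shmem BO and attach them to the
+ * host resource again. The BO reservation must be held by the caller.
+ */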
+int virtio_gpu_reattach_shmem_object_locked(struct virtio_gpu_object *bo)
+{
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ struct virtio_gpu_mem_entry *ents;
+ unsigned int nents;
+ int err;
+
+ if (!bo->detached)
+ return 0;
+
+ err = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
+ if (err)
+ return err;
+
+ virtio_gpu_object_attach(vgdev, bo, ents, nents);
+
+ bo->detached = false;
+
+ return 0;
+}
+
+int virtio_gpu_reattach_shmem_object(struct virtio_gpu_object *bo)
+{
+ int ret;
+
+ ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL);
+ if (ret)
+ return ret;
+ ret = virtio_gpu_reattach_shmem_object_locked(bo);
+ dma_resv_unlock(bo->base.base.resv);
+
+ return ret;
+}
+
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr,
@@ -202,45 +239,62 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
bo->dumb = params->dumb;
- ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
- if (ret != 0)
- goto err_put_id;
+ if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
+ bo->guest_blob = true;
if (fence) {
ret = -ENOMEM;
objs = virtio_gpu_array_alloc(1);
if (!objs)
- goto err_free_entry;
+ goto err_put_id;
virtio_gpu_array_add_obj(objs, &bo->base.base);
ret = virtio_gpu_array_lock_resv(objs);
if (ret != 0)
goto err_put_objs;
+ } else {
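+ /* drm_gem_shmem_get_pages_sgt_locked() expects the resv to be held */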
+ ret = dma_resv_lock(bo->base.base.resv, NULL);
+ if (ret)
+ goto err_put_id;
}
if (params->blob) {
- if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
- bo->guest_blob = true;
+ ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
+ if (ret)
+ goto err_unlock_objs;
+ } else {
+ ret = virtio_gpu_object_shmem_init(vgdev, bo, NULL, NULL);
+ if (ret)
+ goto err_unlock_objs;
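+ /* pages are attached to the host later, on the first pin or transfer */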
+ bo->detached = true;
+ }
+
+ if (params->blob)
virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
ents, nents);
- } else if (params->virgl) {
+ else if (params->virgl)
virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
objs, fence);
- virtio_gpu_object_attach(vgdev, bo, ents, nents);
- } else {
+ else
virtio_gpu_cmd_create_resource(vgdev, bo, params,
objs, fence);
- virtio_gpu_object_attach(vgdev, bo, ents, nents);
- }
+
+ if (!fence)
+ dma_resv_unlock(bo->base.base.resv);
*bo_ptr = bo;
return 0;
+err_unlock_objs:
+ if (fence)
+ virtio_gpu_array_unlock_resv(objs);
+ else
+ dma_resv_unlock(bo->base.base.resv);
err_put_objs:
virtio_gpu_array_put_free(objs);
-err_free_entry:
- kvfree(ents);
err_put_id:
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
--- a/drivers/gpu/drm/virtio/virtgpu_submit.c
+++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
@@ -465,8 +465,20 @@ static void virtio_gpu_install_out_fence_fd(struct virtio_gpu_submit *submit)
static int virtio_gpu_lock_buflist(struct virtio_gpu_submit *submit)
{
- if (submit->buflist)
- return virtio_gpu_array_lock_resv(submit->buflist);
+ int err;
+
+ if (submit->buflist) {
+ err = virtio_gpu_array_lock_resv(submit->buflist);
+ if (err)
+ return err;
+
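+ /* re-attach any detached BO in the list before submission */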
+ err = virtio_gpu_array_prepare(submit->vgdev, submit->buflist);
+ if (err) {
+ virtio_gpu_array_unlock_resv(submit->buflist);
+ return err;
+ }
+ }
return 0;
}