@@ -94,6 +94,9 @@ struct virtio_gpu_object {
int uuid_state;
uuid_t uuid;
+
+ /* object's backing memory will stay pinned while count > 0 */
+ unsigned int mem_pin_count;
};
#define gem_to_virtio_gpu_obj(gobj) \
container_of((gobj), struct virtio_gpu_object, base.base)
@@ -261,6 +264,9 @@ struct virtio_gpu_device {
spinlock_t resource_export_lock;
/* protects map state and host_visible_mm */
spinlock_t host_visible_lock;
+
+ /* protects BO madvise state and mem_pin_count */
+ struct mutex mm_lock;
};
struct virtio_gpu_fpriv {
@@ -274,7 +280,7 @@ struct virtio_gpu_fpriv {
};
/* virtgpu_ioctl.c */
-#define DRM_VIRTIO_NUM_IOCTLS 12
+#define DRM_VIRTIO_NUM_IOCTLS 13
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
@@ -310,6 +316,13 @@ void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs);
void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs);
void virtio_gpu_array_put_free_work(struct work_struct *work);
+int virtio_gpu_array_validate(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs);
+int virtio_gpu_gem_host_mem_release(struct virtio_gpu_object *bo);
+bool virtio_gpu_gem_madvise(struct virtio_gpu_object *obj, int madv);
+int virtio_gpu_gem_pin(struct virtio_gpu_object *bo);
+void virtio_gpu_gem_unpin(struct virtio_gpu_object *bo);
+bool virtio_gpu_gem_is_pinned(struct virtio_gpu_object *bo);
/* virtgpu_vq.c */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
@@ -321,6 +334,8 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo);
+int virtio_gpu_cmd_release_resource(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
uint32_t width, uint32_t height,
@@ -483,4 +498,8 @@ void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
struct sg_table *sgt,
enum dma_data_direction dir);
+/* virtgpu_gem_shrinker.c */
+int virtio_gpu_gem_shrinker_init(struct virtio_gpu_device *vgdev);
+void virtio_gpu_gem_shrinker_fini(struct virtio_gpu_device *vgdev);
+
#endif
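
The declarations above reference a new virtgpu_gem_shrinker.c that is not part of this excerpt. Purely for orientation, a minimal sketch of what such a driver-local shrinker could look like; the vgdev->shrinker, vgdev->shrink_list and bo->shrink_entry fields are hypothetical (they do not appear in this patch), list locking is elided, and the single-argument register_shrinker() signature of pre-6.0 kernels is assumed:

	#include <linux/shrinker.h>
	#include <linux/dma-resv.h>

	#include "virtgpu_drv.h"

	static unsigned long
	virtio_gpu_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
	{
		struct virtio_gpu_device *vgdev =
			container_of(shrinker, struct virtio_gpu_device, shrinker);
		struct virtio_gpu_object *bo;
		unsigned long count = 0;

		mutex_lock(&vgdev->mm_lock);
		list_for_each_entry(bo, &vgdev->shrink_list, shrink_entry)
			if (!bo->mem_pin_count && bo->base.madv > 0)
				count += bo->base.base.size >> PAGE_SHIFT;
		mutex_unlock(&vgdev->mm_lock);

		return count ?: SHRINK_EMPTY;
	}

	static unsigned long
	virtio_gpu_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
	{
		struct virtio_gpu_device *vgdev =
			container_of(shrinker, struct virtio_gpu_device, shrinker);
		struct virtio_gpu_object *bo;
		unsigned long freed = 0;

		/* list locking elided; a real implementation must protect the walk */
		list_for_each_entry(bo, &vgdev->shrink_list, shrink_entry) {
			struct drm_gem_object *obj = &bo->base.base;

			if (freed >= sc->nr_to_scan)
				break;
			if (!dma_resv_trylock(obj->resv))
				continue;
			/* virtio_gpu_purge_object() skips pinned BOs on its own */
			freed += obj->funcs->purge(obj);
			dma_resv_unlock(obj->resv);
		}

		return freed ?: SHRINK_STOP;
	}

	int virtio_gpu_gem_shrinker_init(struct virtio_gpu_device *vgdev)
	{
		vgdev->shrinker.count_objects = virtio_gpu_shrinker_count;
		vgdev->shrinker.scan_objects = virtio_gpu_shrinker_scan;
		vgdev->shrinker.seeks = DEFAULT_SEEKS;

		return register_shrinker(&vgdev->shrinker);
	}

	void virtio_gpu_gem_shrinker_fini(struct virtio_gpu_device *vgdev)
	{
		unregister_shrinker(&vgdev->shrinker);
	}

Whichever entry point ultimately drives reclaim, the actual work is done by the .purge object callback installed later in this patch.
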
@@ -282,3 +282,99 @@ void virtio_gpu_array_put_free_work(struct work_struct *work)
}
spin_unlock(&vgdev->obj_free_lock);
}
+
+int virtio_gpu_array_validate(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs)
+{
+ struct drm_gem_shmem_object *shmem;
+ int ret = 0;
+ u32 i;
+
+ mutex_lock(&vgdev->mm_lock);
+
+ for (i = 0; i < objs->nents; i++) {
+ shmem = to_drm_gem_shmem_obj(objs->objs[i]);
+ if (shmem->madv) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ mutex_unlock(&vgdev->mm_lock);
+
+ return ret;
+}
+
+bool virtio_gpu_gem_madvise(struct virtio_gpu_object *bo, int madv)
+{
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ bool retained;
+
+ /*
+ * For now only BOs backed by the guest's memory (shmem) can be purged;
+ * all other BOs are always reported as retained.
+ */
+ if (!virtio_gpu_is_shmem(bo))
+ return true;
+
+ mutex_lock(&vgdev->mm_lock);
+ retained = drm_gem_shmem_madvise(&bo->base, madv);
+ mutex_unlock(&vgdev->mm_lock);
+
+ return retained;
+}
+
+int virtio_gpu_gem_host_mem_release(struct virtio_gpu_object *bo)
+{
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ int err;
+
+ if (bo->created) {
+ err = virtio_gpu_cmd_release_resource(vgdev, bo);
+ if (err)
+ return err;
+
+ virtio_gpu_notify(vgdev);
+ bo->created = false;
+ }
+
+ return 0;
+}
+
+int virtio_gpu_gem_pin(struct virtio_gpu_object *bo)
+{
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ int ret = 0;
+
+ mutex_lock(&vgdev->mm_lock);
+
+ if (bo->base.madv == VIRTGPU_MADV_WILLNEED)
+ bo->mem_pin_count++;
+ else
+ ret = -ENOMEM;
+
+ mutex_unlock(&vgdev->mm_lock);
+
+ return ret;
+}
+
+void virtio_gpu_gem_unpin(struct virtio_gpu_object *bo)
+{
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+
+ mutex_lock(&vgdev->mm_lock);
+ WARN_ON(!bo->mem_pin_count--);
+ mutex_unlock(&vgdev->mm_lock);
+}
+
+bool virtio_gpu_gem_is_pinned(struct virtio_gpu_object *bo)
+{
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ bool ret;
+
+ mutex_lock(&vgdev->mm_lock);
+ ret = bo->mem_pin_count > 0;
+ mutex_unlock(&vgdev->mm_lock);
+
+ return ret;
+}
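
Taken together, the helpers above form a small per-BO state machine on top of the drm_gem_shmem madv value (0 = willneed, positive = dontneed, negative = purged). As a rough summary of how the rest of the patch uses these states:

	/*
	 * madv == 0 (WILLNEED): pages are resident; virtio_gpu_gem_pin() succeeds
	 *                       and virtio_gpu_array_validate() passes.
	 * madv  > 0 (DONTNEED): the BO is a purge candidate; validate() and pin()
	 *                       return -ENOMEM, and the shrinker may purge the BO
	 *                       unless mem_pin_count > 0.
	 * madv  < 0 (purged):   host resource and guest pages are gone; a later
	 *                       VIRTGPU_MADV_WILLNEED reports retained == 0.
	 */
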
@@ -217,6 +217,10 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
ret = virtio_gpu_array_lock_resv(buflist);
if (ret)
goto out_memdup;
+
+ ret = virtio_gpu_array_validate(vgdev, buflist);
+ if (ret)
+ goto out_unresv;
}
out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
@@ -423,6 +427,10 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (ret != 0)
goto err_put_free;
+ ret = virtio_gpu_array_validate(vgdev, objs);
+ if (ret)
+ goto err_unlock;
+
fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
if (!fence) {
ret = -ENOMEM;
@@ -482,6 +490,10 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
if (ret != 0)
goto err_put_free;
+ ret = virtio_gpu_array_validate(vgdev, objs);
+ if (ret)
+ goto err_unlock;
+
ret = -ENOMEM;
fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
0);
@@ -836,6 +848,28 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
return ret;
}
+static int virtio_gpu_madvise_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file)
+{
+ struct drm_virtgpu_madvise *args = data;
+ struct virtio_gpu_object *bo;
+ struct drm_gem_object *obj;
+
+ if (args->madv > VIRTGPU_MADV_DONTNEED)
+ return -EOPNOTSUPP;
+
+ obj = drm_gem_object_lookup(file, args->bo_handle);
+ if (!obj)
+ return -ENOENT;
+
+ bo = gem_to_virtio_gpu_obj(obj);
+ args->retained = virtio_gpu_gem_madvise(bo, args->madv);
+ drm_gem_object_put(obj);
+
+ return 0;
+}
+
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
DRM_RENDER_ALLOW),
@@ -875,4 +909,7 @@ struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl,
DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF_DRV(VIRTGPU_MADVISE, virtio_gpu_madvise_ioctl,
+ DRM_RENDER_ALLOW),
};
@@ -134,6 +134,7 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
dev->dev_private = vgdev;
vgdev->vdev = vdev;
+ mutex_init(&vgdev->mm_lock);
spin_lock_init(&vgdev->display_info_lock);
spin_lock_init(&vgdev->resource_export_lock);
spin_lock_init(&vgdev->host_visible_lock);
@@ -238,6 +239,12 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
goto err_scanouts;
}
+ ret = drm_gem_shmem_shrinker_register(dev);
+ if (ret) {
+ DRM_ERROR("shrinker init failed\n");
+ goto err_modeset;
+ }
+
virtio_device_ready(vgdev->vdev);
if (num_capsets)
@@ -250,6 +257,8 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
5 * HZ);
return 0;
+err_modeset:
+ virtio_gpu_modeset_fini(vgdev);
err_scanouts:
virtio_gpu_free_vbufs(vgdev);
err_vbufs:
@@ -289,6 +298,7 @@ void virtio_gpu_release(struct drm_device *dev)
if (!vgdev)
return;
+ drm_gem_shmem_shrinker_unregister(dev);
virtio_gpu_modeset_fini(vgdev);
virtio_gpu_free_vbufs(vgdev);
virtio_gpu_cleanup_cap_cache(vgdev);
@@ -97,6 +97,27 @@ static void virtio_gpu_free_object(struct drm_gem_object *obj)
virtio_gpu_cleanup_object(bo);
}
+static unsigned long virtio_gpu_purge_object(struct drm_gem_object *obj)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ int err;
+
+ if (virtio_gpu_gem_is_pinned(bo))
+ return 0;
+
+ /*
+ * Release the host-side resource before the guest pages are freed, so
+ * that the host never touches guest memory that has already been
+ * reclaimed.
+ */
+ err = virtio_gpu_gem_host_mem_release(bo);
+ if (err)
+ return 0;
+
+ drm_gem_shmem_purge_locked(&bo->base);
+
+ return obj->size >> PAGE_SHIFT;
+}
+
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
.free = virtio_gpu_free_object,
.open = virtio_gpu_gem_object_open,
@@ -110,6 +131,7 @@ static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = drm_gem_shmem_object_mmap,
.vm_ops = &drm_gem_shmem_vm_ops,
+ .purge = &virtio_gpu_purge_object,
};
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
@@ -246,20 +246,28 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_framebuffer *vgfb;
struct virtio_gpu_object *bo;
+ int err;
if (!new_state->fb)
return 0;
vgfb = to_virtio_gpu_framebuffer(new_state->fb);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
- if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
+
+ err = virtio_gpu_gem_pin(bo);
+ if (err)
+ return err;
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob)
return 0;
if (bo->dumb && (plane->state->fb != new_state->fb)) {
vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
0);
- if (!vgfb->fence)
+ if (!vgfb->fence) {
+ virtio_gpu_gem_unpin(bo);
return -ENOMEM;
+ }
}
return 0;
@@ -269,15 +277,20 @@ static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_object *bo;
if (!plane->state->fb)
return;
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+
if (vgfb->fence) {
dma_fence_put(&vgfb->fence->f);
vgfb->fence = NULL;
}
+
+ virtio_gpu_gem_unpin(bo);
}
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
@@ -536,6 +536,21 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
virtio_gpu_cleanup_object(bo);
}
+int virtio_gpu_cmd_release_resource(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo)
+{
+ struct virtio_gpu_resource_unref *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+
+ return virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
uint32_t scanout_id, uint32_t resource_id,
uint32_t width, uint32_t height,
@@ -48,6 +48,7 @@ extern "C" {
#define DRM_VIRTGPU_GET_CAPS 0x09
#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
#define DRM_VIRTGPU_CONTEXT_INIT 0x0b
+#define DRM_VIRTGPU_MADVISE 0x0c
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
@@ -196,6 +197,15 @@ struct drm_virtgpu_context_init {
__u64 ctx_set_params;
};
+#define VIRTGPU_MADV_WILLNEED 0
+#define VIRTGPU_MADV_DONTNEED 1
+struct drm_virtgpu_madvise {
+ __u32 bo_handle;
+ __u32 retained; /* out, non-zero if BO can be used */
+ __u32 madv;
+ __u32 pad;
+};
+
/*
* Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
* effect. The event size is sizeof(drm_event), since there is no additional
@@ -246,6 +256,10 @@ struct drm_virtgpu_context_init {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT, \
struct drm_virtgpu_context_init)
+#define DRM_IOCTL_VIRTGPU_MADVISE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MADVISE, \
+ struct drm_virtgpu_madvise)
+
#if defined(__cplusplus)
}
#endif
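
For completeness, a hedged userspace sketch of how a BO cache might drive the new ioctl. The helper names are illustrative, fd is an open virtio-gpu render node, and the included virtgpu_drm.h must already carry the VIRTGPU_MADVISE additions above (include paths depend on the libdrm installation):

	#include <stdbool.h>
	#include <stdint.h>

	#include <xf86drm.h>
	#include <libdrm/virtgpu_drm.h>

	/* Park an idle BO in the cache: the kernel may now reclaim its pages. */
	static void bo_cache_park(int fd, uint32_t bo_handle)
	{
		struct drm_virtgpu_madvise args = {
			.bo_handle = bo_handle,
			.madv = VIRTGPU_MADV_DONTNEED,
		};

		drmIoctl(fd, DRM_IOCTL_VIRTGPU_MADVISE, &args);
	}

	/*
	 * Take a BO back out of the cache. Returns false if it was purged in
	 * the meantime, in which case the caller must allocate a fresh BO.
	 */
	static bool bo_cache_reuse(int fd, uint32_t bo_handle)
	{
		struct drm_virtgpu_madvise args = {
			.bo_handle = bo_handle,
			.madv = VIRTGPU_MADV_WILLNEED,
		};

		if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_MADVISE, &args))
			return false;

		return args.retained;
	}
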