[v14,5/7] vfio: ABI for mdev display dma-buf operation
diff mbox

Message ID 1506669073.6902.11.camel@redhat.com
State New
Headers show

Commit Message

Gerd Hoffmann Sept. 29, 2017, 7:11 a.m. UTC
Hi,

> The reason why I want to propose the close IOCTL is because that the
> current lock (fb_obj_list_lock), cannot sync the intel_vgpu_fb_info
> releasing and reusing.
> You see, the intel_vgpu_fb_info reusing and releasing are in
> different threads. There is a case that intel_vgpu_find_dmabuf can
> return a intel_vgpu_fb_obj, while the intel_vgpu_fb_obj
> is on the way to be released. That's the problem.

Oh, right.  But that race is fixable.  We need to move the locks one
level up, so we don't only cover list operations (add/lookup/delete)
but also the kref_{get,put} operations for the list elements.

Patch against my tree, only build-tested so far.

cheers,
  Gerd

Patch
diff mbox

From 3e8c30a857d98d36357e8d9bb04b7ccb72264543 Mon Sep 17 00:00:00 2001
From: Gerd Hoffmann <kraxel@redhat.com>
Date: Fri, 29 Sep 2017 08:59:34 +0200
Subject: [PATCH] fix locking

---
 drivers/gpu/drm/i915/gvt/dmabuf.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 2fb3247eff..06ff7bb04e 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -84,24 +84,25 @@  static void intel_vgpu_fb_obj_release(struct kref *kref)
 {
 	struct intel_vgpu_fb_obj *fb_obj =
 		container_of(kref, struct intel_vgpu_fb_obj, kref);
-	struct intel_vgpu *vgpu;
 
-	vgpu = fb_obj->vgpu;
-	mutex_lock(&vgpu->fb_obj_list_lock);
 	list_del(&fb_obj->list);
-	mutex_unlock(&vgpu->fb_obj_list_lock);
 	kfree(fb_obj);
 }
 
 static void intel_vgpu_gem_release(struct drm_i915_gem_object *obj)
 {
+	struct intel_vgpu *vgpu;
+
 	if (WARN_ON(!obj->gvt || !obj->gvt->vgpu)) {
 		gvt_err("gvt info is invalid\n");
 		return;
 	}
 
-	intel_gvt_hypervisor_put_vfio_device(obj->gvt->vgpu);
+	vgpu = obj->gvt->vgpu;
+	intel_gvt_hypervisor_put_vfio_device(vgpu);
+	mutex_lock(&vgpu->fb_obj_list_lock);
 	kref_put(&obj->gvt->kref, intel_vgpu_fb_obj_release);
+	mutex_unlock(&vgpu->fb_obj_list_lock);
 	obj->gvt = NULL;
 }
 
@@ -239,7 +240,6 @@  intel_vgpu_pick_exposed_dmabuf(struct intel_vgpu *vgpu,
 	struct list_head *pos;
 	struct intel_vgpu_fb_obj *fb_obj;
 
-	mutex_lock(&vgpu->fb_obj_list_lock);
 	list_for_each(pos, &vgpu->fb_obj_list_head) {
 		fb_obj = container_of(pos, struct intel_vgpu_fb_obj,
 					  list);
@@ -251,11 +251,9 @@  intel_vgpu_pick_exposed_dmabuf(struct intel_vgpu *vgpu,
 		    (fb_obj->fb.width == latest_info->width) &&
 		    (fb_obj->fb.height == latest_info->height) &&
 		    (fb_obj->fb.stride == latest_info->stride)) {
-			mutex_unlock(&vgpu->fb_obj_list_lock);
 			return fb_obj;
 		}
 	}
-	mutex_unlock(&vgpu->fb_obj_list_lock);
 	return NULL;
 }
 
@@ -265,16 +263,13 @@  intel_vgpu_find_dmabuf(struct intel_vgpu *vgpu, u32 dmabuf_id)
 	struct list_head *pos;
 	struct intel_vgpu_fb_obj *fb_obj;
 
-	mutex_lock(&vgpu->fb_obj_list_lock);
 	list_for_each(pos, &vgpu->fb_obj_list_head) {
 		fb_obj = container_of(pos, struct intel_vgpu_fb_obj,
 					  list);
 		if (fb_obj->dmabuf_id == dmabuf_id) {
-			mutex_unlock(&vgpu->fb_obj_list_lock);
 			return fb_obj;
 		}
 	}
-	mutex_unlock(&vgpu->fb_obj_list_lock);
 	return NULL;
 }
 
@@ -327,8 +322,10 @@  int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
 		return ret;
 
 	/* If exists, pick up the exposed dmabuf fd */
+	mutex_lock(&vgpu->fb_obj_list_lock);
 	fb_obj = intel_vgpu_pick_exposed_dmabuf(vgpu, &fb_info);
 	if (fb_obj != NULL) {
+		mutex_unlock(&vgpu->fb_obj_list_lock);
 		update_fb_info(gvt_dmabuf, fb_obj);
 		return 0;
 	}
@@ -345,7 +342,6 @@  int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
 	fb_obj->fb = fb_info;
 	fb_obj->dmabuf_id = id++;
 
-	mutex_lock(&vgpu->fb_obj_list_lock);
 	list_add_tail(&fb_obj->list, &vgpu->fb_obj_list_head);
 	mutex_unlock(&vgpu->fb_obj_list_lock);
 	update_fb_info(gvt_dmabuf, fb_obj);
@@ -362,11 +358,15 @@  int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, void *args)
 	struct dma_buf *dmabuf;
 	int ret;
 
+	mutex_lock(&vgpu->fb_obj_list_lock);
 	fb_obj = intel_vgpu_find_dmabuf(vgpu, gvt_dmabuf->dmabuf_id);
-	if (NULL == fb_obj)
+	if (NULL == fb_obj) {
+		mutex_unlock(&vgpu->fb_obj_list_lock);
 		return -EINVAL;
+	}
 
 	obj = intel_vgpu_create_gem(dev, fb_obj);
+	mutex_unlock(&vgpu->fb_obj_list_lock);
 	if (obj == NULL) {
 		gvt_vgpu_err("create gvt gem obj failed:%d\n", vgpu->id);
 		return -ENOMEM;
-- 
2.9.3