
[23/23] drm: omapdrm: Remove buffer synchronization support

Message ID 1461702945-14185-24-git-send-email-laurent.pinchart@ideasonboard.com (mailing list archive)
State New, archived

Commit Message

Laurent Pinchart April 26, 2016, 8:35 p.m. UTC
The omapdrm driver uses a custom API to synchronize with the SGX GPU.
This is unusable as such in the mainline kernel as the API is only
partially implemented and requires additional out-of-tree patches.
Furthermore, as no SGX driver is available in the mainline kernel, the
API can't be considered as a stable mainline API.

Remove buffer synchronization support to prepare for its replacement by
an implementation based on standard fences and reservation objects.

Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
---
 drivers/gpu/drm/omapdrm/omap_drv.c |  49 ---------
 drivers/gpu/drm/omapdrm/omap_drv.h |   5 -
 drivers/gpu/drm/omapdrm/omap_gem.c | 215 -------------------------------------
 include/uapi/drm/omap_drm.h        |  26 -----
 4 files changed, 295 deletions(-)
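
The replacement mentioned in the commit message is not part of this patch, but for context, a rough sketch of what a reservation-object-based wait could look like is shown below. This is an illustration only, not the driver's eventual implementation: the resv field on omap_gem_object is a hypothetical field here.

/*
 * Illustration only, not part of this patch: one possible shape of the
 * fence/reservation-based wait meant to replace the removed sync code.
 * The omap_obj->resv reservation object is a hypothetical field here.
 */
#include <linux/reservation.h>

static int omap_gem_wait_idle(struct drm_gem_object *obj, bool write)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	long ret;

	/*
	 * A CPU read only needs the exclusive (write) fence to signal;
	 * a CPU write needs all fences attached to the buffer to signal.
	 */
	ret = reservation_object_wait_timeout_rcu(omap_obj->resv, write,
						  true, MAX_SCHEDULE_TIMEOUT);
	if (ret == 0)
		return -ETIMEDOUT;

	return ret < 0 ? ret : 0;
}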

Comments

Tomi Valkeinen May 11, 2016, 11:12 a.m. UTC | #1
On 26/04/16 23:35, Laurent Pinchart wrote:
> The omapdrm driver uses a custom API to synchronize with the SGX GPU.
> This is unusable as such in the mainline kernel as the API is only
> partially implemented and requires additional out-of-tree patches.
> Furthermore, as no SGX driver is available in the mainline kernel, the
> API can't be considered as a stable mainline API.
> 
> Remove buffer synchronization support to prepare for its replacement by
> an implementation based on standard fences and reservation objects.

I thought one thing the OMAP_GEM_CPU_PREP/FINI ioctls were supposed to
do is cache flushing, if the buffer is OMAP_BO_CACHED. I don't think
that works, though, which might be considered an omapdrm bug.

Also, if an app is using the prep/fini ioctls at the moment, even if it
doesn't strictly require flushing (or any kind of synchronization), the
app might start failing after this patch. So even if we remove all the
code behind them, perhaps we should leave no-op ioctls (a minimal sketch
of such stubs follows after this message).

 Tomi
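
A minimal sketch of the no-op stubs suggested above, assuming the handler signatures and ioctl table entries shown in the patch below; whether such a compatibility shim is actually wanted is the open question, and this is not part of the applied patch.

static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	/* Synchronization support is gone; accept the call and do nothing. */
	return 0;
}

static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	/* Likewise a no-op, kept only so existing userspace doesn't break. */
	return 0;
}

/* ...with the two table entries the patch removes kept in place: */
	DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_AUTH),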

Patch

diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 80398a684cae..68f434ad9696 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -552,53 +552,6 @@  static int ioctl_gem_new(struct drm_device *dev, void *data,
 				   &args->handle);
 }
 
-static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
-		struct drm_file *file_priv)
-{
-	struct drm_omap_gem_cpu_prep *args = data;
-	struct drm_gem_object *obj;
-	int ret;
-
-	VERB("%p:%p: handle=%d, op=%x", dev, file_priv, args->handle, args->op);
-
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-	if (!obj)
-		return -ENOENT;
-
-	ret = omap_gem_op_sync(obj, args->op);
-
-	if (!ret)
-		ret = omap_gem_op_start(obj, args->op);
-
-	drm_gem_object_unreference_unlocked(obj);
-
-	return ret;
-}
-
-static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
-		struct drm_file *file_priv)
-{
-	struct drm_omap_gem_cpu_fini *args = data;
-	struct drm_gem_object *obj;
-	int ret;
-
-	VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
-
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-	if (!obj)
-		return -ENOENT;
-
-	/* XXX flushy, flushy */
-	ret = 0;
-
-	if (!ret)
-		ret = omap_gem_op_finish(obj, args->op);
-
-	drm_gem_object_unreference_unlocked(obj);
-
-	return ret;
-}
-
 static int ioctl_gem_info(struct drm_device *dev, void *data,
 		struct drm_file *file_priv)
 {
@@ -624,8 +577,6 @@  static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] =
 	DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info, DRM_AUTH),
 };
 
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 779628f9ed7d..2aa198bec662 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -195,11 +195,6 @@  int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int omap_gem_mmap_obj(struct drm_gem_object *obj,
 		struct vm_area_struct *vma);
 int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op);
-int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op);
-int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op);
-int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
-		void (*fxn)(void *arg), void *arg);
 int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll);
 void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff);
 void omap_gem_dma_sync(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 9022cbbf26fb..d5a577037e94 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -100,19 +100,6 @@  struct omap_gem_object {
 	 * Virtual address, if mapped.
 	 */
 	void *vaddr;
-
-	/**
-	 * sync-object allocated on demand (if needed)
-	 *
-	 * Per-buffer sync-object for tracking pending and completed hw/dma
-	 * read and write operations.
-	 */
-	struct {
-		uint32_t write_pending;
-		uint32_t write_complete;
-		uint32_t read_pending;
-		uint32_t read_complete;
-	} *sync;
 };
 
 #define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
@@ -1071,206 +1058,6 @@  void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
 #endif
 
 /* -----------------------------------------------------------------------------
- * Buffer Synchronization
- */
-
-static DEFINE_SPINLOCK(sync_lock);
-
-struct omap_gem_sync_waiter {
-	struct list_head list;
-	struct omap_gem_object *omap_obj;
-	enum omap_gem_op op;
-	uint32_t read_target, write_target;
-	/* notify called w/ sync_lock held */
-	void (*notify)(void *arg);
-	void *arg;
-};
-
-/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
- * the read and/or write target count is achieved which can call a user
- * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
- * cpu access), etc.
- */
-static LIST_HEAD(waiters);
-
-static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
-{
-	struct omap_gem_object *omap_obj = waiter->omap_obj;
-	if ((waiter->op & OMAP_GEM_READ) &&
-			(omap_obj->sync->write_complete < waiter->write_target))
-		return true;
-	if ((waiter->op & OMAP_GEM_WRITE) &&
-			(omap_obj->sync->read_complete < waiter->read_target))
-		return true;
-	return false;
-}
-
-/* macro for sync debug.. */
-#define SYNCDBG 0
-#define SYNC(fmt, ...) do { if (SYNCDBG) \
-		printk(KERN_ERR "%s:%d: "fmt"\n", \
-				__func__, __LINE__, ##__VA_ARGS__); \
-	} while (0)
-
-
-static void sync_op_update(void)
-{
-	struct omap_gem_sync_waiter *waiter, *n;
-	list_for_each_entry_safe(waiter, n, &waiters, list) {
-		if (!is_waiting(waiter)) {
-			list_del(&waiter->list);
-			SYNC("notify: %p", waiter);
-			waiter->notify(waiter->arg);
-			kfree(waiter);
-		}
-	}
-}
-
-static inline int sync_op(struct drm_gem_object *obj,
-		enum omap_gem_op op, bool start)
-{
-	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	int ret = 0;
-
-	spin_lock(&sync_lock);
-
-	if (!omap_obj->sync) {
-		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
-		if (!omap_obj->sync) {
-			ret = -ENOMEM;
-			goto unlock;
-		}
-	}
-
-	if (start) {
-		if (op & OMAP_GEM_READ)
-			omap_obj->sync->read_pending++;
-		if (op & OMAP_GEM_WRITE)
-			omap_obj->sync->write_pending++;
-	} else {
-		if (op & OMAP_GEM_READ)
-			omap_obj->sync->read_complete++;
-		if (op & OMAP_GEM_WRITE)
-			omap_obj->sync->write_complete++;
-		sync_op_update();
-	}
-
-unlock:
-	spin_unlock(&sync_lock);
-
-	return ret;
-}
-
-/* mark the start of read and/or write operation */
-int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
-{
-	return sync_op(obj, op, true);
-}
-
-int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
-{
-	return sync_op(obj, op, false);
-}
-
-static DECLARE_WAIT_QUEUE_HEAD(sync_event);
-
-static void sync_notify(void *arg)
-{
-	struct task_struct **waiter_task = arg;
-	*waiter_task = NULL;
-	wake_up_all(&sync_event);
-}
-
-int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
-{
-	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	int ret = 0;
-	if (omap_obj->sync) {
-		struct task_struct *waiter_task = current;
-		struct omap_gem_sync_waiter *waiter =
-				kzalloc(sizeof(*waiter), GFP_KERNEL);
-
-		if (!waiter)
-			return -ENOMEM;
-
-		waiter->omap_obj = omap_obj;
-		waiter->op = op;
-		waiter->read_target = omap_obj->sync->read_pending;
-		waiter->write_target = omap_obj->sync->write_pending;
-		waiter->notify = sync_notify;
-		waiter->arg = &waiter_task;
-
-		spin_lock(&sync_lock);
-		if (is_waiting(waiter)) {
-			SYNC("waited: %p", waiter);
-			list_add_tail(&waiter->list, &waiters);
-			spin_unlock(&sync_lock);
-			ret = wait_event_interruptible(sync_event,
-					(waiter_task == NULL));
-			spin_lock(&sync_lock);
-			if (waiter_task) {
-				SYNC("interrupted: %p", waiter);
-				/* we were interrupted */
-				list_del(&waiter->list);
-				waiter_task = NULL;
-			} else {
-				/* freed in sync_op_update() */
-				waiter = NULL;
-			}
-		}
-		spin_unlock(&sync_lock);
-		kfree(waiter);
-	}
-	return ret;
-}
-
-/* call fxn(arg), either synchronously or asynchronously if the op
- * is currently blocked..  fxn() can be called from any context
- *
- * (TODO for now fxn is called back from whichever context calls
- * omap_gem_op_finish().. but this could be better defined later
- * if needed)
- *
- * TODO more code in common w/ _sync()..
- */
-int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
-		void (*fxn)(void *arg), void *arg)
-{
-	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	if (omap_obj->sync) {
-		struct omap_gem_sync_waiter *waiter =
-				kzalloc(sizeof(*waiter), GFP_ATOMIC);
-
-		if (!waiter)
-			return -ENOMEM;
-
-		waiter->omap_obj = omap_obj;
-		waiter->op = op;
-		waiter->read_target = omap_obj->sync->read_pending;
-		waiter->write_target = omap_obj->sync->write_pending;
-		waiter->notify = fxn;
-		waiter->arg = arg;
-
-		spin_lock(&sync_lock);
-		if (is_waiting(waiter)) {
-			SYNC("waited: %p", waiter);
-			list_add_tail(&waiter->list, &waiters);
-			spin_unlock(&sync_lock);
-			return 0;
-		}
-
-		spin_unlock(&sync_lock);
-
-		kfree(waiter);
-	}
-
-	/* no waiting.. */
-	fxn(arg);
-
-	return 0;
-}
-
-/* -----------------------------------------------------------------------------
  * Constructor & Destructor
  */
 
@@ -1309,8 +1096,6 @@  void omap_gem_free_object(struct drm_gem_object *obj)
 		drm_prime_gem_destroy(obj, omap_obj->sgt);
 	}
 
-	kfree(omap_obj->sync);
-
 	drm_gem_object_release(obj);
 
 	kfree(omap_obj);
diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h
index 38a3bd847e15..c34114bc0e51 100644
--- a/include/uapi/drm/omap_drm.h
+++ b/include/uapi/drm/omap_drm.h
@@ -63,28 +63,6 @@  struct drm_omap_gem_new {
 	uint32_t __pad;
 };
 
-/* mask of operations: */
-enum omap_gem_op {
-	OMAP_GEM_READ = 0x01,
-	OMAP_GEM_WRITE = 0x02,
-};
-
-struct drm_omap_gem_cpu_prep {
-	uint32_t handle;		/* buffer handle (in) */
-	uint32_t op;			/* mask of omap_gem_op (in) */
-};
-
-struct drm_omap_gem_cpu_fini {
-	uint32_t handle;		/* buffer handle (in) */
-	uint32_t op;			/* mask of omap_gem_op (in) */
-	/* TODO maybe here we pass down info about what regions are touched
-	 * by sw so we can be clever about cache ops?  For now a placeholder,
-	 * set to zero and we just do full buffer flush..
-	 */
-	uint32_t nregions;
-	uint32_t __pad;
-};
-
 struct drm_omap_gem_info {
 	uint32_t handle;		/* buffer handle (in) */
 	uint32_t pad;
@@ -102,16 +80,12 @@  struct drm_omap_gem_info {
 #define DRM_OMAP_GET_PARAM		0x00
 #define DRM_OMAP_SET_PARAM		0x01
 #define DRM_OMAP_GEM_NEW		0x03
-#define DRM_OMAP_GEM_CPU_PREP		0x04
-#define DRM_OMAP_GEM_CPU_FINI		0x05
 #define DRM_OMAP_GEM_INFO		0x06
 #define DRM_OMAP_NUM_IOCTLS		0x07
 
 #define DRM_IOCTL_OMAP_GET_PARAM	DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param)
 #define DRM_IOCTL_OMAP_SET_PARAM	DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param)
 #define DRM_IOCTL_OMAP_GEM_NEW		DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new)
-#define DRM_IOCTL_OMAP_GEM_CPU_PREP	DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep)
-#define DRM_IOCTL_OMAP_GEM_CPU_FINI	DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini)
 #define DRM_IOCTL_OMAP_GEM_INFO		DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_INFO, struct drm_omap_gem_info)
 
 #endif /* __OMAP_DRM_H__ */
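
For reference, a hypothetical minimal example of the userspace call sequence that the removed uapi above supported (header path and error handling are illustrative); an application doing this today would start getting errors from the kernel once the ioctls are gone, which is the compatibility concern raised in the comments.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/omap_drm.h>	/* the uapi header patched above; path illustrative */

/* Hypothetical example of the removed CPU access protocol (illustration only). */
static int cpu_begin_read(int drm_fd, uint32_t handle)
{
	struct drm_omap_gem_cpu_prep prep = {
		.handle = handle,
		.op = OMAP_GEM_READ,
	};

	/* Blocks until pending GPU/DMA writes to the buffer have completed. */
	return ioctl(drm_fd, DRM_IOCTL_OMAP_GEM_CPU_PREP, &prep);
}

static int cpu_end_read(int drm_fd, uint32_t handle)
{
	struct drm_omap_gem_cpu_fini fini = {
		.handle = handle,
		.op = OMAP_GEM_READ,
		.nregions = 0,	/* full buffer, per the comment in the removed struct */
	};

	return ioctl(drm_fd, DRM_IOCTL_OMAP_GEM_CPU_FINI, &fini);
}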