[v2] drm/omap: Fix release of refill engine

Message ID 1350058691-3132-1-git-send-email-andy.gross@ti.com (mailing list archive)
State New, archived

Commit Message

Andy Gross Oct. 12, 2012, 4:18 p.m. UTC
During asynchronous refills, we don't wait for the refill to
finish.  However, we cannot release the engine back to the idle
list until it has actually completed the refill operation.  The
engine release will now be done in the IRQ handler, but only
for asynchronous refill operations.

Synchronous refills will continue to release the engine after they
unblock from waiting on the refill.

v2: Addressed review comments on the async variable and bool type

Signed-off-by: Andy Gross <andy.gross@ti.com>
---
 drivers/staging/omapdrm/omap_dmm_priv.h  |    5 ++-
 drivers/staging/omapdrm/omap_dmm_tiler.c |   76 ++++++++++++++++++++---------
 2 files changed, 56 insertions(+), 25 deletions(-)
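
For readers skimming the diff: the core of the fix is to stop releasing
the refill engine from dmm_txn_commit() on the asynchronous path, and to
replace the engine-pool semaphore with an atomic counter plus a waitqueue
(one visible gain: wait_event_interruptible() lets a waiter be interrupted
by a signal, which the old bare down() did not). Below is a minimal sketch
of that counting pattern; the names are illustrative, not the driver's.

#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t free_engines;		/* set to num_engines at probe */
static DECLARE_WAIT_QUEUE_HEAD(engine_waitq);

/*
 * Take one engine slot, sleeping interruptibly until one is free.
 * atomic_add_unless(v, -1, 0) decrements only when the counter is
 * non-zero and returns non-zero iff it did, so a spurious wakeup just
 * re-tests the condition. Returns 0 on success, -ERESTARTSYS on a
 * signal.
 */
static int engine_slot_get(void)
{
	return wait_event_interruptible(engine_waitq,
			atomic_add_unless(&free_engines, -1, 0));
}

/* Hand a slot back; safe to call from hard-IRQ context. */
static void engine_slot_put(void)
{
	atomic_inc(&free_engines);
	wake_up_interruptible(&engine_waitq);
}

In the patch, dmm_txn_init() plays the role of engine_slot_get() and the
new release_engine() plays engine_slot_put(), with the idle list itself
still guarded by list_lock.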

Comments

Rob Clark Oct. 12, 2012, 4:23 p.m. UTC | #1
On Fri, Oct 12, 2012 at 11:18 AM, Andy Gross <andy.gross@ti.com> wrote:
> During asynchronous refills, we don't wait for the refill to
> finish.  However, we cannot release the engine back to the idle
> list until it has actually completed the refill operation.  The
> engine release will now be done in the IRQ handler, but only
> for asynchronous refill operations.
>
> Synchronous refills will continue to release the engine after they
> unblock from waiting on the refill.
>
> v2: Addressed review comments on the async variable and bool type
>
> Signed-off-by: Andy Gross <andy.gross@ti.com>

Signed-off-by: Rob Clark <rob@ti.com>

Patch

diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h b/drivers/staging/omapdrm/omap_dmm_priv.h
index 09ebc50..273ec12 100644
--- a/drivers/staging/omapdrm/omap_dmm_priv.h
+++ b/drivers/staging/omapdrm/omap_dmm_priv.h
@@ -141,6 +141,8 @@ struct refill_engine {
 	/* only one trans per engine for now */
 	struct dmm_txn txn;
 
+	bool async;
+
 	wait_queue_head_t wait_for_refill;
 
 	struct list_head idle_node;
@@ -158,10 +160,11 @@ struct dmm {
 	dma_addr_t refill_pa;
 
 	/* refill engines */
-	struct semaphore engine_sem;
+	wait_queue_head_t engine_queue;
 	struct list_head idle_head;
 	struct refill_engine *engines;
 	int num_engines;
+	atomic_t engine_counter;
 
 	/* container information */
 	int container_width;
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c b/drivers/staging/omapdrm/omap_dmm_tiler.c
index fda9efc..62d5123 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.c
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
@@ -29,7 +29,6 @@
 #include <linux/mm.h>
 #include <linux/time.h>
 #include <linux/list.h>
-#include <linux/semaphore.h>
 
 #include "omap_dmm_tiler.h"
 #include "omap_dmm_priv.h"
@@ -120,6 +119,18 @@ static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
 	return 0;
 }
 
+static void release_engine(struct refill_engine *engine)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&list_lock, flags);
+	list_add(&engine->idle_node, &omap_dmm->idle_head);
+	spin_unlock_irqrestore(&list_lock, flags);
+
+	atomic_inc(&omap_dmm->engine_counter);
+	wake_up_interruptible(&omap_dmm->engine_queue);
+}
+
 static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
 {
 	struct dmm *dmm = arg;
@@ -130,9 +141,13 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
 	writel(status, dmm->base + DMM_PAT_IRQSTATUS);
 
 	for (i = 0; i < dmm->num_engines; i++) {
-		if (status & DMM_IRQSTAT_LST)
+		if (status & DMM_IRQSTAT_LST) {
 			wake_up_interruptible(&dmm->engines[i].wait_for_refill);
 
+			if (dmm->engines[i].async)
+				release_engine(&dmm->engines[i]);
+		}
+
 		status >>= 8;
 	}
 
@@ -146,17 +161,24 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
 {
 	struct dmm_txn *txn = NULL;
 	struct refill_engine *engine = NULL;
+	int ret;
+	unsigned long flags;
 
-	down(&dmm->engine_sem);
+
+	/* wait until an engine is available */
+	ret = wait_event_interruptible(omap_dmm->engine_queue,
+		atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
+	if (ret)
+		return ERR_PTR(ret);
 
 	/* grab an idle engine */
-	spin_lock(&list_lock);
+	spin_lock_irqsave(&list_lock, flags);
 	if (!list_empty(&dmm->idle_head)) {
 		engine = list_entry(dmm->idle_head.next, struct refill_engine,
 					idle_node);
 		list_del(&engine->idle_node);
 	}
-	spin_unlock(&list_lock);
+	spin_unlock_irqrestore(&list_lock, flags);
 
 	BUG_ON(!engine);
 
@@ -174,7 +196,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
  * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
  * corresponding slot is cleared (ie. dummy_pa is programmed)
  */
-static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
+static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
 		struct page **pages, uint32_t npages, uint32_t roll)
 {
 	dma_addr_t pat_pa = 0;
@@ -208,7 +230,7 @@ static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
 
 	txn->last_pat = pat;
 
-	return 0;
+	return;
 }
 
 /**
@@ -238,6 +260,9 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
 		goto cleanup;
 	}
 
+	/* mark whether it is async to denote list management in IRQ handler */
+	engine->async = wait ? false : true;
+
 	/* kick reload */
 	writel(engine->refill_pa,
 		dmm->base + reg[PAT_DESCR][engine->id]);
@@ -252,11 +277,10 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
 	}
 
 cleanup:
-	spin_lock(&list_lock);
-	list_add(&engine->idle_node, &dmm->idle_head);
-	spin_unlock(&list_lock);
+	/* only place engine back on list if we are done with it */
+	if (ret || wait)
+		release_engine(engine);
 
-	up(&omap_dmm->engine_sem);
 	return ret;
 }
 
@@ -280,16 +304,13 @@ static int fill(struct tcm_area *area, struct page **pages,
 				.x1 = slice.p1.x,  .y1 = slice.p1.y,
 		};
 
-		ret = dmm_txn_append(txn, &p_area, pages, npages, roll);
-		if (ret)
-			goto fail;
+		dmm_txn_append(txn, &p_area, pages, npages, roll);
 
 		roll += tcm_sizeof(slice);
 	}
 
 	ret = dmm_txn_commit(txn, wait);
 
-fail:
 	return ret;
 }
 
@@ -326,6 +347,7 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
 	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
 	u32 min_align = 128;
 	int ret;
+	unsigned long flags;
 
 	BUG_ON(!validfmt(fmt));
 
@@ -347,9 +369,9 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
 	}
 
 	/* add to allocation list */
-	spin_lock(&list_lock);
+	spin_lock_irqsave(&list_lock, flags);
 	list_add(&block->alloc_node, &omap_dmm->alloc_head);
-	spin_unlock(&list_lock);
+	spin_unlock_irqrestore(&list_lock, flags);
 
 	return block;
 }
@@ -358,6 +380,7 @@ struct tiler_block *tiler_reserve_1d(size_t size)
 {
 	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
 	int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long flags;
 
 	if (!block)
 		return ERR_PTR(-ENOMEM);
@@ -370,9 +393,9 @@ struct tiler_block *tiler_reserve_1d(size_t size)
 		return ERR_PTR(-ENOMEM);
 	}
 
-	spin_lock(&list_lock);
+	spin_lock_irqsave(&list_lock, flags);
 	list_add(&block->alloc_node, &omap_dmm->alloc_head);
-	spin_unlock(&list_lock);
+	spin_unlock_irqrestore(&list_lock, flags);
 
 	return block;
 }
@@ -381,13 +404,14 @@ struct tiler_block *tiler_reserve_1d(size_t size)
 int tiler_release(struct tiler_block *block)
 {
 	int ret = tcm_free(&block->area);
+	unsigned long flags;
 
 	if (block->area.tcm)
 		dev_err(omap_dmm->dev, "failed to release block\n");
 
-	spin_lock(&list_lock);
+	spin_lock_irqsave(&list_lock, flags);
 	list_del(&block->alloc_node);
-	spin_unlock(&list_lock);
+	spin_unlock_irqrestore(&list_lock, flags);
 
 	kfree(block);
 	return ret;
@@ -507,16 +531,17 @@ static int omap_dmm_remove(struct platform_device *dev)
 {
 	struct tiler_block *block, *_block;
 	int i;
+	unsigned long flags;
 
 	if (omap_dmm) {
 		/* free all area regions */
-		spin_lock(&list_lock);
+		spin_lock_irqsave(&list_lock, flags);
 		list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
 					alloc_node) {
 			list_del(&block->alloc_node);
 			kfree(block);
 		}
-		spin_unlock(&list_lock);
+		spin_unlock_irqrestore(&list_lock, flags);
 
 		for (i = 0; i < omap_dmm->num_lut; i++)
 			if (omap_dmm->tcm && omap_dmm->tcm[i])
@@ -560,6 +585,8 @@ static int omap_dmm_probe(struct platform_device *dev)
 	INIT_LIST_HEAD(&omap_dmm->alloc_head);
 	INIT_LIST_HEAD(&omap_dmm->idle_head);
 
+	init_waitqueue_head(&omap_dmm->engine_queue);
+
 	/* lookup hwmod data - base address and irq */
 	mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
 	if (!mem) {
@@ -588,6 +615,8 @@ static int omap_dmm_probe(struct platform_device *dev)
 	omap_dmm->container_width = 256;
 	omap_dmm->container_height = 128;
 
+	atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);
+
 	/* read out actual LUT width and height */
 	pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
 	omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
@@ -651,7 +680,6 @@ static int omap_dmm_probe(struct platform_device *dev)
 		goto fail;
 	}
 
-	sema_init(&omap_dmm->engine_sem, omap_dmm->num_engines);
 	for (i = 0; i < omap_dmm->num_engines; i++) {
 		omap_dmm->engines[i].id = i;
 		omap_dmm->engines[i].dmm = omap_dmm;
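
A closing note on the ownership rule the patch establishes in
dmm_txn_commit(): the committing thread returns the engine to the idle
pool only when the refill is known to be finished, that is, on the
synchronous path or on failure; the asynchronous path defers the release
to the IRQ handler via the new async flag. Here is a rough sketch of
that rule, reusing struct refill_engine and release_engine() from the
patch, with the hardware interaction stubbed out (kick_refill(),
refill_done(), and the 100 ms timeout are placeholders, not driver code):

/* Placeholders for the PAT descriptor write and status check. */
static void kick_refill(struct refill_engine *engine);
static bool refill_done(struct refill_engine *engine);

static int commit_sketch(struct refill_engine *engine, bool wait)
{
	int ret = 0;

	/* Tell the IRQ handler whether it owns the release. */
	engine->async = !wait;	/* the patch spells this "wait ? false : true" */

	kick_refill(engine);

	if (wait) {
		/* <= 0 covers both a timeout and -ERESTARTSYS. */
		if (wait_event_interruptible_timeout(engine->wait_for_refill,
				refill_done(engine),
				msecs_to_jiffies(100)) <= 0)
			ret = -ETIMEDOUT;
	}

	/*
	 * Sync path or failure: the engine is idle, release it here.
	 * Async success: releasing here would let the next transaction
	 * grab the engine mid-refill (the bug being fixed), so instead
	 * the IRQ handler calls release_engine() once DMM_IRQSTAT_LST
	 * fires for this engine.
	 */
	if (ret || wait)
		release_engine(engine);

	return ret;
}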