
[13/13] drm/msm: Implement better timeout detection

Message ID: 1538397105-19581-14-git-send-email-smasetty@codeaurora.org (mailing list archive)
State: New, archived
Series: drm/msm: Hook up the DRM gpu scheduler

Commit Message

Sharat Masetty Oct. 1, 2018, 12:31 p.m. UTC
The base scheduler patch has a barebones timeout implementation; it does
not account for issues such as starvation on lower-priority rings. This
patch enables a more accurate measurement of the time spent on each
ringbuffer, giving us a better timeout detection mechanism.

Signed-off-by: Sharat Masetty <smasetty@codeaurora.org>
---
 drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 29 +++++++++++++++++++++
 drivers/gpu/drm/msm/adreno/adreno_gpu.c   |  3 +++
 drivers/gpu/drm/msm/msm_ringbuffer.h      |  2 ++
 drivers/gpu/drm/msm/msm_sched.c           | 42 +++++++++++++++++++++++++++++++
 4 files changed, 76 insertions(+)
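
To see the accounting in isolation, here is a minimal userspace sketch of
the same idea, assuming a monotonic millisecond clock; the ring structure
and the switch_rings()/really_hung() helpers are illustrative stand-ins,
not the kernel APIs used in the patch below.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
	uint64_t last_ts;     /* when this ring last became active (ms) */
	uint64_t active_time; /* total time this ring has been active (ms) */
};

/* On a preemption switch: close out the outgoing ring, start timing the new one. */
static void switch_rings(struct ring *prev, struct ring *next, uint64_t now)
{
	prev->active_time += now - prev->last_ts;
	next->last_ts = now;
}

/*
 * On a scheduler timeout: declare a hang only if the ring actually held
 * the GPU for the full timeout; otherwise report how long to rearm for.
 */
static bool really_hung(const struct ring *r, bool is_active, uint64_t now,
			uint64_t timeout_ms, uint64_t *rearm_ms)
{
	uint64_t total = r->active_time;

	if (is_active)
		total += now - r->last_ts;

	if (total < timeout_ms) {
		*rearm_ms = timeout_ms - total;
		return false;
	}
	return true;
}

int main(void)
{
	struct ring lo = { 0 }, hi = { 0 };
	uint64_t rearm = 0;

	/* The low-priority ring runs for 100ms, then is preempted at t=100. */
	switch_rings(&lo, &hi, 100);

	/* The 500ms timeout fires at t=500: lo was active for only 100ms. */
	if (!really_hung(&lo, false, 500, 500, &rearm))
		printf("not hung; rearm timer for %llu ms\n",
		       (unsigned long long)rearm);
	return 0;
}

The point is that a ring starved by preemption accumulates little active
time, so when its scheduler timeout fires, the timer is simply rearmed for
the remaining budget instead of triggering a GPU recovery.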

Patch

diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 6a3c560..8bf81c1c 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -165,6 +165,33 @@  void a5xx_preempt_trigger(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
 }
 
+static void update_ring_timestamps(struct msm_ringbuffer *prev_ring,
+		struct msm_ringbuffer *cur_ring)
+{
+	unsigned long flags;
+
+	/*
+	 * For the outgoing ring (prev_ring), capture the last sample of time
+	 * spent on this ring and add it to the ring's total active_time.
+	 */
+	spin_lock_irqsave(&prev_ring->lock, flags);
+
+	prev_ring->active_time += jiffies_delta_to_msecs(jiffies -
+			prev_ring->last_ts);
+
+	spin_unlock_irqrestore(&prev_ring->lock, flags);
+
+	/*
+	 * For the incoming ring (cur_ring), save the current timestamp to
+	 * restart active time measurement.
+	 */
+	spin_lock_irqsave(&cur_ring->lock, flags);
+
+	cur_ring->last_ts = jiffies;
+
+	spin_unlock_irqrestore(&cur_ring->lock, flags);
+}
+
 void a5xx_preempt_irq(struct msm_gpu *gpu)
 {
 	uint32_t status;
@@ -194,6 +221,8 @@  void a5xx_preempt_irq(struct msm_gpu *gpu)
 		return;
 	}
 
+	update_ring_timestamps(a5xx_gpu->cur_ring, a5xx_gpu->next_ring);
+
 	a5xx_gpu->cur_ring = a5xx_gpu->next_ring;
 	a5xx_gpu->next_ring = NULL;
 
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 17d0506..f8b5f4a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -212,6 +212,9 @@  int adreno_hw_init(struct msm_gpu *gpu)
 		/* reset completed fence seqno: */
 		ring->memptrs->fence = ring->seqno;
 		ring->memptrs->rptr = 0;
+
+		ring->last_ts = 0;
+		ring->active_time = 0;
 	}
 
 	/*
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index 10ae4a8..27e0ab2 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -46,6 +46,8 @@  struct msm_ringbuffer {
 	struct mutex fence_idr_lock;
 	spinlock_t lock;
 	struct drm_gpu_scheduler sched;
+	unsigned long last_ts;
+	u32 active_time;
 };
 
 struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
diff --git a/drivers/gpu/drm/msm/msm_sched.c b/drivers/gpu/drm/msm/msm_sched.c
index 8b805ce..70b7713 100644
--- a/drivers/gpu/drm/msm/msm_sched.c
+++ b/drivers/gpu/drm/msm/msm_sched.c
@@ -191,6 +191,9 @@  static void msm_sched_timedout_job(struct drm_sched_job *bad_job)
 	struct msm_gem_submit *submit = to_msm_gem_submit(bad_job);
 	struct msm_gpu *gpu = submit->gpu;
 	struct msm_ringbuffer *ring = submit->ring;
+	struct drm_gpu_scheduler *sched = &ring->sched;
+	unsigned long flags;
+	u32 total_time = 0;
 
 	/*
 	 * If this submission completed in the mean time, then the timeout is
@@ -199,6 +202,23 @@  static void msm_sched_timedout_job(struct drm_sched_job *bad_job)
 	if (submit->seqno <= submit->ring->memptrs->fence)
 		return;
 
+	spin_lock_irqsave(&ring->lock, flags);
+
+	total_time = ring->active_time;
+
+	/* Measure the last sample only if this is the active ring */
+	if (ring == gpu->funcs->active_ring(gpu))
+		total_time += jiffies_delta_to_msecs(jiffies - ring->last_ts);
+
+	spin_unlock_irqrestore(&ring->lock, flags);
+
+	if (msecs_to_jiffies(total_time) < sched->timeout) {
+		schedule_delayed_work(&bad_job->work_tdr,
+				sched->timeout - msecs_to_jiffies(total_time));
+		return;
+	}
+
+	/* Timeout occurred, go for recovery */
 	dev_err(&gpu->pdev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
 			gpu->name, ring->id);
 	dev_err(&gpu->pdev->dev, "%s:     completed fence: %u\n",
@@ -231,11 +251,33 @@  static void msm_sched_free_job(struct drm_sched_job *sched_job)
 	msm_gem_submit_free(submit);
 }
 
+static void msm_sched_timeout_start(struct drm_sched_job *sched_job)
+{
+	struct msm_gem_submit *submit = to_msm_gem_submit(sched_job);
+	struct msm_gpu *gpu = submit->gpu;
+	struct msm_ringbuffer *ring = submit->ring;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ring->lock, flags);
+
+	ring->active_time = 0;
+
+	/*
+	 * Save the initial timestamp only if this ring is active. For other
+	 * rings the initial timestamp is captured at preemption switch-in.
+	 */
+	if (ring == gpu->funcs->active_ring(gpu))
+		ring->last_ts = jiffies;
+
+	spin_unlock_irqrestore(&ring->lock, flags);
+}
+
 static const struct drm_sched_backend_ops msm_sched_ops = {
 	.dependency = msm_sched_dependency,
 	.run_job = msm_sched_run_job,
 	.timedout_job = msm_sched_timedout_job,
 	.free_job = msm_sched_free_job,
+	.timeout_start_notify = msm_sched_timeout_start,
 };
 
 int msm_sched_job_init(struct drm_sched_job *sched_job)