diff mbox

[RFC,1/2] drm/i915: Extend GuC action fast spin time

Message ID 20171118003934.4800-2-John.C.Harrison@Intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

John Harrison Nov. 18, 2017, 12:39 a.m. UTC
From: John Harrison <John.C.Harrison@Intel.com>

The 'request pre-emption' GuC command seems to be slower than other
commands. It typically takes 20-30us on a GP-MRB system (BXT). That
means that the super-fast busy-spin wait in the GuC send action code
hits the 10us time out. It then drops through to the more system
friendly sleeping wait with a 10ms timeout. Unfortunately, the
sleeping wait seems to average a 1.5ms delay. That is almost 100 times
slower than necessary! It also means that the super-high-priority
pre-emption request is getting a significant extra latency. Even
worse, the latency can be upwards of 8ms if the kernel decides not to
reschedule the i915 driver soon because the system is busy doing other
things.

This patch extends the busy-spin wait timeout specifically for the
case of pre-emption requests.

Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
---
 drivers/gpu/drm/i915/i915_guc_submission.c | 12 ++++++------
 drivers/gpu/drm/i915/intel_guc_log.c       |  6 +++---
 drivers/gpu/drm/i915/intel_huc.c           |  2 +-
 drivers/gpu/drm/i915/intel_uc.c            | 18 ++++++++++++++----
 drivers/gpu/drm/i915/intel_uc.h            |  2 +-
 5 files changed, 25 insertions(+), 15 deletions(-)
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 918cedcef104..ff82f0561ec1 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -78,7 +78,7 @@  static int guc_allocate_doorbell(struct intel_guc *guc,
 		client->ctx_index
 	};
 
-	return intel_guc_send(guc, action, ARRAY_SIZE(action));
+	return intel_guc_send(guc, action, ARRAY_SIZE(action), false);
 }
 
 static int guc_release_doorbell(struct intel_guc *guc,
@@ -89,7 +89,7 @@  static int guc_release_doorbell(struct intel_guc *guc,
 		client->ctx_index
 	};
 
-	return intel_guc_send(guc, action, ARRAY_SIZE(action));
+	return intel_guc_send(guc, action, ARRAY_SIZE(action), false);
 }
 
 static struct guc_context_desc *__get_context_desc(struct i915_guc_client *client)
@@ -605,7 +605,7 @@  static int i915_guc_preempt(struct intel_engine_cs *engine)
 	data[5] = guc->execbuf_client->ctx_index;
 	data[6] = guc->shared_data_offset;
 
-	return intel_guc_send(guc, data, ARRAY_SIZE(data));
+	return intel_guc_send(guc, data, ARRAY_SIZE(data), true);
 }
 
 /**
@@ -1442,7 +1442,7 @@  int intel_guc_suspend(struct drm_i915_private *dev_priv)
 	/* first page of default ctx is shared data with GuC */
 	data[2] = guc->shared_data_offset;
 
-	return intel_guc_send(guc, data, ARRAY_SIZE(data));
+	return intel_guc_send(guc, data, ARRAY_SIZE(data), false);
 }
 
 
@@ -1466,7 +1466,7 @@  int intel_guc_resume(struct drm_i915_private *dev_priv)
 	/* first page of default ctx is shared data with GuC */
 	data[2] = guc->shared_data_offset;
 
-	return intel_guc_send(guc, data, ARRAY_SIZE(data));
+	return intel_guc_send(guc, data, ARRAY_SIZE(data), false);
 }
 
 int i915_guc_reset_engine(struct intel_engine_cs *engine)
@@ -1493,5 +1493,5 @@  int i915_guc_reset_engine(struct intel_engine_cs *engine)
 	/* first page is shared data with GuC */
 	data[6] = guc_ggtt_offset(ctx->engine[RCS].state);
 
-	return intel_guc_send(guc, data, ARRAY_SIZE(data));
+	return intel_guc_send(guc, data, ARRAY_SIZE(data), false);
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 0a4dd4454adf..95fd4e1ace41 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -43,7 +43,7 @@  static int guc_log_flush_complete(struct intel_guc *guc)
 		INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
 	};
 
-	return intel_guc_send(guc, action, ARRAY_SIZE(action));
+	return intel_guc_send(guc, action, ARRAY_SIZE(action), false);
 }
 
 static int guc_log_flush(struct intel_guc *guc)
@@ -53,7 +53,7 @@  static int guc_log_flush(struct intel_guc *guc)
 		0
 	};
 
-	return intel_guc_send(guc, action, ARRAY_SIZE(action));
+	return intel_guc_send(guc, action, ARRAY_SIZE(action), false);
 }
 
 static int guc_log_control(struct intel_guc *guc, u32 control_val)
@@ -63,7 +63,7 @@  static int guc_log_control(struct intel_guc *guc, u32 control_val)
 		control_val
 	};
 
-	return intel_guc_send(guc, action, ARRAY_SIZE(action));
+	return intel_guc_send(guc, action, ARRAY_SIZE(action), false);
 }
 
 
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 80c262fd56ab..e78bca7fd074 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -314,7 +314,7 @@  void intel_guc_auth_huc(struct drm_i915_private *dev_priv)
 	data[0] = INTEL_GUC_ACTION_AUTHENTICATE_HUC;
 	data[1] = guc_ggtt_offset(vma) + huc->fw.rsa_offset;
 
-	ret = intel_guc_send(guc, data, ARRAY_SIZE(data));
+	ret = intel_guc_send(guc, data, ARRAY_SIZE(data), false);
 	if (ret) {
 		DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
 		goto out;
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index c46bc8594f22..680290ac36d6 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -43,12 +43,13 @@  static bool intel_guc_recv(struct intel_guc *guc, u32 *status)
 	return INTEL_GUC_RECV_IS_RESPONSE(val);
 }
 
-int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
+int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len, bool urgent)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	u32 status;
 	int i;
 	int ret;
+	int fast_retry = urgent ? 10 : 1;
 
 	if (WARN_ON(len < 1 || len > 15))
 		return -EINVAL;
@@ -69,9 +70,18 @@  int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
 	/*
 	 * Fast commands should complete in less than 10us, so sample quickly
 	 * up to that length of time, then switch to a slower sleep-wait loop.
-	 * No inte_guc_send command should ever take longer than 10ms.
+	 * No intel_guc_send command should ever take longer than 10ms.
+	 *
+	 * Updated: The 'request pre-emption' GuC command seems to average 120us
+	 * not <10us. Unfortunately, the second sleep option seems to have a
+	 * minimum stall time of around 8ms. That causes a huge increase in
+	 * pre-emption latency which is unacceptable for high priority workloads.
 	 */
-	ret = wait_for_us(intel_guc_recv(guc, &status), 10);
+	for (i = 0; i < fast_retry; i++) {
+		ret = wait_for_us(intel_guc_recv(guc, &status), 10);
+		if (ret == 0)
+			break;
+	}
 	if (ret)
 		ret = wait_for(intel_guc_recv(guc, &status), 10);
 	if (status != INTEL_GUC_STATUS_SUCCESS) {
@@ -111,6 +121,6 @@  int intel_guc_sample_forcewake(struct intel_guc *guc)
 		/* bit 0 and 1 are for Render and Media domain separately */
 		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
 
-	return intel_guc_send(guc, action, ARRAY_SIZE(action));
+	return intel_guc_send(guc, action, ARRAY_SIZE(action), false);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index a47269223531..b76604dfb82d 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -191,7 +191,7 @@  struct intel_huc {
 
 /* intel_uc.c */
 void intel_uc_init_early(struct drm_i915_private *dev_priv);
-int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len);
+int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len, bool urgent);
 int intel_guc_sample_forcewake(struct intel_guc *guc);
 
 /* intel_guc_loader.c */