drm/i915: SNB BLT workaround

Message ID: 1288686661-12154-1-git-send-email-nanhai.zou@intel.com
State: New, archived

Commit Message

Zou, Nanhai Nov. 2, 2010, 8:31 a.m. UTC
Patch

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 09f2dc3..4070f32 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -854,15 +854,116 @@ blt_ring_put_user_irq(struct drm_device *dev,
 	/* do nothing */
 }
 
+
+/*
+ * Workaround for early SNB steppings: every time the BLT ring tail
+ * pointer moves, the first command parsed from the ring must be
+ * MI_BATCH_BUFFER_START.
+ */
+/* Dummy batch (a single MI_BATCH_BUFFER_END) that every tail move jumps to */
+static struct drm_gem_object *wa_batch;
+static unsigned long wa_batch_addr;
+
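+/* Only Gen6 parts with PCI revision below 8 need the workaround */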
+#define NEED_BLT_WORKAROUND(dev) \
+	(IS_GEN6(dev) && (dev->pdev->revision < 8))
+
+static int blt_ring_init(struct drm_device *dev,
+			 struct intel_ring_buffer *ring)
+{
+	u32 *ptr;
+	struct drm_i915_gem_object *batch;
+
+	if (NEED_BLT_WORKAROUND(dev) && wa_batch == NULL) {
+		int ret;
+
+		wa_batch = i915_gem_alloc_object(dev, 4096);
+		if (wa_batch == NULL)
+			return -ENOMEM;
+
+		ret = i915_gem_object_pin(wa_batch, 4096);
+		if (ret) {
+			drm_gem_object_unreference(wa_batch);
+			wa_batch = NULL;
+			return ret;
+		}
+
+		batch = to_intel_bo(wa_batch);
+		wa_batch_addr = batch->gtt_offset;
+
+		/* The batch contains nothing but MI_BATCH_BUFFER_END */
+		ptr = kmap(batch->pages[0]);
+		memset(ptr, 0, 4096);
+		*ptr = MI_BATCH_BUFFER_END;
+		kunmap(batch->pages[0]);
+	}
+
+	return init_ring_common(dev, ring);
+}
+
+static void blt_ring_flush(struct drm_device *dev,
+			    struct intel_ring_buffer *ring,
+			    u32 invalidate_domains,
+			    u32 flush_domains)
+{
+	if (NEED_BLT_WORKAROUND(dev)) {
+		intel_ring_begin(dev, ring, 6);
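+		/* First command after the tail move must be MI_BATCH_BUFFER_START */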
+		intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
+		intel_ring_emit(dev, ring, wa_batch_addr);
+	} else
+		intel_ring_begin(dev, ring, 4);
+
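+	/* MI_FLUSH_DW; the three zero dwords leave address/data unused */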
+	intel_ring_emit(dev, ring, MI_FLUSH_DW);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+
+	intel_ring_advance(dev, ring);
+}
+
+static u32
+blt_ring_add_request(struct drm_device *dev,
+		     struct intel_ring_buffer *ring,
+		     u32 flush_domains)
+{
+	u32 seqno;
+
+	seqno = i915_gem_get_seqno(dev);
+
+	if (NEED_BLT_WORKAROUND(dev)) {
+		intel_ring_begin(dev, ring, 6);
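+		/* Same workaround as in blt_ring_flush for this tail move */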
+		intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
+		intel_ring_emit(dev, ring, wa_batch_addr);
+	} else {
+		intel_ring_begin(dev, ring, 4);
+	}
+
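+	/* Write the seqno into the status page, then raise a user interrupt */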
+	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(dev, ring,
+			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(dev, ring, seqno);
+	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+
+	intel_ring_advance(dev, ring);
+
+	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+
+	return seqno;
+}
+
 static const struct intel_ring_buffer gen6_blt_ring = {
        .name			= "blt ring",
        .id			= RING_BLT,
        .mmio_base		= BLT_RING_BASE,
        .size			= 32 * PAGE_SIZE,
-       .init			= init_ring_common,
+       .init			= blt_ring_init,
        .write_tail		= ring_write_tail,
-       .flush			= gen6_ring_flush,
-       .add_request		= ring_add_request,
+       .flush			= blt_ring_flush,
+       .add_request		= blt_ring_add_request,
        .get_seqno		= ring_status_page_get_seqno,
        .user_irq_get		= blt_ring_get_user_irq,
        .user_irq_put		= blt_ring_put_user_irq,