[3/5] multiple ring buffer support, add BSD ring buffer support

Message ID: 32606542045FF34BA04F9D5BB0CB6BB5A532ABA9@shzsmsx502.ccr.corp.intel.com
State: Rejected

Commit Message

Zou, Nanhai April 2, 2010, 5:30 a.m. UTC

Patch

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index c0fce93..ead4a29 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -127,6 +127,8 @@  static int i915_dma_cleanup(struct drm_device * dev)
 		drm_irq_uninstall(dev);
 
 	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+	if (HAS_BSD(dev))
+		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
 
 	/* Clear the HWS virtual address at teardown */
 	if (I915_NEED_GFX_HWS(dev))
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3e0218f..8e638ca 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -233,6 +233,7 @@  typedef struct drm_i915_private {
 
 	struct pci_dev *bridge_dev;
 	struct intel_ring_buffer render_ring;
+	struct intel_ring_buffer bsd_ring;
 
 
 	drm_dma_handle_t *status_page_dmah;
@@ -1101,6 +1102,7 @@  extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 			 (dev)->pci_device == 0x2A42 ||		\
 			 (dev)->pci_device == 0x2E42)
 
+#define HAS_BSD(dev)           (IS_IRONLAKE(dev) || IS_G4X(dev))
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
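(Note on the new macro: HAS_BSD() is the single gate for every BSD path in this series; teardown, flush dispatch, and ring init all test it before touching dev_priv->bsd_ring. Below is a minimal, compilable userspace sketch of the same capability-gating pattern; struct chip_info and its fields are illustrative stand-ins, not the driver's types.)

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-device info the kernel keys HAS_BSD() off of. */
struct chip_info {
	bool is_g4x;
	bool is_ironlake;
};

/* Same shape as the HAS_BSD() macro in the hunk above. */
#define HAS_BSD(info) ((info)->is_ironlake || (info)->is_g4x)

int main(void)
{
	struct chip_info g45 = { .is_g4x = true };
	struct chip_info i945 = { 0 };

	printf("G45:  HAS_BSD = %d\n", HAS_BSD(&g45));	/* 1: BSD ring set up */
	printf("945G: HAS_BSD = %d\n", HAS_BSD(&i945));	/* 0: BSD paths skipped */
	return 0;
}

Keeping the optional engine behind one predicate means a future engine only needs its own capability macro plus a set of per-ring ops.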
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 20242ba..fb99c28 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1886,70 +1886,17 @@  i915_gem_flush(struct drm_device *dev,
 	       uint32_t flush_domains)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t cmd;
-
-#if WATCH_EXEC
-	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
-		  invalidate_domains, flush_domains);
-#endif
-	trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
-				     invalidate_domains, flush_domains);
-
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		drm_agp_chipset_flush(dev);
 
-	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
-		/*
-		 * read/write caches:
-		 *
-		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
-		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
-		 * also flushed at 2d versus 3d pipeline switches.
-		 *
-		 * read-only caches:
-		 *
-		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
-		 * MI_READ_FLUSH is set, and is always flushed on 965.
-		 *
-		 * I915_GEM_DOMAIN_COMMAND may not exist?
-		 *
-		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
-		 * invalidated when MI_EXE_FLUSH is set.
-		 *
-		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
-		 * invalidated with every MI_FLUSH.
-		 *
-		 * TLBs:
-		 *
-		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
-		 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
-		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
-		 * are flushed at any MI_FLUSH.
-		 */
-
-		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
-		if ((invalidate_domains|flush_domains) &
-		    I915_GEM_DOMAIN_RENDER)
-			cmd &= ~MI_NO_WRITE_FLUSH;
-		if (!IS_I965G(dev)) {
-			/*
-			 * On the 965, the sampler cache always gets flushed
-			 * and this bit is reserved.
-			 */
-			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
-				cmd |= MI_READ_FLUSH;
-		}
-		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
-			cmd |= MI_EXE_FLUSH;
+	dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
+			invalidate_domains,
+			flush_domains);
 
-#if WATCH_EXEC
-		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
-#endif
-		BEGIN_LP_RING(2);
-		OUT_RING(cmd);
-		OUT_RING(MI_NOOP);
-		ADVANCE_LP_RING();
-	}
+	if (HAS_BSD(dev))
+		dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
+				invalidate_domains,
+				flush_domains);
 }
 
 /**
@@ -4552,6 +4499,10 @@  i915_gem_init_ringbuffer(struct drm_device *dev)
 		memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
 	}
 	ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+	if (!ret && HAS_BSD(dev)) {
+		dev_priv->bsd_ring = bsd_ring;
+		ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+	}
 	return ret;
 }
 
@@ -4560,6 +4511,9 @@  i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+	if (HAS_BSD(dev))
+		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+
 }
 
 int
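The i915_gem.c changes above are the core of the refactor: i915_gem_flush() no longer builds MI_FLUSH commands itself but calls each ring's ->flush hook, touching the BSD ring only when HAS_BSD() holds, and ring init/cleanup follow the same conditional pattern. A self-contained userspace sketch of that dispatch shape; the names and the assumption that hooks cannot fail are mine, not the patch's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
	const char *name;
	void (*flush)(struct ring *ring,
		      uint32_t invalidate_domains, uint32_t flush_domains);
};

struct device {
	bool has_bsd;
	struct ring render_ring;
	struct ring bsd_ring;
};

static void ring_flush(struct ring *ring,
		       uint32_t invalidate_domains, uint32_t flush_domains)
{
	/* On real hardware this would emit MI_FLUSH into the ring. */
	printf("%s: invalidate %08x flush %08x\n", ring->name,
	       (unsigned)invalidate_domains, (unsigned)flush_domains);
}

/* Mirrors the reworked i915_gem_flush(): render always, BSD if present. */
static void gem_flush(struct device *dev,
		      uint32_t invalidate_domains, uint32_t flush_domains)
{
	dev->render_ring.flush(&dev->render_ring,
			       invalidate_domains, flush_domains);
	if (dev->has_bsd)
		dev->bsd_ring.flush(&dev->bsd_ring,
				    invalidate_domains, flush_domains);
}

int main(void)
{
	struct device dev = {
		.has_bsd = true,
		.render_ring = { "render ring", ring_flush },
		.bsd_ring = { "bsd ring", ring_flush },
	};

	gem_flush(&dev, 0x2, 0x40);
	return 0;
}

This is also why the long domain-flushing comment moves out of GEM core: the MI_FLUSH details now belong to each engine, and the BSD ring's flush is just MI_FLUSH plus a padding MI_NOOP.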
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2720bc2..6923846 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -324,6 +324,7 @@ 
 #define   I915_DEBUG_INTERRUPT				(1<<2)
 #define   I915_USER_INTERRUPT				(1<<1)
 #define   I915_ASLE_INTERRUPT				(1<<0)
+#define   I915_BSD_USER_INTERRUPT                      (1<<25)
 #define EIR		0x020b0
 #define EMR		0x020b4
 #define ESR		0x020b8
@@ -358,6 +359,18 @@ 
 #define BB_ADDR		0x02140 /* 8 bytes */
 #define GFX_FLSH_CNTL	0x02170 /* 915+ only */
 
+/*
+ * BSD (bit stream decoder) instruction and interrupt control register defines
+ * (G4X and Ironlake only)
+ */
+
+#define BSD_RING_TAIL          0x04030
+#define BSD_RING_HEAD          0x04034
+#define BSD_RING_START         0x04038
+#define BSD_RING_CTL           0x0403c
+#define BSD_RING_ACTHD         0x04074
+#define BSD_HWS_PGA            0x04080
+
 
 /*
  * Framebuffer compression (915+ only)
@@ -2280,6 +2293,9 @@ 
 #define GT_SYNC_STATUS          (1 << 2)
 #define GT_USER_INTERRUPT       (1 << 0)
 
+#define GT_BSD_USER_INTERRUPT          (1 << 5)
+
+
 #define GTISR   0x44010
 #define GTIMR   0x44014
 #define GTIIR   0x44018
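The BSD register block above appears to mirror the render ring's registers (PRB0_TAIL at 0x02030, PRB0_HEAD at 0x02034, PRB0_START at 0x02038, PRB0_CTL at 0x0403c's render twin 0x0203c, the 965-style ACTHD at 0x02074, HWS_PGA at 0x02080) relocated to a 0x04000 base. A small compilable sketch deriving the same values from a per-ring base; the RING_*() macros here are illustrative, not definitions taken from this patch:

#include <stdio.h>

/* Hypothetical per-ring register layout: fixed offsets from an engine's
 * mmio base, matching the values in the hunk above for base = 0x04000. */
#define RING_TAIL(base)		((base) + 0x30)
#define RING_HEAD(base)		((base) + 0x34)
#define RING_START(base)	((base) + 0x38)
#define RING_CTL(base)		((base) + 0x3c)
#define RING_ACTHD(base)	((base) + 0x74)
#define RING_HWS_PGA(base)	((base) + 0x80)

int main(void)
{
	unsigned int base = 0x04000;	/* BSD block; render's PRB0 is 0x02000 */

	printf("BSD_RING_TAIL  = 0x%05x\n", RING_TAIL(base));	/* 0x04030 */
	printf("BSD_RING_HEAD  = 0x%05x\n", RING_HEAD(base));	/* 0x04034 */
	printf("BSD_RING_START = 0x%05x\n", RING_START(base));	/* 0x04038 */
	printf("BSD_RING_CTL   = 0x%05x\n", RING_CTL(base));	/* 0x0403c */
	printf("BSD_RING_ACTHD = 0x%05x\n", RING_ACTHD(base));	/* 0x04074 */
	printf("BSD_HWS_PGA    = 0x%05x\n", RING_HWS_PGA(base));	/* 0x04080 */
	return 0;
}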
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e2ee300..8470999 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -261,6 +261,180 @@  static void render_setup_status_page(struct drm_device *dev,
 	(void)I915_READ(HWS_PGA);
 }
 
+static void
+bsd_ring_flush(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		u32     invalidate_domains,
+		u32     flush_domains)
+{
+	intel_begin_ring_buffer(dev, ring, 2);
+	intel_fill_ring_buffer(dev, ring, MI_FLUSH);
+	intel_fill_ring_buffer(dev, ring, MI_NOOP);
+	intel_advance_ring_buffer(dev, ring);
+}
+
+static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
+}
+
+static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
+}
+
+static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	return I915_READ(BSD_RING_ACTHD);
+}
+
+static inline void bsd_ring_advance_ring(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	I915_WRITE(BSD_RING_TAIL, ring->tail);
+}
+
+static int init_bsd_ring(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	u32 head;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+
+	obj_priv = ring->gem_object->driver_private;
+
+	/* Stop the ring if it's running. */
+	I915_WRITE(BSD_RING_CTL, 0);
+	I915_WRITE(BSD_RING_HEAD, 0);
+	I915_WRITE(BSD_RING_TAIL, 0);
+
+	/* Initialize the ring. */
+	I915_WRITE(BSD_RING_START, obj_priv->gtt_offset);
+	head = ring->get_head(dev, ring);
+
+	/* G45 ring initialization fails to reset head to zero */
+	if (head != 0) {
+		DRM_ERROR("%s head not reset to zero "
+				"ctl %08x head %08x tail %08x start %08x\n",
+				ring->name,
+				I915_READ(BSD_RING_CTL),
+				I915_READ(BSD_RING_HEAD),
+				I915_READ(BSD_RING_TAIL),
+				I915_READ(BSD_RING_START));
+		I915_WRITE(BSD_RING_HEAD, 0);
+
+		DRM_ERROR("%s head forced to zero "
+				"ctl %08x head %08x tail %08x start %08x\n",
+				ring->name,
+				I915_READ(BSD_RING_CTL),
+				I915_READ(BSD_RING_HEAD),
+				I915_READ(BSD_RING_TAIL),
+				I915_READ(BSD_RING_START));
+	}
+
+	I915_WRITE(BSD_RING_CTL,
+			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+			| RING_NO_REPORT | RING_VALID);
+
+	head = ring->get_head(dev, ring);
+	return 0;
+}
+
+static u32
+bsd_ring_add_request(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		struct drm_file *file_priv,
+		u32 flush_domains)
+{
+	u32 seqno;
+	seqno = intel_ring_get_seqno(dev, ring);
+	intel_begin_ring_buffer(dev, ring, 4);
+	intel_fill_ring_buffer(dev, ring, MI_STORE_DWORD_INDEX);
+	intel_fill_ring_buffer(dev, ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_fill_ring_buffer(dev, ring, seqno);
+	intel_fill_ring_buffer(dev, ring, MI_USER_INTERRUPT);
+	intel_advance_ring_buffer(dev, ring);
+
+	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+
+	return seqno;
+}
+
+static void bsd_setup_status_page(struct drm_device *dev,
+		struct  intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
+	I915_READ(BSD_HWS_PGA);
+}
+
+static void
+bsd_ring_get_user_irq(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&ring->user_irq_lock, irqflags);
+	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
+		if (IS_IRONLAKE(dev))
+			ironlake_enable_graphics_irq(dev_priv, GT_BSD_USER_INTERRUPT);
+		else
+			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
+	}
+	spin_unlock_irqrestore(&ring->user_irq_lock, irqflags);
+
+}
+
+static void
+bsd_ring_put_user_irq(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&ring->user_irq_lock, irqflags);
+	BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
+	if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
+		if (IS_IRONLAKE(dev))
+			ironlake_disable_graphics_irq(dev_priv, GT_BSD_USER_INTERRUPT);
+		else
+			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
+	}
+	spin_unlock_irqrestore(&ring->user_irq_lock, irqflags);
+}
+
+static u32
+bsd_ring_get_gem_seqno(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static int
+bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		struct drm_i915_gem_execbuffer2 *exec,
+		struct drm_clip_rect *cliprects,
+		uint64_t exec_offset)
+{
+	uint32_t exec_start;
+	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+	intel_begin_ring_buffer(dev, ring, 2);
+	intel_fill_ring_buffer(dev, ring, MI_BATCH_BUFFER_START |
+			(2 << 6) | MI_BATCH_NON_SECURE_I965);
+	intel_fill_ring_buffer(dev, ring, exec_start);
+	intel_advance_ring_buffer(dev, ring);
+	return 0;
+}
+
 
 static int    
 render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
@@ -330,6 +504,7 @@  static void cleanup_status_page(struct drm_device *dev, struct intel_ring_buffer
 
 	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 }
+
 static int init_status_page(struct drm_device *dev, struct intel_ring_buffer *ring)
 {
 	struct drm_i915_gem_object *obj_priv;
@@ -544,6 +719,7 @@  u32 intel_ring_get_seqno(struct drm_device *dev,
 	return seqno;
 }
 
+/* ring buffer for render engine */
 struct intel_ring_buffer render_ring = {
 	.name 			= "render ring",
 	.ring_flag		= ON_RENDER_RING,
@@ -574,3 +750,37 @@  struct intel_ring_buffer render_ring = {
 	.status_page		= {NULL, 0, NULL},
 	.map 			= {0,}
 };
+
+/* ring buffer for bit-stream decoder */
+
+struct intel_ring_buffer bsd_ring = {
+	.name                   = "bsd ring",
+	.ring_flag              = ON_BSD_RING,
+	.size                   = 32 * PAGE_SIZE,
+	.alignment              = PAGE_SIZE,
+	.virtual_start          = NULL,
+	.dev                    = NULL,
+	.gem_object             = NULL,
+	.head                   = 0,
+	.tail                   = 0,
+	.space                  = 0,
+	.next_seqno             = 1,
+	.user_irq_refcount      = 0,
+	.irq_gem_seqno          = 0,
+	.waiting_gem_seqno      = 0,
+	.setup_status_page      = bsd_setup_status_page,
+	.init                   = init_bsd_ring,
+	.get_head               = bsd_ring_get_head,
+	.get_tail               = bsd_ring_get_tail,
+	.get_active_head        = bsd_ring_get_active_head,
+	.advance_ring           = bsd_ring_advance_ring,
+	.flush                  = bsd_ring_flush,
+	.add_request            = bsd_ring_add_request,
+	.get_gem_seqno          = bsd_ring_get_gem_seqno,
+	.user_irq_get           = bsd_ring_get_user_irq,
+	.user_irq_put           = bsd_ring_put_user_irq,
+	.dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
+	.status_page            = {NULL, 0, NULL},
+	.map                    = {0,}
+};
+
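For completeness, the request flow the BSD ring reuses: bsd_ring_add_request() emits MI_STORE_DWORD_INDEX so the ring writes the request's seqno into a status-page slot, follows it with MI_USER_INTERRUPT to wake waiters, and bsd_ring_get_gem_seqno() simply reads that slot back. A compilable userspace model of the handshake, with the hardware side collapsed into an immediate write (all names illustrative, not the kernel's API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HWS_INDEX 0x20	/* status page slot, cf. I915_GEM_HWS_INDEX */

struct ring_model {
	uint32_t status_page[1024];	/* stands in for the pinned status page */
	uint32_t next_seqno;
};

static uint32_t add_request(struct ring_model *ring)
{
	uint32_t seqno = ring->next_seqno++;

	/* On hardware this write is the MI_STORE_DWORD_INDEX the ring
	 * executes; here it is modeled as completing immediately. */
	ring->status_page[HWS_INDEX] = seqno;
	return seqno;
}

static bool seqno_passed(const struct ring_model *ring, uint32_t seqno)
{
	/* cf. bsd_ring_get_gem_seqno(): read the slot back and compare,
	 * using wrap-safe signed arithmetic. */
	return (int32_t)(ring->status_page[HWS_INDEX] - seqno) >= 0;
}

int main(void)
{
	struct ring_model ring = { .next_seqno = 1 };
	uint32_t seqno = add_request(&ring);

	printf("seqno %u passed: %s\n", (unsigned)seqno,
	       seqno_passed(&ring, seqno) ? "yes" : "no");
	return 0;
}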