
[1/5] multiple ring buffer support, introduce intel_ring_buffer struct

Message ID 32606542045FF34BA04F9D5BB0CB6BB5A532ABA6@shzsmsx502.ccr.corp.intel.com (mailing list archive)
State Rejected

Commit Message

Zou, Nanhai April 2, 2010, 5:30 a.m. UTC

Patch

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9929f84..bf8d097 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -22,6 +22,7 @@  i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
          intel_fb.o \
          intel_tv.o \
          intel_dvo.o \
+         intel_ringbuffer.o \
          intel_overlay.o \
          dvo_ch7xxx.o \
          dvo_ch7017.o \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aba8260..74d53ed 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -32,6 +32,7 @@ 

 #include "i915_reg.h"
 #include "intel_bios.h"
+#include "intel_ringbuffer.h"
 #include <linux/io-mapping.h>

 /* General customization:
@@ -826,6 +827,10 @@  extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
 extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask);
+

 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5388354..c1833a1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -73,7 +73,7 @@  ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
        }
 }

-static inline void
+void
 ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
        if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
@@ -114,7 +114,7 @@  i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
        }
 }

-static inline void
+void
 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
        if ((dev_priv->irq_mask_reg & mask) != mask) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
new file mode 100644
index 0000000..e2ee300
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -0,0 +1,576 @@ 
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Zou Nan hai <nanhai.zou@intel.com>
+ *    Xiang Hai hao <haihao.xiang@intel.com>
+ *
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drv.h"
+#include "i915_drm.h"
+#include "i915_trace.h"
+
+#define I915_GEM_GPU_DOMAINS   (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+static void
+render_ring_flush(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               u32     invalidate_domains,
+               u32     flush_domains)
+{
+       u32 cmd;
+
+#if WATCH_EXEC
+       DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
+                 invalidate_domains, flush_domains);
+#endif
+       trace_i915_gem_request_flush(dev, ring->next_seqno,
+                                    invalidate_domains, flush_domains);
+
+       if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
+               /*
+                * read/write caches:
+                *
+                * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+                * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
+                * also flushed at 2d versus 3d pipeline switches.
+                *
+                * read-only caches:
+                *
+                * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+                * MI_READ_FLUSH is set, and is always flushed on 965.
+                *
+                * I915_GEM_DOMAIN_COMMAND may not exist?
+                *
+                * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+                * invalidated when MI_EXE_FLUSH is set.
+                *
+                * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+                * invalidated with every MI_FLUSH.
+                *
+                * TLBs:
+                *
+                * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+                * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
+                * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+                * are flushed at any MI_FLUSH.
+                */
+
+               cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+               if ((invalidate_domains|flush_domains) &
+                               I915_GEM_DOMAIN_RENDER)
+                       cmd &= ~MI_NO_WRITE_FLUSH;
+               if (!IS_I965G(dev)) {
+                       /*
+                        * On the 965, the sampler cache always gets flushed
+                        * and this bit is reserved.
+                        */
+                       if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+                               cmd |= MI_READ_FLUSH;
+               }
+               if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+                       cmd |= MI_EXE_FLUSH;
+
+#if WATCH_EXEC
+               DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
+#endif
+               intel_begin_ring_buffer (dev, ring, 8);
+               intel_fill_ring_buffer (dev, ring, cmd);
+               intel_fill_ring_buffer (dev, ring, MI_NOOP);
+               intel_advance_ring_buffer (dev, ring);
+       }
+}
+
+static unsigned int render_ring_get_head(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       return I915_READ(PRB0_HEAD) & HEAD_ADDR;
+}
+
+static unsigned int render_ring_get_tail(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       return I915_READ(PRB0_TAIL) & TAIL_ADDR;
+}
+
+static unsigned int render_ring_get_active_head(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+
+       return I915_READ(acthd_reg);
+}
+
+static void render_ring_advance_ring(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       I915_WRITE(PRB0_TAIL, ring->tail);
+}
+
+
+static int init_render_ring(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       u32 head;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+       obj_priv = ring->gem_object->driver_private;
+
+       /* Stop the ring if it's running. */
+       I915_WRITE(PRB0_CTL, 0);
+       I915_WRITE(PRB0_HEAD, 0);
+       I915_WRITE(PRB0_TAIL, 0);
+
+       /* Initialize the ring. */
+       I915_WRITE(PRB0_START, obj_priv->gtt_offset);
+       head = ring->get_head(dev, ring);
+
+       /* G45 ring initialization fails to reset head to zero */
+       if (head != 0) {
+               DRM_ERROR("%s head not reset to zero "
+                               "ctl %08x head %08x tail %08x start %08x\n",
+                               ring->name,
+                               I915_READ(PRB0_CTL),
+                               I915_READ(PRB0_HEAD),
+                               I915_READ(PRB0_TAIL),
+                               I915_READ(PRB0_START));
+               I915_WRITE(PRB0_HEAD, 0);
+
+               DRM_ERROR("%s head forced to zero "
+                               "ctl %08x head %08x tail %08x start %08x\n",
+                               ring->name,
+                               I915_READ(PRB0_CTL),
+                               I915_READ(PRB0_HEAD),
+                               I915_READ(PRB0_TAIL),
+                               I915_READ(PRB0_START));
+       }
+
+       I915_WRITE(PRB0_CTL,
+                       ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+                       | RING_NO_REPORT | RING_VALID);
+
+       head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+       /* If the head is still not zero, the ring is dead */
+       if (head != 0) {
+               DRM_ERROR("%s initialization failed "
+                               "ctl %08x head %08x tail %08x start %08x\n",
+                               ring->name,
+                               I915_READ(PRB0_CTL),
+                               I915_READ(PRB0_HEAD),
+                               I915_READ(PRB0_TAIL),
+                               I915_READ(PRB0_START));
+               return -EIO;
+       }
+
+       return 0;
+}
+
+
+static u32
+render_ring_add_request(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               struct drm_file *file_priv,
+               u32 flush_domains)
+{
+       u32 seqno;
+       seqno = intel_ring_get_seqno(dev, ring);
+
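+       /* Write the seqno to the status page and raise a user interrupt. */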
+       intel_begin_ring_buffer (dev, ring, 4);
+       intel_fill_ring_buffer (dev, ring, MI_STORE_DWORD_INDEX);
+       intel_fill_ring_buffer (dev, ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+       intel_fill_ring_buffer (dev, ring, seqno);
+       intel_fill_ring_buffer (dev, ring, MI_USER_INTERRUPT);
+       intel_advance_ring_buffer (dev, ring);
+
+       DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+
+       return seqno;
+}
+
+static u32
+render_ring_get_gem_seqno(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void
+render_ring_get_user_irq(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&ring->user_irq_lock, irqflags);
+       if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
+               if (IS_IRONLAKE(dev))
+                       ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+               else
+                       i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+       }
+       spin_unlock_irqrestore(&ring->user_irq_lock, irqflags);
+
+}
+
+static void
+render_ring_put_user_irq(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&ring->user_irq_lock, irqflags);
+       BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
+       if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
+               if (IS_IRONLAKE(dev))
+                       ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+               else
+                       i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+       }
+       spin_unlock_irqrestore(&ring->user_irq_lock, irqflags);
+}
+
+static void render_setup_status_page(struct drm_device *dev,
+       struct  intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
+       (void)I915_READ(HWS_PGA);
+}
+
+
+static int
+render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               struct drm_i915_gem_execbuffer2 *exec,
+               struct drm_clip_rect *cliprects,
+               uint64_t exec_offset)
+{
+       int nbox = exec->num_cliprects;
+       int i = 0, count;
+       uint32_t exec_start, exec_len;
+       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+       exec_len = (uint32_t) exec->batch_len;
+
+       //      trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
+
+       count = nbox ? nbox : 1;
+
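+       /* Emit one batch buffer start per cliprect (just one when there are
+        * none); 830/845 use MI_BATCH_BUFFER with an explicit end address.
+        */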
+       for (i = 0; i < count; i++) {
+               if (i < nbox) {
+                       int ret = i915_emit_box(dev, cliprects, i,
+                                       exec->DR1, exec->DR4);
+                       if (ret)
+                               return ret;
+               }
+
+               if (IS_I830(dev) || IS_845G(dev)) {
+                       intel_begin_ring_buffer (dev, ring, 4);
+                       intel_fill_ring_buffer(dev, ring, MI_BATCH_BUFFER);
+                       intel_fill_ring_buffer(dev, ring, exec_start | MI_BATCH_NON_SECURE);
+                       intel_fill_ring_buffer(dev, ring, exec_start + exec_len - 4);
+                       intel_fill_ring_buffer(dev, ring, 0);
+               } else {
+                       intel_begin_ring_buffer (dev, ring, 4);
+                       if (IS_I965G(dev)) {
+                               intel_fill_ring_buffer(dev, ring, MI_BATCH_BUFFER_START |
+                                               (2 << 6) | MI_BATCH_NON_SECURE_I965);
+                               intel_fill_ring_buffer(dev, ring, exec_start);
+                       } else {
+                               intel_fill_ring_buffer(dev, ring, MI_BATCH_BUFFER_START |
+                                               (2 << 6));
+                               intel_fill_ring_buffer(dev, ring, exec_start | MI_BATCH_NON_SECURE);
+                       }
+               }
+               intel_advance_ring_buffer (dev, ring);
+       }
+
+       /* XXX breadcrumb */
+       return 0;
+}
+
+static void cleanup_status_page(struct drm_device *dev, struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+
+       obj = ring->status_page.obj;
+       if (obj == NULL)
+               return;
+       obj_priv = obj->driver_private;
+
+       kunmap(obj_priv->pages[0]);
+       i915_gem_object_unpin(obj);
+       drm_gem_object_unreference(obj);
+       ring->status_page.obj = NULL;
+
+       memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+}
+static int init_status_page(struct drm_device *dev, struct intel_ring_buffer *ring)
+{
+       struct drm_i915_gem_object *obj_priv;
+       struct drm_gem_object *obj;
+       int ret;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
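+       /* Allocate and pin a GEM object for the status page, then kmap it. */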
+       obj = drm_gem_object_alloc(dev, PAGE_SIZE*10);
+       if (obj == NULL)
+               return -ENOMEM;
+
+       obj_priv = obj->driver_private;
+       obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+       ret = i915_gem_object_pin(obj, PAGE_SIZE);
+       if (ret != 0) {
+               drm_gem_object_unreference(obj);
+               return ret;
+       }
+
+       ring->status_page.gfx_addr = obj_priv->gtt_offset;
+
+       ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+       if (ring->status_page.page_addr == NULL) {
+               memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+               i915_gem_object_unpin(obj);
+               drm_gem_object_unreference(obj);
+               return -EINVAL;
+       }
+       ring->status_page.obj = obj;
+       memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+       ring->setup_status_page(dev, ring);
+       DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", ring->name, ring->status_page.gfx_addr);
+       return 0;
+}
+
+
+int intel_init_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       int ret;
+       struct drm_i915_gem_object *obj_priv;
+       struct drm_gem_object *obj;
+       ring->dev = dev;
+
+       if (I915_NEED_GFX_HWS(dev)) {
+               ret = init_status_page(dev, ring);
+               if (ret)
+                       return ret;
+       }
+
+       obj = drm_gem_object_alloc(dev, ring->size);
+       if (obj == NULL) {
+               ret = -ENOMEM;
+               goto cleanup;
+       }
+
+       ring->gem_object = obj;
+
+       ret = i915_gem_object_pin(obj, ring->alignment);
+       if (ret != 0) {
+               drm_gem_object_unreference(obj);
+               goto cleanup;
+       }
+
+       obj_priv = obj->driver_private;
+       ring->map.size = ring->size;
+       ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+       ring->map.type = 0;
+       ring->map.flags = 0;
+       ring->map.mtrr = 0;
+
+       drm_core_ioremap_wc(&ring->map, dev);
+
+       if (ring->map.handle == NULL) {
+               i915_gem_object_unpin(obj);
+               drm_gem_object_unreference(obj);
+               ret = -EINVAL;
+               goto cleanup;
+       }
+
+       ring->virtual_start = ring->map.handle;
+       ret = ring->init(dev, ring);
+       if (ret != 0) {
+               intel_cleanup_ring_buffer(dev, ring);
+               return ret;
+       }
+
+       ring->head = ring->get_head(dev, ring);
+       ring->tail = ring->get_tail(dev, ring);
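+       /* Free space runs from tail to head; keep 8 bytes of slack so a
+        * full ring is never mistaken for an empty one.
+        */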
+       ring->space = ring->head - (ring->tail + 8);
+       if (ring->space < 0)
+               ring->space += ring->size;
+       spin_lock_init(&ring->user_irq_lock);
+       INIT_LIST_HEAD(&ring->active_list);
+       INIT_LIST_HEAD(&ring->request_list);
+       return ret;
+cleanup:
+       cleanup_status_page(dev, ring);
+       return ret;
+}
+
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       if (ring->gem_object == NULL)
+               return;
+
+       drm_core_ioremapfree(&ring->map, dev);
+
+       i915_gem_object_unpin(ring->gem_object);
+       drm_gem_object_unreference(ring->gem_object);
+       ring->gem_object = NULL;
+       cleanup_status_page(dev, ring);
+}
+
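+/* Fill the rest of the ring with MI_NOOPs and wrap the tail back to zero. */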
+int intel_wrap_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       unsigned int *virt;
+       int rem;
+       rem = ring->size - ring->tail;
+
+       if (ring->space < rem) {
+               int ret = intel_wait_ring_buffer(dev, ring, rem);
+               if (ret)
+                       return ret;
+       }
+
+       virt = (unsigned int *)(ring->virtual_start + ring->tail);
+       rem /= 4;
+       while (rem--)
+               *virt++ = MI_NOOP;
+
+       ring->tail = 0;
+
+       return 0;
+}
+
+int intel_wait_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring, int n)
+{
+       unsigned long end;
+
+       trace_i915_ring_wait_begin (dev);
+       end = jiffies + 3 * HZ;
+       do {
+               ring->head = ring->get_head(dev, ring);
+               ring->space = ring->head - (ring->tail + 8);
+               if (ring->space < 0)
+                       ring->space += ring->size;
+               if (ring->space >= n) {
+                       trace_i915_ring_wait_end (dev);
+                       return 0;
+               }
+               yield();
+       } while(!time_after(jiffies, end));
+       trace_i915_ring_wait_end (dev);
+
+       return -EBUSY;
+}
+
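+/* Make sure at least n bytes are free in the ring, wrapping and/or waiting
+ * for the GPU to consume commands as needed.
+ */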
+void intel_begin_ring_buffer (struct drm_device *dev,
+               struct intel_ring_buffer *ring, int n)
+{
+       if (unlikely(ring->tail + n > ring->size))
+               intel_wrap_ring_buffer(dev, ring);
+       if (unlikely(ring->space < n))
+               intel_wait_ring_buffer(dev, ring, n);
+}
+
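+/* Emit one dword at the tail; the mask assumes size is a power of two. */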
+void intel_fill_ring_buffer (struct drm_device *dev,
+               struct intel_ring_buffer *ring, unsigned int data)
+{
+       unsigned int *virt = ring->virtual_start + ring->tail;
+       *virt = data;
+       ring->tail += 4;
+       ring->tail &= ring->size - 1;
+       ring->space -= 4;
+}
+
+void intel_advance_ring_buffer (struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       ring->advance_ring(dev, ring);
+}
+
+void intel_fill_struct(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               void *data,
+               unsigned int len)
+{
+       unsigned int *virt = ring->virtual_start + ring->tail;
+       /* len is in bytes and must be a multiple of 4 */
+       BUG_ON((len & (4 - 1)) != 0);
+       intel_begin_ring_buffer (dev, ring, len);
+       memcpy(virt, data, len);
+       ring->tail += len;
+       ring->tail &= ring->size - 1;
+       ring->space -= len;
+       intel_advance_ring_buffer (dev, ring);
+}
+
+u32 intel_ring_get_seqno(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       u32 seqno;
+       seqno = ring->next_seqno;
+
+       /* reserve 0 for non-seqno */
+       if (++ring->next_seqno == 0)
+               ring->next_seqno = 1;
+       return seqno;
+}
+
+struct intel_ring_buffer render_ring = {
+       .name                   = "render ring",
+       .ring_flag              = ON_RENDER_RING,
+       .size                   = 32 * PAGE_SIZE,
+       .alignment              = PAGE_SIZE,
+       .virtual_start          = NULL,
+       .dev                    = NULL,
+       .gem_object             = NULL,
+       .head                   = 0,
+       .tail                   = 0,
+       .space                  = 0,
+       .next_seqno             = 1,
+       .user_irq_refcount      = 0,
+       .irq_gem_seqno          = 0,
+       .waiting_gem_seqno      = 0,
+       .setup_status_page      = render_setup_status_page,
+       .init                   = init_render_ring,
+       .get_head               = render_ring_get_head,
+       .get_tail               = render_ring_get_tail,
+       .get_active_head        = render_ring_get_active_head,
+       .advance_ring           = render_ring_advance_ring,
+       .flush                  = render_ring_flush,
+       .add_request            = render_ring_add_request,
+       .get_gem_seqno          = render_ring_get_gem_seqno,
+       .user_irq_get           = render_ring_get_user_irq,
+       .user_irq_put           = render_ring_put_user_irq,
+       .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+       .status_page            = {NULL, 0, NULL},
+       .map                    = {0,}
+};
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
new file mode 100644
index 0000000..012dc47
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -0,0 +1,120 @@ 
+#ifndef _INTEL_RINGBUFFER_H_
+#define _INTEL_RINGBUFFER_H_
+
+struct  intel_hw_status_page {
+       void            *page_addr;
+       unsigned int    gfx_addr;
+       struct          drm_gem_object *obj;
+};
+
+struct drm_i915_gem_execbuffer2;
+struct  intel_ring_buffer {
+       const char      *name;
+       unsigned int    ring_flag;
+       unsigned long   size;
+       unsigned int    alignment;
+       void            *virtual_start;
+       struct          drm_device *dev;
+       struct          drm_gem_object *gem_object;
+
+       unsigned int    head;
+       unsigned int    tail;
+       unsigned int    space;
+       u32             next_seqno;
+       struct intel_hw_status_page status_page;
+
+       spinlock_t      user_irq_lock;
+       int             user_irq_refcount;
+       u32             irq_gem_seqno;          /* last seq seen at irq time */
+       u32             waiting_gem_seqno;
+       void            (*user_irq_get)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       void            (*user_irq_put)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       void            (*setup_status_page)(struct drm_device *dev,
+                       struct  intel_ring_buffer *ring);
+
+       int             (*init)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+
+       unsigned int    (*get_head)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       unsigned int    (*get_tail)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       unsigned int    (*get_active_head)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       void            (*advance_ring)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       void            (*flush)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring,
+                       u32     invalidate_domains,
+                       u32     flush_domains);
+       u32             (*add_request)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring,
+                       struct drm_file *file_priv,
+                       u32 flush_domains);
+       u32             (*get_gem_seqno)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       int             (*dispatch_gem_execbuffer)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring,
+                       struct drm_i915_gem_execbuffer2 *exec,
+                       struct drm_clip_rect *cliprects,
+                       uint64_t exec_offset);
+
+       /**
+        * List of objects currently involved in rendering from the
+        * ringbuffer.
+        *
+        * Includes buffers having the contents of their GPU caches
+        * flushed, not necessarily primitives.  last_rendering_seqno
+        * represents when the rendering involved will be completed.
+        *
+        * A reference is held on the buffer while on this list.
+        */
+       struct list_head active_list;
+
+       /**
+        * List of breadcrumbs associated with GPU requests currently
+        * outstanding.
+        */
+       struct list_head request_list;
+
+       wait_queue_head_t irq_queue;
+       drm_local_map_t map;
+};
+
+static inline u32
+intel_read_status_page(struct intel_ring_buffer *ring,
+               int reg)
+{
+       u32 *regs = ring->status_page.page_addr;
+       return regs[reg];
+}
+
+int intel_init_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+int intel_wait_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring, int n);
+int intel_wrap_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+void intel_begin_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring, int n);
+void intel_fill_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring, u32 data);
+void intel_fill_struct(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               void *data,
+               unsigned int len);
+void intel_advance_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+
+u32 intel_ring_get_seqno(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+
+extern struct intel_ring_buffer render_ring;
+extern struct intel_ring_buffer bsd_ring;
+
+#endif /* _INTEL_RINGBUFFER_H_ */
+
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index b64a8d7..ad30e82 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -616,7 +616,9 @@  struct drm_i915_gem_execbuffer2 {
        __u32 num_cliprects;
        /** This is a struct drm_clip_rect *cliprects */
        __u64 cliprects_ptr;
-       __u64 flags; /* currently unused */
+#define ON_RENDER_RING                 (1<<0)
+#define ON_BSD_RING                    (1<<1)
+       __u64 flags;
        __u64 rsvd1;
        __u64 rsvd2;
 };