diff mbox

multiple ring buffer support (for libva H.264 decoding)

Message ID 32606542045FF34BA04F9D5BB0CB6BB5A5294ABE@shzsmsx502.ccr.corp.intel.com (mailing list archive)
State Deferred, archived
Headers show

Commit Message

Zou, Nanhai March 29, 2010, 5:13 a.m. UTC
None
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9929f84..bf8d097 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -22,6 +22,7 @@  i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
          intel_fb.o \
          intel_tv.o \
          intel_dvo.o \
+         intel_ringbuffer.o \
          intel_overlay.o \
          dvo_ch7xxx.o \
          dvo_ch7017.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1376dfe..d92aebc 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -38,9 +38,13 @@ 

 #if defined(CONFIG_DEBUG_FS)

-#define ACTIVE_LIST    1
-#define FLUSHING_LIST  2
-#define INACTIVE_LIST  3
+#define RING_ACTIVE_LIST       1
+#define BSD_ACTIVE_LIST                2
+#define FLUSHING_LIST          3
+#define INACTIVE_LIST          4
+
+#define RING_REQUEST_LIST      1
+#define BSD_REQUEST_LIST       2

 static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
 {
@@ -73,10 +77,18 @@  static int i915_gem_object_list_info(struct seq_file *m, void *data)
        spinlock_t *lock = NULL;

        switch (list) {
-       case ACTIVE_LIST:
-               seq_printf(m, "Active:\n");
+       case RING_ACTIVE_LIST:
+               seq_printf(m, "Render Ring Active:\n");
                lock = &dev_priv->mm.active_list_lock;
-               head = &dev_priv->mm.active_list;
+               head = &dev_priv->render_ring.active_list;
+               break;
+       case BSD_ACTIVE_LIST:
+               if (!HAS_BSD(dev))
+                       return 0;
+
+               seq_printf(m, "BSD Ring Active:\n");
+               lock = &dev_priv->mm.active_list_lock;
+               head = &dev_priv->bsd_ring.active_list;
                break;
        case INACTIVE_LIST:
                seq_printf(m, "Inactive:\n");
@@ -124,12 +136,33 @@  static int i915_gem_object_list_info(struct seq_file *m, void *data)
 static int i915_gem_request_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
+       uintptr_t list = (uintptr_t) node->info_ent->data;
+       struct list_head *head;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *gem_request;

+       switch (list) {
+       case RING_REQUEST_LIST:
+               seq_printf(m, "Render Ring Request:\n");
+               head = &dev_priv->render_ring.request_list;
+               break;
+
+       case BSD_REQUEST_LIST:
+               if (!HAS_BSD(dev))
+                       return 0;
+
+               seq_printf(m, "BSD Ring Request:\n");
+               head = &dev_priv->bsd_ring.request_list;
+               break;
+
+       default:
+               DRM_INFO("Oops, unexpected list\n");
+               return 0;
+       }
+
        seq_printf(m, "Request:\n");
-       list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
+       list_for_each_entry(gem_request, head, list) {
                seq_printf(m, "    %d @ %d\n",
                           gem_request->seqno,
                           (int) (jiffies - gem_request->emitted_jiffies));
@@ -139,11 +172,13 @@  static int i915_gem_request_info(struct seq_file *m, void *data)

 static int i915_gem_seqno_info(struct seq_file *m, void *data)
 {
+
+       /* XXX fix later
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

-       if (dev_priv->hw_status_page != NULL) {
+       if (dev_priv->render_ring.status_page.page_addr != NULL) {
                seq_printf(m, "Current sequence: %d\n",
                           i915_get_gem_seqno(dev));
        } else {
@@ -152,6 +187,7 @@  static int i915_gem_seqno_info(struct seq_file *m, void *data)
        seq_printf(m, "Waiter sequence:  %d\n",
                        dev_priv->mm.waiting_gem_seqno);
        seq_printf(m, "IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
+       */
        return 0;
 }

@@ -195,16 +231,19 @@  static int i915_interrupt_info(struct seq_file *m, void *data)
        }
        seq_printf(m, "Interrupts received: %d\n",
                   atomic_read(&dev_priv->irq_received));
-       if (dev_priv->hw_status_page != NULL) {
+
+       if (dev_priv->render_ring.status_page.page_addr != NULL) {
                seq_printf(m, "Current sequence:    %d\n",
-                          i915_get_gem_seqno(dev));
+                       i915_get_gem_seqno(dev, &dev_priv->render_ring));
        } else {
                seq_printf(m, "Current sequence:    hws uninitialized\n");
        }
+       /* XXX fix later
        seq_printf(m, "Waiter sequence:     %d\n",
                   dev_priv->mm.waiting_gem_seqno);
        seq_printf(m, "IRQ sequence:        %d\n",
                   dev_priv->mm.irq_gem_seqno);
+        */
        return 0;
 }

@@ -251,7 +290,7 @@  static int i915_hws_info(struct seq_file *m, void *data)
        int i;
        volatile u32 *hws;

-       hws = (volatile u32 *)dev_priv->hw_status_page;
+       hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
        if (hws == NULL)
                return 0;

@@ -287,7 +326,7 @@  static int i915_batchbuffer_info(struct seq_file *m, void *data)

        spin_lock(&dev_priv->mm.active_list_lock);

-       list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+       list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list, list) {
                obj = obj_priv->obj;
                if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
                    ret = i915_gem_object_get_pages(obj, 0);
@@ -308,45 +347,54 @@  static int i915_batchbuffer_info(struct seq_file *m, void *data)

        return 0;
 }
-
-static int i915_ringbuffer_data(struct seq_file *m, void *data)
+static int ringbuffer_data(struct seq_file *m,
+               struct intel_ring_buffer *ring)
 {
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
        u8 *virt;
        uint32_t *ptr, off;
-
-       if (!dev_priv->ring.ring_obj) {
+       if (!ring->gem_object) {
                seq_printf(m, "No ringbuffer setup\n");
                return 0;
        }
-
-       virt = dev_priv->ring.virtual_start;
-
-       for (off = 0; off < dev_priv->ring.Size; off += 4) {
+       virt = ring->virtual_start;
+       for (off = 0; off < ring->size; off += 4) {
                ptr = (uint32_t *)(virt + off);
                seq_printf(m, "%08x :  %08x\n", off, *ptr);
        }
-
        return 0;
 }

-static int i915_ringbuffer_info(struct seq_file *m, void *data)
+static int i915_render_ringbuffer_data(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       unsigned int head, tail;
+       return ringbuffer_data(m, &dev_priv->render_ring);
+}

-       head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-       tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+static void ring_buffer_info(struct seq_file *m,
+               struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+
+       unsigned int head, tail, active_head;
+
+       head = ring->get_head(dev, ring);
+       tail = ring->get_tail(dev, ring);
+       active_head = ring->get_active_head(dev, ring);

        seq_printf(m, "RingHead :  %08x\n", head);
        seq_printf(m, "RingTail :  %08x\n", tail);
-       seq_printf(m, "RingSize :  %08lx\n", dev_priv->ring.Size);
-       seq_printf(m, "Acthd :     %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+       seq_printf(m, "RingSize :  %08lx\n", ring->size);
+       seq_printf(m, "Acthd :     %08x\n", active_head);
+}

+static int i915_render_ringbuffer_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       ring_buffer_info(m, dev, &dev_priv->render_ring);
        return 0;
 }

@@ -682,7 +730,7 @@  i915_wedged_write(struct file *filp,

        atomic_set(&dev_priv->mm.wedged, val);
        if (val) {
-               DRM_WAKEUP(&dev_priv->irq_queue);
+               DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
                queue_work(dev_priv->wq, &dev_priv->error_work);
        }

@@ -735,16 +783,18 @@  static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 }

 static struct drm_info_list i915_debugfs_list[] = {
-       {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
+       {"i915_gem_active_render_ring", i915_gem_object_list_info, 0, (void *) RING_ACTIVE_LIST},
+       {"i915_gem_active_bsd_ring", i915_gem_object_list_info, 0, (void *) BSD_ACTIVE_LIST},
        {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
-       {"i915_gem_request", i915_gem_request_info, 0},
+       {"i915_gem_request_render_ring", i915_gem_request_info, 0, (void *) RING_REQUEST_LIST},
+       {"i915_gem_request_bsd_ring", i915_gem_request_info, 0, (void *) BSD_REQUEST_LIST},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
        {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
        {"i915_gem_interrupt", i915_interrupt_info, 0},
        {"i915_gem_hws", i915_hws_info, 0},
-       {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
-       {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
+       {"i915_render_ringbuffer_data", i915_render_ringbuffer_data, 0},
+       {"i915_render_ringbuffer_info", i915_render_ringbuffer_info, 0},
        {"i915_batchbuffers", i915_batchbuffer_info, 0},
        {"i915_error_state", i915_error_state, 0},
        {"i915_rstdby_delays", i915_rstdby_delays, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a9f8589..9bc5ee9 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -39,83 +39,6 @@ 
 #include <linux/pnp.h>
 #include <linux/vga_switcheroo.h>

-/* Really want an OS-independent resettable timer.  Would like to have
- * this loop run for (eg) 3 sec, but have the timer reset every time
- * the head pointer changes, so that EBUSY only happens if the ring
- * actually stalls for (eg) 3 seconds.
- */
-int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
-       u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
-       u32 last_acthd = I915_READ(acthd_reg);
-       u32 acthd;
-       u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-       int i;
-
-       trace_i915_ring_wait_begin (dev);
-
-       for (i = 0; i < 100000; i++) {
-               ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-               acthd = I915_READ(acthd_reg);
-               ring->space = ring->head - (ring->tail + 8);
-               if (ring->space < 0)
-                       ring->space += ring->Size;
-               if (ring->space >= n) {
-                       trace_i915_ring_wait_end (dev);
-                       return 0;
-               }
-
-               if (dev->primary->master) {
-                       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-                       if (master_priv->sarea_priv)
-                               master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-               }
-
-
-               if (ring->head != last_head)
-                       i = 0;
-               if (acthd != last_acthd)
-                       i = 0;
-
-               last_head = ring->head;
-               last_acthd = acthd;
-               msleep_interruptible(10);
-
-       }
-
-       trace_i915_ring_wait_end (dev);
-       return -EBUSY;
-}
-
-/* As a ringbuffer is only allowed to wrap between instructions, fill
- * the tail with NOOPs.
- */
-int i915_wrap_ring(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       volatile unsigned int *virt;
-       int rem;
-
-       rem = dev_priv->ring.Size - dev_priv->ring.tail;
-       if (dev_priv->ring.space < rem) {
-               int ret = i915_wait_ring(dev, rem, __func__);
-               if (ret)
-                       return ret;
-       }
-       dev_priv->ring.space -= rem;
-
-       virt = (unsigned int *)
-               (dev_priv->ring.virtual_start + dev_priv->ring.tail);
-       rem /= 4;
-       while (rem--)
-               *virt++ = MI_NOOP;
-
-       dev_priv->ring.tail = 0;
-
-       return 0;
-}

 /**
  * Sets up the hardware status page for devices that need a physical address
@@ -132,10 +55,10 @@  static int i915_init_phys_hws(struct drm_device *dev)
                DRM_ERROR("Can not allocate hardware status page\n");
                return -ENOMEM;
        }
-       dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+       dev_priv->render_ring.status_page.page_addr  = dev_priv->status_page_dmah->vaddr;
        dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

-       memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+       memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);

        if (IS_I965G(dev))
                dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -158,8 +81,8 @@  static void i915_free_hws(struct drm_device *dev)
                dev_priv->status_page_dmah = NULL;
        }

-       if (dev_priv->status_gfx_addr) {
-               dev_priv->status_gfx_addr = 0;
+       if (dev_priv->render_ring.status_page.gfx_addr) {
+               dev_priv->render_ring.status_page.gfx_addr = 0;
                drm_core_ioremapfree(&dev_priv->hws_map, dev);
        }

@@ -167,33 +90,6 @@  static void i915_free_hws(struct drm_device *dev)
        I915_WRITE(HWS_PGA, 0x1ffff000);
 }

-void i915_kernel_lost_context(struct drm_device * dev)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_master_private *master_priv;
-       drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
-
-       /*
-        * We should never lose context on the ring with modesetting
-        * as we don't expose it to userspace
-        */
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return;
-
-       ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-       ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
-       ring->space = ring->head - (ring->tail + 8);
-       if (ring->space < 0)
-               ring->space += ring->Size;
-
-       if (!dev->primary->master)
-               return;
-
-       master_priv = dev->primary->master->driver_priv;
-       if (ring->head == ring->tail && master_priv->sarea_priv)
-               master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
-}
-
 static int i915_dma_cleanup(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -204,12 +100,7 @@  static int i915_dma_cleanup(struct drm_device * dev)
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

-       if (dev_priv->ring.virtual_start) {
-               drm_core_ioremapfree(&dev_priv->ring.map, dev);
-               dev_priv->ring.virtual_start = NULL;
-               dev_priv->ring.map.handle = NULL;
-               dev_priv->ring.map.size = 0;
-       }
+       intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);

        /* Clear the HWS virtual address at teardown */
        if (I915_NEED_GFX_HWS(dev))
@@ -232,24 +123,24 @@  static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
        }

        if (init->ring_size != 0) {
-               if (dev_priv->ring.ring_obj != NULL) {
+               if (dev_priv->render_ring.gem_object != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
                        return -EINVAL;
                }

-               dev_priv->ring.Size = init->ring_size;
+               dev_priv->render_ring.size = init->ring_size;

-               dev_priv->ring.map.offset = init->ring_start;
-               dev_priv->ring.map.size = init->ring_size;
-               dev_priv->ring.map.type = 0;
-               dev_priv->ring.map.flags = 0;
-               dev_priv->ring.map.mtrr = 0;
+               dev_priv->render_ring.map.offset = init->ring_start;
+               dev_priv->render_ring.map.size = init->ring_size;
+               dev_priv->render_ring.map.type = 0;
+               dev_priv->render_ring.map.flags = 0;
+               dev_priv->render_ring.map.mtrr = 0;

-               drm_core_ioremap_wc(&dev_priv->ring.map, dev);
+               drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);

-               if (dev_priv->ring.map.handle == NULL) {
+               if (dev_priv->render_ring.map.handle == NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("can not ioremap virtual address for"
                                  " ring buffer\n");
@@ -257,7 +148,7 @@  static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
                }
        }

-       dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+       dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;

        dev_priv->cpp = init->cpp;
        dev_priv->back_offset = init->back_offset;
@@ -277,24 +168,25 @@  static int i915_dma_resume(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

+       struct intel_ring_buffer *ring;
        DRM_DEBUG_DRIVER("%s\n", __func__);

-       if (dev_priv->ring.map.handle == NULL) {
+       ring = &dev_priv->render_ring;
+       if (ring->map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }
-
        /* Program Hardware Status Page */
-       if (!dev_priv->hw_status_page) {
+       if (!ring->status_page.page_addr) {
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
        DRM_DEBUG_DRIVER("hw status page @ %p\n",
-                               dev_priv->hw_status_page);
+                               ring->status_page.page_addr);

-       if (dev_priv->status_gfx_addr != 0)
-               I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+       if (ring->status_page.gfx_addr != 0)
+               ring->setup_status_page(dev, ring);
        else
                I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
        DRM_DEBUG_DRIVER("Enabled hardware status page\n");
@@ -408,7 +300,7 @@  static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
        int i;
        RING_LOCALS;

-       if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
+       if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
                return -EINVAL;

        BEGIN_LP_RING((dwords+1)&~1);
@@ -441,7 +333,6 @@  i915_emit_box(struct drm_device *dev,
              struct drm_clip_rect *boxes,
              int i, int DR1, int DR4)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_clip_rect box = boxes[i];
        RING_LOCALS;

@@ -509,8 +400,6 @@  static int i915_dispatch_cmdbuffer(struct drm_device * dev,
                return -EINVAL;
        }

-       i915_kernel_lost_context(dev);
-
        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
@@ -534,7 +423,6 @@  static int i915_dispatch_batchbuffer(struct drm_device * dev,
                                     drm_i915_batchbuffer_t * batch,
                                     struct drm_clip_rect *cliprects)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
        int nbox = batch->num_cliprects;
        int i = 0, count;
        RING_LOCALS;
@@ -544,8 +432,6 @@  static int i915_dispatch_batchbuffer(struct drm_device * dev,
                return -EINVAL;
        }

-       i915_kernel_lost_context(dev);
-
        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
@@ -596,8 +482,6 @@  static int i915_dispatch_flip(struct drm_device * dev)
                         dev_priv->current_page,
                         master_priv->sarea_priv->pf_current_page);

-       i915_kernel_lost_context(dev);
-
        BEGIN_LP_RING(2);
        OUT_RING(MI_FLUSH | MI_READ_FLUSH);
        OUT_RING(0);
@@ -638,8 +522,7 @@  static int i915_quiescent(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;

-       i915_kernel_lost_context(dev);
-       return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
+       return intel_wait_ring_buffer(dev, &dev_priv->render_ring, dev_priv->render_ring.size - 8);
 }

 static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -881,10 +764,13 @@  static int i915_set_status_page(struct drm_device *dev, void *data,
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_hws_addr_t *hws = data;
+       struct intel_ring_buffer *ring;

        if (!I915_NEED_GFX_HWS(dev))
                return -EINVAL;

+       ring = &dev_priv->render_ring;
+
        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
@@ -897,7 +783,7 @@  static int i915_set_status_page(struct drm_device *dev, void *data,

        DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

-       dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
+       ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

        dev_priv->hws_map.offset = dev->agp->base + hws->addr;
        dev_priv->hws_map.size = 4*1024;
@@ -913,14 +799,13 @@  static int i915_set_status_page(struct drm_device *dev, void *data,
                                " G33 hw status page\n");
                return -ENOMEM;
        }
-       dev_priv->hw_status_page = dev_priv->hws_map.handle;
-
-       memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-       I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+       ring->status_page.page_addr = dev_priv->hws_map.handle;
+       memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+       I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
        DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
-                               dev_priv->status_gfx_addr);
+                               ring->status_page.gfx_addr);
        DRM_DEBUG_DRIVER("load hws at %p\n",
-                               dev_priv->hw_status_page);
+                               ring->status_page.page_addr);
        return 0;
 }

@@ -1695,9 +1580,7 @@  int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (!IS_I945G(dev) && !IS_I945GM(dev))
                pci_enable_msi(dev->pdev);

-       spin_lock_init(&dev_priv->user_irq_lock);
        spin_lock_init(&dev_priv->error_lock);
-       dev_priv->user_irq_refcount = 0;
        dev_priv->trace_irq_seqno = 0;

        ret = drm_vblank_init(dev, I915_NUM_PIPE);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 4b26919..487b9e4 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -310,7 +310,9 @@  int i965_reset(struct drm_device *dev, u8 flags)
        /*
         * Clear request list
         */
-       i915_gem_retire_requests(dev);
+
+       /* XXX per ring clear */
+       i915_gem_retire_requests(dev, &dev_priv->render_ring);

        if (need_display)
                i915_save_display(dev);
@@ -359,8 +361,10 @@  int i965_reset(struct drm_device *dev, u8 flags)
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET) ||
            !dev_priv->mm.suspended) {
-               drm_i915_ring_buffer_t *ring = &dev_priv->ring;
-               struct drm_gem_object *obj = ring->ring_obj;
+
+               struct intel_ring_buffer *ring = &dev_priv->render_ring;
+               struct drm_gem_object *obj = ring->gem_object;
+
                struct drm_i915_gem_object *obj_priv = obj->driver_private;
                dev_priv->mm.suspended = 0;

@@ -375,15 +379,11 @@  int i965_reset(struct drm_device *dev, u8 flags)
                           ((obj->size - 4096) & RING_NR_PAGES) |
                           RING_NO_REPORT |
                           RING_VALID);
-               if (!drm_core_check_feature(dev, DRIVER_MODESET))
-                       i915_kernel_lost_context(dev);
-               else {
-                       ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-                       ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
-                       ring->space = ring->head - (ring->tail + 8);
-                       if (ring->space < 0)
-                               ring->space += ring->Size;
-               }
+               ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+               ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+               ring->space = ring->head - (ring->tail + 8);
+               if (ring->space < 0)
+                       ring->space += ring->size;

                mutex_unlock(&dev->struct_mutex);
                drm_irq_uninstall(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aba8260..38ecc62 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -32,6 +32,7 @@ 

 #include "i915_reg.h"
 #include "intel_bios.h"
+#include "intel_ringbuffer.h"
 #include <linux/io-mapping.h>

 /* General customization:
@@ -82,6 +83,11 @@  enum plane {
 #define I915_GEM_PHYS_OVERLAY_REGS 3
 #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)

+#define RENDER_RING    (1<<0)
+#define BLITTER_RING   (1<<1)
+#define BSD_RING       (1<<2)
+#define MFX_RING       (1<<3)
+
 struct drm_i915_gem_phys_object {
        int id;
        struct page **page_list;
@@ -231,10 +237,12 @@  typedef struct drm_i915_private {
        void __iomem *regs;

        struct pci_dev *bridge_dev;
-       drm_i915_ring_buffer_t ring;
+
+       struct intel_ring_buffer render_ring;
+
+       struct intel_ring_buffer bsd_ring;

        drm_dma_handle_t *status_page_dmah;
-       void *hw_status_page;
        dma_addr_t dma_status_page;
        uint32_t counter;
        unsigned int status_gfx_addr;
@@ -250,12 +258,8 @@  typedef struct drm_i915_private {
        int current_page;
        int page_flipping;

-       wait_queue_head_t irq_queue;
+       //wait_queue_head_t irq_queue;
        atomic_t irq_received;
-       /** Protects user_irq_refcount and irq_mask_reg */
-       spinlock_t user_irq_lock;
-       /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
-       int user_irq_refcount;
        u32 trace_irq_seqno;
        /** Cached value of IMR to avoid reads in updating the bitfield */
        u32 irq_mask_reg;
@@ -498,18 +502,7 @@  typedef struct drm_i915_private {
                 */
                struct list_head shrink_list;

-               /**
-                * List of objects currently involved in rendering from the
-                * ringbuffer.
-                *
-                * Includes buffers having the contents of their GPU caches
-                * flushed, not necessarily primitives.  last_rendering_seqno
-                * represents when the rendering involved will be completed.
-                *
-                * A reference is held on the buffer while on this list.
-                */
                spinlock_t active_list_lock;
-               struct list_head active_list;

                /**
                 * List of objects which are not in the ringbuffer but which
@@ -547,12 +540,6 @@  typedef struct drm_i915_private {
                struct list_head fence_list;

                /**
-                * List of breadcrumbs associated with GPU requests currently
-                * outstanding.
-                */
-               struct list_head request_list;
-
-               /**
                 * We leave the user IRQ off as much as possible,
                 * but this means that requests will finish and never
                 * be retired once the system goes idle. Set a timer to
@@ -561,18 +548,6 @@  typedef struct drm_i915_private {
                 */
                struct delayed_work retire_work;

-               uint32_t next_gem_seqno;
-
-               /**
-                * Waiting sequence number, if any
-                */
-               uint32_t waiting_gem_seqno;
-
-               /**
-                * Last seq seen at irq time
-                */
-               uint32_t irq_gem_seqno;
-
                /**
                 * Flag if the X Server, and thus DRM, is not currently in
                 * control of the device.
@@ -723,6 +698,10 @@  struct drm_i915_gem_object {
         */
        int madv;

+       /* Which ring does this object belong to? */
+       u32    ring_flag;
+       struct intel_ring_buffer *ring;
+
        /**
         * Number of crtcs where this object is currently the fb, but
         * will be page flipped away on the next vblank.  When it
@@ -742,6 +721,9 @@  struct drm_i915_gem_object {
  * an emission time with seqnos for tracking how far ahead of the GPU we are.
  */
 struct drm_i915_gem_request {
+       /** On Which ring this request is generated */
+       struct intel_ring_buffer *ring;
+
        /** GEM sequence number associated with this request. */
        uint32_t seqno;

@@ -806,9 +788,7 @@  extern int i915_irq_emit(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
-void i915_user_irq_get(struct drm_device *dev);
 void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
-void i915_user_irq_put(struct drm_device *dev);
 extern void i915_enable_interrupt (struct drm_device *dev);

 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
@@ -826,6 +806,9 @@  extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
 extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask);

 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -897,11 +880,11 @@  void i915_gem_object_unpin(struct drm_gem_object *obj);
 int i915_gem_object_unbind(struct drm_gem_object *obj);
 void i915_gem_release_mmap(struct drm_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev, struct intel_ring_buffer *ring);
 bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
 int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev, struct intel_ring_buffer *ring);
 void i915_gem_retire_work_handler(struct work_struct *work);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -912,9 +895,13 @@  void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
                     unsigned long end);
 int i915_gem_idle(struct drm_device *dev);
-uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-                         uint32_t flush_domains);
-int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
+int i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+               int interruptible,
+               struct intel_ring_buffer *ring);
+uint32_t i915_add_request(struct drm_device *dev,
+               struct drm_file *file_priv,
+               uint32_t flush_domains,
+               struct intel_ring_buffer *ring);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
                                      int write);
@@ -995,7 +982,7 @@  extern void g4x_disable_fbc(struct drm_device *dev);
  * has access to the ring.
  */
 #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {                        \
-       if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
+       if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object == NULL) \
                LOCK_TEST_WITH_RETURN(dev, file_priv);                  \
 } while (0)

@@ -1011,33 +998,23 @@  extern void g4x_disable_fbc(struct drm_device *dev);

 #define I915_VERBOSE 0

-#define RING_LOCALS    volatile unsigned int *ring_virt__;
+#define RING_LOCALS

 #define BEGIN_LP_RING(n) do {                                          \
-       int bytes__ = 4*(n);                                            \
-       if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n));        \
-       /* a wrap must occur between instructions so pad beforehand */  \
-       if (unlikely (dev_priv->ring.tail + bytes__ > dev_priv->ring.Size)) \
-               i915_wrap_ring(dev);                                    \
-       if (unlikely (dev_priv->ring.space < bytes__))                  \
-               i915_wait_ring(dev, bytes__, __func__);                 \
-       ring_virt__ = (unsigned int *)                                  \
-               (dev_priv->ring.virtual_start + dev_priv->ring.tail);   \
-       dev_priv->ring.tail += bytes__;                                 \
-       dev_priv->ring.tail &= dev_priv->ring.Size - 1;                 \
-       dev_priv->ring.space -= bytes__;                                \
+       drm_i915_private_t *dev_priv = dev->dev_private;                \
+       intel_begin_ring_buffer(dev, &dev_priv->render_ring, 4*(n));    \
 } while (0)

-#define OUT_RING(n) do {                                               \
-       if (I915_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));      \
-       *ring_virt__++ = (n);                                           \
-} while (0)
+
+#define OUT_RING(x) do {                                               \
+       drm_i915_private_t *dev_priv = dev->dev_private;                \
+       intel_fill_ring_buffer(dev, &dev_priv->render_ring, x); \
+} while (0)

 #define ADVANCE_LP_RING() do {                                         \
-       if (I915_VERBOSE)                                               \
-               DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->ring.tail); \
-       I915_WRITE(PRB0_TAIL, dev_priv->ring.tail);                     \
-} while(0)
+       drm_i915_private_t *dev_priv = dev->dev_private;                \
+       intel_advance_ring_buffer(dev, &dev_priv->render_ring); \
+} while (0)

 /**
  * Reads a dword out of the status page, which is written to from the command
@@ -1054,7 +1031,7 @@  extern void g4x_disable_fbc(struct drm_device *dev);
  *
  * The area from dword 0x20 to 0x3ff is available for driver usage.
  */
-#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
+#define READ_HWSP(dev_priv, reg)  intel_read_status_page(&dev_priv->render_ring, reg)
 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
 #define I915_GEM_HWS_INDEX             0x20
 #define I915_BREADCRUMB_INDEX          0x21
@@ -1107,6 +1084,7 @@  extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
                         (dev)->pci_device == 0x2A42 ||         \
                         (dev)->pci_device == 0x2E42)

+#define HAS_BSD(dev)           (IS_IRONLAKE(dev) || IS_G4X(dev))
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)

 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 933e865..ffd84e2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -55,6 +55,8 @@  static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file_priv);

+
+
 static LIST_HEAD(shrink_list);
 static DEFINE_SPINLOCK(shrink_list_lock);

@@ -1486,6 +1488,15 @@  i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct intel_ring_buffer *ring = NULL;
+
+       if (obj_priv->ring_flag == ON_RENDER_RING)
+               ring = &dev_priv->render_ring;
+       else if (obj_priv->ring_flag == ON_BSD_RING)
+               ring = &dev_priv->bsd_ring;
+
+       BUG_ON(ring == NULL);
+       obj_priv->ring = ring;

        /* Add a reference if we're newly entering the active list. */
        if (!obj_priv->active) {
@@ -1494,8 +1505,7 @@  i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
        }
        /* Move from whatever list we were on to the tail of execution. */
        spin_lock(&dev_priv->mm.active_list_lock);
-       list_move_tail(&obj_priv->list,
-                      &dev_priv->mm.active_list);
+       list_move_tail(&obj_priv->list, &ring->active_list);
        spin_unlock(&dev_priv->mm.active_list_lock);
        obj_priv->last_rendering_seqno = seqno;
 }
@@ -1548,6 +1558,8 @@  i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
        BUG_ON(!list_empty(&obj_priv->gpu_write_list));

        obj_priv->last_rendering_seqno = 0;
+       obj_priv->ring = NULL;
+       obj_priv->ring_flag = 0;
        if (obj_priv->active) {
                obj_priv->active = 0;
                drm_gem_object_unreference(obj);
@@ -1557,7 +1569,8 @@  i915_gem_object_move_to_inactive(struct drm_gem_object *obj)

 static void
 i915_gem_process_flushing_list(struct drm_device *dev,
-                              uint32_t flush_domains, uint32_t seqno)
+                              uint32_t flush_domains, uint32_t seqno,
+                              struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv, *next;
@@ -1568,7 +1581,7 @@  i915_gem_process_flushing_list(struct drm_device *dev,
                struct drm_gem_object *obj = obj_priv->obj;

                if ((obj->write_domain & flush_domains) ==
-                   obj->write_domain) {
+                   obj->write_domain && obj_priv->ring_flag == ring->ring_flag) {
                        uint32_t old_write_domain = obj->write_domain;

                        obj->write_domain = 0;
@@ -1597,14 +1610,13 @@  i915_gem_process_flushing_list(struct drm_device *dev,
  */
 uint32_t
 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-                uint32_t flush_domains)
+                uint32_t flush_domains, struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_file_private *i915_file_priv = NULL;
        struct drm_i915_gem_request *request;
        uint32_t seqno;
        int was_empty;
-       RING_LOCALS;

        if (file_priv != NULL)
                i915_file_priv = file_priv->driver_priv;
@@ -1613,28 +1625,13 @@  i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
        if (request == NULL)
                return 0;

-       /* Grab the seqno we're going to make this request be, and bump the
-        * next (skipping 0 so it can be the reserved no-seqno value).
-        */
-       seqno = dev_priv->mm.next_gem_seqno;
-       dev_priv->mm.next_gem_seqno++;
-       if (dev_priv->mm.next_gem_seqno == 0)
-               dev_priv->mm.next_gem_seqno++;
-
-       BEGIN_LP_RING(4);
-       OUT_RING(MI_STORE_DWORD_INDEX);
-       OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       OUT_RING(seqno);
-
-       OUT_RING(MI_USER_INTERRUPT);
-       ADVANCE_LP_RING();
-
-       DRM_DEBUG_DRIVER("%d\n", seqno);
+       seqno = ring->add_request(dev, ring, file_priv, flush_domains);

        request->seqno = seqno;
+       request->ring = ring;
        request->emitted_jiffies = jiffies;
-       was_empty = list_empty(&dev_priv->mm.request_list);
-       list_add_tail(&request->list, &dev_priv->mm.request_list);
+       was_empty = list_empty(&ring->request_list);
+       list_add_tail(&request->list, &ring->request_list);
        if (i915_file_priv) {
                list_add_tail(&request->client_list,
                              &i915_file_priv->mm.request_list);
@@ -1646,7 +1643,7 @@  i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
         * domain we're flushing with our flush.
         */
        if (flush_domains != 0)
-               i915_gem_process_flushing_list(dev, flush_domains, seqno);
+               i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);

        if (!dev_priv->mm.suspended) {
                mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1663,20 +1660,16 @@  i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
  * before signalling the CPU
  */
 static uint32_t
-i915_retire_commands(struct drm_device *dev)
+i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        uint32_t flush_domains = 0;
-       RING_LOCALS;

        /* The sampler always gets flushed on i965 (sigh) */
        if (IS_I965G(dev))
                flush_domains |= I915_GEM_DOMAIN_SAMPLER;
-       BEGIN_LP_RING(2);
-       OUT_RING(cmd);
-       OUT_RING(0); /* noop */
-       ADVANCE_LP_RING();
+
+       ring->flush(dev, ring,
+                       I915_GEM_DOMAIN_COMMAND, flush_domains);
        return flush_domains;
 }

@@ -1696,11 +1689,11 @@  i915_gem_retire_request(struct drm_device *dev,
         * by the ringbuffer to the flushing/inactive lists as appropriate.
         */
        spin_lock(&dev_priv->mm.active_list_lock);
-       while (!list_empty(&dev_priv->mm.active_list)) {
+       while (!list_empty(&request->ring->active_list)) {
                struct drm_gem_object *obj;
                struct drm_i915_gem_object *obj_priv;

-               obj_priv = list_first_entry(&dev_priv->mm.active_list,
+               obj_priv = list_first_entry(&request->ring->active_list,
                                            struct drm_i915_gem_object,
                                            list);
                obj = obj_priv->obj;
@@ -1709,6 +1702,7 @@  i915_gem_retire_request(struct drm_device *dev,
                 * list, then the oldest in the list must still be newer than
                 * this seqno.
                 */
+
                if (obj_priv->last_rendering_seqno != request->seqno)
                        goto out;

@@ -1747,38 +1741,36 @@  i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 }

 uint32_t
-i915_get_gem_seqno(struct drm_device *dev)
+i915_get_gem_seqno(struct drm_device *dev, struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-
-       return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+       return ring->get_gem_seqno(dev, ring);
 }

 /**
  * This function clears the request list as sequence numbers are passed.
  */
 void
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests(struct drm_device *dev, struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;

-       if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
+       if (!ring->status_page.page_addr || list_empty(&ring->request_list))
                return;

-       seqno = i915_get_gem_seqno(dev);
+       seqno = ring->get_gem_seqno(dev, ring);

-       while (!list_empty(&dev_priv->mm.request_list)) {
+       while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
                uint32_t retiring_seqno;

-               request = list_first_entry(&dev_priv->mm.request_list,
+               request = list_first_entry(&ring->request_list,
                                           struct drm_i915_gem_request,
                                           list);
                retiring_seqno = request->seqno;

                if (i915_seqno_passed(seqno, retiring_seqno) ||
-                   atomic_read(&dev_priv->mm.wedged)) {
+                               atomic_read(&dev_priv->mm.wedged)) {
                        i915_gem_retire_request(dev, request);

                        list_del(&request->list);
@@ -1790,7 +1782,7 @@  i915_gem_retire_requests(struct drm_device *dev)

        if (unlikely (dev_priv->trace_irq_seqno &&
                      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-               i915_user_irq_put(dev);
+               ring->user_irq_put(dev, ring);
                dev_priv->trace_irq_seqno = 0;
        }
 }
@@ -1806,15 +1798,21 @@  i915_gem_retire_work_handler(struct work_struct *work)
        dev = dev_priv->dev;

        mutex_lock(&dev->struct_mutex);
-       i915_gem_retire_requests(dev);
-       if (!dev_priv->mm.suspended &&
-           !list_empty(&dev_priv->mm.request_list))
+       i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
+       if (HAS_BSD(dev))
+               i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+
+       if (!dev_priv->mm.suspended &&
+               (!list_empty(&dev_priv->render_ring.request_list) ||
+                       (HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list))))
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
        mutex_unlock(&dev->struct_mutex);
 }

 int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
+i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+               int interruptible, struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 ier;
@@ -1825,7 +1823,7 @@  i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
        if (atomic_read(&dev_priv->mm.wedged))
                return -EIO;

-       if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+       if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
                if (HAS_PCH_SPLIT(dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
                else
@@ -1839,19 +1837,19 @@  i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)

                trace_i915_gem_request_wait_begin(dev, seqno);

-               dev_priv->mm.waiting_gem_seqno = seqno;
-               i915_user_irq_get(dev);
+               ring->waiting_gem_seqno = seqno;
+               ring->user_irq_get(dev, ring);
                if (interruptible)
-                       ret = wait_event_interruptible(dev_priv->irq_queue,
-                               i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+                       ret = wait_event_interruptible(ring->irq_queue,
+                               i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno) ||
                                atomic_read(&dev_priv->mm.wedged));
                else
-                       wait_event(dev_priv->irq_queue,
-                               i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+                       wait_event(ring->irq_queue,
+                               i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno) ||
                                atomic_read(&dev_priv->mm.wedged));

-               i915_user_irq_put(dev);
-               dev_priv->mm.waiting_gem_seqno = 0;
+               ring->user_irq_put(dev, ring);
+               ring->waiting_gem_seqno = 0;

                trace_i915_gem_request_wait_end(dev, seqno);
        }
@@ -1860,7 +1858,7 @@  i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)

        if (ret && ret != -ERESTARTSYS)
                DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
-                         __func__, ret, seqno, i915_get_gem_seqno(dev));
+                         __func__, ret, seqno, ring->get_gem_seqno(dev, ring));

        /* Directly dispatch request retiring.  While we have the work queue
         * to handle this, the waiter on a request often wants an associated
@@ -1868,7 +1866,7 @@  i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
         * a separate wait queue to handle that.
         */
        if (ret == 0)
-               i915_gem_retire_requests(dev);
+               i915_gem_retire_requests(dev, ring);

        return ret;
 }
@@ -1878,9 +1876,9 @@  i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
  * request and object lists appropriately for that event.
  */
 static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+i915_wait_request(struct drm_device *dev, uint32_t seqno, struct intel_ring_buffer *ring)
 {
-       return i915_do_wait_request(dev, seqno, 1);
+       return i915_do_wait_request(dev, seqno, 1, ring);
 }

 static void
@@ -1889,71 +1887,30 @@  i915_gem_flush(struct drm_device *dev,
               uint32_t flush_domains)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       uint32_t cmd;
-       RING_LOCALS;
-
-#if WATCH_EXEC
-       DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
-                 invalidate_domains, flush_domains);
-#endif
-       trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
-                                    invalidate_domains, flush_domains);
-
        if (flush_domains & I915_GEM_DOMAIN_CPU)
                drm_agp_chipset_flush(dev);

-       if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
-               /*
-                * read/write caches:
-                *
-                * I915_GEM_DOMAIN_RENDER is always invalidated, but is
-                * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
-                * also flushed at 2d versus 3d pipeline switches.
-                *
-                * read-only caches:
-                *
-                * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
-                * MI_READ_FLUSH is set, and is always flushed on 965.
-                *
-                * I915_GEM_DOMAIN_COMMAND may not exist?
-                *
-                * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
-                * invalidated when MI_EXE_FLUSH is set.
-                *
-                * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
-                * invalidated with every MI_FLUSH.
-                *
-                * TLBs:
-                *
-                * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
-                * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
-                * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
-                * are flushed at any MI_FLUSH.
-                */
+       dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
+                       invalidate_domains,
+                       flush_domains);

-               cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
-               if ((invalidate_domains|flush_domains) &
-                   I915_GEM_DOMAIN_RENDER)
-                       cmd &= ~MI_NO_WRITE_FLUSH;
-               if (!IS_I965G(dev)) {
-                       /*
-                        * On the 965, the sampler cache always gets flushed
-                        * and this bit is reserved.
-                        */
-                       if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
-                               cmd |= MI_READ_FLUSH;
-               }
-               if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
-                       cmd |= MI_EXE_FLUSH;
+       if (HAS_BSD(dev))
+               dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
+                                       invalidate_domains,
+                                       flush_domains);
+}

-#if WATCH_EXEC
-               DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
-#endif
-               BEGIN_LP_RING(2);
-               OUT_RING(cmd);
-               OUT_RING(MI_NOOP);
-               ADVANCE_LP_RING();
-       }
+static void
+i915_gem_flush_ring(struct drm_device *dev,
+              uint32_t invalidate_domains,
+              uint32_t flush_domains,
+              struct intel_ring_buffer *ring)
+{
+       if (flush_domains & I915_GEM_DOMAIN_CPU)
+               drm_agp_chipset_flush(dev);
+       ring->flush(dev, ring,
+                       invalidate_domains,
+                       flush_domains);
 }

 /**
@@ -1980,7 +1937,7 @@  i915_gem_object_wait_rendering(struct drm_gem_object *obj)
                DRM_INFO("%s: object %p wait for seqno %08x\n",
                          __func__, obj, obj_priv->last_rendering_seqno);
 #endif
-               ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+               ret = i915_wait_request(dev, obj_priv->last_rendering_seqno, obj_priv->ring);
                if (ret != 0)
                        return ret;
        }
@@ -2096,11 +2053,13 @@  i915_gpu_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool lists_empty;
-       uint32_t seqno;
+       uint32_t seqno1, seqno2;
+       int ret;

        spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-                     list_empty(&dev_priv->mm.active_list);
+                      list_empty(&dev_priv->render_ring.active_list) &&
+                      (!HAS_BSD(dev) || list_empty(&dev_priv->bsd_ring.active_list));
        spin_unlock(&dev_priv->mm.active_list_lock);

        if (lists_empty)
@@ -2108,11 +2067,23 @@  i915_gpu_idle(struct drm_device *dev)

        /* Flush everything onto the inactive list. */
        i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-       seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
-       if (seqno == 0)
+       seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, &dev_priv->render_ring);
+       if (seqno1 == 0)
                return -ENOMEM;
+       ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
+       if (ret)
+               return ret;
+       if (HAS_BSD(dev)) {
+               seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, &dev_priv->bsd_ring);
+               if (seqno2 == 0)
+                       return -ENOMEM;
 
-       return i915_wait_request(dev, seqno);
+               ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
 }

 static int
@@ -2125,7 +2096,8 @@  i915_gem_evict_everything(struct drm_device *dev)
        spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
-                      list_empty(&dev_priv->mm.active_list));
+                      list_empty(&dev_priv->render_ring.active_list) &&
+                      (!HAS_BSD(dev) || list_empty(&dev_priv->bsd_ring.active_list)));
        spin_unlock(&dev_priv->mm.active_list_lock);

        if (lists_empty)
@@ -2145,7 +2117,8 @@  i915_gem_evict_everything(struct drm_device *dev)
        spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
-                      list_empty(&dev_priv->mm.active_list));
+                      list_empty(&dev_priv->render_ring.active_list) &&
+                      (!HAS_BSD(dev) || list_empty(&dev_priv->bsd_ring.active_list)));
        spin_unlock(&dev_priv->mm.active_list_lock);
        BUG_ON(!lists_empty);

@@ -2160,7 +2133,10 @@  i915_gem_evict_something(struct drm_device *dev, int min_size)
        int ret;

        for (;;) {
-               i915_gem_retire_requests(dev);
+               i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
+               if (HAS_BSD(dev))
+                       i915_gem_retire_requests(dev, &dev_priv->bsd_ring);

                /* If there's an inactive buffer available now, grab it
                 * and be done.
@@ -2184,14 +2160,28 @@  i915_gem_evict_something(struct drm_device *dev, int min_size)
                 * things, wait for the next to finish and hopefully leave us
                 * a buffer to evict.
                 */
-               if (!list_empty(&dev_priv->mm.request_list)) {
+               if (!list_empty(&dev_priv->render_ring.request_list)) {
+                       struct drm_i915_gem_request *request;
+
+                       request = list_first_entry(&dev_priv->render_ring.request_list,
+                                                  struct drm_i915_gem_request,
+                                                  list);
+
+                       ret = i915_wait_request(dev, request->seqno, request->ring);
+                       if (ret)
+                               return ret;
+
+                       continue;
+               }
+
+               if (HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list)) {
                        struct drm_i915_gem_request *request;

-                       request = list_first_entry(&dev_priv->mm.request_list,
+                       request = list_first_entry(&dev_priv->bsd_ring.request_list,
                                                   struct drm_i915_gem_request,
                                                   list);

-                       ret = i915_wait_request(dev, request->seqno);
+                       ret = i915_wait_request(dev, request->seqno, request->ring);
                        if (ret)
                                return ret;

@@ -2218,10 +2208,12 @@  i915_gem_evict_something(struct drm_device *dev, int min_size)
                        if (obj != NULL) {
                                uint32_t seqno;

-                               i915_gem_flush(dev,
+                               i915_gem_flush_ring(dev,
+                                              obj->write_domain,
                                               obj->write_domain,
-                                              obj->write_domain);
-                               seqno = i915_add_request(dev, NULL, obj->write_domain);
+                                              obj_priv->ring);
+                               seqno = i915_add_request(dev, NULL, obj->write_domain,
+                                               obj_priv->ring);
                                if (seqno == 0)
                                        return -ENOMEM;
                                continue;
@@ -2747,6 +2739,7 @@  i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        uint32_t old_write_domain;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;

        if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
                return;
@@ -2754,7 +2747,7 @@  i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
        /* Queue the GPU write cache flushing we need. */
        old_write_domain = obj->write_domain;
        i915_gem_flush(dev, 0, obj->write_domain);
-       (void) i915_add_request(dev, NULL, obj->write_domain);
+       (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring);
        BUG_ON(obj->write_domain);

        trace_i915_gem_object_change_domain(obj,
@@ -2894,7 +2887,7 @@  i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
                DRM_INFO("%s: object %p wait for seqno %08x\n",
                          __func__, obj, obj_priv->last_rendering_seqno);
 #endif
-               ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+               ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0, obj_priv->ring);
                if (ret != 0)
                        return ret;
        }
@@ -3485,62 +3478,6 @@  i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
        return 0;
 }

-/** Dispatch a batchbuffer to the ring
- */
-static int
-i915_dispatch_gem_execbuffer(struct drm_device *dev,
-                             struct drm_i915_gem_execbuffer2 *exec,
-                             struct drm_clip_rect *cliprects,
-                             uint64_t exec_offset)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       int nbox = exec->num_cliprects;
-       int i = 0, count;
-       uint32_t exec_start, exec_len;
-       RING_LOCALS;
-
-       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-       exec_len = (uint32_t) exec->batch_len;
-
-       trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
-
-       count = nbox ? nbox : 1;
-
-       for (i = 0; i < count; i++) {
-               if (i < nbox) {
-                       int ret = i915_emit_box(dev, cliprects, i,
-                                               exec->DR1, exec->DR4);
-                       if (ret)
-                               return ret;
-               }
-
-               if (IS_I830(dev) || IS_845G(dev)) {
-                       BEGIN_LP_RING(4);
-                       OUT_RING(MI_BATCH_BUFFER);
-                       OUT_RING(exec_start | MI_BATCH_NON_SECURE);
-                       OUT_RING(exec_start + exec_len - 4);
-                       OUT_RING(0);
-                       ADVANCE_LP_RING();
-               } else {
-                       BEGIN_LP_RING(2);
-                       if (IS_I965G(dev)) {
-                               OUT_RING(MI_BATCH_BUFFER_START |
-                                        (2 << 6) |
-                                        MI_BATCH_NON_SECURE_I965);
-                               OUT_RING(exec_start);
-                       } else {
-                               OUT_RING(MI_BATCH_BUFFER_START |
-                                        (2 << 6));
-                               OUT_RING(exec_start | MI_BATCH_NON_SECURE);
-                       }
-                       ADVANCE_LP_RING();
-               }
-       }
-
-       /* XXX breadcrumb */
-       return 0;
-}
-
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
@@ -3569,7 +3506,7 @@  i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
                if (time_after_eq(request->emitted_jiffies, recent_enough))
                        break;

-               ret = i915_wait_request(dev, request->seqno);
+               ret = i915_wait_request(dev, request->seqno, request->ring);
                if (ret != 0)
                        break;
        }
@@ -3726,6 +3663,9 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        uint32_t seqno, flush_domains, reloc_index;
        int pin_tries, flips;

+       struct intel_ring_buffer *ring = NULL;
+       u32 ring_flag;
+
 #if WATCH_EXEC
        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
                  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
@@ -3783,6 +3723,15 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto pre_mutex_err;
        }

+       if (args->flags & ON_BSD_RING) {
+               BUG_ON(!HAS_BSD(dev));
+               ring = &dev_priv->bsd_ring;
+               ring_flag = ON_BSD_RING;
+       } else {
+               ring = &dev_priv->render_ring;
+               ring_flag = ON_RENDER_RING;
+       }
+
        /* Look up object handles */
        flips = 0;
        for (i = 0; i < args->buffer_count; i++) {
@@ -3916,9 +3865,14 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                i915_gem_flush(dev,
                               dev->invalidate_domains,
                               dev->flush_domains);
-               if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
+               if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
                        (void)i915_add_request(dev, file_priv,
-                                              dev->flush_domains);
+                                       dev->flush_domains, &dev_priv->render_ring);
+
+                       if (HAS_BSD(dev))
+                               (void)i915_add_request(dev, file_priv,
+                                               dev->flush_domains, &dev_priv->bsd_ring);
+               }
        }

        for (i = 0; i < args->buffer_count; i++) {
@@ -3955,7 +3909,7 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 #endif

        /* Exec the batchbuffer */
-       ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
+       ret = ring->dispatch_gem_execbuffer(dev, ring, args, cliprects, exec_offset);
        if (ret) {
                DRM_ERROR("dispatch failed %d\n", ret);
                goto err;
@@ -3965,7 +3919,7 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         * Ensure that the commands in the batch buffer are
         * finished before the interrupt fires
         */
-       flush_domains = i915_retire_commands(dev);
+       flush_domains = i915_retire_commands(dev, ring);

        i915_verify_inactive(dev, __FILE__, __LINE__);

@@ -3976,10 +3930,12 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         * *some* interrupts representing completion of buffers that we can
         * wait on when trying to clear up gtt space).
         */
-       seqno = i915_add_request(dev, file_priv, flush_domains);
+       seqno = i915_add_request(dev, file_priv, flush_domains, ring);
        BUG_ON(seqno == 0);
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
+               obj_priv = obj->driver_private;
+               obj_priv->ring_flag = ring_flag;

                i915_gem_object_move_to_active(obj, seqno);
 #if WATCH_LRU
@@ -4093,7 +4049,7 @@  i915_gem_execbuffer(struct drm_device *dev, void *data,
        exec2.DR4 = args->DR4;
        exec2.num_cliprects = args->num_cliprects;
        exec2.cliprects_ptr = args->cliprects_ptr;
-       exec2.flags = 0;
+       exec2.flags = ON_RENDER_RING;

        ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
        if (!ret) {
@@ -4332,6 +4288,7 @@  i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_busy *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
+       drm_i915_private_t *dev_priv = dev->dev_private;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
@@ -4346,7 +4303,10 @@  i915_gem_busy_ioctl(struct drm_device *dev, void *data,
         * actually unmasked, and our working set ends up being larger than
         * required.
         */
-       i915_gem_retire_requests(dev);
+       i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
+       if (HAS_BSD(dev))
+               i915_gem_retire_requests(dev, &dev_priv->bsd_ring);

        obj_priv = obj->driver_private;
        /* Don't count being on the flushing list against the object being
@@ -4507,7 +4467,9 @@  i915_gem_idle(struct drm_device *dev)

        mutex_lock(&dev->struct_mutex);

-       if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
+       if (dev_priv->mm.suspended ||
+               (dev_priv->render_ring.gem_object == NULL) ||
+               (HAS_BSD(dev) && dev_priv->bsd_ring.gem_object == NULL)) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }
@@ -4534,7 +4496,7 @@  i915_gem_idle(struct drm_device *dev)
        dev_priv->mm.suspended = 1;
        del_timer(&dev_priv->hangcheck_timer);

-       i915_kernel_lost_context(dev);
+//     i915_kernel_lost_context(dev);
        i915_gem_cleanup_ringbuffer(dev);

        mutex_unlock(&dev->struct_mutex);
@@ -4545,195 +4507,24 @@  i915_gem_idle(struct drm_device *dev)
        return 0;
 }

-static int
-i915_gem_init_hws(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
-       int ret;
-
-       /* If we need a physical address for the status page, it's already
-        * initialized at driver load time.
-        */
-       if (!I915_NEED_GFX_HWS(dev))
-               return 0;
-
-       obj = drm_gem_object_alloc(dev, 4096);
-       if (obj == NULL) {
-               DRM_ERROR("Failed to allocate status page\n");
-               return -ENOMEM;
-       }
-       obj_priv = obj->driver_private;
-       obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
-
-       ret = i915_gem_object_pin(obj, 4096);
-       if (ret != 0) {
-               drm_gem_object_unreference(obj);
-               return ret;
-       }
-
-       dev_priv->status_gfx_addr = obj_priv->gtt_offset;
-
-       dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
-       if (dev_priv->hw_status_page == NULL) {
-               DRM_ERROR("Failed to map status page.\n");
-               memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-               i915_gem_object_unpin(obj);
-               drm_gem_object_unreference(obj);
-               return -EINVAL;
-       }
-       dev_priv->hws_obj = obj;
-       memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-       if (IS_GEN6(dev)) {
-               I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
-               I915_READ(HWS_PGA_GEN6); /* posting read */
-       } else {
-               I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
-               I915_READ(HWS_PGA); /* posting read */
-       }
-       DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
-
-       return 0;
-}
-
-static void
-i915_gem_cleanup_hws(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
-
-       if (dev_priv->hws_obj == NULL)
-               return;
-
-       obj = dev_priv->hws_obj;
-       obj_priv = obj->driver_private;
-
-       kunmap(obj_priv->pages[0]);
-       i915_gem_object_unpin(obj);
-       drm_gem_object_unreference(obj);
-       dev_priv->hws_obj = NULL;
-
-       memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-       dev_priv->hw_status_page = NULL;
-
-       /* Write high address into HWS_PGA when disabling. */
-       I915_WRITE(HWS_PGA, 0x1ffff000);
-}
-
 int
 i915_gem_init_ringbuffer(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
-       drm_i915_ring_buffer_t *ring = &dev_priv->ring;
        int ret;
-       u32 head;
-
-       ret = i915_gem_init_hws(dev);
-       if (ret != 0)
-               return ret;
-
-       obj = drm_gem_object_alloc(dev, 128 * 1024);
-       if (obj == NULL) {
-               DRM_ERROR("Failed to allocate ringbuffer\n");
-               i915_gem_cleanup_hws(dev);
-               return -ENOMEM;
-       }
-       obj_priv = obj->driver_private;
-
-       ret = i915_gem_object_pin(obj, 4096);
-       if (ret != 0) {
-               drm_gem_object_unreference(obj);
-               i915_gem_cleanup_hws(dev);
-               return ret;
-       }
-
-       /* Set up the kernel mapping for the ring. */
-       ring->Size = obj->size;
-
-       ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
-       ring->map.size = obj->size;
-       ring->map.type = 0;
-       ring->map.flags = 0;
-       ring->map.mtrr = 0;
-
-       drm_core_ioremap_wc(&ring->map, dev);
-       if (ring->map.handle == NULL) {
-               DRM_ERROR("Failed to map ringbuffer.\n");
-               memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
-               i915_gem_object_unpin(obj);
-               drm_gem_object_unreference(obj);
-               i915_gem_cleanup_hws(dev);
-               return -EINVAL;
-       }
-       ring->ring_obj = obj;
-       ring->virtual_start = ring->map.handle;
-
-       /* Stop the ring if it's running. */
-       I915_WRITE(PRB0_CTL, 0);
-       I915_WRITE(PRB0_TAIL, 0);
-       I915_WRITE(PRB0_HEAD, 0);
-
-       /* Initialize the ring. */
-       I915_WRITE(PRB0_START, obj_priv->gtt_offset);
-       head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
-       /* G45 ring initialization fails to reset head to zero */
-       if (head != 0) {
-               DRM_ERROR("Ring head not reset to zero "
-                         "ctl %08x head %08x tail %08x start %08x\n",
-                         I915_READ(PRB0_CTL),
-                         I915_READ(PRB0_HEAD),
-                         I915_READ(PRB0_TAIL),
-                         I915_READ(PRB0_START));
-               I915_WRITE(PRB0_HEAD, 0);
-
-               DRM_ERROR("Ring head forced to zero "
-                         "ctl %08x head %08x tail %08x start %08x\n",
-                         I915_READ(PRB0_CTL),
-                         I915_READ(PRB0_HEAD),
-                         I915_READ(PRB0_TAIL),
-                         I915_READ(PRB0_START));
-       }
-
-       I915_WRITE(PRB0_CTL,
-                  ((obj->size - 4096) & RING_NR_PAGES) |
-                  RING_NO_REPORT |
-                  RING_VALID);
-
-       head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
-       /* If the head is still not zero, the ring is dead */
-       if (head != 0) {
-               DRM_ERROR("Ring initialization failed "
-                         "ctl %08x head %08x tail %08x start %08x\n",
-                         I915_READ(PRB0_CTL),
-                         I915_READ(PRB0_HEAD),
-                         I915_READ(PRB0_TAIL),
-                         I915_READ(PRB0_START));
-               return -EIO;
-       }

-       /* Update our cache of the ring state */
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               i915_kernel_lost_context(dev);
-       else {
-               ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-               ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
-               ring->space = ring->head - (ring->tail + 8);
-               if (ring->space < 0)
-                       ring->space += ring->Size;
+       dev_priv->render_ring = render_ring;
+       if (!I915_NEED_GFX_HWS(dev)) {
+               dev_priv->render_ring.status_page.page_addr  = dev_priv->status_page_dmah->vaddr;
+               memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
        }
-
-       if (IS_I9XX(dev) && !IS_GEN3(dev)) {
-               I915_WRITE(MI_MODE,
-                          (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+       ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
+       if (HAS_BSD(dev)) {
+               dev_priv->bsd_ring = bsd_ring;
+               ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
        }

-       return 0;
+       return ret;
 }

 void
@@ -4741,17 +4532,9 @@  i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;

-       if (dev_priv->ring.ring_obj == NULL)
-               return;
-
-       drm_core_ioremapfree(&dev_priv->ring.map, dev);
-
-       i915_gem_object_unpin(dev_priv->ring.ring_obj);
-       drm_gem_object_unreference(dev_priv->ring.ring_obj);
-       dev_priv->ring.ring_obj = NULL;
-       memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
-
-       i915_gem_cleanup_hws(dev);
+       intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+       if (HAS_BSD(dev))
+               intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
 }

 int
@@ -4779,12 +4562,14 @@  i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        }

        spin_lock(&dev_priv->mm.active_list_lock);
-       BUG_ON(!list_empty(&dev_priv->mm.active_list));
+       BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
+       BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
        spin_unlock(&dev_priv->mm.active_list_lock);

        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
        BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
-       BUG_ON(!list_empty(&dev_priv->mm.request_list));
+       BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
+       BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
        mutex_unlock(&dev->struct_mutex);

        drm_irq_install(dev);
@@ -4823,15 +4608,20 @@  i915_gem_load(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;

        spin_lock_init(&dev_priv->mm.active_list_lock);
-       INIT_LIST_HEAD(&dev_priv->mm.active_list);
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
-       INIT_LIST_HEAD(&dev_priv->mm.request_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+       INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
+       INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
+
+       if (HAS_BSD(dev)) {
+               INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
+               INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
+       }
+
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
-       dev_priv->mm.next_gem_seqno = 1;

        spin_lock(&shrink_list_lock);
        list_add(&dev_priv->mm.shrink_list, &shrink_list);
@@ -5094,8 +4884,10 @@  i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
                        continue;

                spin_unlock(&shrink_list_lock);
+               i915_gem_retire_requests(dev, &dev_priv->render_ring);

-               i915_gem_retire_requests(dev);
+               if (HAS_BSD(dev))
+                       i915_gem_retire_requests(dev, &dev_priv->bsd_ring);

                list_for_each_entry_safe(obj_priv, next_obj,
                                         &dev_priv->mm.inactive_list,
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index e602614..db04075 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -106,12 +106,21 @@  i915_dump_lru(struct drm_device *dev, const char *where)

        DRM_INFO("active list %s {\n", where);
        spin_lock(&dev_priv->mm.active_list_lock);
-       list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+       list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
                            list)
        {
                DRM_INFO("    %p: %08x\n", obj_priv,
                         obj_priv->last_rendering_seqno);
        }
+
+       if (HAS_BSD(dev)) {
+               list_for_each_entry(obj_priv, &dev_priv->bsd_ring.active_list,
+                               list)
+               {
+                       DRM_INFO("    %p: %08x\n", obj_priv,
+                               obj_priv->last_rendering_seqno);
+               }
+       }
        spin_unlock(&dev_priv->mm.active_list_lock);
        DRM_INFO("}\n");
        DRM_INFO("flushing list %s {\n", where);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5388354..5d09e16 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -52,7 +52,7 @@ 
         I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

 /** Interrupts that we mask and unmask at runtime. */
-#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
+#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

 #define I915_PIPE_VBLANK_STATUS        (PIPE_START_VBLANK_INTERRUPT_STATUS |\
                                 PIPE_VBLANK_INTERRUPT_STATUS)
@@ -73,7 +73,7 @@  ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
        }
 }

-static inline void
+void
 ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
        if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
@@ -114,7 +114,7 @@  i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
        }
 }

-static inline void
+void
 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
        if ((dev_priv->irq_mask_reg & mask) != mask) {
@@ -326,7 +326,7 @@  irqreturn_t ironlake_irq_handler(struct drm_device *dev)
        int ret = IRQ_NONE;
        u32 de_iir, gt_iir, de_ier, pch_iir;
        struct drm_i915_master_private *master_priv;
-
+       struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
        /* disable master interrupt before clearing iir  */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -349,14 +349,17 @@  irqreturn_t ironlake_irq_handler(struct drm_device *dev)
        }

        if (gt_iir & GT_USER_INTERRUPT) {
-               u32 seqno = i915_get_gem_seqno(dev);
-               dev_priv->mm.irq_gem_seqno = seqno;
+               u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
+               render_ring->irq_gem_seqno = seqno;
                trace_i915_gem_request_complete(dev, seqno);
-               DRM_WAKEUP(&dev_priv->irq_queue);
+               DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
                dev_priv->hangcheck_count = 0;
                mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
        }

+       if (gt_iir & GT_BSD_USER_INTERRUPT)
+               DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+
        if (de_iir & DE_GSE)
                ironlake_opregion_gse_intr(dev);

@@ -527,17 +530,17 @@  i915_ringbuffer_last_batch(struct drm_device *dev)
         */
        bbaddr = 0;
        head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-       ring = (u32 *)(dev_priv->ring.virtual_start + head);
+       ring = (u32 *)(dev_priv->render_ring.virtual_start + head);

-       while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+       while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
                bbaddr = i915_get_bbaddr(dev, ring);
                if (bbaddr)
                        break;
        }

        if (bbaddr == 0) {
-               ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size);
-               while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+               ring = (u32 *)(dev_priv->render_ring.virtual_start + dev_priv->render_ring.size);
+               while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
                        bbaddr = i915_get_bbaddr(dev, ring);
                        if (bbaddr)
                                break;
@@ -578,7 +581,7 @@  static void i915_capture_error_state(struct drm_device *dev)
                return;
        }

-       error->seqno = i915_get_gem_seqno(dev);
+       error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
        error->eir = I915_READ(EIR);
        error->pgtbl_er = I915_READ(PGTBL_ER);
        error->pipeastat = I915_READ(PIPEASTAT);
@@ -606,7 +609,7 @@  static void i915_capture_error_state(struct drm_device *dev)
        batchbuffer[0] = NULL;
        batchbuffer[1] = NULL;
        count = 0;
-       list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+       list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list, list) {
                struct drm_gem_object *obj = obj_priv->obj;

                if (batchbuffer[0] == NULL &&
@@ -630,7 +633,7 @@  static void i915_capture_error_state(struct drm_device *dev)
        error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);

        /* Record the ringbuffer */
-       error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj);
+       error->ringbuffer = i915_error_object_create(dev, dev_priv->render_ring.gem_object);

        /* Record buffers on the active list. */
        error->active_bo = NULL;
@@ -642,7 +645,7 @@  static void i915_capture_error_state(struct drm_device *dev)

        if (error->active_bo) {
                int i = 0;
-               list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+               list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list, list) {
                        struct drm_gem_object *obj = obj_priv->obj;

                        error->active_bo[i].size = obj->size;
@@ -820,7 +823,7 @@  static void i915_handle_error(struct drm_device *dev, bool wedged)
                /*
                 * Wakeup waiting processes so they don't hang
                 */
-               DRM_WAKEUP(&dev_priv->irq_queue);
+               DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
        }

        queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -839,6 +842,7 @@  irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
        unsigned long irqflags;
        int irq_received;
        int ret = IRQ_NONE;
+       struct intel_ring_buffer *render_ring = &dev_priv->render_ring;

        atomic_inc(&dev_priv->irq_received);

@@ -863,7 +867,7 @@  irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
-               spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+               spin_lock_irqsave(&render_ring->user_irq_lock, irqflags);
                pipea_stats = I915_READ(PIPEASTAT);
                pipeb_stats = I915_READ(PIPEBSTAT);

@@ -886,7 +890,7 @@  irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
                        I915_WRITE(PIPEBSTAT, pipeb_stats);
                        irq_received = 1;
                }
-               spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+               spin_unlock_irqrestore(&render_ring->user_irq_lock, irqflags);

                if (!irq_received)
                        break;
@@ -919,14 +923,17 @@  irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
                }

                if (iir & I915_USER_INTERRUPT) {
-                       u32 seqno = i915_get_gem_seqno(dev);
-                       dev_priv->mm.irq_gem_seqno = seqno;
+                       u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
+                       render_ring->irq_gem_seqno = seqno;
                        trace_i915_gem_request_complete(dev, seqno);
-                       DRM_WAKEUP(&dev_priv->irq_queue);
+                       DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
                        dev_priv->hangcheck_count = 0;
                        mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
                }

+               if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
+                       DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+
                if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
                        intel_prepare_page_flip(dev, 0);

@@ -976,8 +983,6 @@  static int i915_emit_irq(struct drm_device * dev)
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        RING_LOCALS;

-       i915_kernel_lost_context(dev);
-
        DRM_DEBUG_DRIVER("\n");

        dev_priv->counter++;
@@ -996,43 +1001,13 @@  static int i915_emit_irq(struct drm_device * dev)
        return dev_priv->counter;
 }

-void i915_user_irq_get(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       unsigned long irqflags;
-
-       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-       if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
-               if (HAS_PCH_SPLIT(dev))
-                       ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
-               else
-                       i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-       }
-       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
-void i915_user_irq_put(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       unsigned long irqflags;
-
-       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-       BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
-       if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
-               if (HAS_PCH_SPLIT(dev))
-                       ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
-               else
-                       i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
-       }
-       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
 void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       struct intel_ring_buffer *render_ring = &dev_priv->render_ring;

        if (dev_priv->trace_irq_seqno == 0)
-               i915_user_irq_get(dev);
+               render_ring->user_irq_get(dev, render_ring);

        dev_priv->trace_irq_seqno = seqno;
 }
@@ -1042,6 +1017,7 @@  static int i915_wait_irq(struct drm_device * dev, int irq_nr)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret = 0;
+       struct intel_ring_buffer *render_ring = &dev_priv->render_ring;

        DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
                  READ_BREADCRUMB(dev_priv));
@@ -1055,10 +1031,10 @@  static int i915_wait_irq(struct drm_device * dev, int irq_nr)
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

-       i915_user_irq_get(dev);
-       DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
+       render_ring->user_irq_get(dev, render_ring);
+       DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
                    READ_BREADCRUMB(dev_priv) >= irq_nr);
-       i915_user_irq_put(dev);
+       render_ring->user_irq_put(dev, render_ring);

        if (ret == -EBUSY) {
                DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1077,7 +1053,7 @@  int i915_irq_emit(struct drm_device *dev, void *data,
        drm_i915_irq_emit_t *emit = data;
        int result;

-       if (!dev_priv || !dev_priv->ring.virtual_start) {
+       if (!dev_priv || !dev_priv->render_ring.virtual_start) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }
@@ -1126,7 +1102,7 @@  int i915_enable_vblank(struct drm_device *dev, int pipe)
        if (!(pipeconf & PIPEACONF_ENABLE))
                return -EINVAL;

-       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+       spin_lock_irqsave(&dev_priv->render_ring.user_irq_lock, irqflags);
        if (HAS_PCH_SPLIT(dev))
                ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
                                            DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
@@ -1136,7 +1112,7 @@  int i915_enable_vblank(struct drm_device *dev, int pipe)
        else
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_VBLANK_INTERRUPT_ENABLE);
-       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+       spin_unlock_irqrestore(&dev_priv->render_ring.user_irq_lock, irqflags);
        return 0;
 }

@@ -1148,7 +1124,7 @@  void i915_disable_vblank(struct drm_device *dev, int pipe)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

-       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+       spin_lock_irqsave(&dev_priv->render_ring.user_irq_lock, irqflags);
        if (HAS_PCH_SPLIT(dev))
                ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
                                             DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
@@ -1156,7 +1132,7 @@  void i915_disable_vblank(struct drm_device *dev, int pipe)
                i915_disable_pipestat(dev_priv, pipe,
                                      PIPE_VBLANK_INTERRUPT_ENABLE |
                                      PIPE_START_VBLANK_INTERRUPT_ENABLE);
-       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+       spin_unlock_irqrestore(&dev_priv->render_ring.user_irq_lock, irqflags);
 }

 void i915_enable_interrupt (struct drm_device *dev)
@@ -1225,7 +1201,7 @@  int i915_vblank_swap(struct drm_device *dev, void *data,

 struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
+       return list_entry(dev_priv->render_ring.request_list.prev, struct drm_i915_gem_request, list);
 }

 /**
@@ -1250,12 +1226,16 @@  void i915_hangcheck_elapsed(unsigned long data)
                acthd = I915_READ(ACTHD_I965);

        /* If all work is done then ACTHD clearly hasn't advanced. */
-       if (list_empty(&dev_priv->mm.request_list) ||
-                      i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
+       if (list_empty(&dev_priv->render_ring.request_list) ||
+               i915_seqno_passed(i915_get_gem_seqno(dev, &dev_priv->render_ring), i915_get_tail_request(dev)->seqno)) {
                dev_priv->hangcheck_count = 0;
                return;
        }

+       /* XXX fix later
+        * check other ring buffer
+        */
+
        if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
                DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
                i915_handle_error(dev, true);
@@ -1304,7 +1284,7 @@  static int ironlake_irq_postinstall(struct drm_device *dev)
        /* enable kind of interrupts always enabled */
        u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
                           DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
-       u32 render_mask = GT_USER_INTERRUPT;
+       u32 render_mask = GT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
        u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
                           SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;

@@ -1381,7 +1361,10 @@  int i915_driver_irq_postinstall(struct drm_device *dev)
        u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
        u32 error_mask;

-       DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+       DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
+
+       if (HAS_BSD(dev))
+               DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);

        dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 7cc8410..393e468 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -328,9 +328,9 @@  void opregion_enable_asle(struct drm_device *dev)
                if (IS_MOBILE(dev)) {
                        unsigned long irqflags;

-                       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+                       spin_lock_irqsave(&dev_priv->render_ring.user_irq_lock, irqflags);
                        intel_enable_asle(dev);
-                       spin_unlock_irqrestore(&dev_priv->user_irq_lock,
+                       spin_unlock_irqrestore(&dev_priv->render_ring.user_irq_lock,
                                               irqflags);
                }

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2720bc2..67917ba 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -323,6 +323,7 @@ 
 #define   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT          (1<<4)
 #define   I915_DEBUG_INTERRUPT                         (1<<2)
 #define   I915_USER_INTERRUPT                          (1<<1)
+#define   I915_BSD_USER_INTERRUPT                      (1<<25)
 #define   I915_ASLE_INTERRUPT                          (1<<0)
 #define EIR            0x020b0
 #define EMR            0x020b4
@@ -360,6 +361,18 @@ 


 /*
+ * BSD (bit stream decoder) instruction and interrupt control register defines
+ * (G4X and Ironlake only)
+ */
+#define BSD_RING_TAIL          0x04030
+#define BSD_RING_HEAD          0x04034
+#define BSD_RING_START         0x04038
+#define BSD_RING_CTL           0x0403c
+#define BSD_RING_ACTHD         0x04074
+#define BSD_HWS_PGA                    0x04080
+
+
+/*
  * Framebuffer compression (915+ only)
  */

@@ -2280,6 +2293,8 @@ 
 #define GT_SYNC_STATUS          (1 << 2)
 #define GT_USER_INTERRUPT       (1 << 0)

+#define GT_BSD_USER_INTERRUPT          (1 << 5)
+
 #define GTISR   0x44010
 #define GTIMR   0x44014
 #define GTIIR   0x44018
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 60595fc..933e101 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -211,8 +211,8 @@  static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
        struct drm_device *dev = overlay->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
+        drm_i915_private_t *dev_priv = dev->dev_private;
        RING_LOCALS;

        BUG_ON(overlay->active);
@@ -227,11 +227,11 @@  static int intel_overlay_on(struct intel_overlay *overlay)
        OUT_RING(MI_NOOP);
        ADVANCE_LP_RING();

-       overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+       overlay->last_flip_req = i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
        if (overlay->last_flip_req == 0)
                return -ENOMEM;

-       ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+       ret = i915_do_wait_request(dev, overlay->last_flip_req, 1, &dev_priv->render_ring);
        if (ret != 0)
                return ret;

@@ -265,7 +265,7 @@  static void intel_overlay_continue(struct intel_overlay *overlay,
        OUT_RING(flip_addr);
         ADVANCE_LP_RING();

-       overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+       overlay->last_flip_req = i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 }

 static int intel_overlay_wait_flip(struct intel_overlay *overlay)
@@ -277,7 +277,7 @@  static int intel_overlay_wait_flip(struct intel_overlay *overlay)
        RING_LOCALS;

        if (overlay->last_flip_req != 0) {
-               ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+               ret = i915_do_wait_request(dev, overlay->last_flip_req, 1, &dev_priv->render_ring);
                if (ret == 0) {
                        overlay->last_flip_req = 0;

@@ -296,11 +296,11 @@  static int intel_overlay_wait_flip(struct intel_overlay *overlay)
         OUT_RING(MI_NOOP);
         ADVANCE_LP_RING();

-       overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+       overlay->last_flip_req = i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
        if (overlay->last_flip_req == 0)
                return -ENOMEM;

-       ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+       ret = i915_do_wait_request(dev, overlay->last_flip_req, 1, &dev_priv->render_ring);
        if (ret != 0)
                return ret;

@@ -336,11 +336,11 @@  static int intel_overlay_off(struct intel_overlay *overlay)
         OUT_RING(MI_NOOP);
         ADVANCE_LP_RING();

-       overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+       overlay->last_flip_req = i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
        if (overlay->last_flip_req == 0)
                return -ENOMEM;

-       ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+       ret = i915_do_wait_request(dev, overlay->last_flip_req, 1, &dev_priv->render_ring);
        if (ret != 0)
                return ret;

@@ -354,11 +354,11 @@  static int intel_overlay_off(struct intel_overlay *overlay)
         OUT_RING(MI_NOOP);
        ADVANCE_LP_RING();

-       overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+       overlay->last_flip_req = i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
        if (overlay->last_flip_req == 0)
                return -ENOMEM;

-       ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+       ret = i915_do_wait_request(dev, overlay->last_flip_req, 1, &dev_priv->render_ring);
        if (ret != 0)
                return ret;

@@ -390,8 +390,8 @@  int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
                                         int interruptible)
 {
        struct drm_device *dev = overlay->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
+        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 flip_addr;
        int ret;
        RING_LOCALS;
@@ -400,12 +400,12 @@  int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
                return -EIO;

        if (overlay->last_flip_req == 0) {
-               overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+               overlay->last_flip_req = i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
                if (overlay->last_flip_req == 0)
                        return -ENOMEM;
        }

-       ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
+       ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible, &dev_priv->render_ring);
        if (ret != 0)
                return ret;

@@ -429,12 +429,12 @@  int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
                        OUT_RING(MI_NOOP);
                        ADVANCE_LP_RING();

-                       overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+                       overlay->last_flip_req = i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
                        if (overlay->last_flip_req == 0)
                                return -ENOMEM;

                        ret = i915_do_wait_request(dev, overlay->last_flip_req,
-                                       interruptible);
+                                       interruptible, &dev_priv->render_ring);
                        if (ret != 0)
                                return ret;

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
new file mode 100644
index 0000000..aadb5b5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -0,0 +1,784 @@ 
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Zou Nan hai <nanhai.zou@intel.com>
+ *    Xiang Hai hao <haihao.xiang@intel.com>
+ *
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drv.h"
+#include "i915_drm.h"
+#include "i915_trace.h"
+
+#define I915_GEM_GPU_DOMAINS   (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+static void
+render_ring_flush(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               u32     invalidate_domains,
+               u32     flush_domains)
+{
+#if WATCH_EXEC
+       DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
+                 invalidate_domains, flush_domains);
+#endif
+       u32 cmd;
+       trace_i915_gem_request_flush(dev, ring->next_seqno,
+                                    invalidate_domains, flush_domains);
+
+       if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
+               /*
+                * read/write caches:
+                *
+                * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+                * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
+                * also flushed at 2d versus 3d pipeline switches.
+                *
+                * read-only caches:
+                *
+                * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+                * MI_READ_FLUSH is set, and is always flushed on 965.
+                *
+                * I915_GEM_DOMAIN_COMMAND may not exist?
+                *
+                * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+                * invalidated when MI_EXE_FLUSH is set.
+                *
+                * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+                * invalidated with every MI_FLUSH.
+                *
+                * TLBs:
+                *
+                * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+                * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
+                * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+                * are flushed at any MI_FLUSH.
+                */
+
+               cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+               if ((invalidate_domains|flush_domains) &
+                               I915_GEM_DOMAIN_RENDER)
+                       cmd &= ~MI_NO_WRITE_FLUSH;
+               if (!IS_I965G(dev)) {
+                       /*
+                        * On the 965, the sampler cache always gets flushed
+                        * and this bit is reserved.
+                        */
+                       if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+                               cmd |= MI_READ_FLUSH;
+               }
+               if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+                       cmd |= MI_EXE_FLUSH;
+
+#if WATCH_EXEC
+               DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
+#endif
+               intel_begin_ring_buffer (dev, ring, 8);
+               intel_fill_ring_buffer (dev, ring, cmd);
+               intel_fill_ring_buffer (dev, ring, MI_NOOP);
+               intel_advance_ring_buffer (dev, ring);
+       }
+}
+
+static unsigned int render_ring_get_head(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       return I915_READ(PRB0_HEAD) & HEAD_ADDR;
+}
+
+static unsigned int render_ring_get_tail(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       return I915_READ(PRB0_TAIL) & TAIL_ADDR;
+}
+
+static unsigned int render_ring_get_active_head(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+
+       return I915_READ(acthd_reg);
+}
+
+static void render_ring_advance_ring(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       I915_WRITE(PRB0_TAIL, ring->tail);
+}
+
+
+static int init_render_ring(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       u32 head;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+       obj_priv = ring->gem_object->driver_private;
+
+       /* Stop the ring if it's running. */
+       I915_WRITE(PRB0_CTL, 0);
+       I915_WRITE(PRB0_HEAD, 0);
+       I915_WRITE(PRB0_TAIL, 0);
+
+       /* Initialize the ring. */
+       I915_WRITE(PRB0_START, obj_priv->gtt_offset);
+       head = ring->get_head(dev, ring);
+
+       /* G45 ring initialization fails to reset head to zero */
+       if (head != 0) {
+               DRM_ERROR("%s head not reset to zero "
+                               "ctl %08x head %08x tail %08x start %08x\n",
+                               ring->name,
+                               I915_READ(PRB0_CTL),
+                               I915_READ(PRB0_HEAD),
+                               I915_READ(PRB0_TAIL),
+                               I915_READ(PRB0_START));
+               I915_WRITE(PRB0_HEAD, 0);
+
+               DRM_ERROR("%s head forced to zero "
+                               "ctl %08x head %08x tail %08x start %08x\n",
+                               ring->name,
+                               I915_READ(PRB0_CTL),
+                               I915_READ(PRB0_HEAD),
+                               I915_READ(PRB0_TAIL),
+                               I915_READ(PRB0_START));
+       }
+
+       I915_WRITE(PRB0_CTL,
+                       ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+                       | RING_NO_REPORT | RING_VALID);
+
+       head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+       /* If the head is still not zero, the ring is dead */
+       if (head != 0) {
+               DRM_ERROR("%s initialization failed "
+                               "ctl %08x head %08x tail %08x start %08x\n",
+                               ring->name,
+                               I915_READ(PRB0_CTL),
+                               I915_READ(PRB0_HEAD),
+                               I915_READ(PRB0_TAIL),
+                               I915_READ(PRB0_START));
+               return -EIO;
+       }
+
+       return 0;
+}
+
+
+static u32
+render_ring_add_request(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               struct drm_file *file_priv,
+               u32 flush_domains)
+{
+       u32 seqno;
+       seqno = intel_ring_get_seqno(dev, ring);
+
+       intel_begin_ring_buffer (dev, ring, 4);
+       intel_fill_ring_buffer (dev, ring, MI_STORE_DWORD_INDEX);
+       intel_fill_ring_buffer (dev, ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+       intel_fill_ring_buffer (dev, ring, seqno);
+       intel_fill_ring_buffer (dev, ring, MI_USER_INTERRUPT);
+       intel_advance_ring_buffer (dev, ring);
+
+       DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+
+       return seqno;
+}
+
+static u32
+render_ring_get_gem_seqno(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void
+render_ring_get_user_irq(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&ring->user_irq_lock, irqflags);
+       if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
+               if (IS_IRONLAKE(dev))
+                       ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+               else
+                       i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+       }
+       spin_unlock_irqrestore(&ring->user_irq_lock, irqflags);
+
+}
+
+static void
+render_ring_put_user_irq(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&ring->user_irq_lock, irqflags);
+       BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
+       if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
+               if (IS_IRONLAKE(dev))
+                       ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+               else
+                       i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+       }
+       spin_unlock_irqrestore(&ring->user_irq_lock, irqflags);
+}
+
+static void render_setup_status_page(struct drm_device *dev,
+       struct  intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
+       (void)I915_READ(HWS_PGA);
+}
+
+
+static int
+render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               struct drm_i915_gem_execbuffer2 *exec,
+               struct drm_clip_rect *cliprects,
+               uint64_t exec_offset)
+{
+       int nbox = exec->num_cliprects;
+       int i = 0, count;
+       uint32_t exec_start, exec_len;
+       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+       exec_len = (uint32_t) exec->batch_len;
+
+       //      trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
+
+       count = nbox ? nbox : 1;
+
+       for (i = 0; i < count; i++) {
+               if (i < nbox) {
+                       int ret = i915_emit_box(dev, cliprects, i,
+                                       exec->DR1, exec->DR4);
+                       if (ret)
+                               return ret;
+               }
+
+               if (IS_I830(dev) || IS_845G(dev)) {
+                       intel_begin_ring_buffer (dev, ring, 4);
+                       intel_fill_ring_buffer(dev, ring, MI_BATCH_BUFFER);
+                       intel_fill_ring_buffer(dev, ring, exec_start | MI_BATCH_NON_SECURE);
+                       intel_fill_ring_buffer(dev, ring, exec_start + exec_len - 4);
+                       intel_fill_ring_buffer(dev, ring, 0);
+               } else {
+                       intel_begin_ring_buffer (dev, ring, 4);
+                       if (IS_I965G(dev)) {
+                               intel_fill_ring_buffer(dev, ring, MI_BATCH_BUFFER_START |
+                                               (2 << 6) | MI_BATCH_NON_SECURE_I965);
+                               intel_fill_ring_buffer(dev, ring, exec_start);
+                       } else {
+                               intel_fill_ring_buffer(dev, ring, MI_BATCH_BUFFER_START |
+                                               (2 << 6));
+                               intel_fill_ring_buffer(dev, ring, exec_start | MI_BATCH_NON_SECURE);
+                       }
+               }
+               intel_advance_ring_buffer (dev, ring);
+       }
+
+       /* XXX breadcrumb */
+       return 0;
+}
+
+
+void
+bsd_ring_flush(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               u32     invalidate_domains,
+               u32     flush_domains)
+{
+       intel_begin_ring_buffer (dev, ring, 8);
+       intel_fill_ring_buffer (dev, ring, MI_FLUSH);
+       intel_fill_ring_buffer (dev, ring, MI_NOOP);
+       intel_advance_ring_buffer (dev, ring);
+}
+
+static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
+}
+
+static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
+}
+
+static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       return I915_READ(BSD_RING_ACTHD);
+}
+
+static inline void bsd_ring_advance_ring(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       I915_WRITE(BSD_RING_TAIL, ring->tail);
+}
+
+static int init_bsd_ring(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       u32 head;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv;
+
+       obj_priv = ring->gem_object->driver_private;
+
+       /* Stop the ring if it's running. */
+       I915_WRITE(BSD_RING_CTL, 0);
+       I915_WRITE(BSD_RING_HEAD, 0);
+       I915_WRITE(BSD_RING_TAIL, 0);
+
+       /* Initialize the ring. */
+       I915_WRITE(BSD_RING_START, obj_priv->gtt_offset);
+       head = ring->get_head(dev, ring);
+
+       /* G45 ring initialization fails to reset head to zero */
+       if (head != 0) {
+               DRM_ERROR("%s head not reset to zero "
+                               "ctl %08x head %08x tail %08x start %08x\n",
+                               ring->name,
+                               I915_READ(BSD_RING_CTL),
+                               I915_READ(BSD_RING_HEAD),
+                               I915_READ(BSD_RING_TAIL),
+                               I915_READ(BSD_RING_START));
+               I915_WRITE(PRB0_HEAD, 0);
+
+               DRM_ERROR("%s head forced to zero "
+                               "ctl %08x head %08x tail %08x start %08x\n",
+                               ring->name,
+                               I915_READ(BSD_RING_CTL),
+                               I915_READ(BSD_RING_HEAD),
+                               I915_READ(BSD_RING_TAIL),
+                               I915_READ(BSD_RING_START));
+       }
+
+       I915_WRITE(BSD_RING_CTL,
+                       ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+                       | RING_NO_REPORT | RING_VALID);
+
+       head = ring->get_head(dev, ring);
+       return 0;
+}
+
+static u32
+bsd_ring_add_request(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               struct drm_file *file_priv,
+               u32 flush_domains)
+{
+       u32 seqno;
+       seqno = intel_ring_get_seqno(dev, ring);
+       intel_begin_ring_buffer(dev, ring, 4);
+       intel_fill_ring_buffer(dev, ring, MI_STORE_DWORD_INDEX);
+       intel_fill_ring_buffer(dev, ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+       intel_fill_ring_buffer(dev, ring, seqno);
+       intel_fill_ring_buffer(dev, ring, MI_USER_INTERRUPT);
+       intel_advance_ring_buffer (dev, ring);
+
+       DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+
+       return seqno;
+}
+
+static void bsd_setup_status_page(struct drm_device *dev,
+       struct  intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
+       I915_READ(BSD_HWS_PGA);
+}
+
+static void
+bsd_ring_get_user_irq(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&ring->user_irq_lock, irqflags);
+       if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
+               if (IS_IRONLAKE(dev))
+                       ironlake_enable_graphics_irq(dev_priv, GT_BSD_USER_INTERRUPT);
+               else
+                       i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
+       }
+       spin_unlock_irqrestore(&ring->user_irq_lock, irqflags);
+
+}
+
+static void
+bsd_ring_put_user_irq(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&ring->user_irq_lock, irqflags);
+       BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
+       if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
+               if (IS_IRONLAKE(dev))
+                       ironlake_disable_graphics_irq(dev_priv, GT_BSD_USER_INTERRUPT);
+               else
+                       i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
+       }
+       spin_unlock_irqrestore(&ring->user_irq_lock, irqflags);
+}
+
+static int init_status_page(struct drm_device *dev, struct intel_ring_buffer *ring)
+{
+       struct drm_i915_gem_object *obj_priv;
+       struct drm_gem_object *obj;
+       int ret;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       obj = drm_gem_object_alloc(dev, PAGE_SIZE*10);
+       if (obj == NULL)
+               return -ENOMEM;
+
+       obj_priv = obj->driver_private;
+       obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+       ret = i915_gem_object_pin(obj, PAGE_SIZE);
+       if (ret != 0) {
+               drm_gem_object_unreference(obj);
+               return ret;
+       }
+
+       ring->status_page.gfx_addr = obj_priv->gtt_offset;
+
+       ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+       if (ring->status_page.page_addr == NULL) {
+               memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+               i915_gem_object_unpin(obj);
+               drm_gem_object_unreference(obj);
+               return -EINVAL;
+       }
+       ring->status_page.obj = obj;
+       memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+       ring->setup_status_page(dev, ring);
+       DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", ring->name, ring->status_page.gfx_addr);
+       return 0;
+}
+
+static u32
+bsd_ring_get_gem_seqno(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static int
+bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               struct drm_i915_gem_execbuffer2 *exec,
+               struct drm_clip_rect *cliprects,
+               uint64_t exec_offset)
+{
+       uint32_t exec_start;
+       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+       intel_begin_ring_buffer (dev, ring, 2);
+       intel_fill_ring_buffer(dev, ring, MI_BATCH_BUFFER_START |
+                       (2 << 6) | MI_BATCH_NON_SECURE_I965);
+       intel_fill_ring_buffer(dev, ring, exec_start);
+       intel_advance_ring_buffer (dev, ring);
+       return 0;
+}
+
+static void cleanup_status_page(struct drm_device *dev, struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+
+       obj = ring->status_page.obj;
+       if (obj == NULL)
+               return;
+       obj_priv = obj->driver_private;
+
+       kunmap(obj_priv->pages[0]);
+       i915_gem_object_unpin(obj);
+       drm_gem_object_unreference(obj);
+       ring->status_page.obj = NULL;
+
+       memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+}
+
+int intel_init_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       int ret;
+       struct drm_i915_gem_object *obj_priv;
+       struct drm_gem_object *obj;
+       ring->dev = dev;
+
+       if (I915_NEED_GFX_HWS(dev)) {
+               ret = init_status_page(dev, ring);
+               if (ret)
+                       return ret;
+       }
+
+       obj = drm_gem_object_alloc(dev, ring->size);
+       if (obj == NULL) {
+               ret = -ENOMEM;
+               goto cleanup;
+       }
+
+       ring->gem_object = obj;
+
+       ret = i915_gem_object_pin(obj, ring->alignment);
+       if (ret != 0) {
+               drm_gem_object_unreference(obj);
+               goto cleanup;
+       }
+
+       obj_priv = obj->driver_private;
+       ring->map.size = ring->size;
+       ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+       ring->map.type = 0;
+       ring->map.flags = 0;
+       ring->map.mtrr = 0;
+
+       drm_core_ioremap_wc(&ring->map, dev);
+
+       if (ring->map.handle == NULL) {
+               i915_gem_object_unpin(obj);
+               drm_gem_object_unreference(obj);
+               ret = -EINVAL;
+               goto cleanup;
+       }
+
+       ring->virtual_start = ring->map.handle;
+       ret = ring->init(dev, ring);
+       if (ret != 0) {
+               intel_cleanup_ring_buffer(dev, ring);
+               return ret;
+       }
+
+       ring->head = ring->get_head(dev, ring);
+       ring->tail = ring->get_tail(dev, ring);
+       ring->space = ring->head - (ring->tail + 8);
+       if (ring->space < 0)
+               ring->space += ring->size;
+       spin_lock_init(&ring->user_irq_lock);
+       INIT_LIST_HEAD(&ring->active_list);
+       INIT_LIST_HEAD(&ring->request_list);
+       return ret;
+cleanup:
+       cleanup_status_page(dev, ring);
+       return ret;
+}
+
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       if (ring->gem_object == NULL)
+               return;
+
+       drm_core_ioremapfree(&ring->map, dev);
+
+       i915_gem_object_unpin(ring->gem_object);
+       drm_gem_object_unreference(ring->gem_object);
+       ring->gem_object = NULL;
+       cleanup_status_page(dev, ring);
+}
+
+int intel_wrap_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       unsigned int *virt;
+       int rem;
+       rem = ring->size - ring->tail;
+
+       if (ring->space < rem) {
+               int ret = intel_wait_ring_buffer(dev, ring, rem);
+               if (ret)
+                       return ret;
+       }
+
+       virt = (unsigned int *)(ring->virtual_start + ring->tail);
+       rem /= 4;
+       while (rem--)
+               *virt++ = MI_NOOP;
+
+       ring->tail = 0;
+
+       return 0;
+}
+
+int intel_wait_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring, int n)
+{
+       unsigned long end;
+
+       trace_i915_ring_wait_begin (dev);
+       end = jiffies + 3 * HZ;
+       do {
+               ring->head = ring->get_head(dev, ring);
+               ring->space = ring->head - (ring->tail + 8);
+               if (ring->space < 0)
+                       ring->space += ring->size;
+               if (ring->space >= n) {
+                       trace_i915_ring_wait_end (dev);
+                       return 0;
+               }
+               yield();
+       } while(!time_after(jiffies, end));
+       trace_i915_ring_wait_end (dev);
+
+       return -EBUSY;
+}
+
+void intel_begin_ring_buffer (struct drm_device *dev,
+               struct intel_ring_buffer *ring, int n)
+{
+       if (unlikely(ring->tail + n > ring->size))
+               intel_wrap_ring_buffer(dev, ring);
+       if (unlikely(ring->space < n))
+               intel_wait_ring_buffer(dev, ring, n);
+}
+
+/* Emit one dword at the current tail and advance the software tail
+ * (wrapping modulo the power-of-two ring size).  The caller must have
+ * reserved space via intel_begin_ring_buffer() first.
+ */
+void intel_fill_ring_buffer (struct drm_device *dev,
+               struct intel_ring_buffer *ring, unsigned int data)
+{
+       unsigned int *slot = ring->virtual_start + ring->tail;
+
+       *slot = data;
+       ring->tail = (ring->tail + 4) & (ring->size - 1);
+       ring->space -= 4;
+}
+
+/* Commit the software tail to the hardware tail register via the
+ * per-ring vfunc, making the emitted commands visible to the GPU.
+ */
+void intel_advance_ring_buffer (struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       ring->advance_ring(dev, ring);
+}
+
+/* Copy @len bytes (must be a multiple of 4) of @data into the ring at
+ * the current tail, then advance the hardware tail.
+ *
+ * Fixes vs. the original: the alignment BUG_ON used the inverted mask
+ * ((len & ~3) != 0 fires for any len >= 4 and passes for unaligned
+ * 1..3), and @virt was computed before intel_begin_ring_buffer(),
+ * which may wrap tail to 0 and leave the memcpy writing at a stale
+ * offset past the end of the ring.
+ */
+void intel_fill_struct(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               void *data,
+               unsigned int len)
+{
+       unsigned int *virt;
+
+       /* The ring only takes whole dwords. */
+       BUG_ON((len & (4 - 1)) != 0);
+       intel_begin_ring_buffer (dev, ring, len);
+       /* Compute the destination only after reserving space: the
+        * reserve may have wrapped tail back to the ring start.
+        */
+       virt = ring->virtual_start + ring->tail;
+       memcpy(virt, data, len);
+       ring->tail += len;
+       ring->tail &= ring->size - 1;
+       ring->space -= len;
+       intel_advance_ring_buffer (dev, ring);
+}
+
+/* Hand out the next breadcrumb sequence number for this ring.
+ * Sequence number 0 is reserved to mean "no seqno" and is skipped
+ * when the counter wraps.
+ */
+u32 intel_ring_get_seqno(struct drm_device *dev,
+               struct intel_ring_buffer *ring)
+{
+       u32 seq = ring->next_seqno++;
+
+       /* reserve 0 for non-seqno */
+       if (ring->next_seqno == 0)
+               ring->next_seqno = 1;
+       return seq;
+}
+
+/* Static description of the render (3D) engine ring.  The mutable
+ * fields (head/tail/space, seqno and irq bookkeeping) are listed
+ * explicitly at their reset values; the vfuncs bind the generic ring
+ * code to render-engine registers and commands.
+ */
+struct intel_ring_buffer render_ring = {
+       .name                   = "render ring",
+       .ring_flag              = ON_RENDER_RING,
+       .size                   = 32 * PAGE_SIZE,
+       .alignment              = PAGE_SIZE,
+       .virtual_start          = NULL,
+       .dev                    = NULL,
+       .gem_object             = NULL,
+       .head                   = 0,
+       .tail                   = 0,
+       .space                  = 0,
+       .next_seqno             = 1,    /* 0 is reserved for "no seqno" */
+       .user_irq_refcount      = 0,
+       .irq_gem_seqno          = 0,
+       .waiting_gem_seqno      = 0,
+       .setup_status_page      = render_setup_status_page,
+       .init                   = init_render_ring,
+       .get_head               = render_ring_get_head,
+       .get_tail               = render_ring_get_tail,
+       .get_active_head        = render_ring_get_active_head,
+       .advance_ring           = render_ring_advance_ring,
+       .flush                  = render_ring_flush,
+       .add_request            = render_ring_add_request,
+       .get_gem_seqno          = render_ring_get_gem_seqno,
+       .user_irq_get           = render_ring_get_user_irq,
+       .user_irq_put           = render_ring_put_user_irq,
+       .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+       .status_page            = {NULL, 0, NULL},
+       .map                    = {0,}
+};
+
+/* Static description of the BSD (bit-stream decoder) engine ring, used
+ * for hardware video decode.  Same layout and reset values as
+ * render_ring, but bound to the BSD engine's registers, commands and
+ * interrupt sources via its own vfuncs.
+ */
+struct intel_ring_buffer bsd_ring = {
+       .name                   = "bsd ring",
+       .ring_flag              = ON_BSD_RING,
+       .size                   = 32 * PAGE_SIZE,
+       .alignment              = PAGE_SIZE,
+       .virtual_start          = NULL,
+       .dev                    = NULL,
+       .gem_object             = NULL,
+       .head                   = 0,
+       .tail                   = 0,
+       .space                  = 0,
+       .next_seqno             = 1,    /* 0 is reserved for "no seqno" */
+       .user_irq_refcount      = 0,
+       .irq_gem_seqno          = 0,
+       .waiting_gem_seqno      = 0,
+       .setup_status_page      = bsd_setup_status_page,
+       .init                   = init_bsd_ring,
+       .get_head               = bsd_ring_get_head,
+       .get_tail               = bsd_ring_get_tail,
+       .get_active_head        = bsd_ring_get_active_head,
+       .advance_ring           = bsd_ring_advance_ring,
+       .flush                  = bsd_ring_flush,
+       .add_request            = bsd_ring_add_request,
+       .get_gem_seqno          = bsd_ring_get_gem_seqno,
+       .user_irq_get           = bsd_ring_get_user_irq,
+       .user_irq_put           = bsd_ring_put_user_irq,
+       .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
+       .status_page            = {NULL, 0, NULL},
+       .map                    = {0,}
+};
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
new file mode 100644
index 0000000..012dc47
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -0,0 +1,120 @@ 
+#ifndef _INTEL_RINGBUFFER_H_
+#define _INTEL_RINGBUFFER_H_
+
+/* Per-ring hardware status page: a shared page the GPU writes status
+ * values (e.g. completed seqnos) into.
+ */
+struct  intel_hw_status_page {
+       void            *page_addr;     /* CPU mapping of the page */
+       unsigned int    gfx_addr;       /* GPU (GTT) address of the page */
+       struct          drm_gem_object *obj;    /* backing GEM object */
+};
+
+struct drm_i915_gem_execbuffer2;
+/* State and engine-specific vfuncs for one hardware command ring
+ * (render or BSD).  head/tail/space are byte offsets into the ring;
+ * the vfuncs abstract per-engine register access and command emission.
+ */
+struct  intel_ring_buffer {
+       const char      *name;
+       unsigned int    ring_flag;      /* ON_RENDER_RING / ON_BSD_RING */
+       unsigned long   size;           /* ring size in bytes (power of two) */
+       unsigned int    alignment;
+       void            *virtual_start; /* CPU mapping of the ring */
+       struct          drm_device *dev;
+       struct          drm_gem_object *gem_object;     /* backing object */
+
+       unsigned int    head;           /* byte offset, read from hardware */
+       unsigned int    tail;           /* byte offset of next emission */
+       unsigned int    space;          /* free bytes between tail and head */
+       u32             next_seqno;     /* next breadcrumb; 0 is reserved */
+       struct intel_hw_status_page status_page;
+
+       /* Protects the user-irq refcount below. */
+       spinlock_t      user_irq_lock;
+       int             user_irq_refcount;
+       u32             irq_gem_seqno;          /* last seqno seen at irq time */
+       u32             waiting_gem_seqno;
+       void            (*user_irq_get)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       void            (*user_irq_put)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       /* Point the engine's hardware status page at status_page. */
+       void            (*setup_status_page)(struct drm_device *dev,
+                       struct  intel_ring_buffer *ring);
+
+       /* One-time hardware init of this engine's ring registers. */
+       int             (*init)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+
+       unsigned int    (*get_head)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       unsigned int    (*get_tail)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       unsigned int    (*get_active_head)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       /* Write the software tail to the hardware tail register. */
+       void            (*advance_ring)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       void            (*flush)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring,
+                       u32     invalidate_domains,
+                       u32     flush_domains);
+       /* Emit a breadcrumb request; returns its seqno. */
+       u32             (*add_request)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring,
+                       struct drm_file *file_priv,
+                       u32 flush_domains);
+       u32             (*get_gem_seqno)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring);
+       /* Emit a batchbuffer start for an execbuffer call. */
+       int             (*dispatch_gem_execbuffer)(struct drm_device *dev,
+                       struct intel_ring_buffer *ring,
+                       struct drm_i915_gem_execbuffer2 *exec,
+                       struct drm_clip_rect *cliprects,
+                       uint64_t exec_offset);
+
+       /**
+        * List of objects currently involved in rendering from the
+        * ringbuffer.
+        *
+        * Includes buffers having the contents of their GPU caches
+        * flushed, not necessarily primitives.  last_rendering_seqno
+        * represents when the rendering involved will be completed.
+        *
+        * A reference is held on the buffer while on this list.
+        */
+       struct list_head active_list;
+
+       /**
+        * List of breadcrumbs associated with GPU requests currently
+        * outstanding.
+        */
+       struct list_head request_list;
+
+       wait_queue_head_t irq_queue;    /* woken on this ring's user irq */
+       drm_local_map_t map;            /* ioremap of the ring pages */
+};
+
+/* Read dword @reg from the CPU mapping of the hardware status page. */
+static inline u32
+intel_read_status_page(struct intel_ring_buffer *ring,
+               int reg)
+{
+       return ((u32 *)ring->status_page.page_addr)[reg];
+}
+
+/* Ring setup / teardown. */
+int intel_init_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+/* Space management: wait for @n free bytes / pad-and-wrap to offset 0. */
+int intel_wait_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring, int n);
+int intel_wrap_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+/* Command emission: reserve, emit dwords/structs, commit the tail. */
+void intel_begin_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring, int n);
+void intel_fill_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring, u32 data);
+void intel_fill_struct(struct drm_device *dev,
+               struct intel_ring_buffer *ring,
+               void *data,
+               unsigned int len);
+void intel_advance_ring_buffer(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+
+/* Allocate the next breadcrumb seqno for @ring (never returns 0). */
+u32 intel_ring_get_seqno(struct drm_device *dev,
+               struct intel_ring_buffer *ring);
+
+/* The two hardware rings, defined in intel_ringbuffer.c. */
+extern struct intel_ring_buffer render_ring;
+extern struct intel_ring_buffer bsd_ring;
+
+#endif /* _INTEL_RINGBUFFER_H_ */
+
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 552ec11..d7dd7fd 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1373,10 +1373,10 @@  intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
        tv_dac = I915_READ(TV_DAC);

        /* Disable TV interrupts around load detect or we'll recurse */
-       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+       spin_lock_irqsave(&dev_priv->render_ring.user_irq_lock, irqflags);
        i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
                              PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+       spin_unlock_irqrestore(&dev_priv->render_ring.user_irq_lock, irqflags);

        /*
         * Detect TV by polling)
@@ -1427,10 +1427,10 @@  intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
        }

        /* Restore interrupt config */
-       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+       spin_lock_irqsave(&dev_priv->render_ring.user_irq_lock, irqflags);
        i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
                             PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+       spin_unlock_irqrestore(&dev_priv->render_ring.user_irq_lock, irqflags);

        return type;
 }
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index b64a8d7..dfb251e 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -594,9 +594,11 @@  struct drm_i915_gem_exec_object2 {
         */
        __u64 offset;

-#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
+#define EXEC_OBJECT_NEEDS_FENCE        (1<<0)
        __u64 flags;
-       __u64 rsvd1;
+#define EXEC_OBJECT_ON_RENDER_RING     (1<<0)
+#define EXEC_OBJECT_ON_BSD_RING                (1<<1)
+       __u64 ring_flags;
        __u64 rsvd2;
 };

@@ -616,7 +618,9 @@  struct drm_i915_gem_execbuffer2 {
        __u32 num_cliprects;
        /** This is a struct drm_clip_rect *cliprects */
        __u64 cliprects_ptr;
-       __u64 flags; /* currently unused */
+#define ON_RENDER_RING                 (1<<0)
+#define ON_BSD_RING                    (1<<1)
+       __u64 flags;
        __u64 rsvd1;
        __u64 rsvd2;
 };