@@ -6,6 +6,8 @@ config DRM_I915
select INTEL_GTT
select AGP_INTEL if AGP
select INTERVAL_TREE
+ select ANDROID
+ select SYNC
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
select SHMEM
@@ -25,6 +25,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_execbuffer.o \
i915_gem_gtt.o \
i915_gem.o \
+ i915_sync.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_gem_userptr.o \
@@ -2043,6 +2043,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_FENCE, i915_sync_create_fence_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
@@ -1388,6 +1388,8 @@ struct i915_frontbuffer_tracking {
unsigned flip_bits;
};
+struct i915_sync_timeline;
+
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@@ -1422,6 +1424,8 @@ struct drm_i915_private {
struct drm_i915_gem_object *semaphore_obj;
uint32_t last_seqno, next_seqno;
+ struct i915_sync_timeline *sync_tl[I915_NUM_RINGS];
+
drm_dma_handle_t *status_page_dmah;
struct resource mch_res;
@@ -2275,6 +2279,13 @@ void i915_init_vm(struct drm_i915_private *dev_priv,
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma);
+/* i915_sync.c */
+int i915_sync_init(struct drm_i915_private *dev_priv);
+void i915_sync_fini(struct drm_i915_private *dev_priv);
+void i915_sync_signal(struct intel_engine_cs *ring);
+int i915_sync_create_fence_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
#define PIN_MAPPABLE 0x1
#define PIN_NONBLOCK 0x2
#define PIN_GLOBAL 0x4
@@ -4775,6 +4775,9 @@ int i915_gem_init(struct drm_device *dev)
atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
ret = 0;
}
+
+ i915_sync_init(dev_priv);
+
mutex_unlock(&dev->struct_mutex);
/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
@@ -4970,6 +4973,8 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 		request->file_priv = NULL;
 	}
 	spin_unlock(&file_priv->mm.lock);
+
+	/*
+	 * FIXME(review): i915_gem_release() runs on every drm file close,
+	 * but the sync timelines are device-global and created once in
+	 * i915_gem_init().  Destroying them here tears down timelines other
+	 * clients may still hold fences on; this call belongs in driver
+	 * unload, not per-file release.
+	 */
+	i915_sync_fini(dev->dev_private);
 }
static void
@@ -33,6 +33,7 @@
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
+#include "../../../staging/android/sync.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
@@ -1269,6 +1270,7 @@ static void notify_ring(struct drm_device *dev,
intel_notify_mmio_flip(ring);
wake_up_all(&ring->irq_queue);
+ i915_sync_signal(ring);
i915_queue_hangcheck(dev);
}
@@ -2617,8 +2619,10 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
*/
/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
- for_each_ring(ring, dev_priv, i)
+ for_each_ring(ring, dev_priv, i) {
wake_up_all(&ring->irq_queue);
+ i915_sync_signal(ring);
+ }
/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
wake_up_all(&dev_priv->pending_flip_queue);
@@ -3269,6 +3273,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
DRM_INFO("Fake missed irq on %s\n",
ring->name);
wake_up_all(&ring->irq_queue);
+ i915_sync_signal(ring);
}
/* Safeguard against driver failure */
ring->hangcheck.score += BUSY;
new file mode 100644
@@ -0,0 +1,301 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+#include <linux/oom.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/pci.h>
+#include <linux/dma-buf.h>
+#include "../../../staging/android/sync.h"
+
+/*
+ * i915 fences on sync timelines
+ *
+ * We implement sync points in terms of i915 seqnos. They're exposed
+ * through the new DRM_I915_GEM_FENCE ioctl, and can be mixed and matched
+ * with other Android timelines and aggregated into sync_fences, etc.
+ *
+ * TODO:
+ * rebase on top of Chris's seqno/request stuff and use requests
+ * allow non-RCS fences (need ring/context association)
+ */
+
+/*
+ * Per-ring timeline: embeds the staging sync_timeline (kept as the first
+ * member so the two can be cast back and forth) plus the ring whose
+ * seqnos back it.
+ */
+struct i915_sync_timeline {
+	struct sync_timeline obj;
+	struct intel_engine_cs *ring;
+	struct drm_i915_private *dev_priv;
+};
+
+/* One point on a timeline: signals once @seqno has passed on the ring. */
+struct i915_sync_pt {
+	struct sync_pt pt;
+	u32 seqno;
+};
+
+/*
+ * Allocate a sync_pt on @tl and back it with a freshly emitted request.
+ *
+ * NOTE(review): this calls ring->add_request() directly instead of
+ * __i915_add_request(), so none of the usual request bookkeeping
+ * (active/request lists, client throttling) happens for this seqno —
+ * confirm that is intended before merging.
+ */
+static struct sync_pt *i915_sync_pt_create(struct i915_sync_timeline *tl)
+{
+	struct intel_engine_cs *ring = tl->ring;
+	struct i915_sync_pt *pt;
+	int ret;
+
+	pt = (struct i915_sync_pt *)sync_pt_create(&tl->obj,
+						   sizeof(struct i915_sync_pt));
+	if (!pt)
+		return NULL;
+
+	/* Emit a request; the pt signals when its seqno retires. */
+	ret = ring->add_request(ring);
+	if (ret) {
+		DRM_ERROR("add_request failed\n");
+		sync_pt_free((struct sync_pt *)pt);
+		return NULL;
+	}
+
+	/*
+	 * NOTE(review): assumes outstanding_lazy_seqno still holds the seqno
+	 * just emitted by add_request() at this point — TODO confirm it is
+	 * not cleared/advanced in between (struct_mutex is held by callers).
+	 */
+	pt->seqno = ring->outstanding_lazy_seqno;
+
+	return (struct sync_pt *)pt;
+}
+
+/*
+ * Duplicate a sync_pt (timeline ->dup hook).
+ *
+ * A dup must be a pure copy with no side effects.  The previous
+ * implementation went through i915_sync_pt_create(), which emits a
+ * brand-new request on the ring only to have its seqno thrown away when
+ * src->seqno is copied over it below.  Allocate the pt directly instead.
+ */
+static struct sync_pt *i915_sync_dup(struct sync_pt *sync_pt)
+{
+	struct i915_sync_pt *dst, *src = (struct i915_sync_pt *)sync_pt;
+
+	dst = (struct i915_sync_pt *)sync_pt_create(sync_pt_parent(sync_pt),
+						    sizeof(struct i915_sync_pt));
+	if (!dst)
+		return NULL;
+
+	dst->seqno = src->seqno;
+
+	return (struct sync_pt *)dst;
+}
+
+/*
+ * Timeline ->has_signaled hook: non-zero once the ring's current seqno
+ * has passed the pt's seqno.  The second get_seqno() argument is
+ * lazy_coherency — false here, presumably to force an up-to-date read
+ * of the status page (confirm against the engine implementations).
+ */
+static int i915_sync_signaled(struct sync_pt *sync_pt)
+{
+	struct i915_sync_pt *pt = (struct i915_sync_pt *)sync_pt;
+	struct i915_sync_timeline *obj =
+		(struct i915_sync_timeline *)sync_pt_parent(sync_pt);
+	struct intel_engine_cs *ring = obj->ring;
+
+	return i915_seqno_passed(ring->get_seqno(ring, false), pt->seqno);
+}
+
+/*
+ * Timeline ->compare hook: order two pts by seqno, modulo wrap.
+ *
+ * Use the same wrap-safe idiom as i915_seqno_passed(): subtract in
+ * unsigned arithmetic first, then reinterpret as signed.  The previous
+ * form, (s32)a - (s32)b, is signed-overflow UB when the two seqnos sit
+ * on opposite sides of the 2^31 boundary.
+ */
+static int i915_sync_compare(struct sync_pt *pta, struct sync_pt *ptb)
+{
+	struct i915_sync_pt *pt1 = (struct i915_sync_pt *)pta;
+	struct i915_sync_pt *pt2 = (struct i915_sync_pt *)ptb;
+	u32 a = pt1->seqno, b = pt2->seqno;
+
+	if (a == b)
+		return 0;
+
+	return (s32)(a - b) < 0 ? -1 : 1;
+}
+
+/*
+ * Timeline ->fill_driver_data hook: copy the pt's seqno into the
+ * caller-supplied buffer.  Returns the number of bytes written, or
+ * -ENOMEM if @size is too small.
+ * NOTE(review): -EINVAL might read more naturally for a short buffer —
+ * confirm which errno the sync core expects here.
+ */
+static int i915_sync_fill_driver_data(struct sync_pt *sync_pt, void *data,
+				      int size)
+{
+	struct i915_sync_pt *pt = (struct i915_sync_pt *)sync_pt;
+
+	if (size < sizeof(pt->seqno))
+		return -ENOMEM;
+
+	memcpy(data, &pt->seqno, sizeof(pt->seqno));
+
+	return sizeof(pt->seqno);
+}
+
+/* debugfs helper: print the timeline's current value (ring's live seqno). */
+static void i915_sync_timeline_value_str(struct sync_timeline *timeline,
+					 char *str, int size)
+{
+	struct i915_sync_timeline *obj = (struct i915_sync_timeline *)timeline;
+	struct intel_engine_cs *ring = obj->ring;
+
+	snprintf(str, size, "%u", ring->get_seqno(ring, false));
+}
+
+/* debugfs helper: print the seqno a given sync_pt is waiting for. */
+static void i915_pt_value_str(struct sync_pt *sync_pt, char *str, int size)
+{
+	struct i915_sync_pt *pt = (struct i915_sync_pt *)sync_pt;
+
+	snprintf(str, size, "%u", pt->seqno);
+}
+
+/*
+ * Ops vtable handed to sync_timeline_create().
+ * NOTE(review): make this const if the staging sync API takes a const
+ * ops pointer — it is never written after init.
+ */
+static struct sync_timeline_ops i915_sync_ops = {
+	.driver_name = "i915_sync",
+	.dup = i915_sync_dup,
+	.has_signaled = i915_sync_signaled,
+	.compare = i915_sync_compare,
+	.fill_driver_data = i915_sync_fill_driver_data,
+	.timeline_value_str = i915_sync_timeline_value_str,
+	.pt_value_str = i915_pt_value_str,
+};
+
+/**
+ * i915_sync_create_fence_ioctl - fence creation function
+ * @dev: drm device
+ * @data: ioctl data (struct drm_i915_gem_fence)
+ * @file: file struct
+ *
+ * This function creates a fence given a context and ring, and returns
+ * it to the caller in the form of a file descriptor.
+ *
+ * The returned descriptor is a sync fence fd, and can be used with all
+ * the usual sync fence operations (poll, ioctl, etc).
+ *
+ * The process fd limit should prevent an overallocation of fence objects,
+ * which need to be destroyed manually with a close() call.
+ *
+ * Returns 0 with a valid fd in @fdata->fd, or a negative errno.
+ */
+int i915_sync_create_fence_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_fence *fdata = data;
+	struct sync_pt *pt;
+	struct sync_fence *fence;
+	struct i915_sync_timeline *obj;
+	struct intel_engine_cs *ring;
+	struct intel_context *ctx;
+	u32 ctx_id = fdata->ctx_id;
+	int fd;
+	int ret = 0;
+
+	if (file == NULL) {
+		DRM_ERROR("no file priv?\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Reserve the fd up front and check it: get_unused_fd_flags() can
+	 * fail (fd limit).  The old code ignored the result and also leaked
+	 * the reservation when taking struct_mutex failed.
+	 */
+	fd = get_unused_fd_flags(O_CLOEXEC);
+	if (fd < 0)
+		return fd;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		DRM_ERROR("mutex interrupted\n");
+		goto err_fd;
+	}
+
+	/* TODO: use @ctx to pick the ring once non-RCS fences are supported */
+	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
+	if (ctx == NULL) {
+		DRM_ERROR("context lookup failed\n");
+		ret = -ENOENT;
+		goto err;
+	}
+
+	/*
+	 * RCS only for now.  The address of an array element can never be
+	 * NULL, so the old "!ring" test was dead code — readiness is the
+	 * real check.
+	 */
+	ring = &dev_priv->ring[RCS];
+	if (!intel_ring_initialized(ring)) {
+		DRM_ERROR("ring not ready\n");
+		ret = -EIO;
+		goto err;
+	}
+
+	obj = dev_priv->sync_tl[ring->id];
+
+	pt = i915_sync_pt_create(obj);
+	if (!pt) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* Userspace need not have NUL-terminated the fence name */
+	fdata->name[sizeof(fdata->name) - 1] = '\0';
+	fence = sync_fence_create(fdata->name, pt);
+	if (!fence) {
+		sync_pt_free(pt);
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	fdata->fd = fd;
+
+	/* Publishes the fd; the fence is owned by the fd from here on */
+	sync_fence_install(fence, fd);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+
+err:
+	mutex_unlock(&dev->struct_mutex);
+err_fd:
+	put_unused_fd(fd);
+	return ret;
+}
+
+/* FIXME: handle hangs */
+void i915_sync_signal(struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct i915_sync_timeline *tl = dev_priv->sync_tl[ring->id];
+
+	/*
+	 * Called from the interrupt and hangcheck paths, which can run
+	 * before i915_sync_init() has populated sync_tl[] (and after a
+	 * teardown), so tolerate a missing timeline instead of oopsing.
+	 */
+	if (tl)
+		sync_timeline_signal(&tl->obj);
+}
+
+/*
+ * Create one sync timeline per ring.  Returns 0 on success or -ENOMEM.
+ * On failure every partially-created timeline is torn down and its
+ * sync_tl[] slot left NULL (which i915_sync_signal() tolerates).
+ * NOTE(review): callers currently ignore the return value — they should
+ * not.
+ */
+int i915_sync_init(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *ring;
+	struct i915_sync_timeline *tl;
+	int i, ret = 0;
+
+	for_each_ring(ring, dev_priv, i) {
+		tl = (struct i915_sync_timeline *)
+			sync_timeline_create(&i915_sync_ops,
+					     sizeof(struct i915_sync_timeline),
+					     ring->name);
+		if (!tl) {
+			ret = -ENOMEM;
+			goto out_err;
+		}
+		tl->dev_priv = dev_priv;
+		tl->ring = ring;
+		dev_priv->sync_tl[ring->id] = tl;
+	}
+
+	return ret;
+
+out_err:
+	/*
+	 * Timelines from sync_timeline_create() are refcounted and own
+	 * internal state; they must go through sync_timeline_destroy().
+	 * The old bare kfree() corrupted the kref and leaked that state,
+	 * and also left dangling pointers behind.
+	 */
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		if (dev_priv->sync_tl[i]) {
+			sync_timeline_destroy(&dev_priv->sync_tl[i]->obj);
+			dev_priv->sync_tl[i] = NULL;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Tear down the per-ring timelines.  Safe even if init failed or never
+ * ran: NULL slots are skipped, and each slot is cleared so a repeated
+ * call (or a late i915_sync_signal()) never touches freed memory.  The
+ * old version dereferenced possibly-NULL slots unconditionally and left
+ * the freed pointers in place.
+ */
+void i915_sync_fini(struct drm_i915_private *dev_priv)
+{
+	int i;
+
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct i915_sync_timeline *tl = dev_priv->sync_tl[i];
+
+		if (!tl)
+			continue;
+
+		sync_timeline_destroy(&tl->obj);
+		dev_priv->sync_tl[i] = NULL;
+	}
+}
@@ -224,6 +224,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_REG_READ 0x31
#define DRM_I915_GET_RESET_STATS 0x32
#define DRM_I915_GEM_USERPTR 0x33
+#define DRM_I915_GEM_FENCE 0x34
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -275,6 +276,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
+#define DRM_IOCTL_I915_GEM_FENCE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_FENCE, struct drm_i915_gem_fence)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -1066,4 +1068,25 @@ struct drm_i915_gem_userptr {
__u32 handle;
};
+/**
+ * drm_i915_gem_fence - create a fence
+ * @fd: fd for fence
+ * @ctx_id: context ID for fence
+ * @flags: flags for operation
+ *
+ * Creates a fence in @fd and returns it to the caller. This fd can be
+ * passed around between processes as any other fd, and can be poll'd
+ * and read for status.
+ *
+ * RETURNS:
+ * A valid fd in the @fd field or an errno on error.
+ */
+struct drm_i915_gem_fence {
+ __s32 fd;
+ __u32 ctx_id;
+ __u32 flags;
+ __u32 pad;
+ char name[32];
+};
+
#endif /* _UAPI_I915_DRM_H_ */
Expose an ioctl to create Android fences based on the Android sync point infrastructure (which in turn is based on DMA-buf fences). Just a sketch at this point, no testing has been done. There are a couple of goals here: 1) allow applications and libraries to create fences without an associated buffer 2) re-use a common API so userspace doesn't have to impedance mismatch between different driver implementations too much 3) allow applications and libraries to use explicit synchronization if they choose by exposing fences directly Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org> --- drivers/gpu/drm/i915/Kconfig | 2 + drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/i915_dma.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 11 ++ drivers/gpu/drm/i915/i915_gem.c | 5 + drivers/gpu/drm/i915/i915_irq.c | 7 +- drivers/gpu/drm/i915/i915_sync.c | 301 ++++++++++++++++++++++++++++++++++++++ include/uapi/drm/i915_drm.h | 23 +++ 8 files changed, 350 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/i915/i915_sync.c