[2/2] intel: Use atomic refcounters

Message ID 1254454295-11470-2-git-send-email-chris@chris-wilson.co.uk
State Accepted

Commit Message

Chris Wilson Oct. 2, 2009, 3:31 a.m. UTC

Patch

diff --git a/configure.ac b/configure.ac
index d707052..2852962 100644
--- a/configure.ac
+++ b/configure.ac
@@ -146,6 +146,23 @@  if test "x$HAVE_LIBUDEV" = xyes; then
 fi
 AM_CONDITIONAL(HAVE_LIBUDEV, [test "x$HAVE_LIBUDEV" = xyes])
 
+# Check for atomic intrinsics
+AC_CACHE_CHECK([for native atomic primitives], drm_cv_atomic_primitives,
+[
+	drm_cv_atomic_primitives="none"
+
+	AC_TRY_LINK([
+int atomic_add(int i) { return __sync_fetch_and_add (&i, 1); }
+int atomic_cmpxchg(int i, int j, int k) { return __sync_val_compare_and_swap (&i, j, k); }
+], [],
+	  drm_cv_atomic_primitives="Intel"
+	  )
+])
+if test "x$drm_cv_atomic_primitives" = xIntel; then
+	AC_DEFINE(HAVE_INTEL_ATOMIC_PRIMITIVES, 1,
+		  [Enable if your compiler supports the Intel __sync_* atomic primitives])
+fi
+
 AC_SUBST(WARN_CFLAGS)
 AC_OUTPUT([
 	Makefile
diff --git a/libdrm/intel/Makefile.am b/libdrm/intel/Makefile.am
index e68a3b4..28f8952 100644
--- a/libdrm/intel/Makefile.am
+++ b/libdrm/intel/Makefile.am
@@ -35,6 +35,7 @@  libdrm_intel_la_LDFLAGS = -version-number 1:0:0 -no-undefined
 libdrm_intel_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@ @CLOCK_LIB@
 
 libdrm_intel_la_SOURCES = \
+	intel_atomic.h \
 	intel_bufmgr.c \
 	intel_bufmgr_priv.h \
 	intel_bufmgr_fake.c \
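
intel_atomic.h is added to the build here, but its contents are not part of this diff. As a rough sketch only (the real header may differ), the interface that intel_bufmgr_gem.c relies on below can be built directly on the __sync intrinsics probed by the configure check above; the wrapper names are taken from the calls in the patch, the guard macro name is illustrative:

#ifndef INTEL_ATOMIC_H
#define INTEL_ATOMIC_H

#ifndef HAVE_INTEL_ATOMIC_PRIMITIVES
#error libdrm requires a compiler providing the __sync atomic intrinsics
#endif

typedef struct {
    int atomic;
} atomic_t;

#define atomic_read(x)          ((x)->atomic)
#define atomic_set(x, val)      ((x)->atomic = (val))

/* Atomically increment the counter. */
#define atomic_inc(x)           ((void) __sync_fetch_and_add(&(x)->atomic, 1))

/* Atomically decrement; true if the counter reached zero (old value was 1). */
#define atomic_dec_and_test(x)  (__sync_fetch_and_add(&(x)->atomic, -1) == 1)

/* Compare-and-swap, returning the previous value. */
#define atomic_cmpxchg(x, oldv, newv) \
    __sync_val_compare_and_swap(&(x)->atomic, oldv, newv)

#endif /* INTEL_ATOMIC_H */
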
diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
index 6125f6b..73325ca 100644
--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -53,6 +53,7 @@ 
 
 #include "errno.h"
 #include "libdrm_lists.h"
+#include "intel_atomic.h"
 #include "intel_bufmgr.h"
 #include "intel_bufmgr_priv.h"
 #include "intel_chipset.h"
@@ -102,7 +103,7 @@  typedef struct _drm_intel_bufmgr_gem {
 struct _drm_intel_bo_gem {
     drm_intel_bo bo;
 
-    int refcount;
+    atomic_t refcount;
     /** Boolean whether the mmap ioctl has been called for this buffer yet. */
     uint32_t gem_handle;
     const char *name;
@@ -172,8 +173,6 @@  struct _drm_intel_bo_gem {
     int reloc_tree_fences;
 };
 
-static void drm_intel_gem_bo_reference_locked(drm_intel_bo *bo);
-
 static unsigned int
 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count);
 
@@ -189,6 +188,9 @@  drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
 			    uint32_t stride);
 
 static void
+drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo);
+
+static void
 drm_intel_gem_bo_unreference(drm_intel_bo *bo);
 
 static void
@@ -237,6 +239,15 @@  static void drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
     }
 }
 
+static void
+drm_intel_gem_bo_reference(drm_intel_bo *bo)
+{
+    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+
+    assert(atomic_read(&bo_gem->refcount) > 0);
+    atomic_inc(&bo_gem->refcount);
+}
+
 /**
  * Adds the given buffer to the list of buffers to be validated (moved into the
  * appropriate memory type) with the next batch submission.
@@ -280,7 +291,7 @@  drm_intel_add_validate_buffer(drm_intel_bo *bo)
     bufmgr_gem->exec_objects[index].alignment = 0;
     bufmgr_gem->exec_objects[index].offset = 0;
     bufmgr_gem->exec_bos[index] = bo;
-    drm_intel_gem_bo_reference_locked(bo);
+    drm_intel_gem_bo_reference(bo);
     bufmgr_gem->exec_count++;
 }
 
@@ -436,7 +447,7 @@  retry:
     }
 
     bo_gem->name = name;
-    bo_gem->refcount = 1;
+    atomic_set(&bo_gem->refcount, 1);
     bo_gem->validate_index = -1;
     bo_gem->reloc_tree_size = bo_gem->bo.size;
     bo_gem->reloc_tree_fences = 0;
@@ -499,7 +510,7 @@  drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, const char *name,
     bo_gem->bo.virtual = NULL;
     bo_gem->bo.bufmgr = bufmgr;
     bo_gem->name = name;
-    bo_gem->refcount = 1;
+    atomic_set (&bo_gem->refcount, 1);
     bo_gem->validate_index = -1;
     bo_gem->gem_handle = open_arg.handle;
     bo_gem->global_name = handle;
@@ -525,27 +536,6 @@  drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, const char *name,
 }
 
 static void
-drm_intel_gem_bo_reference(drm_intel_bo *bo)
-{
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-
-    assert(bo_gem->refcount > 0);
-    pthread_mutex_lock(&bufmgr_gem->lock);
-    bo_gem->refcount++;
-    pthread_mutex_unlock(&bufmgr_gem->lock);
-}
-
-static void
-drm_intel_gem_bo_reference_locked(drm_intel_bo *bo)
-{
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-
-    assert(bo_gem->refcount > 0);
-    bo_gem->refcount++;
-}
-
-static void
 drm_intel_gem_bo_free(drm_intel_bo *bo)
 {
     drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
@@ -594,64 +584,74 @@  drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
 }
 
 static void
-drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
+drm_intel_gem_bo_unreference_final(drm_intel_bo *bo)
 {
     drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
     drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+    struct drm_intel_gem_bo_bucket *bucket;
+    uint32_t tiling_mode;
 
-    assert(bo_gem->refcount > 0);
-    if (--bo_gem->refcount == 0) {
-	struct drm_intel_gem_bo_bucket *bucket;
-	uint32_t tiling_mode;
-
-	if (bo_gem->relocs != NULL) {
-	    int i;
+    if (bo_gem->relocs != NULL) {
+	int i;
 
-	    /* Unreference all the target buffers */
-	    for (i = 0; i < bo_gem->reloc_count; i++)
-		 drm_intel_gem_bo_unreference_locked(bo_gem->reloc_target_bo[i]);
-	    free(bo_gem->reloc_target_bo);
-	    free(bo_gem->relocs);
-	}
+	/* Unreference all the target buffers */
+	for (i = 0; i < bo_gem->reloc_count; i++)
+	     drm_intel_gem_bo_unreference_locked(bo_gem->reloc_target_bo[i]);
+	free(bo_gem->reloc_target_bo);
+	free(bo_gem->relocs);
+    }
 
-	DBG("bo_unreference final: %d (%s)\n",
-	    bo_gem->gem_handle, bo_gem->name);
+    DBG("bo_unreference final: %d (%s)\n",
+	bo_gem->gem_handle, bo_gem->name);
 
-	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
-	/* Put the buffer into our internal cache for reuse if we can. */
-	tiling_mode = I915_TILING_NONE;
-	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
-	    drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0)
-	{
-	    struct timespec time;
+    bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
+    /* Put the buffer into our internal cache for reuse if we can. */
+    tiling_mode = I915_TILING_NONE;
+    if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
+	drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0)
+    {
+	struct timespec time;
 
-	    clock_gettime(CLOCK_MONOTONIC, &time);
-	    bo_gem->free_time = time.tv_sec;
+	clock_gettime(CLOCK_MONOTONIC, &time);
+	bo_gem->free_time = time.tv_sec;
 
-	    bo_gem->name = NULL;
-	    bo_gem->validate_index = -1;
-	    bo_gem->relocs = NULL;
-	    bo_gem->reloc_target_bo = NULL;
-	    bo_gem->reloc_count = 0;
+	bo_gem->name = NULL;
+	bo_gem->validate_index = -1;
+	bo_gem->relocs = NULL;
+	bo_gem->reloc_target_bo = NULL;
+	bo_gem->reloc_count = 0;
 
-	    DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
+	DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
 
-	    drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem, I915_MADV_DONTNEED);
-	    drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
-	} else {
-	    drm_intel_gem_bo_free(bo);
-	}
+	drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem, I915_MADV_DONTNEED);
+	drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
+    } else {
+	drm_intel_gem_bo_free(bo);
     }
 }
 
 static void
+drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
+{
+    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+
+    assert(atomic_read(&bo_gem->refcount) > 0);
+    if (atomic_dec_and_test (&bo_gem->refcount))
+	drm_intel_gem_bo_unreference_final(bo);
+}
+
+static void
 drm_intel_gem_bo_unreference(drm_intel_bo *bo)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
 
-    pthread_mutex_lock(&bufmgr_gem->lock);
-    drm_intel_gem_bo_unreference_locked(bo);
-    pthread_mutex_unlock(&bufmgr_gem->lock);
+    assert(atomic_read(&bo_gem->refcount) > 0);
+    if (atomic_dec_and_test (&bo_gem->refcount)) {
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+	pthread_mutex_lock(&bufmgr_gem->lock);
+	drm_intel_gem_bo_unreference_final(bo);
+	pthread_mutex_unlock(&bufmgr_gem->lock);
+    }
 }
 
 static int
@@ -1018,7 +1018,7 @@  drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
     bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
 
     bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
-    drm_intel_gem_bo_reference_locked(target_bo);
+    drm_intel_gem_bo_reference(target_bo);
 
     bo_gem->reloc_count++;
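
For reference, a standalone sketch (not part of the patch, toy names only) of the property the conversion relies on: references and non-final unreferences need no lock, and atomic_dec_and_test is true for exactly one caller, so the final teardown (the only path that still takes bufmgr_gem->lock, since it touches the shared BO cache) runs once regardless of how the decrements interleave. Build with cc -pthread:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* Wrapper style mirroring the atomic_* calls used in the patch. */
typedef struct { int atomic; } atomic_t;
#define atomic_inc(x)           ((void) __sync_fetch_and_add(&(x)->atomic, 1))
#define atomic_dec_and_test(x)  (__sync_fetch_and_add(&(x)->atomic, -1) == 1)

struct object {
    atomic_t refcount;
};

static int finalized;	/* bumped by whichever thread drops the last reference */

static void object_unreference(struct object *obj)
{
    /* atomic_dec_and_test succeeds for exactly one caller, so the final
     * teardown runs once no matter how the decrements interleave. */
    if (atomic_dec_and_test(&obj->refcount))
	finalized++;	/* only one thread can reach this */
}

static void *worker(void *arg)
{
    object_unreference(arg);
    return NULL;
}

int main(void)
{
    struct object obj = { { 1 } };	/* creation reference */
    pthread_t threads[4];
    int i;

    for (i = 1; i < 4; i++)	/* one extra reference per additional worker */
	atomic_inc(&obj.refcount);
    for (i = 0; i < 4; i++)
	pthread_create(&threads[i], NULL, worker, &obj);
    for (i = 0; i < 4; i++)
	pthread_join(threads[i], NULL);

    assert(finalized == 1);
    printf("final teardown ran exactly once\n");
    return 0;
}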