@@ -39,6 +39,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
+#include "drm_crtc_internal.h"
/**
* drm_modeset_lock_all - take all modeset locks
@@ -60,7 +61,7 @@ void drm_modeset_lock_all(struct drm_device *dev)
mutex_lock(&config->mutex);
- drm_modeset_acquire_init(ctx, false, false);
+ drm_modeset_acquire_init(ctx);
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
@@ -105,7 +106,6 @@ void drm_modeset_unlock_all(struct drm_device *dev)
config->acquire_ctx = NULL;
drm_modeset_drop_locks(ctx);
- ww_acquire_fini(&ctx->ww_ctx);
drm_modeset_acquire_fini(ctx);
kfree(ctx);
@@ -26,32 +26,21 @@
#include <drm/drm_modeset_lock.h>
-void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
- bool nolock, bool nonblock)
+void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx)
{
ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
INIT_LIST_HEAD(&ctx->locked);
- mutex_init(&ctx->mutex);
- ctx->nolock = nolock;
- ctx->nonblock = nonblock;
}
EXPORT_SYMBOL(drm_modeset_acquire_init);
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
{
- WARN_ON(ctx->contended);
- /*
- * NOTE: it is intentional that ww_acquire_fini() is not called
- * here.. due to the way lock handover works in drm_atomic
- */
- mutex_destroy(&ctx->mutex);
+ ww_acquire_fini(&ctx->ww_ctx);
}
EXPORT_SYMBOL(drm_modeset_acquire_fini);
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
{
- WARN_ON(ctx->contended);
- mutex_lock(&ctx->mutex);
while (!list_empty(&ctx->locked)) {
struct drm_modeset_lock *lock;
@@ -60,45 +49,31 @@ void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
drm_modeset_unlock(lock);
}
- mutex_unlock(&ctx->mutex);
}
EXPORT_SYMBOL(drm_modeset_drop_locks);
static int modeset_lock(struct drm_modeset_lock *lock,
- struct drm_modeset_acquire_ctx *ctx,
- bool interruptible, bool slow)
+ struct drm_modeset_acquire_ctx *ctx,
+ bool interruptible)
{
int ret;
- if (ctx->nolock)
- return 0;
-
- WARN_ON(ctx->frozen); /* all locks should be held by now! */
- WARN_ON(ctx->contended);
-
-retry:
if (interruptible) {
ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
- } else if (slow) {
- ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
- ret = 0;
} else {
ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
}
- if (!ret) {
- if (lock->atomic_pending) {
- /* some other pending update with dropped locks */
- ww_mutex_unlock(&lock->mutex);
- if (ctx->nonblock)
- return -EBUSY;
- wait_event(lock->event, !lock->atomic_pending);
- goto retry;
- }
- lock->atomic_pending = true;
- WARN_ON(!list_empty(&lock->head));
+
+ if (ret == 0) {
list_add(&lock->head, &ctx->locked);
} else if (ret == -EALREADY) {
- /* we already hold the lock.. this is fine */
+ /*
+ * We already hold the lock. This is only fine if it's the lock
+ * we've contended on.
+ */
+ WARN_ON(ctx->contended != lock);
+ ctx->contended = NULL;
+ list_add(&lock->head, &ctx->locked);
+
ret = 0;
} else if (ret == -EDEADLK) {
ctx->contended = lock;
@@ -109,18 +84,19 @@ retry:
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_modeset_lock *contended = ctx->contended;
-
- ctx->contended = NULL;
+ drm_modeset_drop_locks(ctx);
- if (WARN_ON(!contended))
- return;
+ ww_mutex_lock_slow(&ctx->contended->mutex, &ctx->ww_ctx);
+}
+EXPORT_SYMBOL(drm_modeset_backoff);
+void drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
+{
drm_modeset_drop_locks(ctx);
- modeset_lock(contended, ctx, false, true);
+ ww_mutex_lock_slow(&ctx->contended->mutex, &ctx->ww_ctx);
}
-EXPORT_SYMBOL(drm_modeset_backoff);
+EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
/**
* drm_modeset_lock - take modeset lock
@@ -136,7 +112,7 @@ int drm_modeset_lock(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx)
{
if (ctx)
- return modeset_lock(lock, ctx, false, false);
+ return modeset_lock(lock, ctx, false);
ww_mutex_lock(&lock->mutex, NULL);
return 0;
@@ -147,7 +123,7 @@ int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx)
{
if (ctx)
- return modeset_lock(lock, ctx, true, false);
+ return modeset_lock(lock, ctx, true);
return ww_mutex_lock_interruptible(&lock->mutex, NULL);
}
@@ -160,9 +136,7 @@ EXPORT_SYMBOL(drm_modeset_lock_interruptible);
void drm_modeset_unlock(struct drm_modeset_lock *lock)
{
list_del_init(&lock->head);
- lock->atomic_pending = false;
ww_mutex_unlock(&lock->mutex);
- wake_up_all(&lock->event);
}
EXPORT_SYMBOL(drm_modeset_unlock);
@@ -7990,7 +7990,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
connector->base.id, drm_get_connector_name(connector),
encoder->base.id, drm_get_encoder_name(encoder));
- drm_modeset_acquire_init(ctx, false, false);
+ drm_modeset_acquire_init(ctx);
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
@@ -8103,7 +8103,6 @@ fail_unlock:
}
drm_modeset_drop_locks(ctx);
- ww_acquire_fini(&ctx->ww_ctx);
drm_modeset_acquire_fini(ctx);
return false;
@@ -32,15 +32,6 @@ struct drm_modeset_acquire_ctx {
struct ww_acquire_ctx ww_ctx;
- bool nolock : 1;
- bool nonblock : 1;
-
- /* just for debugging, the context is 'frozen' in drm_atomic_check()
- * to catch anyone who might be trying to acquire a lock after it is
- * too late.
- */
- bool frozen : 1;
-
/* contended lock: if a lock is contended you should only call
* drm_modeset_backoff() which drops locks and slow-locks the
* contended lock.
@@ -49,16 +40,6 @@ struct drm_modeset_acquire_ctx {
/* list of 'struct drm_modeset_lock': */
struct list_head locked;
-
- /* currently simply for protecting against 'locked' list manipulation
- * between original thread calling atomic->end() and driver thread
- * calling back drm_atomic_commit_unlocked().
- *
- * Other spots are sufficiently synchronized by virtue of holding
- * the lock's ww_mutex. But during the lock/resource hand-over to the
- * driver thread (drop_locks()/grab_locks()), we cannot rely on this.
- */
- struct mutex mutex;
};
/**
@@ -78,42 +59,24 @@ struct drm_modeset_lock {
struct ww_mutex mutex;
/**
- * Are we busy (pending asynchronous/NONBLOCK update)? Any further
- * asynchronous update will return -EBUSY if it also needs to acquire
- * this lock. While a synchronous update will block until the pending
- * async update completes.
- *
- * Drivers must ensure the update is completed before sending vblank
- * event to userspace. Typically this just means don't send event
- * before drm_atomic_commit_unlocked() returns.
- */
- bool atomic_pending;
-
- /**
* Resources that are locked as part of an atomic update are added
* to a list (so we know what to unlock at the end).
*/
struct list_head head;
-
- /**
- * For waiting on atomic_pending locks, if not a NONBLOCK operation.
- */
- wait_queue_head_t event;
};
extern struct ww_class crtc_ww_class;
-void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
- bool nolock, bool nonblock);
+void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx);
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx);
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx);
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx);
+void drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx);
static inline void drm_modeset_lock_init(struct drm_modeset_lock *lock)
{
ww_mutex_init(&lock->mutex, &crtc_ww_class);
INIT_LIST_HEAD(&lock->head);
- init_waitqueue_head(&lock->event);
}
static inline void drm_modeset_lock_fini(struct drm_modeset_lock *lock)
@@ -511,7 +511,4 @@ struct drm_mode_destroy_dumb {
uint32_t handle;
};
-#define DRM_MODE_ATOMIC_NONBLOCK 0x0200
-#define DRM_MODE_ATOMIC_NOLOCK 0x8000 /* only used internally */
-
#endif
Still missing: Updated kerneldoc and integration into the docbook. Two further issues spotted in review: drm_modeset_backoff_interruptible() still takes the contended lock with the non-interruptible ww_mutex_lock_slow() and returns void, so it can never propagate -EINTR — it should return int and use ww_mutex_lock_slow_interruptible(); and both backoff helpers dropped the old WARN_ON(!contended) guard, so calling them when no lock is contended dereferences a NULL ctx->contended. Also, this isn't a full review yet. Should be squashed into Rob's patch. Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch> --- drivers/gpu/drm/drm_crtc.c | 4 +- drivers/gpu/drm/drm_modeset_lock.c | 72 ++++++++++++------------------------ drivers/gpu/drm/i915/intel_display.c | 3 +- include/drm/drm_modeset_lock.h | 41 +------------------- include/uapi/drm/drm_mode.h | 3 -- 5 files changed, 28 insertions(+), 95 deletions(-)