Message ID: 20240416010837.333694-3-zfigura@codeweavers.com
State: New
Series: NT synchronization primitive driver
On Mon, Apr 15, 2024 at 08:08:12PM -0500, Elizabeth Figura wrote:

> +	if (atomic_read(&sem->all_hint) > 0) {
> +		spin_lock(&dev->wait_all_lock);
> +		spin_lock_nest_lock(&sem->lock, &dev->wait_all_lock);
> +
> +		prev_count = sem->u.sem.count;
> +		ret = post_sem_state(sem, args);
> +		if (!ret) {
> +			try_wake_all_obj(dev, sem);
> +			try_wake_any_sem(sem);
> +		}
> +
> +		spin_unlock(&sem->lock);
> +		spin_unlock(&dev->wait_all_lock);
> +	} else {
> +		spin_lock(&sem->lock);
> +
> +		prev_count = sem->u.sem.count;
> +		ret = post_sem_state(sem, args);
> +		if (!ret)
> +			try_wake_any_sem(sem);
> +
> +		spin_unlock(&sem->lock);
> +	}
>
>  	if (!ret && put_user(prev_count, user_args))
>  		ret = -EFAULT;

vs.

> +	/* queue ourselves */
> +
> +	spin_lock(&dev->wait_all_lock);
> +
> +	for (i = 0; i < args.count; i++) {
> +		struct ntsync_q_entry *entry = &q->entries[i];
> +		struct ntsync_obj *obj = entry->obj;
> +
> +		atomic_inc(&obj->all_hint);
> +
> +		/*
> +		 * obj->all_waiters is protected by dev->wait_all_lock rather
> +		 * than obj->lock, so there is no need to acquire obj->lock
> +		 * here.
> +		 */
> +		list_add_tail(&entry->node, &obj->all_waiters);
> +	}

This looks racy, consider:

	atomic_read(all_hints) /* 0 */

				spin_lock(wait_all_lock)
				atomic_inc(all_hint) /* 1 */
				list_add_tail()

	spin_lock(sem->lock)
	/* try_wake_all_obj() missing */

I've not yet thought about if this is harmful or not, but if not, it
definitely needs a comment.

Anyway, I need a break, maybe more this evening.
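The lost wakeup in that interleaving can be replayed deterministically in
user space. The following stand-alone model is an illustration only: the
names are hypothetical, POSIX mutexes stand in for the kernel's spinlocks,
and two sequencing semaphores force exactly the schedule shown above.

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t wait_all_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sem_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int all_hint;
static int queued, woken;
static sem_t step1, step2;

/* models ntsync_sem_post(): the branch on all_hint has already been taken */
static void *poster(void *arg)
{
	int hint = atomic_load(&all_hint);	/* reads 0: fast path chosen */

	sem_post(&step1);			/* let the waiter queue itself... */
	sem_wait(&step2);			/* ...before we take sem_lock */

	pthread_mutex_lock(&sem_lock);
	if (hint > 0)
		woken = 1;			/* try_wake_all_obj() would run here */
	pthread_mutex_unlock(&sem_lock);
	return NULL;
}

/* models the "queue ourselves" loop of ntsync_wait_all() */
static void *waiter(void *arg)
{
	sem_wait(&step1);
	pthread_mutex_lock(&wait_all_lock);
	atomic_fetch_add(&all_hint, 1);		/* now 1, but the poster already read 0 */
	queued = 1;				/* list_add_tail() */
	pthread_mutex_unlock(&wait_all_lock);
	sem_post(&step2);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	sem_init(&step1, 0, 0);
	sem_init(&step2, 0, 0);
	pthread_create(&a, NULL, poster, NULL);
	pthread_create(&b, NULL, waiter, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* prints queued=1 woken=0: the queued wait-all sleeper is never woken */
	printf("queued=%d woken=%d\n", queued, woken);
	return 0;
}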
On Wednesday, 17 April 2024 06:37:03 CDT Peter Zijlstra wrote:
> On Mon, Apr 15, 2024 at 08:08:12PM -0500, Elizabeth Figura wrote:
> > +	if (atomic_read(&sem->all_hint) > 0) {
> > +		spin_lock(&dev->wait_all_lock);
> > +		spin_lock_nest_lock(&sem->lock, &dev->wait_all_lock);
> > +
> > +		prev_count = sem->u.sem.count;
> > +		ret = post_sem_state(sem, args);
> > +		if (!ret) {
> > +			try_wake_all_obj(dev, sem);
> > +			try_wake_any_sem(sem);
> > +		}
> > +
> > +		spin_unlock(&sem->lock);
> > +		spin_unlock(&dev->wait_all_lock);
> > +	} else {
> > +		spin_lock(&sem->lock);
> > +
> > +		prev_count = sem->u.sem.count;
> > +		ret = post_sem_state(sem, args);
> > +		if (!ret)
> > +			try_wake_any_sem(sem);
> > +
> > +		spin_unlock(&sem->lock);
> > +	}
> >
> >  	if (!ret && put_user(prev_count, user_args))
> >  		ret = -EFAULT;
> 
> vs.
> 
> > +	/* queue ourselves */
> > +
> > +	spin_lock(&dev->wait_all_lock);
> > +
> > +	for (i = 0; i < args.count; i++) {
> > +		struct ntsync_q_entry *entry = &q->entries[i];
> > +		struct ntsync_obj *obj = entry->obj;
> > +
> > +		atomic_inc(&obj->all_hint);
> > +
> > +		/*
> > +		 * obj->all_waiters is protected by dev->wait_all_lock rather
> > +		 * than obj->lock, so there is no need to acquire obj->lock
> > +		 * here.
> > +		 */
> > +		list_add_tail(&entry->node, &obj->all_waiters);
> > +	}
> 
> This looks racy, consider:
> 
> 	atomic_read(all_hints) /* 0 */
> 
> 				spin_lock(wait_all_lock)
> 				atomic_inc(all_hint) /* 1 */
> 				list_add_tail()
> 
> 	spin_lock(sem->lock)
> 	/* try_wake_all_obj() missing */
> 
> I've not yet thought about if this is harmful or not, but if not, it
> definitely needs a comment.
> 
> Anyway, I need a break, maybe more this evening.

Ach. I wrote this with the idea that the race isn't meaningful, but
looking at it again you're right—there is a harmful race here.

I think it should be fixable by moving the atomic_read inside the lock,
though.
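In user-space terms, "moving the atomic_read inside the lock" amounts to a
double-checked pattern along the following lines. This is only a sketch of
the idea, with pthreads mutexes standing in for the kernel locks and
hypothetical names; it is not the fix that was eventually posted. The idea
works because a wait-all waiter increments the hint before its own wake-all
pass takes the object lock, so whichever of the two critical sections comes
second observes the other's update.

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t wait_all_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sem_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int all_hint;

static void post_sem(void)
{
	pthread_mutex_lock(&sem_lock);
	if (atomic_load(&all_hint) > 0) {
		/*
		 * Lock order is wait_all_lock first, so drop the object
		 * lock and retake both in the right order before waking
		 * the wait-all sleepers.
		 */
		pthread_mutex_unlock(&sem_lock);
		pthread_mutex_lock(&wait_all_lock);
		pthread_mutex_lock(&sem_lock);
		/* post_sem_state(); try_wake_all_obj(); try_wake_any_sem(); */
		pthread_mutex_unlock(&sem_lock);
		pthread_mutex_unlock(&wait_all_lock);
	} else {
		/* post_sem_state(); try_wake_any_sem(); */
		pthread_mutex_unlock(&sem_lock);
	}
}

int main(void)
{
	post_sem();
	return 0;
}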
On Wed, Apr 17, 2024 at 03:03:05PM -0500, Elizabeth Figura wrote:

> Ach. I wrote this with the idea that the race isn't meaningful, but
> looking at it again you're right—there is a harmful race here.
> 
> I think it should be fixable by moving the atomic_read inside the lock,
> though.

Right, I've ended up with the (as yet untested) below. I'll see if I can
find time later to actually test things.

---
--- a/drivers/misc/ntsync.c
+++ b/drivers/misc/ntsync.c
@@ -18,6 +18,7 @@
 #include <linux/sched/signal.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <uapi/linux/ntsync.h>
 
 #define NTSYNC_NAME	"ntsync"
@@ -43,6 +44,7 @@ enum ntsync_type {
 
 struct ntsync_obj {
 	spinlock_t lock;
+	int dev_locked;
 
 	enum ntsync_type type;
 
@@ -132,7 +134,7 @@ struct ntsync_device {
 	 * wait_all_lock is taken first whenever multiple objects must be locked
 	 * at the same time.
 	 */
-	spinlock_t wait_all_lock;
+	struct mutex wait_all_lock;
 
 	struct file *file;
 };
@@ -157,6 +159,56 @@ static bool is_signaled(struct ntsync_ob
 }
 
 /*
+ * XXX write coherent comment on the locking
+ */
+
+static void dev_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
+{
+	lockdep_assert_held(&dev->wait_all_lock);
+	WARN_ON_ONCE(obj->dev != dev);
+	spin_lock(&obj->lock);
+	obj->dev_locked = 1;
+	spin_unlock(&obj->lock);
+}
+
+static void dev_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
+{
+	lockdep_assert_held(&dev->wait_all_lock);
+	WARN_ON_ONCE(obj->dev != dev);
+	spin_lock(&obj->lock);
+	obj->dev_locked = 0;
+	spin_unlock(&obj->lock);
+}
+
+static void obj_lock(struct ntsync_obj *obj)
+{
+	struct ntsync_device *dev = obj->dev;
+
+	for (;;) {
+		spin_lock(&obj->lock);
+		if (likely(!obj->dev_locked))
+			break;
+
+		spin_unlock(&obj->lock);
+		mutex_lock(&dev->wait_all_lock);
+		spin_lock(&obj->lock);
+		/*
+		 * obj->dev_locked should be set and released under the same
+		 * wait_all_lock section, since we now own this lock, it should
+		 * be clear.
+		 */
+		WARN_ON_ONCE(obj->dev_locked);
+		spin_unlock(&obj->lock);
+		mutex_unlock(&dev->wait_all_lock);
+	}
+}
+
+static void obj_unlock(struct ntsync_obj *obj)
+{
+	spin_unlock(&obj->lock);
+}
+
+/*
  * "locked_obj" is an optional pointer to an object which is already locked and
  * should not be locked again. This is necessary so that changing an object's
  * state and waking it can be a single atomic operation.
@@ -175,7 +227,7 @@ static void try_wake_all(struct ntsync_d
 
 	for (i = 0; i < count; i++) {
 		if (q->entries[i].obj != locked_obj)
-			spin_lock_nest_lock(&q->entries[i].obj->lock, &dev->wait_all_lock);
+			dev_lock_obj(dev, q->entries[i].obj);
 	}
 
 	for (i = 0; i < count; i++) {
@@ -211,7 +263,7 @@ static void try_wake_all(struct ntsync_d
 
 	for (i = 0; i < count; i++) {
 		if (q->entries[i].obj != locked_obj)
-			spin_unlock(&q->entries[i].obj->lock);
+			dev_unlock_obj(dev, q->entries[i].obj);
 	}
 }
 
@@ -231,6 +283,7 @@ static void try_wake_any_sem(struct ntsy
 	struct ntsync_q_entry *entry;
 
 	lockdep_assert_held(&sem->lock);
+	WARN_ON_ONCE(sem->type != NTSYNC_TYPE_SEM);
 
 	list_for_each_entry(entry, &sem->any_waiters, node) {
 		struct ntsync_q *q = entry->q;
@@ -251,6 +304,7 @@ static void try_wake_any_mutex(struct nt
 	struct ntsync_q_entry *entry;
 
 	lockdep_assert_held(&mutex->lock);
+	WARN_ON_ONCE(mutex->type != NTSYNC_TYPE_MUTEX);
 
 	list_for_each_entry(entry, &mutex->any_waiters, node) {
 		struct ntsync_q *q = entry->q;
@@ -302,6 +356,7 @@ static int post_sem_state(struct ntsync_
 	__u32 sum;
 
 	lockdep_assert_held(&sem->lock);
+	WARN_ON_ONCE(sem->type != NTSYNC_TYPE_SEM);
 
 	if (check_add_overflow(sem->u.sem.count, count, &sum) ||
 	    sum > sem->u.sem.max)
@@ -316,6 +371,7 @@ static int ntsync_sem_post(struct ntsync
 	struct ntsync_device *dev = sem->dev;
 	__u32 __user *user_args = argp;
 	__u32 prev_count;
+	bool all = false;
 	__u32 args;
 	int ret;
 
@@ -325,28 +381,27 @@ static int ntsync_sem_post(struct ntsync
 	if (sem->type != NTSYNC_TYPE_SEM)
 		return -EINVAL;
 
-	if (atomic_read(&sem->all_hint) > 0) {
-		spin_lock(&dev->wait_all_lock);
-		spin_lock_nest_lock(&sem->lock, &dev->wait_all_lock);
-
-		prev_count = sem->u.sem.count;
-		ret = post_sem_state(sem, args);
-		if (!ret) {
+	obj_lock(sem);
+	all = atomic_read(&sem->all_hint);
+	if (unlikely(all)) {
+		obj_unlock(sem);
+		mutex_lock(&dev->wait_all_lock);
+		dev_lock_obj(dev, sem);
+	}
+
+	prev_count = sem->u.sem.count;
+	ret = post_sem_state(sem, args);
+	if (!ret) {
+		if (all)
 			try_wake_all_obj(dev, sem);
-			try_wake_any_sem(sem);
-		}
+		try_wake_any_sem(sem);
+	}
 
-		spin_unlock(&sem->lock);
-		spin_unlock(&dev->wait_all_lock);
+	if (all) {
+		dev_unlock_obj(dev, sem);
+		mutex_unlock(&dev->wait_all_lock);
 	} else {
-		spin_lock(&sem->lock);
-
-		prev_count = sem->u.sem.count;
-		ret = post_sem_state(sem, args);
-		if (!ret)
-			try_wake_any_sem(sem);
-
-		spin_unlock(&sem->lock);
+		obj_unlock(sem);
 	}
 
 	if (!ret && put_user(prev_count, user_args))
@@ -376,6 +431,7 @@ static int ntsync_mutex_unlock(struct nt
 	struct ntsync_mutex_args __user *user_args = argp;
 	struct ntsync_device *dev = mutex->dev;
 	struct ntsync_mutex_args args;
+	bool all = false;
 	__u32 prev_count;
 	int ret;
 
@@ -387,28 +443,27 @@ static int ntsync_mutex_unlock(struct nt
 	if (mutex->type != NTSYNC_TYPE_MUTEX)
 		return -EINVAL;
 
-	if (atomic_read(&mutex->all_hint) > 0) {
-		spin_lock(&dev->wait_all_lock);
-		spin_lock_nest_lock(&mutex->lock, &dev->wait_all_lock);
-
-		prev_count = mutex->u.mutex.count;
-		ret = unlock_mutex_state(mutex, &args);
-		if (!ret) {
+	obj_lock(mutex);
+	all = atomic_read(&mutex->all_hint);
+	if (unlikely(all)) {
+		obj_unlock(mutex);
+		mutex_lock(&dev->wait_all_lock);
+		dev_lock_obj(dev, mutex);
+	}
+
+	prev_count = mutex->u.mutex.count;
+	ret = unlock_mutex_state(mutex, &args);
+	if (!ret) {
+		if (all)
 			try_wake_all_obj(dev, mutex);
-			try_wake_any_mutex(mutex);
-		}
+		try_wake_any_mutex(mutex);
+	}
 
-		spin_unlock(&mutex->lock);
-		spin_unlock(&dev->wait_all_lock);
+	if (all) {
+		dev_unlock_obj(dev, mutex);
+		mutex_unlock(&dev->wait_all_lock);
 	} else {
-		spin_lock(&mutex->lock);
-
-		prev_count = mutex->u.mutex.count;
-		ret = unlock_mutex_state(mutex, &args);
-		if (!ret)
-			try_wake_any_mutex(mutex);
-
-		spin_unlock(&mutex->lock);
+		obj_unlock(mutex);
 	}
 
 	if (!ret && put_user(prev_count, &user_args->count))
@@ -437,6 +492,7 @@ static int kill_mutex_state(struct ntsyn
 static int ntsync_mutex_kill(struct ntsync_obj *mutex, void __user *argp)
 {
 	struct ntsync_device *dev = mutex->dev;
+	bool all = false;
 	__u32 owner;
 	int ret;
 
@@ -448,26 +504,26 @@ static int ntsync_mutex_kill(struct ntsy
 	if (mutex->type != NTSYNC_TYPE_MUTEX)
 		return -EINVAL;
 
-	if (atomic_read(&mutex->all_hint) > 0) {
-		spin_lock(&dev->wait_all_lock);
-		spin_lock_nest_lock(&mutex->lock, &dev->wait_all_lock);
+	obj_lock(mutex);
+	all = atomic_read(&mutex->all_hint);
+	if (unlikely(all)) {
+		obj_unlock(mutex);
+		mutex_lock(&dev->wait_all_lock);
+		dev_lock_obj(dev, mutex);
+	}
 
-		ret = kill_mutex_state(mutex, owner);
-		if (!ret) {
+	ret = kill_mutex_state(mutex, owner);
+	if (!ret) {
+		if (all)
 			try_wake_all_obj(dev, mutex);
-			try_wake_any_mutex(mutex);
-		}
-
-		spin_unlock(&mutex->lock);
-		spin_unlock(&dev->wait_all_lock);
+		try_wake_any_mutex(mutex);
+	}
 
+	if (all) {
+		dev_unlock_obj(dev, mutex);
+		mutex_unlock(&dev->wait_all_lock);
 	} else {
-		spin_lock(&mutex->lock);
-
-		ret = kill_mutex_state(mutex, owner);
-		if (!ret)
-			try_wake_any_mutex(mutex);
-
-		spin_unlock(&mutex->lock);
+		obj_unlock(mutex);
 	}
 
 	return ret;
@@ -477,35 +533,35 @@ static int ntsync_event_set(struct ntsyn
 {
 	struct ntsync_device *dev = event->dev;
 	__u32 prev_state;
+	bool all = false;
 
 	if (event->type != NTSYNC_TYPE_EVENT)
 		return -EINVAL;
 
-	if (atomic_read(&event->all_hint) > 0) {
-		spin_lock(&dev->wait_all_lock);
-		spin_lock_nest_lock(&event->lock, &dev->wait_all_lock);
+	obj_lock(event);
+	all = atomic_read(&event->all_hint);
+	if (unlikely(all)) {
+		obj_unlock(event);
+		mutex_lock(&dev->wait_all_lock);
+		dev_lock_obj(dev, event);
+	}
 
-		prev_state = event->u.event.signaled;
-		event->u.event.signaled = true;
+	prev_state = event->u.event.signaled;
+	event->u.event.signaled = true;
+	if (all)
 		try_wake_all_obj(dev, event);
-		try_wake_any_event(event);
-		if (pulse)
-			event->u.event.signaled = false;
-
-		spin_unlock(&event->lock);
-		spin_unlock(&dev->wait_all_lock);
+	try_wake_any_event(event);
+	if (pulse)
+		event->u.event.signaled = false;
+
+	if (all) {
+		dev_unlock_obj(dev, event);
+		mutex_unlock(&dev->wait_all_lock);
 	} else {
-		spin_lock(&event->lock);
-
-		prev_state = event->u.event.signaled;
-		event->u.event.signaled = true;
-		try_wake_any_event(event);
-		if (pulse)
-			event->u.event.signaled = false;
-
-		spin_unlock(&event->lock);
+		obj_unlock(event);
 	}
+
 	if (put_user(prev_state, (__u32 __user *)argp))
 		return -EFAULT;
 
@@ -984,7 +1040,7 @@ static int ntsync_wait_all(struct ntsync
 
 	/* queue ourselves */
 
-	spin_lock(&dev->wait_all_lock);
+	mutex_lock(&dev->wait_all_lock);
 
 	for (i = 0; i < args.count; i++) {
 		struct ntsync_q_entry *entry = &q->entries[i];
@@ -1004,7 +1060,7 @@ static int ntsync_wait_all(struct ntsync
 
 	try_wake_all(dev, q, NULL);
 
-	spin_unlock(&dev->wait_all_lock);
+	mutex_unlock(&dev->wait_all_lock);
 
 	/* sleep */
 
@@ -1012,7 +1068,7 @@ static int ntsync_wait_all(struct ntsync
 
 	/* and finally, unqueue */
 
-	spin_lock(&dev->wait_all_lock);
+	mutex_lock(&dev->wait_all_lock);
 
 	for (i = 0; i < args.count; i++) {
 		struct ntsync_q_entry *entry = &q->entries[i];
@@ -1029,7 +1085,7 @@ static int ntsync_wait_all(struct ntsync
 		put_obj(obj);
 	}
 
-	spin_unlock(&dev->wait_all_lock);
+	mutex_unlock(&dev->wait_all_lock);
 
 	signaled = atomic_read(&q->signaled);
 	if (signaled != -1) {
@@ -1056,7 +1112,7 @@ static int ntsync_char_open(struct inode
 	if (!dev)
 		return -ENOMEM;
 
-	spin_lock_init(&dev->wait_all_lock);
+	mutex_init(&dev->wait_all_lock);
 
 	file->private_data = dev;
 	dev->file = file;
On Thu, Apr 18, 2024 at 11:35:11AM +0200, Peter Zijlstra wrote:
> On Wed, Apr 17, 2024 at 03:03:05PM -0500, Elizabeth Figura wrote:
> 
> > Ach. I wrote this with the idea that the race isn't meaningful, but
> > looking at it again you're right—there is a harmful race here.
> > 
> > I think it should be fixable by moving the atomic_read inside the lock,
> > though.
> 
> Right, I've ended up with the (as yet untested) below. I'll see if I can
> find time later to actually test things.

Latest hackery... I tried testing this but I'm not having luck using the
patched wine as per the other email.

---
--- a/drivers/misc/ntsync.c
+++ b/drivers/misc/ntsync.c
@@ -18,6 +18,7 @@
 #include <linux/sched/signal.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <uapi/linux/ntsync.h>
 
 #define NTSYNC_NAME	"ntsync"
@@ -43,6 +44,7 @@ enum ntsync_type {
 
 struct ntsync_obj {
 	spinlock_t lock;
+	int dev_locked;
 
 	enum ntsync_type type;
 
@@ -132,14 +134,107 @@ struct ntsync_device {
 	 * wait_all_lock is taken first whenever multiple objects must be locked
 	 * at the same time.
 	 */
-	spinlock_t wait_all_lock;
+	struct mutex wait_all_lock;
 
 	struct file *file;
 };
 
+/*
+ * Single objects are locked using obj->lock.
+ *
+ * Multiple objects are 'locked' while holding dev->wait_all_lock to avoid lock
+ * order issues. In this case however, single objects are not locked by holding
+ * obj->lock, but by setting obj->dev_locked.
+ *
+ * This means that in order to lock a single object, the sequence is slightly
+ * more complicated than usual. Specifically it needs to check obj->dev_locked
+ * after acquiring obj->lock, if set, it needs to drop the lock and acquire
+ * dev->wait_all_lock in order to serialize against the multi-object operation.
+ */
+
+static void dev_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
+{
+	lockdep_assert_held(&dev->wait_all_lock);
+	lockdep_assert(obj->dev == dev);
+	spin_lock(&obj->lock);
+	/*
+	 * By setting obj->dev_locked inside obj->lock, it is ensured that
+	 * anyone holding obj->lock must see the value.
+	 */
+	obj->dev_locked = 1;
+	spin_unlock(&obj->lock);
+}
+
+static void dev_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
+{
+	lockdep_assert_held(&dev->wait_all_lock);
+	lockdep_assert(obj->dev == dev);
+	spin_lock(&obj->lock);
+	obj->dev_locked = 0;
+	spin_unlock(&obj->lock);
+}
+
+static void obj_lock(struct ntsync_obj *obj)
+{
+	struct ntsync_device *dev = obj->dev;
+
+	for (;;) {
+		spin_lock(&obj->lock);
+		if (likely(!obj->dev_locked))
+			break;
+
+		spin_unlock(&obj->lock);
+		mutex_lock(&dev->wait_all_lock);
+		spin_lock(&obj->lock);
+		/*
+		 * obj->dev_locked should be set and released under the same
+		 * wait_all_lock section, since we now own this lock, it should
+		 * be clear.
+		 */
+		lockdep_assert(!obj->dev_locked);
+		spin_unlock(&obj->lock);
+		mutex_unlock(&dev->wait_all_lock);
	}
+}
+
+static void obj_unlock(struct ntsync_obj *obj)
+{
+	spin_unlock(&obj->lock);
+}
+
+static bool ntsync_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
+{
+	bool all;
+
+	obj_lock(obj);
+	all = atomic_read(&obj->all_hint);
+	if (unlikely(all)) {
+		obj_unlock(obj);
+		mutex_lock(&dev->wait_all_lock);
+		dev_lock_obj(dev, obj);
+	}
+
+	return all;
+}
+
+static void ntsync_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj, bool all)
+{
+	if (all) {
+		dev_unlock_obj(dev, obj);
+		mutex_unlock(&dev->wait_all_lock);
+	} else {
+		obj_unlock(obj);
+	}
+}
+
+#define ntsync_assert_held(obj) \
+	lockdep_assert((lockdep_is_held(&(obj)->lock) != LOCK_STATE_NOT_HELD) || \
+		       ((lockdep_is_held(&(obj)->dev->wait_all_lock) != LOCK_STATE_NOT_HELD) && \
+			(obj)->dev_locked))
+
 static bool is_signaled(struct ntsync_obj *obj, __u32 owner)
 {
-	lockdep_assert_held(&obj->lock);
+	ntsync_assert_held(obj);
 
 	switch (obj->type) {
 	case NTSYNC_TYPE_SEM:
@@ -171,11 +266,11 @@ static void try_wake_all(struct ntsync_d
 	lockdep_assert_held(&dev->wait_all_lock);
 
 	if (locked_obj)
-		lockdep_assert_held(&locked_obj->lock);
+		lockdep_assert(locked_obj->dev_locked);
 
 	for (i = 0; i < count; i++) {
 		if (q->entries[i].obj != locked_obj)
-			spin_lock_nest_lock(&q->entries[i].obj->lock, &dev->wait_all_lock);
+			dev_lock_obj(dev, q->entries[i].obj);
 	}
 
 	for (i = 0; i < count; i++) {
@@ -211,7 +306,7 @@ static void try_wake_all(struct ntsync_d
 
 	for (i = 0; i < count; i++) {
 		if (q->entries[i].obj != locked_obj)
-			spin_unlock(&q->entries[i].obj->lock);
+			dev_unlock_obj(dev, q->entries[i].obj);
 	}
 }
 
@@ -220,7 +315,7 @@ static void try_wake_all_obj(struct ntsy
 	struct ntsync_q_entry *entry;
 
 	lockdep_assert_held(&dev->wait_all_lock);
-	lockdep_assert_held(&obj->lock);
+	lockdep_assert(obj->dev_locked);
 
 	list_for_each_entry(entry, &obj->all_waiters, node)
 		try_wake_all(dev, entry->q, obj);
@@ -230,7 +325,8 @@ static void try_wake_any_sem(struct ntsy
 {
 	struct ntsync_q_entry *entry;
 
-	lockdep_assert_held(&sem->lock);
+	ntsync_assert_held(sem);
+	lockdep_assert(sem->type == NTSYNC_TYPE_SEM);
 
 	list_for_each_entry(entry, &sem->any_waiters, node) {
 		struct ntsync_q *q = entry->q;
@@ -250,7 +346,8 @@ static void try_wake_any_mutex(struct nt
 {
 	struct ntsync_q_entry *entry;
 
-	lockdep_assert_held(&mutex->lock);
+	ntsync_assert_held(mutex);
+	lockdep_assert(mutex->type == NTSYNC_TYPE_MUTEX);
 
 	list_for_each_entry(entry, &mutex->any_waiters, node) {
 		struct ntsync_q *q = entry->q;
@@ -276,7 +373,8 @@ static void try_wake_any_event(struct nt
 {
 	struct ntsync_q_entry *entry;
 
-	lockdep_assert_held(&event->lock);
+	ntsync_assert_held(event);
+	lockdep_assert(event->type == NTSYNC_TYPE_EVENT);
 
 	list_for_each_entry(entry, &event->any_waiters, node) {
 		struct ntsync_q *q = entry->q;
@@ -302,6 +400,7 @@ static int post_sem_state(struct ntsync_
 	__u32 sum;
 
 	lockdep_assert_held(&sem->lock);
+	lockdep_assert(sem->type == NTSYNC_TYPE_SEM);
 
 	if (check_add_overflow(sem->u.sem.count, count, &sum) ||
 	    sum > sem->u.sem.max)
@@ -317,6 +416,7 @@ static int ntsync_sem_post(struct ntsync
 	__u32 __user *user_args = argp;
 	__u32 prev_count;
 	__u32 args;
+	bool all;
 	int ret;
 
 	if (copy_from_user(&args, argp, sizeof(args)))
@@ -325,30 +425,18 @@ static int ntsync_sem_post(struct ntsync
 	if (sem->type != NTSYNC_TYPE_SEM)
 		return -EINVAL;
 
-	if (atomic_read(&sem->all_hint) > 0) {
-		spin_lock(&dev->wait_all_lock);
-		spin_lock_nest_lock(&sem->lock, &dev->wait_all_lock);
-
-		prev_count = sem->u.sem.count;
-		ret = post_sem_state(sem, args);
-		if (!ret) {
-			try_wake_all_obj(dev, sem);
-			try_wake_any_sem(sem);
-		}
-
-		spin_unlock(&sem->lock);
-		spin_unlock(&dev->wait_all_lock);
-	} else {
-		spin_lock(&sem->lock);
+	all = ntsync_lock_obj(dev, sem);
 
-		prev_count = sem->u.sem.count;
-		ret = post_sem_state(sem, args);
-		if (!ret)
-			try_wake_any_sem(sem);
-
-		spin_unlock(&sem->lock);
+	prev_count = sem->u.sem.count;
+	ret = post_sem_state(sem, args);
+	if (!ret) {
+		if (all)
+			try_wake_all_obj(dev, sem);
+		try_wake_any_sem(sem);
 	}
 
+	ntsync_unlock_obj(dev, sem, all);
+
 	if (!ret && put_user(prev_count, user_args))
 		ret = -EFAULT;
 
@@ -377,6 +465,7 @@ static int ntsync_mutex_unlock(struct nt
 	struct ntsync_device *dev = mutex->dev;
 	struct ntsync_mutex_args args;
 	__u32 prev_count;
+	bool all;
 	int ret;
 
 	if (copy_from_user(&args, argp, sizeof(args)))
@@ -387,30 +476,18 @@ static int ntsync_mutex_unlock(struct nt
 	if (mutex->type != NTSYNC_TYPE_MUTEX)
 		return -EINVAL;
 
-	if (atomic_read(&mutex->all_hint) > 0) {
-		spin_lock(&dev->wait_all_lock);
-		spin_lock_nest_lock(&mutex->lock, &dev->wait_all_lock);
-
-		prev_count = mutex->u.mutex.count;
-		ret = unlock_mutex_state(mutex, &args);
-		if (!ret) {
-			try_wake_all_obj(dev, mutex);
-			try_wake_any_mutex(mutex);
-		}
+	all = ntsync_lock_obj(dev, mutex);
 
-		spin_unlock(&mutex->lock);
-		spin_unlock(&dev->wait_all_lock);
-	} else {
-		spin_lock(&mutex->lock);
-
-		prev_count = mutex->u.mutex.count;
-		ret = unlock_mutex_state(mutex, &args);
-		if (!ret)
-			try_wake_any_mutex(mutex);
-
-		spin_unlock(&mutex->lock);
+	prev_count = mutex->u.mutex.count;
+	ret = unlock_mutex_state(mutex, &args);
+	if (!ret) {
+		if (all)
+			try_wake_all_obj(dev, mutex);
+		try_wake_any_mutex(mutex);
 	}
 
+	ntsync_unlock_obj(dev, mutex, all);
+
 	if (!ret && put_user(prev_count, &user_args->count))
 		ret = -EFAULT;
 
@@ -438,6 +515,7 @@ static int ntsync_mutex_kill(struct ntsy
 {
 	struct ntsync_device *dev = mutex->dev;
 	__u32 owner;
+	bool all;
 	int ret;
 
 	if (get_user(owner, (__u32 __user *)argp))
@@ -448,28 +526,17 @@ static int ntsync_mutex_kill(struct ntsy
 	if (mutex->type != NTSYNC_TYPE_MUTEX)
 		return -EINVAL;
 
-	if (atomic_read(&mutex->all_hint) > 0) {
-		spin_lock(&dev->wait_all_lock);
-		spin_lock_nest_lock(&mutex->lock, &dev->wait_all_lock);
+	all = ntsync_lock_obj(dev, mutex);
 
-		ret = kill_mutex_state(mutex, owner);
-		if (!ret) {
+	ret = kill_mutex_state(mutex, owner);
+	if (!ret) {
+		if (all)
 			try_wake_all_obj(dev, mutex);
-			try_wake_any_mutex(mutex);
-		}
-
-		spin_unlock(&mutex->lock);
-		spin_unlock(&dev->wait_all_lock);
-	} else {
-		spin_lock(&mutex->lock);
-
-		ret = kill_mutex_state(mutex, owner);
-		if (!ret)
-			try_wake_any_mutex(mutex);
-
-		spin_unlock(&mutex->lock);
+		try_wake_any_mutex(mutex);
 	}
 
+	ntsync_unlock_obj(dev, mutex, all);
+
 	return ret;
 }
 
@@ -477,34 +544,22 @@ static int ntsync_event_set(struct ntsyn
 {
 	struct ntsync_device *dev = event->dev;
 	__u32 prev_state;
+	bool all;
 
 	if (event->type != NTSYNC_TYPE_EVENT)
 		return -EINVAL;
 
-	if (atomic_read(&event->all_hint) > 0) {
-		spin_lock(&dev->wait_all_lock);
-		spin_lock_nest_lock(&event->lock, &dev->wait_all_lock);
+	all = ntsync_lock_obj(dev, event);
 
-		prev_state = event->u.event.signaled;
-		event->u.event.signaled = true;
+	prev_state = event->u.event.signaled;
+	event->u.event.signaled = true;
+	if (all)
 		try_wake_all_obj(dev, event);
-		try_wake_any_event(event);
-		if (pulse)
-			event->u.event.signaled = false;
+	try_wake_any_event(event);
+	if (pulse)
+		event->u.event.signaled = false;
 
-		spin_unlock(&event->lock);
-		spin_unlock(&dev->wait_all_lock);
-	} else {
-		spin_lock(&event->lock);
-
-		prev_state = event->u.event.signaled;
-		event->u.event.signaled = true;
-		try_wake_any_event(event);
-		if (pulse)
-			event->u.event.signaled = false;
-
-		spin_unlock(&event->lock);
-	}
+	ntsync_unlock_obj(dev, event, all);
 
 	if (put_user(prev_state, (__u32 __user *)argp))
 		return -EFAULT;
 
@@ -984,7 +1039,7 @@ static int ntsync_wait_all(struct ntsync
 
 	/* queue ourselves */
 
-	spin_lock(&dev->wait_all_lock);
+	mutex_lock(&dev->wait_all_lock);
 
 	for (i = 0; i < args.count; i++) {
 		struct ntsync_q_entry *entry = &q->entries[i];
@@ -1004,7 +1059,7 @@ static int ntsync_wait_all(struct ntsync
 
 	try_wake_all(dev, q, NULL);
 
-	spin_unlock(&dev->wait_all_lock);
+	mutex_unlock(&dev->wait_all_lock);
 
 	/* sleep */
 
@@ -1012,7 +1067,7 @@ static int ntsync_wait_all(struct ntsync
 
 	/* and finally, unqueue */
 
-	spin_lock(&dev->wait_all_lock);
+	mutex_lock(&dev->wait_all_lock);
 
 	for (i = 0; i < args.count; i++) {
 		struct ntsync_q_entry *entry = &q->entries[i];
@@ -1029,7 +1084,7 @@ static int ntsync_wait_all(struct ntsync
 		put_obj(obj);
 	}
 
-	spin_unlock(&dev->wait_all_lock);
+	mutex_unlock(&dev->wait_all_lock);
 
 	signaled = atomic_read(&q->signaled);
 	if (signaled != -1) {
@@ -1056,7 +1111,7 @@ static int ntsync_char_open(struct inode
 	if (!dev)
 		return -ENOMEM;
 
-	spin_lock_init(&dev->wait_all_lock);
+	mutex_init(&dev->wait_all_lock);
 
 	file->private_data = dev;
 	dev->file = file;
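The dev_locked handshake above can also be modeled in user space. Below is a
minimal stand-alone sketch, with pthreads mutexes standing in for the
kernel's spinlock and mutex (hypothetical names, illustration only); the
retry loop simply waits for the multi-object section to finish rather than
re-checking under the object lock as the kernel version does:

#include <pthread.h>
#include <stdio.h>

struct dev {
	pthread_mutex_t wait_all_lock;	/* models dev->wait_all_lock */
};

struct obj {
	pthread_mutex_t lock;		/* models obj->lock */
	int dev_locked;			/* models obj->dev_locked */
	struct dev *dev;
};

/* "lock" an object on behalf of a multi-object operation;
 * the caller must hold dev->wait_all_lock */
static void dev_lock_obj(struct dev *dev, struct obj *obj)
{
	(void)dev;
	pthread_mutex_lock(&obj->lock);
	obj->dev_locked = 1;		/* anyone taking obj->lock will see this */
	pthread_mutex_unlock(&obj->lock);
}

static void dev_unlock_obj(struct dev *dev, struct obj *obj)
{
	(void)dev;
	pthread_mutex_lock(&obj->lock);
	obj->dev_locked = 0;
	pthread_mutex_unlock(&obj->lock);
}

/* single-object lock: defer to the multi-object owner if dev_locked is set */
static void obj_lock(struct obj *obj)
{
	struct dev *dev = obj->dev;

	for (;;) {
		pthread_mutex_lock(&obj->lock);
		if (!obj->dev_locked)
			break;		/* return with obj->lock held */

		pthread_mutex_unlock(&obj->lock);
		/* wait until the wait_all_lock section ends, then retry */
		pthread_mutex_lock(&dev->wait_all_lock);
		pthread_mutex_unlock(&dev->wait_all_lock);
	}
}

static void obj_unlock(struct obj *obj)
{
	pthread_mutex_unlock(&obj->lock);
}

int main(void)
{
	struct dev d = { .wait_all_lock = PTHREAD_MUTEX_INITIALIZER };
	struct obj o = { .lock = PTHREAD_MUTEX_INITIALIZER, .dev = &d };

	/* single-object path */
	obj_lock(&o);
	obj_unlock(&o);

	/* multi-object path */
	pthread_mutex_lock(&d.wait_all_lock);
	dev_lock_obj(&d, &o);
	dev_unlock_obj(&d, &o);
	pthread_mutex_unlock(&d.wait_all_lock);

	puts("ok");
	return 0;
}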
This is similar to NTSYNC_IOC_WAIT_ANY, but waits until all of the
objects are simultaneously signaled, and then acquires all of them as
a single atomic operation.

Signed-off-by: Elizabeth Figura <zfigura@codeweavers.com>
---
 drivers/misc/ntsync.c       | 243 ++++++++++++++++++++++++++++++++++--
 include/uapi/linux/ntsync.h |   1 +
 2 files changed, 236 insertions(+), 8 deletions(-)

diff --git a/drivers/misc/ntsync.c b/drivers/misc/ntsync.c
index c6f84a5fc8c0..e914d626465a 100644
--- a/drivers/misc/ntsync.c
+++ b/drivers/misc/ntsync.c
@@ -55,7 +55,34 @@ struct ntsync_obj {
 		} sem;
 	} u;
 
+	/*
+	 * any_waiters is protected by the object lock, but all_waiters is
+	 * protected by the device wait_all_lock.
+	 */
 	struct list_head any_waiters;
+	struct list_head all_waiters;
+
+	/*
+	 * Hint describing how many tasks are queued on this object in a
+	 * wait-all operation.
+	 *
+	 * Any time we do a wake, we may need to wake "all" waiters as well as
+	 * "any" waiters. In order to atomically wake "all" waiters, we must
+	 * lock all of the objects, and that means grabbing the wait_all_lock
+	 * below (and, due to lock ordering rules, before locking this object).
+	 * However, wait-all is a rare operation, and grabbing the wait-all
+	 * lock for every wake would create unnecessary contention.
+	 * Therefore we first check whether all_hint is zero, and, if it is,
+	 * we skip trying to wake "all" waiters.
+	 *
+	 * This hint isn't protected by any lock. It might change during the
+	 * course of a wake, but there's no meaningful race there; it's only a
+	 * hint.
+	 *
+	 * Since wait requests must originate from user-space threads, we're
+	 * limited here by PID_MAX_LIMIT, so there's no risk of overflow.
+	 */
+	atomic_t all_hint;
 };
 
 struct ntsync_q_entry {
@@ -76,14 +103,100 @@ struct ntsync_q {
 	 */
 	atomic_t signaled;
 
+	bool all;
 	__u32 count;
 	struct ntsync_q_entry entries[];
 };
 
 struct ntsync_device {
+	/*
+	 * Wait-all operations must atomically grab all objects, and be totally
+	 * ordered with respect to each other and wait-any operations.
+	 * If one thread is trying to acquire several objects, another thread
+	 * cannot touch the object at the same time.
+	 *
+	 * We achieve this by grabbing multiple object locks at the same time.
+	 * However, this creates a lock ordering problem. To solve that problem,
+	 * wait_all_lock is taken first whenever multiple objects must be locked
+	 * at the same time.
+	 */
+	spinlock_t wait_all_lock;
+
 	struct file *file;
 };
 
+static bool is_signaled(struct ntsync_obj *obj, __u32 owner)
+{
+	lockdep_assert_held(&obj->lock);
+
+	switch (obj->type) {
+	case NTSYNC_TYPE_SEM:
+		return !!obj->u.sem.count;
+	}
+
+	WARN(1, "bad object type %#x\n", obj->type);
+	return false;
+}
+
+/*
+ * "locked_obj" is an optional pointer to an object which is already locked and
+ * should not be locked again. This is necessary so that changing an object's
+ * state and waking it can be a single atomic operation.
+ */
+static void try_wake_all(struct ntsync_device *dev, struct ntsync_q *q,
+			 struct ntsync_obj *locked_obj)
+{
+	__u32 count = q->count;
+	bool can_wake = true;
+	int signaled = -1;
+	__u32 i;
+
+	lockdep_assert_held(&dev->wait_all_lock);
+	if (locked_obj)
+		lockdep_assert_held(&locked_obj->lock);
+
+	for (i = 0; i < count; i++) {
+		if (q->entries[i].obj != locked_obj)
+			spin_lock_nest_lock(&q->entries[i].obj->lock, &dev->wait_all_lock);
+	}
+
+	for (i = 0; i < count; i++) {
+		if (!is_signaled(q->entries[i].obj, q->owner)) {
+			can_wake = false;
+			break;
+		}
+	}
+
+	if (can_wake && atomic_try_cmpxchg(&q->signaled, &signaled, 0)) {
+		for (i = 0; i < count; i++) {
+			struct ntsync_obj *obj = q->entries[i].obj;
+
+			switch (obj->type) {
+			case NTSYNC_TYPE_SEM:
+				obj->u.sem.count--;
+				break;
+			}
+		}
+		wake_up_process(q->task);
+	}
+
+	for (i = 0; i < count; i++) {
+		if (q->entries[i].obj != locked_obj)
+			spin_unlock(&q->entries[i].obj->lock);
+	}
+}
+
+static void try_wake_all_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
+{
+	struct ntsync_q_entry *entry;
+
+	lockdep_assert_held(&dev->wait_all_lock);
+	lockdep_assert_held(&obj->lock);
+
+	list_for_each_entry(entry, &obj->all_waiters, node)
+		try_wake_all(dev, entry->q, obj);
+}
+
 static void try_wake_any_sem(struct ntsync_obj *sem)
 {
 	struct ntsync_q_entry *entry;
@@ -124,6 +237,7 @@ static int post_sem_state(struct ntsync_obj *sem, __u32 count)
 
 static int ntsync_sem_post(struct ntsync_obj *sem, void __user *argp)
 {
+	struct ntsync_device *dev = sem->dev;
 	__u32 __user *user_args = argp;
 	__u32 prev_count;
 	__u32 args;
@@ -135,14 +249,29 @@ static int ntsync_sem_post(struct ntsync_obj *sem, void __user *argp)
 	if (sem->type != NTSYNC_TYPE_SEM)
 		return -EINVAL;
 
-	spin_lock(&sem->lock);
+	if (atomic_read(&sem->all_hint) > 0) {
+		spin_lock(&dev->wait_all_lock);
+		spin_lock_nest_lock(&sem->lock, &dev->wait_all_lock);
 
-	prev_count = sem->u.sem.count;
-	ret = post_sem_state(sem, args);
-	if (!ret)
-		try_wake_any_sem(sem);
+		prev_count = sem->u.sem.count;
+		ret = post_sem_state(sem, args);
+		if (!ret) {
+			try_wake_all_obj(dev, sem);
+			try_wake_any_sem(sem);
+		}
 
-	spin_unlock(&sem->lock);
+		spin_unlock(&sem->lock);
+		spin_unlock(&dev->wait_all_lock);
+	} else {
+		spin_lock(&sem->lock);
+
+		prev_count = sem->u.sem.count;
+		ret = post_sem_state(sem, args);
+		if (!ret)
+			try_wake_any_sem(sem);
+
+		spin_unlock(&sem->lock);
+	}
 
 	if (!ret && put_user(prev_count, user_args))
 		ret = -EFAULT;
@@ -195,6 +324,8 @@ static struct ntsync_obj *ntsync_alloc_obj(struct ntsync_device *dev,
 	get_file(dev->file);
 	spin_lock_init(&obj->lock);
 	INIT_LIST_HEAD(&obj->any_waiters);
+	INIT_LIST_HEAD(&obj->all_waiters);
+	atomic_set(&obj->all_hint, 0);
 
 	return obj;
 }
@@ -306,7 +437,7 @@ static int ntsync_schedule(const struct ntsync_q *q, const struct ntsync_wait_ar
  * Allocate and initialize the ntsync_q structure, but do not queue us yet.
  */
 static int setup_wait(struct ntsync_device *dev,
-		      const struct ntsync_wait_args *args,
+		      const struct ntsync_wait_args *args, bool all,
		      struct ntsync_q **ret_q)
 {
 	const __u32 count = args->count;
@@ -333,6 +464,7 @@ static int setup_wait(struct ntsync_device *dev,
 	q->task = current;
 	q->owner = args->owner;
 	atomic_set(&q->signaled, -1);
+	q->all = all;
 	q->count = count;
 
 	for (i = 0; i < count; i++) {
@@ -342,6 +474,16 @@ static int setup_wait(struct ntsync_device *dev,
 		if (!obj)
 			goto err;
 
+		if (all) {
+			/* Check that the objects are all distinct. */
+			for (j = 0; j < i; j++) {
+				if (obj == q->entries[j].obj) {
+					put_obj(obj);
+					goto err;
+				}
+			}
+		}
+
 		entry->obj = obj;
 		entry->q = q;
 		entry->index = i;
@@ -377,7 +519,7 @@ static int ntsync_wait_any(struct ntsync_device *dev, void __user *argp)
 	if (copy_from_user(&args, argp, sizeof(args)))
 		return -EFAULT;
 
-	ret = setup_wait(dev, &args, &q);
+	ret = setup_wait(dev, &args, false, &q);
 	if (ret < 0)
 		return ret;
 
@@ -439,6 +581,87 @@ static int ntsync_wait_any(struct ntsync_device *dev, void __user *argp)
 	return ret;
 }
 
+static int ntsync_wait_all(struct ntsync_device *dev, void __user *argp)
+{
+	struct ntsync_wait_args args;
+	struct ntsync_q *q;
+	int signaled;
+	__u32 i;
+	int ret;
+
+	if (copy_from_user(&args, argp, sizeof(args)))
+		return -EFAULT;
+
+	ret = setup_wait(dev, &args, true, &q);
+	if (ret < 0)
+		return ret;
+
+	/* queue ourselves */
+
+	spin_lock(&dev->wait_all_lock);
+
+	for (i = 0; i < args.count; i++) {
+		struct ntsync_q_entry *entry = &q->entries[i];
+		struct ntsync_obj *obj = entry->obj;
+
+		atomic_inc(&obj->all_hint);
+
+		/*
+		 * obj->all_waiters is protected by dev->wait_all_lock rather
+		 * than obj->lock, so there is no need to acquire obj->lock
+		 * here.
+		 */
+		list_add_tail(&entry->node, &obj->all_waiters);
+	}
+
+	/* check if we are already signaled */
+
+	try_wake_all(dev, q, NULL);
+
+	spin_unlock(&dev->wait_all_lock);
+
+	/* sleep */
+
+	ret = ntsync_schedule(q, &args);
+
+	/* and finally, unqueue */
+
+	spin_lock(&dev->wait_all_lock);
+
+	for (i = 0; i < args.count; i++) {
+		struct ntsync_q_entry *entry = &q->entries[i];
+		struct ntsync_obj *obj = entry->obj;
+
+		/*
+		 * obj->all_waiters is protected by dev->wait_all_lock rather
+		 * than obj->lock, so there is no need to acquire it here.
+		 */
+		list_del(&entry->node);
+
+		atomic_dec(&obj->all_hint);
+
+		put_obj(obj);
+	}
+
+	spin_unlock(&dev->wait_all_lock);
+
+	signaled = atomic_read(&q->signaled);
+	if (signaled != -1) {
+		struct ntsync_wait_args __user *user_args = argp;
+
+		/* even if we caught a signal, we need to communicate success */
+		ret = 0;
+
+		if (put_user(signaled, &user_args->index))
+			ret = -EFAULT;
+	} else if (!ret) {
+		ret = -ETIMEDOUT;
+	}
+
+	kfree(q);
+	return ret;
+}
+
 static int ntsync_char_open(struct inode *inode, struct file *file)
 {
 	struct ntsync_device *dev;
@@ -447,6 +670,8 @@ static int ntsync_char_open(struct inode *inode, struct file *file)
 	if (!dev)
 		return -ENOMEM;
 
+	spin_lock_init(&dev->wait_all_lock);
+
 	file->private_data = dev;
 	dev->file = file;
 	return nonseekable_open(inode, file);
@@ -470,6 +695,8 @@ static long ntsync_char_ioctl(struct file *file, unsigned int cmd,
 	switch (cmd) {
 	case NTSYNC_IOC_CREATE_SEM:
 		return ntsync_create_sem(dev, argp);
+	case NTSYNC_IOC_WAIT_ALL:
+		return ntsync_wait_all(dev, argp);
 	case NTSYNC_IOC_WAIT_ANY:
 		return ntsync_wait_any(dev, argp);
 	default:
diff --git a/include/uapi/linux/ntsync.h b/include/uapi/linux/ntsync.h
index 60ad414b5552..83784d4438a1 100644
--- a/include/uapi/linux/ntsync.h
+++ b/include/uapi/linux/ntsync.h
@@ -33,6 +33,7 @@ struct ntsync_wait_args {
 
 #define NTSYNC_IOC_CREATE_SEM		_IOWR('N', 0x80, struct ntsync_sem_args)
 #define NTSYNC_IOC_WAIT_ANY		_IOWR('N', 0x82, struct ntsync_wait_args)
+#define NTSYNC_IOC_WAIT_ALL		_IOWR('N', 0x83, struct ntsync_wait_args)
 
 #define NTSYNC_IOC_SEM_POST		_IOWR('N', 0x81, __u32)
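For reference, a rough user-space sketch of driving the new ioctl. Only the
ioctl numbers and the count/owner/index fields appear in this excerpt, so
the struct layouts below (including the objs pointer, the timeout field,
and the create-semaphore convention) are assumptions for illustration;
check the series' include/uapi/linux/ntsync.h for the real definitions.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct ntsync_sem_args {	/* assumed layout */
	uint32_t sem;		/* out: semaphore object handle */
	uint32_t count;		/* initial count */
	uint32_t max;		/* maximum count */
};

struct ntsync_wait_args {	/* assumed layout */
	uint64_t timeout;	/* assumed: ~0 means wait forever */
	uint64_t objs;		/* userspace pointer to the object array */
	uint32_t count;
	uint32_t owner;
	uint32_t index;		/* out */
	uint32_t pad;
};

#define NTSYNC_IOC_CREATE_SEM	_IOWR('N', 0x80, struct ntsync_sem_args)
#define NTSYNC_IOC_WAIT_ALL	_IOWR('N', 0x83, struct ntsync_wait_args)

int main(void)
{
	struct ntsync_sem_args sem_args = { .count = 1, .max = 1 };
	struct ntsync_wait_args wait_args = { .timeout = ~0ull };
	uint32_t objs[2];
	int dev;

	dev = open("/dev/ntsync", O_RDWR);	/* device node name assumed */
	if (dev < 0)
		return 1;

	/* create two semaphores, each initially signaled */
	if (ioctl(dev, NTSYNC_IOC_CREATE_SEM, &sem_args) < 0)
		return 1;
	objs[0] = sem_args.sem;
	if (ioctl(dev, NTSYNC_IOC_CREATE_SEM, &sem_args) < 0)
		return 1;
	objs[1] = sem_args.sem;

	/* block until *both* are signaled, then acquire both atomically */
	wait_args.objs = (uintptr_t)objs;
	wait_args.count = 2;
	wait_args.owner = getpid();
	if (ioctl(dev, NTSYNC_IOC_WAIT_ALL, &wait_args) == 0)
		printf("acquired both objects\n");

	close(dev);
	return 0;
}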