
[03/11] qspinlock: Add pending bit

Message ID 20140617203615.GA29634@laptop.dumpdata.com (mailing list archive)
State New, archived

Commit Message

Konrad Rzeszutek Wilk June 17, 2014, 8:36 p.m. UTC
On Sun, Jun 15, 2014 at 02:47:00PM +0200, Peter Zijlstra wrote:
> Because the qspinlock needs to touch a second cacheline; add a pending
> bit and allow a single in-word spinner before we punt to the second
> cacheline.

Could you add this in the description please:

And by second cacheline we mean the local 'node'. That is the:
mcs_nodes[0] and mcs_nodes[idx]

Perhaps it might be better, then, to split this in the header file
as this is trying not to be slowpath code - but rather - a
pre-slow-path-lets-try-if-we-can-do-another-cmpxchg in case
the unlocker has just unlocked itself.

So something like:

and then the slowpath preserves most of the old logic path
(with the pending bit stuff)?

 
> 
> Signed-off-by: Peter Zijlstra <peterz@infradead.org>
> ---
>  include/asm-generic/qspinlock_types.h |   12 ++-
>  kernel/locking/qspinlock.c            |  109 +++++++++++++++++++++++++++-------
>  2 files changed, 97 insertions(+), 24 deletions(-)
> 
> --- a/include/asm-generic/qspinlock_types.h
> +++ b/include/asm-generic/qspinlock_types.h
> @@ -39,8 +39,9 @@ typedef struct qspinlock {
>   * Bitfields in the atomic value:
>   *
>   *  0- 7: locked byte
> - *  8- 9: tail index
> - * 10-31: tail cpu (+1)
> + *     8: pending
> + *  9-10: tail index
> + * 11-31: tail cpu (+1)
>   */
>  #define	_Q_SET_MASK(type)	(((1U << _Q_ ## type ## _BITS) - 1)\
>  				      << _Q_ ## type ## _OFFSET)
> @@ -48,7 +49,11 @@ typedef struct qspinlock {
>  #define _Q_LOCKED_BITS		8
>  #define _Q_LOCKED_MASK		_Q_SET_MASK(LOCKED)
>  
> -#define _Q_TAIL_IDX_OFFSET	(_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
> +#define _Q_PENDING_OFFSET	(_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
> +#define _Q_PENDING_BITS		1
> +#define _Q_PENDING_MASK		_Q_SET_MASK(PENDING)
> +
> +#define _Q_TAIL_IDX_OFFSET	(_Q_PENDING_OFFSET + _Q_PENDING_BITS)
>  #define _Q_TAIL_IDX_BITS	2
>  #define _Q_TAIL_IDX_MASK	_Q_SET_MASK(TAIL_IDX)
>  
> @@ -57,5 +62,6 @@ typedef struct qspinlock {
>  #define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)
>  
>  #define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
> +#define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)
>  
>  #endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/locking/qspinlock.c
> @@ -83,24 +83,28 @@ static inline struct mcs_spinlock *decod
>  	return per_cpu_ptr(&mcs_nodes[idx], cpu);
>  }
>  
> +#define _Q_LOCKED_PENDING_MASK	(_Q_LOCKED_MASK | _Q_PENDING_MASK)
> +
>  /**
>   * queue_spin_lock_slowpath - acquire the queue spinlock
>   * @lock: Pointer to queue spinlock structure
>   * @val: Current value of the queue spinlock 32-bit word
>   *
> - * (queue tail, lock bit)
> - *
> - *              fast      :    slow                                  :    unlock
> - *                        :                                          :
> - * uncontended  (0,0)   --:--> (0,1) --------------------------------:--> (*,0)
> - *                        :       | ^--------.                    /  :
> - *                        :       v           \                   |  :
> - * uncontended            :    (n,x) --+--> (n,0)                 |  :
> - *   queue                :       | ^--'                          |  :
> - *                        :       v                               |  :
> - * contended              :    (*,x) --+--> (*,0) -----> (*,1) ---'  :
> - *   queue                :         ^--'                             :
> + * (queue tail, pending bit, lock bit)
>   *
> + *              fast     :    slow                                  :    unlock
> + *                       :                                          :
> + * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
> + *                       :       | ^--------.------.             /  :
> + *                       :       v           \      \            |  :
> + * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
> + *                       :       | ^--'              |           |  :
> + *                       :       v                   |           |  :
> + * uncontended           :    (n,x,y) +--> (n,0,0) --'           |  :
> + *   queue               :       | ^--'                          |  :
> + *                       :       v                               |  :
> + * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
> + *   queue               :         ^--'                             :
>   */
>  void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
>  {
> @@ -110,6 +114,65 @@ void queue_spin_lock_slowpath(struct qsp
>  
>  	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
>  
> +	/*
> +	 * trylock || pending
> +	 *
> +	 * 0,0,0 -> 0,0,1 ; trylock
> +	 * 0,0,1 -> 0,1,1 ; pending
> +	 */
> +	for (;;) {
> +		/*
> +		 * If we observe any contention; queue.
> +		 */
> +		if (val & ~_Q_LOCKED_MASK)
> +			goto queue;
> +
> +		new = _Q_LOCKED_VAL;
> +		if (val == new)
> +			new |= _Q_PENDING_VAL;
> +
> +		old = atomic_cmpxchg(&lock->val, val, new);
> +		if (old == val)
> +			break;
> +
> +		val = old;
> +	}
> +
> +	/*
> +	 * we won the trylock
> +	 */
> +	if (new == _Q_LOCKED_VAL)
> +		return;
> +
> +	/*
> +	 * we're pending, wait for the owner to go away.
> +	 *
> +	 * *,1,1 -> *,1,0
> +	 */
> +	while ((val = atomic_read(&lock->val)) & _Q_LOCKED_MASK)
> +		cpu_relax();
> +
> +	/*
> +	 * take ownership and clear the pending bit.
> +	 *
> +	 * *,1,0 -> *,0,1
> +	 */
> +	for (;;) {
> +		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
> +
> +		old = atomic_cmpxchg(&lock->val, val, new);
> +		if (old == val)
> +			break;
> +
> +		val = old;
> +	}
> +	return;
> +
> +	/*
> +	 * End of pending bit optimistic spinning and beginning of MCS
> +	 * queuing.
> +	 */
> +queue:
>  	node = this_cpu_ptr(&mcs_nodes[0]);
>  	idx = node->count++;
>  	tail = encode_tail(smp_processor_id(), idx);
> @@ -119,15 +182,18 @@ void queue_spin_lock_slowpath(struct qsp
>  	node->next = NULL;
>  
>  	/*
> +	 * we already touched the queueing cacheline; don't bother with pending
> +	 * stuff.
> +	 *
>  	 * trylock || xchg(lock, node)
>  	 *
> -	 * 0,0 -> 0,1 ; trylock
> -	 * p,x -> n,x ; prev = xchg(lock, node)
> +	 * 0,0,0 -> 0,0,1 ; trylock
> +	 * p,y,x -> n,y,x ; prev = xchg(lock, node)
>  	 */
>  	for (;;) {
>  		new = _Q_LOCKED_VAL;
>  		if (val)
> -			new = tail | (val & _Q_LOCKED_MASK);
> +			new = tail | (val & _Q_LOCKED_PENDING_MASK);
>  
>  		old = atomic_cmpxchg(&lock->val, val, new);
>  		if (old == val)
> @@ -145,7 +211,7 @@ void queue_spin_lock_slowpath(struct qsp
>  	/*
>  	 * if there was a previous node; link it and wait.
>  	 */
> -	if (old & ~_Q_LOCKED_MASK) {
> +	if (old & ~_Q_LOCKED_PENDING_MASK) {
>  		prev = decode_tail(old);
>  		ACCESS_ONCE(prev->next) = node;
>  
> @@ -153,18 +219,19 @@ void queue_spin_lock_slowpath(struct qsp
>  	}
>  
>  	/*
> -	 * we're at the head of the waitqueue, wait for the owner to go away.
> +	 * we're at the head of the waitqueue, wait for the owner & pending to
> +	 * go away.
>  	 *
> -	 * *,x -> *,0
> +	 * *,x,y -> *,0,0
>  	 */
> -	while ((val = atomic_read(&lock->val)) & _Q_LOCKED_MASK)
> +	while ((val = atomic_read(&lock->val)) & _Q_LOCKED_PENDING_MASK)
>  		cpu_relax();
>  
>  	/*
>  	 * claim the lock:
>  	 *
> -	 * n,0 -> 0,1 : lock, uncontended
> -	 * *,0 -> *,1 : lock, contended
> +	 * n,0,0 -> 0,0,1 : lock, uncontended
> +	 * *,0,0 -> *,0,1 : lock, contended
>  	 */
>  	for (;;) {
>  		new = _Q_LOCKED_VAL;
> 
> 

Comments

Waiman Long June 17, 2014, 8:51 p.m. UTC | #1
On 06/17/2014 04:36 PM, Konrad Rzeszutek Wilk wrote:
> On Sun, Jun 15, 2014 at 02:47:00PM +0200, Peter Zijlstra wrote:
>> Because the qspinlock needs to touch a second cacheline; add a pending
>> bit and allow a single in-word spinner before we punt to the second
>> cacheline.
> Could you add this in the description please:
>
> And by second cacheline we mean the local 'node'. That is the:
> mcs_nodes[0] and mcs_nodes[idx]
>
> Perhaps it might be better then to split this in the header file
> as this is trying to not be a slowpath code - but rather - a
> pre-slow-path-lets-try-if-we can do another cmpxchg in case
> the unlocker has just unlocked itself.
>
> So something like:
>
> diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
> index e8a7ae8..29cc9c7 100644
> --- a/include/asm-generic/qspinlock.h
> +++ b/include/asm-generic/qspinlock.h
> @@ -75,11 +75,21 @@ extern void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
>    */
>   static __always_inline void queue_spin_lock(struct qspinlock *lock)
>   {
> -	u32 val;
> +	u32 val, new;
>
>   	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
>   	if (likely(val == 0))
>   		return;
> +
> +	/* One more attempt - but if we fail mark it as pending. */
> +	if (val == _Q_LOCKED_VAL) {
> +		new = Q_LOCKED_VAL |_Q_PENDING_VAL;
> +
> +		old = atomic_cmpxchg(&lock->val, val, new);
> +		if (old == _Q_LOCKED_VAL) /* YEEY! */
> +			return;

No, it can't be left like that. The unlock path will not clear the pending 
bit. We are trying to make the fastpath as simple as possible as it may 
be inlined. The complexity of the queue spinlock is in the slowpath.
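
For context, the unlock fastpath elsewhere in the series looks roughly like
the sketch below (a paraphrase for illustration, not part of this patch): it
only drops the locked byte and never touches the pending bit, so the pending
spinner itself has to clear the bit when it takes the lock.

	/*
	 * Rough sketch of queue_spin_unlock(), paraphrased for illustration:
	 * only the locked byte is released; the pending bit is left alone.
	 */
	static __always_inline void queue_spin_unlock(struct qspinlock *lock)
	{
		smp_mb__before_atomic();		/* release ordering */
		atomic_sub(_Q_LOCKED_VAL, &lock->val);	/* *,1,1 -> *,1,0 */
	}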

Moreover, a cmpxchg immediately followed by another cmpxchg will just 
increase the level of memory contention when a lock is fairly contended. 
The chance of the second cmpxchg() succeeding will be pretty low.

-Longman


Konrad Rzeszutek Wilk June 17, 2014, 9:07 p.m. UTC | #2
On Tue, Jun 17, 2014 at 04:51:57PM -0400, Waiman Long wrote:
> On 06/17/2014 04:36 PM, Konrad Rzeszutek Wilk wrote:
> >On Sun, Jun 15, 2014 at 02:47:00PM +0200, Peter Zijlstra wrote:
> >>Because the qspinlock needs to touch a second cacheline; add a pending
> >>bit and allow a single in-word spinner before we punt to the second
> >>cacheline.
> >Could you add this in the description please:
> >
> >And by second cacheline we mean the local 'node'. That is the:
> >mcs_nodes[0] and mcs_nodes[idx]
> >
> >Perhaps it might be better then to split this in the header file
> >as this is trying to not be a slowpath code - but rather - a
> >pre-slow-path-lets-try-if-we can do another cmpxchg in case
> >the unlocker has just unlocked itself.
> >
> >So something like:
> >
> >diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
> >index e8a7ae8..29cc9c7 100644
> >--- a/include/asm-generic/qspinlock.h
> >+++ b/include/asm-generic/qspinlock.h
> >@@ -75,11 +75,21 @@ extern void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> >   */
> >  static __always_inline void queue_spin_lock(struct qspinlock *lock)
> >  {
> >-	u32 val;
> >+	u32 val, new;
> >
> >  	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
> >  	if (likely(val == 0))
> >  		return;
> >+
> >+	/* One more attempt - but if we fail mark it as pending. */
> >+	if (val == _Q_LOCKED_VAL) {
> >+		new = Q_LOCKED_VAL |_Q_PENDING_VAL;
> >+
> >+		old = atomic_cmpxchg(&lock->val, val, new);
> >+		if (old == _Q_LOCKED_VAL) /* YEEY! */
> >+			return;
> 
> No, it can leave like that. The unlock path will not clear the pending bit.

Err, you are right. It needs to go back in the slowpath.

> We are trying to make the fastpath as simple as possible as it may be
> inlined. The complexity of the queue spinlock is in the slowpath.

Sure, but then it shouldn't be called slowpath anymore as it is not
slow. It is a combination of fast path (the potential chance of
grabbing the lock and setting the pending bit) and the real slow
path (the queuing). Perhaps it should be called 'queue_spinlock_complex'?

> 
> Moreover, an cmpxchg followed immediately followed by another cmpxchg will
> just increase the level of memory contention when a lock is fairly
> contended. The chance of second cmpxchg() succeeding will be pretty low.

Then why even do the pending bit - which is what the slowpath does
first. And if it grabs it (and sets the pending bit) it
immediately exits. Why not percolate that piece of code into this header.

And then leave all that slow code (queuing, mcs_lock access, etc.) in the slowpath.

> 
> -Longman
> 
> 
Konrad Rzeszutek Wilk June 17, 2014, 9:10 p.m. UTC | #3
On Tue, Jun 17, 2014 at 05:07:29PM -0400, Konrad Rzeszutek Wilk wrote:
> On Tue, Jun 17, 2014 at 04:51:57PM -0400, Waiman Long wrote:
> > On 06/17/2014 04:36 PM, Konrad Rzeszutek Wilk wrote:
> > >On Sun, Jun 15, 2014 at 02:47:00PM +0200, Peter Zijlstra wrote:
> > >>Because the qspinlock needs to touch a second cacheline; add a pending
> > >>bit and allow a single in-word spinner before we punt to the second
> > >>cacheline.
> > >Could you add this in the description please:
> > >
> > >And by second cacheline we mean the local 'node'. That is the:
> > >mcs_nodes[0] and mcs_nodes[idx]
> > >
> > >Perhaps it might be better then to split this in the header file
> > >as this is trying to not be a slowpath code - but rather - a
> > >pre-slow-path-lets-try-if-we can do another cmpxchg in case
> > >the unlocker has just unlocked itself.
> > >
> > >So something like:
> > >
> > >diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
> > >index e8a7ae8..29cc9c7 100644
> > >--- a/include/asm-generic/qspinlock.h
> > >+++ b/include/asm-generic/qspinlock.h
> > >@@ -75,11 +75,21 @@ extern void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> > >   */
> > >  static __always_inline void queue_spin_lock(struct qspinlock *lock)
> > >  {
> > >-	u32 val;
> > >+	u32 val, new;
> > >
> > >  	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
> > >  	if (likely(val == 0))
> > >  		return;
> > >+
> > >+	/* One more attempt - but if we fail mark it as pending. */
> > >+	if (val == _Q_LOCKED_VAL) {
> > >+		new = Q_LOCKED_VAL |_Q_PENDING_VAL;
> > >+
> > >+		old = atomic_cmpxchg(&lock->val, val, new);
> > >+		if (old == _Q_LOCKED_VAL) /* YEEY! */
> > >+			return;
> > 
> > No, it can leave like that. The unlock path will not clear the pending bit.
> 
> Err, you are right. It needs to go back in the slowpath.

What I should have written is:

if (old == 0) /* YEEY */
  return;

As that would do the same thing as this patch does with the pending bit - that
is, if on the second compare-and-exchange the lock has just been released and we
can set the pending bit (and the lock) - we are good.

And it is a quick path.

> 
> > We are trying to make the fastpath as simple as possible as it may be
> > inlined. The complexity of the queue spinlock is in the slowpath.
> 
> Sure, but then it shouldn't be called slowpath anymore as it is not
> slow. It is a combination of fast path (the potential chance of
> grabbing the lock and setting the pending lock) and the real slow
> path (the queuing). Perhaps it should be called 'queue_spinlock_complex' ?
> 

I forgot to mention - that was the crux of my comments - just change
the slowpath name to 'complex' at that point to better reflect what
it does.

> > 
> > Moreover, an cmpxchg followed immediately followed by another cmpxchg will
> > just increase the level of memory contention when a lock is fairly
> > contended. The chance of second cmpxchg() succeeding will be pretty low.
> 
> Then why even do the pending bit - which is what the slowpath does
> for the first time. And if it grabs it (And sets the pending bit) it
> immediately exits. Why not perculate that piece of code in-to this header.
> 
> And the leave all that slow code (queing, mcs_lock access, etc) in the slowpath.
> 
> > 
> > -Longman
> > 
> > 
Waiman Long June 17, 2014, 10:25 p.m. UTC | #4
On 06/17/2014 05:10 PM, Konrad Rzeszutek Wilk wrote:
> On Tue, Jun 17, 2014 at 05:07:29PM -0400, Konrad Rzeszutek Wilk wrote:
>> On Tue, Jun 17, 2014 at 04:51:57PM -0400, Waiman Long wrote:
>>> On 06/17/2014 04:36 PM, Konrad Rzeszutek Wilk wrote:
>>>> On Sun, Jun 15, 2014 at 02:47:00PM +0200, Peter Zijlstra wrote:
>>>>> Because the qspinlock needs to touch a second cacheline; add a pending
>>>>> bit and allow a single in-word spinner before we punt to the second
>>>>> cacheline.
>>>> Could you add this in the description please:
>>>>
>>>> And by second cacheline we mean the local 'node'. That is the:
>>>> mcs_nodes[0] and mcs_nodes[idx]
>>>>
>>>> Perhaps it might be better then to split this in the header file
>>>> as this is trying to not be a slowpath code - but rather - a
>>>> pre-slow-path-lets-try-if-we can do another cmpxchg in case
>>>> the unlocker has just unlocked itself.
>>>>
>>>> So something like:
>>>>
>>>> diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
>>>> index e8a7ae8..29cc9c7 100644
>>>> --- a/include/asm-generic/qspinlock.h
>>>> +++ b/include/asm-generic/qspinlock.h
>>>> @@ -75,11 +75,21 @@ extern void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
>>>>    */
>>>>   static __always_inline void queue_spin_lock(struct qspinlock *lock)
>>>>   {
>>>> -	u32 val;
>>>> +	u32 val, new;
>>>>
>>>>   	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
>>>>   	if (likely(val == 0))
>>>>   		return;
>>>> +
>>>> +	/* One more attempt - but if we fail mark it as pending. */
>>>> +	if (val == _Q_LOCKED_VAL) {
>>>> +		new = Q_LOCKED_VAL |_Q_PENDING_VAL;
>>>> +
>>>> +		old = atomic_cmpxchg(&lock->val, val, new);
>>>> +		if (old == _Q_LOCKED_VAL) /* YEEY! */
>>>> +			return;
>>> No, it can leave like that. The unlock path will not clear the pending bit.
>> Err, you are right. It needs to go back in the slowpath.
> What I should have wrote is:
>
> if (old == 0) /* YEEY */
>    return;

Unfortunately, that still doesn't work. If old is 0, it just means the 
cmpxchg failed. It still hasn't got the lock.
> As that would the same thing as this patch does on the pending bit - that
> is if we can on the second compare and exchange set the pending bit (and the
> lock) and the lock has been released - we are good.

That is not true. When the lock is freed, the pending bit holder will 
still have to clear the pending bit and set the lock bit as is done in 
the slowpath. We cannot skip the step here. The problem of moving the 
pending code here is that it includes a wait loop which we don't want to 
put in the fastpath.
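
For reference, this is the part of the slowpath in the patch above (condensed
from it) that the pending-bit holder still has to run once it has set the bit;
the initial wait loop is what makes it a poor fit for the inlined fastpath:

	/*
	 * we're pending, wait for the owner to go away.
	 *
	 * *,1,1 -> *,1,0
	 */
	while ((val = atomic_read(&lock->val)) & _Q_LOCKED_MASK)
		cpu_relax();

	/*
	 * take ownership and clear the pending bit.
	 *
	 * *,1,0 -> *,0,1
	 */
	for (;;) {
		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;

		old = atomic_cmpxchg(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}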
>
> And it is a quick path.
>
>>> We are trying to make the fastpath as simple as possible as it may be
>>> inlined. The complexity of the queue spinlock is in the slowpath.
>> Sure, but then it shouldn't be called slowpath anymore as it is not
>> slow. It is a combination of fast path (the potential chance of
>> grabbing the lock and setting the pending lock) and the real slow
>> path (the queuing). Perhaps it should be called 'queue_spinlock_complex' ?
>>
> I forgot to mention - that was the crux of my comments - just change
> the slowpath to complex name at that point to better reflect what
> it does.

Actually in my v11 patch, I subdivided the slowpath into a slowpath for 
the pending code and a slowerpath for the actual queuing. Perhaps we could 
use quickpath and slowpath instead. Anyway, it is a minor detail that we 
can discuss after the core code gets merged.

-Longman
Paolo Bonzini June 18, 2014, 11:29 a.m. UTC | #5
On 17/06/2014 22:36, Konrad Rzeszutek Wilk wrote:
> +	/* One more attempt - but if we fail mark it as pending. */
> +	if (val == _Q_LOCKED_VAL) {
> +		new = Q_LOCKED_VAL |_Q_PENDING_VAL;
> +
> +		old = atomic_cmpxchg(&lock->val, val, new);
> +		if (old == _Q_LOCKED_VAL) /* YEEY! */
> +			return;
> +		val = old;
> +	}

Note that Peter's code is in a for(;;) loop:


+	for (;;) {
+		/*
+		 * If we observe any contention; queue.
+		 */
+		if (val & ~_Q_LOCKED_MASK)
+			goto queue;
+
+		new = _Q_LOCKED_VAL;
+		if (val == new)
+			new |= _Q_PENDING_VAL;
+
+		old = atomic_cmpxchg(&lock->val, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+
+	/*
+	 * we won the trylock
+	 */
+	if (new == _Q_LOCKED_VAL)
+		return;

So what you'd have is basically:

	/*
	 * One more attempt if no one is already in queue.  Perhaps
	 * they have unlocked the spinlock already.
	 */
	if (val == _Q_LOCKED_VAL && atomic_read(&lock->val) == 0) {
		old = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
		if (old == 0) /* YEEY! */
			return;
		val = old;
	}

But I agree with Waiman that this is unlikely to trigger often enough. 
It does have to be handled in the slowpath for correctness, but the most 
likely path is (0,0,1)->(0,1,1).
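
To make that concrete (an illustration only, using the (tail,pending,locked)
notation from the patch), the common contended sequence with CPU0 holding the
lock and CPU1 arriving is:

	CPU1: fastpath cmpxchg fails, observes (0,0,1)
	CPU1: (0,0,1) -> (0,1,1)   set pending, spin on the locked byte
	CPU0: (0,1,1) -> (0,1,0)   unlock clears only the locked byte
	CPU1: (0,1,0) -> (0,0,1)   clear pending, take the lock

The window in which CPU1 would instead see the word back at 0 - the only case
the extra cmpxchg in the header wins - is comparatively tiny.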

Paolo
Konrad Rzeszutek Wilk June 18, 2014, 1:36 p.m. UTC | #6
On Wed, Jun 18, 2014 at 01:29:48PM +0200, Paolo Bonzini wrote:
> On 17/06/2014 22:36, Konrad Rzeszutek Wilk wrote:
> >+	/* One more attempt - but if we fail mark it as pending. */
> >+	if (val == _Q_LOCKED_VAL) {
> >+		new = Q_LOCKED_VAL |_Q_PENDING_VAL;
> >+
> >+		old = atomic_cmpxchg(&lock->val, val, new);
> >+		if (old == _Q_LOCKED_VAL) /* YEEY! */
> >+			return;
> >+		val = old;
> >+	}
> 
> Note that Peter's code is in a for(;;) loop:
> 
> 
> +	for (;;) {
> +		/*
> +		 * If we observe any contention; queue.
> +		 */
> +		if (val & ~_Q_LOCKED_MASK)
> +			goto queue;
> +
> +		new = _Q_LOCKED_VAL;
> +		if (val == new)
> +			new |= _Q_PENDING_VAL;
> +
> +		old = atomic_cmpxchg(&lock->val, val, new);
> +		if (old == val)
> +			break;
> +
> +		val = old;
> +	}
> +
> +	/*
> +	 * we won the trylock
> +	 */
> +	if (new == _Q_LOCKED_VAL)
> +		return;
> 
> So what you'd have is basically:
> 
> 	/*
> 	 * One more attempt if no one is already in queue.  Perhaps
> 	 * they have unlocked the spinlock already.
> 	 */
> 	if (val == _Q_LOCKED_VAL && atomic_read(&lock->val) == 0) {
> 		old = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
> 		if (old == 0) /* YEEY! */
> 			return;
> 		val = old;
> 	}
> 
> But I agree with Waiman that this is unlikely to trigger often enough. It
> does have to be handled in the slowpath for correctness, but the most likely
> path is (0,0,1)->(0,1,1).

<nods>
> 
> Paolo
Peter Zijlstra June 23, 2014, 4:35 p.m. UTC | #7
On Tue, Jun 17, 2014 at 04:36:15PM -0400, Konrad Rzeszutek Wilk wrote:
> On Sun, Jun 15, 2014 at 02:47:00PM +0200, Peter Zijlstra wrote:
> > Because the qspinlock needs to touch a second cacheline; add a pending
> > bit and allow a single in-word spinner before we punt to the second
> > cacheline.
> 
> Could you add this in the description please:
> 
> And by second cacheline we mean the local 'node'. That is the:
> mcs_nodes[0] and mcs_nodes[idx]

Those should be the very same cacheline :), but yes, I can add something
like that.
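
For context, the reason they end up on the same cacheline (the sizes below are
assumptions for illustration, not taken from this patch): the whole per-CPU
node array used by the slowpath is only a handful of small nodes.

	/*
	 * Sketch with assumed sizes: one MCS node per nesting context
	 * (task, softirq, hardirq, NMI), 16 bytes each on 64-bit.
	 */
	struct mcs_spinlock {
		struct mcs_spinlock *next;	/* 8 bytes */
		int locked;			/* 4 bytes */
		int count;			/* 4 bytes, nesting index */
	};

	#define MAX_NODES	4

	static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
	/* 4 * 16 = 64 bytes: mcs_nodes[0] and mcs_nodes[idx] share a line */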

> Perhaps it might be better then to split this in the header file
> as this is trying to not be a slowpath code - but rather - a
> pre-slow-path-lets-try-if-we can do another cmpxchg in case
> the unlocker has just unlocked itself.
> 
> So something like:
> 
> diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
> index e8a7ae8..29cc9c7 100644
> --- a/include/asm-generic/qspinlock.h
> +++ b/include/asm-generic/qspinlock.h
> @@ -75,11 +75,21 @@ extern void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
>   */
>  static __always_inline void queue_spin_lock(struct qspinlock *lock)
>  {
> -	u32 val;
> +	u32 val, new;
>  
>  	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
>  	if (likely(val == 0))
>  		return;
> +
> +	/* One more attempt - but if we fail mark it as pending. */
> +	if (val == _Q_LOCKED_VAL) {
> +		new = Q_LOCKED_VAL |_Q_PENDING_VAL;
> +
> +		old = atomic_cmpxchg(&lock->val, val, new);
> +		if (old == _Q_LOCKED_VAL) /* YEEY! */
> +			return;
> +		val = old;
> +	}
>  	queue_spin_lock_slowpath(lock, val);
>  }

I think that's too big for an inline function.
Peter Zijlstra June 24, 2014, 8:24 a.m. UTC | #8
On Tue, Jun 17, 2014 at 05:07:29PM -0400, Konrad Rzeszutek Wilk wrote:
> > We are trying to make the fastpath as simple as possible as it may be
> > inlined. The complexity of the queue spinlock is in the slowpath.
> 
> Sure, but then it shouldn't be called slowpath anymore as it is not
> slow.

It's common terminology to call the inline part the fast path and the
out-of-line call on failure the slow path.

Patch

diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index e8a7ae8..29cc9c7 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -75,11 +75,21 @@  extern void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val);
  */
 static __always_inline void queue_spin_lock(struct qspinlock *lock)
 {
-	u32 val;
+	u32 val, new;
 
 	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
 	if (likely(val == 0))
 		return;
+
+	/* One more attempt - but if we fail mark it as pending. */
+	if (val == _Q_LOCKED_VAL) {
+		new = Q_LOCKED_VAL |_Q_PENDING_VAL;
+
+		old = atomic_cmpxchg(&lock->val, val, new);
+		if (old == _Q_LOCKED_VAL) /* YEEY! */
+			return;
+		val = old;
+	}
 	queue_spin_lock_slowpath(lock, val);
 }