
[RFC,10/12] x86, rwsem: simplify __down_write

Message ID 1454444369-2146-11-git-send-email-mhocko@kernel.org (mailing list archive)
State Superseded

Commit Message

Michal Hocko Feb. 2, 2016, 8:19 p.m. UTC
From: Michal Hocko <mhocko@suse.com>

The x86 implementation of __down_write uses inline asm to optimize the
code flow. This, however, requires an additional hop for the slow path,
call_rwsem_down_write_failed, which has to
save_common_regs/restore_common_regs to preserve the calling convention.
The optimization doesn't buy much, because the fast path only saves one
register push/pop (rdx) compared to the generic implementation:

Before:
0000000000000019 <down_write>:
  19:   e8 00 00 00 00          callq  1e <down_write+0x5>
  1e:   55                      push   %rbp
  1f:   48 ba 01 00 00 00 ff    movabs $0xffffffff00000001,%rdx
  26:   ff ff ff
  29:   48 89 f8                mov    %rdi,%rax
  2c:   48 89 e5                mov    %rsp,%rbp
  2f:   f0 48 0f c1 10          lock xadd %rdx,(%rax)
  34:   85 d2                   test   %edx,%edx
  36:   74 05                   je     3d <down_write+0x24>
  38:   e8 00 00 00 00          callq  3d <down_write+0x24>
  3d:   65 48 8b 04 25 00 00    mov    %gs:0x0,%rax
  44:   00 00
  46:   5d                      pop    %rbp
  47:   48 89 47 38             mov    %rax,0x38(%rdi)
  4b:   c3                      retq

After:
0000000000000019 <down_write>:
  19:   e8 00 00 00 00          callq  1e <down_write+0x5>
  1e:   55                      push   %rbp
  1f:   48 b8 01 00 00 00 ff    movabs $0xffffffff00000001,%rax
  26:   ff ff ff
  29:   48 89 e5                mov    %rsp,%rbp
  2c:   53                      push   %rbx
  2d:   48 89 fb                mov    %rdi,%rbx
  30:   f0 48 0f c1 07          lock xadd %rax,(%rdi)
  35:   48 85 c0                test   %rax,%rax
  38:   74 05                   je     3f <down_write+0x26>
  3a:   e8 00 00 00 00          callq  3f <down_write+0x26>
  3f:   65 48 8b 04 25 00 00    mov    %gs:0x0,%rax
  46:   00 00
  48:   48 89 43 38             mov    %rax,0x38(%rbx)
  4c:   5b                      pop    %rbx
  4d:   5d                      pop    %rbp
  4e:   c3                      retq

This doesn't seem to justify the code obfuscation and complexity. Use
the generic implementation instead.
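
At the C level the replacement is just the generic-style fast path (shown
here for readability; it mirrors the hunk in the patch below):

static inline void __down_write(struct rw_semaphore *sem)
{
	long tmp;

	/* add the write bias and look at the resulting count */
	tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				     (atomic_long_t *)&sem->count);
	/* anything but the pure write bias means the lock is contended */
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}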

Signed-off-by: Michal Hocko <mhocko@suse.com>
---
 arch/x86/include/asm/rwsem.h | 17 +++++------------
 arch/x86/lib/rwsem.S         |  9 ---------
 2 files changed, 5 insertions(+), 21 deletions(-)

Comments

Ingo Molnar Feb. 3, 2016, 8:10 a.m. UTC | #1
* Michal Hocko <mhocko@kernel.org> wrote:

> From: Michal Hocko <mhocko@suse.com>
> 
> The x86 implementation of __down_write uses inline asm to optimize the
> code flow. This, however, requires an additional hop for the slow path,
> call_rwsem_down_write_failed, which has to
> save_common_regs/restore_common_regs to preserve the calling convention.
> The optimization doesn't buy much, because the fast path only saves one
> register push/pop (rdx) compared to the generic implementation:
> 
> Before:
> 0000000000000019 <down_write>:
>   19:   e8 00 00 00 00          callq  1e <down_write+0x5>
>   1e:   55                      push   %rbp
>   1f:   48 ba 01 00 00 00 ff    movabs $0xffffffff00000001,%rdx
>   26:   ff ff ff
>   29:   48 89 f8                mov    %rdi,%rax
>   2c:   48 89 e5                mov    %rsp,%rbp
>   2f:   f0 48 0f c1 10          lock xadd %rdx,(%rax)
>   34:   85 d2                   test   %edx,%edx
>   36:   74 05                   je     3d <down_write+0x24>
>   38:   e8 00 00 00 00          callq  3d <down_write+0x24>
>   3d:   65 48 8b 04 25 00 00    mov    %gs:0x0,%rax
>   44:   00 00
>   46:   5d                      pop    %rbp
>   47:   48 89 47 38             mov    %rax,0x38(%rdi)
>   4b:   c3                      retq
> 
> After:
> 0000000000000019 <down_write>:
>   19:   e8 00 00 00 00          callq  1e <down_write+0x5>
>   1e:   55                      push   %rbp
>   1f:   48 b8 01 00 00 00 ff    movabs $0xffffffff00000001,%rax
>   26:   ff ff ff
>   29:   48 89 e5                mov    %rsp,%rbp
>   2c:   53                      push   %rbx
>   2d:   48 89 fb                mov    %rdi,%rbx
>   30:   f0 48 0f c1 07          lock xadd %rax,(%rdi)
>   35:   48 85 c0                test   %rax,%rax
>   38:   74 05                   je     3f <down_write+0x26>
>   3a:   e8 00 00 00 00          callq  3f <down_write+0x26>
>   3f:   65 48 8b 04 25 00 00    mov    %gs:0x0,%rax
>   46:   00 00
>   48:   48 89 43 38             mov    %rax,0x38(%rbx)
>   4c:   5b                      pop    %rbx
>   4d:   5d                      pop    %rbp
>   4e:   c3                      retq

I'm not convinced about the removal of this optimization at all.

> This doesn't seem to justify the code obfuscation and complexity. Use
> the generic implementation instead.
> 
> Signed-off-by: Michal Hocko <mhocko@suse.com>
> ---
>  arch/x86/include/asm/rwsem.h | 17 +++++------------
>  arch/x86/lib/rwsem.S         |  9 ---------
>  2 files changed, 5 insertions(+), 21 deletions(-)

Turn the argument around: would we be willing to save two instructions off the 
fast path of a commonly used locking construct, with such a simple optimization:

>  arch/x86/include/asm/rwsem.h | 17 ++++++++++++-----
>  arch/x86/lib/rwsem.S         |  9 +++++++++
>  2 files changed, 21 insertions(+), 5 deletions(-)

?

Yes!

So, if you want to remove the assembly code - can we achieve that without hurting 
the generated fast path, using the compiler?

Thanks,

	Ingo
Michal Hocko Feb. 3, 2016, 12:10 p.m. UTC | #2
On Wed 03-02-16 09:10:16, Ingo Molnar wrote:
> 
> * Michal Hocko <mhocko@kernel.org> wrote:
> 
> > From: Michal Hocko <mhocko@suse.com>
> > 
> > The x86 implementation of __down_write uses inline asm to optimize the
> > code flow. This, however, requires an additional hop for the slow path,
> > call_rwsem_down_write_failed, which has to
> > save_common_regs/restore_common_regs to preserve the calling convention.
> > The optimization doesn't buy much, because the fast path only saves one
> > register push/pop (rdx) compared to the generic implementation:
> > 
> > Before:
> > 0000000000000019 <down_write>:
> >   19:   e8 00 00 00 00          callq  1e <down_write+0x5>
> >   1e:   55                      push   %rbp
> >   1f:   48 ba 01 00 00 00 ff    movabs $0xffffffff00000001,%rdx
> >   26:   ff ff ff
> >   29:   48 89 f8                mov    %rdi,%rax
> >   2c:   48 89 e5                mov    %rsp,%rbp
> >   2f:   f0 48 0f c1 10          lock xadd %rdx,(%rax)
> >   34:   85 d2                   test   %edx,%edx
> >   36:   74 05                   je     3d <down_write+0x24>
> >   38:   e8 00 00 00 00          callq  3d <down_write+0x24>
> >   3d:   65 48 8b 04 25 00 00    mov    %gs:0x0,%rax
> >   44:   00 00
> >   46:   5d                      pop    %rbp
> >   47:   48 89 47 38             mov    %rax,0x38(%rdi)
> >   4b:   c3                      retq
> > 
> > After:
> > 0000000000000019 <down_write>:
> >   19:   e8 00 00 00 00          callq  1e <down_write+0x5>
> >   1e:   55                      push   %rbp
> >   1f:   48 b8 01 00 00 00 ff    movabs $0xffffffff00000001,%rax
> >   26:   ff ff ff
> >   29:   48 89 e5                mov    %rsp,%rbp
> >   2c:   53                      push   %rbx
> >   2d:   48 89 fb                mov    %rdi,%rbx
> >   30:   f0 48 0f c1 07          lock xadd %rax,(%rdi)
> >   35:   48 85 c0                test   %rax,%rax
> >   38:   74 05                   je     3f <down_write+0x26>
> >   3a:   e8 00 00 00 00          callq  3f <down_write+0x26>
> >   3f:   65 48 8b 04 25 00 00    mov    %gs:0x0,%rax
> >   46:   00 00
> >   48:   48 89 43 38             mov    %rax,0x38(%rbx)
> >   4c:   5b                      pop    %rbx
> >   4d:   5d                      pop    %rbp
> >   4e:   c3                      retq
> 
> I'm not convinced about the removal of this optimization at all.

OK, fair enough. As I mentioned in the cover letter, I do not really
insist on this patch. I just found the current code too ugly to keep
without a good reason: down_write is already a function call, so saving
one push/pop seems negligible compared to the call itself. Moreover,
this is a write lock, which is expected to be heavier; it is the read
path that is expected to be light, and contention (the slow path) is
expected on the write lock.

That being said, if you really believe the current code is easier to
maintain then I will not pursue this patch. The rest doesn't really
depend on it. I will just respin the follow-up x86-specific
__down_write_killable to follow the same code convention.

[...]
> So, if you want to remove the assembly code - can we achieve that without hurting 
> the generated fast path, using the compiler?

One way would be to do the same thing the mutex code does and make the fast
path an inline. This could bloat the kernel and would require some additional
changes to allow arch-specific reimplementations though, so I didn't want to
go down that path.
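
For illustration, an untested sketch of that direction (down_write() as a
header inline with a hypothetical out-of-line down_write_slowpath(); the
names and placement are assumptions, not part of this series):

/*
 * Sketch only: inline the fast path into the callers and keep the slow
 * path out of line, so only contended acquisitions pay the full C
 * calling convention.
 */
static inline void down_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				     (atomic_long_t *)&sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		down_write_slowpath(sem);	/* out of line, hypothetical */
}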

Patch

diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index d79a218675bc..1b5e89b3643d 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -102,18 +102,11 @@  static inline int __down_read_trylock(struct rw_semaphore *sem)
 static inline void __down_write(struct rw_semaphore *sem)
 {
 	long tmp;
-	asm volatile("# beginning down_write\n\t"
-		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
-		     /* adds 0xffff0001, returns the old value */
-		     "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
-		     /* was the active mask 0 before? */
-		     "  jz        1f\n"
-		     "  call call_rwsem_down_write_failed\n"
-		     "1:\n"
-		     "# ending down_write"
-		     : "+m" (sem->count), "=d" (tmp)
-		     : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS)
-		     : "memory", "cc");
+
+	tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+				     (atomic_long_t *)&sem->count);
+	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+		rwsem_down_write_failed(sem);
 }
 
 /*
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
index 40027db99140..ea5c7c177483 100644
--- a/arch/x86/lib/rwsem.S
+++ b/arch/x86/lib/rwsem.S
@@ -57,7 +57,6 @@ 
  * is also the input argument to these helpers)
  *
  * The following can clobber %rdx because the asm clobbers it:
- *   call_rwsem_down_write_failed
  *   call_rwsem_wake
  * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
  */
@@ -93,14 +92,6 @@  ENTRY(call_rwsem_down_read_failed)
 	ret
 ENDPROC(call_rwsem_down_read_failed)
 
-ENTRY(call_rwsem_down_write_failed)
-	save_common_regs
-	movq %rax,%rdi
-	call rwsem_down_write_failed
-	restore_common_regs
-	ret
-ENDPROC(call_rwsem_down_write_failed)
-
 ENTRY(call_rwsem_wake)
 	/* do nothing if still outstanding active readers */
 	__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)