diff mbox series

x86/acpi: Use %rip-relative addressing in wakeup_64.S

Message ID 20231103104900.409470-1-ubizjak@gmail.com (mailing list archive)
State Handled Elsewhere, archived
Headers show
Series x86/acpi: Use %rip-relative addressing in wakeup_64.S | expand

Commit Message

Uros Bizjak Nov. 3, 2023, 10:48 a.m. UTC
An instruction with a %rip-relative address operand is one byte shorter than
its absolute-address counterpart, and is also compatible with position
independent executable (-fpie) builds.

No functional changes intended.

Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Cc: Len Brown <len.brown@intel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
---
 arch/x86/kernel/acpi/wakeup_64.S | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

Comments

Rafael J. Wysocki Nov. 6, 2023, 2:14 p.m. UTC | #1
On Fri, Nov 3, 2023 at 11:49 AM Uros Bizjak <ubizjak@gmail.com> wrote:
>
> Instruction with %rip-relative address operand is one byte shorter than
> its absolute address counterpart and is also compatible with position
> independent executable (-fpie) build.
>
> No functional changes intended.

I'm wondering what's the exact motivation for making this change.

Any urgent need for it doesn't seem to be there.

> Cc: "Rafael J. Wysocki" <rafael@kernel.org>
> Cc: Len Brown <len.brown@intel.com>
> Cc: Pavel Machek <pavel@ucw.cz>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Ingo Molnar <mingo@kernel.org>
> Cc: Borislav Petkov <bp@alien8.de>
> Cc: Dave Hansen <dave.hansen@linux.intel.com>
> Cc: "H. Peter Anvin" <hpa@zytor.com>
> Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
> ---
>  arch/x86/kernel/acpi/wakeup_64.S | 24 ++++++++++++------------
>  1 file changed, 12 insertions(+), 12 deletions(-)
>
> diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
> index d5d8a352eafa..94ff83f3d3fe 100644
> --- a/arch/x86/kernel/acpi/wakeup_64.S
> +++ b/arch/x86/kernel/acpi/wakeup_64.S
> @@ -17,7 +17,7 @@
>          * Hooray, we are in Long 64-bit mode (but still running in low memory)
>          */
>  SYM_FUNC_START(wakeup_long64)
> -       movq    saved_magic, %rax
> +       movq    saved_magic(%rip), %rax
>         movq    $0x123456789abcdef0, %rdx
>         cmpq    %rdx, %rax
>         je      2f
> @@ -33,14 +33,14 @@ SYM_FUNC_START(wakeup_long64)
>         movw    %ax, %es
>         movw    %ax, %fs
>         movw    %ax, %gs
> -       movq    saved_rsp, %rsp
> +       movq    saved_rsp(%rip), %rsp
>
> -       movq    saved_rbx, %rbx
> -       movq    saved_rdi, %rdi
> -       movq    saved_rsi, %rsi
> -       movq    saved_rbp, %rbp
> +       movq    saved_rbx(%rip), %rbx
> +       movq    saved_rdi(%rip), %rdi
> +       movq    saved_rsi(%rip), %rsi
> +       movq    saved_rbp(%rip), %rbp
>
> -       movq    saved_rip, %rax
> +       movq    saved_rip(%rip), %rax
>         ANNOTATE_RETPOLINE_SAFE
>         jmp     *%rax
>  SYM_FUNC_END(wakeup_long64)
> @@ -72,11 +72,11 @@ SYM_FUNC_START(do_suspend_lowlevel)
>
>         movq    $.Lresume_point, saved_rip(%rip)
>
> -       movq    %rsp, saved_rsp
> -       movq    %rbp, saved_rbp
> -       movq    %rbx, saved_rbx
> -       movq    %rdi, saved_rdi
> -       movq    %rsi, saved_rsi
> +       movq    %rsp, saved_rsp(%rip)
> +       movq    %rbp, saved_rbp(%rip)
> +       movq    %rbx, saved_rbx(%rip)
> +       movq    %rdi, saved_rdi(%rip)
> +       movq    %rsi, saved_rsi(%rip)
>
>         addq    $8, %rsp
>         movl    $3, %edi
> --
Uros Bizjak Nov. 6, 2023, 2:25 p.m. UTC | #2
On Mon, Nov 6, 2023 at 3:14 PM Rafael J. Wysocki <rafael@kernel.org> wrote:
>
> On Fri, Nov 3, 2023 at 11:49 AM Uros Bizjak <ubizjak@gmail.com> wrote:
> >
> > Instruction with %rip-relative address operand is one byte shorter than
> > its absolute address counterpart and is also compatible with position
> > independent executable (-fpie) build.
> >
> > No functional changes intended.
>
> I'm wondering what's the exact motivation for making this change.

Mainly to be consistent with what the compiler emits by default when a
symbol is accessed. As said in the commit message, the %rip-relative
access is also one byte shorter, and results in position-independent
code.

> Any urgent need for it doesn't seem to be there.

True. It's mostly a nice-to-have change.

Thanks,
Uros.

> > Cc: "Rafael J. Wysocki" <rafael@kernel.org>
> > Cc: Len Brown <len.brown@intel.com>
> > Cc: Pavel Machek <pavel@ucw.cz>
> > Cc: Thomas Gleixner <tglx@linutronix.de>
> > Cc: Ingo Molnar <mingo@kernel.org>
> > Cc: Borislav Petkov <bp@alien8.de>
> > Cc: Dave Hansen <dave.hansen@linux.intel.com>
> > Cc: "H. Peter Anvin" <hpa@zytor.com>
> > Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
> > ---
> >  arch/x86/kernel/acpi/wakeup_64.S | 24 ++++++++++++------------
> >  1 file changed, 12 insertions(+), 12 deletions(-)
> >
> > diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
> > index d5d8a352eafa..94ff83f3d3fe 100644
> > --- a/arch/x86/kernel/acpi/wakeup_64.S
> > +++ b/arch/x86/kernel/acpi/wakeup_64.S
> > @@ -17,7 +17,7 @@
> >          * Hooray, we are in Long 64-bit mode (but still running in low memory)
> >          */
> >  SYM_FUNC_START(wakeup_long64)
> > -       movq    saved_magic, %rax
> > +       movq    saved_magic(%rip), %rax
> >         movq    $0x123456789abcdef0, %rdx
> >         cmpq    %rdx, %rax
> >         je      2f
> > @@ -33,14 +33,14 @@ SYM_FUNC_START(wakeup_long64)
> >         movw    %ax, %es
> >         movw    %ax, %fs
> >         movw    %ax, %gs
> > -       movq    saved_rsp, %rsp
> > +       movq    saved_rsp(%rip), %rsp
> >
> > -       movq    saved_rbx, %rbx
> > -       movq    saved_rdi, %rdi
> > -       movq    saved_rsi, %rsi
> > -       movq    saved_rbp, %rbp
> > +       movq    saved_rbx(%rip), %rbx
> > +       movq    saved_rdi(%rip), %rdi
> > +       movq    saved_rsi(%rip), %rsi
> > +       movq    saved_rbp(%rip), %rbp
> >
> > -       movq    saved_rip, %rax
> > +       movq    saved_rip(%rip), %rax
> >         ANNOTATE_RETPOLINE_SAFE
> >         jmp     *%rax
> >  SYM_FUNC_END(wakeup_long64)
> > @@ -72,11 +72,11 @@ SYM_FUNC_START(do_suspend_lowlevel)
> >
> >         movq    $.Lresume_point, saved_rip(%rip)
> >
> > -       movq    %rsp, saved_rsp
> > -       movq    %rbp, saved_rbp
> > -       movq    %rbx, saved_rbx
> > -       movq    %rdi, saved_rdi
> > -       movq    %rsi, saved_rsi
> > +       movq    %rsp, saved_rsp(%rip)
> > +       movq    %rbp, saved_rbp(%rip)
> > +       movq    %rbx, saved_rbx(%rip)
> > +       movq    %rdi, saved_rdi(%rip)
> > +       movq    %rsi, saved_rsi(%rip)
> >
> >         addq    $8, %rsp
> >         movl    $3, %edi
> > --
Rafael J. Wysocki Nov. 6, 2023, 3:09 p.m. UTC | #3
On Mon, Nov 6, 2023 at 3:25 PM Uros Bizjak <ubizjak@gmail.com> wrote:
>
> On Mon, Nov 6, 2023 at 3:14 PM Rafael J. Wysocki <rafael@kernel.org> wrote:
> >
> > On Fri, Nov 3, 2023 at 11:49 AM Uros Bizjak <ubizjak@gmail.com> wrote:
> > >
> > > Instruction with %rip-relative address operand is one byte shorter than
> > > its absolute address counterpart and is also compatible with position
> > > independent executable (-fpie) build.
> > >
> > > No functional changes intended.
> >
> > I'm wondering what's the exact motivation for making this change.
>
> Mainly to be consistent with what the compiler emits by default when a
> symbol is accessed. As said in the commit message, the %rip-relative
> access is also one byte shorter, and results in position-independent
> code.
>
> > Any urgent need for it doesn't seem to be there.
>
> True. It's mostly a nice-to-have change.

OK, so

Acked-by: Rafael J. Wysocki <rafael@kernel.org>

and the decision what to do with it is up to the x86 folks.

> > > Cc: "Rafael J. Wysocki" <rafael@kernel.org>
> > > Cc: Len Brown <len.brown@intel.com>
> > > Cc: Pavel Machek <pavel@ucw.cz>
> > > Cc: Thomas Gleixner <tglx@linutronix.de>
> > > Cc: Ingo Molnar <mingo@kernel.org>
> > > Cc: Borislav Petkov <bp@alien8.de>
> > > Cc: Dave Hansen <dave.hansen@linux.intel.com>
> > > Cc: "H. Peter Anvin" <hpa@zytor.com>
> > > Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
> > > ---
> > >  arch/x86/kernel/acpi/wakeup_64.S | 24 ++++++++++++------------
> > >  1 file changed, 12 insertions(+), 12 deletions(-)
> > >
> > > diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
> > > index d5d8a352eafa..94ff83f3d3fe 100644
> > > --- a/arch/x86/kernel/acpi/wakeup_64.S
> > > +++ b/arch/x86/kernel/acpi/wakeup_64.S
> > > @@ -17,7 +17,7 @@
> > >          * Hooray, we are in Long 64-bit mode (but still running in low memory)
> > >          */
> > >  SYM_FUNC_START(wakeup_long64)
> > > -       movq    saved_magic, %rax
> > > +       movq    saved_magic(%rip), %rax
> > >         movq    $0x123456789abcdef0, %rdx
> > >         cmpq    %rdx, %rax
> > >         je      2f
> > > @@ -33,14 +33,14 @@ SYM_FUNC_START(wakeup_long64)
> > >         movw    %ax, %es
> > >         movw    %ax, %fs
> > >         movw    %ax, %gs
> > > -       movq    saved_rsp, %rsp
> > > +       movq    saved_rsp(%rip), %rsp
> > >
> > > -       movq    saved_rbx, %rbx
> > > -       movq    saved_rdi, %rdi
> > > -       movq    saved_rsi, %rsi
> > > -       movq    saved_rbp, %rbp
> > > +       movq    saved_rbx(%rip), %rbx
> > > +       movq    saved_rdi(%rip), %rdi
> > > +       movq    saved_rsi(%rip), %rsi
> > > +       movq    saved_rbp(%rip), %rbp
> > >
> > > -       movq    saved_rip, %rax
> > > +       movq    saved_rip(%rip), %rax
> > >         ANNOTATE_RETPOLINE_SAFE
> > >         jmp     *%rax
> > >  SYM_FUNC_END(wakeup_long64)
> > > @@ -72,11 +72,11 @@ SYM_FUNC_START(do_suspend_lowlevel)
> > >
> > >         movq    $.Lresume_point, saved_rip(%rip)
> > >
> > > -       movq    %rsp, saved_rsp
> > > -       movq    %rbp, saved_rbp
> > > -       movq    %rbx, saved_rbx
> > > -       movq    %rdi, saved_rdi
> > > -       movq    %rsi, saved_rsi
> > > +       movq    %rsp, saved_rsp(%rip)
> > > +       movq    %rbp, saved_rbp(%rip)
> > > +       movq    %rbx, saved_rbx(%rip)
> > > +       movq    %rdi, saved_rdi(%rip)
> > > +       movq    %rsi, saved_rsi(%rip)
> > >
> > >         addq    $8, %rsp
> > >         movl    $3, %edi
> > > --
diff mbox series

Patch

diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index d5d8a352eafa..94ff83f3d3fe 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -17,7 +17,7 @@ 
 	 * Hooray, we are in Long 64-bit mode (but still running in low memory)
 	 */
 SYM_FUNC_START(wakeup_long64)
-	movq	saved_magic, %rax
+	movq	saved_magic(%rip), %rax
 	movq	$0x123456789abcdef0, %rdx
 	cmpq	%rdx, %rax
 	je	2f
@@ -33,14 +33,14 @@  SYM_FUNC_START(wakeup_long64)
 	movw	%ax, %es
 	movw	%ax, %fs
 	movw	%ax, %gs
-	movq	saved_rsp, %rsp
+	movq	saved_rsp(%rip), %rsp
 
-	movq	saved_rbx, %rbx
-	movq	saved_rdi, %rdi
-	movq	saved_rsi, %rsi
-	movq	saved_rbp, %rbp
+	movq	saved_rbx(%rip), %rbx
+	movq	saved_rdi(%rip), %rdi
+	movq	saved_rsi(%rip), %rsi
+	movq	saved_rbp(%rip), %rbp
 
-	movq	saved_rip, %rax
+	movq	saved_rip(%rip), %rax
 	ANNOTATE_RETPOLINE_SAFE
 	jmp	*%rax
 SYM_FUNC_END(wakeup_long64)
@@ -72,11 +72,11 @@  SYM_FUNC_START(do_suspend_lowlevel)
 
 	movq	$.Lresume_point, saved_rip(%rip)
 
-	movq	%rsp, saved_rsp
-	movq	%rbp, saved_rbp
-	movq	%rbx, saved_rbx
-	movq	%rdi, saved_rdi
-	movq	%rsi, saved_rsi
+	movq	%rsp, saved_rsp(%rip)
+	movq	%rbp, saved_rbp(%rip)
+	movq	%rbx, saved_rbx(%rip)
+	movq	%rdi, saved_rdi(%rip)
+	movq	%rsi, saved_rsi(%rip)
 
 	addq	$8, %rsp
 	movl	$3, %edi