
[v3,2/3] arm64: Prepare to switch to generic entry

Message ID 20240629085601.470241-3-ruanjinjie@huawei.com (mailing list archive)
State: New, archived
Series: arm64: entry: Convert to generic entry

Commit Message

Jinjie Ruan June 29, 2024, 8:56 a.m. UTC
Prepare to switch to generic entry for arm64:

 - Implement regs_irqs_disabled() using the interrupts_enabled() macro
   (see the sketch below).

 - Make on_thread_stack() compatible with generic entry.

 - Split report_syscall() into report_syscall_enter() and
   report_syscall_exit() so the subsequent switch to generic entry is
   clearer.

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
v2:
- Refactor report_syscall().
- Update the commit message.
---
 arch/arm64/include/asm/ptrace.h     |  5 +++++
 arch/arm64/include/asm/stacktrace.h |  5 ++++-
 arch/arm64/kernel/ptrace.c          | 29 ++++++++++++++++++++---------
 3 files changed, 29 insertions(+), 10 deletions(-)
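
For context on the first item: the generic entry code expects the
architecture to provide regs_irqs_disabled() so it can inspect the
interrupt state saved in pt_regs (for example on the irqentry exit path).
On arm64 this is simply the inverse of the interrupts_enabled() test on
the saved PSTATE.I bit. The stand-alone, user-space sketch below models
those semantics only; the struct and helpers are illustrative, with the
PSR_I_BIT value taken from arm64's uapi ptrace.h, and none of it is
kernel code:

/* Illustrative only: models regs_irqs_disabled()/interrupts_enabled() */
#include <stdbool.h>
#include <stdio.h>

#define PSR_I_BIT	0x00000080u	/* IRQ mask bit in the saved PSTATE */

struct pt_regs {
	unsigned long pstate;		/* PSTATE saved at exception entry */
};

/* Mirrors arm64's interrupts_enabled() macro */
static inline bool interrupts_enabled(const struct pt_regs *regs)
{
	return !(regs->pstate & PSR_I_BIT);
}

/* Mirrors the helper added by this patch */
static inline int regs_irqs_disabled(const struct pt_regs *regs)
{
	return !interrupts_enabled(regs);
}

int main(void)
{
	struct pt_regs irqs_on  = { .pstate = 0 };
	struct pt_regs irqs_off = { .pstate = PSR_I_BIT };

	printf("IRQs on:  regs_irqs_disabled() = %d\n", regs_irqs_disabled(&irqs_on));	/* 0 */
	printf("IRQs off: regs_irqs_disabled() = %d\n", regs_irqs_disabled(&irqs_off));	/* 1 */
	return 0;
}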

Comments

Kevin Brodsky Aug. 20, 2024, 11:42 a.m. UTC | #1
On 29/06/2024 10:56, Jinjie Ruan wrote:
> -#define on_thread_stack()	(on_task_stack(current, current_stack_pointer, 1))
> +static __always_inline bool on_thread_stack(void)
> +{
> +	return on_task_stack(current, current_stack_pointer, 1);
> +}

This looks reasonable but I wonder why this change is required (as the
commit message suggests)?

Kevin
Jinjie Ruan Aug. 20, 2024, 12:57 p.m. UTC | #2
On 2024/8/20 19:42, Kevin Brodsky wrote:
> On 29/06/2024 10:56, Jinjie Ruan wrote:
>> -#define on_thread_stack()	(on_task_stack(current, current_stack_pointer, 1))
>> +static __always_inline bool on_thread_stack(void)
>> +{
>> +	return on_task_stack(current, current_stack_pointer, 1);
>> +}
> 
> This looks reasonable but I wonder why this change is required (as the
> commit message suggests)?

There seemed to be a compilation problem at the time, but reverting this
hunk (keeping the macro) builds fine now.

> 
> Kevin
>
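
A note on the macro-to-inline change discussed above: the thread does not
pin down the exact build failure, but a general difference is that a
static inline is a real, type-checked function rather than a textual
expansion, so it can be used anywhere an ordinary function can. The
stand-alone sketch below only illustrates that difference; the demo names
(on_task_stack_demo, demo_stack_pointer) are made up and not taken from
the kernel:

/* Illustrative only: macro vs. static inline */
#include <stdbool.h>
#include <stdio.h>

static unsigned long demo_stack_pointer = 0x1800;	/* stand-in for current_stack_pointer */

static bool on_task_stack_demo(unsigned long sp, unsigned long size)
{
	/* Pretend the task stack spans [0x1000, 0x2000) */
	return sp >= 0x1000 && sp + size <= 0x2000;
}

/* Macro form: only exists textually at each expansion site */
#define on_thread_stack_macro()	(on_task_stack_demo(demo_stack_pointer, 1))

/* Inline form: a real function with a checked return type */
static inline bool on_thread_stack_inline(void)
{
	return on_task_stack_demo(demo_stack_pointer, 1);
}

int main(void)
{
	/* Both forms work as a plain expression... */
	printf("macro: %d, inline: %d\n",
	       on_thread_stack_macro(), on_thread_stack_inline());

	/* ...but only the inline form can be passed around as a function. */
	bool (*check)(void) = on_thread_stack_inline;
	printf("via function pointer: %d\n", check());
	return 0;
}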

Patch

diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 47ec58031f11..1857748ff017 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -360,6 +360,11 @@  static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
 	return 0;
 }
 
+static inline int regs_irqs_disabled(struct pt_regs *regs)
+{
+	return !interrupts_enabled(regs);
+}
+
 /* We must avoid circular header include via sched.h */
 struct task_struct;
 int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 66ec8caa6ac0..36bc1831f906 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -57,7 +57,10 @@  static inline bool on_task_stack(const struct task_struct *tsk,
 	return stackinfo_on_stack(&info, sp, size);
 }
 
-#define on_thread_stack()	(on_task_stack(current, current_stack_pointer, 1))
+static __always_inline bool on_thread_stack(void)
+{
+	return on_task_stack(current, current_stack_pointer, 1);
+}
 
 #ifdef CONFIG_VMAP_STACK
 DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 0d022599eb61..60fd85d5119d 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -2184,7 +2184,7 @@  enum ptrace_syscall_dir {
 	PTRACE_SYSCALL_EXIT,
 };
 
-static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
+static void report_syscall_enter(struct pt_regs *regs)
 {
 	int regno;
 	unsigned long saved_reg;
@@ -2207,13 +2207,24 @@  static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
 	 */
 	regno = (is_compat_task() ? 12 : 7);
 	saved_reg = regs->regs[regno];
-	regs->regs[regno] = dir;
+	regs->regs[regno] = PTRACE_SYSCALL_ENTER;
 
-	if (dir == PTRACE_SYSCALL_ENTER) {
-		if (ptrace_report_syscall_entry(regs))
-			forget_syscall(regs);
-		regs->regs[regno] = saved_reg;
-	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
+	if (ptrace_report_syscall_entry(regs))
+		forget_syscall(regs);
+	regs->regs[regno] = saved_reg;
+}
+
+static void report_syscall_exit(struct pt_regs *regs)
+{
+	int regno;
+	unsigned long saved_reg;
+
+	/* See comment for report_syscall_enter() */
+	regno = (is_compat_task() ? 12 : 7);
+	saved_reg = regs->regs[regno];
+	regs->regs[regno] = PTRACE_SYSCALL_EXIT;
+
+	if (!test_thread_flag(TIF_SINGLESTEP)) {
 		ptrace_report_syscall_exit(regs, 0);
 		regs->regs[regno] = saved_reg;
 	} else {
@@ -2233,7 +2244,7 @@  int syscall_trace_enter(struct pt_regs *regs)
 	unsigned long flags = read_thread_flags();
 
 	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
-		report_syscall(regs, PTRACE_SYSCALL_ENTER);
+		report_syscall_enter(regs);
 		if (flags & _TIF_SYSCALL_EMU)
 			return NO_SYSCALL;
 	}
@@ -2261,7 +2272,7 @@  void syscall_trace_exit(struct pt_regs *regs)
 		trace_sys_exit(regs, syscall_get_return_value(current, regs));
 
 	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
-		report_syscall(regs, PTRACE_SYSCALL_EXIT);
+		report_syscall_exit(regs);
 
 	rseq_syscall(regs);
 }