diff --git a/Documentation/arm64/tagged-pointers.rst b/Documentation/arm64/tagged-pointers.rst
--- a/Documentation/arm64/tagged-pointers.rst
+++ b/Documentation/arm64/tagged-pointers.rst
@@ -53,12 +53,17 @@ visibility.
Preserving tags
---------------
-Non-zero tags are not preserved when delivering signals. This means that
-signal handlers in applications making use of tags cannot rely on the
-tag information for user virtual addresses being maintained for fields
-inside siginfo_t. One exception to this rule is for signals raised in
-response to watchpoint debug exceptions, where the tag information will
-be preserved.
+Non-zero tags are not preserved in the fault address fields
+siginfo.si_addr or sigcontext.fault_address when delivering
+signals. This means that signal handlers in applications making use
+of tags cannot rely on the tag information for user virtual addresses
+being maintained in these fields. One exception to this rule is for
+signals raised in response to watchpoint debug exceptions, where the
+tag information will be preserved.
+
+The fault address tag is preserved in the fault_addr_top_byte field of
+the signal frame record fault_addr_top_byte_context, which is present
+for signals raised in response to data aborts and instruction aborts.
The architecture prevents the use of a tagged PC, so the upper byte will
be set to a sign-extension of bit 55 on exception return.
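
(Editorial illustration, not part of the patch: the sketch below shows how
a userspace signal handler might walk the _aarch64_ctx records to pick up
the new context and rebuild the tagged fault address. It assumes a libc
whose arm64 mcontext_t exposes the __reserved[] record area by name, as
glibc's does, and it mirrors the proposed UAPI definitions locally since
they are not in installed headers; all struct, variable and function names
other than FAULT_ADDR_TOP_BYTE_MAGIC are invented for the example.)

#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>
#include <unistd.h>

/* Local mirrors of the UAPI additions proposed in this patch. */
#define FAULT_ADDR_TOP_BYTE_MAGIC	0x46544201

struct aarch64_ctx_hdr {	/* same layout as struct _aarch64_ctx */
	uint32_t magic;
	uint32_t size;
};

struct ftb_ctx {		/* same layout as fault_addr_top_byte_context */
	struct aarch64_ctx_hdr head;
	uint8_t flags;
	uint8_t fault_addr_top_byte;
	uint8_t fault_addr_top_byte_mask;
	uint8_t reserved[5];
};

static void segv_handler(int sig, siginfo_t *si, void *ucv)
{
	ucontext_t *uc = ucv;
	const unsigned char *rec = uc->uc_mcontext.__reserved;
	uint64_t addr = (uint64_t)(uintptr_t)si->si_addr;  /* tag bits cleared */

	(void)sig;
	/*
	 * Walk the records until the null terminator. Note (1) in
	 * sigcontext.h requires the new record to live in __reserved[],
	 * so there is no need to follow extra_context.
	 */
	for (;;) {
		struct aarch64_ctx_hdr hdr;

		memcpy(&hdr, rec, sizeof(hdr));
		if (!hdr.magic && !hdr.size)
			break;
		if (hdr.magic == FAULT_ADDR_TOP_BYTE_MAGIC) {
			struct ftb_ctx ftb;

			memcpy(&ftb, rec, sizeof(ftb));
			/* Merge the preserved tag back into bits 56-63. */
			addr &= ~((uint64_t)ftb.fault_addr_top_byte_mask << 56);
			addr |= (uint64_t)ftb.fault_addr_top_byte << 56;
			break;
		}
		rec += hdr.size;
	}

	/* fprintf is not async-signal-safe; fine for a throwaway demo. */
	fprintf(stderr, "fault at tagged address 0x%016llx\n",
		(unsigned long long)addr);
	_exit(1);
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction = segv_handler,
		.sa_flags = SA_SIGINFO,
	};

	sigaction(SIGSEGV, &sa, NULL);
	/* TBI lets EL0 use tagged pointers; 0x1000 is assumed unmapped. */
	*(volatile int *)(uintptr_t)((0x2aULL << 56) | 0x1000) = 0;
	return 0;
}
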
diff --git a/arch/arm64/include/asm/signal.h b/arch/arm64/include/asm/signal.h
--- a/arch/arm64/include/asm/signal.h
+++ b/arch/arm64/include/asm/signal.h
@@ -9,6 +9,9 @@ struct arch_private_siginfo {
/* FAR_EL1 value */
unsigned long fault_address;
+ /* Mask of defined bits in the top byte of FAR_EL1 */
+ unsigned char fault_address_top_byte_mask;
+
/* Sanitized ESR_EL1 value, or FSR/syscall number in compat mode */
unsigned long error_code;
};
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -27,8 +27,8 @@ void unregister_undef_hook(struct undef_hook *hook);
void force_signal_inject(int signal, int code, unsigned long address);
void arm64_notify_segfault(unsigned long addr);
void arm64_force_sig_fault(int signo, int code, void __user *addr,
- unsigned long far, unsigned long esr,
- const char *str);
+ unsigned long far, unsigned char far_tb_mask,
+ unsigned long esr, const char *str);
void arm64_force_sig_mceerr(int code, void __user *addr, short lsb,
unsigned long far, unsigned long esr,
const char *str);
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -44,11 +44,12 @@ struct sigcontext {
*
* 0x210 fpsimd_context
* 0x10 esr_context
+ * 0x10 fault_addr_top_byte_context
* 0x8a0 sve_context (vl <= 64) (optional)
* 0x20 extra_context (optional)
* 0x10 terminator (null _aarch64_ctx)
*
- * 0x510 (reserved for future allocation)
+ * 0x500 (reserved for future allocation)
*
* New records that can exceed this space need to be opt-in for userspace, so
* that an expanded signal frame is not generated unexpectedly. The mechanism
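
(Editorial aside: a quick cross-check of the revised figure, assuming the
4096-byte (0x1000) sigcontext.__reserved[] area declared in this header:

	0x210 + 0x10 + 0x10 + 0x8a0 + 0x20 + 0x10 = 0xb00
	0x1000 - 0xb00 = 0x500

i.e. the new 0x10-byte fault_addr_top_byte_context comes straight out of
the previous 0x510 of headroom.)
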
@@ -94,17 +95,28 @@ struct esr_context {
__u64 esr;
};
+/* Top byte of fault address (normally not exposed via si_addr) */
+#define FAULT_ADDR_TOP_BYTE_MAGIC 0x46544201
+
+struct fault_addr_top_byte_context {
+ struct _aarch64_ctx head;
+ __u8 flags;
+ __u8 fault_addr_top_byte;
+ __u8 fault_addr_top_byte_mask;
+ __u8 __reserved[5];
+};
+
/*
* extra_context: describes extra space in the signal frame for
* additional structures that don't fit in sigcontext.__reserved[].
*
* Note:
*
- * 1) fpsimd_context, esr_context and extra_context must be placed in
- * sigcontext.__reserved[] if present. They cannot be placed in the
- * extra space. Any other record can be placed either in the extra
- * space or in sigcontext.__reserved[], unless otherwise specified in
- * this file.
+ * 1) fpsimd_context, esr_context, fault_addr_top_byte_context and
+ * extra_context must be placed in sigcontext.__reserved[] if present.
+ * They cannot be placed in the extra space. Any other record can be
+ * placed either in the extra space or in sigcontext.__reserved[],
+ * unless otherwise specified in this file.
*
* 2) There must not be more than one extra_context.
*
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -232,7 +232,7 @@ static void send_user_sigtrap(int si_code)
local_irq_enable();
arm64_force_sig_fault(SIGTRAP, si_code,
- (void __user *)instruction_pointer(regs), 0, 0,
+ (void __user *)instruction_pointer(regs), 0, 0, 0,
"User debug trap");
}
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -198,7 +198,7 @@ static void ptrace_hbptriggered(struct perf_event *bp,
}
#endif
arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT,
- (void __user *)(bkpt->trigger), 0, 0,
+ (void __user *)(bkpt->trigger), 0, 0, 0,
desc);
}
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -55,6 +55,7 @@ struct rt_sigframe_user_layout {
unsigned long fpsimd_offset;
unsigned long esr_offset;
+ unsigned long ftb_offset;
unsigned long sve_offset;
unsigned long extra_offset;
unsigned long end_offset;
@@ -383,6 +384,7 @@ static int parse_user_sigframe(struct user_ctxs *user,
break;
case ESR_MAGIC:
+ case FAULT_ADDR_TOP_BYTE_MAGIC:
/* ignore */
break;
@@ -582,6 +584,14 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
sizeof(struct esr_context));
if (err)
return err;
+ }
+
+ if (add_all || info->arch.fault_address_top_byte_mask) {
+ err = sigframe_alloc(
+ user, &user->ftb_offset,
+ sizeof(struct fault_addr_top_byte_context));
+ if (err)
+ return err;
}
if (system_supports_sve()) {
@@ -644,6 +654,21 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
__put_user_error(info->arch.error_code, &esr_ctx->esr, err);
}
+ if (err == 0 && user->ftb_offset) {
+ struct fault_addr_top_byte_context __user *ftb_ctx =
+ apply_user_offset(user, user->ftb_offset);
+
+ __put_user_error(FAULT_ADDR_TOP_BYTE_MAGIC,
+ &ftb_ctx->head.magic, err);
+ __put_user_error(sizeof(*ftb_ctx), &ftb_ctx->head.size, err);
+ __put_user_error(0, &ftb_ctx->flags, err);
+ __put_user_error((info->arch.fault_address >> 56) &
+ info->arch.fault_address_top_byte_mask,
+ &ftb_ctx->fault_addr_top_byte, err);
+ __put_user_error(info->arch.fault_address_top_byte_mask,
+ &ftb_ctx->fault_addr_top_byte_mask, err);
+ }
+
/* Scalable Vector Extension state, if present */
if (system_supports_sve() && err == 0 && user->sve_offset) {
struct sve_context __user *sve_ctx =
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -295,8 +295,8 @@ static unsigned long esr_to_error_code(unsigned long esr, unsigned long far)
}
void arm64_force_sig_fault(int signo, int code, void __user *addr,
- unsigned long far, unsigned long esr,
- const char *str)
+ unsigned long far, unsigned char far_tb_mask,
+ unsigned long esr, const char *str)
{
arm64_show_signal(signo, esr, str);
if (signo == SIGKILL) {
@@ -309,6 +309,7 @@ void arm64_force_sig_fault(int signo, int code, void __user *addr,
info.si_code = code;
info.si_addr = addr;
info.arch.fault_address = far;
+ info.arch.fault_address_top_byte_mask = far_tb_mask;
info.arch.error_code = esr_to_error_code(esr, far);
force_sig_info(&info);
}
@@ -329,6 +330,7 @@ void arm64_force_sig_mceerr(int code, void __user *addr, short lsb,
info.si_addr = addr;
info.si_addr_lsb = lsb;
info.arch.fault_address = far;
+ info.arch.fault_address_top_byte_mask = 0xff;
info.arch.error_code = esr_to_error_code(esr, far);
force_sig_info(&info);
}
@@ -346,7 +348,7 @@ void arm64_notify_die(const char *str, struct pt_regs *regs,
{
if (user_mode(regs)) {
WARN_ON(regs != current_pt_regs());
- arm64_force_sig_fault(signo, sicode, addr, 0, esr, str);
+ arm64_force_sig_fault(signo, sicode, addr, 0, 0, esr, str);
} else {
die(str, regs, esr);
}
@@ -893,7 +895,7 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
void __user *pc = (void __user *)instruction_pointer(regs);
- arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc, 0, esr,
+ arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc, 0, 0, esr,
"Bad EL0 synchronous exception");
}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -332,7 +332,7 @@ static void do_bad_area(unsigned long far, unsigned int esr,
if (user_mode(regs)) {
const struct fault_info *inf = esr_to_fault_info(esr);
arm64_force_sig_fault(inf->sig, inf->code, (void __user *)addr,
- far, esr, inf->name);
+ far, 0xff, esr, inf->name);
} else {
__do_kernel_fault(addr, esr, regs);
}
@@ -520,8 +520,8 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
* We had some memory, but were unable to successfully fix up
* this page fault.
*/
- arm64_force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr, far, esr,
- inf->name);
+ arm64_force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr,
+ far, 0xff, esr, inf->name);
} else if (fault & (VM_FAULT_HWPOISON_LARGE | VM_FAULT_HWPOISON)) {
unsigned int lsb;
@@ -538,7 +538,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
*/
arm64_force_sig_fault(SIGSEGV,
fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR,
- (void __user *)addr, far, esr,
+ (void __user *)addr, far, 0xff, esr,
inf->name);
}
The kernel currently clears the tag bits (i.e. bits 56-63) in the fault
address exposed via siginfo.si_addr and sigcontext.fault_address. However,
the tag bits may be needed by tools in order to accurately diagnose memory
errors, such as HWASan [1] or future tools based on the Memory Tagging
Extension (MTE).

We should not stop clearing these bits in the existing fault address
fields, because there may be existing userspace applications that are
expecting the tag bits to be cleared. Instead, create a
fault_addr_top_byte_context in sigcontext (similar to the existing
esr_context), and store the tag bits of FAR_EL1 there.

[1] http://clang.llvm.org/docs/HardwareAssistedAddressSanitizerDesign.html

Signed-off-by: Peter Collingbourne <pcc@google.com>
---
v6:
- move fault address and fault code into the kernel_siginfo data structure
- split the patch in three since it was getting large and now has generic
  and arch-specific parts

v5:
- add padding to fault_addr_top_byte_context in order to ensure the
  correct size and preserve sp alignment

v4:
- expose only the tag bits in the context instead of the entire FAR_EL1
- remove mention of the new context from the sigcontext.__reserved[] note

v3:
- add documentation to tagged-pointers.rst
- update comments in sigcontext.h

v2:
- revert changes to hw_breakpoint.c
- rename set_thread_esr to set_thread_far_esr

 Documentation/arm64/tagged-pointers.rst  | 17 +++++++++++------
 arch/arm64/include/asm/signal.h          |  3 +++
 arch/arm64/include/asm/traps.h           |  4 ++--
 arch/arm64/include/uapi/asm/sigcontext.h | 24 ++++++++++++++++++------
 arch/arm64/kernel/debug-monitors.c       |  2 +-
 arch/arm64/kernel/ptrace.c               |  2 +-
 arch/arm64/kernel/signal.c               | 25 +++++++++++++++++++++++++
 arch/arm64/kernel/traps.c                | 10 ++++++----
 arch/arm64/mm/fault.c                    |  8 ++++----
 9 files changed, 71 insertions(+), 24 deletions(-)