@@ -93,3 +93,23 @@ SYM_INNER_LABEL(x86_sbm_return, SYM_L_GLOBAL)
pop %rbp
RET
SYM_FUNC_END(x86_sbm_exec)
+
+.text
+
+/*
+ * arguments:
+ * rdi .. state (ignored)
+ * rsi .. target function
+ * rdx .. struct pt_regs
+ */
+SYM_FUNC_START(x86_sbm_proxy_call)
+ mov %rdx, %r10
+ mov %rsi, %r11
+ mov pt_regs_di(%r10), %rdi
+ mov pt_regs_si(%r10), %rsi
+ mov pt_regs_dx(%r10), %rdx
+ mov pt_regs_cx(%r10), %rcx
+ mov pt_regs_r8(%r10), %r8
+ mov pt_regs_r9(%r10), %r9
+ JMP_NOSPEC r11
+SYM_FUNC_END(x86_sbm_proxy_call)
@@ -28,6 +28,60 @@ asmlinkage int x86_sbm_exec(struct x86_sbm_state *state, sbm_func func,
unsigned long exc_tos);
extern char x86_sbm_return[];
+extern char __nosbm_text_start[], __nosbm_text_end[];
+
+/*************************************************************
+ * HACK: PROOF-OF-CONCEPT FIXUP CODE STARTS HERE
+ */
+
+typedef unsigned long (*sbm_proxy_call_fn)(struct x86_sbm_state *,
+ unsigned long func,
+ struct pt_regs *);
+
+asmlinkage unsigned long x86_sbm_proxy_call(struct x86_sbm_state *state,
+ unsigned long func,
+ struct pt_regs *regs);
+
+/**
+ * struct sbm_fixup - Describe a sandbox fault fixup.
+ * @target: Target function to be called.
+ * @proxy: Proxy call function.
+ */
+struct sbm_fixup {
+ void *target;
+ sbm_proxy_call_fn proxy;
+};
+
+static const struct sbm_fixup fixups[] =
+{
+ { }
+};
+
+/* Fix up a page fault if it is one of the known exceptions. */
+static bool fixup_sbm_call(struct x86_sbm_state *state,
+ struct pt_regs *regs, unsigned long address)
+{
+ const struct sbm_fixup *fixup;
+
+ for (fixup = fixups; fixup->target; ++fixup) {
+ if (address == (unsigned long)fixup->target) {
+ regs->ax = fixup->proxy(state, address, regs);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* Execution in sandbox mode continues here after fixup. */
+static void x86_sbm_continue(void)
+{
+}
+
+/*
+ * HACK: PROOF-OF-CONCEPT FIXUP CODE ENDS HERE
+ *************************************************************/
+
union {
struct x86_sbm_state state;
char page[PAGE_SIZE];
@@ -140,8 +194,8 @@ static int map_kernel(struct x86_sbm_state *state)
if (err)
return err;
- err = map_range(state, (unsigned long)__entry_text_start,
- (unsigned long)__entry_text_end, PAGE_KERNEL_ROX);
+ err = map_range(state, (unsigned long)__nosbm_text_start,
+ (unsigned long)__nosbm_text_end, PAGE_KERNEL_ROX);
if (err)
return err;
@@ -482,6 +536,13 @@ void handle_sbm_fault(struct pt_regs *regs, unsigned long error_code,
if (spurious_sbm_fault(state, error_code, address))
return;
+ if ((error_code & ~X86_PF_PROT) == (X86_PF_USER | X86_PF_INSTR) &&
+ fixup_sbm_call(state, regs, address)) {
+ /* Return back to sandbox... */
+ regs->ip = (unsigned long)x86_sbm_continue;
+ return;
+ }
+
/*
* Force -EFAULT unless the fault was due to a user-mode instruction
* fetch from the designated return address.
@@ -139,8 +139,17 @@ SECTIONS
STATIC_CALL_TEXT
ALIGN_ENTRY_TEXT_BEGIN
+#ifdef CONFIG_SANDBOX_MODE
+ . = ALIGN(PAGE_SIZE);
+ __nosbm_text_start = .;
+#endif
*(.text..__x86.rethunk_untrain)
ENTRY_TEXT
+#ifdef CONFIG_SANDBOX_MODE
+ *(.text.nosbm)
+ . = ALIGN(PAGE_SIZE);
+ __nosbm_text_end = .;
+#endif
#ifdef CONFIG_CPU_SRSO
/*
@@ -267,6 +267,8 @@ int arch_sbm_map_writable(struct sbm *sbm, const struct sbm_buf *buf);
*/
int arch_sbm_exec(struct sbm *sbm, sbm_func func, void *data);
+#define __nosbm __section(".text.nosbm")
+
#else /* !CONFIG_HAVE_ARCH_SBM */
static inline int arch_sbm_init(struct sbm *sbm)
@@ -295,6 +297,8 @@ static inline int arch_sbm_exec(struct sbm *sbm, sbm_func func, void *data)
return func(data);
}
+#define __nosbm
+
#endif /* CONFIG_HAVE_ARCH_SBM */
#else /* !CONFIG_SANDBOX_MODE */
@@ -340,6 +344,8 @@ static inline void *sbm_map_writable(struct sbm *sbm, const void *ptr,
return (void *)ptr;
}
+#define __nosbm
+
#endif /* CONFIG_SANDBOX_MODE */
/**