diff mbox

[v2,4/9] x86, memcpy_mcsafe: add write-protection-fault handling

Message ID 152532354268.17218.11923624861859552990.stgit@dwillia2-desk3.amr.corp.intel.com
State New, archived
Headers show

Commit Message

Dan Williams May 3, 2018, 4:59 a.m. UTC
In preparation for using memcpy_mcsafe() to handle user copies it needs
to be able to handle write-protection faults while writing user pages.
Add MMU-fault handlers alongside the machine-check exception handlers.

Note that the machine check fault exception handling makes assumptions
about source buffer alignment and poison alignment. In the write fault
case, given the destination buffer is arbitrarily aligned, it needs a
separate / additional fault handling approach. The mcsafe_handle_tail()
helper is reused. The @limit argument is set to @len since there is no
safety concern about retriggering an MMU fault, and this simplifies the
assembly.

Cc: <x86@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Co-developed-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 arch/x86/include/asm/uaccess_64.h |    3 +++
 arch/x86/lib/memcpy_64.S          |   14 ++++++++++++++
 arch/x86/lib/usercopy_64.c        |   17 +++++++++++++++++
 3 files changed, 34 insertions(+)

Comments

Mika Penttilä May 3, 2018, 5:29 a.m. UTC | #1
On 05/03/2018 07:59 AM, Dan Williams wrote:
> In preparation for using memcpy_mcsafe() to handle user copies it needs
> to be to handle write-protection faults while writing user pages. Add
> MMU-fault handlers alongside the machine-check exception handlers.
> 
> Note that the machine check fault exception handling makes assumptions
> about source buffer alignment and poison alignment. In the write fault
> case, given the destination buffer is arbitrarily aligned, it needs a
> separate / additional fault handling approach. The mcsafe_handle_tail()
> helper is reused. The @limit argument is set to @len since there is no
> safety concern about retriggering an MMU fault, and this simplifies the
> assembly.
> 

> diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
> index 75d3776123cc..9787f5ee0cf9 100644
> --- a/arch/x86/lib/usercopy_64.c
> +++ b/arch/x86/lib/usercopy_64.c
> @@ -75,6 +75,23 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
>  	return len;
>  }
>  
> +/*
> + * Similar to copy_user_handle_tail, probe for the write fault point,
> + * but reuse __memcpy_mcsafe in case a new read error is encountered.
> + * clac() is handled in _copy_to_iter_mcsafe().
> + */
> +__visible unsigned long
> +mcsafe_handle_tail(char *to, char *from, unsigned len)
> +{
> +	for (; len; --len, to++) {
> +		unsigned long rem = memcpy_mcsafe(to, from, 1);
> +


Hmm why not 
	for (; len; --len, from++, to++)



> +		if (rem)
> +			break;
> +	}
> +	return len;
> +}


--Mika
Dan Williams May 3, 2018, 2:19 p.m. UTC | #2
On Wed, May 2, 2018 at 10:29 PM, Mika Penttilä
<mika.penttila@nextfour.com> wrote:
> On 05/03/2018 07:59 AM, Dan Williams wrote:
>> In preparation for using memcpy_mcsafe() to handle user copies it needs
>> to be to handle write-protection faults while writing user pages. Add
>> MMU-fault handlers alongside the machine-check exception handlers.
>>
>> Note that the machine check fault exception handling makes assumptions
>> about source buffer alignment and poison alignment. In the write fault
>> case, given the destination buffer is arbitrarily aligned, it needs a
>> separate / additional fault handling approach. The mcsafe_handle_tail()
>> helper is reused. The @limit argument is set to @len since there is no
>> safety concern about retriggering an MMU fault, and this simplifies the
>> assembly.
>>
>
>> diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
>> index 75d3776123cc..9787f5ee0cf9 100644
>> --- a/arch/x86/lib/usercopy_64.c
>> +++ b/arch/x86/lib/usercopy_64.c
>> @@ -75,6 +75,23 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
>>       return len;
>>  }
>>
>> +/*
>> + * Similar to copy_user_handle_tail, probe for the write fault point,
>> + * but reuse __memcpy_mcsafe in case a new read error is encountered.
>> + * clac() is handled in _copy_to_iter_mcsafe().
>> + */
>> +__visible unsigned long
>> +mcsafe_handle_tail(char *to, char *from, unsigned len)
>> +{
>> +     for (; len; --len, to++) {
>> +             unsigned long rem = memcpy_mcsafe(to, from, 1);
>> +
>
>
> Hmm why not
>         for (; len; --len, from++, to++)
>
>

Why not indeed. Great catch, I'll fix this and extend the unit test to
verify the data transfer as well.
diff mbox

Patch

diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 62546b3a398e..c63efc07891f 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -194,4 +194,7 @@  __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
 unsigned long
 copy_user_handle_tail(char *to, char *from, unsigned len);
 
+unsigned long
+mcsafe_handle_tail(char *to, char *from, unsigned len);
+
 #endif /* _ASM_X86_UACCESS_64_H */
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index f01a88391c98..c3b527a9f95d 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -265,9 +265,23 @@  EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
 	mov	%ecx, %eax
 	ret
 
+	/*
+	 * For write fault handling, given the destination is unaligned,
+	 * we handle faults on multi-byte writes with a byte-by-byte
+	 * copy up to the write-protected page.
+	 */
+.E_write_words:
+	shll	$3, %ecx
+	addl	%edx, %ecx
+	movl	%ecx, %edx
+	jmp mcsafe_handle_tail
+
 	.previous
 
 	_ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
 	_ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
 	_ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
+	_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
+	_ASM_EXTABLE(.L_write_words, .E_write_words)
+	_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
 #endif
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 75d3776123cc..9787f5ee0cf9 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -75,6 +75,23 @@  copy_user_handle_tail(char *to, char *from, unsigned len)
 	return len;
 }
 
+/*
+ * Similar to copy_user_handle_tail, probe for the write fault point,
+ * but reuse __memcpy_mcsafe in case a new read error is encountered.
+ * clac() is handled in _copy_to_iter_mcsafe().
+ */
+__visible unsigned long
+mcsafe_handle_tail(char *to, char *from, unsigned len)
+{
+	/* advance @from in lockstep with @to; see review comment #1 */
+	for (; len; --len, from++, to++) {
+		unsigned long rem = memcpy_mcsafe(to, from, 1);
+
+		if (rem)
+			break;
+	}
+	return len;
+}
+
 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
 /**
  * clean_cache_range - write back a cache range with CLWB