
[v0.8,2/6] mm, x86/uaccess: add userspace atomic helpers

Message ID 20211104195804.83240-3-posk@google.com (mailing list archive)
State New
Series sched,mm,x86/uaccess: implement User Managed Concurrency Groups

Commit Message

Peter Oskolkov Nov. 4, 2021, 7:58 p.m. UTC
In addition to futexes needing to do atomic operations in userspace,
a second use case is now in the works (UMCG, see
https://lore.kernel.org/all/20210917180323.278250-1-posk@google.com/),
so a generic facility to perform these operations has been called for
(see https://lore.kernel.org/all/87ilyk9xc0.ffs@tglx/).

Add a set of generic helpers to perform 32/64-bit xchg and cmpxchg
operations in userspace. Also implement the required
architecture-specific support on x86_64.
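
For illustration, the may-fault variants are meant for regular process
context and to be retried on -EAGAIN; a minimal sketch of a caller (the
function below is hypothetical, only cmpxchg_user_64() and its documented
return codes come from this patch):

static int bump_user_counter(u64 __user *uaddr)
{
	u64 curr = 0;	/* refreshed with the observed value on -EAGAIN */
	int ret;

	do {
		/* May fault and sleep: not for atomic/preempt-disabled context. */
		ret = cmpxchg_user_64(uaddr, &curr, curr + 1);
	} while (ret == -EAGAIN);

	return ret;	/* 0 on success, -EINVAL or -EFAULT on error */
}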

Signed-off-by: Peter Oskolkov <posk@google.com>
---
 arch/x86/include/asm/uaccess_64.h |  93 +++++++++++
 include/linux/uaccess.h           |  46 ++++++
 mm/maccess.c                      | 264 ++++++++++++++++++++++++++++++
 3 files changed, 403 insertions(+)

--
2.25.1

Comments

kernel test robot Nov. 5, 2021, 6:10 a.m. UTC | #1
Hi Peter,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on 8ea9183db4ad8afbcb7089a77c23eaf965b0cacd]

url:    https://github.com/0day-ci/linux/commits/Peter-Oskolkov/sched-mm-x86-uaccess-implement-User-Managed-Concurrency-Groups/20211105-035945
base:   8ea9183db4ad8afbcb7089a77c23eaf965b0cacd
config: arm-randconfig-r026-20211105 (attached as .config)
compiler: clang version 14.0.0 (https://github.com/llvm/llvm-project 847a6807332b13f43704327c2d30103ec0347c77)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install arm cross compiling tool for clang build
        # apt-get install binutils-arm-linux-gnueabi
        # https://github.com/0day-ci/linux/commit/23012d2a531975af519f3a7c7a3152197efcbe13
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Peter-Oskolkov/sched-mm-x86-uaccess-implement-User-Managed-Concurrency-Groups/20211105-035945
        git checkout 23012d2a531975af519f3a7c7a3152197efcbe13
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 ARCH=arm 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

>> mm/maccess.c:491: warning: expecting prototype for xchg_user_[32|64][_nofault|]() - exchange 32/64(). Prototype was for xchg_user_32_nofault() instead


vim +491 mm/maccess.c

   474	
   475	/**
   476	 * xchg_user_[32|64][_nofault|]() - exchange 32/64-bit values
   477	 * @uaddr:   Destination address, in user space;
   478	 * @val:     Source address, in kernel space.
   479	 *
   480	 * This is the standard atomic xchg: exchange values pointed to by @uaddr and @val.
   481	 *
   482	 * The _nofault versions don't fault and can be used in
   483	 * atomic/preempt-disabled contexts.
   484	 *
   485	 * Return:
   486	 * 0      : OK/success;
   487	 * -EINVAL: @uaddr is not properly aligned ('may fault' versions only);
   488	 * -EFAULT: memory access error (including mis-aligned @uaddr in _nofault).
   489	 */
   490	int xchg_user_32_nofault(u32 __user *uaddr, u32 *val)
 > 491	{
   492		int ret;
   493	
   494		if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
   495			return -EFAULT;
   496	
   497		pagefault_disable();
   498	
   499		if (!user_access_begin(uaddr, sizeof(*uaddr))) {
   500			pagefault_enable();
   501			return -EFAULT;
   502		}
   503	
   504		ret = __try_xchg_user_32(val, uaddr, *val);
   505		user_access_end();
   506	
   507		pagefault_enable();
   508	
   509		return ret;
   510	}
   511	
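
A possible way to silence this (a sketch only, not part of the posted
patch): kernel-doc treats a "/**" comment as documenting the single
function that follows it, so either name xchg_user_32_nofault() in the
comment or demote this duplicated block (the same text already exists in
include/linux/uaccess.h) to a plain comment:

    -/**
    - * xchg_user_[32|64][_nofault|]() - exchange 32/64-bit values
    +/*
    + * xchg_user_[32|64][_nofault|]() - exchange 32/64-bit values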

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
Tao Zhou Nov. 9, 2021, 4:18 a.m. UTC | #2
On Thu, Nov 04, 2021 at 12:58:00PM -0700, Peter Oskolkov wrote:

> In addition to futexes needing to do atomic operations in userspace,
> a second use case is now in the works (UMCG, see
> https://lore.kernel.org/all/20210917180323.278250-1-posk@google.com/),
> so a generic facility to perform these operations has been called for
> (see https://lore.kernel.org/all/87ilyk9xc0.ffs@tglx/).
> 
> Add a set of generic helpers to perform 32/64-bit xchg and cmpxchg
> operations in userspace. Also implement the required
> architecture-specific support on x86_64.
> 
> Signed-off-by: Peter Oskolkov <posk@google.com>
> ---
>  arch/x86/include/asm/uaccess_64.h |  93 +++++++++++
>  include/linux/uaccess.h           |  46 ++++++
>  mm/maccess.c                      | 264 ++++++++++++++++++++++++++++++
>  3 files changed, 403 insertions(+)
> 
> diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
> index 45697e04d771..41e2f96d3ec4 100644
> --- a/arch/x86/include/asm/uaccess_64.h
> +++ b/arch/x86/include/asm/uaccess_64.h
> @@ -79,4 +79,97 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
>  	kasan_check_write(dst, size);
>  	return __copy_user_flushcache(dst, src, size);
>  }
> +
> +#define ARCH_HAS_ATOMIC_UACCESS_HELPERS 1
> +
> +static inline int __try_cmpxchg_user_32(u32 *uval, u32 __user *uaddr,
> +						u32 oldval, u32 newval)
> +{
> +	int ret = 0;
> +
> +	asm volatile("\n"
> +		"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
> +		"2:\n"
> +		"\t.section .fixup, \"ax\"\n"
> +		"3:\tmov     %3, %0\n"
> +		"\tjmp     2b\n"
> +		"\t.previous\n"
> +		_ASM_EXTABLE_UA(1b, 3b)
> +		: "+r" (ret), "=a" (oldval), "+m" (*uaddr)
> +		: "i" (-EFAULT), "r" (newval), "1" (oldval)
> +		: "memory"
> +	);
> +	*uval = oldval;
> +	return ret;
> +}
> +
> +static inline int __try_cmpxchg_user_64(u64 *uval, u64 __user *uaddr,
> +						u64 oldval, u64 newval)
> +{
> +	int ret = 0;
> +
> +	asm volatile("\n"
> +		"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"
> +		"2:\n"
> +		"\t.section .fixup, \"ax\"\n"
> +		"3:\tmov     %3, %0\n"
> +		"\tjmp     2b\n"
> +		"\t.previous\n"
> +		_ASM_EXTABLE_UA(1b, 3b)
> +		: "+r" (ret), "=a" (oldval), "+m" (*uaddr)
> +		: "i" (-EFAULT), "r" (newval), "1" (oldval)
> +		: "memory"
> +	);
> +	*uval = oldval;
> +	return ret;
> +}
> +
> +static inline int __try_xchg_user_32(u32 *oval, u32 __user *uaddr, u32 newval)
> +{
> +	u32 oldval = 0;
> +	int ret = 0;
> +
> +	asm volatile("\n"
> +		"1:\txchgl %0, %2\n"
> +		"2:\n"
> +		"\t.section .fixup, \"ax\"\n"
> +		"3:\tmov     %3, %1\n"
> +		"\tjmp     2b\n"
> +		"\t.previous\n"
> +		_ASM_EXTABLE_UA(1b, 3b)
> +		: "=r" (oldval), "=r" (ret), "+m" (*uaddr)
> +		: "i" (-EFAULT), "0" (newval), "1" (0)

"1"(0) can be omitted, that ret is initialized. And the initialization
of oldval not need.
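
For example (a sketch only; this assumes ret is made a "+r" in/out
operand so the matching "1" (0) input becomes unnecessary):

    -	u32 oldval = 0;
    +	u32 oldval;
     	int ret = 0;
     ...
    -		: "=r" (oldval), "=r" (ret), "+m" (*uaddr)
    -		: "i" (-EFAULT), "0" (newval), "1" (0)
    +		: "=r" (oldval), "+r" (ret), "+m" (*uaddr)
    +		: "i" (-EFAULT), "0" (newval)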

> +	);
> +
> +	if (ret)
> +		return ret;
> +
> +	*oval = oldval;
> +	return 0;
> +}
> +
> +static inline int __try_xchg_user_64(u64 *oval, u64 __user *uaddr, u64 newval)
> +{
> +	u64 oldval = 0;
> +	int ret = 0;
> +
> +	asm volatile("\n"
> +		"1:\txchgq %0, %2\n"
> +		"2:\n"
> +		"\t.section .fixup, \"ax\"\n"
> +		"3:\tmov     %3, %1\n"
> +		"\tjmp     2b\n"
> +		"\t.previous\n"
> +		_ASM_EXTABLE_UA(1b, 3b)
> +		: "=r" (oldval), "=r" (ret), "+m" (*uaddr)
> +		: "i" (-EFAULT), "0" (newval), "1" (0)

Same as above.

> +	);
> +
> +	if (ret)
> +		return ret;
> +
> +	*oval = oldval;
> +	return 0;
> +}
> +
>  #endif /* _ASM_X86_UACCESS_64_H */
> diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
> index ac0394087f7d..dcb3ac093075 100644
> --- a/include/linux/uaccess.h
> +++ b/include/linux/uaccess.h
> @@ -408,4 +408,50 @@ void __noreturn usercopy_abort(const char *name, const char *detail,
>  			       unsigned long len);
>  #endif
> 
> +#ifdef ARCH_HAS_ATOMIC_UACCESS_HELPERS
> +/**
> + * cmpxchg_user_[32|64][_nofault|]() - compare_exchange 32/64-bit values
> + * @uaddr:     Destination address, in user space;
> + * @curr_val:  Source address, in kernel space;
> + * @new_val:   The value to write to the destination address.
> + *
> + * This is the standard cmpxchg: atomically: compare *@uaddr to *@curr_val;
> + * if the values match, write @new_val to @uaddr, return 0; if the values
> + * do not match, write *@uaddr to @curr_val, return -EAGAIN.
> + *
> + * The _nofault versions don't fault and can be used in
> + * atomic/preempt-disabled contexts.
> + *
> + * Return:
> + * 0      : OK/success;
> + * -EINVAL: @uaddr is not properly aligned ('may fault' versions only);
> + * -EFAULT: memory access error (including mis-aligned @uaddr in _nofault);

Should this read "(including mis-aligned @uaddr in _fault)"?

> + * -EAGAIN: @old did not match.
> + */
> +int cmpxchg_user_32_nofault(u32 __user *uaddr, u32 *curr_val, u32 new_val);
> +int cmpxchg_user_64_nofault(u64 __user *uaddr, u64 *curr_val, u64 new_val);
> +int cmpxchg_user_32(u32 __user *uaddr, u32 *curr_val, u32 new_val);
> +int cmpxchg_user_64(u64 __user *uaddr, u64 *curr_val, u64 new_val);
> +
> +/**
> + * xchg_user_[32|64][_nofault|]() - exchange 32/64-bit values
> + * @uaddr:   Destination address, in user space;
> + * @val:     Source address, in kernel space.
> + *
> + * This is the standard atomic xchg: exchange values pointed to by @uaddr and @val.
> + *
> + * The _nofault versions don't fault and can be used in
> + * atomic/preempt-disabled contexts.
> + *
> + * Return:
> + * 0      : OK/success;
> + * -EINVAL: @uaddr is not properly aligned ('may fault' versions only);
> + * -EFAULT: memory access error (including mis-aligned @uaddr in _nofault).

Same here: should this read "(including mis-aligned @uaddr in _fault)"?

> + */
> +int xchg_user_32_nofault(u32 __user *uaddr, u32 *val);
> +int xchg_user_64_nofault(u64 __user *uaddr, u64 *val);
> +int xchg_user_32(u32 __user *uaddr, u32 *val);
> +int xchg_user_64(u64 __user *uaddr, u64 *val);
> +#endif		/* ARCH_HAS_ATOMIC_UACCESS_HELPERS */
> +
>  #endif		/* __LINUX_UACCESS_H__ */
> diff --git a/mm/maccess.c b/mm/maccess.c
> index d3f1a1f0b1c1..620556b11550 100644
> --- a/mm/maccess.c
> +++ b/mm/maccess.c
> @@ -335,3 +335,267 @@ long strnlen_user_nofault(const void __user *unsafe_addr, long count)
> 
>  	return ret;
>  }
> +
> +#ifdef ARCH_HAS_ATOMIC_UACCESS_HELPERS
> +
> +static int fix_pagefault(unsigned long uaddr, bool write_fault, int bytes)
> +{
> +	struct mm_struct *mm = current->mm;
> +	int ret;
> +
> +	mmap_read_lock(mm);
> +	ret = fixup_user_fault(mm, uaddr, write_fault ? FAULT_FLAG_WRITE : 0,
> +			NULL);
> +	mmap_read_unlock(mm);
> +
> +	return ret < 0 ? ret : 0;
> +}
> +
> +int cmpxchg_user_32_nofault(u32 __user *uaddr, u32 *curr_val, u32 new_val)
> +{
> +	int ret = -EFAULT;
> +	u32 __old = *curr_val;
> +
> +	if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
> +		return -EFAULT;
> +
> +	pagefault_disable();
> +
> +	if (!user_access_begin(uaddr, sizeof(*uaddr))) {
> +		pagefault_enable();
> +		return -EFAULT;
> +	}
> +	ret = __try_cmpxchg_user_32(curr_val, uaddr, __old, new_val);
> +	user_access_end();
> +
> +	if (!ret)
> +		ret =  *curr_val == __old ? 0 : -EAGAIN;
> +
> +	pagefault_enable();

Maybe this can be moved up to sit next to user_access_end()? Though I
don't know much about this area myself.
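
That is, something like this (sketch only):

    	ret = __try_cmpxchg_user_32(curr_val, uaddr, __old, new_val);
    	user_access_end();
    	pagefault_enable();

    	if (!ret)
    		ret = *curr_val == __old ? 0 : -EAGAIN;

    	return ret;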

> +	return ret;
> +}
> +
> +int cmpxchg_user_64_nofault(u64 __user *uaddr, u64 *curr_val, u64 new_val)
> +{
> +	int ret = -EFAULT;
> +	u64 __old = *curr_val;
> +
> +	if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
> +		return -EFAULT;
> +
> +	pagefault_disable();
> +
> +	if (!user_access_begin(uaddr, sizeof(*uaddr))) {
> +		pagefault_enable();
> +		return -EFAULT;
> +	}
> +	ret = __try_cmpxchg_user_64(curr_val, uaddr, __old, new_val);
> +	user_access_end();
> +
> +	if (!ret)
> +		ret =  *curr_val == __old ? 0 : -EAGAIN;
> +
> +	pagefault_enable();

The same.

> +	return ret;
> +}
> +
> +int cmpxchg_user_32(u32 __user *uaddr, u32 *curr_val, u32 new_val)
> +{
> +	int ret = -EFAULT;
> +	u32 __old = *curr_val;
> +
> +	/* Validate proper alignment. */
> +	if (unlikely(((unsigned long)uaddr % sizeof(*uaddr)) ||

This checks an address, so should it be sizeof(uaddr), the size of the
pointer itself, rather than sizeof(*uaddr)?

> +			((unsigned long)curr_val % sizeof(*curr_val))))

Same here: sizeof(*curr_val) --> sizeof(curr_val).

> +		return -EINVAL;

If I'm not mistaken, this return should be -EFAULT according to the
comment above.

> +	if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
> +		return -EFAULT;
> +
> +	pagefault_disable();
> +
> +	while (true) {
> +		ret = -EFAULT;
> +		if (!user_access_begin(uaddr, sizeof(*uaddr)))
> +			break;
> +
> +		ret = __try_cmpxchg_user_32(curr_val, uaddr, __old, new_val);
> +		user_access_end();
> +
> +		if (!ret) {
> +			ret =  *curr_val == __old ? 0 : -EAGAIN;
> +			break;
> +		}
> +
> +		if (fix_pagefault((unsigned long)uaddr, true, sizeof(*uaddr)) < 0)
> +			break;
> +	}
> +
> +	pagefault_enable();
> +	return ret;
> +}
> +
> +int cmpxchg_user_64(u64 __user *uaddr, u64 *curr_val, u64 new_val)
> +{
> +	int ret = -EFAULT;
> +	u64 __old = *curr_val;
> +
> +	/* Validate proper alignment. */
> +	if (unlikely(((unsigned long)uaddr % sizeof(*uaddr)) ||
> +			((unsigned long)curr_val % sizeof(*curr_val))))
> +		return -EINVAL;

The same as above, even though here the address size happens to equal
the value size: the value size should not be used here.

> +	if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
> +		return -EFAULT;
> +
> +	pagefault_disable();
> +
> +	while (true) {
> +		ret = -EFAULT;
> +		if (!user_access_begin(uaddr, sizeof(*uaddr)))
> +			break;
> +
> +		ret = __try_cmpxchg_user_64(curr_val, uaddr, __old, new_val);
> +		user_access_end();
> +
> +		if (!ret) {
> +			ret =  *curr_val == __old ? 0 : -EAGAIN;
> +			break;
> +		}
> +
> +		if (fix_pagefault((unsigned long)uaddr, true, sizeof(*uaddr)) < 0)
> +			break;
> +	}
> +
> +	pagefault_enable();
> +	return ret;
> +}
> +
> +/**
> + * xchg_user_[32|64][_nofault|]() - exchange 32/64-bit values
> + * @uaddr:   Destination address, in user space;
> + * @val:     Source address, in kernel space.
> + *
> + * This is the standard atomic xchg: exchange values pointed to by @uaddr and @val.
> + *
> + * The _nofault versions don't fault and can be used in
> + * atomic/preempt-disabled contexts.
> + *
> + * Return:
> + * 0      : OK/success;
> + * -EINVAL: @uaddr is not properly aligned ('may fault' versions only);
> + * -EFAULT: memory access error (including mis-aligned @uaddr in _nofault).
> + */
> +int xchg_user_32_nofault(u32 __user *uaddr, u32 *val)
> +{
> +	int ret;
> +
> +	if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
> +		return -EFAULT;
> +
> +	pagefault_disable();
> +
> +	if (!user_access_begin(uaddr, sizeof(*uaddr))) {
> +		pagefault_enable();
> +		return -EFAULT;
> +	}
> +
> +	ret = __try_xchg_user_32(val, uaddr, *val);
> +	user_access_end();
> +
> +	pagefault_enable();
> +
> +	return ret;
> +}
> +
> +int xchg_user_64_nofault(u64 __user *uaddr, u64 *val)
> +{
> +	int ret;
> +
> +	if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
> +		return -EFAULT;
> +
> +	pagefault_disable();
> +
> +	if (!user_access_begin(uaddr, sizeof(*uaddr))) {
> +		pagefault_enable();
> +		return -EFAULT;
> +	}
> +
> +	ret = __try_xchg_user_64(val, uaddr, *val);
> +	user_access_end();
> +
> +	pagefault_enable();
> +
> +	return ret;
> +}
> +
> +int xchg_user_32(u32 __user *uaddr, u32 *val)
> +{
> +	int ret = -EFAULT;
> +
> +	/* Validate proper alignment. */
> +	if (unlikely(((unsigned long)uaddr % sizeof(*uaddr)) ||
> +			((unsigned long)val % sizeof(*val))))
> +		return -EINVAL;

The same as above.

> +	if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
> +		return -EFAULT;
> +
> +	pagefault_disable();
> +
> +	while (true) {
> +		ret = -EFAULT;
> +		if (!user_access_begin(uaddr, sizeof(*uaddr)))
> +			break;
> +
> +		ret = __try_xchg_user_32(val, uaddr, *val);
> +		user_access_end();
> +
> +		if (!ret)
> +			break;
> +
> +		if (fix_pagefault((unsigned long)uaddr, true, sizeof(*uaddr)) < 0)
> +			break;
> +	}
> +
> +	pagefault_enable();
> +
> +	return ret;
> +}
> +
> +int xchg_user_64(u64 __user *uaddr, u64 *val)
> +{
> +	int ret = -EFAULT;
> +
> +	/* Validate proper alignment. */
> +	if (unlikely(((unsigned long)uaddr % sizeof(*uaddr)) ||
> +			((unsigned long)val % sizeof(*val))))
> +		return -EINVAL;

The same as above.

> +	if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
> +		return -EFAULT;
> +
> +	pagefault_disable();
> +
> +	while (true) {
> +		ret = -EFAULT;
> +		if (!user_access_begin(uaddr, sizeof(*uaddr)))
> +			break;
> +
> +		ret = __try_xchg_user_64(val, uaddr, *val);
> +		user_access_end();
> +
> +		if (!ret)
> +			break;
> +
> +		if (fix_pagefault((unsigned long)uaddr, true, sizeof(*uaddr)) < 0)
> +			break;
> +	}
> +
> +	pagefault_enable();
> +
> +	return ret;
> +}
> +#endif		/* ARCH_HAS_ATOMIC_UACCESS_HELPERS */
> --
> 2.25.1
>

Patch

diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 45697e04d771..41e2f96d3ec4 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -79,4 +79,97 @@  __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
 	kasan_check_write(dst, size);
 	return __copy_user_flushcache(dst, src, size);
 }
+
+#define ARCH_HAS_ATOMIC_UACCESS_HELPERS 1
+
+static inline int __try_cmpxchg_user_32(u32 *uval, u32 __user *uaddr,
+						u32 oldval, u32 newval)
+{
+	int ret = 0;
+
+	asm volatile("\n"
+		"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
+		"2:\n"
+		"\t.section .fixup, \"ax\"\n"
+		"3:\tmov     %3, %0\n"
+		"\tjmp     2b\n"
+		"\t.previous\n"
+		_ASM_EXTABLE_UA(1b, 3b)
+		: "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+		: "i" (-EFAULT), "r" (newval), "1" (oldval)
+		: "memory"
+	);
+	*uval = oldval;
+	return ret;
+}
+
+static inline int __try_cmpxchg_user_64(u64 *uval, u64 __user *uaddr,
+						u64 oldval, u64 newval)
+{
+	int ret = 0;
+
+	asm volatile("\n"
+		"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"
+		"2:\n"
+		"\t.section .fixup, \"ax\"\n"
+		"3:\tmov     %3, %0\n"
+		"\tjmp     2b\n"
+		"\t.previous\n"
+		_ASM_EXTABLE_UA(1b, 3b)
+		: "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+		: "i" (-EFAULT), "r" (newval), "1" (oldval)
+		: "memory"
+	);
+	*uval = oldval;
+	return ret;
+}
+
+static inline int __try_xchg_user_32(u32 *oval, u32 __user *uaddr, u32 newval)
+{
+	u32 oldval = 0;
+	int ret = 0;
+
+	asm volatile("\n"
+		"1:\txchgl %0, %2\n"
+		"2:\n"
+		"\t.section .fixup, \"ax\"\n"
+		"3:\tmov     %3, %1\n"
+		"\tjmp     2b\n"
+		"\t.previous\n"
+		_ASM_EXTABLE_UA(1b, 3b)
+		: "=r" (oldval), "=r" (ret), "+m" (*uaddr)
+		: "i" (-EFAULT), "0" (newval), "1" (0)
+	);
+
+	if (ret)
+		return ret;
+
+	*oval = oldval;
+	return 0;
+}
+
+static inline int __try_xchg_user_64(u64 *oval, u64 __user *uaddr, u64 newval)
+{
+	u64 oldval = 0;
+	int ret = 0;
+
+	asm volatile("\n"
+		"1:\txchgq %0, %2\n"
+		"2:\n"
+		"\t.section .fixup, \"ax\"\n"
+		"3:\tmov     %3, %1\n"
+		"\tjmp     2b\n"
+		"\t.previous\n"
+		_ASM_EXTABLE_UA(1b, 3b)
+		: "=r" (oldval), "=r" (ret), "+m" (*uaddr)
+		: "i" (-EFAULT), "0" (newval), "1" (0)
+	);
+
+	if (ret)
+		return ret;
+
+	*oval = oldval;
+	return 0;
+}
+
 #endif /* _ASM_X86_UACCESS_64_H */
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index ac0394087f7d..dcb3ac093075 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -408,4 +408,50 @@  void __noreturn usercopy_abort(const char *name, const char *detail,
 			       unsigned long len);
 #endif

+#ifdef ARCH_HAS_ATOMIC_UACCESS_HELPERS
+/**
+ * cmpxchg_user_[32|64][_nofault|]() - compare_exchange 32/64-bit values
+ * @uaddr:     Destination address, in user space;
+ * @curr_val:  Source address, in kernel space;
+ * @new_val:   The value to write to the destination address.
+ *
+ * This is the standard cmpxchg: atomically: compare *@uaddr to *@curr_val;
+ * if the values match, write @new_val to @uaddr, return 0; if the values
+ * do not match, write *@uaddr to @curr_val, return -EAGAIN.
+ *
+ * The _nofault versions don't fault and can be used in
+ * atomic/preempt-disabled contexts.
+ *
+ * Return:
+ * 0      : OK/success;
+ * -EINVAL: @uaddr is not properly aligned ('may fault' versions only);
+ * -EFAULT: memory access error (including mis-aligned @uaddr in _nofault);
+ * -EAGAIN: @old did not match.
+ */
+int cmpxchg_user_32_nofault(u32 __user *uaddr, u32 *curr_val, u32 new_val);
+int cmpxchg_user_64_nofault(u64 __user *uaddr, u64 *curr_val, u64 new_val);
+int cmpxchg_user_32(u32 __user *uaddr, u32 *curr_val, u32 new_val);
+int cmpxchg_user_64(u64 __user *uaddr, u64 *curr_val, u64 new_val);
+
+/**
+ * xchg_user_[32|64][_nofault|]() - exchange 32/64-bit values
+ * @uaddr:   Destination address, in user space;
+ * @val:     Source address, in kernel space.
+ *
+ * This is the standard atomic xchg: exchange values pointed to by @uaddr and @val.
+ *
+ * The _nofault versions don't fault and can be used in
+ * atomic/preempt-disabled contexts.
+ *
+ * Return:
+ * 0      : OK/success;
+ * -EINVAL: @uaddr is not properly aligned ('may fault' versions only);
+ * -EFAULT: memory access error (including mis-aligned @uaddr in _nofault).
+ */
+int xchg_user_32_nofault(u32 __user *uaddr, u32 *val);
+int xchg_user_64_nofault(u64 __user *uaddr, u64 *val);
+int xchg_user_32(u32 __user *uaddr, u32 *val);
+int xchg_user_64(u64 __user *uaddr, u64 *val);
+#endif		/* ARCH_HAS_ATOMIC_UACCESS_HELPERS */
+
 #endif		/* __LINUX_UACCESS_H__ */
diff --git a/mm/maccess.c b/mm/maccess.c
index d3f1a1f0b1c1..620556b11550 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -335,3 +335,267 @@  long strnlen_user_nofault(const void __user *unsafe_addr, long count)

 	return ret;
 }
+
+#ifdef ARCH_HAS_ATOMIC_UACCESS_HELPERS
+
+static int fix_pagefault(unsigned long uaddr, bool write_fault, int bytes)
+{
+	struct mm_struct *mm = current->mm;
+	int ret;
+
+	mmap_read_lock(mm);
+	ret = fixup_user_fault(mm, uaddr, write_fault ? FAULT_FLAG_WRITE : 0,
+			NULL);
+	mmap_read_unlock(mm);
+
+	return ret < 0 ? ret : 0;
+}
+
+int cmpxchg_user_32_nofault(u32 __user *uaddr, u32 *curr_val, u32 new_val)
+{
+	int ret = -EFAULT;
+	u32 __old = *curr_val;
+
+	if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
+		return -EFAULT;
+
+	pagefault_disable();
+
+	if (!user_access_begin(uaddr, sizeof(*uaddr))) {
+		pagefault_enable();
+		return -EFAULT;
+	}
+	ret = __try_cmpxchg_user_32(curr_val, uaddr, __old, new_val);
+	user_access_end();
+
+	if (!ret)
+		ret =  *curr_val == __old ? 0 : -EAGAIN;
+
+	pagefault_enable();
+	return ret;
+}
+
+int cmpxchg_user_64_nofault(u64 __user *uaddr, u64 *curr_val, u64 new_val)
+{
+	int ret = -EFAULT;
+	u64 __old = *curr_val;
+
+	if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
+		return -EFAULT;
+
+	pagefault_disable();
+
+	if (!user_access_begin(uaddr, sizeof(*uaddr))) {
+		pagefault_enable();
+		return -EFAULT;
+	}
+	ret = __try_cmpxchg_user_64(curr_val, uaddr, __old, new_val);
+	user_access_end();
+
+	if (!ret)
+		ret =  *curr_val == __old ? 0 : -EAGAIN;
+
+	pagefault_enable();
+
+	return ret;
+}
+
+int cmpxchg_user_32(u32 __user *uaddr, u32 *curr_val, u32 new_val)
+{
+	int ret = -EFAULT;
+	u32 __old = *curr_val;
+
+	/* Validate proper alignment. */
+	if (unlikely(((unsigned long)uaddr % sizeof(*uaddr)) ||
+			((unsigned long)curr_val % sizeof(*curr_val))))
+		return -EINVAL;
+
+	if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
+		return -EFAULT;
+
+	pagefault_disable();
+
+	while (true) {
+		ret = -EFAULT;
+		if (!user_access_begin(uaddr, sizeof(*uaddr)))
+			break;
+
+		ret = __try_cmpxchg_user_32(curr_val, uaddr, __old, new_val);
+		user_access_end();
+
+		if (!ret) {
+			ret =  *curr_val == __old ? 0 : -EAGAIN;
+			break;
+		}
+
+		if (fix_pagefault((unsigned long)uaddr, true, sizeof(*uaddr)) < 0)
+			break;
+	}
+
+	pagefault_enable();
+	return ret;
+}
+
+int cmpxchg_user_64(u64 __user *uaddr, u64 *curr_val, u64 new_val)
+{
+	int ret = -EFAULT;
+	u64 __old = *curr_val;
+
+	/* Validate proper alignment. */
+	if (unlikely(((unsigned long)uaddr % sizeof(*uaddr)) ||
+			((unsigned long)curr_val % sizeof(*curr_val))))
+		return -EINVAL;
+
+	if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
+		return -EFAULT;
+
+	pagefault_disable();
+
+	while (true) {
+		ret = -EFAULT;
+		if (!user_access_begin(uaddr, sizeof(*uaddr)))
+			break;
+
+		ret = __try_cmpxchg_user_64(curr_val, uaddr, __old, new_val);
+		user_access_end();
+
+		if (!ret) {
+			ret =  *curr_val == __old ? 0 : -EAGAIN;
+			break;
+		}
+
+		if (fix_pagefault((unsigned long)uaddr, true, sizeof(*uaddr)) < 0)
+			break;
+	}
+
+	pagefault_enable();
+	return ret;
+}
+
+/**
+ * xchg_user_[32|64][_nofault|]() - exchange 32/64-bit values
+ * @uaddr:   Destination address, in user space;
+ * @val:     Source address, in kernel space.
+ *
+ * This is the standard atomic xchg: exchange values pointed to by @uaddr and @val.
+ *
+ * The _nofault versions don't fault and can be used in
+ * atomic/preempt-disabled contexts.
+ *
+ * Return:
+ * 0      : OK/success;
+ * -EINVAL: @uaddr is not properly aligned ('may fault' versions only);
+ * -EFAULT: memory access error (including mis-aligned @uaddr in _nofault).
+ */
+int xchg_user_32_nofault(u32 __user *uaddr, u32 *val)
+{
+	int ret;
+
+	if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
+		return -EFAULT;
+
+	pagefault_disable();
+
+	if (!user_access_begin(uaddr, sizeof(*uaddr))) {
+		pagefault_enable();
+		return -EFAULT;
+	}
+
+	ret = __try_xchg_user_32(val, uaddr, *val);
+	user_access_end();
+
+	pagefault_enable();
+
+	return ret;
+}
+
+int xchg_user_64_nofault(u64 __user *uaddr, u64 *val)
+{
+	int ret;
+
+	if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
+		return -EFAULT;
+
+	pagefault_disable();
+
+	if (!user_access_begin(uaddr, sizeof(*uaddr))) {
+		pagefault_enable();
+		return -EFAULT;
+	}
+
+	ret = __try_xchg_user_64(val, uaddr, *val);
+	user_access_end();
+
+	pagefault_enable();
+
+	return ret;
+}
+
+int xchg_user_32(u32 __user *uaddr, u32 *val)
+{
+	int ret = -EFAULT;
+
+	/* Validate proper alignment. */
+	if (unlikely(((unsigned long)uaddr % sizeof(*uaddr)) ||
+			((unsigned long)val % sizeof(*val))))
+		return -EINVAL;
+
+	if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
+		return -EFAULT;
+
+	pagefault_disable();
+
+	while (true) {
+		ret = -EFAULT;
+		if (!user_access_begin(uaddr, sizeof(*uaddr)))
+			break;
+
+		ret = __try_xchg_user_32(val, uaddr, *val);
+		user_access_end();
+
+		if (!ret)
+			break;
+
+		if (fix_pagefault((unsigned long)uaddr, true, sizeof(*uaddr)) < 0)
+			break;
+	}
+
+	pagefault_enable();
+
+	return ret;
+}
+
+int xchg_user_64(u64 __user *uaddr, u64 *val)
+{
+	int ret = -EFAULT;
+
+	/* Validate proper alignment. */
+	if (unlikely(((unsigned long)uaddr % sizeof(*uaddr)) ||
+			((unsigned long)val % sizeof(*val))))
+		return -EINVAL;
+
+	if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
+		return -EFAULT;
+
+	pagefault_disable();
+
+	while (true) {
+		ret = -EFAULT;
+		if (!user_access_begin(uaddr, sizeof(*uaddr)))
+			break;
+
+		ret = __try_xchg_user_64(val, uaddr, *val);
+		user_access_end();
+
+		if (!ret)
+			break;
+
+		if (fix_pagefault((unsigned long)uaddr, true, sizeof(*uaddr)) < 0)
+			break;
+	}
+
+	pagefault_enable();
+
+	return ret;
+}
+#endif		/* ARCH_HAS_ATOMIC_UACCESS_HELPERS */