diff mbox series

[v1,1/9] s390/uaccess: Add storage key checked cmpxchg access to user space

Message ID 20220930210751.225873-2-scgl@linux.ibm.com (mailing list archive)
State New, archived
Headers show
Series KVM: s390: Extend MEM_OP ioctl by storage key checked cmpxchg | expand

Commit Message

Janis Schoetterl-Glausch Sept. 30, 2022, 9:07 p.m. UTC
Add cmpxchg functionality similar to that in cmpxchg.h except that the
target is a user space address and that the address' storage key is
matched with the access_key argument in order to honor key-controlled
protection.
The access is performed by changing to the secondary-spaces mode and
setting the PSW key for the duration of the compare and swap.

Signed-off-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
---


Possible variations:
  * check the assumptions made in cmpxchg_user_key_size and error out
  * call functions called by copy_to_user
     * access_ok? is a nop
     * should_fail_usercopy?
     * instrument_copy_to_user? doesn't make sense IMO
  * don't be overly strict in cmpxchg_user_key


 arch/s390/include/asm/uaccess.h | 187 ++++++++++++++++++++++++++++++++
 1 file changed, 187 insertions(+)

Comments

Claudio Imbrenda Oct. 5, 2022, 2:13 p.m. UTC | #1
On Fri, 30 Sep 2022 23:07:43 +0200
Janis Schoetterl-Glausch <scgl@linux.ibm.com> wrote:

> Add cmpxchg functionality similar to that in cmpxchg.h except that the
> target is a user space address and that the address' storage key is
> matched with the access_key argument in order to honor key-controlled
> protection.
> The access is performed by changing to the secondary-spaces mode and
> setting the PSW key for the duration of the compare and swap.

this whole patch is very complex, I think it can be simplified and made
more maintainable (see my comments below)

in the end here we need an atomic compare and swap with key checking.
If we are doing a syscall for it, we are clearly not looking for
performance.

> 
> Signed-off-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
> ---
> 
> 
> Possible variations:
>   * check the assumptions made in cmpxchg_user_key_size and error out
>   * call functions called by copy_to_user
>      * access_ok? is a nop
>      * should_fail_usercopy?
>      * instrument_copy_to_user? doesn't make sense IMO
>   * don't be overly strict in cmpxchg_user_key
> 
> 
>  arch/s390/include/asm/uaccess.h | 187 ++++++++++++++++++++++++++++++++
>  1 file changed, 187 insertions(+)
> 
> diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
> index f7038b800cc3..0ce90b7e2b75 100644
> --- a/arch/s390/include/asm/uaccess.h
> +++ b/arch/s390/include/asm/uaccess.h
> @@ -19,6 +19,8 @@
>  #include <asm/extable.h>
>  #include <asm/facility.h>
>  #include <asm-generic/access_ok.h>
> +#include <asm/page.h>
> +#include <linux/log2.h>
>  
>  void debug_user_asce(int exit);
>  
> @@ -390,4 +392,189 @@ do {									\
>  		goto err_label;						\
>  } while (0)
>  
> +static __always_inline int __cmpxchg_user_key_small(int size, u64 address,
> +						    unsigned __int128 *old_p,
> +						    unsigned __int128 new, u8 access_key)
> +{

can this whole function be simplified to be a C wrapper for the 4 byte
version of compare and swap?

> +	u32 shift, mask, old_word, new_word, align_mask, tmp, diff;
> +	u64 aligned;
> +	int ret = -EFAULT;
> +
> +	switch (size) {
> +	case 2:
> +		align_mask = 2;
> +		aligned = (address ^ (address & align_mask));
> +		shift = (sizeof(u32) - (address & align_mask) - size) * 8;
> +		mask = 0xffff << shift;
> +		old_word = ((u16)*old_p) << shift;
> +		new_word = ((u16)new) << shift;
> +		break;
> +	case 1:
> +		align_mask = 3;
> +		aligned = (address ^ (address & align_mask));
> +		shift = (sizeof(u32) - (address & align_mask) - size) * 8;
> +		mask = 0xff << shift;
> +		old_word = ((u8)*old_p) << shift;
> +		new_word = ((u8)new) << shift;
> +		break;
> +	}
> +	asm volatile(
> +		       "spka	0(%[access_key])\n"
> +		"	sacf	256\n"
> +		"0:	l	%[tmp],%[aligned]\n"
> +		"1:	nr	%[tmp],%[hole_mask]\n"
> +		"	or	%[new_word],%[tmp]\n"
> +		"	or	%[old_word],%[tmp]\n"
> +		"	lr	%[tmp],%[old_word]\n"
> +		"2:	cs	%[tmp],%[new_word],%[aligned]\n"
> +		"3:	jnl	4f\n"
> +		"	xrk	%[diff],%[tmp],%[old_word]\n"
> +		"	nr	%[diff],%[hole_mask]\n"
> +		"	xr	%[new_word],%[diff]\n"
> +		"	xr	%[old_word],%[diff]\n"
> +		"	xrk	%[diff],%[tmp],%[old_word]\n"
> +		"	jz	2b\n"
> +		"4:	ipm	%[ret]\n"
> +		"	srl	%[ret],28\n"
> +		"5:	sacf	768\n"
> +		"	spka	%[default_key]\n"
> +		EX_TABLE(0b, 5b) EX_TABLE(1b, 5b)
> +		EX_TABLE(2b, 5b) EX_TABLE(3b, 5b)
> +		: [old_word] "+&d" (old_word),
> +		  [new_word] "+&d" (new_word),
> +		  [tmp] "=&d" (tmp),
> +		  [aligned] "+Q" (*(u32 *)aligned),
> +		  [diff] "=&d" (diff),
> +		  [ret] "+d" (ret)
> +		: [access_key] "a" (access_key << 4),
> +		  [hole_mask] "d" (~mask),
> +		  [default_key] "J" (PAGE_DEFAULT_KEY)
> +		: "cc"
> +	);
> +	*old_p = (tmp & mask) >> shift;
> +	return ret;
> +}
> +
> +/**
> + * cmpxchg_user_key_size() - cmpxchg with user space target, honoring storage keys
> + * @size: Size of the value being cmpxchg'ed, one of 1,2,4,8,16.
> + * @address: User space address of value to compare to *@old_p and exchange with
> + *           *@new. Must be aligned to @size.
> + * @old_p: Pointer to old value. Interpreted as a @size byte integer and compared
> + *         to the content pointed to by @address in order to determine if the
> + *         exchange occurs. The value read from @address is written back to *@old_p.
> + * @new: New value to place at @address, interpreted as a @size byte integer.
> + * @access_key: Access key to use for checking storage key protection.
> + *
> + * Perform a cmpxchg on a user space target, honoring storage key protection.
> + * @access_key alone determines how key checking is performed, neither
> + * storage-protection-override nor fetch-protection-override apply.
> + *
> + * Return:	0: successful exchange
> + *		1: exchange failed
> + *		-EFAULT: @address not accessible or not naturally aligned
> + *		-EINVAL: invalid @size
> + */
> +static __always_inline int cmpxchg_user_key_size(int size, void __user *address,
> +						 unsigned __int128 *old_p,
> +						 unsigned __int128 new, u8 access_key)
> +{
> +	union {
> +		u32 word;
> +		u64 doubleword;
> +	} old;
> +	int ret = -EFAULT;
> +
> +	/*
> +	 * The following assumes that:
> +	 *  * the current psw key is the default key
> +	 *  * no storage protection overrides are in effect
> +	 */
> +	might_fault();
> +	switch (size) {
> +	case 16:
> +		asm volatile(
> +			       "spka	0(%[access_key])\n"
> +			"	sacf	256\n"
> +			"0:	cdsg	%[old],%[new],%[target]\n"
> +			"1:	ipm	%[ret]\n"
> +			"	srl	%[ret],28\n"
> +			"2:	sacf	768\n"
> +			"	spka	%[default_key]\n"
> +			EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
> +			: [old] "+d" (*old_p),
> +			  [target] "+Q" (*(unsigned __int128 __user *)address),
> +			  [ret] "+d" (ret)
> +			: [access_key] "a" (access_key << 4),
> +			  [new] "d" (new),
> +			  [default_key] "J" (PAGE_DEFAULT_KEY)
> +			: "cc"
> +		);
> +		return ret;
> +	case 8:
> +		old.doubleword = *old_p;
> +		asm volatile(
> +			       "spka	0(%[access_key])\n"
> +			"	sacf	256\n"
> +			"0:	csg	%[old],%[new],%[target]\n"
> +			"1:	ipm	%[ret]\n"
> +			"	srl	%[ret],28\n"
> +			"2:	sacf	768\n"
> +			"	spka	%[default_key]\n"
> +			EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
> +			: [old] "+d" (old.doubleword),
> +			  [target] "+Q" (*(u64 __user *)address),
> +			  [ret] "+d" (ret)
> +			: [access_key] "a" (access_key << 4),
> +			  [new] "d" ((u64)new),
> +			  [default_key] "J" (PAGE_DEFAULT_KEY)
> +			: "cc"
> +		);
> +		*old_p = old.doubleword;
> +		return ret;
> +	case 4:
> +		old.word = *old_p;
> +		asm volatile(
> +			       "spka	0(%[access_key])\n"
> +			"	sacf	256\n"
> +			"0:	cs	%[old],%[new],%[target]\n"
> +			"1:	ipm	%[ret]\n"
> +			"	srl	%[ret],28\n"
> +			"2:	sacf	768\n"
> +			"	spka	%[default_key]\n"
> +			EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
> +			: [old] "+d" (old.word),
> +			  [target] "+Q" (*(u32 __user *)address),
> +			  [ret] "+d" (ret)
> +			: [access_key] "a" (access_key << 4),
> +			  [new] "d" ((u32)new),
> +			  [default_key] "J" (PAGE_DEFAULT_KEY)
> +			: "cc"

this is the same code 3 times with only very minimal changes.
can you factor it out in macros?

something like this:

#define DO_COMPARE_AND_SWAP(instr, _old, _addr, _ret, _key, _new) \
	asm volatile(
			"spka	0(%[access_key])\n"
		"	sacf	256\n" 
		"0:	" instr "%[old],%[new],%[target]\n"
		"1:	ipm	%[ret]\n"
 		"	srl 	%[ret],28\n"
		"2:	sacf	768\n"
		"	spka	%[default_key]\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: [old] "+d"(_old),
		  [target] "+Q" (*(_addr)),
		  [ret] "+d" (_ret)
		: [access_key] "a" ((_key) << 4),
		  [new] "d" (_new),
		  [default_key] "J" (PAGE_DEFAULT_KEY)
		: "cc"

and then in the code:

DO_COMPARE_AND_SWAP("cs", old.word, (u32 __user *)address, ret, access_key, (u32)new)

this way the code is not duplicated


or have you tried it already and there are issues I didn't think of?

> +		);
> +		*old_p = old.word;
> +		return ret;
> +	case 2:
> +	case 1:
> +		return __cmpxchg_user_key_small(size, (u64)address, old_p, new, access_key);
> +	default:
> +		return -EINVAL;
> +	}
> +}
> +
> +#define cmpxchg_user_key(target_p, old_p, new, access_key)			\
> +({										\
> +	__typeof__(old_p) __old_p = (old_p);					\
> +	unsigned __int128 __old = *__old_p;					\
> +	size_t __size = sizeof(*(target_p));					\
> +	int __ret;								\
> +										\
> +	BUILD_BUG_ON(__size != sizeof(*__old_p));				\
> +	BUILD_BUG_ON(__size != sizeof(new));					\
> +	BUILD_BUG_ON(__size > 16 || !is_power_of_2(__size));			\

and here an if to see if you need the _small version or the regular
one, with the _small version being a wrapper around the regular one

> +	__ret = cmpxchg_user_key_size(__size, (target_p), &__old, (new),	\
> +				      (access_key));				\
> +	*__old_p = __old;							\
> +	__ret;									\
> +})
> +
>  #endif /* __S390_UACCESS_H */
Janis Schoetterl-Glausch Oct. 5, 2022, 3:54 p.m. UTC | #2
On Wed, 2022-10-05 at 16:13 +0200, Claudio Imbrenda wrote:
> On Fri, 30 Sep 2022 23:07:43 +0200
> Janis Schoetterl-Glausch <scgl@linux.ibm.com> wrote:
> 
> > Add cmpxchg functionality similar to that in cmpxchg.h except that the
> > target is a user space address and that the address' storage key is
> > matched with the access_key argument in order to honor key-controlled
> > protection.
> > The access is performed by changing to the secondary-spaces mode and
> > setting the PSW key for the duration of the compare and swap.
> 
> this whole patch is very complex, I think it can be simplified and made
> more maintainable (see my comments below)
> 
> in the end here we need an atomic compare and swap with key checking,
> if we are doing a syscall for it, we are clearly not looking for
> performance.

If you only consider this in the context of KVM, you are correct, but
Heiko wanted me not to specialize this for KVM.

> > 
> > Signed-off-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
> > ---
> > 
> > 
> > Possible variations:
> >   * check the assumptions made in cmpxchg_user_key_size and error out
> >   * call functions called by copy_to_user
> >      * access_ok? is a nop
> >      * should_fail_usercopy?
> >      * instrument_copy_to_user? doesn't make sense IMO
> >   * don't be overly strict in cmpxchg_user_key
> > 
> > 
> >  arch/s390/include/asm/uaccess.h | 187 ++++++++++++++++++++++++++++++++
> >  1 file changed, 187 insertions(+)
> > 
> > diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
> > index f7038b800cc3..0ce90b7e2b75 100644
> > --- a/arch/s390/include/asm/uaccess.h
> > +++ b/arch/s390/include/asm/uaccess.h
> > @@ -19,6 +19,8 @@
> >  #include <asm/extable.h>
> >  #include <asm/facility.h>
> >  #include <asm-generic/access_ok.h>
> > +#include <asm/page.h>
> > +#include <linux/log2.h>
> >  
> >  void debug_user_asce(int exit);
> >  
> > @@ -390,4 +392,189 @@ do {									\
> >  		goto err_label;						\
> >  } while (0)
> >  
> > +static __always_inline int __cmpxchg_user_key_small(int size, u64 address,
> > +						    unsigned __int128 *old_p,
> > +						    unsigned __int128 new, u8 access_key)
> > +{
> 
> can this whole function be simplified to be a C wrapper for the 4 byte
> version of compare and swap?

I think so, but all of this is supposed to mirror
arch/s390/include/asm/cmpxchg.h, although I did depart from that
somewhat. For one, I changed the decision for retrying the loop,
but I'll have to undo that since compilation for older machines
complains about xrk.
> 
> > +	u32 shift, mask, old_word, new_word, align_mask, tmp, diff;
> > +	u64 aligned;
> > +	int ret = -EFAULT;
> > +
> > +	switch (size) {
> > +	case 2:
> > +		align_mask = 2;
> > +		aligned = (address ^ (address & align_mask));
> > +		shift = (sizeof(u32) - (address & align_mask) - size) * 8;
> > +		mask = 0xffff << shift;
> > +		old_word = ((u16)*old_p) << shift;
> > +		new_word = ((u16)new) << shift;
> > +		break;
> > +	case 1:
> > +		align_mask = 3;
> > +		aligned = (address ^ (address & align_mask));
> > +		shift = (sizeof(u32) - (address & align_mask) - size) * 8;
> > +		mask = 0xff << shift;
> > +		old_word = ((u8)*old_p) << shift;
> > +		new_word = ((u8)new) << shift;
> > +		break;
> > +	}
> > +	asm volatile(
> > +		       "spka	0(%[access_key])\n"
> > +		"	sacf	256\n"
> > +		"0:	l	%[tmp],%[aligned]\n"
> > +		"1:	nr	%[tmp],%[hole_mask]\n"
> > +		"	or	%[new_word],%[tmp]\n"
> > +		"	or	%[old_word],%[tmp]\n"
> > +		"	lr	%[tmp],%[old_word]\n"
> > +		"2:	cs	%[tmp],%[new_word],%[aligned]\n"
> > +		"3:	jnl	4f\n"
> > +		"	xrk	%[diff],%[tmp],%[old_word]\n"
> > +		"	nr	%[diff],%[hole_mask]\n"
> > +		"	xr	%[new_word],%[diff]\n"
> > +		"	xr	%[old_word],%[diff]\n"
> > +		"	xrk	%[diff],%[tmp],%[old_word]\n"
> > +		"	jz	2b\n"
> > +		"4:	ipm	%[ret]\n"
> > +		"	srl	%[ret],28\n"
> > +		"5:	sacf	768\n"
> > +		"	spka	%[default_key]\n"
> > +		EX_TABLE(0b, 5b) EX_TABLE(1b, 5b)
> > +		EX_TABLE(2b, 5b) EX_TABLE(3b, 5b)
> > +		: [old_word] "+&d" (old_word),
> > +		  [new_word] "+&d" (new_word),
> > +		  [tmp] "=&d" (tmp),
> > +		  [aligned] "+Q" (*(u32 *)aligned),
> > +		  [diff] "=&d" (diff),
> > +		  [ret] "+d" (ret)
> > +		: [access_key] "a" (access_key << 4),
> > +		  [hole_mask] "d" (~mask),
> > +		  [default_key] "J" (PAGE_DEFAULT_KEY)
> > +		: "cc"
> > +	);
> > +	*old_p = (tmp & mask) >> shift;
> > +	return ret;
> > +}
> > +
> > +/**
> > + * cmpxchg_user_key_size() - cmpxchg with user space target, honoring storage keys
> > + * @size: Size of the value being cmpxchg'ed, one of 1,2,4,8,16.
> > + * @address: User space address of value to compare to *@old_p and exchange with
> > + *           *@new. Must be aligned to @size.
> > + * @old_p: Pointer to old value. Interpreted as a @size byte integer and compared
> > + *         to the content pointed to by @address in order to determine if the
> > + *         exchange occurs. The value read from @address is written back to *@old_p.
> > + * @new: New value to place at @address, interpreted as a @size byte integer.
> > + * @access_key: Access key to use for checking storage key protection.
> > + *
> > + * Perform a cmpxchg on a user space target, honoring storage key protection.
> > + * @access_key alone determines how key checking is performed, neither
> > + * storage-protection-override nor fetch-protection-override apply.
> > + *
> > + * Return:	0: successful exchange
> > + *		1: exchange failed
> > + *		-EFAULT: @address not accessible or not naturally aligned
> > + *		-EINVAL: invalid @size
> > + */
> > +static __always_inline int cmpxchg_user_key_size(int size, void __user *address,
> > +						 unsigned __int128 *old_p,
> > +						 unsigned __int128 new, u8 access_key)
> > +{
> > +	union {
> > +		u32 word;
> > +		u64 doubleword;
> > +	} old;
> > +	int ret = -EFAULT;
> > +
> > +	/*
> > +	 * The following assumes that:
> > +	 *  * the current psw key is the default key
> > +	 *  * no storage protection overrides are in effect
> > +	 */
> > +	might_fault();
> > +	switch (size) {
> > +	case 16:
> > +		asm volatile(
> > +			       "spka	0(%[access_key])\n"
> > +			"	sacf	256\n"
> > +			"0:	cdsg	%[old],%[new],%[target]\n"
> > +			"1:	ipm	%[ret]\n"
> > +			"	srl	%[ret],28\n"
> > +			"2:	sacf	768\n"
> > +			"	spka	%[default_key]\n"
> > +			EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
> > +			: [old] "+d" (*old_p),
> > +			  [target] "+Q" (*(unsigned __int128 __user *)address),
> > +			  [ret] "+d" (ret)
> > +			: [access_key] "a" (access_key << 4),
> > +			  [new] "d" (new),
> > +			  [default_key] "J" (PAGE_DEFAULT_KEY)
> > +			: "cc"
> > +		);
> > +		return ret;
> > +	case 8:
> > +		old.doubleword = *old_p;
> > +		asm volatile(
> > +			       "spka	0(%[access_key])\n"
> > +			"	sacf	256\n"
> > +			"0:	csg	%[old],%[new],%[target]\n"
> > +			"1:	ipm	%[ret]\n"
> > +			"	srl	%[ret],28\n"
> > +			"2:	sacf	768\n"
> > +			"	spka	%[default_key]\n"
> > +			EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
> > +			: [old] "+d" (old.doubleword),
> > +			  [target] "+Q" (*(u64 __user *)address),
> > +			  [ret] "+d" (ret)
> > +			: [access_key] "a" (access_key << 4),
> > +			  [new] "d" ((u64)new),
> > +			  [default_key] "J" (PAGE_DEFAULT_KEY)
> > +			: "cc"
> > +		);
> > +		*old_p = old.doubleword;
> > +		return ret;
> > +	case 4:
> > +		old.word = *old_p;
> > +		asm volatile(
> > +			       "spka	0(%[access_key])\n"
> > +			"	sacf	256\n"
> > +			"0:	cs	%[old],%[new],%[target]\n"
> > +			"1:	ipm	%[ret]\n"
> > +			"	srl	%[ret],28\n"
> > +			"2:	sacf	768\n"
> > +			"	spka	%[default_key]\n"
> > +			EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
> > +			: [old] "+d" (old.word),
> > +			  [target] "+Q" (*(u32 __user *)address),
> > +			  [ret] "+d" (ret)
> > +			: [access_key] "a" (access_key << 4),
> > +			  [new] "d" ((u32)new),
> > +			  [default_key] "J" (PAGE_DEFAULT_KEY)
> > +			: "cc"
> 
> this is the same code 3 times with only very minimal changes.
> can you factor it out in macros?
> 
> something like this:
> 
> #define DO_COMPARE_AND_SWAP(instr, _old, _addr, _ret, _key, _new) \
> 	asm volatile(
> 			"spka	0(%[access_key])\n"
> 		"	sacf	256\n" 
> 		"0:	" instr "%[old],%[new],%[target]\n"
> 		"1:	ipm	%[ret]\n"
>  		"	srl 	%[ret],28\n"
> 		"2:	sacf	768\n"
> 		"	spka	%[default_key]\n"
> 		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
> 		: [old] "+d"(_old),
> 		  [target] "+Q" (*(_addr)),
> 		  [ret] "+d" (_ret)
> 		: [access_key] "a" ((_key) << 4),
> 		  [new] "d" (_new),
> 		  [default_key] "J" (PAGE_DEFAULT_KEY)
> 		: "cc"
> 
> and then in the code:
> 
> DO_COMPARE_AND_SWAP("cs", old.word, (u32 __user *)address, ret, access_key, (u32)new)
> 
> this way the code is not duplicated
> 
> 
> or have you tried it already and there are issues I didn't think of?

I'd prefer that, but it's different from how cmpxchg.h does it.
Then again, that file has simpler asm and needs to special-case int128,
so the benefit isn't as great. I guess Heiko should make that call.
> 
> > +		);
> > +		*old_p = old.word;
> > +		return ret;
> > +	case 2:
> > +	case 1:
> > +		return __cmpxchg_user_key_small(size, (u64)address, old_p, new, access_key);
> > +	default:
> > +		return -EINVAL;
> > +	}
> > +}
> > +
> > +#define cmpxchg_user_key(target_p, old_p, new, access_key)			\

Note that this macro isn't being used because I also deviated from the
functions in cmpxchg.h here. Since we need to return an error in case
of a fault the return type cannot be void. So we can also return EINVAL
in case of an invalid size. Then cmpxchg_user_key_size is perfectly
fine to call directly, which avoids awkwardness in KVM converting the
numeric size we got from user space into the right types.
So this macro only exists for other future possible users of key
checked cmpxchg where the types are fixed at compile time.
So with your version cmpxchg_user_key_size should just recurse for the
small sizes.

> > +({										\
> > +	__typeof__(old_p) __old_p = (old_p);					\
> > +	unsigned __int128 __old = *__old_p;					\
> > +	size_t __size = sizeof(*(target_p));					\
> > +	int __ret;								\
> > +										\
> > +	BUILD_BUG_ON(__size != sizeof(*__old_p));				\
> > +	BUILD_BUG_ON(__size != sizeof(new));					\
> > +	BUILD_BUG_ON(__size > 16 || !is_power_of_2(__size));			\
> 
> and here an if to see if you need the _small version or the regular
> one, with the _small version being a wrapper around the regular one
> 
> > +	__ret = cmpxchg_user_key_size(__size, (target_p), &__old, (new),	\
> > +				      (access_key));				\
> > +	*__old_p = __old;							\
> > +	__ret;									\
> > +})
> > +
> >  #endif /* __S390_UACCESS_H */
>
diff mbox series

Patch

diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index f7038b800cc3..0ce90b7e2b75 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -19,6 +19,8 @@ 
 #include <asm/extable.h>
 #include <asm/facility.h>
 #include <asm-generic/access_ok.h>
+#include <asm/page.h>
+#include <linux/log2.h>
 
 void debug_user_asce(int exit);
 
@@ -390,4 +392,189 @@  do {									\
 		goto err_label;						\
 } while (0)
 
+static __always_inline int __cmpxchg_user_key_small(int size, u64 address,
+						    unsigned __int128 *old_p,
+						    unsigned __int128 new, u8 access_key)
+{
+	u32 shift, mask, old_word, new_word, align_mask, tmp, diff;
+	u64 aligned;
+	int ret = -EFAULT;
+
+	switch (size) {
+	case 2:
+		align_mask = 2;
+		aligned = (address ^ (address & align_mask));
+		shift = (sizeof(u32) - (address & align_mask) - size) * 8;
+		mask = 0xffff << shift;
+		old_word = ((u16)*old_p) << shift;
+		new_word = ((u16)new) << shift;
+		break;
+	case 1:
+		align_mask = 3;
+		aligned = (address ^ (address & align_mask));
+		shift = (sizeof(u32) - (address & align_mask) - size) * 8;
+		mask = 0xff << shift;
+		old_word = ((u8)*old_p) << shift;
+		new_word = ((u8)new) << shift;
+		break;
+	}
+	asm volatile(
+		       "spka	0(%[access_key])\n"
+		"	sacf	256\n"
+		"0:	l	%[tmp],%[aligned]\n"
+		"1:	nr	%[tmp],%[hole_mask]\n"
+		"	or	%[new_word],%[tmp]\n"
+		"	or	%[old_word],%[tmp]\n"
+		"	lr	%[tmp],%[old_word]\n"
+		"2:	cs	%[tmp],%[new_word],%[aligned]\n"
+		"3:	jnl	4f\n"
+		"	xrk	%[diff],%[tmp],%[old_word]\n"
+		"	nr	%[diff],%[hole_mask]\n"
+		"	xr	%[new_word],%[diff]\n"
+		"	xr	%[old_word],%[diff]\n"
+		"	xrk	%[diff],%[tmp],%[old_word]\n"
+		"	jz	2b\n"
+		"4:	ipm	%[ret]\n"
+		"	srl	%[ret],28\n"
+		"5:	sacf	768\n"
+		"	spka	%[default_key]\n"
+		EX_TABLE(0b, 5b) EX_TABLE(1b, 5b)
+		EX_TABLE(2b, 5b) EX_TABLE(3b, 5b)
+		: [old_word] "+&d" (old_word),
+		  [new_word] "+&d" (new_word),
+		  [tmp] "=&d" (tmp),
+		  [aligned] "+Q" (*(u32 *)aligned),
+		  [diff] "=&d" (diff),
+		  [ret] "+d" (ret)
+		: [access_key] "a" (access_key << 4),
+		  [hole_mask] "d" (~mask),
+		  [default_key] "J" (PAGE_DEFAULT_KEY)
+		: "cc"
+	);
+	*old_p = (tmp & mask) >> shift;
+	return ret;
+}
+
+/**
+ * cmpxchg_user_key_size() - cmpxchg with user space target, honoring storage keys
+ * @size: Size of the value being cmpxchg'ed, one of 1,2,4,8,16.
+ * @address: User space address of value to compare to *@old_p and exchange with
+ *           *@new. Must be aligned to @size.
+ * @old_p: Pointer to old value. Interpreted as a @size byte integer and compared
+ *         to the content pointed to by @address in order to determine if the
+ *         exchange occurs. The value read from @address is written back to *@old_p.
+ * @new: New value to place at @address, interpreted as a @size byte integer.
+ * @access_key: Access key to use for checking storage key protection.
+ *
+ * Perform a cmpxchg on a user space target, honoring storage key protection.
+ * @access_key alone determines how key checking is performed, neither
+ * storage-protection-override nor fetch-protection-override apply.
+ *
+ * Return:	0: successful exchange
+ *		1: exchange failed
+ *		-EFAULT: @address not accessible or not naturally aligned
+ *		-EINVAL: invalid @size
+ */
+static __always_inline int cmpxchg_user_key_size(int size, void __user *address,
+						 unsigned __int128 *old_p,
+						 unsigned __int128 new, u8 access_key)
+{
+	union {
+		u32 word;
+		u64 doubleword;
+	} old;
+	int ret = -EFAULT;
+
+	/*
+	 * The following assumes that:
+	 *  * the current psw key is the default key
+	 *  * no storage protection overrides are in effect
+	 */
+	might_fault();
+	switch (size) {
+	case 16:
+		asm volatile(
+			       "spka	0(%[access_key])\n"
+			"	sacf	256\n"
+			"0:	cdsg	%[old],%[new],%[target]\n"
+			"1:	ipm	%[ret]\n"
+			"	srl	%[ret],28\n"
+			"2:	sacf	768\n"
+			"	spka	%[default_key]\n"
+			EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+			: [old] "+d" (*old_p),
+			  [target] "+Q" (*(unsigned __int128 __user *)address),
+			  [ret] "+d" (ret)
+			: [access_key] "a" (access_key << 4),
+			  [new] "d" (new),
+			  [default_key] "J" (PAGE_DEFAULT_KEY)
+			: "cc"
+		);
+		return ret;
+	case 8:
+		old.doubleword = *old_p;
+		asm volatile(
+			       "spka	0(%[access_key])\n"
+			"	sacf	256\n"
+			"0:	csg	%[old],%[new],%[target]\n"
+			"1:	ipm	%[ret]\n"
+			"	srl	%[ret],28\n"
+			"2:	sacf	768\n"
+			"	spka	%[default_key]\n"
+			EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+			: [old] "+d" (old.doubleword),
+			  [target] "+Q" (*(u64 __user *)address),
+			  [ret] "+d" (ret)
+			: [access_key] "a" (access_key << 4),
+			  [new] "d" ((u64)new),
+			  [default_key] "J" (PAGE_DEFAULT_KEY)
+			: "cc"
+		);
+		*old_p = old.doubleword;
+		return ret;
+	case 4:
+		old.word = *old_p;
+		asm volatile(
+			       "spka	0(%[access_key])\n"
+			"	sacf	256\n"
+			"0:	cs	%[old],%[new],%[target]\n"
+			"1:	ipm	%[ret]\n"
+			"	srl	%[ret],28\n"
+			"2:	sacf	768\n"
+			"	spka	%[default_key]\n"
+			EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+			: [old] "+d" (old.word),
+			  [target] "+Q" (*(u32 __user *)address),
+			  [ret] "+d" (ret)
+			: [access_key] "a" (access_key << 4),
+			  [new] "d" ((u32)new),
+			  [default_key] "J" (PAGE_DEFAULT_KEY)
+			: "cc"
+		);
+		*old_p = old.word;
+		return ret;
+	case 2:
+	case 1:
+		return __cmpxchg_user_key_small(size, (u64)address, old_p, new, access_key);
+	default:
+		return -EINVAL;
+	}
+}
+
+#define cmpxchg_user_key(target_p, old_p, new, access_key)			\
+({										\
+	__typeof__(old_p) __old_p = (old_p);					\
+	unsigned __int128 __old = *__old_p;					\
+	size_t __size = sizeof(*(target_p));					\
+	int __ret;								\
+										\
+	BUILD_BUG_ON(__size != sizeof(*__old_p));				\
+	BUILD_BUG_ON(__size != sizeof(new));					\
+	BUILD_BUG_ON(__size > 16 || !is_power_of_2(__size));			\
+	__ret = cmpxchg_user_key_size(__size, (target_p), &__old, (new),	\
+				      (access_key));				\
+	*__old_p = __old;							\
+	__ret;									\
+})
+
 #endif /* __S390_UACCESS_H */