
[5/5] s390/uaccess: add cmpxchg_user_key()

Message ID Y2J8axs+bcQ2dO/l@osiris (mailing list archive)
State New
Series [1/5] s390/cmpxchg: use symbolic names for inline assembly operands

Commit Message

Heiko Carstens Nov. 2, 2022, 2:19 p.m. UTC
Add cmpxchg_user_key(), which allows executing a compare and exchange
on a user space address. It also allows specifying a storage key, which
makes sure that key-controlled protection is considered.

This is based on a patch written by Janis Schoetterl-Glausch.

Link: https://lore.kernel.org/all/20220930210751.225873-2-scgl@linux.ibm.com
Cc: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
---
 arch/s390/include/asm/uaccess.h | 183 ++++++++++++++++++++++++++++++++
 1 file changed, 183 insertions(+)

Comments

Janis Schoetterl-Glausch Nov. 9, 2022, 3:46 p.m. UTC | #1
On Wed, 2022-11-02 at 15:19 +0100, Heiko Carstens wrote:
> Add cmpxchg_user_key(), which allows executing a compare and exchange
> on a user space address. It also allows specifying a storage key, which
> makes sure that key-controlled protection is considered.
> 
> This is based on a patch written by Janis Schoetterl-Glausch.
> 
> Link: https://lore.kernel.org/all/20220930210751.225873-2-scgl@linux.ibm.com
> Cc: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
> Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
> ---
>  arch/s390/include/asm/uaccess.h | 183 ++++++++++++++++++++++++++++++++
>  1 file changed, 183 insertions(+)
> 
> diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
> index f7038b800cc3..9bbdecb80e06 100644
> --- a/arch/s390/include/asm/uaccess.h
> +++ b/arch/s390/include/asm/uaccess.h
> @@ -390,4 +390,187 @@ do {									\
>  		goto err_label;						\
>  } while (0)
>  
> +void __cmpxchg_user_key_called_with_bad_pointer(void);
> +
> +static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
> +					      __uint128_t old, __uint128_t new,
> +					      unsigned long key, int size)
> +{
> +	int rc = 0;
> +
> +	switch (size) {
> +	case 1: {
> +		unsigned int prev, tmp, shift;
> +
> +		shift = (3 ^ (address & 3)) << 3;
> +		address ^= address & 3;
> +		asm volatile(
> +			"	spka	0(%[key])\n"
> +			"	sacf	256\n"
> +			"0:	l	%[prev],%[address]\n"
> +			"1:	nr	%[prev],%[mask]\n"
> +			"	lr	%[tmp],%[prev]\n"
> +			"	or	%[prev],%[old]\n"
> +			"	or	%[tmp],%[new]\n"
> +			"2:	cs	%[prev],%[tmp],%[address]\n"
> +			"3:	jnl	4f\n"
> +			"	xr	%[tmp],%[prev]\n"
> +			"	nr	%[tmp],%[mask]\n"

Are you only entertaining cosmetic changes to cmpxchg.h?
The loop condition being imprecise seems non-ideal.

> +			"	jnz	1b\n"
> +			"4:	sacf	768\n"
> +			"	spka	%[default_key]\n"
> +			EX_TABLE_UA_LOAD_REG(0b, 4b, %[rc], %[prev])
> +			EX_TABLE_UA_LOAD_REG(1b, 4b, %[rc], %[prev])
> +			EX_TABLE_UA_LOAD_REG(2b, 4b, %[rc], %[prev])
> +			EX_TABLE_UA_LOAD_REG(3b, 4b, %[rc], %[prev])
> +			: [rc] "+&d" (rc),
> +			  [prev] "=&d" (prev),
> +			  [tmp] "=&d" (tmp),
> +			  [address] "+Q" (*(int *)address)
> +			: [old] "d" (((unsigned int)old & 0xff) << shift),
> +			  [new] "d" (((unsigned int)new & 0xff) << shift),
> +			  [mask] "d" (~(0xff << shift)),
> +			  [key] "a" (key),

Why did you get rid of the << 4 shift?
That's inconsistent with the other uaccess functions that take an access key.

> +			  [default_key] "J" (PAGE_DEFAULT_KEY)
> +			: "memory", "cc");
> +		*(unsigned char *)uval = prev >> shift;
> +		return rc;
> +	}

[...]
Heiko Carstens Nov. 9, 2022, 10:24 p.m. UTC | #2
On Wed, Nov 09, 2022 at 04:46:29PM +0100, Janis Schoetterl-Glausch wrote:
> On Wed, 2022-11-02 at 15:19 +0100, Heiko Carstens wrote:
> > +	case 1: {
> > +		unsigned int prev, tmp, shift;
> > +
> > +		shift = (3 ^ (address & 3)) << 3;
> > +		address ^= address & 3;
> > +		asm volatile(
> > +			"	spka	0(%[key])\n"
> > +			"	sacf	256\n"
> > +			"0:	l	%[prev],%[address]\n"
> > +			"1:	nr	%[prev],%[mask]\n"
> > +			"	lr	%[tmp],%[prev]\n"
> > +			"	or	%[prev],%[old]\n"
> > +			"	or	%[tmp],%[new]\n"
> > +			"2:	cs	%[prev],%[tmp],%[address]\n"
> > +			"3:	jnl	4f\n"
> > +			"	xr	%[tmp],%[prev]\n"
> > +			"	nr	%[tmp],%[mask]\n"
> 
> Are you only entertaining cosmetic changes to cmpxchg.h?

I fail to parse what you are trying to say. Please elaborate.

> The loop condition being imprecise seems non-ideal.

What exactly is imprecise?

> > +			  [key] "a" (key),
> 
> Why did you get rid of the << 4 shift?
> That's inconsistent with the other uaccess functions that take an access key.

That's not only inconsistent, but also a bug.
Thank you for pointing this out. Will be fixed.
Janis Schoetterl-Glausch Nov. 10, 2022, 11:01 a.m. UTC | #3
On Wed, 2022-11-09 at 23:24 +0100, Heiko Carstens wrote:
> On Wed, Nov 09, 2022 at 04:46:29PM +0100, Janis Schoetterl-Glausch wrote:
> > On Wed, 2022-11-02 at 15:19 +0100, Heiko Carstens wrote:
> > > +	case 1: {
> > > +		unsigned int prev, tmp, shift;
> > > +
> > > +		shift = (3 ^ (address & 3)) << 3;
> > > +		address ^= address & 3;
> > > +		asm volatile(
> > > +			"	spka	0(%[key])\n"
> > > +			"	sacf	256\n"
> > > +			"0:	l	%[prev],%[address]\n"
> > > +			"1:	nr	%[prev],%[mask]\n"
> > > +			"	lr	%[tmp],%[prev]\n"
> > > +			"	or	%[prev],%[old]\n"
> > > +			"	or	%[tmp],%[new]\n"
> > > +			"2:	cs	%[prev],%[tmp],%[address]\n"
> > > +			"3:	jnl	4f\n"
> > > +			"	xr	%[tmp],%[prev]\n"
> > > +			"	nr	%[tmp],%[mask]\n"
> > 
> > Are you only entertaining cosmetic changes to cmpxchg.h?
> 
> I fail to parse what you are trying to say. Please elaborate.
> 
> > The loop condition being imprecise seems non-ideal.
> 
> What exactly is imprecise?

The loop retries the CS if bits outside the target byte changed instead
of retrying until the target byte differs from the old value.
So if you attempt to exchange (prev_left_0 old_byte prev_right_0) and 
that fails because the word at the address is (prev_left_1 x prev_right_1)
where both x != old_byte and one of the prev_*_1 values differs from the respective
prev_*_0 value, the CS is retried. If there were a native 1 byte compare and swap,
the exchange would just fail here. Instead the loop retries the CS until the margin
values are stable and it can infer from that that the CS failed because of the target value.
(Assuming that doesn't change to the old_byte value.)

It's not a problem, but it struck me as non-ideal, which is why for v2 I inverted the mask
after using it to punch the hole for the old/new values.
Then you can use it to test if bits inside the target byte differ.
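
A minimal C sketch of that more precise condition (not from this patch or the
v2 series; the helper name is made up, and GCC's __atomic_compare_exchange_n
builtin stands in for the s390 CS instruction on a 32-bit big-endian word with
a precomputed shift) could look like this:

	static int cmpxchg_byte_sketch(unsigned int *word, unsigned int shift,
				       unsigned char old, unsigned char new)
	{
		unsigned int hole_mask = ~(0xffU << shift);	/* clears the target byte */
		unsigned int byte_mask = ~hole_mask;		/* selects the target byte */
		unsigned int prev, desired;

		prev = __atomic_load_n(word, __ATOMIC_RELAXED);
		do {
			/* Fail as soon as the target byte itself differs from old. */
			if ((prev & byte_mask) != ((unsigned int)old << shift))
				break;
			desired = (prev & hole_mask) | ((unsigned int)new << shift);
			/* On failure the builtin reloads prev with the current word. */
		} while (!__atomic_compare_exchange_n(word, &prev, desired, 0,
						      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
		/* The caller compares the returned byte with old to see if it exchanged. */
		return (prev & byte_mask) >> shift;
	}

By contrast, the v1 asm above retries whenever bits outside the target byte
changed (the xr/nr/jnz 1b sequence), which can loop in cases a native one-byte
compare and swap would simply report as a mismatch.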

That's why I asked about cmpxchg.h. If you don't want non-cosmetic changes to the existing
cmpxchg function, but want the new key-checked function to stay consistent with it, then
obviously the loop condition needs to be the same.
> 
> > > +			  [key] "a" (key),
> > 
> > Why did you get rid of the << 4 shift?
> > That's inconsistent with the other uaccess functions that take an access key.
> 
> That's not only inconsistent, but also a bug.
> Thank you for pointing this out. Will be fixed.

Well, you could pass in the shifted key as argument, but yeah.
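
For reference, the fix presumably boils down to shifting the key in the operand
list: SPKA takes the access key from bits 56-59 of its second-operand address,
so a raw 0-15 key lands in bits 60-63 and would effectively set PSW key 0. A
hedged sketch of the corrected operand (mirroring the other key-checked uaccess
helpers):

			  /* as posted: */
			  [key] "a" (key),
			  /* presumed fix - align the 4-bit key with SPKA's key field: */
			  [key] "a" (key << 4),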
Heiko Carstens Nov. 10, 2022, 11:32 a.m. UTC | #4
On Thu, Nov 10, 2022 at 12:01:23PM +0100, Janis Schoetterl-Glausch wrote:
> On Wed, 2022-11-09 at 23:24 +0100, Heiko Carstens wrote:
> > On Wed, Nov 09, 2022 at 04:46:29PM +0100, Janis Schoetterl-Glausch wrote:
> > > Are you only entertaining cosmetic changes to cmpxchg.h?
> > 
> > I fail to parse what you are trying to say. Please elaborate.
> > 
> > > The loop condition being imprecise seems non-ideal.
> > 
> > What exactly is imprecise?
> 
> The loop retries the CS if bits outside the target byte changed instead
> of retrying until the target byte differs from the old value.
> So if you attempt to exchange (prev_left_0 old_byte prev_right_0) and 
> that fails because the word at the address is (prev_left_1 x prev_right_1)
> where both x != old_byte and one of the prev_*_1 values differs from the respective
> prev_*_0 value, the CS is retried. If there were a native 1 byte compare and swap,
> the exchange would just fail here. Instead the loop retries the CS until the margin
> values are stable and it can infer from that that the CS failed because of the target value.
> (Assuming that doesn't change to the old_byte value.)
> 
> It's not a problem, but it struck me as non-ideal, which is why for v2 I inverted the mask
> after using it to punch the hole for the old/new values.
> Then you can use it to test if bits inside the target byte differ.
> 
> That's why I asked about cmpxchg.h. If you don't want non-cosmetic changes to the existing
> cmpxchg function, but want the new key-checked function to stay consistent with it, then
> obviously the loop condition needs to be the same.

Such a change is fine of course, even though compare-and-swap for one and
two byte patterns doesn't really matter. I would appreciate it if you could
send one or two patches on top of this series which add the improved logic
to (now) both variants.

And, since the question will come up anyway: as soon as we have agreed on a
complete patch series, I think we should go for a feature branch on s390's
kernel.org tree which would contain the first five patches sent by me plus
potential add-on patches provided by you.
This tree can then be pulled in by the kvms390 tree, where your KVM-specific
patches can then be applied on top.
Heiko Carstens Nov. 13, 2022, 6:20 p.m. UTC | #5
On Thu, Nov 10, 2022 at 12:32:06PM +0100, Heiko Carstens wrote:
> > That's why I asked about cmpxchg.h. If you don't want non-cosmetic changes to the existing
> > cmpxchg function, but want the new key-checked function to stay consistent with it, then
> > obviously the loop condition needs to be the same.
> 
> Such a change is fine of course, even though compare-and-swap for one and
> two byte patterns doesn't really matter. I would appreciate it if you could
> send one or two patches on top of this series which add the improved logic
> to (now) both variants.
> 
> And, since the question will come up anyway: as soon as we have agreed on a
> complete patch series, I think we should go for a feature branch on s390's
> kernel.org tree which would contain the first five patches sent by me plus
> potential add-on patches provided by you.
> This tree can then be pulled in by the kvms390 tree, where your KVM-specific
> patches can then be applied on top.

FWIW, pushed a non-stable work-in-progress branch to
git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux.git wip/cmpxchg_user_key

This also includes an updated patch, which fixes the missing shift of
the access key.

Patch

diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index f7038b800cc3..9bbdecb80e06 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -390,4 +390,187 @@  do {									\
 		goto err_label;						\
 } while (0)
 
+void __cmpxchg_user_key_called_with_bad_pointer(void);
+
+static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
+					      __uint128_t old, __uint128_t new,
+					      unsigned long key, int size)
+{
+	int rc = 0;
+
+	switch (size) {
+	case 1: {
+		unsigned int prev, tmp, shift;
+
+		shift = (3 ^ (address & 3)) << 3;
+		address ^= address & 3;
+		asm volatile(
+			"	spka	0(%[key])\n"
+			"	sacf	256\n"
+			"0:	l	%[prev],%[address]\n"
+			"1:	nr	%[prev],%[mask]\n"
+			"	lr	%[tmp],%[prev]\n"
+			"	or	%[prev],%[old]\n"
+			"	or	%[tmp],%[new]\n"
+			"2:	cs	%[prev],%[tmp],%[address]\n"
+			"3:	jnl	4f\n"
+			"	xr	%[tmp],%[prev]\n"
+			"	nr	%[tmp],%[mask]\n"
+			"	jnz	1b\n"
+			"4:	sacf	768\n"
+			"	spka	%[default_key]\n"
+			EX_TABLE_UA_LOAD_REG(0b, 4b, %[rc], %[prev])
+			EX_TABLE_UA_LOAD_REG(1b, 4b, %[rc], %[prev])
+			EX_TABLE_UA_LOAD_REG(2b, 4b, %[rc], %[prev])
+			EX_TABLE_UA_LOAD_REG(3b, 4b, %[rc], %[prev])
+			: [rc] "+&d" (rc),
+			  [prev] "=&d" (prev),
+			  [tmp] "=&d" (tmp),
+			  [address] "+Q" (*(int *)address)
+			: [old] "d" (((unsigned int)old & 0xff) << shift),
+			  [new] "d" (((unsigned int)new & 0xff) << shift),
+			  [mask] "d" (~(0xff << shift)),
+			  [key] "a" (key),
+			  [default_key] "J" (PAGE_DEFAULT_KEY)
+			: "memory", "cc");
+		*(unsigned char *)uval = prev >> shift;
+		return rc;
+	}
+	case 2: {
+		unsigned int prev, tmp, shift;
+
+		shift = (2 ^ (address & 2)) << 3;
+		address ^= address & 2;
+		asm volatile(
+			"	spka	0(%[key])\n"
+			"	sacf	256\n"
+			"0:	l	%[prev],%[address]\n"
+			"1:	nr	%[prev],%[mask]\n"
+			"	lr	%[tmp],%[prev]\n"
+			"	or	%[prev],%[old]\n"
+			"	or	%[tmp],%[new]\n"
+			"2:	cs	%[prev],%[tmp],%[address]\n"
+			"3:	jnl	4f\n"
+			"	xr	%[tmp],%[prev]\n"
+			"	nr	%[tmp],%[mask]\n"
+			"	jnz	1b\n"
+			"4:	sacf	768\n"
+			"	spka	%[default_key]\n"
+			EX_TABLE_UA_LOAD_REG(0b, 4b, %[rc], %[prev])
+			EX_TABLE_UA_LOAD_REG(1b, 4b, %[rc], %[prev])
+			EX_TABLE_UA_LOAD_REG(2b, 4b, %[rc], %[prev])
+			EX_TABLE_UA_LOAD_REG(3b, 4b, %[rc], %[prev])
+			: [rc] "+&d" (rc),
+			  [prev] "=&d" (prev),
+			  [tmp] "=&d" (tmp),
+			  [address] "+Q" (*(int *)address)
+			: [old] "d" (((unsigned int)old & 0xffff) << shift),
+			  [new] "d" (((unsigned int)new & 0xffff) << shift),
+			  [mask] "d" (~(0xffff << shift)),
+			  [key] "a" (key),
+			  [default_key] "J" (PAGE_DEFAULT_KEY)
+			: "memory", "cc");
+		*(unsigned short *)uval = prev >> shift;
+		return rc;
+	}
+	case 4:	{
+		unsigned int prev = old;
+
+		asm volatile(
+			"	spka	0(%[key])\n"
+			"	sacf	256\n"
+			"0:	cs	%[prev],%[new],%[address]\n"
+			"1:	sacf	768\n"
+			"	spka	%[default_key]\n"
+			EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
+			EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
+			: [rc] "+&d" (rc),
+			  [prev] "+&d" (prev),
+			  [address] "+Q" (*(int *)address)
+			: [new] "d" ((unsigned int)new),
+			  [key] "a" (key),
+			  [default_key] "J" (PAGE_DEFAULT_KEY)
+			: "memory", "cc");
+		*(unsigned int *)uval = prev;
+		return rc;
+	}
+	case 8: {
+		unsigned long prev = old;
+
+		asm volatile(
+			"	spka	0(%[key])\n"
+			"	sacf	256\n"
+			"0:	csg	%[prev],%[new],%[address]\n"
+			"1:	sacf	768\n"
+			"	spka	%[default_key]\n"
+			EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
+			EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
+			: [rc] "+&d" (rc),
+			  [prev] "+&d" (prev),
+			  [address] "+QS" (*(long *)address)
+			: [new] "d" ((unsigned long)new),
+			  [key] "a" (key),
+			  [default_key] "J" (PAGE_DEFAULT_KEY)
+			: "memory", "cc");
+		*(unsigned long *)uval = prev;
+		return rc;
+	}
+	case 16: {
+		__uint128_t prev = old;
+
+		asm volatile(
+			"	spka	0(%[key])\n"
+			"	sacf	256\n"
+			"0:	cdsg	%[prev],%[new],%[address]\n"
+			"1:	sacf	768\n"
+			"	spka	%[default_key]\n"
+			EX_TABLE_UA_LOAD_REGPAIR(0b, 1b, %[rc], %[prev])
+			EX_TABLE_UA_LOAD_REGPAIR(1b, 1b, %[rc], %[prev])
+			: [rc] "+&d" (rc),
+			  [prev] "+&d" (prev),
+			  [address] "+QS" (*(__int128_t *)address)
+			: [new] "d" (new),
+			  [key] "a" (key),
+			  [default_key] "J" (PAGE_DEFAULT_KEY)
+			: "memory", "cc");
+		*(__uint128_t *)uval = prev;
+		return rc;
+	}
+	}
+	__cmpxchg_user_key_called_with_bad_pointer();
+	return rc;
+}
+
+/**
+ * cmpxchg_user_key() - cmpxchg with user space target, honoring storage keys
+ * @ptr: User space address of value to compare to @old and exchange with
+ *	 @new. Must be aligned to sizeof(*@ptr).
+ * @uval: Address where the old value of *@ptr is written to.
+ * @old: Old value. Compared to the content pointed to by @ptr in order to
+ *	 determine if the exchange occurs. The old value read from *@ptr is
+ *	 written to *@uval.
+ * @new: New value to place at *@ptr.
+ * @key: Access key to use for checking storage key protection.
+ *
+ * Perform a cmpxchg on a user space target, honoring storage key protection.
+ * @key alone determines how key checking is performed, neither
+ * storage-protection-override nor fetch-protection-override apply.
+ * The caller must compare *@uval and @old to determine if values have been
+ * exchanged. In case of an exception *@uval is set to zero.
+ *
+ * Return:     0: cmpxchg executed
+ *	       -EFAULT: an exception happened when trying to access *@ptr
+ */
+#define cmpxchg_user_key(ptr, uval, old, new, key)			\
+({									\
+	__typeof__(ptr) __ptr = (ptr);					\
+	__typeof__(uval) __uval = (uval);				\
+									\
+	BUILD_BUG_ON(sizeof(*(__ptr)) != sizeof(*(__uval)));		\
+	might_fault();							\
+	__chk_user_ptr(__ptr);						\
+	__cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval),	\
+			   (old), (new), (key), sizeof(*(__ptr)));	\
+})
+
 #endif /* __S390_UACCESS_H */
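
To illustrate the calling convention documented in the kernel-doc above, a
hypothetical caller (the helper name, pointer, and the increment operation are
made up for illustration) might look like this:

/*
 * Hypothetical caller sketch, not part of the patch: a key-checked atomic
 * increment of a 4-byte user value. rc only reports exceptions; whether the
 * exchange happened is determined by comparing prev with the expected value,
 * as the kernel-doc above requires.
 */
static int inc_user_word_key(unsigned int __user *uptr, unsigned long access_key)
{
	unsigned int expected, prev;
	int rc;

	if (get_user(expected, uptr))		/* initial read, no key checking here */
		return -EFAULT;
	for (;;) {
		rc = cmpxchg_user_key(uptr, &prev, expected, expected + 1, access_key);
		if (rc)
			return rc;		/* -EFAULT while accessing *uptr */
		if (prev == expected)
			return 0;		/* exchange performed */
		expected = prev;		/* concurrent update, retry */
	}
}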