[v3,1/5] xen/ppc: Implement atomic.h

Message ID 917dc5517b69657b48e69c4100234383f5c70e0e.1693590982.git.sanastasio@raptorengineering.com (mailing list archive)
State Superseded
Series ppc: Enable full Xen build

Commit Message

Shawn Anastasio Sept. 1, 2023, 6:25 p.m. UTC
Implement atomic.h for PPC, based on the original Xen 3.2
implementation.

Signed-off-by: Shawn Anastasio <sanastasio@raptorengineering.com>
---
v3:
  - Drop full copyright text headers
  - Drop unnecessary spaces after casts
  - const-ize write_atomic_size
  - Don't use GNU 0-length array extension in read_atomic
  - Use "+m" asm constraint instead of separate "=m" and "m"
  - Fix line-continuing backslash formatting in arch_cmpxchg

v2:
  - Fix style of asm block constraints to include required spaces
  - Fix macro local variable naming (use trailing underscore instead of
    leading)
  - Drop unnecessary parens in __atomic_add_unless

 xen/arch/ppc/include/asm/atomic.h | 384 ++++++++++++++++++++++++++++++
 xen/arch/ppc/include/asm/memory.h |  34 +++
 2 files changed, 418 insertions(+)
 create mode 100644 xen/arch/ppc/include/asm/atomic.h
 create mode 100644 xen/arch/ppc/include/asm/memory.h

--
2.30.2

Comments

Jan Beulich Sept. 5, 2023, 2:58 p.m. UTC | #1
On 01.09.2023 20:25, Shawn Anastasio wrote:
> +static inline atomic_t atomic_compareandswap(atomic_t old, atomic_t new,
> +                                             atomic_t *v)
> +{
> +    atomic_t rc;
> +    rc.counter = __cmpxchg(&v->counter, old.counter, new.counter, sizeof(int));

I can't seem to spot where __cmpxchg() is defined. (I really only
wanted to check why it needs the 4th argument, which here rather would
want to be e.g. sizeof(v->counter). If it can't be omitted.)

> +    return rc;
> +}
> +
> +#define arch_cmpxchg(ptr, o, n)                                                \
> +    ({                                                                         \
> +        __typeof__(*(ptr)) o_ = (o);                                           \
> +        __typeof__(*(ptr)) n_ = (n);                                           \
> +        (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long) o_,              \
> +                                       (unsigned long) n_, sizeof(*(ptr)));    \

Nit: Stray blanks again after cast specifiers.

> --- /dev/null
> +++ b/xen/arch/ppc/include/asm/memory.h
> @@ -0,0 +1,21 @@
> +/* SPDX-License-Identifier: GPL-2.0-or-later */
> +/*
> + * Copyright (C) IBM Corp. 2005
> + *
> + * Authors: Jimi Xenidis <jimix@watson.ibm.com>
> + */
> +
> +#ifndef _ASM_MEMORY_H_
> +#define _ASM_MEMORY_H_
> +
> +#include <xen/config.h>

As mentioned before - no need for this explicit #include.

> +#ifdef CONFIG_SMP
> +#define PPC_ATOMIC_ENTRY_BARRIER "sync\n"
> +#define PPC_ATOMIC_EXIT_BARRIER  "sync\n"
> +#else
> +#define PPC_ATOMIC_ENTRY_BARRIER
> +#define PPC_ATOMIC_EXIT_BARRIER
> +#endif

Is this correct, considering that we have no CONFIG_SMP and assume SMP
in all cases?

I'm sorry for not paying attention earlier.

Jan
Jan Beulich Sept. 5, 2023, 3:54 p.m. UTC | #2
On 05.09.2023 16:58, Jan Beulich wrote:
> On 01.09.2023 20:25, Shawn Anastasio wrote:
>> +static inline atomic_t atomic_compareandswap(atomic_t old, atomic_t new,
>> +                                             atomic_t *v)
>> +{
>> +    atomic_t rc;
>> +    rc.counter = __cmpxchg(&v->counter, old.counter, new.counter, sizeof(int));
> 
> I can't seem to spot where __cmpxchg() is defined. (I really only
> wanted to check why it needs the 4th argument, which here rather would
> want to be e.g. sizeof(v->counter). If it can't be omitted.)

Found it in patch 3, which suggests an incorrect split of patches. That
may not matter much in practice if the headers aren't used before enough
of the machinery is in place, but such a broken connection would be
worth mentioning in the description.

Jan
Shawn Anastasio Sept. 7, 2023, 8:10 p.m. UTC | #3
On 9/5/23 9:58 AM, Jan Beulich wrote:
> On 01.09.2023 20:25, Shawn Anastasio wrote:
>> +static inline atomic_t atomic_compareandswap(atomic_t old, atomic_t new,
>> +                                             atomic_t *v)
>> +{
>> +    atomic_t rc;
>> +    rc.counter = __cmpxchg(&v->counter, old.counter, new.counter, sizeof(int));
> 
> I can't seem to spot where __cmpxchg() is defined. (I really only
> wanted to check why it needs the 4th argument, which here rather would
> want to be e.g. sizeof(v->counter). If it can't be omitted.)
>

As you later saw, it's defined in system.h in patch 3. This was an error
I made when splitting the changes up into this patchset. If you're fine
with it, I'll add a mention in the commit message that the definitions
in this commit are not yet fully usable.

Also, I will change the size parameter to sizeof(v->counter) per your
suggestion.
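
For reference, the patch 3 definition dispatches on the operand size
before entering the reservation loop. As a rough sketch (simplified, not
the verbatim patch 3 code; the helper names are illustrative):

static always_inline unsigned long __cmpxchg(volatile void *ptr,
                                             unsigned long old,
                                             unsigned long new,
                                             unsigned int size)
{
    /* Each helper wraps a larx/stcx. retry loop of the matching width. */
    switch ( size )
    {
    case 4:
        return __cmpxchg_u32(ptr, old, new);
    case 8:
        return __cmpxchg_u64(ptr, old, new);
    default:
        __bad_atomic_size();
        return 0;
    }
}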

>> +    return rc;
>> +}
>> +
>> +#define arch_cmpxchg(ptr, o, n)                                                \
>> +    ({                                                                         \
>> +        __typeof__(*(ptr)) o_ = (o);                                           \
>> +        __typeof__(*(ptr)) n_ = (n);                                           \
>> +        (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long) o_,              \
>> +                                       (unsigned long) n_, sizeof(*(ptr)));    \
> 
> Nit: Stray blanks again after cast specifiers.
>

Will fix.

>> --- /dev/null
>> +++ b/xen/arch/ppc/include/asm/memory.h
>> @@ -0,0 +1,21 @@
>> +/* SPDX-License-Identifier: GPL-2.0-or-later */
>> +/*
>> + * Copyright (C) IBM Corp. 2005
>> + *
>> + * Authors: Jimi Xenidis <jimix@watson.ibm.com>
>> + */
>> +
>> +#ifndef _ASM_MEMORY_H_
>> +#define _ASM_MEMORY_H_
>> +
>> +#include <xen/config.h>
> 
> As mentioned before - no need for this explicit #include.
>

Will drop.

>> +#ifdef CONFIG_SMP
>> +#define PPC_ATOMIC_ENTRY_BARRIER "sync\n"
>> +#define PPC_ATOMIC_EXIT_BARRIER  "sync\n"
>> +#else
>> +#define PPC_ATOMIC_ENTRY_BARRIER
>> +#define PPC_ATOMIC_EXIT_BARRIER
>> +#endif
> 
> Is this correct, considering that we have no CONFIG_SMP and assume SMP
> in all cases?
> 
> I'm sorry for not paying attention earlier.
>

Good observation, and no problem. I will remove the ifdef and the !SMP
case.
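
I.e. memory.h will then unconditionally define (keeping plain "sync" for
both barriers, as in the current SMP branch):

#define PPC_ATOMIC_ENTRY_BARRIER "sync\n"
#define PPC_ATOMIC_EXIT_BARRIER  "sync\n"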

> Jan

Thanks,
Shawn
Jan Beulich Sept. 8, 2023, 6:14 a.m. UTC | #4
On 07.09.2023 22:10, Shawn Anastasio wrote:
> On 9/5/23 9:58 AM, Jan Beulich wrote:
>> On 01.09.2023 20:25, Shawn Anastasio wrote:
>>> +static inline atomic_t atomic_compareandswap(atomic_t old, atomic_t new,
>>> +                                             atomic_t *v)
>>> +{
>>> +    atomic_t rc;
>>> +    rc.counter = __cmpxchg(&v->counter, old.counter, new.counter, sizeof(int));
>>
>> I can't seem to spot where __cmpxchg() is defined. (I really only
>> wanted to check why it needs the 4th argument, which here rather would
>> want to be e.g. sizeof(v->counter). If it can't be omitted.)
>>
> 
> As you later saw, it's defined in system.h later in patch 3. This was an
> error I made when splitting up the changes into this patchset. If you're
> fine with it I'll just add a mention in the commit message that the
> definitions in this commit are not yet fully usable.

Sure, that's going to be okay.

Jan

Patch

diff --git a/xen/arch/ppc/include/asm/atomic.h b/xen/arch/ppc/include/asm/atomic.h
new file mode 100644
index 0000000000..06cd24ead4
--- /dev/null
+++ b/xen/arch/ppc/include/asm/atomic.h
@@ -0,0 +1,384 @@ 
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PowerPC64 atomic operations
+ *
+ * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
+ * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
+ * Copyright Raptor Engineering LLC
+ */
+
+#ifndef _ASM_PPC64_ATOMIC_H_
+#define _ASM_PPC64_ATOMIC_H_
+
+#include <xen/atomic.h>
+
+#include <asm/memory.h>
+
+static inline int atomic_read(const atomic_t *v)
+{
+    return *(volatile int *)&v->counter;
+}
+
+static inline int _atomic_read(atomic_t v)
+{
+    return v.counter;
+}
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+    v->counter = i;
+}
+
+static inline void _atomic_set(atomic_t *v, int i)
+{
+    v->counter = i;
+}
+
+void __bad_atomic_read(const volatile void *p, void *res);
+void __bad_atomic_size(void);
+
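+/*
+ * Note: the %Un%Xn operand modifiers below let GCC emit the update/indexed
+ * addressing forms permitted by the "m<>" memory constraint.
+ */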
+#define build_atomic_read(name, insn, type)                                    \
+    static inline type name(const volatile type *addr)                         \
+    {                                                                          \
+        type ret;                                                              \
+        asm volatile ( insn "%U1%X1 %0,%1" : "=r" (ret) : "m<>" (*addr) );     \
+        return ret;                                                            \
+    }
+
+#define build_atomic_write(name, insn, type)                                   \
+    static inline void name(volatile type *addr, type val)                     \
+    {                                                                          \
+        asm volatile ( insn "%U0%X0 %1,%0" : "=m<>" (*addr) : "r" (val) );     \
+    }
+
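+/*
+ * Read-modify-write add, implemented as a load-reserve (larx) /
+ * store-conditional (stcx.) retry loop.
+ */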
+#define build_add_sized(name, ldinsn, stinsn, type)                            \
+    static inline void name(volatile type *addr, type val)                     \
+    {                                                                          \
+        type t;                                                                \
+        asm volatile ( "1: " ldinsn " %0,0,%3\n"                               \
+                       "add%I2 %0,%0,%2\n"                                     \
+                       stinsn " %0,0,%3\n"                                    \
+                       "bne- 1b\n"                                             \
+                       : "=&r" (t), "+m" (*addr)                               \
+                       : "r" (val), "r" (addr)                                 \
+                       : "cc" );                                               \
+    }
+
+build_atomic_read(read_u8_atomic, "lbz", uint8_t)
+build_atomic_read(read_u16_atomic, "lhz", uint16_t)
+build_atomic_read(read_u32_atomic, "lwz", uint32_t)
+build_atomic_read(read_u64_atomic, "ldz", uint64_t)
+
+build_atomic_write(write_u8_atomic, "stb", uint8_t)
+build_atomic_write(write_u16_atomic, "sth", uint16_t)
+build_atomic_write(write_u32_atomic, "stw", uint32_t)
+build_atomic_write(write_u64_atomic, "std", uint64_t)
+
+build_add_sized(add_u8_sized, "lbarx", "stbcx.", uint8_t)
+build_add_sized(add_u16_sized, "lharx", "sthcx.", uint16_t)
+build_add_sized(add_u32_sized, "lwarx", "stwcx.", uint32_t)
+
+#undef build_atomic_read
+#undef build_atomic_write
+#undef build_add_sized
+
+static always_inline void read_atomic_size(const volatile void *p, void *res,
+                                           unsigned int size)
+{
+    ASSERT(IS_ALIGNED((vaddr_t)p, size));
+    switch ( size )
+    {
+    case 1:
+        *(uint8_t *)res = read_u8_atomic(p);
+        break;
+    case 2:
+        *(uint16_t *)res = read_u16_atomic(p);
+        break;
+    case 4:
+        *(uint32_t *)res = read_u32_atomic(p);
+        break;
+    case 8:
+        *(uint64_t *)res = read_u64_atomic(p);
+        break;
+    default:
+        __bad_atomic_read(p, res);
+        break;
+    }
+}
+
+static always_inline void write_atomic_size(volatile void *p, const void *val,
+                                            unsigned int size)
+{
+    ASSERT(IS_ALIGNED((vaddr_t)p, size));
+    switch ( size )
+    {
+    case 1:
+        write_u8_atomic(p, *(const uint8_t *)val);
+        break;
+    case 2:
+        write_u16_atomic(p, *(const uint16_t *)val);
+        break;
+    case 4:
+        write_u32_atomic(p, *(const uint32_t *)val);
+        break;
+    case 8:
+        write_u64_atomic(p, *(const uint64_t *)val);
+        break;
+    default:
+        __bad_atomic_size();
+        break;
+    }
+}
+
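+/* Type-generic accessors: dispatch on sizeof(*(p)) at compile time. */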
+#define read_atomic(p)                                                         \
+    ({                                                                         \
+        union {                                                                \
+            typeof(*(p)) val;                                                  \
+            char c[sizeof(*(p))];                                              \
+        } x_;                                                                  \
+        read_atomic_size(p, x_.c, sizeof(*(p)));                               \
+        x_.val;                                                                \
+    })
+
+#define write_atomic(p, x)                                                     \
+    do                                                                         \
+    {                                                                          \
+        typeof(*(p)) x_ = (x);                                                 \
+        write_atomic_size(p, &x_, sizeof(*(p)));                               \
+    } while ( 0 )
+
+#define add_sized(p, x)                                                        \
+    ({                                                                         \
+        typeof(*(p)) x_ = (x);                                                 \
+        switch ( sizeof(*(p)) )                                                \
+        {                                                                      \
+        case 1:                                                                \
+            add_u8_sized((uint8_t *)(p), x_);                                  \
+            break;                                                             \
+        case 2:                                                                \
+            add_u16_sized((uint16_t *)(p), x_);                                \
+            break;                                                             \
+        case 4:                                                                \
+            add_u32_sized((uint32_t *)(p), x_);                                \
+            break;                                                             \
+        default:                                                               \
+            __bad_atomic_size();                                               \
+            break;                                                             \
+        }                                                                      \
+    })
+
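+/*
+ * The lwarx/stwcx. loops below retry (bne- 1b) until the store-conditional
+ * succeeds, i.e. until no other CPU has written the word since the
+ * reservation was established.
+ */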
+static inline void atomic_add(int a, atomic_t *v)
+{
+    int t;
+
+    asm volatile ( "1: lwarx %0,0,%3\n"
+                   "add %0,%2,%0\n"
+                   "stwcx. %0,0,%3\n"
+                   "bne- 1b"
+                   : "=&r" (t), "+m" (v->counter)
+                   : "r" (a), "r" (&v->counter)
+                   : "cc" );
+}
+
+static inline int atomic_add_return(int a, atomic_t *v)
+{
+    int t;
+
+    asm volatile ( PPC_ATOMIC_ENTRY_BARRIER
+                   "1: lwarx %0,0,%2\n"
+                   "add %0,%1,%0\n"
+                   "stwcx. %0,0,%2\n"
+                   "bne- 1b"
+                   PPC_ATOMIC_EXIT_BARRIER
+                   : "=&r" (t)
+                   : "r" (a), "r" (&v->counter)
+                   : "cc", "memory" );
+
+    return t;
+}
+
+static inline void atomic_sub(int a, atomic_t *v)
+{
+    int t;
+
+    asm volatile ( "1: lwarx %0,0,%3\n"
+                   "subf %0,%2,%0\n"
+                   "stwcx. %0,0,%3\n"
+                   "bne- 1b"
+                   : "=&r" (t), "+m" (v->counter)
+                   : "r" (a), "r" (&v->counter)
+                   : "cc" );
+}
+
+static inline int atomic_sub_return(int a, atomic_t *v)
+{
+    int t;
+
+    asm volatile ( PPC_ATOMIC_ENTRY_BARRIER
+                   "1: lwarx %0,0,%2\n"
+                   "subf %0,%1,%0\n"
+                   "stwcx. %0,0,%2\n"
+                   "bne- 1b"
+                   PPC_ATOMIC_EXIT_BARRIER
+                   : "=&r" (t)
+                   : "r" (a), "r" (&v->counter)
+                   : "cc", "memory" );
+
+    return t;
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+    int t;
+
+    asm volatile ( "1: lwarx %0,0,%2\n"
+                   "addic %0,%0,1\n"
+                   "stwcx. %0,0,%2\n"
+                   "bne- 1b"
+                   : "=&r" (t), "+m" (v->counter)
+                   : "r" (&v->counter)
+                   : "cc" );
+}
+
+static inline int atomic_inc_return(atomic_t *v)
+{
+    int t;
+
+    asm volatile ( PPC_ATOMIC_ENTRY_BARRIER
+                   "1: lwarx %0,0,%1\n"
+                   "addic %0,%0,1\n"
+                   "stwcx. %0,0,%1\n"
+                   "bne- 1b"
+                   PPC_ATOMIC_EXIT_BARRIER
+                   : "=&r" (t)
+                   : "r" (&v->counter)
+                   : "cc", "memory" );
+
+    return t;
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+    int t;
+
+    asm volatile ( "1: lwarx %0,0,%2\n"
+                   "addic %0,%0,-1\n"
+                   "stwcx. %0,0,%2\n"
+                   "bne- 1b"
+                   : "=&r" (t), "+m" (v->counter)
+                   : "r" (&v->counter)
+                   : "cc" );
+}
+
+static inline int atomic_dec_return(atomic_t *v)
+{
+    int t;
+
+    asm volatile ( PPC_ATOMIC_ENTRY_BARRIER
+                   "1: lwarx %0,0,%1\n"
+                   "addic %0,%0,-1\n"
+                   "stwcx. %0,0,%1\n"
+                   "bne- 1b"
+                   PPC_ATOMIC_EXIT_BARRIER
+                   : "=&r" (t)
+                   : "r" (&v->counter)
+                   : "cc", "memory" );
+
+    return t;
+}
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+static inline int atomic_dec_if_positive(atomic_t *v)
+{
+    int t;
+
+    asm volatile ( PPC_ATOMIC_ENTRY_BARRIER
+                  "1: lwarx %0,0,%1 # atomic_dec_if_positive\n"
+                  "addic. %0,%0,-1\n"
+                  "blt- 2f\n"
+                  "stwcx. %0,0,%1\n"
+                  "bne- 1b\n"
+                  PPC_ATOMIC_EXIT_BARRIER
+                  "2:"
+                  : "=&r" (t)
+                  : "r" (&v->counter)
+                  : "cc", "memory" );
+
+    return t;
+}
+
+static inline atomic_t atomic_compareandswap(atomic_t old, atomic_t new,
+                                             atomic_t *v)
+{
+    atomic_t rc;
+    rc.counter = __cmpxchg(&v->counter, old.counter, new.counter, sizeof(int));
+    return rc;
+}
+
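+/* Returns the value previously at *ptr (equal to o on success). */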
+#define arch_cmpxchg(ptr, o, n)                                                \
+    ({                                                                         \
+        __typeof__(*(ptr)) o_ = (o);                                           \
+        __typeof__(*(ptr)) n_ = (n);                                           \
+        (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long) o_,              \
+                                       (unsigned long) n_, sizeof(*(ptr)));    \
+    })
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+    return arch_cmpxchg(&v->counter, old, new);
+}
+
+#define ATOMIC_OP(op, insn, suffix, sign) \
+    static inline void atomic_##op(int a, atomic_t *v)                           \
+    {                                                                            \
+        int t;                                                                   \
+        asm volatile ( "1: lwarx %0,0,%3\n"                                      \
+                       insn "%I2" suffix " %0,%0,%2\n"                           \
+                       "stwcx. %0,0,%3 \n"                                       \
+                       "bne- 1b\n"                                               \
+                       : "=&r" (t), "+m" (v->counter)                            \
+                       : "r" #sign (a), "r" (&v->counter)                        \
+                       : "cc" );                                                 \
+    }
+
+ATOMIC_OP(and, "and", ".", K)
+
+static inline int atomic_sub_and_test(int i, atomic_t *v)
+{
+    return atomic_sub_return(i, v) == 0;
+}
+
+static inline int atomic_inc_and_test(atomic_t *v)
+{
+    return atomic_add_return(1, v) == 0;
+}
+
+static inline int atomic_dec_and_test(atomic_t *v)
+{
+    return atomic_sub_return(1, v) == 0;
+}
+
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+    return atomic_add_return(i, v) < 0;
+}
+
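+/* Add a to *v unless *v == u; returns the old value of *v. */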
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+    int c, old;
+
+    c = atomic_read(v);
+    while ( c != u && (old = atomic_cmpxchg(v, c, c + a)) != c )
+        c = old;
+    return c;
+}
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+    return __atomic_add_unless(v, a, u);
+}
+
+#endif /* _ASM_PPC64_ATOMIC_H_ */
diff --git a/xen/arch/ppc/include/asm/memory.h b/xen/arch/ppc/include/asm/memory.h
new file mode 100644
index 0000000000..a159ba2d63
--- /dev/null
+++ b/xen/arch/ppc/include/asm/memory.h
@@ -0,0 +1,21 @@ 
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#ifndef _ASM_MEMORY_H_
+#define _ASM_MEMORY_H_
+
+#include <xen/config.h>
+
+#ifdef CONFIG_SMP
+#define PPC_ATOMIC_ENTRY_BARRIER "sync\n"
+#define PPC_ATOMIC_EXIT_BARRIER  "sync\n"
+#else
+#define PPC_ATOMIC_ENTRY_BARRIER
+#define PPC_ATOMIC_EXIT_BARRIER
+#endif
+
+#endif