@@ -259,10 +259,10 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
-static __inline__ int
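+/* 64-bit atomics hand back the full s64 result, not a truncated int */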
+static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
- int ret;
+ s64 ret;
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);
@@ -5,11 +5,14 @@
#include <linux/futex.h>
#include <linux/uaccess.h>
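+/* for the hashed _atomic_spin_lock_irqsave() helpers */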
+#include <asm/atomic.h>
#include <asm/errno.h>
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{
+ unsigned long flags;
+ u32 val;
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
@@ -18,21 +21,58 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
return -EFAULT;
pagefault_disable();
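+ /* HPPA has no atomic read-modify-write in hardware, so the whole
+  * user-space update is serialized under a lock selected by hashing
+  * the user address (see futex_atomic_cmpxchg_inatomic below).
+  */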
+ _atomic_spin_lock_irqsave(uaddr, flags);
+
switch (op) {
case FUTEX_OP_SET:
+ /* *(u32 *)uaddr = oparg; */
+ ret = get_user(oldval, uaddr);
+ if (!ret)
+ ret = put_user(oparg, uaddr);
+ break;
case FUTEX_OP_ADD:
+ /* *(u32 *)uaddr += oparg; */
+ ret = get_user(oldval, uaddr);
+ if (!ret) {
+ val = oldval + oparg;
+ ret = put_user(val, uaddr);
+ }
+ break;
case FUTEX_OP_OR:
+ /* *(u32 *)uaddr |= oparg; */
+ ret = get_user(oldval, uaddr);
+ if (!ret) {
+ val = oldval | oparg;
+ ret = put_user(val, uaddr);
+ }
+ break;
case FUTEX_OP_ANDN:
+ /* *(u32 *)uaddr &= ~oparg; */
+ ret = get_user(oldval, uaddr);
+ if (!ret) {
+ val = oldval & ~oparg;
+ ret = put_user(val, uaddr);
+ }
+ break;
case FUTEX_OP_XOR:
+ /* *(u32 *)uaddr ^= oparg; */
+ ret = get_user(oldval, uaddr);
+ if (!ret) {
+ val = oldval ^ oparg;
+ ret = put_user(val, uaddr);
+ }
+ break;
default:
ret = -ENOSYS;
}
+ _atomic_spin_unlock_irqrestore(uaddr, flags);
+
pagefault_enable();
if (!ret) {
@@ -54,7 +94,9 @@ static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ int ret;
u32 val;
+ unsigned long flags;
/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
* our gateway page, and causes no end of trouble...
@@ -65,12 +107,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
- if (get_user(val, uaddr))
- return -EFAULT;
- if (val == oldval && put_user(newval, uaddr))
- return -EFAULT;
+ /* HPPA has no cmpxchg in hardware and therefore the
+ * best we can do here is use an array of locks. The
+ * lock selected is based on a hash of the userspace
+ * address. This should scale to a couple of CPUs.
+ */
+
+ _atomic_spin_lock_irqsave(uaddr, flags);
+
+ ret = get_user(val, uaddr);
+
+ if (!ret && val == oldval)
+ ret = put_user(newval, uaddr);
+
*uval = val;
- return 0;
+
+ _atomic_spin_unlock_irqrestore(uaddr, flags);
+
+ return ret;
}
#endif /*__KERNEL__*/
@@ -821,8 +821,9 @@
#define __NR_open_by_handle_at (__NR_Linux + 326)
#define __NR_syncfs (__NR_Linux + 327)
#define __NR_setns (__NR_Linux + 328)
+#define __NR_sendmmsg (__NR_Linux + 329)
-#define __NR_Linux_syscalls (__NR_setns + 1)
+#define __NR_Linux_syscalls (__NR_sendmmsg + 1)
#define __IGNORE_select /* newselect */
@@ -427,6 +427,7 @@
ENTRY_COMP(open_by_handle_at)
ENTRY_SAME(syncfs)
ENTRY_SAME(setns)
+ ENTRY_COMP(sendmmsg)
/* Nothing yet */