--- a/include/linux/kpkeys.h
+++ b/include/linux/kpkeys.h
@@ -154,4 +154,10 @@ static inline void kpkeys_hardened_pgtables_enable(void) {}
#endif /* CONFIG_KPKEYS_HARDENED_PGTABLES */
+#ifdef CONFIG_KPKEYS_UNRESTRICTED_RCU
+KPKEYS_GUARD(kpkeys_rcu, KPKEYS_LVL_UNRESTRICTED)
+#else
+KPKEYS_GUARD_NOOP(kpkeys_rcu)
+#endif
+
#endif /* _LINUX_KPKEYS_H */
--- a/kernel/rcu/rcu_segcblist.c
+++ b/kernel/rcu/rcu_segcblist.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/kpkeys.h>
#include "rcu_segcblist.h"
@@ -332,7 +333,8 @@ void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
rcu_segcblist_inc_len(rsclp);
rcu_segcblist_inc_seglen(rsclp, RCU_NEXT_TAIL);
rhp->next = NULL;
- WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rhp);
+ scoped_guard(kpkeys_rcu)
+ WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rhp);
WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], &rhp->next);
}
@@ -381,7 +383,8 @@ void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
rclp->len = rcu_segcblist_get_seglen(rsclp, RCU_DONE_TAIL);
*rclp->tail = rsclp->head;
WRITE_ONCE(rsclp->head, *rsclp->tails[RCU_DONE_TAIL]);
- WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL);
+ scoped_guard(kpkeys_rcu)
+ WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL);
rclp->tail = rsclp->tails[RCU_DONE_TAIL];
for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--)
if (rsclp->tails[i] == rsclp->tails[RCU_DONE_TAIL])
@@ -436,7 +439,8 @@ void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
if (!rclp->head)
return; /* No callbacks to move. */
rcu_segcblist_add_seglen(rsclp, RCU_DONE_TAIL, rclp->len);
- *rclp->tail = rsclp->head;
+ scoped_guard(kpkeys_rcu)
+ *rclp->tail = rsclp->head;
WRITE_ONCE(rsclp->head, rclp->head);
for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
if (&rsclp->head == rsclp->tails[i])
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -64,6 +64,7 @@
#include <linux/mm.h>
#include <linux/kasan.h>
#include <linux/context_tracking.h>
+#include <linux/kpkeys.h>
#include "../time/tick-internal.h"
#include "tree.h"
@@ -2542,7 +2543,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
f = rhp->func;
debug_rcu_head_callback(rhp);
- WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
+ scoped_guard(kpkeys_rcu)
+ WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
f(rhp);
rcu_lock_release(&rcu_callback_map);
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1152,6 +1152,8 @@ config ARCH_HAS_KPKEYS
# ARCH_HAS_KPKEYS must be selected when selecting this option
config ARCH_HAS_KPKEYS_HARDENED_PGTABLES
bool
+config KPKEYS_UNRESTRICTED_RCU
+ bool
config ARCH_USES_PG_ARCH_2
bool
Data assigned a non-default pkey is not writable at the default kpkeys
level. If such data is managed via RCU, some mechanism is required to
temporarily grant write access to the data's struct rcu_head, for
instance when zeroing the callback pointer.

There is unfortunately no straightforward way for RCU to know whether
the managed data is mapped with a non-default pkey. This patch takes
the easy route and switches to the unrestricted kpkeys level whenever
struct rcu_head is written; this should work reliably, but it is
clearly suboptimal. That behaviour is enabled by selecting
CONFIG_KPKEYS_UNRESTRICTED_RCU.

This patch isn't comprehensive; in particular, it does not take care of
Tiny RCU.

Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
---
 include/linux/kpkeys.h     |  6 ++++++
 kernel/rcu/rcu_segcblist.c | 10 +++++++---
 kernel/rcu/tree.c          |  4 +++-
 mm/Kconfig                 |  2 ++
 4 files changed, 18 insertions(+), 4 deletions(-)
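
A note on the guard for reviewers: KPKEYS_GUARD() and
KPKEYS_GUARD_NOOP() are presumably defined earlier in <linux/kpkeys.h>
by the kpkeys series; the sketch below only illustrates one plausible
way such a guard could be built on top of the kernel's
<linux/cleanup.h> scope-based helpers. The helper names
kpkeys_set_level() and kpkeys_restore_pkey_reg() are assumptions about
the kpkeys API, not something defined or relied upon by this patch.

  /*
   * Hypothetical sketch, not part of this patch: a kpkeys guard built
   * with DEFINE_LOCK_GUARD_0().  kpkeys_set_level() is assumed to
   * return the previous pkey register value so that the destructor
   * can restore it when the guarded scope ends.
   */
  #include <linux/cleanup.h>
  #include <linux/types.h>

  #define KPKEYS_GUARD(name, level)                                    \
          DEFINE_LOCK_GUARD_0(name,                                    \
                              _T->pkey_reg = kpkeys_set_level(level),  \
                              kpkeys_restore_pkey_reg(_T->pkey_reg),   \
                              u64 pkey_reg)

  /* With the feature compiled out, both callbacks do nothing. */
  #define KPKEYS_GUARD_NOOP(name)                                      \
          DEFINE_LOCK_GUARD_0(name, (void)0, (void)0)

With a definition of that shape, the pattern used throughout the diff
raises the kpkeys level only for the single statement that stores into
the (possibly pkey-protected) struct rcu_head and restores the previous
level as soon as the scope ends; when CONFIG_KPKEYS_UNRESTRICTED_RCU is
not selected, scoped_guard(kpkeys_rcu) reduces to the plain store. For
illustration, this restates the write done in rcu_do_batch() above:

  scoped_guard(kpkeys_rcu)
          WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
  /* previous kpkeys level restored here */
  f(rhp);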