@@ -56,7 +56,7 @@ static inline enum ctx_state exception_enter(void)
!context_tracking_enabled())
return 0;
- prev_ctx = this_cpu_read(context_tracking.state);
+ prev_ctx = __ct_state();
if (prev_ctx != CONTEXT_KERNEL)
ct_user_exit(prev_ctx);
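exception_enter() now snapshots the tracked state through __ct_state()
instead of a raw this_cpu_read(), matching the atomic_t conversion of the
state field below. A minimal sketch of how an architecture's exception
handler is expected to use the pair (the handler name and the simplified
signature are illustrative, not part of this patch):

	#include <linux/context_tracking.h>

	static void example_fault_handler(void)
	{
		enum ctx_state prev_ctx;

		/* Switch to CONTEXT_KERNEL, remembering what we interrupted. */
		prev_ctx = exception_enter();

		/* ... handle the exception with RCU watching ... */

		/* Go back to the interrupted context (user or kernel). */
		exception_exit(prev_ctx);
	}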
@@ -86,33 +86,21 @@ static __always_inline void context_tracking_guest_exit(void)
__ct_user_exit(CONTEXT_GUEST);
}
-/**
- * ct_state() - return the current context tracking state if known
- *
- * Returns the current cpu's context tracking state if context tracking
- * is enabled. If context tracking is disabled, returns
- * CONTEXT_DISABLED. This should be used primarily for debugging.
- */
-static __always_inline enum ctx_state ct_state(void)
-{
- return context_tracking_enabled() ?
- this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
-}
+#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
+
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
-static inline enum ctx_state exception_enter(void) { return 0; }
+static inline int exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
-static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
+static inline int ct_state(void) { return CONTEXT_DISABLED; }
static __always_inline bool context_tracking_guest_enter(void) { return false; }
static inline void context_tracking_guest_exit(void) { }
-
+#define CT_WARN_ON(cond) do { } while (0)
#endif /* !CONFIG_CONTEXT_TRACKING_USER */
-#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
-
#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
extern void context_tracking_init(void);
#else
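Moving CT_WARN_ON() inside the #ifdef pair means its condition is not even
compiled when CONFIG_CONTEXT_TRACKING_USER is off, rather than relying on
context_tracking_enabled() folding to false. A hedged sketch of the kind of
entry-code assertion the macro is meant for, modeled on the check the
generic entry code makes when coming in from user space (the wrapper name
here is made up):

	#include <linux/context_tracking.h>

	static __always_inline void example_enter_from_user_mode(void)
	{
		/* User space must have been tracked as CONTEXT_USER. */
		CT_WARN_ON(ct_state() != CONTEXT_USER);
		user_exit_irqoff();
	}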
@@ -6,6 +6,9 @@
#include <linux/static_key.h>
#include <linux/context_tracking_irq.h>
+/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
+#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
+
enum ctx_state {
CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
CONTEXT_KERNEL = 0,
@@ -13,9 +16,6 @@ enum ctx_state {
CONTEXT_GUEST,
};
-/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
-#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
-
struct context_tracking {
#ifdef CONFIG_CONTEXT_TRACKING_USER
/*
@@ -26,7 +26,7 @@ struct context_tracking {
*/
bool active;
int recursion;
- enum ctx_state state;
+ atomic_t state;
#endif
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
atomic_t dynticks; /* Even value for idle, else odd. */
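Turning state into an atomic_t is what allows __ct_state() below to use
atomic_read(), and it makes the field safe to sample from another CPU
without tearing. A hedged sketch of such a remote read; the helper name is
invented, and only the per_cpu_ptr()/atomic_read() pattern is the point:

	#include <linux/context_tracking_state.h>
	#include <linux/percpu.h>

	/*
	 * Snapshot @cpu's tracked context. The value can be stale as soon as
	 * it is read, so this is only suitable for debugging or statistics.
	 */
	static int example_remote_ct_state(int cpu)
	{
		struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);

		return atomic_read(&ct->state);
	}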
@@ -98,6 +98,32 @@ static inline bool context_tracking_enabled_this_cpu(void)
return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
}
+static __always_inline int __ct_state(void)
+{
+ return atomic_read(this_cpu_ptr(&context_tracking.state));
+}
+
+/**
+ * ct_state() - return the current context tracking state if known
+ *
+ * Returns the current cpu's context tracking state if context tracking
+ * is enabled. If context tracking is disabled, returns
+ * CONTEXT_DISABLED. This should be used primarily for debugging.
+ */
+static __always_inline int ct_state(void)
+{
+ int ret;
+
+ if (!context_tracking_enabled())
+ return CONTEXT_DISABLED;
+
+ preempt_disable();
+ ret = __ct_state();
+ preempt_enable();
+
+ return ret;
+}
+
#else
static __always_inline bool context_tracking_enabled(void) { return false; }
static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; }
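The preempt_disable()/preempt_enable() pair is what distinguishes ct_state()
from __ct_state(): __ct_state() goes through this_cpu_ptr(), so if the task
migrated between computing the per-CPU address and reading it, the result
could describe some other CPU. Callers that already run with preemption or
interrupts disabled can use __ct_state() directly. A hedged usage sketch
(the helper below is illustrative, not from this patch):

	#include <linux/bug.h>
	#include <linux/context_tracking_state.h>

	/*
	 * Debug helper: complain once if the tracked state is not
	 * CONTEXT_KERNEL. Safe from preemptible context because ct_state()
	 * pins the CPU around its per-CPU read.
	 */
	static void example_check_kernel_context(void)
	{
		WARN_ON_ONCE(context_tracking_enabled() &&
			     ct_state() != CONTEXT_KERNEL);
	}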
@@ -463,6 +463,7 @@ static __always_inline void context_tracking_recursion_exit(void)
*/
void noinstr __ct_user_enter(enum ctx_state state)
{
+ struct context_tracking *ct = this_cpu_ptr(&context_tracking);
lockdep_assert_irqs_disabled();
/* Kernel threads aren't supposed to go to userspace */
@@ -471,8 +472,8 @@ void noinstr __ct_user_enter(enum ctx_state state)
if (!context_tracking_recursion_enter())
return;
- if ( __this_cpu_read(context_tracking.state) != state) {
- if (__this_cpu_read(context_tracking.active)) {
+ if (__ct_state() != state) {
+ if (ct->active) {
/*
* At this stage, only low level arch entry code remains and
* then we'll run in userspace. We can assume there won't be
@@ -513,7 +514,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
* OTOH we can spare the calls to vtime and RCU when context_tracking.active
* is false because we know that CPU is not tickless.
*/
- __this_cpu_write(context_tracking.state, state);
+ atomic_set(&ct->state, state);
}
context_tracking_recursion_exit();
}
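The closing store switches from __this_cpu_write() to atomic_set() on the ct
pointer hoisted at function entry. atomic_set() is a non-RMW operation with
no implied barriers, essentially a WRITE_ONCE() of the counter, so the fast
path remains one plain per-CPU store; only the field's type changed. Side by
side (an illustration, not additional patch content):

	__this_cpu_write(context_tracking.state, state);	/* before */
	atomic_set(&ct->state, state);				/* after: same plain store */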
@@ -581,11 +582,13 @@ NOKPROBE_SYMBOL(user_enter_callable);
*/
void noinstr __ct_user_exit(enum ctx_state state)
{
+ struct context_tracking *ct = this_cpu_ptr(&context_tracking);
+
if (!context_tracking_recursion_enter())
return;
- if (__this_cpu_read(context_tracking.state) == state) {
- if (__this_cpu_read(context_tracking.active)) {
+ if (__ct_state() == state) {
+ if (ct->active) {
/*
* Exit RCU idle mode while entering the kernel because it can
* run a RCU read side critical section anytime.
@@ -598,7 +601,7 @@ void noinstr __ct_user_exit(enum ctx_state state)
instrumentation_end();
}
}
- __this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
+ atomic_set(&ct->state, CONTEXT_KERNEL);
}
context_tracking_recursion_exit();
}
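Taken together, the conversions keep __ct_user_enter() and __ct_user_exit()
symmetric: both read the state via __ct_state() and write it with
atomic_set() through a cached ct pointer. A sketch of the lifecycle the pair
implements; the flow annotations are illustrative glosses, not patch
content:

	/*
	 * syscall/exception/irq arrives from user space:
	 *	__ct_user_exit(CONTEXT_USER);	state -> CONTEXT_KERNEL,
	 *					RCU starts watching again
	 *
	 * ... kernel work runs ...
	 *
	 * return to user space:
	 *	__ct_user_enter(CONTEXT_USER);	state -> CONTEXT_USER,
	 *					RCU enters its extended
	 *					quiescent state
	 */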