--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -445,7 +445,7 @@ static int clamp_thread(void *arg)
 			atomic_inc(&idle_wakeup_counter);
 		}
 		tick_nohz_idle_exit();
-		preempt_enable_no_resched();
+		preempt_enable();
 	}
 	del_timer_sync(&wakeup_timer);
 	clear_bit(cpunr, cpu_clamping_mask);
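Why this is the right fix rather than churn: preempt_enable_no_resched() drops the preempt count but skips the need_resched() check, so a wakeup that arrived while clamp_thread() was non-preemptible is only acted on at some later, unrelated scheduling point. Plain preempt_enable() folds that preemption point in. A minimal sketch of the pattern (not the powerclamp code; worker() and do_poll_step() are made-up names):

#include <linux/kthread.h>
#include <linux/preempt.h>

void do_poll_step(void);	/* hypothetical non-sleeping work */

static int worker(void *arg)
{
	while (!kthread_should_stop()) {
		preempt_disable();
		do_poll_step();
		/*
		 * preempt_enable() reschedules right here if
		 * TIF_NEED_RESCHED was set while we were non-preemptible.
		 * The _no_resched() variant would defer that to a later
		 * scheduling point -- a latency hole in a tight loop.
		 */
		preempt_enable();
	}
	return 0;
}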
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -64,7 +64,7 @@ do { \
 } while (0)
 
 #else
-#define preempt_enable() preempt_enable_no_resched()
+#define preempt_enable() sched_preempt_enable_no_resched()
 #define preempt_check_resched() do { } while (0)
 #endif
 
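With CONFIG_PREEMPT=n there are no involuntary preemption points, so "enable preemption" and "enable without rescheduling" are the same operation; aliasing preempt_enable() to the scheduler-internal sched_preempt_enable_no_resched() keeps the generated code identical while freeing the public preempt_enable_no_resched() name for withdrawal below. A simplified sketch of the relationship (my_-prefixed names, not the kernel's literal definitions, which vary by version):

#define my_sched_preempt_enable_no_resched() \
do { \
	my_barrier();			/* compiler barrier */ \
	my_preempt_count_dec();		/* drop the nesting count, nothing else */ \
} while (0)

#ifdef CONFIG_PREEMPT
#define my_preempt_enable() \
do { \
	my_sched_preempt_enable_no_resched(); \
	my_preempt_check_resched();	/* reschedule if a wakeup is pending */ \
} while (0)
#else
/* !CONFIG_PREEMPT: the two operations coincide, which is what the
 * hunk above encodes. */
#define my_preempt_enable() my_sched_preempt_enable_no_resched()
#endif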
@@ -116,6 +116,12 @@ do { \
 
 #endif /* CONFIG_PREEMPT_COUNT */
 
+#ifdef MODULE
+#undef preempt_enable_no_resched
+#undef preempt_enable_no_resched_notrace
+#undef preempt_check_resched
+#endif
+
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 
 struct preempt_notifier;
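The mechanism is worth spelling out: include/linux/preempt.h is pulled in by in-tree and out-of-tree modules alike, and kbuild defines MODULE when compiling objects destined for a .ko. Undefining the macros under #ifdef MODULE therefore withdraws them from modules at compile time while built-in code is untouched. A toy illustration with a hypothetical helper:

/* fast_unlock() is a made-up name, used only to show the trick. */
#define fast_unlock()	do { /* ... */ } while (0)

#ifdef MODULE
#undef fast_unlock	/* modules lose the definition... */
#endif

/*
 * ...so a module calling fast_unlock() now breaks the build at the
 * first use (implicit declaration, then an unresolved reference),
 * while built-in code keeps expanding the macro as before.  No
 * linker tricks or runtime checks are needed.
 */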
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -42,27 +42,18 @@ static inline bool net_busy_loop_on(void)
 	return sysctl_net_busy_poll;
 }
 
-/* a wrapper to make debug_smp_processor_id() happy
- * we can use sched_clock() because we don't care much about precision
- * we only care that the average is bounded
- */
-#ifdef CONFIG_DEBUG_PREEMPT
 static inline u64 busy_loop_us_clock(void)
 {
 	u64 rc;
 
+	/* XXX with interrupts enabled sched_clock() can return utter garbage */
+
 	preempt_disable_notrace();
 	rc = sched_clock();
-	preempt_enable_no_resched_notrace();
+	preempt_enable_notrace();
 
 	return rc >> 10;
 }
-#else /* CONFIG_DEBUG_PREEMPT */
-static inline u64 busy_loop_us_clock(void)
-{
-	return sched_clock() >> 10;
-}
-#endif /* CONFIG_DEBUG_PREEMPT */
 
 static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
 {
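Two things happen in this hunk. The CONFIG_DEBUG_PREEMPT special case existed only to keep debug_smp_processor_id() quiet, but as the new XXX comment notes, sched_clock() can be per-CPU and unstable when sampled preemptibly, so the preempt_disable_notrace() pairing is now unconditional. And the exit path uses preempt_enable_notrace() rather than the withdrawn _no_resched_notrace() variant, so a reschedule that became pending during the read is honored immediately. For reference, the helper as it reads with the patch applied (reconstructed from the hunk above), with the reasoning inlined:

static inline u64 busy_loop_us_clock(void)
{
	u64 rc;

	/* XXX with interrupts enabled sched_clock() can return utter garbage */

	preempt_disable_notrace();	/* stay on one CPU for the sample */
	rc = sched_clock();		/* nanoseconds; may be per-CPU */
	preempt_enable_notrace();	/* full enable: folds a pending resched */

	return rc >> 10;		/* ns -> ~us: shift by 10 divides by
					 * 1024 instead of 1000, cheap and
					 * close enough since only the
					 * bounded average matters */
}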