@@ -65,6 +65,8 @@ const char * const softirq_to_name[NR_SOFTIRQS] = {
 	"TASKLET", "SCHED", "HRTIMER", "RCU"
 };
+/* Softirqs still handled inline when ksoftirqd is running; see ksoftirqd_running(). */
+unsigned int sysctl_softirq_mask = (1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ);
 /*
  * we cannot loop indefinitely here to avoid userspace starvation,
  * but we also don't want to introduce a worst case 1/HZ latency
@@ -80,17 +82,23 @@ static void wakeup_softirqd(void)
 	wake_up_process(tsk);
 }
+/* True iff no bit of @pending is in the synchronous-handling sysctl mask. */
+static bool softirq_now_mask(unsigned long pending)
+{
+	/* Masked softirqs run inline, never deferred to ksoftirqd. */
+	return !(pending & sysctl_softirq_mask);
+}
+
 /*
  * If ksoftirqd is scheduled, we do not want to process pending softirqs
  * right now. Let ksoftirqd handle this at its own rate, to get fairness,
  * unless we're doing some of the synchronous softirqs.
  */
-#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
 static bool ksoftirqd_running(unsigned long pending)
 {
 	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
-	if (pending & SOFTIRQ_NOW_MASK)
+	if (softirq_now_mask(pending))
 		return false;
 	return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
 }
@@ -903,6 +911,22 @@ void tasklet_unlock_wait(struct tasklet_struct *t)
 EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
 #endif
+static struct ctl_table softirq_sysctls[] = {
+	{
+		.procname	= "softirq_mask",
+		.data		= &sysctl_softirq_mask,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_douintvec,
+	},
+	{}
+};
+
+static void __init softirq_mask_sysctl_init(void)
+{
+	register_sysctl_init("kernel", softirq_sysctls);
+}
+
void __init softirq_init(void)
{
int cpu;
@@ -916,6 +940,7 @@ void __init softirq_init(void)
open_softirq(TASKLET_SOFTIRQ, tasklet_action);
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
+ softirq_mask_sysctl_init();
}
static int ksoftirqd_should_run(unsigned int cpu)