 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;
-	return tsk && (tsk->state == TASK_RUNNING);
+	return tsk && (tsk->state == TASK_RUNNING) &&
+		!__kthread_should_park(tsk);
}
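The added !__kthread_should_park(tsk) term avoids deferring work to a ksoftirqd thread that is being parked (e.g. while its CPU is going offline): a parking kthread can still be in TASK_RUNNING, so without the check a softirq raised in that window would be handed to a thread that is about to stop processing the queue. Below is a minimal user-space sketch of the same decision logic; struct kthread, its should_park field and should_defer_to_ksoftirqd() are hypothetical stand-ins for the kernel's task/kthread state, not real kernel API.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the kernel's task state and parking flag. */
	enum task_state { TASK_RUNNING, TASK_INTERRUPTIBLE };

	struct kthread {
		enum task_state state;
		bool should_park;	/* stands in for __kthread_should_park() */
	};

	#define HI_SOFTIRQ		0
	#define TASKLET_SOFTIRQ		6
	#define SOFTIRQ_NOW_MASK	((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))

	/* Mirrors the patched ksoftirqd_running(): defer to the thread only
	 * if it is genuinely running, i.e. non-NULL, not sleeping, and not
	 * on its way to being parked. */
	static bool should_defer_to_ksoftirqd(struct kthread *tsk,
					      unsigned long pending)
	{
		if (pending & SOFTIRQ_NOW_MASK)
			return false;	/* synchronous softirqs run inline */
		return tsk && tsk->state == TASK_RUNNING && !tsk->should_park;
	}

	int main(void)
	{
		struct kthread parking = {
			.state = TASK_RUNNING,
			.should_park = true,
		};

		/* Without the parking check, a thread in this state would
		 * still "win" and pending softirqs would be deferred to it;
		 * with the check, this prints 0 and they run inline. */
		printf("defer while parking: %d\n",
		       should_defer_to_ksoftirqd(&parking, 1UL << 3));
		return 0;
	}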

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
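To make that accounting concrete: because local_bh_disable() bumps the count in units of 2 * SOFTIRQ_OFFSET, the lowest bit of the softirq field is only ever set while actually serving a softirq, and bh-disable nesting is counted in the bits above it. The user-space sketch below re-implements the scheme locally; the helper names mirror the kernel's but are not the kernel's code, and the bit layout (softirq field starting at bit 8) follows the usual preempt_count definitions, assumed here rather than taken from this patch.

	#include <stdbool.h>
	#include <stdio.h>

	#define SOFTIRQ_SHIFT		8
	#define SOFTIRQ_OFFSET		(1UL << SOFTIRQ_SHIFT)
	#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
	#define SOFTIRQ_MASK		(0xffUL << SOFTIRQ_SHIFT)

	static unsigned long preempt_count;

	static void local_bh_disable(void) { preempt_count += SOFTIRQ_DISABLE_OFFSET; }
	static void local_bh_enable(void)  { preempt_count -= SOFTIRQ_DISABLE_OFFSET; }
	static void softirq_enter(void)    { preempt_count += SOFTIRQ_OFFSET; }
	static void softirq_exit(void)     { preempt_count -= SOFTIRQ_OFFSET; }

	/* Any softirq bits set: serving a softirq OR bh merely disabled. */
	static bool in_softirq(void)	     { return preempt_count & SOFTIRQ_MASK; }
	/* Only the low softirq bit flags actual softirq processing, since
	 * bh-disable moves the count in steps of 2 and never touches it. */
	static bool in_serving_softirq(void) { return preempt_count & SOFTIRQ_OFFSET; }

	int main(void)
	{
		local_bh_disable();
		printf("bh disabled: in_softirq=%d in_serving=%d\n",
		       in_softirq(), in_serving_softirq());	/* 1, 0 */
		local_bh_enable();

		softirq_enter();
		printf("serving:     in_softirq=%d in_serving=%d\n",
		       in_softirq(), in_serving_softirq());	/* 1, 1 */
		softirq_exit();
		return 0;
	}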