From 163651523ab2b511d44e77d99c4cf97780ded204 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 4 Oct 2012 14:20:47 +0100
Subject: [PATCH 155/352] softirq: Split softirq locks

The 3.x RT series removed the split softirq implementation in favour
of pushing softirq processing into the context of the thread which
raised it. However, this prevents us from handling the various softirqs
at different priorities. So instead of reintroducing the split softirq
threads, we split the locks which serialize the softirq processing.

If a softirq is raised in the context of a thread while that thread is
inside a bh disabled region, the softirq is noted in a per-thread
field. If the softirq is raised from hard interrupt context, the bit is
set in the flag field of ksoftirqd and ksoftirqd is invoked. When a
thread leaves a bh disabled region, it tries to execute the softirqs
which were raised in its own context: it acquires the per-softirq /
per-CPU lock for the softirq and then checks whether the softirq is
still pending in the per-CPU local_softirq_pending() field. If it is,
it runs the softirq; if not, some other task has already executed it.
This allows for zero-config softirq elevation in the context of user
space tasks or interrupt threads.

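For illustration only (not part of the patch itself): the core of that
mechanism, condensed from do_current_softirqs() in the diff below, with
the local_irq_enable()/local_irq_disable() dance around the lock
omitted for brevity:

	while (current->softirqs_raised) {
		int i = __ffs(current->softirqs_raised);	/* next raised vector */
		unsigned int pending, mask = 1U << i;

		current->softirqs_raised &= ~mask;
		lock_softirq(i);		/* per-softirq, per-CPU lock */
		softirq_set_runner(i);
		pending = local_softirq_pending();
		if (pending & mask) {		/* still pending, so we run it */
			set_softirq_pending(pending & ~mask);
			do_single_softirq(i);
		}				/* otherwise another task already ran it */
		softirq_clr_runner(i);
		unlock_softirq(i);
	}
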
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/bottom_half.h |  34 +++
 include/linux/interrupt.h   |  15 +-
 include/linux/preempt.h     |  15 +-
 include/linux/sched.h       |   3 +
 init/main.c                 |   1 +
 kernel/softirq.c            | 492 +++++++++++++++++++++++++++++++++++++-------
 kernel/time/tick-sched.c    |   9 +-
 net/core/dev.c              |   6 +-
 8 files changed, 480 insertions(+), 95 deletions(-)

diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 8fdcb78..d07dbee 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -3,6 +3,39 @@

#include <linux/preempt.h>

+#ifdef CONFIG_PREEMPT_RT_FULL
+
+extern void __local_bh_disable(void);
+extern void _local_bh_enable(void);
+extern void __local_bh_enable(void);
+
+static inline void local_bh_disable(void)
+{
+	__local_bh_disable();
+}
+
+static inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+{
+	__local_bh_disable();
+}
+
+static inline void local_bh_enable(void)
+{
+	__local_bh_enable();
+}
+
+static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
+{
+	__local_bh_enable();
+}
+
+static inline void local_bh_enable_ip(unsigned long ip)
+{
+	__local_bh_enable();
+}
+
+#else
+
#ifdef CONFIG_TRACE_IRQFLAGS
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
#else
@@ -30,5 +63,6 @@ static inline void local_bh_enable(void)
{
	__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}
+#endif

#endif /* _LINUX_BH_H */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index e5f4cea..94b32e8 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -470,10 +470,11 @@ struct softirq_action
	void (*action)(struct softirq_action *);
};

+#ifndef CONFIG_PREEMPT_RT_FULL
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
-
-#if defined(__ARCH_HAS_DO_SOFTIRQ) && !defined(CONFIG_PREEMPT_RT_FULL)
+static inline void thread_do_softirq(void) { do_softirq(); }
+#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
@@ -481,6 +482,9 @@ static inline void do_softirq_own_stack(void)
	__do_softirq();
}
#endif
+#else
+extern void thread_do_softirq(void);
+#endif

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
@@ -488,6 +492,7 @@ extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
+extern void softirq_check_pending_idle(void);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

@@ -645,6 +650,12 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
	tasklet_kill(&ttimer->tasklet);
}

+#ifdef CONFIG_PREEMPT_RT_FULL
+extern void softirq_early_init(void);
+#else
+static inline void softirq_early_init(void) { }
+#endif
+
/*
 * Autoprobing for irqs:
 *
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 2eed4b5..0c734628 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -50,7 +50,11 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)

-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+#else
+# define SOFTIRQ_DISABLE_OFFSET (0)
+#endif

/* We use the MSB mostly because its available */
#define PREEMPT_NEED_RESCHED 0x80000000
@@ -59,9 +63,15 @@
#include <asm/preempt.h>

#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+#else
+# define softirq_count() (0UL)
+extern int in_serving_softirq(void);
+#endif

/*
 * Are we doing bottom half or hardware interrupt processing?
@@ -79,7 +89,6 @@
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
#define in_nmi() (preempt_count() & NMI_MASK)
#define in_task() (!(preempt_count() & \
		(NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
diff --git a/include/linux/sched.h b/include/linux/sched.h
index daed623..d69e34c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2144,6 +2144,8 @@ struct task_struct {
#endif
#ifdef CONFIG_PREEMPT_RT_BASE
	struct rcu_head put_rcu;
+	int softirq_nestcnt;
+	unsigned int softirqs_raised;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long task_state_change;
@@ -2464,6 +2466,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
/*
 * Per process flags
 */
+#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
diff --git a/init/main.c b/init/main.c
index a1c1368..0a9e29a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -509,6 +509,7 @@ asmlinkage __visible void __init start_kernel(void)
	setup_command_line(command_line);
	setup_nr_cpu_ids();
	setup_per_cpu_areas();
+	softirq_early_init();
	smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
	boot_cpu_hotplug_init();

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 02d1106..e7693cd 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -26,6 +26,7 @@
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
+#include <linux/locallock.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
@@ -63,6 +64,98 @@ const char * const softirq_to_name[NR_SOFTIRQS] = {
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

+#ifdef CONFIG_NO_HZ_COMMON
+# ifdef CONFIG_PREEMPT_RT_FULL
+
+struct softirq_runner {
+	struct task_struct *runner[NR_SOFTIRQS];
+};
+
+static DEFINE_PER_CPU(struct softirq_runner, softirq_runners);
+
+static inline void softirq_set_runner(unsigned int sirq)
+{
+	struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
+
+	sr->runner[sirq] = current;
+}
+
+static inline void softirq_clr_runner(unsigned int sirq)
+{
+	struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
+
+	sr->runner[sirq] = NULL;
+}
+
+/*
+ * On preempt-rt a softirq running context might be blocked on a
+ * lock. There might be no other runnable task on this CPU because the
+ * lock owner runs on some other CPU. So we have to go into idle with
+ * the pending bit set. Therefore we need to check this, otherwise we
+ * would warn about false positives, which confuses users and defeats
+ * the whole purpose of this test.
+ *
+ * This code is called with interrupts disabled.
+ */
+void softirq_check_pending_idle(void)
+{
+	static int rate_limit;
+	struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
+	u32 warnpending;
+	int i;
+
+	if (rate_limit >= 10)
+		return;
+
+	warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
+	for (i = 0; i < NR_SOFTIRQS; i++) {
+		struct task_struct *tsk = sr->runner[i];
+
+		/*
+		 * The wakeup code in rtmutex.c wakes up the task
+		 * _before_ it sets pi_blocked_on to NULL under
+		 * tsk->pi_lock. So we need to check for both: state
+		 * and pi_blocked_on.
+		 */
+		if (tsk) {
+			raw_spin_lock(&tsk->pi_lock);
+			if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) {
+				/* Clear all bits pending in that task */
+				warnpending &= ~(tsk->softirqs_raised);
+				warnpending &= ~(1 << i);
+			}
+			raw_spin_unlock(&tsk->pi_lock);
+		}
+	}
+
+	if (warnpending) {
+		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+		       warnpending);
+		rate_limit++;
+	}
+}
+# else
+/*
+ * On !PREEMPT_RT we just printk rate limited:
+ */
+void softirq_check_pending_idle(void)
+{
+	static int rate_limit;
+
+	if (rate_limit < 10 &&
+	    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
+		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+		       local_softirq_pending());
+		rate_limit++;
+	}
+}
+# endif
+
+#else /* !CONFIG_NO_HZ_COMMON */
+static inline void softirq_set_runner(unsigned int sirq) { }
+static inline void softirq_clr_runner(unsigned int sirq) { }
+#endif
+
/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
@@ -78,6 +171,26 @@ static void wakeup_softirqd(void)
		wake_up_process(tsk);
}

+static void handle_softirq(unsigned int vec_nr)
+{
+	struct softirq_action *h = softirq_vec + vec_nr;
+	int prev_count;
+
+	prev_count = preempt_count();
+
+	kstat_incr_softirqs_this_cpu(vec_nr);
+
+	trace_softirq_entry(vec_nr);
+	h->action(h);
+	trace_softirq_exit(vec_nr);
+	if (unlikely(prev_count != preempt_count())) {
+		pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
+		       vec_nr, softirq_to_name[vec_nr], h->action,
+		       prev_count, preempt_count());
+		preempt_count_set(prev_count);
+	}
+}
+
/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
@@ -93,6 +206,48 @@ static bool ksoftirqd_running(unsigned long pending)
	return tsk && (tsk->state == TASK_RUNNING);
}

+#ifndef CONFIG_PREEMPT_RT_FULL
+static inline int ksoftirqd_softirq_pending(void)
+{
+	return local_softirq_pending();
+}
+
+static void handle_pending_softirqs(u32 pending)
+{
+	struct softirq_action *h = softirq_vec;
+	int softirq_bit;
+
+	local_irq_enable();
+
+	h = softirq_vec;
+
+	while ((softirq_bit = ffs(pending))) {
+		unsigned int vec_nr;
+
+		h += softirq_bit - 1;
+		vec_nr = h - softirq_vec;
+		handle_softirq(vec_nr);
+
+		h++;
+		pending >>= softirq_bit;
+	}
+
+	rcu_bh_qs();
+	local_irq_disable();
+}
+
+static void run_ksoftirqd(unsigned int cpu)
+{
+	local_irq_disable();
+	if (ksoftirqd_softirq_pending()) {
+		__do_softirq();
+		local_irq_enable();
+		cond_resched_rcu_qs();
+		return;
+	}
+	local_irq_enable();
+}
+
/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
@@ -248,10 +403,8 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
-	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
-	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC s current task context is borrowed for the
@@ -270,36 +423,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

-	local_irq_enable();
-
-	h = softirq_vec;
-
-	while ((softirq_bit = ffs(pending))) {
-		unsigned int vec_nr;
-		int prev_count;
-
-		h += softirq_bit - 1;
-
-		vec_nr = h - softirq_vec;
-		prev_count = preempt_count();
-
-		kstat_incr_softirqs_this_cpu(vec_nr);
-
-		trace_softirq_entry(vec_nr);
-		h->action(h);
-		trace_softirq_exit(vec_nr);
-		if (unlikely(prev_count != preempt_count())) {
-			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
-			       vec_nr, softirq_to_name[vec_nr], h->action,
-			       prev_count, preempt_count());
-			preempt_count_set(prev_count);
-		}
-		h++;
-		pending >>= softirq_bit;
-	}
-
-	rcu_bh_qs();
-	local_irq_disable();
+	handle_pending_softirqs(pending);

	pending = local_softirq_pending();
	if (pending) {
@@ -336,6 +460,246 @@ asmlinkage __visible void do_softirq(void)
}

/*
+ * This function must run with irqs disabled!
+ */
+void raise_softirq_irqoff(unsigned int nr)
+{
+	__raise_softirq_irqoff(nr);
+
+	/*
+	 * If we're in an interrupt or softirq, we're done
+	 * (this also catches softirq-disabled code). We will
+	 * actually run the softirq once we return from
+	 * the irq or softirq.
+	 *
+	 * Otherwise we wake up ksoftirqd to make sure we
+	 * schedule the softirq soon.
+	 */
+	if (!in_interrupt())
+		wakeup_softirqd();
+}
+
+void __raise_softirq_irqoff(unsigned int nr)
+{
+	trace_softirq_raise(nr);
+	or_softirq_pending(1UL << nr);
+}
+
+static inline void local_bh_disable_nort(void) { local_bh_disable(); }
+static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
+static void ksoftirqd_set_sched_params(unsigned int cpu) { }
+static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { }
+
+#else /* !PREEMPT_RT_FULL */
+
+/*
+ * On RT we serialize softirq execution with a cpu local lock per softirq
+ */
+static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks);
+
+void __init softirq_early_init(void)
+{
+	int i;
+
+	for (i = 0; i < NR_SOFTIRQS; i++)
+		local_irq_lock_init(local_softirq_locks[i]);
+}
+
+static void lock_softirq(int which)
+{
+	local_lock(local_softirq_locks[which]);
+}
+
+static void unlock_softirq(int which)
+{
+	local_unlock(local_softirq_locks[which]);
+}
+
+static void do_single_softirq(int which)
+{
+	unsigned long old_flags = current->flags;
+
+	current->flags &= ~PF_MEMALLOC;
+	vtime_account_irq_enter(current);
+	current->flags |= PF_IN_SOFTIRQ;
+	lockdep_softirq_enter();
+	local_irq_enable();
+	handle_softirq(which);
+	local_irq_disable();
+	lockdep_softirq_exit();
+	current->flags &= ~PF_IN_SOFTIRQ;
+	vtime_account_irq_enter(current);
+	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
+}
+
+/*
+ * Called with interrupts disabled. Process softirqs which were raised
+ * in current context (or on behalf of ksoftirqd).
+ */
+static void do_current_softirqs(void)
+{
+	while (current->softirqs_raised) {
+		int i = __ffs(current->softirqs_raised);
+		unsigned int pending, mask = (1U << i);
+
+		current->softirqs_raised &= ~mask;
+		local_irq_enable();
+
+		/*
+		 * If the lock is contended, we boost the owner to
+		 * process the softirq or leave the critical section
+		 * now.
+		 */
+		lock_softirq(i);
+		local_irq_disable();
+		softirq_set_runner(i);
+		/*
+		 * Check with the local_softirq_pending() bits,
+		 * whether we need to process this still or if someone
+		 * else took care of it.
+		 */
+		pending = local_softirq_pending();
+		if (pending & mask) {
+			set_softirq_pending(pending & ~mask);
+			do_single_softirq(i);
+		}
+		softirq_clr_runner(i);
+		unlock_softirq(i);
+		WARN_ON(current->softirq_nestcnt != 1);
+	}
+}
+
+void __local_bh_disable(void)
+{
+	if (++current->softirq_nestcnt == 1)
+		migrate_disable();
+}
+EXPORT_SYMBOL(__local_bh_disable);
+
+void __local_bh_enable(void)
+{
+	if (WARN_ON(current->softirq_nestcnt == 0))
+		return;
+
+	local_irq_disable();
+	if (current->softirq_nestcnt == 1 && current->softirqs_raised)
+		do_current_softirqs();
+	local_irq_enable();
+
+	if (--current->softirq_nestcnt == 0)
+		migrate_enable();
+}
+EXPORT_SYMBOL(__local_bh_enable);
+
+int in_serving_softirq(void)
+{
+	return current->flags & PF_IN_SOFTIRQ;
+}
+EXPORT_SYMBOL(in_serving_softirq);
+
+/* Called with preemption disabled */
+static void run_ksoftirqd(unsigned int cpu)
+{
+	local_irq_disable();
+	current->softirq_nestcnt++;
+
+	do_current_softirqs();
+	current->softirq_nestcnt--;
+	local_irq_enable();
+	cond_resched_rcu_qs();
+}
+
+/*
+ * Called from netif_rx_ni(). Preemption enabled, but migration
+ * disabled. So the cpu can't go away under us.
+ */
+void thread_do_softirq(void)
+{
+	if (!in_serving_softirq() && current->softirqs_raised) {
+		current->softirq_nestcnt++;
+		do_current_softirqs();
+		current->softirq_nestcnt--;
+	}
+}
+
+static void do_raise_softirq_irqoff(unsigned int nr)
+{
+	trace_softirq_raise(nr);
+	or_softirq_pending(1UL << nr);
+
+	/*
+	 * If we are not in a hard interrupt and inside a bh disabled
+	 * region, we simply raise the flag on current. local_bh_enable()
+	 * will make sure that the softirq is executed. Otherwise we
+	 * delegate it to ksoftirqd.
+	 */
+	if (!in_irq() && current->softirq_nestcnt)
+		current->softirqs_raised |= (1U << nr);
+	else if (__this_cpu_read(ksoftirqd))
+		__this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr);
+}
+
+void __raise_softirq_irqoff(unsigned int nr)
+{
+	do_raise_softirq_irqoff(nr);
+	if (!in_irq() && !current->softirq_nestcnt)
+		wakeup_softirqd();
+}
+
+/*
+ * This function must run with irqs disabled!
+ */
+void raise_softirq_irqoff(unsigned int nr)
+{
+	do_raise_softirq_irqoff(nr);
+
+	/*
+	 * If we're in a hard interrupt we let the irq return code deal
+	 * with the wakeup of ksoftirqd.
+	 */
+	if (in_irq())
+		return;
+	/*
+	 * If we are in thread context but outside of a bh disabled
+	 * region, we need to wake ksoftirqd as well.
+	 *
+	 * CHECKME: Some of the places which do that could be wrapped
+	 * into local_bh_disable/enable pairs. Though it's unclear
+	 * whether this is worth the effort. To find those places just
+	 * raise a WARN() if the condition is met.
+	 */
+	if (!current->softirq_nestcnt)
+		wakeup_softirqd();
+}
+
+static inline int ksoftirqd_softirq_pending(void)
+{
+	return current->softirqs_raised;
+}
+
+static inline void local_bh_disable_nort(void) { }
+static inline void _local_bh_enable_nort(void) { }
+
+static inline void ksoftirqd_set_sched_params(unsigned int cpu)
+{
+	struct sched_param param = { .sched_priority = 1 };
+
+	sched_setscheduler(current, SCHED_FIFO, &param);
+	/* Take over all pending softirqs when starting */
+	local_irq_disable();
+	current->softirqs_raised = local_softirq_pending();
+	local_irq_enable();
+}
+
+static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online)
+{
+	struct sched_param param = { .sched_priority = 0 };
+
+	sched_setscheduler(current, SCHED_NORMAL, &param);
+}
+
+#endif /* PREEMPT_RT_FULL */
+/*
 * Enter an interrupt context.
 */
void irq_enter(void)
@@ -346,9 +710,9 @@ void irq_enter(void)
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
-		local_bh_disable();
+		local_bh_disable_nort();
		tick_irq_enter();
-		_local_bh_enable();
+		_local_bh_enable_nort();
	}

	__irq_enter();
@@ -356,9 +720,13 @@ void irq_enter(void)

static inline void invoke_softirq(void)
{
+#ifdef CONFIG_PREEMPT_RT_FULL
+	unsigned long flags;
+#endif
+
	if (ksoftirqd_running(local_softirq_pending()))
		return;
-
+#ifndef CONFIG_PREEMPT_RT_FULL
	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
@@ -378,6 +746,14 @@ static inline void invoke_softirq(void)
	} else {
		wakeup_softirqd();
	}
+#else /* PREEMPT_RT_FULL */
+
+	local_irq_save(flags);
+	if (__this_cpu_read(ksoftirqd) &&
+	    __this_cpu_read(ksoftirqd)->softirqs_raised)
+		wakeup_softirqd();
+	local_irq_restore(flags);
+#endif
}

static inline void tick_irq_exit(void)
@@ -414,26 +790,6 @@ void irq_exit(void)
	trace_hardirq_exit(); /* must be last! */
}

-/*
- * This function must run with irqs disabled!
- */
-inline void raise_softirq_irqoff(unsigned int nr)
-{
-	__raise_softirq_irqoff(nr);
-
-	/*
-	 * If we're in an interrupt or softirq, we're done
-	 * (this also catches softirq-disabled code). We will
-	 * actually run the softirq once we return from
-	 * the irq or softirq.
-	 *
-	 * Otherwise we wake up ksoftirqd to make sure we
-	 * schedule the softirq soon.
-	 */
-	if (!in_interrupt())
-		wakeup_softirqd();
-}
-
void raise_softirq(unsigned int nr)
{
	unsigned long flags;
@@ -443,12 +799,6 @@ void raise_softirq(unsigned int nr)
	local_irq_restore(flags);
}

-void __raise_softirq_irqoff(unsigned int nr)
-{
-	trace_softirq_raise(nr);
-	or_softirq_pending(1UL << nr);
-}
-
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
@@ -751,23 +1101,7 @@ EXPORT_SYMBOL(tasklet_unlock_wait);

static int ksoftirqd_should_run(unsigned int cpu)
{
-	return local_softirq_pending();
-}
-
-static void run_ksoftirqd(unsigned int cpu)
-{
-	local_irq_disable();
-	if (local_softirq_pending()) {
-		/*
-		 * We can safely run softirq on inline stack, as we are not deep
-		 * in the task stack here.
-		 */
-		__do_softirq();
-		local_irq_enable();
-		cond_resched_rcu_qs();
-		return;
-	}
-	local_irq_enable();
+	return ksoftirqd_softirq_pending();
}

#ifdef CONFIG_HOTPLUG_CPU
@@ -834,6 +1168,8 @@ static int takeover_tasklets(unsigned int cpu)

static struct smp_hotplug_thread softirq_threads = {
	.store = &ksoftirqd,
+	.setup = ksoftirqd_set_sched_params,
+	.cleanup = ksoftirqd_clr_sched_params,
	.thread_should_run = ksoftirqd_should_run,
	.thread_fn = run_ksoftirqd,
	.thread_comm = "ksoftirqd/%u",
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 6a1b656..943d0f77 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -897,14 +897,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
		return false;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
-		static int ratelimit;
-
-		if (ratelimit < 10 &&
-		    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
-			pr_warn("NOHZ: local_softirq_pending %02x\n",
-				(unsigned int) local_softirq_pending());
-			ratelimit++;
-		}
+		softirq_check_pending_idle();
		return false;
	}

diff --git a/net/core/dev.c b/net/core/dev.c
index 7ff9f83e..f5b9c29 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3916,11 +3916,9 @@ int netif_rx_ni(struct sk_buff *skb)

	trace_netif_rx_ni_entry(skb);

-	preempt_disable();
+	local_bh_disable();
	err = netif_rx_internal(skb);
-	if (local_softirq_pending())
-		do_softirq();
-	preempt_enable();
+	local_bh_enable();

	return err;
}
--
2.7.4