forked from rrcarlosr/Jetpack
From 996450cbe65b475af8824514157c63e17d82f288 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 31 Aug 2017 18:19:06 +0200
Subject: [PATCH 340/352] kernel/hrtimer: don't wakeup a process while holding
 the hrtimer base lock

We must not wake any process (and thus acquire the pi->lock) while
holding the hrtimer's base lock. This does not happen usually because
the hrtimer-callback is invoked in IRQ-context and so
raise_softirq_irqoff() does not wakeup a process.
However during CPU-hotplug it might get called from hrtimers_dead_cpu()
which would wakeup the thread immediately.

Reported-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/time/hrtimer.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index a5b4e9a..2158fc5 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1447,7 +1447,7 @@ static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
 
 static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
 
-static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
+static int __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
 {
 	struct hrtimer_clock_base *base = cpu_base->clock_base;
 	unsigned int active = cpu_base->active_bases;
@@ -1497,8 +1497,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
 			raise = 1;
 		}
 	}
-	if (raise)
-		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+	return raise;
 }
 
 #ifdef CONFIG_HIGH_RES_TIMERS
@@ -1512,6 +1511,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	ktime_t expires_next, now, entry_time, delta;
 	int retries = 0;
+	int raise;
 
 	BUG_ON(!cpu_base->hres_active);
 	cpu_base->nr_events++;
@@ -1530,7 +1530,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	 */
 	cpu_base->expires_next.tv64 = KTIME_MAX;
 
-	__hrtimer_run_queues(cpu_base, now);
+	raise = __hrtimer_run_queues(cpu_base, now);
 
 	/* Reevaluate the clock bases for the next expiry */
 	expires_next = __hrtimer_get_next_event(cpu_base);
@@ -1541,6 +1541,8 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	cpu_base->expires_next = expires_next;
 	cpu_base->in_hrtirq = 0;
 	raw_spin_unlock(&cpu_base->lock);
+	if (raise)
+		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 
 	/* Reprogramming necessary ? */
 	if (!tick_program_event(expires_next, 0)) {
@@ -1620,6 +1622,7 @@ void hrtimer_run_queues(void)
 {
 	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	ktime_t now;
+	int raise;
 
 	if (__hrtimer_hres_active(cpu_base))
 		return;
@@ -1638,8 +1641,10 @@ void hrtimer_run_queues(void)
 
 	raw_spin_lock(&cpu_base->lock);
 	now = hrtimer_update_base(cpu_base);
-	__hrtimer_run_queues(cpu_base, now);
+	raise = __hrtimer_run_queues(cpu_base, now);
 	raw_spin_unlock(&cpu_base->lock);
+	if (raise)
+		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 }
 
 /*
-- 
2.7.4