From 723b31033a5d814d811e488c8e70190a428871de Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:34 -0500
Subject: [PATCH 121/352] hrtimers: Prepare full preemption

Make cancellation of a running callback in softirq context safe
against preemption.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
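A minimal sketch of the resulting cancellation pattern (not part of the
change itself), mirroring the hrtimer_cancel() hunk below; the name
cancel_timer_sketch() is a hypothetical illustration. Instead of spinning
with cpu_relax() while the callback runs in preemptible softirq context,
the cancelling task now sleeps on the per-CPU base waitqueue until the
callback has finished:

	/* Hypothetical caller-side sketch, not part of this patch. */
	static int cancel_timer_sketch(struct hrtimer *timer)
	{
		for (;;) {
			/* -1 means the callback is currently running */
			int ret = hrtimer_try_to_cancel(timer);

			if (ret >= 0)
				return ret;
			/* sleep on cpu_base->wait instead of busy-waiting */
			hrtimer_wait_for_timer(timer);
		}
	}
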
 include/linux/hrtimer.h    | 12 +++++++++++-
 kernel/time/hrtimer.c      | 33 ++++++++++++++++++++++++++++++++-
 kernel/time/itimer.c       |  1 +
 kernel/time/posix-timers.c | 33 +++++++++++++++++++++++++++++++++
 4 files changed, 77 insertions(+), 2 deletions(-)

diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 3bddb7c..b71f4b7 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -207,6 +207,9 @@ struct hrtimer_cpu_base {
 	unsigned int		nr_hangs;
 	unsigned int		max_hang_time;
 #endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+	wait_queue_head_t	wait;
+#endif
 	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
 } ____cacheline_aligned;
 
@@ -416,6 +419,13 @@ static inline void hrtimer_restart(struct hrtimer *timer)
 	hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
 }
 
+/* Softirq preemption could deadlock timer removal */
+#ifdef CONFIG_PREEMPT_RT_BASE
+ extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
+#else
+# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0)
+#endif
+
 /* Query timers: */
 extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
 
@@ -440,7 +450,7 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
  * Helper function to check, whether the timer is running the callback
  * function
  */
-static inline int hrtimer_callback_running(struct hrtimer *timer)
+static inline int hrtimer_callback_running(const struct hrtimer *timer)
 {
 	return timer->base->cpu_base->running == timer;
 }
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 179e0b9..7ab5110 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -856,6 +856,32 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 }
 EXPORT_SYMBOL_GPL(hrtimer_forward);
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define wake_up_timer_waiters(b)	wake_up(&(b)->wait)
+
+/**
+ * hrtimer_wait_for_timer - Wait for a running timer
+ *
+ * @timer:	timer to wait for
+ *
+ * The function waits on the waitqueue of the timer base in case
+ * the timer's callback function is currently being executed. The
+ * waitqueue is woken up after the timer callback function has
+ * finished execution.
+ */
+void hrtimer_wait_for_timer(const struct hrtimer *timer)
+{
+	struct hrtimer_clock_base *base = timer->base;
+
+	if (base && base->cpu_base && !hrtimer_hres_active())
+		wait_event(base->cpu_base->wait,
+			   !(hrtimer_callback_running(timer)));
+}
+
+#else
+# define wake_up_timer_waiters(b)	do { } while (0)
+#endif
+
 /*
  * enqueue_hrtimer - internal function to (re)start a timer
  *
@@ -1073,7 +1099,7 @@ int hrtimer_cancel(struct hrtimer *timer)
 
 		if (ret >= 0)
 			return ret;
-		cpu_relax();
+		hrtimer_wait_for_timer(timer);
 	}
 }
 EXPORT_SYMBOL_GPL(hrtimer_cancel);
@@ -1481,6 +1507,8 @@ void hrtimer_run_queues(void)
 	now = hrtimer_update_base(cpu_base);
 	__hrtimer_run_queues(cpu_base, now);
 	raw_spin_unlock(&cpu_base->lock);
+
+	wake_up_timer_waiters(cpu_base);
 }
 
 /*
@@ -1641,6 +1669,9 @@ int hrtimers_prepare_cpu(unsigned int cpu)
 	cpu_base->active_bases = 0;
 	cpu_base->cpu = cpu;
 	hrtimer_init_hres(cpu_base);
+#ifdef CONFIG_PREEMPT_RT_BASE
+	init_waitqueue_head(&cpu_base->wait);
+#endif
 	return 0;
 }
 
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index 1d5c720..184de67 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -213,6 +213,7 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
 		/* We are sharing ->siglock with it_real_fn() */
 		if (hrtimer_try_to_cancel(timer) < 0) {
 			spin_unlock_irq(&tsk->sighand->siglock);
+			hrtimer_wait_for_timer(&tsk->signal->real_timer);
 			goto again;
 		}
 		expires = timeval_to_ktime(value->it_value);
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 06ef7e4..558e420 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -842,6 +842,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
 	return overrun;
 }
 
+/*
+ * Protected by RCU!
+ */
+static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (kc->timer_set == common_timer_set)
+		hrtimer_wait_for_timer(&timr->it.real.timer);
+	else
+		/* FIXME: Whacky hack for posix-cpu-timers */
+		schedule_timeout(1);
+#endif
+}
+
 /* Set a POSIX.1b interval timer. */
 /* timr->it_lock is taken. */
 static int
@@ -919,6 +933,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
 	if (!timr)
 		return -EINVAL;
 
+	rcu_read_lock();
 	kc = clockid_to_kclock(timr->it_clock);
 	if (WARN_ON_ONCE(!kc || !kc->timer_set))
 		error = -EINVAL;
@@ -927,9 +942,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
 
 	unlock_timer(timr, flag);
 	if (error == TIMER_RETRY) {
+		timer_wait_for_callback(kc, timr);
 		rtn = NULL;	// We already got the old time...
+		rcu_read_unlock();
 		goto retry;
 	}
+	rcu_read_unlock();
 
 	if (old_setting && !error &&
 	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
@@ -967,10 +985,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
 	if (!timer)
 		return -EINVAL;
 
+	rcu_read_lock();
 	if (timer_delete_hook(timer) == TIMER_RETRY) {
 		unlock_timer(timer, flags);
+		timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
+					timer);
+		rcu_read_unlock();
 		goto retry_delete;
 	}
+	rcu_read_unlock();
 
 	spin_lock(&current->sighand->siglock);
 	list_del(&timer->list);
@@ -996,8 +1019,18 @@ static void itimer_delete(struct k_itimer *timer)
 retry_delete:
 	spin_lock_irqsave(&timer->it_lock, flags);
 
+	/* On RT we can race with a deletion */
+	if (!timer->it_signal) {
+		unlock_timer(timer, flags);
+		return;
+	}
+
 	if (timer_delete_hook(timer) == TIMER_RETRY) {
+		rcu_read_lock();
 		unlock_timer(timer, flags);
+		timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
+					timer);
+		rcu_read_unlock();
 		goto retry_delete;
 	}
 	list_del(&timer->list);
-- 
2.7.4