From 03374011b547c5f9e9b1c91cf5d33abab2d9e159 Mon Sep 17 00:00:00 2001
From: Mike Galbraith <efault@gmx.de>
Date: Fri, 23 Jun 2017 09:37:14 +0200
Subject: [PATCH 335/352] rtmutex: Fix lock stealing logic

1. When trying to acquire an rtmutex, we first try to grab it without
   queueing the waiter, and explicitly check for that initial attempt
   in the !waiter path of __try_to_take_rt_mutex(). Checking whether
   the lock taker is top waiter before allowing a steal attempt in that
   path is a thinko: the lock taker has not yet blocked.

2. It seems wrong to change the definition of rt_mutex_waiter_less()
   to mean less or perhaps equal when we have an rt_mutex_waiter_equal().

Remove the thinko, restore rt_mutex_waiter_less(), implement and use
rt_mutex_steal() based upon rt_mutex_waiter_less/equal(), moving all
qualification criteria into the function itself.

Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/locking/rtmutex.c | 73 ++++++++++++++++++++++++------------------------
 1 file changed, 36 insertions(+), 37 deletions(-)

diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index b73cd7c..5dbf678 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -235,25 +235,18 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
 }
 #endif
 
-#define STEAL_NORMAL  0
-#define STEAL_LATERAL 1
 /*
  * Only use with rt_mutex_waiter_{less,equal}()
  */
-#define task_to_waiter(p)	\
-	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
+#define task_to_waiter(p) &(struct rt_mutex_waiter) \
+	{ .prio = (p)->prio, .deadline = (p)->dl.deadline, .task = (p) }
 
 static inline int
 rt_mutex_waiter_less(struct rt_mutex_waiter *left,
-		     struct rt_mutex_waiter *right, int mode)
+		     struct rt_mutex_waiter *right)
 {
-	if (mode == STEAL_NORMAL) {
-		if (left->prio < right->prio)
-			return 1;
-	} else {
-		if (left->prio <= right->prio)
-			return 1;
-	}
+	if (left->prio < right->prio)
+		return 1;
 
 	/*
 	 * If both waiters have dl_prio(), we check the deadlines of the
@@ -286,6 +279,27 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
 	return 1;
 }
 
+#define STEAL_NORMAL  0
+#define STEAL_LATERAL 1
+
+static inline int
+rt_mutex_steal(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, int mode)
+{
+	struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);
+
+	if (waiter == top_waiter || rt_mutex_waiter_less(waiter, top_waiter))
+		return 1;
+
+	/*
+	 * Note that RT tasks are excluded from lateral-steals
+	 * to prevent the introduction of an unbounded latency.
+	 */
+	if (mode == STEAL_NORMAL || rt_task(waiter->task))
+		return 0;
+
+	return rt_mutex_waiter_equal(waiter, top_waiter);
+}
+
 static void
 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
 {
@@ -297,7 +311,7 @@ rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
 	while (*link) {
 		parent = *link;
 		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
-		if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) {
+		if (rt_mutex_waiter_less(waiter, entry)) {
 			link = &parent->rb_left;
 		} else {
 			link = &parent->rb_right;
@@ -336,7 +350,7 @@ rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
 	while (*link) {
 		parent = *link;
 		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
-		if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) {
+		if (rt_mutex_waiter_less(waiter, entry)) {
 			link = &parent->rb_left;
 		} else {
 			link = &parent->rb_right;
@@ -847,6 +861,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
  * @task:   The task which wants to acquire the lock
  * @waiter: The waiter that is queued to the lock's wait tree if the
  *	    callsite called task_blocked_on_lock(), otherwise NULL
+ * @mode:   Lock steal mode (STEAL_NORMAL, STEAL_LATERAL)
  */
 static int __try_to_take_rt_mutex(struct rt_mutex *lock,
 				  struct task_struct *task,
@@ -886,14 +901,11 @@ static int __try_to_take_rt_mutex(struct rt_mutex *lock,
 	 */
 	if (waiter) {
 		/*
-		 * If waiter is not the highest priority waiter of
-		 * @lock, give up.
+		 * If waiter is not the highest priority waiter of @lock,
+		 * or its peer when lateral steal is allowed, give up.
 		 */
-		if (waiter != rt_mutex_top_waiter(lock)) {
-			/* XXX rt_mutex_waiter_less() ? */
+		if (!rt_mutex_steal(lock, waiter, mode))
 			return 0;
-		}
-
 		/*
 		 * We can acquire the lock. Remove the waiter from the
 		 * lock waiters tree.
@@ -910,25 +922,12 @@ static int __try_to_take_rt_mutex(struct rt_mutex *lock,
 		 * not need to be dequeued.
 		 */
 		if (rt_mutex_has_waiters(lock)) {
-			struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
-
-			if (task != pown)
-				return 0;
-
-			/*
-			 * Note that RT tasks are excluded from lateral-steals
-			 * to prevent the introduction of an unbounded latency.
-			 */
-			if (rt_task(task))
-				mode = STEAL_NORMAL;
 			/*
-			 * If @task->prio is greater than or equal to
-			 * the top waiter priority (kernel view),
-			 * @task lost.
+			 * If @task->prio is greater than the top waiter
+			 * priority (kernel view), or equal to it when a
+			 * lateral steal is forbidden, @task lost.
 			 */
-			if (!rt_mutex_waiter_less(task_to_waiter(task),
-						  rt_mutex_top_waiter(lock),
-						  mode))
+			if (!rt_mutex_steal(lock, task_to_waiter(task), mode))
 				return 0;
 			/*
 			 * The current top waiter stays enqueued. We
-- 
2.7.4
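
Appendix (not part of the patch): a standalone sketch of the
steal-qualification rules that the new rt_mutex_steal() centralizes,
reduced to the prio comparison alone (the kernel helpers additionally
compare deadlines when both waiters are deadline tasks). struct waiter,
may_steal() and the is_rt flag below are simplified stand-ins for
illustration, not the kernel's definitions.

/* Simplified stand-ins for the kernel types and helpers. */
#include <stdio.h>

struct waiter {
	int prio;	/* lower value means higher priority (kernel view) */
	int is_rt;	/* stand-in for rt_task(waiter->task) */
};

#define STEAL_NORMAL  0
#define STEAL_LATERAL 1

static int waiter_less(const struct waiter *l, const struct waiter *r)
{
	return l->prio < r->prio;	/* strictly higher priority */
}

static int waiter_equal(const struct waiter *l, const struct waiter *r)
{
	return l->prio == r->prio;	/* peer priority */
}

/* Returns 1 if @w may take the lock ahead of the current top waiter @top. */
static int may_steal(const struct waiter *w, const struct waiter *top, int mode)
{
	if (w == top || waiter_less(w, top))
		return 1;
	/* RT tasks never steal laterally; that could delay @top unboundedly. */
	if (mode == STEAL_NORMAL || w->is_rt)
		return 0;
	return waiter_equal(w, top);
}

int main(void)
{
	struct waiter top  = { .prio = 120, .is_rt = 0 };
	struct waiter peer = { .prio = 120, .is_rt = 0 };

	/* An equal-priority non-RT peer may steal only in lateral mode. */
	printf("normal:  %d\n", may_steal(&peer, &top, STEAL_NORMAL));	/* 0 */
	printf("lateral: %d\n", may_steal(&peer, &top, STEAL_LATERAL));	/* 1 */
	return 0;
}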