Jetpack/kernel_avc/kernel-4.9/rt-patches/0353-cpufreq-times-Use-raw-...

From a25d19d2f32d30118162bbd4ea59c5d12e08702b Mon Sep 17 00:00:00 2001
From: Sumit Gupta <sumitg@nvidia.com>
Date: Tue, 7 May 2019 18:31:30 +0530
Subject: [PATCH 353/353] cpufreq: times: Use raw spinlock variants
X-NVConfidentiality: public

Use the raw spinlock variants, which do not sleep on RT kernels.
This was reported on DevTalk, where using the raw variants was also
suggested as the fix for a lockup issue during boot. (See the note
after the "---" separator below for a sketch of the pattern.)

Bug 2540766

Change-Id: I2b1ce57d54e528533d17dd7fe25490b2c6eee880
Signed-off-by: Sumit Gupta <sumitg@nvidia.com>
---
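Note (not part of the original commit; git am ignores text in this
region): on PREEMPT_RT, spinlock_t is backed by a sleeping rt_mutex,
so it must not be taken from atomic contexts such as the tick-time
accounting path that reaches cpufreq_acct_update_power();
raw_spinlock_t keeps real busy-wait, non-sleeping semantics. A
minimal sketch of the pattern this patch applies, using the
hypothetical names my_stats_lock and my_event_count, follows:

/*
 * Sketch only: the raw spinlock pattern used throughout this change.
 * raw_spin_lock_irqsave() disables local interrupts and spins; it
 * never sleeps, even on PREEMPT_RT, so it is safe where sleeping is
 * forbidden. "my_stats_lock" and "my_event_count" are illustrative
 * names, not part of the patched driver.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(my_stats_lock);
static u64 my_event_count;

static void my_stats_inc(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&my_stats_lock, flags);
	my_event_count++;
	raw_spin_unlock_irqrestore(&my_stats_lock, flags);
}
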
 drivers/cpufreq/cpufreq_times.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/drivers/cpufreq/cpufreq_times.c b/drivers/cpufreq/cpufreq_times.c
index 0e8754b6..d4f6504 100644
--- a/drivers/cpufreq/cpufreq_times.c
+++ b/drivers/cpufreq/cpufreq_times.c
@@ -29,8 +29,8 @@
 
 static DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS);
 
-static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
-static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */
+static DEFINE_RAW_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
+static DEFINE_RAW_SPINLOCK(uid_lock); /* uid_hash_table */
 
 struct uid_entry {
 	uid_t uid;
@@ -236,9 +236,9 @@ void cpufreq_task_times_init(struct task_struct *p)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	raw_spin_lock_irqsave(&task_time_in_state_lock, flags);
 	p->time_in_state = NULL;
-	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	raw_spin_unlock_irqrestore(&task_time_in_state_lock, flags);
 	p->max_state = 0;
 }
 
@@ -253,9 +253,9 @@ void cpufreq_task_times_alloc(struct task_struct *p)
 	if (!temp)
 		return;
 
-	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	raw_spin_lock_irqsave(&task_time_in_state_lock, flags);
 	p->time_in_state = temp;
-	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	raw_spin_unlock_irqrestore(&task_time_in_state_lock, flags);
 	p->max_state = max_state;
 }
 
@@ -283,10 +283,10 @@ void cpufreq_task_times_exit(struct task_struct *p)
 	if (!p->time_in_state)
 		return;
 
-	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	raw_spin_lock_irqsave(&task_time_in_state_lock, flags);
 	temp = p->time_in_state;
 	p->time_in_state = NULL;
-	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	raw_spin_unlock_irqrestore(&task_time_in_state_lock, flags);
 	kfree(temp);
 }
 
@@ -299,7 +299,7 @@ int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
 	struct cpu_freqs *freqs;
 	struct cpu_freqs *last_freqs = NULL;
 
-	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	raw_spin_lock_irqsave(&task_time_in_state_lock, flags);
 	for_each_possible_cpu(cpu) {
 		freqs = all_freqs[cpu];
 		if (!freqs || freqs == last_freqs)
@@ -318,7 +318,7 @@ int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
 				(unsigned long)cputime_to_clock_t(cputime));
 		}
 	}
-	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	raw_spin_unlock_irqrestore(&task_time_in_state_lock, flags);
 
 	return 0;
 }
@@ -335,17 +335,17 @@ void cpufreq_acct_update_power(struct task_struct *p, cputime_t cputime)
 
 	state = freqs->offset + READ_ONCE(freqs->last_index);
 
-	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	raw_spin_lock_irqsave(&task_time_in_state_lock, flags);
 	if ((state < p->max_state || !cpufreq_task_times_realloc_locked(p)) &&
 	    p->time_in_state)
 		p->time_in_state[state] += cputime;
-	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	raw_spin_unlock_irqrestore(&task_time_in_state_lock, flags);
 
-	spin_lock_irqsave(&uid_lock, flags);
+	raw_spin_lock_irqsave(&uid_lock, flags);
 	uid_entry = find_or_register_uid_locked(uid);
 	if (uid_entry && state < uid_entry->max_state)
 		uid_entry->time_in_state[state] += cputime;
-	spin_unlock_irqrestore(&uid_lock, flags);
+	raw_spin_unlock_irqrestore(&uid_lock, flags);
 }
 
 void cpufreq_times_create_policy(struct cpufreq_policy *policy)
@@ -393,7 +393,7 @@ void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
 	struct hlist_node *tmp;
 	unsigned long flags;
 
-	spin_lock_irqsave(&uid_lock, flags);
+	raw_spin_lock_irqsave(&uid_lock, flags);
 
 	for (; uid_start <= uid_end; uid_start++) {
 		hash_for_each_possible_safe(uid_hash_table, uid_entry, tmp,
@@ -405,7 +405,7 @@ void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
 		}
 	}
 
-	spin_unlock_irqrestore(&uid_lock, flags);
+	raw_spin_unlock_irqrestore(&uid_lock, flags);
 }
 
 void cpufreq_times_record_transition(struct cpufreq_freqs *freq)
--
2.7.4
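
Note (not part of the original patch): raw spinlocks disable
preemption even on PREEMPT_RT, so their critical sections should stay
short. In the hunks above, proc_time_in_state_show() calls
seq_printf() for every CPU and frequency state while holding
task_time_in_state_lock, which is worth keeping in mind for RT
latency. One possible way to bound such a section, sketched here with
hypothetical names (my_table_lock, my_show), is to snapshot the data
under the lock and format it afterwards:

/*
 * Sketch only: copy under the raw lock, do the slow seq_printf()
 * calls outside it. All names here are illustrative, not from the
 * patched driver.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/seq_file.h>

static DEFINE_RAW_SPINLOCK(my_table_lock);

static int my_show(struct seq_file *m, const u64 *table, unsigned int n)
{
	unsigned long flags;
	unsigned int i;
	u64 *snap;

	snap = kcalloc(n, sizeof(*snap), GFP_KERNEL);
	if (!snap)
		return -ENOMEM;

	/* Keep the non-preemptible window as small as possible. */
	raw_spin_lock_irqsave(&my_table_lock, flags);
	memcpy(snap, table, n * sizeof(*snap));
	raw_spin_unlock_irqrestore(&my_table_lock, flags);

	for (i = 0; i < n; i++)
		seq_printf(m, "%u %llu\n", i, snap[i]);

	kfree(snap);
	return 0;
}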