From 401bb6f8c4d9663c106ceeac25bafaf5621f4cc7 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: [PATCH 228/352] idr: Use local lock instead of preempt enable/disable

We need to protect the per-CPU variables and prevent migration.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/idr.h |  4 ++++
 lib/idr.c           | 43 +++++++++++++++++++++++++++++++++++++------
 2 files changed, 41 insertions(+), 6 deletions(-)

diff --git a/include/linux/idr.h b/include/linux/idr.h
index 083d61e..5899796 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -95,10 +95,14 @@ bool idr_is_empty(struct idr *idp);
  * Each idr_preload() should be matched with an invocation of this
  * function. See idr_preload() for details.
  */
+#ifdef CONFIG_PREEMPT_RT_FULL
+void idr_preload_end(void);
+#else
 static inline void idr_preload_end(void)
 {
 	preempt_enable();
 }
+#endif
 
 /**
  * idr_find - return pointer for given id
diff --git a/lib/idr.c b/lib/idr.c
index 6098336..9decbe9 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -30,6 +30,7 @@
 #include <linux/idr.h>
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
+#include <linux/locallock.h>
 
 #define MAX_IDR_SHIFT		(sizeof(int) * 8 - 1)
 #define MAX_IDR_BIT		(1U << MAX_IDR_SHIFT)
@@ -45,6 +46,37 @@ static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
 static DEFINE_PER_CPU(int, idr_preload_cnt);
 static DEFINE_SPINLOCK(simple_ida_lock);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
+
+static inline void idr_preload_lock(void)
+{
+	local_lock(idr_lock);
+}
+
+static inline void idr_preload_unlock(void)
+{
+	local_unlock(idr_lock);
+}
+
+void idr_preload_end(void)
+{
+	idr_preload_unlock();
+}
+EXPORT_SYMBOL(idr_preload_end);
+#else
+static inline void idr_preload_lock(void)
+{
+	preempt_disable();
+}
+
+static inline void idr_preload_unlock(void)
+{
+	preempt_enable();
+}
+#endif
+
+
 /* the maximum ID which can be allocated given idr->layers */
 static int idr_max(int layers)
 {
@@ -115,14 +147,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	 * context. See idr_preload() for details.
 	 */
 	if (!in_interrupt()) {
-		preempt_disable();
+		idr_preload_lock();
 		new = __this_cpu_read(idr_preload_head);
 		if (new) {
 			__this_cpu_write(idr_preload_head, new->ary[0]);
 			__this_cpu_dec(idr_preload_cnt);
 			new->ary[0] = NULL;
 		}
-		preempt_enable();
+		idr_preload_unlock();
 		if (new)
 			return new;
 	}
@@ -366,7 +398,6 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
 	idr_mark_full(pa, id);
 }
 
-
 /**
  * idr_preload - preload for idr_alloc()
  * @gfp_mask: allocation mask to use for preloading
@@ -401,7 +432,7 @@ void idr_preload(gfp_t gfp_mask)
 	WARN_ON_ONCE(in_interrupt());
 	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
 
-	preempt_disable();
+	idr_preload_lock();
 
 	/*
 	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
@@ -413,9 +444,9 @@ void idr_preload(gfp_t gfp_mask)
 	while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
 		struct idr_layer *new;
 
-		preempt_enable();
+		idr_preload_unlock();
 		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
-		preempt_disable();
+		idr_preload_lock();
 		if (!new)
 			break;
 
-- 
2.7.4
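
For context, the caller-side idiom that idr_preload()/idr_preload_end() implement is untouched by this patch; only the protection of the per-CPU preload cache moves from preempt disable/enable to a local lock, which on PREEMPT_RT_FULL serializes per-CPU access and prevents migration while leaving the section preemptible. A minimal sketch of that idiom, not part of the patch (my_idr, my_lock, and assign_id are hypothetical names):

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	static DEFINE_IDR(my_idr);		/* hypothetical IDR instance */
	static DEFINE_SPINLOCK(my_lock);	/* hypothetical caller lock */

	static int assign_id(void *ptr)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* may sleep; fills the per-CPU layer cache */
		spin_lock(&my_lock);
		/* GFP_NOWAIT: the preloaded per-CPU layers back this allocation */
		id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
		spin_unlock(&my_lock);
		idr_preload_end();		/* local_unlock() on RT, preempt_enable() otherwise */

		return id;
	}

On mainline the window between idr_preload() and idr_preload_end() runs with preemption disabled, so the caller must not sleep there; the RT variant keeps that contract via idr_lock while allowing higher-priority tasks to preempt the holder.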