From 8bdcac58a9ed6dbe3e0374542c592c1efb55fc73 Mon Sep 17 00:00:00 2001
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Thu, 31 Mar 2016 04:08:28 +0200
Subject: [PATCH 314/352] drivers/block/zram: Replace bit spinlocks with
 rtmutex for -rt

They're nondeterministic, and lead to ___might_sleep() splats in -rt.
OTOH, they're a lot less wasteful than an rtmutex per page.

Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 drivers/block/zram/zram_drv.c | 30 ++++++++++++++++--------------
 drivers/block/zram/zram_drv.h | 41 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+), 14 deletions(-)

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index b7c0b69..2200918 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -528,6 +528,8 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
 		goto out_error;
 	}
 
+	zram_meta_init_table_locks(meta, disksize);
+
 	return meta;
 
 out_error:
@@ -576,12 +578,12 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	unsigned long handle;
 	unsigned int size;
 
-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_lock_table(&meta->table[index]);
 	handle = meta->table[index].handle;
 	size = zram_get_obj_size(meta, index);
 
 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_unlock_table(&meta->table[index]);
 		memset(mem, 0, PAGE_SIZE);
 		return 0;
 	}
@@ -596,7 +598,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 		zcomp_stream_put(zram->comp);
 	}
 	zs_unmap_object(meta->mem_pool, handle);
-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_unlock_table(&meta->table[index]);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret)) {
@@ -616,14 +618,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 	struct zram_meta *meta = zram->meta;
 	page = bvec->bv_page;
 
-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_lock_table(&meta->table[index]);
 	if (unlikely(!meta->table[index].handle) ||
 			zram_test_flag(meta, index, ZRAM_ZERO)) {
-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_unlock_table(&meta->table[index]);
 		handle_zero_page(bvec);
 		return 0;
 	}
-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_unlock_table(&meta->table[index]);
 
 	if (is_partial_io(bvec))
 		/* Use a temporary buffer to decompress the page */
@@ -700,10 +702,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		if (user_mem)
 			kunmap_atomic(user_mem);
 		/* Free memory associated with this sector now. */
-		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_lock_table(&meta->table[index]);
 		zram_free_page(zram, index);
 		zram_set_flag(meta, index, ZRAM_ZERO);
-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_unlock_table(&meta->table[index]);
 
 		atomic64_inc(&zram->stats.zero_pages);
 		ret = 0;
@@ -794,12 +796,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	 * Free memory associated with this sector
 	 * before overwriting unused sectors.
 	 */
-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_lock_table(&meta->table[index]);
 	zram_free_page(zram, index);
 
 	meta->table[index].handle = handle;
 	zram_set_obj_size(meta, index, clen);
-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_unlock_table(&meta->table[index]);
 
 	/* Update stats */
 	atomic64_add(clen, &zram->stats.compr_data_size);
@@ -842,9 +844,9 @@ static void zram_bio_discard(struct zram *zram, u32 index,
 	}
 
 	while (n >= PAGE_SIZE) {
-		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_lock_table(&meta->table[index]);
 		zram_free_page(zram, index);
-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_unlock_table(&meta->table[index]);
 		atomic64_inc(&zram->stats.notify_free);
 		index++;
 		n -= PAGE_SIZE;
@@ -973,9 +975,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
 	zram = bdev->bd_disk->private_data;
 	meta = zram->meta;
 
-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_lock_table(&meta->table[index]);
 	zram_free_page(zram, index);
-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_unlock_table(&meta->table[index]);
 	atomic64_inc(&zram->stats.notify_free);
 }
 
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 74fcf10..fd4020c 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -73,6 +73,9 @@ enum zram_pageflags {
 struct zram_table_entry {
 	unsigned long handle;
 	unsigned long value;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	spinlock_t lock;
+#endif
 };
 
 struct zram_stats {
@@ -120,4 +123,42 @@ struct zram {
 	 */
 	bool claim; /* Protected by bdev->bd_mutex */
 };
+
+#ifndef CONFIG_PREEMPT_RT_BASE
+static inline void zram_lock_table(struct zram_table_entry *table)
+{
+	bit_spin_lock(ZRAM_ACCESS, &table->value);
+}
+
+static inline void zram_unlock_table(struct zram_table_entry *table)
+{
+	bit_spin_unlock(ZRAM_ACCESS, &table->value);
+}
+
+static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) { }
+#else /* CONFIG_PREEMPT_RT_BASE */
+static inline void zram_lock_table(struct zram_table_entry *table)
+{
+	spin_lock(&table->lock);
+	__set_bit(ZRAM_ACCESS, &table->value);
+}
+
+static inline void zram_unlock_table(struct zram_table_entry *table)
+{
+	__clear_bit(ZRAM_ACCESS, &table->value);
+	spin_unlock(&table->lock);
+}
+
+static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize)
+{
+	size_t num_pages = disksize >> PAGE_SHIFT;
+	size_t index;
+
+	for (index = 0; index < num_pages; index++) {
+		spinlock_t *lock = &meta->table[index].lock;
+		spin_lock_init(lock);
+	}
+}
+#endif /* CONFIG_PREEMPT_RT_BASE */
+
 #endif
--
2.7.4
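
A minimal standalone sketch of the RT-side scheme above, for illustration only; it is not part of the patch. A priority-inheriting pthread_mutex_t stands in for the kernel's spinlock_t (which PREEMPT_RT turns into an rtmutex), and the names table_entry, entry_lock(), entry_unlock(), num_pages, plus the ZRAM_ACCESS bit value, are assumptions made for the demo rather than zram identifiers:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define ZRAM_ACCESS 16	/* assumed bit position; the real value lives in zram_drv.h */

/* Mirrors struct zram_table_entry on -rt: one real sleeping lock per entry. */
struct table_entry {
	unsigned long handle;
	unsigned long value;	/* flag bits, including the ZRAM_ACCESS marker */
	pthread_mutex_t lock;	/* stand-in for spinlock_t (an rtmutex on -rt) */
};

/* Analogue of zram_lock_table(): take the per-entry lock, then mark the
 * entry locked in its flag word so flag-based checks still see it held. */
static void entry_lock(struct table_entry *e)
{
	pthread_mutex_lock(&e->lock);
	e->value |= 1UL << ZRAM_ACCESS;
}

/* Analogue of zram_unlock_table(): clear the flag, then drop the lock. */
static void entry_unlock(struct table_entry *e)
{
	e->value &= ~(1UL << ZRAM_ACCESS);
	pthread_mutex_unlock(&e->lock);
}

int main(void)
{
	size_t num_pages = 4, i;
	struct table_entry *table = calloc(num_pages, sizeof(*table));
	pthread_mutexattr_t attr;

	if (!table)
		return 1;

	/* Analogue of zram_meta_init_table_locks(): one PI lock per entry. */
	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
	for (i = 0; i < num_pages; i++)
		pthread_mutex_init(&table[i].lock, &attr);

	/* Analogue of the zram_slot_free_notify() hunk: lock, free, unlock. */
	entry_lock(&table[2]);
	table[2].handle = 0;
	entry_unlock(&table[2]);

	printf("entry 2 freed under its own per-entry lock\n");
	free(table);
	return 0;
}

Unlike a bit spinlock, each acquisition here can sleep and lends the waiter's priority to the lock holder, which is the determinism argument in the changelog; the cost is one lock object per table entry instead of a single flag bit, the trade-off the changelog weighs against an rtmutex per page.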