From 6da47f075b748f21fa1ec96ba67c12f66575188f Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 15 Jan 2016 16:33:34 +0100
Subject: [PATCH 291/352] net/core: protect users of napi_alloc_cache against
 reentrance

On -RT, code running in BH cannot be moved to another CPU, so CPU-local
variables remain local. However, the code can be preempted, and another
task may then enter BH on the same CPU and access the same
napi_alloc_cache variable.

This patch ensures that each user of napi_alloc_cache takes a local lock.
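
The pattern is the same at every site changed below: bracket each access
to the per-CPU cache with the new local lock. A minimal sketch of the
scheme, using only names that appear in the diff (on non-RT kernels the
locallock primitives are expected to degrade to plain
get_cpu_var()/put_cpu_var(), i.e. a preempt-disabled per-CPU access):

	struct napi_alloc_cache *nc;

	/* serializes preemptible BH users of the cache on this CPU */
	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
	/* ... nc->page and nc->skb_cache may be touched safely here ... */
	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
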
Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 net/core/skbuff.c | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index de66b9f8..cebba4e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -362,6 +362,7 @@ struct napi_alloc_cache {
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
 static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
+static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock);
 
 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
@@ -391,9 +392,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
 
 static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
+	void *data;
 
-	return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+	data = __alloc_page_frag(&nc->page, fragsz, gfp_mask);
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+	return data;
 }
 
 void *napi_alloc_frag(unsigned int fragsz)
@@ -487,9 +492,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 				 gfp_t gfp_mask)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 	struct sk_buff *skb;
 	void *data;
+	bool pfmemalloc;
 
 	len += NET_SKB_PAD + NET_IP_ALIGN;
 
@@ -507,7 +513,10 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	data = __alloc_page_frag(&nc->page, len, gfp_mask);
+	pfmemalloc = nc->page.pfmemalloc;
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	if (unlikely(!data))
 		return NULL;
 
@@ -518,7 +527,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 	}
 
 	/* use OR instead of assignment to avoid clearing of bits in mask */
-	if (nc->page.pfmemalloc)
+	if (pfmemalloc)
 		skb->pfmemalloc = 1;
 	skb->head_frag = 1;
 
@@ -762,23 +771,26 @@ EXPORT_SYMBOL(consume_skb);
 
 void __kfree_skb_flush(void)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	/* flush skb_cache if containing objects */
 	if (nc->skb_count) {
 		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
 				     nc->skb_cache);
 		nc->skb_count = 0;
 	}
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 }
 
 static inline void _kfree_skb_defer(struct sk_buff *skb)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 
 	/* drop skb->head and call any destructors for packet */
 	skb_release_all(skb);
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	/* record skb to CPU local list */
 	nc->skb_cache[nc->skb_count++] = skb;
 
@@ -793,6 +805,7 @@ static inline void _kfree_skb_defer(struct sk_buff *skb)
 				     nc->skb_cache);
 		nc->skb_count = 0;
 	}
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 }
 
 void __kfree_skb_defer(struct sk_buff *skb)
--
2.7.4