From 8e74e4845d1825e91b3209872dad7895aaa219f1 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 13 Jan 2016 15:55:02 +0100
Subject: [PATCH 236/352] net: move xmit_recursion to per-task variable on -RT

A softirq on -RT can be preempted. That means one task is in
__dev_queue_xmit(), gets preempted, and another task may enter
__dev_queue_xmit() as well. netperf together with a bridge device
will then trigger the `recursion alert` because each task increments
the xmit_recursion variable which is per-CPU.
A virtual device like br0 is required to trigger this warning.

This patch moves the counter to a per-task variable instead of per-CPU so
it counts the recursion properly on -RT.

Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
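Note (illustrative only, not part of the kernel change): the false positive
described above can be modelled in plain userspace C, assuming POSIX
threads. A process-global counter stands in for the per-CPU
xmit_recursion, a thread-local one for the per-task counter this patch
introduces; two threads that each make a single, non-recursive "xmit"
call still drive the shared counter to 2, while each per-thread counter
never leaves 1.

/*
 * Userspace sketch only: shared_depth models the per-CPU counter that
 * two mutually preempting tasks on one CPU would share, task_depth
 * models current->xmit_recursion. Build with: gcc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static int shared_depth;		/* ~ per-CPU xmit_recursion */
static __thread int task_depth;		/* ~ current->xmit_recursion */

static void *xmit(void *unused)
{
	(void)unused;

	__sync_fetch_and_add(&shared_depth, 1);
	task_depth++;

	usleep(100 * 1000);	/* widen the overlap window ("preemption") */

	/* Neither call recursed, yet the shared counter reports depth 2. */
	if (shared_depth > 1)
		printf("shared counter: depth %d -> false recursion alert\n",
		       shared_depth);
	/* The per-task counter never exceeds the real call depth of 1. */
	if (task_depth > 1)
		printf("per-task counter: depth %d\n", task_depth);

	task_depth--;
	__sync_fetch_and_sub(&shared_depth, 1);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, xmit, NULL);
	pthread_create(&b, NULL, xmit, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}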
 include/linux/netdevice.h | 41 ++++++++++++++++++++++++++++++++++++++++-
 include/linux/sched.h     |  3 +++
 net/core/dev.c            |  9 +++++----
 net/core/filter.c         |  6 +++---
 4 files changed, 51 insertions(+), 8 deletions(-)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b495762..54ca094 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2471,14 +2471,53 @@ void netdev_freemem(struct net_device *dev);
 void synchronize_net(void);
 int init_dummy_netdev(struct net_device *dev);
 
-DECLARE_PER_CPU(int, xmit_recursion);
 #define XMIT_RECURSION_LIMIT	10
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline int dev_recursion_level(void)
+{
+	return current->xmit_recursion;
+}
+
+static inline int xmit_rec_read(void)
+{
+	return current->xmit_recursion;
+}
+
+static inline void xmit_rec_inc(void)
+{
+	current->xmit_recursion++;
+}
+
+static inline void xmit_rec_dec(void)
+{
+	current->xmit_recursion--;
+}
+
+#else
+
+DECLARE_PER_CPU(int, xmit_recursion);
 
 static inline int dev_recursion_level(void)
 {
 	return this_cpu_read(xmit_recursion);
 }
 
+static inline int xmit_rec_read(void)
+{
+	return __this_cpu_read(xmit_recursion);
+}
+
+static inline void xmit_rec_inc(void)
+{
+	__this_cpu_inc(xmit_recursion);
+}
+
+static inline void xmit_rec_dec(void)
+{
+	__this_cpu_dec(xmit_recursion);
+}
+#endif
+
 struct net_device *dev_get_by_index(struct net *net, int ifindex);
 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3950b85..93ae84c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2175,6 +2175,9 @@ struct task_struct {
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 	unsigned long task_state_change;
 #endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+	int xmit_recursion;
+#endif
 	int pagefault_disabled;
 #ifdef CONFIG_MMU
 	struct task_struct *oom_reaper_list;
diff --git a/net/core/dev.c b/net/core/dev.c
index 7e9749e..1b44303 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3225,8 +3225,10 @@ static void skb_update_prio(struct sk_buff *skb)
 #define skb_update_prio(skb)
 #endif
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 DEFINE_PER_CPU(int, xmit_recursion);
 EXPORT_SYMBOL(xmit_recursion);
+#endif
 
 /**
  *	dev_loopback_xmit - loop back @skb
@@ -3460,8 +3462,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 		int cpu = smp_processor_id(); /* ok because BHs are off */
 
 		if (txq->xmit_lock_owner != cpu) {
-			if (unlikely(__this_cpu_read(xmit_recursion) >
-				     XMIT_RECURSION_LIMIT))
+			if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT))
 				goto recursion_alert;
 
 			skb = validate_xmit_skb(skb, dev);
@@ -3471,9 +3472,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 			HARD_TX_LOCK(dev, txq, cpu);
 
 			if (!netif_xmit_stopped(txq)) {
-				__this_cpu_inc(xmit_recursion);
+				xmit_rec_inc();
 				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
-				__this_cpu_dec(xmit_recursion);
+				xmit_rec_dec();
 				if (dev_xmit_complete(rc)) {
 					HARD_TX_UNLOCK(dev, txq);
 					goto out;
diff --git a/net/core/filter.c b/net/core/filter.c
index c385c55..feb96c2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1657,7 +1657,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 {
 	int ret;
 
-	if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
+	if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT)) {
 		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
 		kfree_skb(skb);
 		return -ENETDOWN;
@@ -1665,9 +1665,9 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 
 	skb->dev = dev;
 
-	__this_cpu_inc(xmit_recursion);
+	xmit_rec_inc();
 	ret = dev_queue_xmit(skb);
-	__this_cpu_dec(xmit_recursion);
+	xmit_rec_dec();
 
 	return ret;
 }
-- 
2.7.4