From 16d1ba648b39f06369d6a6d1711884f6dbc2b250 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 13 Nov 2011 17:17:09 +0100
Subject: [PATCH 153/352] softirq: Check preemption after reenabling interrupts

raise_softirq_irqoff() disables interrupts and wakes the softirq
daemon, but after reenabling interrupts there is no preemption check,
so the execution of the softirq thread might be delayed arbitrarily.

In principle we could add that check to local_irq_enable/restore, but
that's overkill as the raise_softirq_irqoff() sections are the only
ones which show this behaviour.

Reported-by: Carsten Emde <cbe@osadl.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
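Note: a minimal sketch of the call pattern every hunk below targets, for
context only (the surrounding caller and its local variables are assumed;
BLOCK_SOFTIRQ stands in for whichever softirq the changed call site raises).
Per the include/linux/preempt.h hunk, preempt_check_resched_rt() expands to
preempt_check_resched() when CONFIG_PREEMPT_RT_BASE is set and to barrier()
otherwise, so the added calls are no-ops on non-RT configurations.

	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(BLOCK_SOFTIRQ);	/* may wake ksoftirqd */
	local_irq_restore(flags);
	preempt_check_resched_rt();	/* catch the wakeup missed while IRQs were off */
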
 block/blk-softirq.c     | 3 +++
 include/linux/preempt.h | 3 +++
 lib/irq_poll.c          | 5 +++++
 net/core/dev.c          | 7 +++++++
 4 files changed, 18 insertions(+)

diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 06cf980..c403426 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -51,6 +51,7 @@ static void trigger_softirq(void *data)
 	raise_softirq_irqoff(BLOCK_SOFTIRQ);
 
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 
 /*
@@ -89,6 +90,7 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
 			 this_cpu_ptr(&blk_cpu_done));
 	raise_softirq_irqoff(BLOCK_SOFTIRQ);
 	local_irq_enable();
+	preempt_check_resched_rt();
 
 	return 0;
 }
@@ -141,6 +143,7 @@ void __blk_complete_request(struct request *req)
 		goto do_local;
 
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 
 /**
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index f17045b..2eed4b5 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -165,8 +165,10 @@ do { \
 
 #ifdef CONFIG_PREEMPT_RT_BASE
 # define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+# define preempt_check_resched_rt() preempt_check_resched()
 #else
 # define preempt_enable_no_resched() preempt_enable()
+# define preempt_check_resched_rt() barrier();
 #endif
 
 #define preemptible()	(preempt_count() == 0 && !irqs_disabled())
@@ -237,6 +239,7 @@ do { \
 #define preempt_disable_notrace()		barrier()
 #define preempt_enable_no_resched_notrace()	barrier()
 #define preempt_enable_notrace()		barrier()
+#define preempt_check_resched_rt()		barrier()
 #define preemptible()				0
 
 #endif /* CONFIG_PREEMPT_COUNT */
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 1d6565e8..b23a797 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -36,6 +36,7 @@ void irq_poll_sched(struct irq_poll *iop)
 	list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
 	__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(irq_poll_sched);
 
@@ -71,6 +72,7 @@ void irq_poll_complete(struct irq_poll *iop)
 	local_irq_save(flags);
 	__irq_poll_complete(iop);
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(irq_poll_complete);
 
@@ -95,6 +97,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
 		}
 
 		local_irq_enable();
+		preempt_check_resched_rt();
 
 		/* Even though interrupts have been re-enabled, this
 		 * access is safe because interrupts can only add new
@@ -132,6 +135,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
 		__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
 
 	local_irq_enable();
+	preempt_check_resched_rt();
 }
 
 /**
@@ -195,6 +199,7 @@ static int irq_poll_cpu_dead(unsigned int cpu)
 			 this_cpu_ptr(&blk_cpu_iopoll));
 	__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
 	local_irq_enable();
+	preempt_check_resched_rt();
 
 	return 0;
 }
diff --git a/net/core/dev.c b/net/core/dev.c
index 071c589..7ff9f83e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2316,6 +2316,7 @@ static void __netif_reschedule(struct Qdisc *q)
 	sd->output_queue_tailp = &q->next_sched;
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 
 void __netif_schedule(struct Qdisc *q)
@@ -2400,6 +2401,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
 	__this_cpu_write(softnet_data.completion_queue, skb);
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(__dev_kfree_skb_irq);
 
@@ -3845,6 +3847,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	rps_unlock(sd);
 
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 
 	atomic_long_inc(&skb->dev->rx_dropped);
 	kfree_skb(skb);
@@ -4905,6 +4908,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
 		sd->rps_ipi_list = NULL;
 
 		local_irq_enable();
+		preempt_check_resched_rt();
 
 		/* Send pending IPI's to kick RPS processing on remote cpus. */
 		while (remsd) {
@@ -4918,6 +4922,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
 	} else
 #endif
 		local_irq_enable();
+	preempt_check_resched_rt();
 }
 
 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
@@ -4995,6 +5000,7 @@ void __napi_schedule(struct napi_struct *n)
 	local_irq_save(flags);
 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(__napi_schedule);
 
@@ -8099,6 +8105,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_enable();
+	preempt_check_resched_rt();
 
 	/* Process offline CPU's input_pkt_queue */
 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
-- 
2.7.4