#endif
};
++#ifdef CONFIG_CPUMASK_OFFSTACK
++/* We are not allocating a variable-sized bad_irq_desc.affinity */
++#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
++#endif
++
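For context on why the build is made to fail here: with CONFIG_CPUMASK_OFFSTACK the affinity mask inside an irq_desc becomes a pointer that must be allocated at run time, so a statically initialized bad_irq_desc cannot carry a usable mask. A simplified sketch of the two shapes of the type (condensed from the real cpumask definitions, not a verbatim copy):

/*
 * Simplified sketch: with CONFIG_CPUMASK_OFFSTACK the mask lives off
 * the struct and needs alloc_cpumask_var(); otherwise the storage is
 * embedded and static initialization works.
 */
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;   /* must be allocated at run time */
#else
typedef struct cpumask cpumask_var_t[1]; /* storage embedded in the struct */
#endif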
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
#endif
generic_handle_irq(irq);
--- #ifndef CONFIG_IPIPE /* Useless and bugous over the I-pipe: IRQs are threaded. */
--- /* If we're the only interrupt running (ignoring IRQ15 which is for
--- syscalls), lower our priority to IRQ14 so that softirqs run at
--- that level. If there's another, lower-level interrupt, irq_exit
--- will defer softirqs to that. */
+++ #ifndef CONFIG_IPIPE
+++ /*
+++ * If we're the only interrupt running (ignoring IRQ15 which
+++ * is for syscalls), lower our priority to IRQ14 so that
+++ * softirqs run at that level. If there's another,
+++ * lower-level interrupt, irq_exit will defer softirqs to
+++ * that. If the interrupt pipeline is enabled, we are already
+++ * running at IRQ14 priority, so we don't need this code.
+++ */
CSYNC();
pending = bfin_read_IPEND() & ~0x8000;
other_ints = pending & (pending - 1);
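The other_ints test relies on a classic bit trick: pending & (pending - 1) clears the lowest set bit, so the result is nonzero exactly when more than one IPEND bit is set. A standalone sketch of the idea (the sample value is made up):

#include <stdio.h>

int main(void)
{
	unsigned int pending = 0x2010; /* hypothetical: two IRQs pending */
	/* Clearing the lowest set bit leaves something behind only if
	 * another bit was set, i.e. another interrupt besides ours. */
	unsigned int other_ints = pending & (pending - 1);

	printf("other interrupts active: %s\n", other_ints ? "yes" : "no");
	return 0;
}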
#define BRANCH_PROFILE()
#endif
+#ifdef CONFIG_EVENT_TRACER
+#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
+ *(_ftrace_events) \
+ VMLINUX_SYMBOL(__stop_ftrace_events) = .;
+#else
+#define FTRACE_EVENTS()
+#endif
+
+#ifdef CONFIG_TRACING
+#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
+ *(__trace_printk_fmt) /* trace_printk format strings */ \
+ VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
+#else
+#define TRACE_PRINTKS()
+#endif
+
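Both FTRACE_EVENTS() and TRACE_PRINTKS() follow the same pattern: bracket an input section with start/stop symbols so initialization code can walk every record placed there at run time. A minimal userspace analogue (the section name demo_fmts is invented; GNU ld provides the __start_/__stop_ symbols automatically for section names that are valid C identifiers):

#include <stdio.h>

static const char *fmt_a __attribute__((section("demo_fmts"), used)) = "hello %d\n";
static const char *fmt_b __attribute__((section("demo_fmts"), used)) = "world %s\n";

extern const char *__start_demo_fmts[];
extern const char *__stop_demo_fmts[];

int main(void)
{
	/* Walk every pointer the compiler dropped into the section. */
	for (const char **p = __start_demo_fmts; p < __stop_demo_fmts; p++)
		printf("registered format: %s", *p);
	return 0;
}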
/* .data section */
#define DATA_DATA \
*(.data) \
*(__tracepoints) \
VMLINUX_SYMBOL(__stop___tracepoints) = .; \
LIKELY_PROFILE() \
- BRANCH_PROFILE()
+ BRANCH_PROFILE() \
+ TRACE_PRINTKS() \
+ FTRACE_EVENTS()
#define RO_DATA(align) \
. = ALIGN((align)); \
*(__vermagic) /* Kernel version magic */ \
*(__markers_strings) /* Markers: strings */ \
*(__tracepoints_strings)/* Tracepoints: strings */ \
-- TRACE_PRINTKS() \
} \
\
.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
*(.initcall7.init) \
*(.initcall7s.init)
++/**
++ * PERCPU_VADDR - define output section for percpu area
++ * @vaddr: explicit base address (optional)
++ * @phdr: destination PHDR (optional)
++ *
++ * Macro which expands to the output section for the percpu area. If
++ * @vaddr is not blank, it specifies an explicit base address and all
++ * percpu symbols will be offset from that address. If blank, @vaddr
++ * always equals @laddr + LOAD_OFFSET.
++ *
++ * @phdr defines the output PHDR to use if not blank. Be warned that
++ * the output PHDR is sticky: if @phdr is specified, the next output
++ * section in the linker script will go there too. @phdr should have
++ * a leading colon.
++ *
++ * Note that this macro defines __per_cpu_load as an absolute symbol.
++ * If there is no need to put the percpu section at a predetermined
++ * address, use PERCPU().
++ */
++#define PERCPU_VADDR(vaddr, phdr) \
++ VMLINUX_SYMBOL(__per_cpu_load) = .; \
++ .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
++ - LOAD_OFFSET) { \
++ VMLINUX_SYMBOL(__per_cpu_start) = .; \
++ *(.data.percpu.first) \
++ *(.data.percpu.page_aligned) \
++ *(.data.percpu) \
++ *(.data.percpu.shared_aligned) \
++ VMLINUX_SYMBOL(__per_cpu_end) = .; \
++ } phdr \
++ . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
++
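As a usage sketch (the PHDR name is an assumption and would have to be declared in the script's PHDRS block), an architecture that wants the percpu symbols based at virtual address 0 in a dedicated segment could write:

	PERCPU_VADDR(0, :percpu)

Percpu accesses can then be done as small offsets from a per-CPU base register, while __per_cpu_load still records where the initial percpu data was actually loaded.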
++/**
++ * PERCPU - define output section for percpu area, simple version
++ * @align: required alignment
++ *
++ * Aligns to @align and outputs the output section for the percpu
++ * area. This macro doesn't manipulate @vaddr or @phdr, so
++ * __per_cpu_load and __per_cpu_start will be identical.
++ *
++ * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
++ * that __per_cpu_load is defined as a relative symbol against
++ * .data.percpu, which is required for the relocatable x86_32
++ * configuration.
++ */
#define PERCPU(align) \
. = ALIGN(align); \
-- VMLINUX_SYMBOL(__per_cpu_start) = .; \
-- .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
++ .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
++ VMLINUX_SYMBOL(__per_cpu_load) = .; \
++ VMLINUX_SYMBOL(__per_cpu_start) = .; \
++ *(.data.percpu.first) \
*(.data.percpu.page_aligned) \
*(.data.percpu) \
*(.data.percpu.shared_aligned) \
-- } \
-- VMLINUX_SYMBOL(__per_cpu_end) = .;
++ VMLINUX_SYMBOL(__per_cpu_end) = .; \
++ }
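A typical invocation from an architecture's vmlinux.lds.S, assuming nothing beyond page alignment is needed:

	PERCPU(PAGE_SIZE)

Here __per_cpu_load and __per_cpu_start coincide, as the comment above notes.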
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
+#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>
WARN_ON_ONCE(in_irq());
raw_local_irq_save(flags);
- add_preempt_count(SOFTIRQ_OFFSET);
+ /*
+ * The preempt tracer hooks into add_preempt_count and will break
+ * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
+ * is set and before current->softirq_enabled is cleared.
+ * We must increment preempt_count by hand here and call
+ * trace_preempt_off() manually later.
+ */
+ preempt_count() += SOFTIRQ_OFFSET;
/*
* Were softirqs turned off above:
*/
if (softirq_count() == SOFTIRQ_OFFSET)
trace_softirqs_off(ip);
raw_local_irq_restore(flags);
+
+ if (preempt_count() == SOFTIRQ_OFFSET)
+ trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
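The two "== SOFTIRQ_OFFSET" comparisons fire only on the outermost disable; nested calls push the count to a multiple of the offset and stay silent. A standalone sketch of that nesting logic (SOFTIRQ_OFFSET mirrors the kernel's value; the function is a stand-in, not the real __local_bh_disable):

#include <stdio.h>

#define SOFTIRQ_OFFSET 0x100 /* one unit of the preempt_count softirq field */

static unsigned int preempt_count;

static void local_bh_disable_sketch(void)
{
	preempt_count += SOFTIRQ_OFFSET;
	/* Only the first, outermost disable sees exactly one unit. */
	if (preempt_count == SOFTIRQ_OFFSET)
		printf("outermost disable: trace hooks run here\n");
}

int main(void)
{
	local_bh_disable_sketch(); /* traces */
	local_bh_disable_sketch(); /* nested: no trace */
	return 0;
}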
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
preempt_enable_no_resched();
cond_resched();
preempt_disable();
+++ rcu_qsctr_inc((long)__bind_cpu);
}
preempt_enable();
set_current_state(TASK_INTERRUPTIBLE);
return 0;
}
++int __init __weak arch_probe_nr_irqs(void)
++{
++ return 0;
++}
++
int __init __weak arch_early_irq_init(void)
{
return 0;
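Because these helpers are declared __weak, an architecture can replace them simply by providing a strong definition; no generic code changes are needed. A standalone sketch of the mechanism itself (the symbol is reused for illustration; linking in another object that defines it without the weak attribute silently overrides this default):

#include <stdio.h>

/* Default, overridable definition (the kernel spells this __weak). */
__attribute__((weak)) int arch_probe_nr_irqs(void)
{
	return 0;
}

int main(void)
{
	/* With no strong definition linked in, the weak default runs. */
	printf("arch_probe_nr_irqs() = %d\n", arch_probe_nr_irqs());
	return 0;
}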