www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge branches 'tracing/doc', 'tracing/ftrace', 'tracing/printk' and 'linus' into...
author Ingo Molnar <mingo@elte.hu>
Tue, 10 Mar 2009 08:56:25 +0000 (09:56 +0100)
committer Ingo Molnar <mingo@elte.hu>
Tue, 10 Mar 2009 08:56:25 +0000 (09:56 +0100)
arch/blackfin/kernel/irqchip.c
include/asm-generic/vmlinux.lds.h
kernel/softirq.c

diff --combined arch/blackfin/kernel/irqchip.c
index 23e9aa080710f095e3389b74892c2b6933dbdbda,23e9aa080710f095e3389b74892c2b6933dbdbda,75724eee6494c65c87545e14682f8cfe213877dd,7fd12656484666ee4c29784a1d2d1e0e4e5fee04..1ab5b532ec724c97e02dc1f71282aa9918d9a83f
@@@@@ -70,11 -70,11 -70,6 -70,6 +70,11 @@@@@ static struct irq_desc bad_irq_desc = 
    #endif
    };
    
  ++#ifdef CONFIG_CPUMASK_OFFSTACK
  ++/* We are not allocating a variable-sized bad_irq_desc.affinity */
  ++#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
  ++#endif
  ++
    int show_interrupts(struct seq_file *p, void *v)
    {
        int i = *(loff_t *) v, j;
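
On the new CONFIG_CPUMASK_OFFSTACK guard above: with that option enabled, a cpumask_var_t is a bare pointer that must be allocated at run time, and the static bad_irq_desc initializer never allocates one. In sketch form, the two definitions from include/linux/cpumask.h look roughly like this (simplified for illustration):

    #ifdef CONFIG_CPUMASK_OFFSTACK
    /* mask lives off the structure: a pointer that needs
     * alloc_cpumask_var() at run time, which a statically
     * initialized irq_desc never calls -- hence the #error */
    typedef struct cpumask *cpumask_var_t;
    #else
    /* mask embedded in the structure: static initialization works */
    typedef struct cpumask cpumask_var_t[1];
    #endif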
@@@@@ -149,11 -149,11 -144,11 -144,15 +149,15 @@@@@ asmlinkage void asm_do_IRQ(unsigned in
    #endif
        generic_handle_irq(irq);
    
--- #ifndef CONFIG_IPIPE        /* Useless and bugous over the I-pipe: IRQs are threaded. */
---     /* If we're the only interrupt running (ignoring IRQ15 which is for
---        syscalls), lower our priority to IRQ14 so that softirqs run at
---        that level.  If there's another, lower-level interrupt, irq_exit
---        will defer softirqs to that.  */
+++ #ifndef CONFIG_IPIPE
+++     /*
+++      * If we're the only interrupt running (ignoring IRQ15 which
+++      * is for syscalls), lower our priority to IRQ14 so that
+++      * softirqs run at that level.  If there's another,
+++      * lower-level interrupt, irq_exit will defer softirqs to
+++      * that. If the interrupt pipeline is enabled, we are already
+++      * running at IRQ14 priority, so we don't need this code.
+++      */
        CSYNC();
        pending = bfin_read_IPEND() & ~0x8000;
        other_ints = pending & (pending - 1);
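
As an aside on the retained logic: other_ints = pending & (pending - 1) clears the lowest set bit of the IPEND snapshot, so a nonzero result means at least one other interrupt level is pending besides our own. A standalone sketch of the test (the IPEND value is hypothetical):

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical IPEND snapshot with bits 5 and 13 pending */
        unsigned int pending = (1u << 13) | (1u << 5);
        /* clear the lowest set bit: nonzero iff >1 bit was set */
        unsigned int other_ints = pending & (pending - 1);

        printf("pending=%#x other_ints=%#x: %s\n", pending, other_ints,
               other_ints ? "irq_exit will defer softirqs"
                          : "lower priority to IRQ14 for softirqs");
        return 0;
    }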
diff --combined include/asm-generic/vmlinux.lds.h
index 89997dfdf3d09f5147326912128ba882c57e39d8,89997dfdf3d09f5147326912128ba882c57e39d8,d656b462402431f0b7e4d94a5270883812a40f61,c61fab1dd2f82cbde0df850274a595d4c79aa450..0e0f39be6c8b668811c168ab28335108cd76c0dc
    #define BRANCH_PROFILE()
    #endif
    
   +#ifdef CONFIG_EVENT_TRACER
   +#define FTRACE_EVENTS()     VMLINUX_SYMBOL(__start_ftrace_events) = .;      \
   +                    *(_ftrace_events)                               \
   +                    VMLINUX_SYMBOL(__stop_ftrace_events) = .;
   +#else
   +#define FTRACE_EVENTS()
   +#endif
   +
   +#ifdef CONFIG_TRACING
   +#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .;      \
   +                     *(__trace_printk_fmt) /* trace_printk format pointers */ \
   +                     VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
   +#else
   +#define TRACE_PRINTKS()
   +#endif
   +
    /* .data section */
    #define DATA_DATA                                                   \
        *(.data)                                                        \
        *(__tracepoints)                                                \
        VMLINUX_SYMBOL(__stop___tracepoints) = .;                       \
        LIKELY_PROFILE()                                                \
   -    BRANCH_PROFILE()
   +    BRANCH_PROFILE()                                                \
++ +    TRACE_PRINTKS()                                                 \
   +    FTRACE_EVENTS()
    
    #define RO_DATA(align)                                                      \
        . = ALIGN((align));                                             \
                *(__vermagic)           /* Kernel version magic */      \
                *(__markers_strings)    /* Markers: strings */          \
                *(__tracepoints_strings)/* Tracepoints: strings */      \
--              TRACE_PRINTKS()                                 \
        }                                                               \
                                                                        \
        .rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {          \
        *(.initcall7.init)                                              \
        *(.initcall7s.init)
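
The FTRACE_EVENTS() and TRACE_PRINTKS() macros added above follow the kernel's usual bracketing pattern: a start and a stop linker symbol delimit everything emitted into a named section, and C code then walks the range between them. A self-contained sketch of the same pattern (GNU toolchains auto-generate __start_<sec>/__stop_<sec> for section names that are valid C identifiers; the demo_fmt section and variables here are made up for illustration):

    #include <stdio.h>

    /* place two format strings in a custom section */
    static const char *fmt1 __attribute__((section("demo_fmt"), used)) = "one";
    static const char *fmt2 __attribute__((section("demo_fmt"), used)) = "two";

    /* bounds provided by the linker; the macros above create the
     * equivalent symbols explicitly in the kernel's linker script */
    extern const char *__start_demo_fmt[], *__stop_demo_fmt[];

    int main(void)
    {
        for (const char **p = __start_demo_fmt; p < __stop_demo_fmt; p++)
            printf("%s\n", *p);
        return 0;
    }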
    
  ++/**
  ++ * PERCPU_VADDR - define output section for percpu area
  ++ * @vaddr: explicit base address (optional)
  ++ * @phdr: destination PHDR (optional)
  ++ *
  ++ * Macro which expands to the output section for the percpu area.  If
  ++ * @vaddr is not blank, it specifies an explicit base address and all
  ++ * percpu symbols will be offset from the given address.  If blank,
  ++ * @vaddr always equals @laddr + LOAD_OFFSET.
  ++ *
  ++ * @phdr defines the output PHDR to use if not blank.  Be warned that
  ++ * output PHDR is sticky.  If @phdr is specified, the next output
  ++ * section in the linker script will go there too.  @phdr should have
  ++ * a leading colon.
  ++ *
  ++ * Note that this macro defines __per_cpu_load as an absolute symbol.
  ++ * If there is no need to put the percpu section at a predetermined
  ++ * address, use PERCPU().
  ++ */
  ++#define PERCPU_VADDR(vaddr, phdr)                                   \
  ++    VMLINUX_SYMBOL(__per_cpu_load) = .;                             \
  ++    .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)          \
  ++                            - LOAD_OFFSET) {                        \
  ++            VMLINUX_SYMBOL(__per_cpu_start) = .;                    \
  ++            *(.data.percpu.first)                                   \
  ++            *(.data.percpu.page_aligned)                            \
  ++            *(.data.percpu)                                         \
  ++            *(.data.percpu.shared_aligned)                          \
  ++            VMLINUX_SYMBOL(__per_cpu_end) = .;                      \
  ++    } phdr                                                          \
  ++    . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
  ++
  ++/**
  ++ * PERCPU - define output section for percpu area, simple version
  ++ * @align: required alignment
  ++ *
  ++ * Aligns to @align and outputs the output section for the percpu
  ++ * area.  This macro doesn't manipulate @vaddr or @phdr;
  ++ * __per_cpu_load and __per_cpu_start will be identical.
  ++ *
  ++ * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
  ++ * that __per_cpu_load is defined as a relative symbol against
  ++ * .data.percpu which is required for relocatable x86_32
  ++ * configuration.
  ++ */
    #define PERCPU(align)                                                       \
        . = ALIGN(align);                                               \
  --    VMLINUX_SYMBOL(__per_cpu_start) = .;                            \
  --    .data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {          \
  ++    .data.percpu    : AT(ADDR(.data.percpu) - LOAD_OFFSET) {        \
  ++            VMLINUX_SYMBOL(__per_cpu_load) = .;                     \
  ++            VMLINUX_SYMBOL(__per_cpu_start) = .;                    \
  ++            *(.data.percpu.first)                                   \
                *(.data.percpu.page_aligned)                            \
                *(.data.percpu)                                         \
                *(.data.percpu.shared_aligned)                          \
  --    }                                                               \
  --    VMLINUX_SYMBOL(__per_cpu_end) = .;
  ++            VMLINUX_SYMBOL(__per_cpu_end) = .;                      \
  ++    }
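
For context on how the two macros are meant to be consumed, an architecture's vmlinux.lds.S picks one or the other, roughly as follows (the PAGE_SIZE alignment and the :percpu PHDR name are illustrative assumptions, not taken from this diff):

    /* simple form: percpu output section at the current address;
     * __per_cpu_load and __per_cpu_start coincide */
    PERCPU(PAGE_SIZE)

    /* explicit form: percpu symbols based at vaddr 0, emitted into
     * a dedicated program header (note the leading colon) */
    PERCPU_VADDR(0, :percpu)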
diff --combined kernel/softirq.c
index 98dd68eea9e6ab1106428679174d5fc200a31fc9,98dd68eea9e6ab1106428679174d5fc200a31fc9,6edfc2c11d99af1c845689adf0e9fbf1043e723d,9041ea7948feffbf887ed92fb8ce1e73ad55c81c..bbf6d6496f05f426183ce8ed4872345450166a84
    #include <linux/freezer.h>
    #include <linux/kthread.h>
    #include <linux/rcupdate.h>
   +#include <linux/ftrace.h>
    #include <linux/smp.h>
    #include <linux/tick.h>
    
@@@@@ -80,23 -80,23 -80,23 -79,13 +80,23 @@@@@ static void __local_bh_disable(unsigne
        WARN_ON_ONCE(in_irq());
    
        raw_local_irq_save(flags);
   -    add_preempt_count(SOFTIRQ_OFFSET);
   +    /*
   +     * The preempt tracer hooks into add_preempt_count and will break
   +     * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
   +     * is set and before current->softirq_enabled is cleared.
   +     * We must manually increment preempt_count here and manually
   +     * call the trace_preempt_off later.
   +     */
   +    preempt_count() += SOFTIRQ_OFFSET;
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);
   +
   +    if (preempt_count() == SOFTIRQ_OFFSET)
   +            trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
    }
    #else /* !CONFIG_TRACE_IRQFLAGS */
    static inline void __local_bh_disable(unsigned long ip)
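
On the __local_bh_disable() change: with CONFIG_PREEMPT_TRACER, add_preempt_count() itself fires the preempt-off hook, along these lines (a simplified model for illustration, not this file's code):

    void add_preempt_count(int val)
    {
        preempt_count() += val;
        if (preempt_count() == val) {  /* preemption just disabled */
            /* the tracer can take locks and re-enter lockdep */
            trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
        }
    }

Called from __local_bh_disable(), that hook would run after SOFTIRQ_OFFSET is set but before trace_softirqs_off() updates lockdep's view, which is the inconsistency the new comment describes; hence the raw increment, with trace_preempt_off() deferred to the end of the function.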
@@@@@ -637,6 -637,6 -637,6 -626,7 +637,7 @@@@@ static int ksoftirqd(void * __bind_cpu
                        preempt_enable_no_resched();
                        cond_resched();
                        preempt_disable();
+++                     rcu_qsctr_inc((long)__bind_cpu);
                }
                preempt_enable();
                set_current_state(TASK_INTERRUPTIBLE);
@@@@@ -806,11 -806,11 -806,6 -796,6 +807,11 @@@@@ int __init __weak early_irq_init(void
        return 0;
    }
    
  ++int __init __weak arch_probe_nr_irqs(void)
  ++{
  ++    return 0;
  ++}
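
Like the early_irq_init()/arch_early_irq_init() stubs around it, the weak arch_probe_nr_irqs() links as a no-op default; an architecture that wants to size its IRQ space can supply a strong definition, along these lines (hypothetical, not part of this commit):

    /* hypothetical arch override of the weak default above */
    int __init arch_probe_nr_irqs(void)
    {
        nr_irqs = NR_IRQS;    /* assumption: arch-specific sizing */
        return 0;
    }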
  ++
    int __init __weak arch_early_irq_init(void)
    {
        return 0;