cpumask: convert arch/x86/kernel/nmi.c's backtrace_mask to a cpumask_var_t
author Rusty Russell <rusty@rustcorp.com.au>
Fri, 13 Mar 2009 04:19:49 +0000 (14:49 +1030)
committer Rusty Russell <rusty@rustcorp.com.au>
Fri, 13 Mar 2009 04:19:49 +0000 (14:49 +1030)
Impact: cleanup, reduce memory usage for CONFIG_CPUMASK_OFFSTACK=y

I *think* every path calls check_nmi_watchdog before using the
watchdog, so that's the right place for the initialization.

If that's wrong, we'll get a nice NULL-deref with
CONFIG_CPUMASK_OFFSTACK=y, and have uncovered another bug.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
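
For context (not part of the commit itself): the memory saving comes from how cpumask_var_t is defined in <linux/cpumask.h>. Roughly:

        #ifdef CONFIG_CPUMASK_OFFSTACK
        /* NR_CPUS may be huge (e.g. 4096), so the bitmap lives in separately
         * allocated storage and the variable itself is only a pointer. */
        typedef struct cpumask *cpumask_var_t;
        #else
        /* Small NR_CPUS: the bitmap is embedded; an array of one element lets
         * "cpumask_var_t m" still decay to a pointer where one is expected. */
        typedef struct cpumask cpumask_var_t[1];
        #endif

With CONFIG_CPUMASK_OFFSTACK=n this is exactly as cheap as the old cpumask_t and needs no allocation; with CONFIG_CPUMASK_OFFSTACK=y the full NR_CPUS-bit bitmap no longer sits in the kernel image, only a pointer does, and the runtime allocation can be sized for the CPUs actually possible. That is why check_nmi_watchdog() gains the alloc_cpumask_var() call in the diff below.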
arch/x86/kernel/apic/nmi.c

index bdfad80c3cf196e752f8cf16ce8a3f37ed096c6c..d6bd6240715256f4947166f347c0ce1d296a440c 100644
@@ -39,7 +39,7 @@
 int unknown_nmi_panic;
 int nmi_watchdog_enabled;
 
-static cpumask_t backtrace_mask = CPU_MASK_NONE;
+static cpumask_var_t backtrace_mask;
 
 /* nmi_active:
  * >0: the lapic NMI watchdog is active, but can be disabled
@@ -138,6 +138,7 @@ int __init check_nmi_watchdog(void)
        if (!prev_nmi_count)
                goto error;
 
+       alloc_cpumask_var(&backtrace_mask, GFP_KERNEL);
        printk(KERN_INFO "Testing NMI watchdog ... ");
 
 #ifdef CONFIG_SMP
@@ -413,14 +414,14 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                touched = 1;
        }
 
-       if (cpu_isset(cpu, backtrace_mask)) {
+       if (cpumask_test_cpu(cpu, backtrace_mask)) {
                static DEFINE_SPINLOCK(lock);   /* Serialise the printks */
 
                spin_lock(&lock);
                printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
                dump_stack();
                spin_unlock(&lock);
-               cpu_clear(cpu, backtrace_mask);
+               cpumask_clear_cpu(cpu, backtrace_mask);
        }
 
        /* Could check oops_in_progress here too, but it's safer not to */
@@ -554,10 +555,10 @@ void __trigger_all_cpu_backtrace(void)
 {
        int i;
 
-       backtrace_mask = cpu_online_map;
+       cpumask_copy(backtrace_mask, cpu_online_mask);
        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
        for (i = 0; i < 10 * 1000; i++) {
-               if (cpus_empty(backtrace_mask))
+               if (cpumask_empty(backtrace_mask))
                        break;
                mdelay(1);
        }
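
For completeness, a minimal sketch of the allocate/use/free life cycle this patch adopts; demo_mask and demo_init() are hypothetical names, but the cpumask calls are the real API. One detail worth noting: alloc_cpumask_var() returns false on allocation failure when CONFIG_CPUMASK_OFFSTACK=y, so a caller can check its return value instead of relying on the NULL dereference the commit message mentions.

        #include <linux/cpumask.h>
        #include <linux/gfp.h>
        #include <linux/init.h>

        static cpumask_var_t demo_mask;         /* hypothetical example variable */

        static int __init demo_init(void)
        {
                /* A pointer under CONFIG_CPUMASK_OFFSTACK=y, so allocate it first. */
                if (!alloc_cpumask_var(&demo_mask, GFP_KERNEL))
                        return -ENOMEM;

                cpumask_copy(demo_mask, cpu_online_mask);   /* snapshot online CPUs */

                if (cpumask_test_cpu(0, demo_mask))         /* per-CPU test ... */
                        cpumask_clear_cpu(0, demo_mask);    /* ... and clear */

                free_cpumask_var(demo_mask);    /* no-op when CONFIG_CPUMASK_OFFSTACK=n */
                return 0;
        }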