arch/x86/kernel/nmi_64.c
/*
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Fixes:
 *  Mikael Pettersson   : AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson   : Power Management for local APIC NMI watchdog.
 *  Pavel Machek and
 *  Mikael Pettersson   : PM converted to driver model. Disable/enable API.
 */

#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>
#include <linux/cpumask.h>
#include <linux/kdebug.h>

#include <asm/i8259.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/mce.h>

#include <mach_traps.h>

int unknown_nmi_panic;
int nmi_watchdog_enabled;
int panic_on_unrecovered_nmi;

static cpumask_t backtrace_mask = CPU_MASK_NONE;

/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);           /* oprofile uses this */
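
/*
 * Illustrative only (not a quote from this file): an external user such
 * as a profiler that wants the performance counters to itself can
 * consult this tri-state before taking them over, along the lines of:
 *
 *	if (atomic_read(&nmi_active) > 0)
 *		disable_lapic_nmi_watchdog();
 *
 * disable_lapic_nmi_watchdog() is the same helper the sysctl handler
 * below uses; the call site here is a sketch of the intended protocol.
 */
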
static int panic_on_timeout;

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;

static DEFINE_PER_CPU(short, wd_enabled);

/* Runs after command-line parsing and cpu_init(), but before all other checks */
void nmi_watchdog_default(void)
{
        if (nmi_watchdog != NMI_DEFAULT)
                return;
        nmi_watchdog = NMI_NONE;
}

static int endflag __initdata = 0;

#ifdef CONFIG_SMP
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test, make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
        local_irq_enable_in_hardirq();
        /* Intentionally don't use cpu_relax here. This is
           to make sure that the performance counter really ticks,
           even if there is a simulator or similar that catches the
           pause instruction. On a real HT machine this is fine because
           all other CPUs are busy with "useless" delay loops and don't
           care if they get somewhat fewer cycles. */
        while (endflag == 0)
                mb();
}
#endif

int __init check_nmi_watchdog(void)
{
        int *prev_nmi_count;
        int cpu;

        if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DISABLED))
                return 0;

        if (!atomic_read(&nmi_active))
                return 0;

        prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
        if (!prev_nmi_count)
                goto error;

        printk(KERN_INFO "Testing NMI watchdog ... ");

#ifdef CONFIG_SMP
        if (nmi_watchdog == NMI_LOCAL_APIC)
                smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
#endif

        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                prev_nmi_count[cpu] = cpu_pda(cpu)->__nmi_count;
        local_irq_enable();
        mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */

        for_each_online_cpu(cpu) {
                if (!per_cpu(wd_enabled, cpu))
                        continue;
                if (cpu_pda(cpu)->__nmi_count - prev_nmi_count[cpu] <= 5) {
                        printk(KERN_WARNING "WARNING: CPU#%d: NMI "
                               "appears to be stuck (%d->%d)!\n",
                                cpu,
                                prev_nmi_count[cpu],
                                cpu_pda(cpu)->__nmi_count);
                        per_cpu(wd_enabled, cpu) = 0;
                        atomic_dec(&nmi_active);
                }
        }
        endflag = 1;
        if (!atomic_read(&nmi_active)) {
                kfree(prev_nmi_count);
                atomic_set(&nmi_active, -1);
                goto error;
        }
        printk("OK.\n");

        /* now that we know it works we can reduce NMI frequency to
           something more reasonable; makes a difference in some configs */
        if (nmi_watchdog == NMI_LOCAL_APIC)
                nmi_hz = lapic_adjust_nmi_hz(1);

        kfree(prev_nmi_count);
        return 0;
error:
        if (nmi_watchdog == NMI_IO_APIC && !timer_through_8259)
                disable_8259A_irq(0);

        return -1;
}
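
/*
 * For reference, the test above reports in dmesg as either
 *	Testing NMI watchdog ... OK.
 * or, for a CPU whose NMI count did not advance,
 *	WARNING: CPU#1: NMI appears to be stuck (0->3)!
 * (the CPU number and counts here are made-up illustrations).
 */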

static int __init setup_nmi_watchdog(char *str)
{
        int nmi;

        if (!strncmp(str, "panic", 5)) {
                panic_on_timeout = 1;
                str = strchr(str, ',');
                if (!str)
                        return 1;
                ++str;
        }

        get_option(&str, &nmi);

        if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
                return 0;

        nmi_watchdog = nmi;
        return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);
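
/*
 * For illustration, given the parser above, a kernel command line of
 *	nmi_watchdog=panic,1
 * arms panic_on_timeout and then selects watchdog mode 1, while a bare
 *	nmi_watchdog=panic
 * only arms panic_on_timeout. The numeric modes map to the NMI_*
 * constants in <asm/nmi.h>; that mode 1 is the local APIC watchdog and
 * mode 2 the IO-APIC watchdog is stated here as an assumption rather
 * than quoted from that header.
 */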

#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
        /* only CPU0 goes here, other CPUs should be offline */
        nmi_pm_active = atomic_read(&nmi_active);
        stop_apic_nmi_watchdog(NULL);
        BUG_ON(atomic_read(&nmi_active) != 0);
        return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
        /* only CPU0 goes here, other CPUs should be offline */
        if (nmi_pm_active > 0) {
                setup_apic_nmi_watchdog(NULL);
                touch_nmi_watchdog();
        }
        return 0;
}

static struct sysdev_class nmi_sysclass = {
        .name           = "lapic_nmi",
        .resume         = lapic_nmi_resume,
        .suspend        = lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
        .id     = 0,
        .cls    = &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
        int error;

        /* This should really be a BUG_ON, but because this is an
         * initcall it just doesn't work.  -dcz
         */
        if (nmi_watchdog != NMI_LOCAL_APIC)
                return 0;

        if (atomic_read(&nmi_active) < 0)
                return 0;

        error = sysdev_class_register(&nmi_sysclass);
        if (!error)
                error = sysdev_register(&device_lapic_nmi);
        return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif  /* CONFIG_PM */

static void __acpi_nmi_enable(void *__unused)
{
        apic_write(APIC_LVT0, APIC_DM_NMI);
}

/*
 * Enable timer based NMIs on all CPUs:
 */
void acpi_nmi_enable(void)
{
        if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
                on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
}

static void __acpi_nmi_disable(void *__unused)
{
        apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
}

/*
 * Disable timer based NMIs on all CPUs:
 */
void acpi_nmi_disable(void)
{
        if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
                on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
}

void setup_apic_nmi_watchdog(void *unused)
{
        if (__get_cpu_var(wd_enabled))
                return;

        /* cheap hack to support suspend/resume */
        /* if cpu0 is not active, neither should the other cpus be */
        if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
                return;

        switch (nmi_watchdog) {
        case NMI_LOCAL_APIC:
                __get_cpu_var(wd_enabled) = 1;
                if (lapic_watchdog_init(nmi_hz) < 0) {
                        __get_cpu_var(wd_enabled) = 0;
                        return;
                }
                /* FALL THROUGH */
        case NMI_IO_APIC:
                __get_cpu_var(wd_enabled) = 1;
                atomic_inc(&nmi_active);
        }
}

void stop_apic_nmi_watchdog(void *unused)
{
        /* only support LOCAL and IO APICs for now */
        if ((nmi_watchdog != NMI_LOCAL_APIC) &&
            (nmi_watchdog != NMI_IO_APIC))
                return;
        if (__get_cpu_var(wd_enabled) == 0)
                return;
        if (nmi_watchdog == NMI_LOCAL_APIC)
                lapic_watchdog_stop();
        __get_cpu_var(wd_enabled) = 0;
        atomic_dec(&nmi_active);
}
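
/*
 * Illustrative only: both handlers above take a dummy argument so they
 * can be driven on every CPU by the generic enable/disable paths,
 * roughly:
 *
 *	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
 *	...
 *	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
 *
 * The real callers live in the shared watchdog code; the sketch just
 * mirrors the on_each_cpu() convention already used in this file.
 */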

/*
 * The best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * As these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

void touch_nmi_watchdog(void)
{
        if (nmi_watchdog > 0) {
                unsigned cpu;

                /*
                 * Tell other CPUs to reset their alert counters. We cannot
                 * do it ourselves because the alert count increase is not
                 * atomic.
                 */
                for_each_present_cpu(cpu) {
                        if (per_cpu(nmi_touch, cpu) != 1)
                                per_cpu(nmi_touch, cpu) = 1;
                }
        }

        touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
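
/*
 * Illustrative usage (the helpers are hypothetical, not kernel APIs):
 * long-running code that keeps interrupts off or otherwise starves the
 * timer should poke the watchdog periodically so the per-cpu alert
 * counters below keep getting reset:
 *
 *	while (more_slow_work()) {
 *		do_one_chunk();
 *		touch_nmi_watchdog();
 *	}
 */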

notrace __kprobes int
nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
        int sum;
        int touched = 0;
        int cpu = smp_processor_id();
        int rc = 0;

        /* check for other users first */
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
                        == NOTIFY_STOP) {
                rc = 1;
                touched = 1;
        }

        sum = read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
        if (__get_cpu_var(nmi_touch)) {
                __get_cpu_var(nmi_touch) = 0;
                touched = 1;
        }

        if (cpu_isset(cpu, backtrace_mask)) {
                static DEFINE_SPINLOCK(lock);   /* Serialise the printks */

                spin_lock(&lock);
                printk("NMI backtrace for cpu %d\n", cpu);
                dump_stack();
                spin_unlock(&lock);
                cpu_clear(cpu, backtrace_mask);
        }

#ifdef CONFIG_X86_MCE
        /* Could check oops_in_progress here too, but it's safer
           not to. */
        if (atomic_read(&mce_entry) > 0)
                touched = 1;
#endif
        /* if the apic timer isn't firing, this cpu isn't doing much */
        if (!touched && __get_cpu_var(last_irq_sum) == sum) {
                /*
                 * Ayiee, looks like this CPU is stuck ...
                 * wait a few IRQs (5 seconds) before doing the oops ...
                 */
                local_inc(&__get_cpu_var(alert_counter));
                if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
                        die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs,
                                panic_on_timeout);
        } else {
                __get_cpu_var(last_irq_sum) = sum;
                local_set(&__get_cpu_var(alert_counter), 0);
        }

        /* see if the nmi watchdog went off */
        if (!__get_cpu_var(wd_enabled))
                return rc;
        switch (nmi_watchdog) {
        case NMI_LOCAL_APIC:
                rc |= lapic_wd_event(nmi_hz);
                break;
        case NMI_IO_APIC:
                /* We don't know how to accurately check for this;
                 * just assume it was a watchdog timer interrupt.
                 * This matches the old behaviour.
                 */
                rc = 1;
                break;
        }
        return rc;
}
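
/*
 * The "5 seconds" above follows from the tick rate: the watchdog NMI
 * fires roughly nmi_hz times a second, so an alert_counter reaching
 * 5 * nmi_hz means the IRQ sum has stood still for about five seconds'
 * worth of watchdog NMIs.
 */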

static unsigned ignore_nmis;

asmlinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
        nmi_enter();
        add_pda(__nmi_count, 1);
        if (!ignore_nmis)
                default_do_nmi(regs);
        nmi_exit();
}

void stop_nmi(void)
{
        acpi_nmi_disable();
        ignore_nmis++;
}

void restart_nmi(void)
{
        ignore_nmis--;
        acpi_nmi_enable();
}
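
/*
 * Illustrative pairing (hedged): a caller that needs NMIs to be taken
 * but not acted upon brackets the critical section, and because
 * ignore_nmis is a counter such sections may nest:
 *
 *	stop_nmi();
 *	... region where default_do_nmi() must stay out of the way ...
 *	restart_nmi();
 */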

#ifdef CONFIG_SYSCTL

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
        unsigned char reason = get_nmi_reason();
        char buf[64];

        sprintf(buf, "NMI received for unknown reason %02x\n", reason);
        die_nmi(buf, regs, 1);  /* Always panic here */
        return 0;
}

/*
 * proc handler for /proc/sys/kernel/nmi
 */
int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
                        void __user *buffer, size_t *length, loff_t *ppos)
{
        int old_state;

        nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
        old_state = nmi_watchdog_enabled;
        proc_dointvec(table, write, file, buffer, length, ppos);
        if (!!old_state == !!nmi_watchdog_enabled)
                return 0;

        if (atomic_read(&nmi_active) < 0 || nmi_watchdog == NMI_DISABLED) {
                printk(KERN_WARNING "NMI watchdog is permanently disabled\n");
                return -EIO;
        }

        /* if nmi_watchdog is not set yet, then set it */
        nmi_watchdog_default();

        if (nmi_watchdog == NMI_LOCAL_APIC) {
                if (nmi_watchdog_enabled)
                        enable_lapic_nmi_watchdog();
                else
                        disable_lapic_nmi_watchdog();
        } else {
                printk(KERN_WARNING
                        "NMI watchdog doesn't know what hardware to touch\n");
                return -EIO;
        }
        return 0;
}

#endif
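
/*
 * Illustrative use from userspace: the handler above backs
 *	echo 1 > /proc/sys/kernel/nmi
 * to enable the lapic watchdog at run time, and "echo 0" to disable
 * it, assuming the watchdog was not permanently disabled earlier.
 */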

int do_nmi_callback(struct pt_regs *regs, int cpu)
{
#ifdef CONFIG_SYSCTL
        if (unknown_nmi_panic)
                return unknown_nmi_panic_callback(regs, cpu);
#endif
        return 0;
}

void __trigger_all_cpu_backtrace(void)
{
        int i;

        backtrace_mask = cpu_online_map;
        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
        for (i = 0; i < 10 * 1000; i++) {
                if (cpus_empty(backtrace_mask))
                        break;
                mdelay(1);
        }
}
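
/*
 * Illustrative only: this is normally reached through the
 * trigger_all_cpu_backtrace() wrapper (the SysRq 'l' handler is the
 * classic caller, an assumption about code outside this file). Each
 * CPU spots its bit in backtrace_mask from nmi_watchdog_tick() above
 * and dumps its stack at its next watchdog NMI, which is why the loop
 * polls for the mask to empty.
 */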

EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);