1 /* arch/sparc64/kernel/traps.c
3 * Copyright (C) 1995,1997,2008 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
8 * I like traps on v9, :))))
11 #include <linux/module.h>
12 #include <linux/sched.h>
13 #include <linux/kernel.h>
14 #include <linux/signal.h>
15 #include <linux/smp.h>
17 #include <linux/init.h>
18 #include <linux/kdebug.h>
21 #include <asm/delay.h>
22 #include <asm/system.h>
23 #include <asm/ptrace.h>
24 #include <asm/oplib.h>
26 #include <asm/pgtable.h>
27 #include <asm/unistd.h>
28 #include <asm/uaccess.h>
29 #include <asm/fpumacro.h>
32 #include <asm/estate.h>
33 #include <asm/chafsr.h>
34 #include <asm/sfafsr.h>
35 #include <asm/psrcompat.h>
36 #include <asm/processor.h>
37 #include <asm/timer.h>
40 #include <asm/memctrl.h>
45 /* When an irrecoverable trap occurs at tl > 0, the trap entry
46 * code logs the trap state registers at every level in the trap
47 * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout
60 static void dump_tl1_traplog(struct tl1_traplog *p)
64 printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
65 "dumping track stack.\n", p->tl);
67 limit = (tlb_type == hypervisor) ? 2 : 4;
68 for (i = 0; i < limit; i++) {
70 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
71 "TNPC[%016lx] TT[%lx]\n",
73 p->trapstack[i].tstate, p->trapstack[i].tpc,
74 p->trapstack[i].tnpc, p->trapstack[i].tt);
75 printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
79 void bad_trap(struct pt_regs *regs, long lvl)
84 if (notify_die(DIE_TRAP, "bad trap", regs,
85 0, lvl, SIGTRAP) == NOTIFY_STOP)
89 sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
90 die_if_kernel(buffer, regs);
94 if (regs->tstate & TSTATE_PRIV) {
95 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
96 die_if_kernel(buffer, regs);
98 if (test_thread_flag(TIF_32BIT)) {
99 regs->tpc &= 0xffffffff;
100 regs->tnpc &= 0xffffffff;
102 info.si_signo = SIGILL;
104 info.si_code = ILL_ILLTRP;
105 info.si_addr = (void __user *)regs->tpc;
106 info.si_trapno = lvl;
107 force_sig_info(SIGILL, &info, current);
110 void bad_trap_tl1(struct pt_regs *regs, long lvl)
114 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
115 0, lvl, SIGTRAP) == NOTIFY_STOP)
118 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
120 sprintf(buffer, "Bad trap %lx at tl>0", lvl);
121 die_if_kernel(buffer, regs);
124 #ifdef CONFIG_DEBUG_BUGVERBOSE
125 void do_BUG(const char *file, int line)
128 printk("kernel BUG at %s:%d!\n", file, line);
132 static DEFINE_SPINLOCK(dimm_handler_lock);
133 static dimm_printer_t dimm_handler;
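/* Translate an ECC syndrome code and physical address into a DIMM
 * label.  A memory controller driver may register a handler for this;
 * otherwise, on spitfire, we fall back to the PROM get-unumber service.
 * Returns a negative value when no translation is available.
 */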
135 static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen)
140 spin_lock_irqsave(&dimm_handler_lock, flags);
142 ret = dimm_handler(synd_code, paddr, buf, buflen);
143 } else if (tlb_type == spitfire) {
144 if (prom_getunumber(synd_code, paddr, buf, buflen) == -1)
150 spin_unlock_irqrestore(&dimm_handler_lock, flags);
155 int register_dimm_printer(dimm_printer_t func)
160 spin_lock_irqsave(&dimm_handler_lock, flags);
165 spin_unlock_irqrestore(&dimm_handler_lock, flags);
169 EXPORT_SYMBOL_GPL(register_dimm_printer);
171 void unregister_dimm_printer(dimm_printer_t func)
175 spin_lock_irqsave(&dimm_handler_lock, flags);
176 if (dimm_handler == func)
178 spin_unlock_irqrestore(&dimm_handler_lock, flags);
180 EXPORT_SYMBOL_GPL(unregister_dimm_printer);
182 void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
186 if (notify_die(DIE_TRAP, "instruction access exception", regs,
187 0, 0x8, SIGTRAP) == NOTIFY_STOP)
190 if (regs->tstate & TSTATE_PRIV) {
191 printk("spitfire_insn_access_exception: SFSR[%016lx] "
192 "SFAR[%016lx], going.\n", sfsr, sfar);
193 die_if_kernel("Iax", regs);
195 if (test_thread_flag(TIF_32BIT)) {
196 regs->tpc &= 0xffffffff;
197 regs->tnpc &= 0xffffffff;
199 info.si_signo = SIGSEGV;
201 info.si_code = SEGV_MAPERR;
202 info.si_addr = (void __user *)regs->tpc;
204 force_sig_info(SIGSEGV, &info, current);
207 void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
209 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
210 0, 0x8, SIGTRAP) == NOTIFY_STOP)
213 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
214 spitfire_insn_access_exception(regs, sfsr, sfar);
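/* On sun4v the trap entry code hands us the fault type in the upper
 * 16 bits of type_ctx and the MMU context in the lower 16 bits.
 */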
217 void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
219 unsigned short type = (type_ctx >> 16);
220 unsigned short ctx = (type_ctx & 0xffff);
223 if (notify_die(DIE_TRAP, "instruction access exception", regs,
224 0, 0x8, SIGTRAP) == NOTIFY_STOP)
227 if (regs->tstate & TSTATE_PRIV) {
228 printk("sun4v_insn_access_exception: ADDR[%016lx] "
229 "CTX[%04x] TYPE[%04x], going.\n",
231 die_if_kernel("Iax", regs);
234 if (test_thread_flag(TIF_32BIT)) {
235 regs->tpc &= 0xffffffff;
236 regs->tnpc &= 0xffffffff;
238 info.si_signo = SIGSEGV;
240 info.si_code = SEGV_MAPERR;
241 info.si_addr = (void __user *) addr;
243 force_sig_info(SIGSEGV, &info, current);
246 void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
248 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
249 0, 0x8, SIGTRAP) == NOTIFY_STOP)
252 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
253 sun4v_insn_access_exception(regs, addr, type_ctx);
256 void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
260 if (notify_die(DIE_TRAP, "data access exception", regs,
261 0, 0x30, SIGTRAP) == NOTIFY_STOP)
264 if (regs->tstate & TSTATE_PRIV) {
265 /* Test if this comes from uaccess places. */
266 const struct exception_table_entry *entry;
268 entry = search_exception_tables(regs->tpc);
270 /* Ouch, somebody is trying VM hole tricks on us... */
271 #ifdef DEBUG_EXCEPTIONS
272 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
273 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
274 regs->tpc, entry->fixup);
276 regs->tpc = entry->fixup;
277 regs->tnpc = regs->tpc + 4;
281 printk("spitfire_data_access_exception: SFSR[%016lx] "
282 "SFAR[%016lx], going.\n", sfsr, sfar);
283 die_if_kernel("Dax", regs);
286 info.si_signo = SIGSEGV;
288 info.si_code = SEGV_MAPERR;
289 info.si_addr = (void __user *)sfar;
291 force_sig_info(SIGSEGV, &info, current);
294 void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
296 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
297 0, 0x30, SIGTRAP) == NOTIFY_STOP)
300 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
301 spitfire_data_access_exception(regs, sfsr, sfar);
304 void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
306 unsigned short type = (type_ctx >> 16);
307 unsigned short ctx = (type_ctx & 0xffff);
310 if (notify_die(DIE_TRAP, "data access exception", regs,
311 0, 0x8, SIGTRAP) == NOTIFY_STOP)
314 if (regs->tstate & TSTATE_PRIV) {
315 printk("sun4v_data_access_exception: ADDR[%016lx] "
316 "CTX[%04x] TYPE[%04x], going.\n",
318 die_if_kernel("Dax", regs);
321 if (test_thread_flag(TIF_32BIT)) {
322 regs->tpc &= 0xffffffff;
323 regs->tnpc &= 0xffffffff;
325 info.si_signo = SIGSEGV;
327 info.si_code = SEGV_MAPERR;
328 info.si_addr = (void __user *) addr;
330 force_sig_info(SIGSEGV, &info, current);
333 void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
335 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
336 0, 0x8, SIGTRAP) == NOTIFY_STOP)
339 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
340 sun4v_data_access_exception(regs, addr, type_ctx);
344 /* This is really pathetic... */
345 extern volatile int pci_poke_in_progress;
346 extern volatile int pci_poke_cpu;
347 extern volatile int pci_poke_faulted;
350 /* When access exceptions happen, we must do this. */
351 static void spitfire_clean_and_reenable_l1_caches(void)
355 if (tlb_type != spitfire)
359 for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
360 spitfire_put_icache_tag(va, 0x0);
361 spitfire_put_dcache_tag(va, 0x0);
364 /* Re-enable in LSU. */
365 __asm__ __volatile__("flush %%g6\n\t"
367 "stxa %0, [%%g0] %1\n\t"
370 : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
371 LSU_CONTROL_IM | LSU_CONTROL_DM),
372 "i" (ASI_LSU_CONTROL)
376 static void spitfire_enable_estate_errors(void)
378 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
381 : "r" (ESTATE_ERR_ALL),
382 "i" (ASI_ESTATE_ERROR_EN));
385 static char ecc_syndrome_table[] = {
386 0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
387 0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
388 0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
389 0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
390 0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
391 0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
392 0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
393 0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
394 0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
395 0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
396 0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
397 0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
398 0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
399 0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
400 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
401 0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
402 0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
403 0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
404 0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
405 0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
406 0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
407 0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
408 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
409 0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
410 0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
411 0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
412 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
413 0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
414 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
415 0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
416 0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
417 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
420 static char *syndrome_unknown = "<Unknown>";
422 static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
424 unsigned short scode;
425 char memmod_str[64], *p;
428 scode = ecc_syndrome_table[udbl & 0xff];
429 if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
430 p = syndrome_unknown;
433 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
434 "Memory Module \"%s\"\n",
435 smp_processor_id(), scode, p);
439 scode = ecc_syndrome_table[udbh & 0xff];
440 if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
441 p = syndrome_unknown;
444 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
445 "Memory Module \"%s\"\n",
446 smp_processor_id(), scode, p);
451 static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
454 printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
455 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
456 smp_processor_id(), afsr, afar, udbl, udbh, tl1);
458 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
460 /* We always log it, even if someone is listening for this
463 notify_die(DIE_TRAP, "Correctable ECC Error", regs,
464 0, TRAP_TYPE_CEE, SIGTRAP);
466 /* The Correctable ECC Error trap does not disable I/D caches. So
467 * we only have to restore the ESTATE Error Enable register.
469 spitfire_enable_estate_errors();
472 static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
476 printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
477 "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
478 smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
480 /* XXX add more human friendly logging of the error status
481 * XXX as is implemented for cheetah
484 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
486 /* We always log it, even if someone is listening for this
489 notify_die(DIE_TRAP, "Uncorrectable Error", regs,
492 if (regs->tstate & TSTATE_PRIV) {
494 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
495 die_if_kernel("UE", regs);
498 /* XXX need more intelligent processing here, such as is implemented
499 * XXX for cheetah errors, in fact if the E-cache still holds the
500 * XXX line with bad parity this will loop
503 spitfire_clean_and_reenable_l1_caches();
504 spitfire_enable_estate_errors();
506 if (test_thread_flag(TIF_32BIT)) {
507 regs->tpc &= 0xffffffff;
508 regs->tnpc &= 0xffffffff;
510 info.si_signo = SIGBUS;
512 info.si_code = BUS_OBJERR;
513 info.si_addr = (void *)0;
515 force_sig_info(SIGBUS, &info, current);
518 void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
520 unsigned long afsr, tt, udbh, udbl;
523 afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
524 tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
525 tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
526 udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
527 udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
530 if (tt == TRAP_TYPE_DAE &&
531 pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
532 spitfire_clean_and_reenable_l1_caches();
533 spitfire_enable_estate_errors();
535 pci_poke_faulted = 1;
536 regs->tnpc = regs->tpc + 4;
541 if (afsr & SFAFSR_UE)
542 spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
544 if (tt == TRAP_TYPE_CEE) {
545 /* Handle the case where we took a CEE trap, but ACK'd
546 * only the UE state in the UDB error registers.
548 if (afsr & SFAFSR_UE) {
549 if (udbh & UDBE_CE) {
550 __asm__ __volatile__(
551 "stxa %0, [%1] %2\n\t"
554 : "r" (udbh & UDBE_CE),
555 "r" (0x0), "i" (ASI_UDB_ERROR_W));
557 if (udbl & UDBE_CE) {
558 __asm__ __volatile__(
559 "stxa %0, [%1] %2\n\t"
562 : "r" (udbl & UDBE_CE),
563 "r" (0x18), "i" (ASI_UDB_ERROR_W));
567 spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
571 int cheetah_pcache_forced_on;
573 void cheetah_enable_pcache(void)
577 printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
580 __asm__ __volatile__("ldxa [%%g0] %1, %0"
582 : "i" (ASI_DCU_CONTROL_REG));
583 dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
584 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
587 : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
590 /* Cheetah error trap handling. */
591 static unsigned long ecache_flush_physbase;
592 static unsigned long ecache_flush_linesize;
593 static unsigned long ecache_flush_size;
595 /* This table is ordered in priority of errors and matches the
596 * AFAR overwrite policy as well.
599 struct afsr_error_table {
604 static const char CHAFSR_PERR_msg[] =
605 "System interface protocol error";
606 static const char CHAFSR_IERR_msg[] =
607 "Internal processor error";
608 static const char CHAFSR_ISAP_msg[] =
609 "System request parity error on incoming addresss";
610 static const char CHAFSR_UCU_msg[] =
611 "Uncorrectable E-cache ECC error for ifetch/data";
612 static const char CHAFSR_UCC_msg[] =
613 "SW Correctable E-cache ECC error for ifetch/data";
614 static const char CHAFSR_UE_msg[] =
615 "Uncorrectable system bus data ECC error for read";
616 static const char CHAFSR_EDU_msg[] =
617 "Uncorrectable E-cache ECC error for stmerge/blkld";
618 static const char CHAFSR_EMU_msg[] =
619 "Uncorrectable system bus MTAG error";
620 static const char CHAFSR_WDU_msg[] =
621 "Uncorrectable E-cache ECC error for writeback";
622 static const char CHAFSR_CPU_msg[] =
623 "Uncorrectable ECC error for copyout";
624 static const char CHAFSR_CE_msg[] =
625 "HW corrected system bus data ECC error for read";
626 static const char CHAFSR_EDC_msg[] =
627 "HW corrected E-cache ECC error for stmerge/blkld";
628 static const char CHAFSR_EMC_msg[] =
629 "HW corrected system bus MTAG ECC error";
630 static const char CHAFSR_WDC_msg[] =
631 "HW corrected E-cache ECC error for writeback";
632 static const char CHAFSR_CPC_msg[] =
633 "HW corrected ECC error for copyout";
634 static const char CHAFSR_TO_msg[] =
635 "Unmapped error from system bus";
636 static const char CHAFSR_BERR_msg[] =
637 "Bus error response from system bus";
638 static const char CHAFSR_IVC_msg[] =
639 "HW corrected system bus data ECC error for ivec read";
640 static const char CHAFSR_IVU_msg[] =
641 "Uncorrectable system bus data ECC error for ivec read";
642 static struct afsr_error_table __cheetah_error_table[] = {
643 { CHAFSR_PERR, CHAFSR_PERR_msg },
644 { CHAFSR_IERR, CHAFSR_IERR_msg },
645 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
646 { CHAFSR_UCU, CHAFSR_UCU_msg },
647 { CHAFSR_UCC, CHAFSR_UCC_msg },
648 { CHAFSR_UE, CHAFSR_UE_msg },
649 { CHAFSR_EDU, CHAFSR_EDU_msg },
650 { CHAFSR_EMU, CHAFSR_EMU_msg },
651 { CHAFSR_WDU, CHAFSR_WDU_msg },
652 { CHAFSR_CPU, CHAFSR_CPU_msg },
653 { CHAFSR_CE, CHAFSR_CE_msg },
654 { CHAFSR_EDC, CHAFSR_EDC_msg },
655 { CHAFSR_EMC, CHAFSR_EMC_msg },
656 { CHAFSR_WDC, CHAFSR_WDC_msg },
657 { CHAFSR_CPC, CHAFSR_CPC_msg },
658 { CHAFSR_TO, CHAFSR_TO_msg },
659 { CHAFSR_BERR, CHAFSR_BERR_msg },
660 /* These two do not update the AFAR. */
661 { CHAFSR_IVC, CHAFSR_IVC_msg },
662 { CHAFSR_IVU, CHAFSR_IVU_msg },
665 static const char CHPAFSR_DTO_msg[] =
666 "System bus unmapped error for prefetch/storequeue-read";
667 static const char CHPAFSR_DBERR_msg[] =
668 "System bus error for prefetch/storequeue-read";
669 static const char CHPAFSR_THCE_msg[] =
670 "Hardware corrected E-cache Tag ECC error";
671 static const char CHPAFSR_TSCE_msg[] =
672 "SW handled correctable E-cache Tag ECC error";
673 static const char CHPAFSR_TUE_msg[] =
674 "Uncorrectable E-cache Tag ECC error";
675 static const char CHPAFSR_DUE_msg[] =
676 "System bus uncorrectable data ECC error due to prefetch/store-fill";
677 static struct afsr_error_table __cheetah_plus_error_table[] = {
678 { CHAFSR_PERR, CHAFSR_PERR_msg },
679 { CHAFSR_IERR, CHAFSR_IERR_msg },
680 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
681 { CHAFSR_UCU, CHAFSR_UCU_msg },
682 { CHAFSR_UCC, CHAFSR_UCC_msg },
683 { CHAFSR_UE, CHAFSR_UE_msg },
684 { CHAFSR_EDU, CHAFSR_EDU_msg },
685 { CHAFSR_EMU, CHAFSR_EMU_msg },
686 { CHAFSR_WDU, CHAFSR_WDU_msg },
687 { CHAFSR_CPU, CHAFSR_CPU_msg },
688 { CHAFSR_CE, CHAFSR_CE_msg },
689 { CHAFSR_EDC, CHAFSR_EDC_msg },
690 { CHAFSR_EMC, CHAFSR_EMC_msg },
691 { CHAFSR_WDC, CHAFSR_WDC_msg },
692 { CHAFSR_CPC, CHAFSR_CPC_msg },
693 { CHAFSR_TO, CHAFSR_TO_msg },
694 { CHAFSR_BERR, CHAFSR_BERR_msg },
695 { CHPAFSR_DTO, CHPAFSR_DTO_msg },
696 { CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
697 { CHPAFSR_THCE, CHPAFSR_THCE_msg },
698 { CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
699 { CHPAFSR_TUE, CHPAFSR_TUE_msg },
700 { CHPAFSR_DUE, CHPAFSR_DUE_msg },
701 /* These two do not update the AFAR. */
702 { CHAFSR_IVC, CHAFSR_IVC_msg },
703 { CHAFSR_IVU, CHAFSR_IVU_msg },
706 static const char JPAFSR_JETO_msg[] =
707 "System interface protocol error, hw timeout caused";
708 static const char JPAFSR_SCE_msg[] =
709 "Parity error on system snoop results";
710 static const char JPAFSR_JEIC_msg[] =
711 "System interface protocol error, illegal command detected";
712 static const char JPAFSR_JEIT_msg[] =
713 "System interface protocol error, illegal ADTYPE detected";
714 static const char JPAFSR_OM_msg[] =
715 "Out of range memory error has occurred";
716 static const char JPAFSR_ETP_msg[] =
717 "Parity error on L2 cache tag SRAM";
718 static const char JPAFSR_UMS_msg[] =
719 "Error due to unsupported store";
720 static const char JPAFSR_RUE_msg[] =
721 "Uncorrectable ECC error from remote cache/memory";
722 static const char JPAFSR_RCE_msg[] =
723 "Correctable ECC error from remote cache/memory";
724 static const char JPAFSR_BP_msg[] =
725 "JBUS parity error on returned read data";
726 static const char JPAFSR_WBP_msg[] =
727 "JBUS parity error on data for writeback or block store";
728 static const char JPAFSR_FRC_msg[] =
729 "Foreign read to DRAM incurring correctable ECC error";
730 static const char JPAFSR_FRU_msg[] =
731 "Foreign read to DRAM incurring uncorrectable ECC error";
732 static struct afsr_error_table __jalapeno_error_table[] = {
733 { JPAFSR_JETO, JPAFSR_JETO_msg },
734 { JPAFSR_SCE, JPAFSR_SCE_msg },
735 { JPAFSR_JEIC, JPAFSR_JEIC_msg },
736 { JPAFSR_JEIT, JPAFSR_JEIT_msg },
737 { CHAFSR_PERR, CHAFSR_PERR_msg },
738 { CHAFSR_IERR, CHAFSR_IERR_msg },
739 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
740 { CHAFSR_UCU, CHAFSR_UCU_msg },
741 { CHAFSR_UCC, CHAFSR_UCC_msg },
742 { CHAFSR_UE, CHAFSR_UE_msg },
743 { CHAFSR_EDU, CHAFSR_EDU_msg },
744 { JPAFSR_OM, JPAFSR_OM_msg },
745 { CHAFSR_WDU, CHAFSR_WDU_msg },
746 { CHAFSR_CPU, CHAFSR_CPU_msg },
747 { CHAFSR_CE, CHAFSR_CE_msg },
748 { CHAFSR_EDC, CHAFSR_EDC_msg },
749 { JPAFSR_ETP, JPAFSR_ETP_msg },
750 { CHAFSR_WDC, CHAFSR_WDC_msg },
751 { CHAFSR_CPC, CHAFSR_CPC_msg },
752 { CHAFSR_TO, CHAFSR_TO_msg },
753 { CHAFSR_BERR, CHAFSR_BERR_msg },
754 { JPAFSR_UMS, JPAFSR_UMS_msg },
755 { JPAFSR_RUE, JPAFSR_RUE_msg },
756 { JPAFSR_RCE, JPAFSR_RCE_msg },
757 { JPAFSR_BP, JPAFSR_BP_msg },
758 { JPAFSR_WBP, JPAFSR_WBP_msg },
759 { JPAFSR_FRC, JPAFSR_FRC_msg },
760 { JPAFSR_FRU, JPAFSR_FRU_msg },
761 /* These two do not update the AFAR. */
762 { CHAFSR_IVU, CHAFSR_IVU_msg },
765 static struct afsr_error_table *cheetah_error_table;
766 static unsigned long cheetah_afsr_errors;
768 struct cheetah_err_info *cheetah_error_log;
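/* Each cpu owns two slots in the scoreboard: the first logs errors
 * taken at trap level zero, the second logs errors taken with
 * CHAFSR_TL1 set (trap level greater than zero).
 */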
770 static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
772 struct cheetah_err_info *p;
773 int cpu = smp_processor_id();
775 if (!cheetah_error_log)
778 p = cheetah_error_log + (cpu * 2);
779 if ((afsr & CHAFSR_TL1) != 0UL)
785 extern unsigned int tl0_icpe[], tl1_icpe[];
786 extern unsigned int tl0_dcpe[], tl1_dcpe[];
787 extern unsigned int tl0_fecc[], tl1_fecc[];
788 extern unsigned int tl0_cee[], tl1_cee[];
789 extern unsigned int tl0_iae[], tl1_iae[];
790 extern unsigned int tl0_dae[], tl1_dae[];
791 extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
792 extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
793 extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
794 extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
795 extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
797 void __init cheetah_ecache_flush_init(void)
799 unsigned long largest_size, smallest_linesize, order, ver;
802 /* Scan all cpu device tree nodes, note two values:
803 * 1) largest E-cache size
804 * 2) smallest E-cache line size
807 smallest_linesize = ~0UL;
809 for (i = 0; i < NR_CPUS; i++) {
812 val = cpu_data(i).ecache_size;
816 if (val > largest_size)
819 val = cpu_data(i).ecache_line_size;
820 if (val < smallest_linesize)
821 smallest_linesize = val;
825 if (largest_size == 0UL || smallest_linesize == ~0UL) {
826 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
831 ecache_flush_size = (2 * largest_size);
832 ecache_flush_linesize = smallest_linesize;
834 ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
836 if (ecache_flush_physbase == ~0UL) {
837 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
838 "contiguous physical memory.\n",
843 /* Now allocate error trap reporting scoreboard. */
844 sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
845 for (order = 0; order < MAX_ORDER; order++) {
846 if ((PAGE_SIZE << order) >= sz)
849 cheetah_error_log = (struct cheetah_err_info *)
850 __get_free_pages(GFP_KERNEL, order);
851 if (!cheetah_error_log) {
852 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
853 "error logging scoreboard (%d bytes).\n", sz);
856 memset(cheetah_error_log, 0, PAGE_SIZE << order);
858 /* Mark all AFSRs as invalid so that the trap handler will
859 * log new information there.
861 for (i = 0; i < 2 * NR_CPUS; i++)
862 cheetah_error_log[i].afsr = CHAFSR_INVALID;
864 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
865 if ((ver >> 32) == __JALAPENO_ID ||
866 (ver >> 32) == __SERRANO_ID) {
867 cheetah_error_table = &__jalapeno_error_table[0];
868 cheetah_afsr_errors = JPAFSR_ERRORS;
869 } else if ((ver >> 32) == 0x003e0015) {
870 cheetah_error_table = &__cheetah_plus_error_table[0];
871 cheetah_afsr_errors = CHPAFSR_ERRORS;
873 cheetah_error_table = &__cheetah_error_table[0];
874 cheetah_afsr_errors = CHAFSR_ERRORS;
877 /* Now patch trap tables. */
878 memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
879 memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
880 memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
881 memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
882 memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
883 memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
884 memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
885 memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
886 if (tlb_type == cheetah_plus) {
887 memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
888 memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
889 memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
890 memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
895 static void cheetah_flush_ecache(void)
897 unsigned long flush_base = ecache_flush_physbase;
898 unsigned long flush_linesize = ecache_flush_linesize;
899 unsigned long flush_size = ecache_flush_size;
901 __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
902 " bne,pt %%xcc, 1b\n\t"
903 " ldxa [%2 + %0] %3, %%g0\n\t"
905 : "0" (flush_size), "r" (flush_base),
906 "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
909 static void cheetah_flush_ecache_line(unsigned long physaddr)
913 physaddr &= ~(8UL - 1UL);
914 physaddr = (ecache_flush_physbase +
915 (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
916 alias = physaddr + (ecache_flush_size >> 1UL);
917 __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
918 "ldxa [%1] %2, %%g0\n\t"
921 : "r" (physaddr), "r" (alias),
922 "i" (ASI_PHYS_USE_EC));
925 /* Unfortunately, the diagnostic access to the I-cache tags we need to
926 * use to clear the thing interferes with I-cache coherency transactions.
928 * So we must only flush the I-cache when it is disabled.
930 static void __cheetah_flush_icache(void)
932 unsigned int icache_size, icache_line_size;
935 icache_size = local_cpu_data().icache_size;
936 icache_line_size = local_cpu_data().icache_line_size;
938 /* Clear the valid bits in all the tags. */
939 for (addr = 0; addr < icache_size; addr += icache_line_size) {
940 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
943 : "r" (addr | (2 << 3)),
948 static void cheetah_flush_icache(void)
950 unsigned long dcu_save;
952 /* Save current DCU, disable I-cache. */
953 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
954 "or %0, %2, %%g1\n\t"
955 "stxa %%g1, [%%g0] %1\n\t"
958 : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
961 __cheetah_flush_icache();
963 /* Restore DCU register */
964 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
967 : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
970 static void cheetah_flush_dcache(void)
972 unsigned int dcache_size, dcache_line_size;
975 dcache_size = local_cpu_data().dcache_size;
976 dcache_line_size = local_cpu_data().dcache_line_size;
978 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
979 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
982 : "r" (addr), "i" (ASI_DCACHE_TAG));
986 /* In order to make the even parity correct we must do two things.
987 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
988 * Next, we clear out all 32-bytes of data for that line. Data of
989 * all-zero + tag parity value of zero == correct parity.
991 static void cheetah_plus_zap_dcache_parity(void)
993 unsigned int dcache_size, dcache_line_size;
996 dcache_size = local_cpu_data().dcache_size;
997 dcache_line_size = local_cpu_data().dcache_line_size;
999 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
1000 unsigned long tag = (addr >> 14);
1003 __asm__ __volatile__("membar #Sync\n\t"
1004 "stxa %0, [%1] %2\n\t"
1007 : "r" (tag), "r" (addr),
1008 "i" (ASI_DCACHE_UTAG));
1009 for (line = addr; line < addr + dcache_line_size; line += 8)
1010 __asm__ __volatile__("membar #Sync\n\t"
1011 "stxa %%g0, [%0] %1\n\t"
1015 "i" (ASI_DCACHE_DATA));
1019 /* Conversion tables used to frob Cheetah AFSR syndrome values into
1020 * something palatable to the memory controller driver get_unumber
1044 static unsigned char cheetah_ecc_syntab[] = {
1045 /*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
1046 /*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
1047 /*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
1048 /*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
1049 /*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
1050 /*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
1051 /*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
1052 /*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
1053 /*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
1054 /*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
1055 /*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
1056 /*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
1057 /*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
1058 /*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
1059 /*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
1060 /*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
1061 /*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
1062 /*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
1063 /*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
1064 /*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
1065 /*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
1066 /*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
1067 /*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
1068 /*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
1069 /*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
1070 /*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
1071 /*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
1072 /*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
1073 /*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
1074 /*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
1075 /*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
1076 /*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
1078 static unsigned char cheetah_mtag_syntab[] = {
1089 /* Return the highest priority error condition mentioned. */
1090 static inline unsigned long cheetah_get_hipri(unsigned long afsr)
1092 unsigned long tmp = 0;
1095 for (i = 0; cheetah_error_table[i].mask; i++) {
1096 if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
1102 static const char *cheetah_get_string(unsigned long bit)
1106 for (i = 0; cheetah_error_table[i].mask; i++) {
1107 if ((bit & cheetah_error_table[i].mask) != 0UL)
1108 return cheetah_error_table[i].name;
1113 static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
1114 unsigned long afsr, unsigned long afar, int recoverable)
1116 unsigned long hipri;
1119 printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
1120 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1122 (afsr & CHAFSR_TL1) ? 1 : 0);
1123 printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
1124 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1125 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
1126 printk("%s" "ERROR(%d): ",
1127 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
1128 printk("TPC<%pS>\n", (void *) regs->tpc);
1129 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
1130 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1131 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
1132 (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
1133 (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
1134 (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
1135 hipri = cheetah_get_hipri(afsr);
1136 printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
1137 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1138 hipri, cheetah_get_string(hipri));
1140 /* Try to get unumber if relevant. */
1141 #define ESYND_ERRORS (CHAFSR_IVC | CHAFSR_IVU | \
1142 CHAFSR_CPC | CHAFSR_CPU | \
1143 CHAFSR_UE | CHAFSR_CE | \
1144 CHAFSR_EDC | CHAFSR_EDU | \
1145 CHAFSR_UCC | CHAFSR_UCU | \
1146 CHAFSR_WDU | CHAFSR_WDC)
1147 #define MSYND_ERRORS (CHAFSR_EMC | CHAFSR_EMU)
1148 if (afsr & ESYND_ERRORS) {
1152 syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
1153 syndrome = cheetah_ecc_syntab[syndrome];
1154 ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
1156 printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
1157 (recoverable ? KERN_WARNING : KERN_CRIT),
1158 smp_processor_id(), unum);
1159 } else if (afsr & MSYND_ERRORS) {
1163 syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
1164 syndrome = cheetah_mtag_syntab[syndrome];
1165 ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
1167 printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
1168 (recoverable ? KERN_WARNING : KERN_CRIT),
1169 smp_processor_id(), unum);
1172 /* Now dump the cache snapshots. */
1173 printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
1174 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1175 (int) info->dcache_index,
1179 printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1180 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1181 info->dcache_data[0],
1182 info->dcache_data[1],
1183 info->dcache_data[2],
1184 info->dcache_data[3]);
1185 printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
1186 "u[%016lx] l[%016lx]\n",
1187 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1188 (int) info->icache_index,
1193 info->icache_lower);
1194 printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
1195 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1196 info->icache_data[0],
1197 info->icache_data[1],
1198 info->icache_data[2],
1199 info->icache_data[3]);
1200 printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
1201 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1202 info->icache_data[4],
1203 info->icache_data[5],
1204 info->icache_data[6],
1205 info->icache_data[7]);
1206 printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
1207 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1208 (int) info->ecache_index, info->ecache_tag);
1209 printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1210 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1211 info->ecache_data[0],
1212 info->ecache_data[1],
1213 info->ecache_data[2],
1214 info->ecache_data[3]);
1216 afsr = (afsr & ~hipri) & cheetah_afsr_errors;
1217 while (afsr != 0UL) {
1218 unsigned long bit = cheetah_get_hipri(afsr);
1220 printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
1221 (recoverable ? KERN_WARNING : KERN_CRIT),
1222 bit, cheetah_get_string(bit));
1228 printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
1231 static int cheetah_recheck_errors(struct cheetah_err_info *logp)
1233 unsigned long afsr, afar;
1236 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1239 if ((afsr & cheetah_afsr_errors) != 0) {
1241 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1249 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1251 : : "r" (afsr), "i" (ASI_AFSR));
1256 void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1258 struct cheetah_err_info local_snapshot, *p;
1262 cheetah_flush_ecache();
1264 p = cheetah_get_error_log(afsr);
1266 prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
1268 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1269 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1273 /* Grab snapshot of logged error. */
1274 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1276 /* If the current trap snapshot does not match what the
1277 * trap handler passed along into our args, big trouble.
1278 * In such a case, mark the local copy as invalid.
1280 * Else, it matches and we mark the afsr in the non-local
1281 * copy as invalid so we may log new error traps there.
1283 if (p->afsr != afsr || p->afar != afar)
1284 local_snapshot.afsr = CHAFSR_INVALID;
1286 p->afsr = CHAFSR_INVALID;
1288 cheetah_flush_icache();
1289 cheetah_flush_dcache();
1291 /* Re-enable I-cache/D-cache */
1292 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1293 "or %%g1, %1, %%g1\n\t"
1294 "stxa %%g1, [%%g0] %0\n\t"
1297 : "i" (ASI_DCU_CONTROL_REG),
1298 "i" (DCU_DC | DCU_IC)
1301 /* Re-enable error reporting */
1302 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1303 "or %%g1, %1, %%g1\n\t"
1304 "stxa %%g1, [%%g0] %0\n\t"
1307 : "i" (ASI_ESTATE_ERROR_EN),
1308 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1311 /* Decide if we can continue after handling this trap and
1312 * logging the error.
1315 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1318 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1319 * error was logged while we had error reporting traps disabled.
1321 if (cheetah_recheck_errors(&local_snapshot)) {
1322 unsigned long new_afsr = local_snapshot.afsr;
1324 /* If we got a new asynchronous error, die... */
1325 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1326 CHAFSR_WDU | CHAFSR_CPU |
1327 CHAFSR_IVU | CHAFSR_UE |
1328 CHAFSR_BERR | CHAFSR_TO))
1333 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1336 panic("Irrecoverable Fast-ECC error trap.\n");
1338 /* Flush E-cache to kick the error trap handlers out. */
1339 cheetah_flush_ecache();
1342 /* Try to fix a correctable error by pushing the line out from
1343 * the E-cache. Recheck error reporting registers to see if the
1344 * problem is intermittent.
1346 static int cheetah_fix_ce(unsigned long physaddr)
1348 unsigned long orig_estate;
1349 unsigned long alias1, alias2;
1352 /* Make sure correctable error traps are disabled. */
1353 __asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
1354 "andn %0, %1, %%g1\n\t"
1355 "stxa %%g1, [%%g0] %2\n\t"
1357 : "=&r" (orig_estate)
1358 : "i" (ESTATE_ERROR_CEEN),
1359 "i" (ASI_ESTATE_ERROR_EN)
1362 /* We calculate alias addresses that will force the
1363 * cache line in question out of the E-cache. Then
1364 * we bring it back in with an atomic instruction so
1365 * that we get it in some modified/exclusive state,
1366 * then we displace it again to try and get proper ECC
1367 * pushed back into the system.
1369 physaddr &= ~(8UL - 1UL);
1370 alias1 = (ecache_flush_physbase +
1371 (physaddr & ((ecache_flush_size >> 1) - 1)));
1372 alias2 = alias1 + (ecache_flush_size >> 1);
1373 __asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
1374 "ldxa [%1] %3, %%g0\n\t"
1375 "casxa [%2] %3, %%g0, %%g0\n\t"
1376 "membar #StoreLoad | #StoreStore\n\t"
1377 "ldxa [%0] %3, %%g0\n\t"
1378 "ldxa [%1] %3, %%g0\n\t"
1381 : "r" (alias1), "r" (alias2),
1382 "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1384 /* Did that trigger another error? */
1385 if (cheetah_recheck_errors(NULL)) {
1386 /* Try one more time. */
1387 __asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
1389 : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1390 if (cheetah_recheck_errors(NULL))
1395 /* No new error, intermittent problem. */
1399 /* Restore error enables. */
1400 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1402 : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
1407 /* Return non-zero if PADDR is a valid physical memory address. */
1408 static int cheetah_check_main_memory(unsigned long paddr)
1410 unsigned long vaddr = PAGE_OFFSET + paddr;
1412 if (vaddr > (unsigned long) high_memory)
1415 return kern_addr_valid(vaddr);
1418 void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1420 struct cheetah_err_info local_snapshot, *p;
1421 int recoverable, is_memory;
1423 p = cheetah_get_error_log(afsr);
1425 prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
1427 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1428 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1432 /* Grab snapshot of logged error. */
1433 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1435 /* If the current trap snapshot does not match what the
1436 * trap handler passed along into our args, big trouble.
1437 * In such a case, mark the local copy as invalid.
1439 * Else, it matches and we mark the afsr in the non-local
1440 * copy as invalid so we may log new error traps there.
1442 if (p->afsr != afsr || p->afar != afar)
1443 local_snapshot.afsr = CHAFSR_INVALID;
1445 p->afsr = CHAFSR_INVALID;
1447 is_memory = cheetah_check_main_memory(afar);
1449 if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
1450 /* XXX Might want to log the results of this operation
1451 * XXX somewhere... -DaveM
1453 cheetah_fix_ce(afar);
1457 int flush_all, flush_line;
1459 flush_all = flush_line = 0;
1460 if ((afsr & CHAFSR_EDC) != 0UL) {
1461 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
1465 } else if ((afsr & CHAFSR_CPC) != 0UL) {
1466 if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
1472 /* Trap handler only disabled I-cache, flush it. */
1473 cheetah_flush_icache();
1475 /* Re-enable I-cache */
1476 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1477 "or %%g1, %1, %%g1\n\t"
1478 "stxa %%g1, [%%g0] %0\n\t"
1481 : "i" (ASI_DCU_CONTROL_REG),
1486 cheetah_flush_ecache();
1487 else if (flush_line)
1488 cheetah_flush_ecache_line(afar);
1491 /* Re-enable error reporting */
1492 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1493 "or %%g1, %1, %%g1\n\t"
1494 "stxa %%g1, [%%g0] %0\n\t"
1497 : "i" (ASI_ESTATE_ERROR_EN),
1498 "i" (ESTATE_ERROR_CEEN)
1501 /* Decide if we can continue after handling this trap and
1502 * logging the error.
1505 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1508 /* Re-check AFSR/AFAR */
1509 (void) cheetah_recheck_errors(&local_snapshot);
1512 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1515 panic("Irrecoverable Correctable-ECC error trap.\n");
1518 void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1520 struct cheetah_err_info local_snapshot, *p;
1521 int recoverable, is_memory;
1524 /* Check for the special PCI poke sequence. */
1525 if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
1526 cheetah_flush_icache();
1527 cheetah_flush_dcache();
1529 /* Re-enable I-cache/D-cache */
1530 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1531 "or %%g1, %1, %%g1\n\t"
1532 "stxa %%g1, [%%g0] %0\n\t"
1535 : "i" (ASI_DCU_CONTROL_REG),
1536 "i" (DCU_DC | DCU_IC)
1539 /* Re-enable error reporting */
1540 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1541 "or %%g1, %1, %%g1\n\t"
1542 "stxa %%g1, [%%g0] %0\n\t"
1545 : "i" (ASI_ESTATE_ERROR_EN),
1546 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1549 (void) cheetah_recheck_errors(NULL);
1551 pci_poke_faulted = 1;
1553 regs->tnpc = regs->tpc + 4;
1558 p = cheetah_get_error_log(afsr);
1560 prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
1562 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1563 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1567 /* Grab snapshot of logged error. */
1568 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1570 /* If the current trap snapshot does not match what the
1571 * trap handler passed along into our args, big trouble.
1572 * In such a case, mark the local copy as invalid.
1574 * Else, it matches and we mark the afsr in the non-local
1575 * copy as invalid so we may log new error traps there.
1577 if (p->afsr != afsr || p->afar != afar)
1578 local_snapshot.afsr = CHAFSR_INVALID;
1580 p->afsr = CHAFSR_INVALID;
1582 is_memory = cheetah_check_main_memory(afar);
1585 int flush_all, flush_line;
1587 flush_all = flush_line = 0;
1588 if ((afsr & CHAFSR_EDU) != 0UL) {
1589 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
1593 } else if ((afsr & CHAFSR_BERR) != 0UL) {
1594 if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
1600 cheetah_flush_icache();
1601 cheetah_flush_dcache();
1603 /* Re-enable I/D caches */
1604 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1605 "or %%g1, %1, %%g1\n\t"
1606 "stxa %%g1, [%%g0] %0\n\t"
1609 : "i" (ASI_DCU_CONTROL_REG),
1610 "i" (DCU_IC | DCU_DC)
1614 cheetah_flush_ecache();
1615 else if (flush_line)
1616 cheetah_flush_ecache_line(afar);
1619 /* Re-enable error reporting */
1620 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1621 "or %%g1, %1, %%g1\n\t"
1622 "stxa %%g1, [%%g0] %0\n\t"
1625 : "i" (ASI_ESTATE_ERROR_EN),
1626 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1629 /* Decide if we can continue after handling this trap and
1630 * logging the error.
1633 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1636 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1637 * error was logged while we had error reporting traps disabled.
1639 if (cheetah_recheck_errors(&local_snapshot)) {
1640 unsigned long new_afsr = local_snapshot.afsr;
1642 /* If we got a new asynchronous error, die... */
1643 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1644 CHAFSR_WDU | CHAFSR_CPU |
1645 CHAFSR_IVU | CHAFSR_UE |
1646 CHAFSR_BERR | CHAFSR_TO))
1651 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1653 /* "Recoverable" here means we try to yank the page from ever
1654 * being newly used again. This depends upon a few things:
1655 * 1) Must be main memory, and AFAR must be valid.
1656 * 2) If we trapped from user, OK.
1657 * 3) Else, if we trapped from kernel we must find exception
1658 * table entry (ie. we have to have been accessing user
1661 * If AFAR is not in main memory, or we trapped from kernel
1662 * and cannot find an exception table entry, it is unacceptable
1663 * to try and continue.
1665 if (recoverable && is_memory) {
1666 if ((regs->tstate & TSTATE_PRIV) == 0UL) {
1667 /* OK, usermode access. */
1670 const struct exception_table_entry *entry;
1672 entry = search_exception_tables(regs->tpc);
1674 /* OK, kernel access to userspace. */
1678 /* BAD, privileged state is corrupted. */
1683 if (pfn_valid(afar >> PAGE_SHIFT))
1684 get_page(pfn_to_page(afar >> PAGE_SHIFT));
1688 /* Only perform fixup if we still have a
1689 * recoverable condition.
1692 regs->tpc = entry->fixup;
1693 regs->tnpc = regs->tpc + 4;
1702 panic("Irrecoverable deferred error trap.\n");
1705 /* Handle a D/I cache parity error trap. TYPE is encoded as:
1707 * Bit0: 0=dcache,1=icache
1708 * Bit1: 0=recoverable,1=unrecoverable
1710 * The hardware has disabled both the I-cache and D-cache in
1711 * the %dcr register.
1713 void cheetah_plus_parity_error(int type, struct pt_regs *regs)
1716 __cheetah_flush_icache();
1718 cheetah_plus_zap_dcache_parity();
1719 cheetah_flush_dcache();
1721 /* Re-enable I-cache/D-cache */
1722 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1723 "or %%g1, %1, %%g1\n\t"
1724 "stxa %%g1, [%%g0] %0\n\t"
1727 : "i" (ASI_DCU_CONTROL_REG),
1728 "i" (DCU_DC | DCU_IC)
1732 printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1734 (type & 0x1) ? 'I' : 'D',
1736 printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
1737 panic("Irrecoverable Cheetah+ parity error.");
1740 printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1742 (type & 0x1) ? 'I' : 'D',
1744 printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
1747 struct sun4v_error_entry {
1752 #define SUN4V_ERR_TYPE_UNDEFINED 0
1753 #define SUN4V_ERR_TYPE_UNCORRECTED_RES 1
1754 #define SUN4V_ERR_TYPE_PRECISE_NONRES 2
1755 #define SUN4V_ERR_TYPE_DEFERRED_NONRES 3
1756 #define SUN4V_ERR_TYPE_WARNING_RES 4
1759 #define SUN4V_ERR_ATTRS_PROCESSOR 0x00000001
1760 #define SUN4V_ERR_ATTRS_MEMORY 0x00000002
1761 #define SUN4V_ERR_ATTRS_PIO 0x00000004
1762 #define SUN4V_ERR_ATTRS_INT_REGISTERS 0x00000008
1763 #define SUN4V_ERR_ATTRS_FPU_REGISTERS 0x00000010
1764 #define SUN4V_ERR_ATTRS_USER_MODE 0x01000000
1765 #define SUN4V_ERR_ATTRS_PRIV_MODE 0x02000000
1766 #define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000
1774 static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
1775 static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
1777 static const char *sun4v_err_type_to_str(u32 type)
1780 case SUN4V_ERR_TYPE_UNDEFINED:
1782 case SUN4V_ERR_TYPE_UNCORRECTED_RES:
1783 return "uncorrected resumable";
1784 case SUN4V_ERR_TYPE_PRECISE_NONRES:
1785 return "precise nonresumable";
1786 case SUN4V_ERR_TYPE_DEFERRED_NONRES:
1787 return "deferred nonresumable";
1788 case SUN4V_ERR_TYPE_WARNING_RES:
1789 return "warning resumable";
1795 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
1799 printk("%s: Reporting on cpu %d\n", pfx, cpu);
1800 printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
1802 ent->err_handle, ent->err_stick,
1804 sun4v_err_type_to_str(ent->err_type));
1805 printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
1808 ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
1810 ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
1812 ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
1814 ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
1815 "integer-regs" : ""),
1816 ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
1818 ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
1820 ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
1822 ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
1823 "queue-full" : ""));
1824 printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
1826 ent->err_raddr, ent->err_size, ent->err_cpu);
1830 if ((cnt = atomic_read(ocnt)) != 0) {
1831 atomic_set(ocnt, 0);
1833 printk("%s: Queue overflowed %d times.\n",
1838 /* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1839 * Log the event and clear the first word of the entry.
1841 void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
1843 struct sun4v_error_entry *ent, local_copy;
1844 struct trap_per_cpu *tb;
1845 unsigned long paddr;
1850 tb = &trap_block[cpu];
1851 paddr = tb->resum_kernel_buf_pa + offset;
1854 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1856 /* We have a local copy now, so release the entry. */
1857 ent->err_handle = 0;
1862 if (ent->err_type == SUN4V_ERR_TYPE_WARNING_RES) {
1863 /* If err_type is 0x4, it's a powerdown request. Do
1864 * not do the usual resumable error log because that
1865 * makes it look like some abnormal error.
1867 printk(KERN_INFO "Power down request...\n");
1868 kill_cad_pid(SIGINT, 1);
1872 sun4v_log_error(regs, &local_copy, cpu,
1873 KERN_ERR "RESUMABLE ERROR",
1874 &sun4v_resum_oflow_cnt);
1877 /* If we try to printk() we'll probably make matters worse, by trying
1878 * to retake locks this cpu already holds or causing more errors. So
1879 * just bump a counter, and we'll report these counter bumps above.
1881 void sun4v_resum_overflow(struct pt_regs *regs)
1883 atomic_inc(&sun4v_resum_oflow_cnt);
1886 /* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1887 * Log the event, clear the first word of the entry, and die.
1889 void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
1891 struct sun4v_error_entry *ent, local_copy;
1892 struct trap_per_cpu *tb;
1893 unsigned long paddr;
1898 tb = &trap_block[cpu];
1899 paddr = tb->nonresum_kernel_buf_pa + offset;
1902 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1904 /* We have a local copy now, so release the entry. */
1905 ent->err_handle = 0;
1911 /* Check for the special PCI poke sequence. */
1912 if (pci_poke_in_progress && pci_poke_cpu == cpu) {
1913 pci_poke_faulted = 1;
1915 regs->tnpc = regs->tpc + 4;
1920 sun4v_log_error(regs, &local_copy, cpu,
1921 KERN_EMERG "NON-RESUMABLE ERROR",
1922 &sun4v_nonresum_oflow_cnt);
1924 panic("Non-resumable error.");
1927 /* If we try to printk() we'll probably make matters worse, by trying
1928 * to retake locks this cpu already holds or causing more errors. So
1929 * just bump a counter, and we'll report these counter bumps above.
1931 void sun4v_nonresum_overflow(struct pt_regs *regs)
1933 /* XXX Actually even this may not make much sense. Perhaps
1934 * XXX we should just pull the plug and panic directly from here?
1936 atomic_inc(&sun4v_nonresum_oflow_cnt);
1939 unsigned long sun4v_err_itlb_vaddr;
1940 unsigned long sun4v_err_itlb_ctx;
1941 unsigned long sun4v_err_itlb_pte;
1942 unsigned long sun4v_err_itlb_error;
1944 void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
1947 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1949 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
1951 printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
1952 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
1953 printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
1954 (void *) regs->u_regs[UREG_I7]);
1955 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
1956 "pte[%lx] error[%lx]\n",
1957 sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
1958 sun4v_err_itlb_pte, sun4v_err_itlb_error);
1963 unsigned long sun4v_err_dtlb_vaddr;
1964 unsigned long sun4v_err_dtlb_ctx;
1965 unsigned long sun4v_err_dtlb_pte;
1966 unsigned long sun4v_err_dtlb_error;
1968 void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
1971 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1973 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
1975 printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
1976 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
1977 printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
1978 (void *) regs->u_regs[UREG_I7]);
1979 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
1980 "pte[%lx] error[%lx]\n",
1981 sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
1982 sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
1987 void hypervisor_tlbop_error(unsigned long err, unsigned long op)
1989 printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
1993 void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
1995 printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
1999 void do_fpe_common(struct pt_regs *regs)
2001 if (regs->tstate & TSTATE_PRIV) {
2002 regs->tpc = regs->tnpc;
2005 unsigned long fsr = current_thread_info()->xfsr[0];
2008 if (test_thread_flag(TIF_32BIT)) {
2009 regs->tpc &= 0xffffffff;
2010 regs->tnpc &= 0xffffffff;
2012 info.si_signo = SIGFPE;
2014 info.si_addr = (void __user *)regs->tpc;
2016 info.si_code = __SI_FAULT;
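/* Bits 16:14 of %fsr hold the floating point trap type; a value of
 * one means an IEEE 754 exception, and the cexc field (bits 4:0)
 * tells us which condition was raised.
 */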
2017 if ((fsr & 0x1c000) == (1 << 14)) {
2019 info.si_code = FPE_FLTINV;
2020 else if (fsr & 0x08)
2021 info.si_code = FPE_FLTOVF;
2022 else if (fsr & 0x04)
2023 info.si_code = FPE_FLTUND;
2024 else if (fsr & 0x02)
2025 info.si_code = FPE_FLTDIV;
2026 else if (fsr & 0x01)
2027 info.si_code = FPE_FLTRES;
2029 force_sig_info(SIGFPE, &info, current);
2033 void do_fpieee(struct pt_regs *regs)
2035 if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
2036 0, 0x24, SIGFPE) == NOTIFY_STOP)
2039 do_fpe_common(regs);
2042 extern int do_mathemu(struct pt_regs *, struct fpustate *);
2044 void do_fpother(struct pt_regs *regs)
2046 struct fpustate *f = FPUSTATE;
2049 if (notify_die(DIE_TRAP, "fpu exception other", regs,
2050 0, 0x25, SIGFPE) == NOTIFY_STOP)
2053 switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
2054 case (2 << 14): /* unfinished_FPop */
2055 case (3 << 14): /* unimplemented_FPop */
2056 ret = do_mathemu(regs, f);
2061 do_fpe_common(regs);
2064 void do_tof(struct pt_regs *regs)
2068 if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
2069 0, 0x26, SIGEMT) == NOTIFY_STOP)
2072 if (regs->tstate & TSTATE_PRIV)
2073 die_if_kernel("Penguin overflow trap from kernel mode", regs);
2074 if (test_thread_flag(TIF_32BIT)) {
2075 regs->tpc &= 0xffffffff;
2076 regs->tnpc &= 0xffffffff;
2078 info.si_signo = SIGEMT;
2080 info.si_code = EMT_TAGOVF;
2081 info.si_addr = (void __user *)regs->tpc;
2083 force_sig_info(SIGEMT, &info, current);
void do_div0(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "integer division by zero", regs,
		       0, 0x28, SIGFPE) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("TL0: Kernel divide by zero.", regs);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = FPE_INTDIV;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGFPE, &info, current);
}

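/* Dump the nine instruction words around the faulting PC (three before,
 * the faulting word, five after).  The user-space variant has to fetch
 * the words with copy_from_user() and silently gives up if the PC is
 * unmapped or misaligned.
 */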
static void instruction_dump(unsigned int *pc)
{
	int i;

	if ((((unsigned long) pc) & 3))
		return;

	printk("Instruction DUMP:");
	for (i = -3; i < 6; i++)
		printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
	printk("\n");
}

static void user_instruction_dump(unsigned int __user *pc)
{
	int i;
	unsigned int buf[9];

	if ((((unsigned long) pc) & 3))
		return;

	if (copy_from_user(buf, pc - 3, sizeof(buf)))
		return;

	printk("Instruction DUMP:");
	for (i = 0; i < 9; i++)
		printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
	printk("\n");
}

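/* Walk the kernel stack and print a call trace.  On sparc64 the saved
 * frame pointers carry the 2047 byte stack bias, so STACK_BIAS is added
 * back at every step; trap frames embedded in the stack are recognized
 * and unwound through their pt_regs.  The walk is capped at 16 frames.
 */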
void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
	unsigned long fp, thread_base, ksp;
	struct thread_info *tp;
	int count = 0;

	ksp = (unsigned long) _ksp;
	if (!tsk)
		tsk = current;
	tp = task_thread_info(tsk);
	if (ksp == 0) {
		if (tsk == current)
			asm("mov %%fp, %0" : "=r" (ksp));
		else
			ksp = tp->ksp;
	}
	if (tp == current_thread_info())
		flushw_all();

	fp = ksp + STACK_BIAS;
	thread_base = (unsigned long) tp;

	printk("Call Trace:\n");
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(tp, fp))
			break;
		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(tp, regs)) {
			if (!(regs->tstate & TSTATE_PRIV))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}

		printk(" [%016lx] %pS\n", pc, (void *) pc);
	} while (++count < 16);
}

void dump_stack(void)
{
	show_stack(current, NULL);
}

EXPORT_SYMBOL(dump_stack);

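/* Back-trace helpers for die_if_kernel(): only follow a saved register
 * window if it lies inside the task's own stack pages.  Addresses below
 * PAGE_OFFSET are tolerated only for init_task, whose stack sits in the
 * kernel image rather than in the linearly mapped area.
 */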
static inline int is_kernel_stack(struct task_struct *task,
				  struct reg_window *rw)
{
	unsigned long rw_addr = (unsigned long) rw;
	unsigned long thread_base, thread_end;

	if (rw_addr < PAGE_OFFSET) {
		if (task != &init_task)
			return 0;
	}

	thread_base = (unsigned long) task_stack_page(task);
	thread_end = thread_base + sizeof(union thread_union);
	if (rw_addr >= thread_base &&
	    rw_addr < thread_end &&
	    !(rw_addr & 0x7UL))
		return 1;

	return 0;
}

static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
{
	unsigned long fp = rw->ins[6];

	if (!fp)
		return NULL;

	return (struct reg_window *) (fp + STACK_BIAS);
}

void die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;
	int count = 0;

	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	__asm__ __volatile__("flushw");
	show_regs(regs);
	add_taint(TAINT_DIE);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		while (rw &&
		       count++ < 30 &&
		       is_kernel_stack(current, rw)) {
			printk("Caller[%016lx]: %pS\n", rw->ins[7],
			       (void *) rw->ins[7]);

			rw = kernel_stack_up(rw);
		}
		instruction_dump ((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump ((unsigned int __user *) regs->tpc);
	}
	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}

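/* Matches format-3 instructions with op = 2 and op3 = 0x36, i.e. the
 * IMPDEP1 opcode space that the VIS instructions live in; sun4v kernels
 * emulate these in software via vis_emul() below.
 */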
#define VIS_OPCODE_MASK	((0x3 << 30) | (0x3f << 19))
#define VIS_OPCODE_VAL	((0x2 << 30) | (0x36 << 19))

extern int handle_popc(u32 insn, struct pt_regs *regs);
extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);

void do_illegal_instruction(struct pt_regs *regs)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	siginfo_t info;

	if (notify_die(DIE_TRAP, "illegal instruction", regs,
		       0, 0x10, SIGILL) == NOTIFY_STOP)
		return;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("Kernel illegal instruction", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
			if (handle_popc(insn, regs))
				return;
		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
			if (handle_ldf_stq(insn, regs))
				return;
		} else if (tlb_type == hypervisor) {
			if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
				if (!vis_emul(regs, insn))
					return;
			} else {
				struct fpustate *f = FPUSTATE;

				/* XXX maybe verify XFSR bits like
				 * XXX do_fpother() does?
				 */
				if (do_mathemu(regs, f))
					return;
			}
		}
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *)pc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}

extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);

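/* Unaligned accesses: kernel-mode faults are handed to the fixup code in
 * kernel_unaligned_trap(); user-mode faults simply get SIGBUS with
 * BUS_ADRALN and the offending address.
 */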
void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
		return;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}

void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
		return;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}

void do_privop(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "privileged operation", regs,
		       0, 0x11, SIGILL) == NOTIFY_STOP)
		return;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}

void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}

/* Trap level 1 stuff or other traps we should never see... */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}

void do_cee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Cache Error Exception", regs);
}

void do_dae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Data Access Exception", regs);
}

void do_iae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Instruction Access Exception", regs);
}

void do_div0_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}

void do_fpdis_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Disabled", regs);
}

void do_fpieee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}

void do_fpother_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}

void do_ill_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}

void do_irq_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}

void do_lddfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}

void do_stdfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}

void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}

void do_paw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}

void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}

void do_vaw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}

void do_tof_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}

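/* Software trap used to read a v8-style PSR: build one from TSTATE with
 * tstate_to_psr(), hand it back in what will be the caller's %o0, and
 * step the PC past the trap instruction.
 */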
void do_getpsr(struct pt_regs *regs)
{
	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}

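/* One entry per possible CPU, indexed by hard cpu id.  The low-level
 * trap entry code addresses these entries at fixed offsets, which is
 * exactly what the TRAP_PER_CPU_* sanity checks in trap_init() verify.
 */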
struct trap_per_cpu trap_block[NR_CPUS];

/* This can get invoked before sched_init() so play it super safe
 * and use hard_smp_processor_id().
 */
void init_cur_cpu_trap(struct thread_info *t)
{
	int cpu = hard_smp_processor_id();
	struct trap_per_cpu *p = &trap_block[cpu];

	p->thread = t;
	p->pgd_paddr = 0;
}

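/* Deliberately never defined anywhere: if one of the compile-time offset
 * checks in trap_init() fails, the call to one of these survives constant
 * folding and the final link breaks, turning a structure-layout mismatch
 * into a build error.
 */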
extern void thread_info_offsets_are_bolixed_dave(void);
extern void trap_per_cpu_offsets_are_bolixed_dave(void);
extern void tsb_config_offsets_are_bolixed_dave(void);

/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check. */
	if (TI_TASK != offsetof(struct thread_info, task) ||
	    TI_FLAGS != offsetof(struct thread_info, flags) ||
	    TI_CPU != offsetof(struct thread_info, cpu) ||
	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
	    TI_KSP != offsetof(struct thread_info, ksp) ||
	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
	    TI_KREGS != offsetof(struct thread_info, kregs) ||
	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
	    TI_GSR != offsetof(struct thread_info, gsr) ||
	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
	    TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
	    TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
	    TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
	    (TI_FPREGS & (64 - 1)))
		thread_info_offsets_are_bolixed_dave();

	if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
	    (TRAP_PER_CPU_PGD_PADDR !=
	     offsetof(struct trap_per_cpu, pgd_paddr)) ||
	    (TRAP_PER_CPU_CPU_MONDO_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
	    (TRAP_PER_CPU_DEV_MONDO_PA !=
	     offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_FAULT_INFO !=
	     offsetof(struct trap_per_cpu, fault_info)) ||
	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
	    (TRAP_PER_CPU_CPU_LIST_PA !=
	     offsetof(struct trap_per_cpu, cpu_list_pa)) ||
	    (TRAP_PER_CPU_TSB_HUGE !=
	     offsetof(struct trap_per_cpu, tsb_huge)) ||
	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
	     offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
	    (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
	     offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
	    (TRAP_PER_CPU_CPU_MONDO_QMASK !=
	     offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
	    (TRAP_PER_CPU_DEV_MONDO_QMASK !=
	     offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
	    (TRAP_PER_CPU_RESUM_QMASK !=
	     offsetof(struct trap_per_cpu, resum_qmask)) ||
	    (TRAP_PER_CPU_NONRESUM_QMASK !=
	     offsetof(struct trap_per_cpu, nonresum_qmask)))
		trap_per_cpu_offsets_are_bolixed_dave();

	if ((TSB_CONFIG_TSB !=
	     offsetof(struct tsb_config, tsb)) ||
	    (TSB_CONFIG_RSS_LIMIT !=
	     offsetof(struct tsb_config, tsb_rss_limit)) ||
	    (TSB_CONFIG_NENTRIES !=
	     offsetof(struct tsb_config, tsb_nentries)) ||
	    (TSB_CONFIG_REG_VAL !=
	     offsetof(struct tsb_config, tsb_reg_val)) ||
	    (TSB_CONFIG_MAP_VADDR !=
	     offsetof(struct tsb_config, tsb_map_vaddr)) ||
	    (TSB_CONFIG_MAP_PTE !=
	     offsetof(struct tsb_config, tsb_map_pte)))
		tsb_config_offsets_are_bolixed_dave();

	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
}