 * linux/arch/arm/kernel/entry-armv.S
 *
 * Copyright (C) 1996,1997,1998 Russell King.
 * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 * nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Low-level vector interface routines
 *
 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes
 * it to save wrong values...  Be aware!

#include <asm/memory.h>
#include <asm/vfpmacros.h>
#include <asm/arch/entry-macro.S>
#include <asm/thread_notify.h>

#include "entry-header.S"
 * Interrupt handling.  Preserves r7, r8, r9

	.macro	irq_handler
	get_irqnr_preamble r5, lr
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
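	@ (the handler loops: asm_do_IRQ is entered with lr pointing back
	@ at 1b, so pending interrupts are serviced repeatedly until
	@ get_irqnr_and_base reports none left)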
 * this macro assumes that irqstat (r6) and base (r5) are
 * preserved from get_irqnr_and_base above

	test_for_ipi r0, r6, r5, lr

#ifdef CONFIG_LOCAL_TIMERS
	test_for_ltirq r0, r6, r5, lr
#endif
 * Invalid mode handlers

	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE

__pabt_invalid:
	inv_entry BAD_PREFETCH

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@ XXX fall through to common_invalid

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	add	r0, sp, #S_PC			@ here for interlock avoidance
	mov	r7, #-1				@  ""   ""    ""        ""
	str	r4, [sp]			@ save preserved r0
	stmia	r0, {r5 - r7}			@ lr_<exception>,
						@ cpsr_<exception>, "old_r0"
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
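/*
 * SPFIX() emits its argument only when the EABI 8-byte stack alignment
 * fix is in effect (AEABI on ARMv5+); the SVC entry code below uses it
 * to realign sp after the exception frame has been allocated, and to
 * undo that adjustment when computing the saved value of sp.
 */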
	sub	sp, sp, #S_FRAME_SIZE
 SPFIX(	tst	sp, #4		)
 SPFIX(	bicne	sp, sp, #4	)

	add	r5, sp, #S_SP			@ here for interlock avoidance
	mov	r4, #-1				@  ""  ""      ""       ""
	add	r0, sp, #S_FRAME_SIZE		@  ""  ""      ""       ""
 SPFIX(	addne	r0, r0, #4	)
	str	r1, [sp]			@ save the "real" r0 copied
						@ from the exception stack
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)

	@ get ready to re-enable interrupts if appropriate
	biceq	r9, r9, #PSR_I_BIT

	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@ set desired IRQ state, then call main handler

	@ IRQs off again before pulling preserved data off the stack

	@ restore SPSR and restart the instruction
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
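	@ (with pc in the register list, the ^ also restores cpsr from
	@ spsr_svc, so this one instruction both reloads the registers
	@ and returns from the exception)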
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
preempt_return:
	ldr	r0, [tsk, #TI_PREEMPT]		@ read preempt value
	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
	teq	r0, r7
	strne	r0, [r0, -r0]			@ bug()
#endif
	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
	msr	spsr_cxsf, r0
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r0, #PSR_I_BIT
	bleq	trace_hardirqs_on
#endif
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#ifdef CONFIG_PREEMPT
svc_preempt:
	teq	r8, #0				@ was preempt count = 0
	ldreq	r6, .LCirq_stat
	movne	pc, lr				@ no
	ldr	r0, [r6, #4]			@ local_irq_count
	ldr	r1, [r6, #8]			@ local_bh_count
	adds	r0, r0, r1
	movne	pc, lr
	mov	r7, #0				@ preempt_schedule_irq
	str	r7, [tsk, #TI_PREEMPT]		@ expects preempt_count == 0
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	beq	preempt_return			@ done
	b	1b				@ go again
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@ IRQs off again before pulling preserved data off the stack

	@ restore SPSR and restart the instruction
	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
	ldmia	sp, {r0 - pc}^			@ Restore SVC registers
	@ re-enable interrupts if appropriate
	biceq	r9, r9, #PSR_I_BIT

	@ set args, then call main handler
	@
	@  r0 - address of faulting instruction
	@  r1 - pointer to registers on stack
	@
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler

	@ IRQs off again before pulling preserved data off the stack

	@ restore SPSR and restart the instruction
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#ifdef CONFIG_PREEMPT
.LCirq_stat:
	.word	irq_stat
#endif

 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE
 * should be a multiple of 8 as well.

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
	.macro	usr_entry
	sub	sp, sp, #S_FRAME_SIZE

	add	r0, sp, #S_PC			@ here for interlock avoidance
	mov	r4, #-1				@  ""  ""     ""        ""

	str	r1, [sp]			@ save the "real" r0 copied
						@ from the exception stack
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr

	@ Enable the alignment trap while in kernel mode

	@ Clear FP to mark the first stack frame
	.macro	kuser_cmpxchg_check
#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r2, #TASK_SIZE
	blhs	kuser_cmpxchg_fixup
#endif
#endif
	.endm
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.

	@ IRQs on, then call the main handler
	adr	lr, ret_from_exception
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_PREEMPT]
	str	r8, [tsk, #TI_PREEMPT]
	teq	r0, r7
	strne	r0, [r0, -r0]
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_unknown		@ ignore FP

	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction

	adr	r9, ret_from_exception
	adr	lr, __und_usr_unknown

	@ fallthrough to call_fpe
 * The out of line fixup for the ldrt above.

	.section .fixup, "ax"
	.section __ex_table,"a"
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure.
 *  lr  = unrecognised instruction return address
	adr	r6, .LCneon_opcodes
2:
	ldr	r7, [r6], #4			@ mask value
	cmp	r7, #0				@ end mask?
	beq	1f
	and	r8, r0, r7
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
call_fpe:
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
	strb	r7, [r6, r8, lsr #8]		@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
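	@ (rsbs leaves C set only for CP#0/1; movcss then shifts
	@ TIF_USING_IWMMXT into C, so the branch is taken only when an
	@ iWMMXt-using task touched those coprocessors)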
#endif
	add	pc, pc, r8, lsr #6
	mov	r0, r0

	mov	pc, lr				@ CP#0
	b	do_fpe				@ CP#1 (FPE)
	b	do_fpe				@ CP#2 (FPE)
	mov	pc, lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	mov	pc, lr				@ CP#4
	mov	pc, lr				@ CP#5
	mov	pc, lr				@ CP#6
#endif
	mov	pc, lr				@ CP#7
	mov	pc, lr				@ CP#8
	mov	pc, lr				@ CP#9
#ifdef CONFIG_VFP
	b	do_vfp				@ CP#10 (VFP)
	b	do_vfp				@ CP#11 (VFP)
#else
	mov	pc, lr				@ CP#10 (VFP)
	mov	pc, lr				@ CP#11 (VFP)
#endif
	mov	pc, lr				@ CP#12
	mov	pc, lr				@ CP#13
	mov	pc, lr				@ CP#14 (Debug)
	mov	pc, lr				@ CP#15 (Control)
.LCneon_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
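	@ (scanned by the loop above: an instruction is NEON when
	@ insn & mask == opcode; the all-zero mask terminates the table)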
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point
 * The FP module is called with these registers set:
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
	adr	lr, ret_from_exception

	enable_irq				@ Enable interrupts
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
 * This is the return code to user mode for abort handlers
ENTRY(ret_from_exception)

 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
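 *
 * For reference, a sketch of the C-level call site (the actual caller
 * is the switch_to() macro in asm/system.h; the exact form may vary):
 *
 *	last = __switch_to(prev, task_thread_info(prev),
 *			   task_thread_info(next));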
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_32v6K
	clrex
#else
	strex	r5, r4, [ip]			@ Clear exclusive monitor
#endif
#endif
#if defined(CONFIG_HAS_TLS_REG)
	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)
	mov	r4, #0xffff0fff
	str	r3, [r4, #-15]			@ TLS val at 0xffff0ff0
#endif
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
 * These are segments of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory.  This is used to provide user space
 * with some operations which require kernel help because of unimplemented
 * native features and/or instructions in many ARM CPUs. The idea is for
 * this code to be executed directly in user mode for best efficiency but
 * which is too intimate with the kernel counterpart to be left to user
 * libraries. In fact this code might even differ from one CPU to another
 * depending on the available instruction set and restrictions like on
 * SMP systems. In other words, the kernel reserves the right to change
 * this code as needed without warning. Only the entry points and their
 * results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if such
 * resulting binaries are already going to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here.  In other words don't make binaries unable to run on
 * earlier processors just for the sake of not using these kernel helpers
 * if your compiled code is not going to use the new instructions for other
 * purpose.
	.macro	usr_ret, rd
#ifdef CONFIG_ARM_THUMB
	bx	\rd
#else
	mov	pc, \rd
#endif
	.endm

	.globl	__kuser_helper_start
__kuser_helper_start:
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *	        : : : "r0", "lr","cc" )
__kuser_memory_barrier:				@ 0xffff0fa0

#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
	mcr	p15, 0, r0, c7, c10, 5		@ dmb
#endif
	usr_ret	lr
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 * Notes:
 *
 * - This routine already includes memory barriers as needed.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	mov	r7, #0xff00			@ 0xfff0 into r7 for EABI
	orr	r7, r7, #0xf0
	swi	#0x9ffff0
	ldmfd	sp!, {r7, pc}
#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr
kuser_cmpxchg_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r2 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r2, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
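	@ (after the sub, r7 holds the user space address of 1b in the
	@ vector page; subs/rsbcss then perform an unsigned range check
	@ of r2 against [1b, 2b] entirely in the carry flag)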
#else
#warning "NPTL on non MMU needs fixing"
#endif

#else

#ifdef CONFIG_SMP
	mcr	p15, 0, r0, c7, c10, 5		@ dmb
#endif
	/* beware -- each __kuser slot must be 8 instructions max */

	b	__kuser_memory_barrier
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *	        : "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
__kuser_get_tls:				@ 0xffff0fe0

#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
	ldr	r0, [pc, #(16 - 8)]		@ TLS stored at 0xffff0ff0
#else
	mrc	p15, 0, r0, c13, c0, 3		@ read TLS register
#endif
	.word	0				@ pad up to __kuser_helper_version
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
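 *
 * This could be used as follows (an illustrative sketch only;
 * MIN_VERSION_NEEDED stands for whatever version first provided all
 * the helpers the application relies on):
 *
 *	if (__kernel_helper_version < MIN_VERSION_NEEDED)
 *		fail("kernel too old for required user helpers");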
__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}			@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]			@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
	mov	r0, sp
	ldr	lr, [pc, lr, lsl #2]
	movs	pc, lr				@ branch to handler in SVC mode
	.endm
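	@ (movs pc, lr copies spsr_<exception>, prepared above to select
	@ SVC mode, back into cpsr while branching, so the handler is
	@ entered in SVC32 mode)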
 * Interrupt dispatcher

	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC

	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC

	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC

	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f
/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
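	@ (stubs_offset compensates for relocation: the stubs are copied
	@ to 0xffff0200 and the vectors to 0xffff0000, so a branch
	@ assembled below as "b vector_xxx + stubs_offset" lands on the
	@ relocated stub)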
	.globl	__vectors_start
__vectors_start:
	swi	SYS_ERROR0
	b	vector_und + stubs_offset
	ldr	pc, .LCvswi + stubs_offset
	b	vector_pabt + stubs_offset
	b	vector_dabt + stubs_offset
	b	vector_addrexcptn + stubs_offset
	b	vector_irq + stubs_offset
	b	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.globl	cr_no_alignment