2 * linux/arch/x86_64/entry.S
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
6 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
12 * entry.S contains the system-call and fault low-level handling routines.
14 * NOTE: This code handles signal-recognition, which happens every time
15 * after an interrupt and after each system call.
17 * Normal syscalls and interrupts don't save a full stack frame, this is
18 * only done for syscall tracing, signals or fork/exec et al.
20 * A note on terminology:
21 * - top of stack: Architecture defined interrupt frame from SS to RIP
22 * at the top of the kernel process stack.
23 * - partial stack frame: partially saved registers up to R11.
24 * - full stack frame: Like partial stack frame, but all registers saved.
27 * - schedule it carefully for the final hardware.
31 #include <linux/config.h>
32 #include <linux/linkage.h>
33 #include <asm/segment.h>
35 #include <asm/cache.h>
36 #include <asm/errno.h>
37 #include <asm/dwarf2.h>
38 #include <asm/calling.h>
39 #include <asm/asm-offsets.h>
41 #include <asm/unistd.h>
42 #include <asm/thread_info.h>
43 #include <asm/hw_irq.h>
48 #ifndef CONFIG_PREEMPT
49 #define retint_kernel retint_restore_args
53 * C code is not supposed to know about undefined top of stack. Every time
54 * a C function with a pt_regs argument is called from the SYSCALL based
55 * fast path FIXUP_TOP_OF_STACK is needed.
56 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
60 /* %rsp:at FRAMEEND */
61 .macro FIXUP_TOP_OF_STACK tmp
62 movq %gs:pda_oldrsp,\tmp
64 movq $__USER_DS,SS(%rsp)
65 movq $__USER_CS,CS(%rsp)
67 movq R11(%rsp),\tmp /* get eflags */
68 movq \tmp,EFLAGS(%rsp)
71 .macro RESTORE_TOP_OF_STACK tmp,offset=0
72 movq RSP-\offset(%rsp),\tmp
73 movq \tmp,%gs:pda_oldrsp
74 movq EFLAGS-\offset(%rsp),\tmp
75 movq \tmp,R11-\offset(%rsp)
78 .macro FAKE_STACK_FRAME child_rip
79 /* push in order ss, rsp, eflags, cs, rip */
82 CFI_ADJUST_CFA_OFFSET 8
83 /*CFI_REL_OFFSET ss,0*/
85 CFI_ADJUST_CFA_OFFSET 8
87 pushq $(1<<9) /* eflags - interrupts on */
88 CFI_ADJUST_CFA_OFFSET 8
89 /*CFI_REL_OFFSET rflags,0*/
90 pushq $__KERNEL_CS /* cs */
91 CFI_ADJUST_CFA_OFFSET 8
92 /*CFI_REL_OFFSET cs,0*/
93 pushq \child_rip /* rip */
94 CFI_ADJUST_CFA_OFFSET 8
96 pushq %rax /* orig rax */
97 CFI_ADJUST_CFA_OFFSET 8
100 .macro UNFAKE_STACK_FRAME
102 CFI_ADJUST_CFA_OFFSET -(6*8)
105 .macro CFI_DEFAULT_STACK start=1
110 CFI_DEF_CFA_OFFSET SS+8
112 CFI_REL_OFFSET r15,R15
113 CFI_REL_OFFSET r14,R14
114 CFI_REL_OFFSET r13,R13
115 CFI_REL_OFFSET r12,R12
116 CFI_REL_OFFSET rbp,RBP
117 CFI_REL_OFFSET rbx,RBX
118 CFI_REL_OFFSET r11,R11
119 CFI_REL_OFFSET r10,R10
122 CFI_REL_OFFSET rax,RAX
123 CFI_REL_OFFSET rcx,RCX
124 CFI_REL_OFFSET rdx,RDX
125 CFI_REL_OFFSET rsi,RSI
126 CFI_REL_OFFSET rdi,RDI
127 CFI_REL_OFFSET rip,RIP
128 /*CFI_REL_OFFSET cs,CS*/
129 /*CFI_REL_OFFSET rflags,EFLAGS*/
130 CFI_REL_OFFSET rsp,RSP
131 /*CFI_REL_OFFSET ss,SS*/
134 * A newly forked process directly context switches into this.
140 GET_THREAD_INFO(%rcx)
141 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
145 testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
146 je int_ret_from_sys_call
147 testl $_TIF_IA32,threadinfo_flags(%rcx)
148 jnz int_ret_from_sys_call
149 RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
150 jmp ret_from_sys_call
153 call syscall_trace_leave
154 GET_THREAD_INFO(%rcx)
159 * System call entry. Up to 6 arguments in registers are supported.
161 * SYSCALL does not save anything on the stack and does not change the
167 * rax system call number
169 * rcx return address for syscall/sysret, C arg3
172 * r10 arg3 (--> moved to rcx for C)
175 * r11 eflags for syscall/sysret, temporary for C
176 * r12-r15,rbp,rbx saved by C code, not touched.
178 * Interrupts are off on entry.
179 * Only called from user space.
181 * EM64T CPUs have somewhat weird error reporting for non canonical RIPs in SYSRET.
182 * We can't handle any exceptions there because the exception handler would
183 * end up running on the user stack which is unsafe. To avoid problems
184 * any code that might end up with a user touched pt_regs should return
185 * using int_ret_from_sys_call.
187 * XXX if we had a free scratch register we could save the RSP into the stack frame
188 * and report it properly in ps. Unfortunately we haven't.
195 /*CFI_REGISTER rflags,r11*/
197 movq %rsp,%gs:pda_oldrsp
198 movq %gs:pda_kernelstack,%rsp
201 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
202 movq %rcx,RIP-ARGOFFSET(%rsp)
203 CFI_REL_OFFSET rip,RIP-ARGOFFSET
204 GET_THREAD_INFO(%rcx)
205 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
208 cmpq $__NR_syscall_max,%rax
211 call *sys_call_table(,%rax,8) # XXX: rip relative
212 movq %rax,RAX-ARGOFFSET(%rsp)
214 * Syscall return path ending with SYSRET (fast path)
215 * Has incomplete stack frame and undefined top of stack.
217 .globl ret_from_sys_call
219 movl $_TIF_ALLWORK_MASK,%edi
222 GET_THREAD_INFO(%rcx)
224 movl threadinfo_flags(%rcx),%edx
228 movq RIP-ARGOFFSET(%rsp),%rcx
230 RESTORE_ARGS 0,-ARG_SKIP,1
231 /*CFI_REGISTER rflags,r11*/
232 movq %gs:pda_oldrsp,%rsp
236 /* Handle reschedules */
237 /* edx: work, edi: workmask */
240 bt $TIF_NEED_RESCHED,%edx
244 CFI_ADJUST_CFA_OFFSET 8
247 CFI_ADJUST_CFA_OFFSET -8
250 /* Handle a signal */
253 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
256 /* Really a signal */
257 /* edx: work flags (arg3) */
258 leaq do_notify_resume(%rip),%rax
259 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
260 xorl %esi,%esi # oldset -> arg2
261 call ptregscall_common
262 1: movl $_TIF_NEED_RESCHED,%edi
263 /* Stack frame might have been changed. The IRET path does
264 some additional checks to handle this */
268 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
269 jmp ret_from_sys_call
271 /* Do syscall tracing */
275 movq $-ENOSYS,RAX(%rsp)
276 FIXUP_TOP_OF_STACK %rdi
278 call syscall_trace_enter
279 LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
281 cmpq $__NR_syscall_max,%rax
283 movq %r10,%rcx /* fixup for C */
284 call *sys_call_table(,%rax,8)
285 movq %rax,RAX-ARGOFFSET(%rsp)
288 call syscall_trace_leave
289 RESTORE_TOP_OF_STACK %rbx
291 /* Stack frame might have been changed. Use the more careful IRET path */
292 jmp int_ret_from_sys_call
296 * Syscall return path ending with IRET.
297 * Has correct top of stack, but partial stack frame.
299 ENTRY(int_ret_from_sys_call)
301 CFI_DEF_CFA rsp,SS+8-ARGOFFSET
302 /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
303 CFI_REL_OFFSET rsp,RSP-ARGOFFSET
304 /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
305 /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
306 CFI_REL_OFFSET rip,RIP-ARGOFFSET
307 CFI_REL_OFFSET rdx,RDX-ARGOFFSET
308 CFI_REL_OFFSET rcx,RCX-ARGOFFSET
309 CFI_REL_OFFSET rax,RAX-ARGOFFSET
310 CFI_REL_OFFSET rdi,RDI-ARGOFFSET
311 CFI_REL_OFFSET rsi,RSI-ARGOFFSET
312 CFI_REL_OFFSET r8,R8-ARGOFFSET
313 CFI_REL_OFFSET r9,R9-ARGOFFSET
314 CFI_REL_OFFSET r10,R10-ARGOFFSET
315 CFI_REL_OFFSET r11,R11-ARGOFFSET
317 testl $3,CS-ARGOFFSET(%rsp)
318 je retint_restore_args
319 movl $_TIF_ALLWORK_MASK,%edi
320 /* edi: mask to check */
322 GET_THREAD_INFO(%rcx)
323 movl threadinfo_flags(%rcx),%edx
326 andl $~TS_COMPAT,threadinfo_status(%rcx)
329 /* Either reschedule or signal or syscall exit tracking needed. */
330 /* First do a reschedule test. */
331 /* edx: work, edi: workmask */
333 bt $TIF_NEED_RESCHED,%edx
337 CFI_ADJUST_CFA_OFFSET 8
340 CFI_ADJUST_CFA_OFFSET -8
344 /* handle signals and tracing -- both require a full stack frame */
348 /* Check for syscall exit trace */
349 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
352 CFI_ADJUST_CFA_OFFSET 8
353 leaq 8(%rsp),%rdi # &ptregs -> arg1
354 call syscall_trace_leave
356 CFI_ADJUST_CFA_OFFSET -8
357 andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
362 testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
364 movq %rsp,%rdi # &ptregs -> arg1
365 xorl %esi,%esi # oldset -> arg2
366 call do_notify_resume
367 1: movl $_TIF_NEED_RESCHED,%edi
375 * Certain special system calls that need to save a complete full stack frame.
378 .macro PTREGSCALL label,func,arg
381 leaq \func(%rip),%rax
382 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
383 jmp ptregscall_common
388 PTREGSCALL stub_clone, sys_clone, %r8
389 PTREGSCALL stub_fork, sys_fork, %rdi
390 PTREGSCALL stub_vfork, sys_vfork, %rdi
391 PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
392 PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
393 PTREGSCALL stub_iopl, sys_iopl, %rsi
395 ENTRY(ptregscall_common)
397 CFI_ADJUST_CFA_OFFSET -8
398 CFI_REGISTER rip, r11
401 CFI_REGISTER rip, r15
402 FIXUP_TOP_OF_STACK %r11
404 RESTORE_TOP_OF_STACK %r11
406 CFI_REGISTER rip, r11
409 CFI_ADJUST_CFA_OFFSET 8
410 CFI_REL_OFFSET rip, 0
417 CFI_ADJUST_CFA_OFFSET -8
418 CFI_REGISTER rip, r11
420 FIXUP_TOP_OF_STACK %r11
422 RESTORE_TOP_OF_STACK %r11
425 jmp int_ret_from_sys_call
429 * sigreturn is special because it needs to restore all registers on return.
430 * This cannot be done with SYSRET, so use the IRET return path instead.
432 ENTRY(stub_rt_sigreturn)
435 CFI_ADJUST_CFA_OFFSET -8
438 FIXUP_TOP_OF_STACK %r11
439 call sys_rt_sigreturn
440 movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
442 jmp int_ret_from_sys_call
446 * initial frame state for interrupts and exceptions
450 CFI_DEF_CFA rsp,SS+8-\ref
451 /*CFI_REL_OFFSET ss,SS-\ref*/
452 CFI_REL_OFFSET rsp,RSP-\ref
453 /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
454 /*CFI_REL_OFFSET cs,CS-\ref*/
455 CFI_REL_OFFSET rip,RIP-\ref
458 /* initial frame state for interrupts (and exceptions without error code) */
459 #define INTR_FRAME _frame RIP
460 /* initial frame state for exceptions with error code (and interrupts with
461 vector already pushed) */
462 #define XCPT_FRAME _frame ORIG_RAX
465 * Interrupt entry/exit.
467 * Interrupt entry points save only callee clobbered registers in fast path.
469 * Entry runs with interrupts off.
472 /* 0(%rsp): interrupt number */
473 .macro interrupt func
475 #ifdef CONFIG_DEBUG_INFO
479 * Setup a stack frame pointer. This allows gdb to trace
480 * back to the original stack.
483 CFI_DEF_CFA_REGISTER rbp
486 leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
491 1: incl %gs:pda_irqcount # RED-PEN should check preempt count
492 movq %gs:pda_irqstackptr,%rax
493 cmoveq %rax,%rsp /*todo This needs CFI annotation! */
494 pushq %rdi # save old stack
495 #ifndef CONFIG_DEBUG_INFO
496 CFI_ADJUST_CFA_OFFSET 8
501 ENTRY(common_interrupt)
504 /* 0(%rsp): oldrsp-ARGOFFSET */
507 #ifndef CONFIG_DEBUG_INFO
508 CFI_ADJUST_CFA_OFFSET -8
511 decl %gs:pda_irqcount
512 #ifdef CONFIG_DEBUG_INFO
514 CFI_DEF_CFA_REGISTER rsp
516 leaq ARGOFFSET(%rdi),%rsp /*todo This needs CFI annotation! */
518 GET_THREAD_INFO(%rcx)
519 testl $3,CS-ARGOFFSET(%rsp)
522 /* Interrupt came from user space */
524 * Has a correct top of stack, but a partial stack frame
525 * %rcx: thread info. Interrupts off.
527 retint_with_reschedule:
528 movl $_TIF_WORK_MASK,%edi
530 movl threadinfo_flags(%rcx),%edx
542 .section __ex_table,"a"
543 .quad iret_label,bad_iret
546 /* force a signal here? this matches i386 behaviour */
547 /* running with kernel gs */
549 movq $-9999,%rdi /* better code? */
554 /* edi: workmask, edx: work */
557 bt $TIF_NEED_RESCHED,%edx
561 CFI_ADJUST_CFA_OFFSET 8
564 CFI_ADJUST_CFA_OFFSET -8
565 GET_THREAD_INFO(%rcx)
570 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
574 movq $-1,ORIG_RAX(%rsp)
575 xorl %esi,%esi # oldset
576 movq %rsp,%rdi # &pt_regs
577 call do_notify_resume
580 movl $_TIF_NEED_RESCHED,%edi
581 GET_THREAD_INFO(%rcx)
584 #ifdef CONFIG_PREEMPT
585 /* Returning to kernel space. Check if we need preemption */
586 /* rcx: threadinfo. interrupts off. */
589 cmpl $0,threadinfo_preempt_count(%rcx)
590 jnz retint_restore_args
591 bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
592 jnc retint_restore_args
593 bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
594 jnc retint_restore_args
595 call preempt_schedule_irq
603 .macro apicinterrupt num,func
606 CFI_ADJUST_CFA_OFFSET 8
612 ENTRY(thermal_interrupt)
613 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
615 ENTRY(threshold_interrupt)
616 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
619 ENTRY(reschedule_interrupt)
620 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
622 .macro INVALIDATE_ENTRY num
623 ENTRY(invalidate_interrupt\num)
624 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
636 ENTRY(call_function_interrupt)
637 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
640 #ifdef CONFIG_X86_LOCAL_APIC
641 ENTRY(apic_timer_interrupt)
642 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
644 ENTRY(error_interrupt)
645 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
647 ENTRY(spurious_interrupt)
648 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
652 * Exception entry points.
656 pushq $0 /* push error code/oldrax */
657 CFI_ADJUST_CFA_OFFSET 8
658 pushq %rax /* push real oldrax to the rdi slot */
659 CFI_ADJUST_CFA_OFFSET 8
665 .macro errorentry sym
668 CFI_ADJUST_CFA_OFFSET 8
674 /* error code is on the stack already */
675 /* handle NMI like exceptions that can happen everywhere */
676 .macro paranoidentry sym, ist=0
680 movl $MSR_GS_BASE,%ecx
688 movq %gs:pda_data_offset, %rbp
691 movq ORIG_RAX(%rsp),%rsi
692 movq $-1,ORIG_RAX(%rsp)
694 subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
698 addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
704 * Exception entry point. This expects an error code/orig_rax on the stack
705 * and the exception handler in %rax.
709 /* rdi slot contains rax, oldrax contains error code */
712 CFI_ADJUST_CFA_OFFSET (14*8)
714 CFI_REL_OFFSET rsi,RSI
715 movq 14*8(%rsp),%rsi /* load rax from rdi slot */
717 CFI_REL_OFFSET rdx,RDX
719 CFI_REL_OFFSET rcx,RCX
720 movq %rsi,10*8(%rsp) /* store rax */
721 CFI_REL_OFFSET rax,RAX
727 CFI_REL_OFFSET r10,R10
729 CFI_REL_OFFSET r11,R11
731 CFI_REL_OFFSET rbx,RBX
733 CFI_REL_OFFSET rbp,RBP
735 CFI_REL_OFFSET r12,R12
737 CFI_REL_OFFSET r13,R13
739 CFI_REL_OFFSET r14,R14
741 CFI_REL_OFFSET r15,R15
750 movq ORIG_RAX(%rsp),%rsi /* get error code */
751 movq $-1,ORIG_RAX(%rsp)
753 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
758 GET_THREAD_INFO(%rcx)
761 movl threadinfo_flags(%rcx),%edx
762 movl $_TIF_WORK_MASK,%edi
772 /* There are two places in the kernel that can potentially fault with
773 usergs. Handle them here. The exception handlers after
774 iret run with kernel gs again, so don't set the user space flag.
775 * B stepping K8s sometimes report a truncated RIP for IRET
776 exceptions returning to compat mode. Check for these here too. */
777 leaq iret_label(%rip),%rbp
780 movl %ebp,%ebp /* zero extend */
783 cmpq $gs_change,RIP(%rsp)
787 /* Reload gs selector with exception handling */
788 /* edi: new selector */
792 CFI_ADJUST_CFA_OFFSET 8
797 2: mfence /* workaround */
800 CFI_ADJUST_CFA_OFFSET -8
804 .section __ex_table,"a"
806 .quad gs_change,bad_gs
809 /* running with kernelgs */
811 swapgs /* switch back to user gs */
818 * Create a kernel thread.
820 * C extern interface:
821 * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
823 * asm input arguments:
824 * rdi: fn, rsi: arg, rdx: flags
828 FAKE_STACK_FRAME $child_rip
831 # rdi: flags, rsi: usp, rdx: will be &pt_regs
833 orq kernel_thread_flags(%rip),%rdi
846 * It isn't worth to check for reschedule here,
847 * so internally to the x86_64 port you can rely on kernel_thread()
848 * not to reschedule the child before returning, this avoids the need
849 * of hacks for example to fork off the per-CPU idle tasks.
850 * [Hopefully no generic code relies on the reschedule -AK]
860 * Here we are in the child and the registers are set as they were
861 * at kernel_thread() invocation in the parent.
871 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
873 * C extern interface:
874 * extern long execve(char *name, char **argv, char **envp)
876 * asm input arguments:
877 * rdi: name, rsi: argv, rdx: envp
879 * We want to fallback into:
880 * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
882 * do_sys_execve asm fallback arguments:
883 * rdi: name, rsi: argv, rdx: envp, fake frame on the stack
893 je int_ret_from_sys_call
899 KPROBE_ENTRY(page_fault)
900 errorentry do_page_fault
903 ENTRY(coprocessor_error)
904 zeroentry do_coprocessor_error
906 ENTRY(simd_coprocessor_error)
907 zeroentry do_simd_coprocessor_error
909 ENTRY(device_not_available)
910 zeroentry math_state_restore
912 /* runs on exception stack */
916 CFI_ADJUST_CFA_OFFSET 8
917 paranoidentry do_debug, DEBUG_STACK
922 /* runs on exception stack */
926 CFI_ADJUST_CFA_OFFSET 8
929 * "Paranoid" exit path from exception stack.
930 * Paranoid because this is used by NMIs and cannot take
931 * any kernel state for granted.
932 * We don't do kernel preemption checks here, because only
933 * NMI should be common and it does not enable IRQs and
934 * cannot get reschedule ticks.
936 /* ebx: no swapgs flag */
938 testl %ebx,%ebx /* swapgs needed? */
941 jnz paranoid_userspace
948 GET_THREAD_INFO(%rcx)
949 movl threadinfo_flags(%rcx),%ebx
950 andl $_TIF_WORK_MASK,%ebx
952 movq %rsp,%rdi /* &pt_regs */
954 movq %rax,%rsp /* switch stack for scheduling */
955 testl $_TIF_NEED_RESCHED,%ebx
956 jnz paranoid_schedule
957 movl %ebx,%edx /* arg3: thread flags */
959 xorl %esi,%esi /* arg2: oldset */
960 movq %rsp,%rdi /* arg1: &pt_regs */
961 call do_notify_resume
963 jmp paranoid_userspace
968 jmp paranoid_userspace
975 CFI_ADJUST_CFA_OFFSET 8
976 paranoidentry do_int3, DEBUG_STACK
982 zeroentry do_overflow
988 zeroentry do_invalid_op
990 ENTRY(coprocessor_segment_overrun)
991 zeroentry do_coprocessor_segment_overrun
994 zeroentry do_reserved
996 /* runs on exception stack */
999 paranoidentry do_double_fault
1004 errorentry do_invalid_TSS
1006 ENTRY(segment_not_present)
1007 errorentry do_segment_not_present
1009 /* runs on exception stack */
1010 ENTRY(stack_segment)
1012 paranoidentry do_stack_segment
1016 KPROBE_ENTRY(general_protection)
1017 errorentry do_general_protection
1020 ENTRY(alignment_check)
1021 errorentry do_alignment_check
1024 zeroentry do_divide_error
1026 ENTRY(spurious_interrupt_bug)
1027 zeroentry do_spurious_interrupt_bug
1029 #ifdef CONFIG_X86_MCE
1030 /* runs on exception stack */
1031 ENTRY(machine_check)
1034 CFI_ADJUST_CFA_OFFSET 8
1035 paranoidentry do_machine_check
1042 movq %gs:pda_irqstackptr,%rax
1044 CFI_DEF_CFA_REGISTER rdx
1045 incl %gs:pda_irqcount
1048 /*todo CFI_DEF_CFA_EXPRESSION ...*/
1051 CFI_DEF_CFA_REGISTER rsp
1052 decl %gs:pda_irqcount