3 #include <asm/ptrace.h>
4 #include <asm/system.h>
5 #include <asm/pgtable.h>
7 #include <asm-generic/vmlinux.lds.h>
10 VMLINUX_SYMBOL(__start_ivt_text) = .; \
12 VMLINUX_SYMBOL(__end_ivt_text) = .;
/* Emit a little-endian 64-bit ELF object for IA-64. */
14 OUTPUT_FORMAT("elf64-ia64-little")
/* Program header for the unwind tables; given numerically because ld has no
 * symbolic name for the IA-64-specific p_type PT_IA_64_UNWIND (0x70000001). */
23 unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
27 /* Sections to be discarded */
32 *(.IA_64.unwind.exit.text)
33 *(.IA_64.unwind_info.exit.text)
36 v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
/* Physical address of the kernel entry point: the virtual _start minus the
 * constant virtual-to-physical displacement LOAD_OFFSET (the same offset the
 * AT() clauses below subtract for every allocated section). */
37 phys_start = _start - LOAD_OFFSET;
45 .text : AT(ADDR(.text) - LOAD_OFFSET)
54 .text.head : AT(ADDR(.text.head) - LOAD_OFFSET)
56 .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
59 .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET)
66 NOTES :code :note /* put .notes in text and mark in PT_NOTE */
67 code_continues : {} :code /* switch back to regular program... */
71 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET)
73 __start___ex_table = .;
75 __stop___ex_table = .;
80 __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET)
82 __start___mca_table = .;
84 __stop___mca_table = .;
87 .data.patch.phys_stack_reg : AT(ADDR(.data.patch.phys_stack_reg) - LOAD_OFFSET)
89 __start___phys_stack_reg_patchlist = .;
90 *(.data.patch.phys_stack_reg)
91 __end___phys_stack_reg_patchlist = .;
97 /* Unwind info & table: */
/* Unwind descriptor blobs; the wildcard also collects per-section variants
 * such as .IA_64.unwind_info.text.  AT() places the section's load (physical)
 * address LOAD_OFFSET below its link (virtual) address. */
99 .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET)
100 { *(.IA_64.unwind_info*) }
101 .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET)
107 code_continues2 : {} : code
111 .opd : AT(ADDR(.opd) - LOAD_OFFSET)
114 /* Initialization code and data: */
116 . = ALIGN(PAGE_SIZE);
118 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET)
125 .init.data : AT(ADDR(.init.data) - LOAD_OFFSET)
128 #ifdef CONFIG_BLK_DEV_INITRD
129 .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET)
131 __initramfs_start = .;
138 .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET)
144 .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET)
146 __initcall_start = .;
151 .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
153 __start___vtop_patchlist = .;
155 __end___vtop_patchlist = .;
158 .data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET)
160 __start___rse_patchlist = .;
162 __end___rse_patchlist = .;
165 .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
167 __start___mckinley_e9_bundles = .;
168 *(.data.patch.mckinley_e9)
169 __end___mckinley_e9_bundles = .;
172 #if defined(CONFIG_IA64_GENERIC)
175 .machvec : AT(ADDR(.machvec) - LOAD_OFFSET)
/* Console initcall table: the start/end symbols bracket everything collected
 * from .con_initcall.init input sections -- presumably an array of init
 * function pointers walked once at boot; verify against the core init code. */
184 __con_initcall_start = .;
185 .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
186 { *(.con_initcall.init) }
187 __con_initcall_end = .;
/* Security-module initcall table, bracketed the same way. */
188 __security_initcall_start = .;
189 .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET)
190 { *(.security_initcall.init) }
191 __security_initcall_end = .;
/* Round up so the following section starts on a page boundary. */
192 . = ALIGN(PAGE_SIZE);
195 /* The initial task and kernel stack */
/* NOTE(review): per the section name this holds the initial task structure
 * and its kernel stack; any alignment/size constraints are applied on lines
 * outside this view -- confirm against the full script. */
196 .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET)
197 { *(.data.init_task) }
199 .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
200 { *(__special_page_section)
201 __start_gate_section = .;
203 __stop_gate_section = .;
205 . = ALIGN(PAGE_SIZE);
206 __xen_start_gate_section = .;
208 __xen_stop_gate_section = .;
211 . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose
/* Data declared __read_mostly is grouped here, separated from frequently
 * written data (intent per the section name). */
215 .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET)
216 { *(.data.read_mostly) }
/* Cacheline-aligned data; the ALIGN() directive itself is presumably on a
 * line not visible in this chunk -- verify. */
218 .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET)
219 { *(.data.cacheline_aligned) }
223 . = ALIGN(PERCPU_PAGE_SIZE);
224 __phys_per_cpu_start = .;
225 .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
229 *(.data.percpu.shared_aligned)
232 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
233 * into percpu page size
237 .data : AT(ADDR(.data) - LOAD_OFFSET)
240 . = ALIGN(PERCPU_PAGE_SIZE);
242 . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
250 . = ALIGN(16); /* gp must be 16-byte aligned for exc. table */
/* Global offset table (PLT entries first). */
251 .got : AT(ADDR(.got) - LOAD_OFFSET)
252 { *(.got.plt) *(.got) }
/* Bias gp 2MB past the start of .got so the signed 22-bit gp-relative
 * immediates of the IA-64 software conventions can reach a 4MB window
 * covering both .got and the short-data sections that follow. */
253 __gp = ADDR(.got) + 0x200000;
254 /* We want the small data sections together, so single-instruction offsets
255 can access them all, and initialized data all before uninitialized, so
256 we can shorten the on-disk segment size. */
257 .sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
258 { *(.sdata) *(.sdata1) *(.srdata) }
/* Small (gp-relative) zero-initialized data... */
261 .sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
262 { *(.sbss) *(.scommon) }
/* ...then regular zero-initialized data and commons, occupying no file space. */
263 .bss : AT(ADDR(.bss) - LOAD_OFFSET)
264 { *(.bss) *(COMMON) }
/* The explicit address of 0 on the debug sections below marks them as
 * non-allocated: they live in the ELF file for debuggers but are never
 * loaded into memory. */
270 /* Stabs debugging sections. */
271 .stab 0 : { *(.stab) }
272 .stabstr 0 : { *(.stabstr) }
273 .stab.excl 0 : { *(.stab.excl) }
274 .stab.exclstr 0 : { *(.stab.exclstr) }
275 .stab.index 0 : { *(.stab.index) }
276 .stab.indexstr 0 : { *(.stab.indexstr) }
277 /* DWARF debug sections.
278 Symbols in the DWARF debugging sections are relative to the beginning
279 of the section so we begin them at 0. */
281 .debug 0 : { *(.debug) }
282 .line 0 : { *(.line) }
283 /* GNU DWARF 1 extensions */
284 .debug_srcinfo 0 : { *(.debug_srcinfo) }
285 .debug_sfnames 0 : { *(.debug_sfnames) }
286 /* DWARF 1.1 and DWARF 2 */
287 .debug_aranges 0 : { *(.debug_aranges) }
288 .debug_pubnames 0 : { *(.debug_pubnames) }
290 .debug_info 0 : { *(.debug_info) }
291 .debug_abbrev 0 : { *(.debug_abbrev) }
292 .debug_line 0 : { *(.debug_line) }
293 .debug_frame 0 : { *(.debug_frame) }
294 .debug_str 0 : { *(.debug_str) }
295 .debug_loc 0 : { *(.debug_loc) }
296 .debug_macinfo 0 : { *(.debug_macinfo) }
297 /* SGI/MIPS DWARF 2 extensions */
298 .debug_weaknames 0 : { *(.debug_weaknames) }
299 .debug_funcnames 0 : { *(.debug_funcnames) }
300 .debug_typenames 0 : { *(.debug_typenames) }
301 .debug_varnames 0 : { *(.debug_varnames) }
302 /* These must appear regardless of . */
/* Drop compiler-version strings and generic .note sections from the image. */
303 /DISCARD/ : { *(.comment) }
304 /DISCARD/ : { *(.note) }