/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

int direct_gbpages __meminitdata
#ifdef CONFIG_DIRECT_GBPAGES
                                = 1
#endif
;

static int __init parse_direct_gbpages_off(char *arg)
{
        direct_gbpages = 0;
        return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
        direct_gbpages = 1;
        return 0;
}
early_param("gbpages", parse_direct_gbpages_on);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

void show_mem(void)
{
        long i, total = 0, reserved = 0;
        long shared = 0, cached = 0;
        struct page *page;
        pg_data_t *pgdat;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        /*
                         * This loop can take a while with 256 GB and
                         * 4k pages so defer the NMI watchdog:
                         */
                        if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
                                touch_nmi_watchdog();

                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;

                        page = pfn_to_page(pgdat->node_start_pfn + i);
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk(KERN_INFO "%lu pages of RAM\n",          total);
        printk(KERN_INFO "%lu reserved pages\n",        reserved);
        printk(KERN_INFO "%lu pages shared\n",          shared);
        printk(KERN_INFO "%lu pages swap cached\n",     cached);
}

int after_bootmem;

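/*
 * Allocate one zeroed, page-aligned page for the helper page tables
 * built by set_pte_phys().  Early in boot this has to come from the
 * bootmem allocator; once the page allocator is up, a GFP_ATOMIC
 * allocation is used, presumably because callers may not sleep.
 */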
static __init void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);

        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
                panic("set_pte_phys: cannot allocate page data %s\n",
                        after_bootmem ? "after bootmem" : "");
        }

        pr_debug("spp_getpage %p\n", ptr);

        return ptr;
}

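/*
 * Install a single 4k kernel mapping for vaddr: walk the pgd, pud and
 * pmd, allocating intermediate page tables through spp_getpage() where
 * they are missing, then write the pte and flush that one TLB entry.
 * The pgd entry itself must already have been set up (in head.S).
 */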
static __init void
set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, new_pte;

        pr_debug("set_pte_phys %lx to %lx\n", vaddr, phys);

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_ERR
                        "PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                pmd = (pmd_t *) spp_getpage();
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
                if (pmd != pmd_offset(pud, 0)) {
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
                                pmd, pmd_offset(pud, 0));
                        return;
                }
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                pte = (pte_t *) spp_getpage();
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
                if (pte != pte_offset_kernel(pmd, 0)) {
                        printk(KERN_ERR "PAGETABLE BUG #02!\n");
                        return;
                }
        }
        new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

        pte = pte_offset_kernel(pmd, vaddr);
        if (!pte_none(*pte) && pte_val(new_pte) &&
            pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
                pte_ERROR(*pte);
        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_addr holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _end.  _end is
 * rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
        unsigned long vaddr = __START_KERNEL_map;
        unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1;
        pmd_t *pmd = level2_kernel_pgt;
        pmd_t *last_pmd = pmd + PTRS_PER_PMD;

        for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
                if (vaddr < (unsigned long) _text || vaddr > end)
                        set_pmd(pmd, __pmd(0));
        }
}

/* NOTE: this is meant to be run only at boot */
void __init __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk(KERN_ERR "Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, prot);
}

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;

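/*
 * Hand out one zeroed page for the early direct-mapping page tables.
 * Before bootmem is available the pages come from the physically
 * contiguous [table_start, table_end) window reserved by
 * find_early_table_space() and are mapped temporarily with
 * early_ioremap(); callers must release them with unmap_low_page().
 * After bootmem it falls back to get_zeroed_page().
 */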
static __meminit void *alloc_low_page(unsigned long *phys)
{
        unsigned long pfn = table_end++;
        void *adr;

        if (after_bootmem) {
                adr = (void *)get_zeroed_page(GFP_ATOMIC);
                *phys = __pa(adr);

                return adr;
        }

        if (pfn >= end_pfn)
                panic("alloc_low_page: ran out of memory");

        adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        *phys  = pfn * PAGE_SIZE;
        return adr;
}

static __meminit void unmap_low_page(void *adr)
{
        if (after_bootmem)
                return;

        early_iounmap(adr, PAGE_SIZE);
}

/* Must run before zap_low_mappings */
__meminit void *early_ioremap(unsigned long addr, unsigned long size)
{
        pmd_t *pmd, *last_pmd;
        unsigned long vaddr;
        int i, pmds;

        pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
        vaddr = __START_KERNEL_map;
        pmd = level2_kernel_pgt;
        last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;

        for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
                for (i = 0; i < pmds; i++) {
                        if (pmd_present(pmd[i]))
                                goto continue_outer_loop;
                }
                vaddr += addr & ~PMD_MASK;
                addr &= PMD_MASK;

                for (i = 0; i < pmds; i++, addr += PMD_SIZE)
                        set_pmd(pmd+i, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
                __flush_tlb_all();

                return (void *)vaddr;
continue_outer_loop:
                ;
        }
        printk(KERN_ERR "early_ioremap(0x%lx, %lu) failed\n", addr, size);

        return NULL;
}

/*
 * To avoid virtual aliases later:
 */
__meminit void early_iounmap(void *addr, unsigned long size)
{
        unsigned long vaddr;
        pmd_t *pmd;
        int i, pmds;

        vaddr = (unsigned long)addr;
        pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
        pmd = level2_kernel_pgt + pmd_index(vaddr);

        for (i = 0; i < pmds; i++)
                pmd_clear(pmd + i);

        __flush_tlb_all();
}

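/*
 * Create 2MB page table entries in one pmd page for the physical range
 * [address, end).  Already populated entries are left untouched; the
 * count of newly created large pages is passed to update_page_count()
 * so the kernel can report how much of the direct mapping uses each
 * page size.
 */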
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
        unsigned long pages = 0;

        int i = pmd_index(address);

        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
                pmd_t *pmd = pmd_page + pmd_index(address);

                if (address >= end) {
                        if (!after_bootmem) {
                                for (; i < PTRS_PER_PMD; i++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                        }
                        break;
                }

                if (pmd_val(*pmd))
                        continue;

                pages++;
                set_pte((pte_t *)pmd,
                        pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
        }
        update_page_count(PG_LEVEL_2M, pages);
        return address;
}

static unsigned long __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
        pmd_t *pmd = pmd_offset(pud, 0);
        unsigned long last_map_addr;

        spin_lock(&init_mm.page_table_lock);
        last_map_addr = phys_pmd_init(pmd, address, end);
        spin_unlock(&init_mm.page_table_lock);
        __flush_tlb_all();
        return last_map_addr;
}

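/*
 * Fill one pud page with mappings for the physical range [addr, end).
 * With gbpages enabled, 1GB entries are written directly; otherwise a
 * pmd page is allocated and handed to phys_pmd_init().  Returns the
 * last mapped physical address as a pfn.
 */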
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
        unsigned long pages = 0;
        unsigned long last_map_addr = end;
        int i = pud_index(addr);

        for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
                unsigned long pmd_phys;
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;

                if (addr >= end)
                        break;

                if (!after_bootmem &&
                                !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                if (pud_val(*pud)) {
                        if (!pud_large(*pud))
                                last_map_addr = phys_pmd_update(pud, addr, end);
                        continue;
                }

                if (direct_gbpages) {
                        pages++;
                        set_pte((pte_t *)pud,
                                pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
                        last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
                        continue;
                }

                pmd = alloc_low_page(&pmd_phys);

                spin_lock(&init_mm.page_table_lock);
                set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
                last_map_addr = phys_pmd_init(pmd, addr, end);
                spin_unlock(&init_mm.page_table_lock);

                unmap_low_page(pmd);
        }
        __flush_tlb_all();
        update_page_count(PG_LEVEL_1G, pages);

        return last_map_addr >> PAGE_SHIFT;
}

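/*
 * Reserve a low-memory window large enough for the worst-case kernel
 * direct-mapping page tables: one 8-byte pud entry per 1GB of mapped
 * space and, unless gbpages is in use, one 8-byte pmd entry per 2MB,
 * each rounded up to whole pages.  The window is carved out of the
 * e820 map starting at 0x8000.
 */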
static void __init find_early_table_space(unsigned long end)
{
        unsigned long puds, pmds, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
        if (!direct_gbpages) {
                pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
                tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
        }

        /*
         * RED-PEN putting page tables only on node 0 could
         * cause a hotspot and fill up ZONE_DMA. The page tables
         * need roughly 0.5KB per GB.
         */
        start = 0x8000;
        table_start = find_e820_area(start, end, tables, PAGE_SIZE);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;

        early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, table_start << PAGE_SHIFT,
                (table_start << PAGE_SHIFT) + tables);
}

static void __init init_gbpages(void)
{
        if (direct_gbpages && cpu_has_gbpages)
                printk(KERN_INFO "Using GB pages for direct mapping\n");
        else
                direct_gbpages = 0;
}

#ifdef CONFIG_MEMTEST_BOOTPARAM

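/*
 * Fill [start_phys, start_phys + size) with one test pattern, read it
 * back, and hand any ranges that fail verification to reserve_early()
 * so bad RAM never reaches the page allocator.  Patterns: 0 = all
 * zeroes, 1 = all ones, 2/3 = alternating 0x55/0xaa bytes.
 */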
static void __init memtest(unsigned long start_phys, unsigned long size,
                                 unsigned pattern)
{
        unsigned long i;
        unsigned long *start;
        unsigned long start_bad;
        unsigned long last_bad;
        unsigned long val;
        unsigned long start_phys_aligned;
        unsigned long count;
        unsigned long incr;

        switch (pattern) {
        case 0:
                val = 0UL;
                break;
        case 1:
                val = -1UL;
                break;
        case 2:
                val = 0x5555555555555555UL;
                break;
        case 3:
                val = 0xaaaaaaaaaaaaaaaaUL;
                break;
        default:
                return;
        }

        incr = sizeof(unsigned long);
        start_phys_aligned = ALIGN(start_phys, incr);
        count = (size - (start_phys_aligned - start_phys))/incr;
        start = __va(start_phys_aligned);
        start_bad = 0;
        last_bad = 0;

        for (i = 0; i < count; i++)
                start[i] = val;
        for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
                if (*start != val) {
                        if (start_phys_aligned == last_bad + incr) {
                                last_bad += incr;
                        } else {
                                if (start_bad) {
                                        printk(KERN_CONT "\n  %016lx bad mem addr %016lx - %016lx reserved",
                                                val, start_bad, last_bad + incr);
                                        reserve_early(start_bad, last_bad + incr, "BAD RAM");
                                }
                                start_bad = last_bad = start_phys_aligned;
                        }
                }
        }
        if (start_bad) {
                printk(KERN_CONT "\n  %016lx bad mem addr %016lx - %016lx reserved",
                        val, start_bad, last_bad + incr);
                reserve_early(start_bad, last_bad + incr, "BAD RAM");
        }
}

static int memtest_pattern __initdata = CONFIG_MEMTEST_BOOTPARAM_VALUE;

static int __init parse_memtest(char *arg)
{
        if (arg)
                memtest_pattern = simple_strtoul(arg, NULL, 0);
        return 0;
}

early_param("memtest", parse_memtest);

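/*
 * Walk every free e820 RAM area between start and end and run memtest()
 * over it, once per requested pattern.  memtest_pattern comes from the
 * "memtest=" boot parameter (default CONFIG_MEMTEST_BOOTPARAM_VALUE);
 * zero disables the test entirely.
 */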
static void __init early_memtest(unsigned long start, unsigned long end)
{
        u64 t_start, t_size;
        unsigned pattern;

        if (!memtest_pattern)
                return;

        printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern);
        for (pattern = 0; pattern < memtest_pattern; pattern++) {
                t_start = start;
                t_size = 0;
                while (t_start < end) {
                        t_start = find_e820_area_size(t_start, &t_size, 1);

                        /* done ? */
                        if (t_start >= end)
                                break;
                        if (t_start + t_size > end)
                                t_size = end - t_start;

                        printk(KERN_CONT "\n  %016llx - %016llx pattern %d",
                                (unsigned long long)t_start,
                                (unsigned long long)t_start + t_size, pattern);

                        memtest(t_start, t_size, pattern);

                        t_start += t_size;
                }
        }
        printk(KERN_CONT "\n");
}
#else
static void __init early_memtest(unsigned long start, unsigned long end)
{
}
#endif

/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned long end)
{
        unsigned long next, last_map_addr = end;
        unsigned long start_phys = start, end_phys = end;

        printk(KERN_INFO "init_memory_mapping\n");

        /*
         * Find space for the kernel direct mapping tables.
         *
         * Later we should allocate these tables in the local node of the
         * memory mapped. Unfortunately this is done currently before the
         * nodes are discovered.
         */
        if (!after_bootmem) {
                init_gbpages();
                find_early_table_space(end);
        }

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                pgd_t *pgd = pgd_offset_k(start);
                unsigned long pud_phys;
                pud_t *pud;

                if (after_bootmem)
                        pud = pud_offset(pgd, start & PGDIR_MASK);
                else
                        pud = alloc_low_page(&pud_phys);

                next = start + PGDIR_SIZE;
                if (next > end)
                        next = end;
                last_map_addr = phys_pud_init(pud, __pa(start), __pa(next));
                if (!after_bootmem)
                        set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
                unmap_low_page(pud);
        }

        if (!after_bootmem)
                mmu_cr4_features = read_cr4();
        __flush_tlb_all();

        if (!after_bootmem)
                reserve_early(table_start << PAGE_SHIFT,
                                 table_end << PAGE_SHIFT, "PGTABLE");

        if (!after_bootmem)
                early_memtest(start_phys, end_phys);

        return last_map_addr;
}

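/*
 * Without NUMA all memory sits on node 0: record the DMA, DMA32 and
 * NORMAL zone boundaries and hand the zone layout to the core mm.
 */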
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
        max_zone_pfns[ZONE_NORMAL] = end_pfn;

        memory_present(0, 0, end_pfn);
        sparse_init();
        free_area_init_nodes(max_zone_pfns);
}
#endif

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is always added to the NORMAL zone. This means you will never
 * get additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
        unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        last_mapped_pfn = init_memory_mapping(start, start + size-1);
        if (last_mapped_pfn > max_pfn_mapped)
                max_pfn_mapped = last_mapped_pfn;

        ret = __add_pages(zone, start_pfn, nr_pages);
        WARN_ON(ret);

        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
        return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X, DOSEMU and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these
 * contain the PCI mmio resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
        if (pagenr <= 256)
                return 1;
        if (!page_is_ram(pagenr))
                return 1;
        return 0;
}

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
                         kcore_modules, kcore_vsyscall;

void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;

        pci_iommu_alloc();

        /* clear_bss() already cleared the empty_zero_page */

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif
        reservedpages = end_pfn - totalram_pages -
                                        absent_pages_in_range(0, end_pfn);
        after_bootmem = 1;

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);
        kclist_add(&kcore_kernel, &_stext, _end - _stext);
        kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                                 VSYSCALL_END - VSYSCALL_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
                                "%ldk reserved, %ldk data, %ldk init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                end_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);

        cpa_init();
}

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr = begin;

        if (addr >= end)
                return;

        /*
         * If debugging page accesses then do not free this memory but
         * mark them not present - any buggy init-section access will
         * create a kernel page fault:
         */
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

        for (; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)(addr & ~(PAGE_SIZE-1)),
                        POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
#endif
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);

        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);

        /*
         * The rodata section (but not the kernel text!) should also be
         * not-executable.
         */
        start = ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
        set_memory_nx(start, (end - start) >> PAGE_SHIFT);

        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
        set_memory_rw(start, (end-start) >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: again\n");
        set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
}

#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
        int nid, next_nid;
#endif
        unsigned long pfn = phys >> PAGE_SHIFT;

        if (pfn >= end_pfn) {
                /*
                 * This can happen with kdump kernels when accessing
                 * firmware tables:
                 */
                if (pfn < max_pfn_mapped)
                        return;

                printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
                                phys, len);
                return;
        }

        /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
        nid = phys_to_nid(phys);
        next_nid = phys_to_nid(phys + len - 1);
        if (nid == next_nid)
                reserve_bootmem_node(NODE_DATA(nid), phys, len, BOOTMEM_DEFAULT);
        else
                reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
#else
        reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
#endif

        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
                dma_reserve += len / PAGE_SIZE;
                set_dma_reserve(dma_reserve);
        }
}

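/*
 * Report whether a kernel virtual address is backed by a present page
 * table entry.  The initial check rejects non-canonical addresses: the
 * bits above __VIRTUAL_MASK_SHIFT must be a sign extension, i.e. all
 * zeroes or all ones.
 */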
int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
        .vm_start       = VSYSCALL_START,
        .vm_end         = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
        .vm_page_prot   = PAGE_READONLY_EXEC,
        .vm_flags       = VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(tsk, TIF_IA32))
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(task);

        if (!vma)
                return 0;

        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives:
 */
int in_gate_area_no_task(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        if (vma == &gate_vma)
                return "[vsyscall]";
        return NULL;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

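/*
 * Back the struct page array for 'size' pages starting at start_page
 * with node-local 2MB blocks.  The addr_start/p_start bookkeeping above
 * merely batches the "PMD ->" debug output for physically contiguous
 * allocations.
 */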
int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
        unsigned long addr = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + size);
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        for (; addr < end; addr = next) {
                next = pmd_addr_end(addr, end);

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        pte_t entry;
                        void *p;

                        p = vmemmap_alloc_block(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;

                        entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                                                        PAGE_KERNEL_LARGE);
                        set_pmd(pmd, __pmd(pte_val(entry)));

                        /* check to see if we have contiguous blocks */
                        if (p_end != p || node_start != node) {
                                if (p_start)
                                        printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                                                addr_start, addr_end-1, p_start, p_end-1, node_start);
                                addr_start = addr;
                                node_start = node;
                                p_start = p;
                        }
                        addr_end = addr + PMD_SIZE;
                        p_end = p + PMD_SIZE;
                } else {
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
                }
        }
        return 0;
}

void __meminit vmemmap_populate_print_last(void)
{
        if (p_start) {
                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                        addr_start, addr_end-1, p_start, p_end-1, node_start);
                p_start = NULL;
                p_end = NULL;
                node_start = 0;
        }
}
#endif