/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/sections.h>

#include "mmu_decl.h"
#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE 0	/* XXX for now */
#define CPU_FTR_NOEXECUTE       0
#endif
int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;
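
/*
 * page_is_ram() reports whether a pfn lies in real memory rather than
 * in an I/O or other non-RAM physical range.  On 32-bit a simple bound
 * check against the end of lowmem suffices; on 64-bit we must walk the
 * lmb regions, since memory may be discontiguous.
 */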
int page_is_ram(unsigned long pfn)
{
        unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64	/* XXX for now */
        return paddr < __pa(high_memory);
#else
        int i;

        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long base = lmb.memory.region[i].base;

                if ((paddr >= base) &&
                    (paddr < (base + lmb.memory.region[i].size)))
                        return 1;
        }

        return 0;
#endif
}
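
/*
 * Unless the platform overrides this via ppc_md.phys_mem_access_prot,
 * mappings of non-RAM physical addresses (typically device memory
 * accessed through /dev/mem) are made guarded and non-cached.
 */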
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (ppc_md.phys_mem_access_prot)
                return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

        if (!page_is_ram(pfn))
                vma_prot = __pgprot(pgprot_val(vma_prot)
                                    | _PAGE_GUARDED | _PAGE_NO_CACHE);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#ifdef CONFIG_MEMORY_HOTPLUG

void online_page(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalram_pages++;
        num_physpages++;
}
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
        return hot_add_scn_to_nid(start);
}
#endif
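
/*
 * arch_add_memory() maps a newly added physical range into the kernel
 * linear mapping and hands its pages to the core VM.  Note the zone
 * chosen is node_zones[0]; as the comment in the body says, that is
 * adequate for most non-highmem platforms.
 */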
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata;
        struct zone *zone;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        pgdata = NODE_DATA(nid);

        start = (unsigned long)__va(start);
        create_section_mapping(start, start + size);

        /* this should work for most non-highmem platforms */
        zone = pgdata->node_zones;

        return __add_pages(zone, start_pfn, nr_pages);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
int remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn, end_pfn;
        int ret;

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = start_pfn + (size >> PAGE_SHIFT);
        ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
        if (ret)
                goto out;
        /* Arch-specific calls go here - next patch */
out:
        return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
/*
 * walk_memory_resource() needs to make sure there are no holes in a given
 * memory range.  On PPC64, since this range comes from /sysfs, the range
 * is guaranteed to be valid, non-overlapping and cannot contain any
 * holes.  By the time we get here (memory add or remove), /proc/device-tree
 * is updated and correct.  The only reason we would need to check against
 * the device-tree is if we allowed user-land to specify a memory range
 * through a system call/ioctl etc. instead of doing offline/online through
 * /sysfs.
 */
int
walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
                        int (*func)(unsigned long, unsigned long, void *))
{
        return (*func)(start_pfn, nr_pages, arg);
}
EXPORT_SYMBOL_GPL(walk_memory_resource);

#endif /* CONFIG_MEMORY_HOTPLUG */
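
/*
 * Print a summary of memory usage: total, highmem, reserved, shared and
 * swap-cached page counts.  Each node is walked with its pgdat resize
 * lock held so memory hotplug cannot change the spanned range under us.
 */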
void show_mem(void)
{
        unsigned long total = 0, reserved = 0;
        unsigned long shared = 0, cached = 0;
        unsigned long highmem = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;

        printk("Mem-info:\n");
        show_free_areas();
        for_each_online_pgdat(pgdat) {
                unsigned long flags;

                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageHighMem(page))
                                highmem++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
                pgdat_resize_unlock(pgdat, &flags);
        }
        printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
        printk("%ld pages of HIGHMEM\n", highmem);
#endif
        printk("%ld reserved pages\n", reserved);
        printk("%ld pages shared\n", shared);
        printk("%ld pages swap cached\n", cached);
}
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
        unsigned long i;
        unsigned long start, bootmap_pages;
        unsigned long total_pages;
        int boot_mapsize;

        max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
        total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
        total_pages = total_lowmem >> PAGE_SHIFT;
        max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

        /*
         * Find an area to use for the bootmem bitmap.  Calculate the size of
         * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
         * Add 1 additional page in case the address isn't page-aligned.
         */
        bootmap_pages = bootmem_bootmap_pages(total_pages);
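
        /*
         * As a worked example with hypothetical numbers: 512MB of RAM
         * with 4KB pages is 131072 pages, needing 131072 / 8 = 16KB of
         * bitmap, which rounds up to 4 bitmap pages.
         */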
        start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

        min_low_pfn = MEMORY_START >> PAGE_SHIFT;
        boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT,
                                         min_low_pfn, max_low_pfn);

        /* Add active regions with valid PFNs */
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long start_pfn, end_pfn;

                start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
                end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
                add_active_range(0, start_pfn, end_pfn);
        }
        /* Add all physical memory to the bootmem map, mark each area
         * present.
         */
#ifdef CONFIG_HIGHMEM
        free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++) {
                unsigned long addr = lmb.reserved.region[i].base +
                                     lmb_size_bytes(&lmb.reserved, i) - 1;

                if (addr < lowmem_end_addr)
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        lmb_size_bytes(&lmb.reserved, i),
                                        BOOTMEM_DEFAULT);
                else if (lmb.reserved.region[i].base < lowmem_end_addr) {
                        unsigned long adjusted_size = lowmem_end_addr -
                                lmb.reserved.region[i].base;
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        adjusted_size, BOOTMEM_DEFAULT);
                }
        }
#else
        free_bootmem_with_active_regions(0, max_pfn);

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++)
                reserve_bootmem(lmb.reserved.region[i].base,
                                lmb_size_bytes(&lmb.reserved, i),
                                BOOTMEM_DEFAULT);
#endif

        /* XXX need to clip this if using highmem? */
        sparse_memory_present_with_active_regions(0);

        init_bootmem_done = 1;
}
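
/*
 * A worked example with a hypothetical layout: if lmb has regions
 * covering 0-1GB and 1.5GB-2GB, mark_nonram_nosave() below registers
 * the pfns of the 1GB-1.5GB hole as nosave, so hibernation never tries
 * to save or restore pages that do not exist.
 */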
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
        unsigned long lmb_next_region_start_pfn,
                      lmb_region_max_pfn;
        int i;

        for (i = 0; i < lmb.memory.cnt - 1; i++) {
                lmb_region_max_pfn =
                        (lmb.memory.region[i].base >> PAGE_SHIFT) +
                        (lmb.memory.region[i].size >> PAGE_SHIFT);
                lmb_next_region_start_pfn =
                        lmb.memory.region[i+1].base >> PAGE_SHIFT;

                if (lmb_region_max_pfn < lmb_next_region_start_pfn)
                        register_nosave_region(lmb_region_max_pfn,
                                               lmb_next_region_start_pfn);
        }

        return 0;
}
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
        unsigned long total_ram = lmb_phys_mem_size();
        unsigned long top_of_ram = lmb_end_of_DRAM();
        unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_HIGHMEM
        map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
        pkmap_page_table = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
                        (PKMAP_BASE), PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
        map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
        kmap_pte = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
                        (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN),
                        KMAP_FIX_BEGIN);
        kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

        printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);
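
        /*
         * Zone sizing: everything up to lowmem_end_addr goes into
         * ZONE_DMA, with any remaining RAM in ZONE_HIGHMEM; without
         * CONFIG_HIGHMEM all of RAM lands in ZONE_DMA.
         */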
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
        max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
        max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
        free_area_init_nodes(max_zone_pfns);

        mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
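
/*
 * mem_init() releases the bootmem-managed pages to the page allocator,
 * tallies reserved pages, frees highmem pages (when configured) and
 * prints the final memory summary.
 */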
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int nid;
#endif
        pg_data_t *pgdat;
        unsigned long i;
        struct page *page;
        unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

        num_physpages = lmb.memory.size >> PAGE_SHIFT;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages != 0) {
                        printk("freeing bootmem node %d\n", nid);
                        totalram_pages +=
                                free_all_bootmem_node(NODE_DATA(nid));
                }
        }
#else
        max_mapnr = max_pfn;
        totalram_pages += free_all_bootmem();
#endif
        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pgdat_page_nr(pgdat, i);
                        if (PageReserved(page))
                                reservedpages++;
                }
        }

        codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
        datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
        initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
        bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
#ifdef CONFIG_HIGHMEM
        {
                unsigned long pfn, highmem_mapnr;

                highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        struct page *page = pfn_to_page(pfn);

                        if (lmb_is_reserved(pfn << PAGE_SHIFT))
                                continue;
                        ClearPageReserved(page);
                        init_page_count(page);
                        __free_page(page);
                        totalhigh_pages++;
                }
                totalram_pages += totalhigh_pages;
                printk(KERN_DEBUG "High memory: %luk\n",
                       totalhigh_pages << (PAGE_SHIFT-10));
        }
#endif /* CONFIG_HIGHMEM */
        printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
               "%luk reserved, %luk data, %luk bss, %luk init)\n",
                (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                bsssize >> 10,
                initsize >> 10);

        mem_init_done = 1;
}
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &page->flags))
                clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
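
/*
 * Make a page coherent between the d-cache and i-cache.  BookE flushes
 * through a temporary kmap, 8xx and 64-bit flush through the kernel
 * linear address, and classic 32-bit parts flush by physical address
 * so highmem pages need no mapping.
 */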
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
        void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
        __flush_dcache_icache(start);
        kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
        /* On 8xx there is no need to kmap since highmem is not supported */
        __flush_dcache_icache(page_address(page));
#else
        __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
        clear_page(page);

        /*
         * We shouldn't have to do this, but some versions of glibc
         * require it (ld.so assumes zero filled pages are icache clean)
         */
        flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);
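
/*
 * Copy a page for a user process and, as in clear_user_page() above,
 * mark the destination not i-cache clean for the benefit of older glibc.
 */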
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *pg)
{
        copy_page(vto, vfrom);

        /*
         * We should be able to use the following optimisation, however
         * there are two problems.
         * Firstly a bug in some versions of binutils meant PLT sections
         * were not marked executable.
         * Secondly the first word in the GOT section is blrl, used
         * to establish the GOT address.  Until recently the GOT was
         * not marked executable.
         */
#if 0
        if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
                return;
#endif

        flush_dcache_page(pg);
}
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long maddr;

        maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
        flush_icache_range(maddr, maddr + len);
        kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
        unsigned long access = 0, trap;
#endif
        unsigned long pfn = pte_pfn(pte);

        /* handle i-cache coherency */
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
            !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
            pfn_valid(pfn)) {
                struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
                /* On 8xx, cache control instructions (particularly
                 * "dcbst" from flush_dcache_icache) fault as write
                 * operation if there is an unpopulated TLB entry
                 * for the address in question.  To work around that,
                 * we invalidate the TLB here, thus avoiding dcbst
                 * misbehaviour.
                 */
                _tlbie(address, 0 /* 8xx doesn't care about PID */);
#endif
                /* The _PAGE_USER test should really be _PAGE_EXEC, but
                 * older glibc versions execute some code from no-exec
                 * pages, which for now we are supporting.  If exec-only
                 * pages are ever implemented, this will have to change.
                 */
                if (!PageReserved(page) && (pte_val(pte) & _PAGE_USER)
                    && !test_bit(PG_arch_1, &page->flags)) {
                        if (vma->vm_mm == current->active_mm) {
                                __flush_dcache_icache((void *) address);
                        } else
                                flush_dcache_icache_page(page);
                        set_bit(PG_arch_1, &page->flags);
                }
        }
#ifdef CONFIG_PPC_STD_MMU
        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(pte) || address >= TASK_SIZE)
                return;

        /* We try to figure out if we are coming from an instruction
         * access fault and pass that down to __hash_page so we avoid
         * double-faulting on execution of fresh text.  We have to test
         * for regs NULL since init will get here first thing at boot.
         *
         * We also avoid filling the hash if not coming from a fault.
         */
        if (current->thread.regs == NULL)
                return;
        trap = TRAP(current->thread.regs);
        if (trap == 0x400)              /* instruction storage interrupt */
                access |= _PAGE_EXEC;
        else if (trap != 0x300)         /* not a data storage interrupt */
                return;
        hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}