/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE 0       /* XXX for now */
#define CPU_FTR_NOEXECUTE       0
#endif

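/*
 * init_bootmem_done and mem_init_done record how far boot-time memory
 * initialisation has progressed, so that later code can pick whichever
 * allocator is currently safe to use.  memory_limit caps the amount of
 * memory we use; it can be set on the command line (mem=).
 */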
int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;

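/*
 * Tell whether a physical page frame is backed by RAM.  On 32-bit,
 * everything below high_memory is treated as RAM for now; on 64-bit the
 * LMB regions are scanned, since the physical address space may contain
 * holes.
 */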
int page_is_ram(unsigned long pfn)
{
        unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64    /* XXX for now */
        return paddr < __pa(high_memory);
#else
        int i;
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long base;

                base = lmb.memory.region[i].base;

                if ((paddr >= base) &&
                        (paddr < (base + lmb.memory.region[i].size))) {
                        return 1;
                }
        }

        return 0;
#endif
}

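/*
 * Choose the page protection for a user-space mapping of physical
 * memory (e.g. via /dev/mem).  Platforms may override this through
 * ppc_md; by default anything that is not RAM is mapped guarded and
 * non-cacheable.
 */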
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (ppc_md.phys_mem_access_prot)
                return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

        if (!page_is_ram(pfn))
                vma_prot = __pgprot(pgprot_val(vma_prot)
                                    | _PAGE_GUARDED | _PAGE_NO_CACHE);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

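/*
 * Hand a hot-added page to the buddy allocator and account for it.
 */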
void online_page(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalram_pages++;
        num_physpages++;
}

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
        return hot_add_scn_to_nid(start);
}
#endif

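/*
 * Hot-add hook: map the new range into the kernel linear mapping and
 * register its pages with the node's first zone.
 */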
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata;
        struct zone *zone;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        pgdata = NODE_DATA(nid);

        start = (unsigned long)__va(start);
        create_section_mapping(start, start + size);

        /* this should work for most non-highmem platforms */
        zone = pgdata->node_zones;

        return __add_pages(zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
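/*
 * Offline the pages in the given range; the architecture-specific
 * teardown is still to come (see below).
 */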
int remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn, end_pfn;
        int ret;

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = start_pfn + (size >> PAGE_SHIFT);
        ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
        if (ret)
                goto out;
        /* Arch-specific calls go here - next patch */
out:
        return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

/*
 * walk_memory_resource() needs to make sure there are no holes in a
 * given memory range.  On PPC64, since this range comes from /sysfs,
 * it is guaranteed to be valid, non-overlapping and cannot contain
 * any holes.  By the time we get here (memory add or remove),
 * /proc/device-tree is updated and correct.  The only reason we would
 * need to check against the device tree is if we allowed user land to
 * specify a memory range through a system call/ioctl etc. instead of
 * doing offline/online through /sysfs.
 */
int
walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
                        int (*func)(unsigned long, unsigned long, void *))
{
        return (*func)(start_pfn, nr_pages, arg);
}
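/*
 * A minimal, purely illustrative caller might total up the pages in a
 * range (count_pages and total are hypothetical names):
 *
 *      static int count_pages(unsigned long pfn, unsigned long nr, void *arg)
 *      {
 *              *(unsigned long *)arg += nr;
 *              return 0;
 *      }
 *
 *      ret = walk_memory_resource(start_pfn, nr_pages, &total, count_pages);
 */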

#endif /* CONFIG_MEMORY_HOTPLUG */

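/*
 * Print a summary of memory usage, walking every valid page of each
 * online node under the pgdat resize lock.
 */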
void show_mem(void)
{
        unsigned long total = 0, reserved = 0;
        unsigned long shared = 0, cached = 0;
        unsigned long highmem = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;

        printk("Mem-info:\n");
        show_free_areas();
        for_each_online_pgdat(pgdat) {
                unsigned long flags;
                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageHighMem(page))
                                highmem++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
                pgdat_resize_unlock(pgdat, &flags);
        }
        printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
        printk("%ld pages of HIGHMEM\n", highmem);
#endif
        printk("%ld reserved pages\n", reserved);
        printk("%ld pages shared\n", shared);
        printk("%ld pages swap cached\n", cached);
}

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
        unsigned long i;
        unsigned long start, bootmap_pages;
        unsigned long total_pages;
        int boot_mapsize;

        max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
        total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
        total_pages = total_lowmem >> PAGE_SHIFT;
        max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

        /*
         * Find an area to use for the bootmem bitmap.  Calculate the size of
         * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
         * Add 1 additional page in case the address isn't page-aligned.
         */
        bootmap_pages = bootmem_bootmap_pages(total_pages);

        start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

        min_low_pfn = MEMORY_START >> PAGE_SHIFT;
        boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);

        /* Add active regions with valid PFNs */
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long start_pfn, end_pfn;
                start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
                end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
                add_active_range(0, start_pfn, end_pfn);
        }

        /* Add all physical memory to the bootmem map, mark each area
         * present.
         */
#ifdef CONFIG_HIGHMEM
        free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++) {
                unsigned long addr = lmb.reserved.region[i].base +
                                     lmb_size_bytes(&lmb.reserved, i) - 1;
                if (addr < lowmem_end_addr)
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        lmb_size_bytes(&lmb.reserved, i),
                                        BOOTMEM_DEFAULT);
                else if (lmb.reserved.region[i].base < lowmem_end_addr) {
                        unsigned long adjusted_size = lowmem_end_addr -
                                      lmb.reserved.region[i].base;
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        adjusted_size, BOOTMEM_DEFAULT);
                }
        }
#else
        free_bootmem_with_active_regions(0, max_pfn);

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++)
                reserve_bootmem(lmb.reserved.region[i].base,
                                lmb_size_bytes(&lmb.reserved, i),
                                BOOTMEM_DEFAULT);

#endif
        /* XXX need to clip this if using highmem? */
        sparse_memory_present_with_active_regions(0);

        init_bootmem_done = 1;
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
        unsigned long lmb_next_region_start_pfn,
                      lmb_region_max_pfn;
        int i;

        for (i = 0; i < lmb.memory.cnt - 1; i++) {
                lmb_region_max_pfn =
                        (lmb.memory.region[i].base >> PAGE_SHIFT) +
                        (lmb.memory.region[i].size >> PAGE_SHIFT);
                lmb_next_region_start_pfn =
                        lmb.memory.region[i+1].base >> PAGE_SHIFT;

                if (lmb_region_max_pfn < lmb_next_region_start_pfn)
                        register_nosave_region(lmb_region_max_pfn,
                                               lmb_next_region_start_pfn);
        }

        return 0;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
        unsigned long total_ram = lmb_phys_mem_size();
        unsigned long top_of_ram = lmb_end_of_DRAM();
        unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_HIGHMEM
        map_page(PKMAP_BASE, 0, 0);     /* XXX gross */
        pkmap_page_table = pte_offset_kernel(
                pmd_offset(pud_offset(pgd_offset_k(PKMAP_BASE), PKMAP_BASE),
                           PKMAP_BASE), PKMAP_BASE);
        map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */
        kmap_pte = pte_offset_kernel(
                pmd_offset(pud_offset(pgd_offset_k(KMAP_FIX_BEGIN),
                                      KMAP_FIX_BEGIN), KMAP_FIX_BEGIN),
                KMAP_FIX_BEGIN);
        kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

        printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
        max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
        max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
        free_area_init_nodes(max_zone_pfns);

        mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

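/*
 * Release the bootmem allocator's pages to the buddy allocator (per
 * node with CONFIG_NEED_MULTIPLE_NODES), free any highmem pages not
 * reserved in the LMB, and print the memory summary.
 */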
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int nid;
#endif
        pg_data_t *pgdat;
        unsigned long i;
        struct page *page;
        unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

        num_physpages = lmb.memory.size >> PAGE_SHIFT;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages != 0) {
                        printk("freeing bootmem node %d\n", nid);
                        totalram_pages +=
                                free_all_bootmem_node(NODE_DATA(nid));
                }
        }
#else
        max_mapnr = max_pfn;
        totalram_pages += free_all_bootmem();
#endif
        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pgdat_page_nr(pgdat, i);
                        if (PageReserved(page))
                                reservedpages++;
                }
        }

        codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
        datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
        initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
        bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
        {
                unsigned long pfn, highmem_mapnr;

                highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        struct page *page = pfn_to_page(pfn);
                        if (lmb_is_reserved(pfn << PAGE_SHIFT))
                                continue;
                        ClearPageReserved(page);
                        init_page_count(page);
                        __free_page(page);
                        totalhigh_pages++;
                        reservedpages--;
                }
                totalram_pages += totalhigh_pages;
                printk(KERN_DEBUG "High memory: %luk\n",
                       totalhigh_pages << (PAGE_SHIFT-10));
        }
#endif /* CONFIG_HIGHMEM */

        printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
               "%luk reserved, %luk data, %luk bss, %luk init)\n",
                (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                bsssize >> 10,
                initsize >> 10);

        mem_init_done = 1;
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &page->flags))
                clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

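/*
 * Make the i-cache coherent with the d-cache for one page: through a
 * temporary kmap on Book-E, through the kernel virtual address on 8xx
 * and 64-bit, and by physical address otherwise.
 */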
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
        void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
        __flush_dcache_icache(start);
        kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
        /* On 8xx and 64-bit there is no need to kmap since highmem is
         * not supported */
        __flush_dcache_icache(page_address(page));
#else
        __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
        clear_page(page);

        /*
         * We shouldn't have to do this, but some versions of glibc
         * require it (ld.so assumes zero-filled pages are icache clean)
         * - Anton
         */
        flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *pg)
{
        copy_page(vto, vfrom);

        /*
         * We should be able to use the following optimisation, however
         * there are two problems.
         * Firstly a bug in some versions of binutils meant PLT sections
         * were not marked executable.
         * Secondly the first word in the GOT section is blrl, used
         * to establish the GOT address. Until recently the GOT was
         * not marked executable.
         * - Anton
         */
#if 0
        if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
                return;
#endif

        flush_dcache_page(pg);
}

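/*
 * Flush the i-cache for user-page contents the kernel has just
 * modified (used, for example, when writing breakpoints via ptrace);
 * kmap() provides a kernel mapping to flush through.
 */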
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long maddr;

        maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
        flush_icache_range(maddr, maddr + len);
        kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
        unsigned long access = 0, trap;
#endif
        unsigned long pfn = pte_pfn(pte);

        /* handle i-cache coherency */
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
            !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
            pfn_valid(pfn)) {
                struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
                /* On 8xx, cache control instructions (particularly
                 * "dcbst" from flush_dcache_icache) fault as write
                 * operation if there is an unpopulated TLB entry
                 * for the address in question. To work around that,
                 * we invalidate the TLB here, thus avoiding dcbst
                 * misbehaviour.
                 */
                _tlbie(address, 0 /* 8xx doesn't care about PID */);
#endif
                /* The _PAGE_USER test should really be _PAGE_EXEC, but
                 * older glibc versions execute some code from no-exec
                 * pages, which for now we are supporting.  If exec-only
                 * pages are ever implemented, this will have to change.
                 */
                if (!PageReserved(page) && (pte_val(pte) & _PAGE_USER)
                    && !test_bit(PG_arch_1, &page->flags)) {
                        if (vma->vm_mm == current->active_mm)
                                __flush_dcache_icache((void *) address);
                        else
                                flush_dcache_icache_page(page);
                        set_bit(PG_arch_1, &page->flags);
                }
        }

#ifdef CONFIG_PPC_STD_MMU
        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(pte) || address >= TASK_SIZE)
                return;

        /* We try to figure out if we are coming from an instruction
         * access fault and pass that down to __hash_page so we avoid
         * double-faulting on execution of fresh text.  We have to test
         * for regs NULL since init will get here first thing at boot.
         *
         * We also avoid filling the hash if we are not coming from a
         * fault.
         */
        if (current->thread.regs == NULL)
                return;
        trap = TRAP(current->thread.regs);
        if (trap == 0x400)
                access |= _PAGE_EXEC;
        else if (trap != 0x300)
                return;
        hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}