]> www.pilppa.org Git - linux-2.6-omap-h63xx.git/blob - arch/arm/mm/init.c
9b7f0bf26f573ed353de0738dc3f6f4358d69121
[linux-2.6-omap-h63xx.git] / arch / arm / mm / init.c
1 /*
2  *  linux/arch/arm/mm/init.c
3  *
4  *  Copyright (C) 1995-2005 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/kernel.h>
11 #include <linux/errno.h>
12 #include <linux/swap.h>
13 #include <linux/init.h>
14 #include <linux/bootmem.h>
15 #include <linux/mman.h>
16 #include <linux/nodemask.h>
17 #include <linux/initrd.h>
18
19 #include <asm/mach-types.h>
20 #include <asm/setup.h>
21 #include <asm/sizes.h>
22 #include <asm/tlb.h>
23
24 #include <asm/mach/arch.h>
25 #include <asm/mach/map.h>
26
27 #include "mm.h"
28
/*
 * Physical start address and size of the initrd, set either by the
 * "initrd=" command line parameter or by the ATAG_INITRD/ATAG_INITRD2
 * boot tags below.  Consumed (and validated) by check_initrd().
 */
static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;
31
32 static void __init early_initrd(char **p)
33 {
34         unsigned long start, size;
35
36         start = memparse(*p, p);
37         if (**p == ',') {
38                 size = memparse((*p) + 1, p);
39
40                 phys_initrd_start = start;
41                 phys_initrd_size = size;
42         }
43 }
44 __early_param("initrd=", early_initrd);
45
/*
 * ATAG_INITRD hands us a *virtual* initrd address, so it must be
 * converted to a physical one here.  It is deprecated in favour of
 * ATAG_INITRD2 below, which passes a physical address directly.
 */
static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);
56
/*
 * ATAG_INITRD2 passes the initrd start as a physical address, so it
 * can be recorded without conversion.
 */
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
65
/*
 * This is used to pass memory configuration data from paging_init
 * to mem_init, and by show_mem() to skip holes in the memory map.
 * It is filled in from the caller's meminfo by bootmem_init().
 */
static struct meminfo meminfo = { 0, };
71
/*
 * Dump a summary of memory usage: walk every page of every bank on
 * every online node, classifying each page as reserved, swap-cached,
 * slab, free or shared, and print the totals.
 */
void show_mem(void)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, node, i;
	struct meminfo * mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas();
	for_each_online_node(node) {
		pg_data_t *n = NODE_DATA(node);
		/* Bias the mem_map pointer so it can be indexed by pfn. */
		struct page *map = n->node_mem_map - n->node_start_pfn;

		/* Walk the banks (not the full span) to skip holes. */
		for_each_nodebank (i,mi,node) {
			struct membank *bank = &mi->bank[i];
			unsigned int pfn1, pfn2;
			struct page *page, *end;

			pfn1 = bank_pfn_start(bank);
			pfn2 = bank_pfn_end(bank);

			page = map + pfn1;
			end  = map + pfn2;

			do {
				total++;
				if (PageReserved(page))
					reserved++;
				else if (PageSwapCache(page))
					cached++;
				else if (PageSlab(page))
					slab++;
				else if (!page_count(page))
					free++;
				else
					/* each extra reference is a sharer */
					shared += page_count(page) - 1;
				page++;
			} while (page < end);
		}
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
119
120 /*
121  * FIXME: We really want to avoid allocating the bootmap bitmap
122  * over the top of the initrd.  Hopefully, this is located towards
123  * the start of a bank, so if we allocate the bootmap bitmap at
124  * the end, we won't clash.
125  */
126 static unsigned int __init
127 find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
128 {
129         unsigned int start_pfn, i, bootmap_pfn;
130
131         start_pfn   = PAGE_ALIGN(__pa(&_end)) >> PAGE_SHIFT;
132         bootmap_pfn = 0;
133
134         for_each_nodebank(i, mi, node) {
135                 struct membank *bank = &mi->bank[i];
136                 unsigned int start, end;
137
138                 start = bank_pfn_start(bank);
139                 end   = bank_pfn_end(bank);
140
141                 if (end < start_pfn)
142                         continue;
143
144                 if (start < start_pfn)
145                         start = start_pfn;
146
147                 if (end <= start)
148                         continue;
149
150                 if (end - start >= bootmap_pages) {
151                         bootmap_pfn = start;
152                         break;
153                 }
154         }
155
156         if (bootmap_pfn == 0)
157                 BUG();
158
159         return bootmap_pfn;
160 }
161
162 static int __init check_initrd(struct meminfo *mi)
163 {
164         int initrd_node = -2;
165 #ifdef CONFIG_BLK_DEV_INITRD
166         unsigned long end = phys_initrd_start + phys_initrd_size;
167
168         /*
169          * Make sure that the initrd is within a valid area of
170          * memory.
171          */
172         if (phys_initrd_size) {
173                 unsigned int i;
174
175                 initrd_node = -1;
176
177                 for (i = 0; i < mi->nr_banks; i++) {
178                         struct membank *bank = &mi->bank[i];
179                         if (bank_phys_start(bank) <= phys_initrd_start &&
180                             end <= bank_phys_end(bank))
181                                 initrd_node = bank->node;
182                 }
183         }
184
185         if (initrd_node == -1) {
186                 printk(KERN_ERR "INITRD: 0x%08lx+0x%08lx extends beyond "
187                        "physical memory - disabling initrd\n",
188                        phys_initrd_start, phys_initrd_size);
189                 phys_initrd_start = phys_initrd_size = 0;
190         }
191 #endif
192
193         return initrd_node;
194 }
195
/*
 * Create the kernel's linear mapping for one memory bank (the virtual
 * address comes from __phys_to_virt of the bank's physical start).
 * With !CONFIG_MMU this is a no-op.
 */
static inline void map_memory_bank(struct membank *bank)
{
#ifdef CONFIG_MMU
	struct map_desc map;

	map.pfn = bank_pfn_start(bank);
	map.virtual = __phys_to_virt(bank_phys_start(bank));
	map.length = bank_phys_size(bank);
	map.type = MT_MEMORY;

	create_mapping(&map);
#endif
}
209
/*
 * Initialise bootmem for one node: map its memory banks, allocate the
 * bootmem bitmap, hand the banks over to bootmem, reserve the regions
 * already in use (bitmap, node-zero specials, initrd) and finally set
 * up the node's zones.  Returns the node's highest pfn, or 0 if the
 * node has no memory.
 */
static unsigned long __init
bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long start_pfn, end_pfn, boot_pfn;
	unsigned int boot_pages;
	pg_data_t *pgdat;
	int i;

	/* sentinels: any real bank will pull these inwards */
	start_pfn = -1UL;
	end_pfn = 0;

	/*
	 * Calculate the pfn range, and map the memory banks for this node.
	 */
	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];
		unsigned long start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (start_pfn > start)
			start_pfn = start;
		if (end_pfn < end)
			end_pfn = end;

		map_memory_bank(bank);
	}

	/*
	 * If there is no memory in this node, ignore it.
	 */
	if (end_pfn == 0)
		return end_pfn;

	/*
	 * Allocate the bootmem bitmap page.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	boot_pfn = find_bootmap_pfn(node, mi, boot_pages);

	/*
	 * Initialise the bootmem allocator for this node, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(node);
	pgdat = NODE_DATA(node);
	init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);

	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];
		free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
	}

	/*
	 * Reserve the bootmem bitmap for this node.
	 */
	reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
			     boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);

	/*
	 * Reserve any special node zero regions.
	 */
	if (node == 0)
		reserve_node_zero(pgdat);

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * If the initrd is in this node, reserve its memory.
	 * BOOTMEM_EXCLUSIVE makes the reservation fail, rather than
	 * succeed silently, when the region overlaps one that is
	 * already reserved (see the error message below).
	 */
	if (node == initrd_node) {
		int res = reserve_bootmem_node(pgdat, phys_initrd_start,
				     phys_initrd_size, BOOTMEM_EXCLUSIVE);

		if (res == 0) {
			initrd_start = __phys_to_virt(phys_initrd_start);
			initrd_end = initrd_start + phys_initrd_size;
		} else {
			printk(KERN_ERR
				"INITRD: 0x%08lx+0x%08lx overlaps in-use "
				"memory region - disabling initrd\n",
				phys_initrd_start, phys_initrd_size);
		}
	}
#endif

	/*
	 * initialise the zones within this node.
	 */
	memset(zone_size, 0, sizeof(zone_size));
	memset(zhole_size, 0, sizeof(zhole_size));

	/*
	 * The size of this node has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory to the
	 * zones, now is the time to do it.
	 */
	zone_size[0] = end_pfn - start_pfn;

	/*
	 * For each bank in this node, calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes_in_node)
	 */
	zhole_size[0] = zone_size[0];
	for_each_nodebank(i, mi, node)
		zhole_size[0] -= bank_pfn_size(&mi->bank[i]);

	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	arch_adjust_zones(node, zone_size, zhole_size);

	free_area_init_node(node, zone_size, start_pfn, zhole_size);

	return end_pfn;
}
328
/*
 * Set up the bootmem allocator for every node described by @mi, and
 * record the high-memory boundary and maximum pfn values.
 */
void __init bootmem_init(struct meminfo *mi)
{
	unsigned long memend_pfn = 0;
	int node, initrd_node;

	/* Keep a copy of the layout for show_mem() and mem_init(). */
	memcpy(&meminfo, mi, sizeof(meminfo));

	/*
	 * Locate which node contains the ramdisk image, if any.
	 */
	initrd_node = check_initrd(mi);

	/*
	 * Run through each node initialising the bootmem allocator.
	 */
	for_each_node(node) {
		unsigned long end_pfn;

		end_pfn = bootmem_init_node(node, initrd_node, mi);

		/*
		 * Remember the highest memory PFN.
		 */
		if (end_pfn > memend_pfn)
			memend_pfn = end_pfn;
	}

	high_memory = __va(memend_pfn << PAGE_SHIFT);

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
}
368
369 static inline void free_area(unsigned long addr, unsigned long end, char *s)
370 {
371         unsigned int size = (end - addr) >> 10;
372
373         for (; addr < end; addr += PAGE_SIZE) {
374                 struct page *page = virt_to_page(addr);
375                 ClearPageReserved(page);
376                 init_page_count(page);
377                 free_page(addr);
378                 totalram_pages++;
379         }
380
381         if (size && s)
382                 printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
383 }
384
/*
 * Free the part of the mem_map array that covers the pfn range
 * [start_pfn, end_pfn), returning its backing pages to bootmem.
 * Only whole pages of the array are freed, hence the rounding below.
 */
static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn);
	end_pg = pfn_to_page(end_pfn);

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}
411
/*
 * The mem_map array can get very big.  Free the unused area of the memory map:
 * walk the node's banks in order and release the mem_map pages covering
 * each gap between consecutive banks.
 */
static void __init free_unused_memmap_node(int node, struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * [FIXME] This relies on each bank being in address order.  This
	 * may not be the case, especially if the user has provided the
	 * information on the command line.
	 */
	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);
		if (bank_start < prev_bank_end) {
			printk(KERN_ERR "MEM: unordered memory banks.  "
				"Not freeing memmap.\n");
			break;
		}

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 * (prev_bank_end == 0 means this is the first bank.)
		 */
		if (prev_bank_end && prev_bank_end != bank_start)
			free_memmap(node, prev_bank_end, bank_start);

		prev_bank_end = bank_pfn_end(bank);
	}
}
445
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned int codepages, datapages, initpages;
	int i, node;

	/*
	 * NOTE: despite the "pages" names, these are byte counts
	 * (differences of linker-symbol addresses); they are only
	 * converted to KB (>> 10) for the printk below.
	 */
	codepages = &_etext - &_text;
	datapages = &_end - &__data_start;
	initpages = &__init_end - &__init_begin;

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr   = virt_to_page(high_memory) - mem_map;
#endif

	/* this will put all unused low memory onto the freelists */
	for_each_online_node(node) {
		pg_data_t *pgdat = NODE_DATA(node);

		/* release mem_map pages covering holes between banks */
		free_unused_memmap_node(node, &meminfo);

		if (pgdat->node_spanned_pages != 0)
			totalram_pages += free_all_bootmem_node(pgdat);
	}

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir, NULL);
#endif

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system
	 */
	printk(KERN_INFO "Memory:");

	num_physpages = 0;
	for (i = 0; i < meminfo.nr_banks; i++) {
		num_physpages += bank_pfn_size(&meminfo.bank[i]);
		printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
	}

	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
	printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
		"%dK data, %dK init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		codepages >> 10, datapages >> 10, initpages >> 10);

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}
507
508 void free_initmem(void)
509 {
510         if (!machine_is_integrator() && !machine_is_cintegrator()) {
511                 free_area((unsigned long)(&__init_begin),
512                           (unsigned long)(&__init_end),
513                           "init");
514         }
515 }
516
517 #ifdef CONFIG_BLK_DEV_INITRD
518
/* set by the "keepinitrd" command line option below */
static int keep_initrd;

/*
 * Free the initrd's memory range unless "keepinitrd" was given on the
 * command line.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		free_area(start, end, "initrd");
}
526
/*
 * "keepinitrd" command line option: retain the initrd memory instead
 * of freeing it in free_initrd_mem().
 */
static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
534 #endif