/*
 * Source: linux-2.6-omap-h63xx.git (linux-omap tree, merged with mainline)
 * File:   arch/arm/kernel/setup.c
 */
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/root_dev.h>
23 #include <linux/cpu.h>
24 #include <linux/interrupt.h>
25 #include <linux/smp.h>
26 #include <linux/fs.h>
27
28 #include <asm/cpu.h>
29 #include <asm/elf.h>
30 #include <asm/procinfo.h>
31 #include <asm/setup.h>
32 #include <asm/mach-types.h>
33 #include <asm/cacheflush.h>
34 #include <asm/tlbflush.h>
35
36 #include <asm/mach/arch.h>
37 #include <asm/mach/irq.h>
38 #include <asm/mach/time.h>
39 #include <asm/traps.h>
40
41 #include "compat.h"
42 #include "atags.h"
43
44 #ifndef MEM_SIZE
45 #define MEM_SIZE        (16*1024*1024)
46 #endif
47
48 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
49 char fpe_type[8];
50
/*
 * Parse the "fpe=" command-line option: record the requested floating
 * point emulator type in fpe_type[].
 * NOTE(review): always copies 8 bytes from 'line' regardless of its
 * length — assumes the command-line buffer extends at least that far;
 * confirm against the parse_cmdline() caller.
 */
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
58 #endif
59
60 extern void paging_init(struct meminfo *, struct machine_desc *desc);
61 extern void reboot_setup(char *str);
62 extern int root_mountflags;
63 extern void _stext, _text, _etext, __data_start, _edata, _end;
64
65 unsigned int processor_id;
66 EXPORT_SYMBOL(processor_id);
67 unsigned int __machine_arch_type;
68 EXPORT_SYMBOL(__machine_arch_type);
69
70 unsigned int __atags_pointer __initdata;
71
72 unsigned int system_rev;
73 EXPORT_SYMBOL(system_rev);
74
75 unsigned int system_serial_low;
76 EXPORT_SYMBOL(system_serial_low);
77
78 unsigned int system_serial_high;
79 EXPORT_SYMBOL(system_serial_high);
80
81 unsigned int elf_hwcap;
82 EXPORT_SYMBOL(elf_hwcap);
83
84 unsigned long __initdata vmalloc_reserve = 128 << 20;
85
86
87 #ifdef MULTI_CPU
88 struct processor processor;
89 #endif
90 #ifdef MULTI_TLB
91 struct cpu_tlb_fns cpu_tlb;
92 #endif
93 #ifdef MULTI_USER
94 struct cpu_user_fns cpu_user;
95 #endif
96 #ifdef MULTI_CACHE
97 struct cpu_cache_fns cpu_cache;
98 #endif
99 #ifdef CONFIG_OUTER_CACHE
100 struct outer_cache_fns outer_cache;
101 #endif
102
/*
 * Per-CPU exception stacks: three words each for IRQ, abort and
 * undefined-instruction modes, set up by cpu_init() below.
 */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];
110
111 char elf_platform[ELF_PLATFORM_SIZE];
112 EXPORT_SYMBOL(elf_platform);
113
114 unsigned long phys_initrd_start __initdata = 0;
115 unsigned long phys_initrd_size __initdata = 0;
116
117 static struct meminfo meminfo __initdata = { 0, };
118 static const char *cpu_name;
119 static const char *machine_name;
120 static char __initdata command_line[COMMAND_LINE_SIZE];
121
122 static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
123 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
124 #define ENDIANNESS ((char)endian_test.l)
125
126 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
127
128 /*
129  * Standard memory resources
130  */
/*
 * Standard memory resources.  Start/end are filled in by
 * request_standard_resources() before registration.
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Convenience aliases used by request_standard_resources(). */
#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]
155
/*
 * Legacy PC-style parallel port I/O ranges, reserved only when the
 * machine descriptor sets the corresponding reserve_lp* flag.
 */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
180
/*
 * Write policy names, indexed by the CACHE_TYPE() field of the
 * pre-v7 cache type register (see dump_cpu_info()).
 */
static const char *cache_types[16] = {
	"write-through",
	"write-back",
	"write-back",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"write-back",
	"write-back",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"write-back",
	"undefined 15",
};

/* Cache clean method names; presumably indexed like cache_types —
 * no user visible in this chunk. */
static const char *cache_clean[16] = {
	"not required",
	"read-block",
	"cp15 c7 ops",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"cp15 c7 ops",
	"cp15 c7 ops",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"cp15 c7 ops",
	"undefined 15",
};

/* Cache lockdown format names; same indexing convention. */
static const char *cache_lockdown[16] = {
	"not supported",
	"not supported",
	"not supported",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"format A",
	"format B",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"format C",
	"undefined 15",
};
237
/*
 * Architecture version strings, indexed by cpu_architecture()
 * (CPU_ARCH_* values); printed by setup_processor().
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

/* L1 I-cache policy, indexed by CACHE_TYPE_V7() of the cache type reg. */
static const char *v7_cache_policy[4] = {
	"reserved",
	"AVIVT",
	"VIPT",
	"PIPT",
};

/* Per-level cache kind, indexed by a 3-bit CLIDR level field. */
static const char *v7_cache_type[8] = {
	"none",
	"instruction only",
	"data only",
	"separate instruction and data",
	"unified",
	"unknown type",
	"unknown type",
	"unknown type",
};
275
/* Pre-v7 cache type register field extractors. */
#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

/* Sub-fields of the pre-v7 per-cache size words. */
#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)

/* ARMv7-style cache identification fields. */
#define CACHE_TYPE_V7(x)	(((x) >> 14) & 3)
#define CACHE_UNIFIED(x)	((((x) >> 27) & 7)+1)
#define CACHE_COHERENT(x)	((((x) >> 24) & 7)+1)

/* 3-bit per-level type field within the v7 cache level ID register. */
#define CACHE_ID_LEVEL_MASK	7
#define CACHE_ID_LEVEL_BITS	3

/* Decode the v7 cache size ID register (geometry and policy bits). */
#define CACHE_LINE_V7(v)	((1 << (((v) & 7)+4)))
#define CACHE_ASSOC_V7(v)	((((v) >> 3) & ((1<<10)-1))+1)
#define CACHE_SETS_V7(v)	((((v) >> 13) & ((1<<15)-1))+1)
#define CACHE_SIZE_V7(v)	(CACHE_LINE_V7(v)*CACHE_ASSOC_V7(v)*CACHE_SETS_V7(v))
#define CACHE_WA_V7(v)		(((v) & (1<<28)) != 0)
#define CACHE_RA_V7(v)		(((v) & (1<<29)) != 0)
#define CACHE_WB_V7(v)		(((v) & (1<<30)) != 0)
#define CACHE_WT_V7(v)		(((v) & (1<<31)) != 0)
301
/*
 * Print the geometry of one pre-v7 cache, decoded from a 12-bit
 * cache size word ('cache').
 */
static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	/* Size multiplier: 3 when the M bit is set, otherwise 2. */
	unsigned int m = CACHE_M(cache) ? 3 : 2;
	unsigned int nsets = 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
				   CACHE_LINE(cache));

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix,
		m << (8 + CACHE_SIZE(cache)),
		(m << CACHE_ASSOC(cache)) >> 1,
		8 << CACHE_LINE(cache),
		nsets);
}
314
/*
 * Print the geometry of one ARMv7 cache level.
 * @type:  label for the printout ("I", "D" or "unified").
 * @cpu:   CPU number for the printout.
 * @level: value written to the cache size selection register; bit 0
 *         selects the instruction cache (see dump_cpu_info()).
 * The two cp15 writes must precede the read: selection, then a
 * prefetch flush to synchronise, then the size register read.
 */
static void dump_v7_cache(const char *type, int cpu, unsigned int level)
{
	unsigned int cachesize;

	write_extended_cpuid(2,0,0,0,level);  /* Set the cache size selection register */
	write_extended_cpuid(0,7,5,4,0);      /* Prefetch flush to wait for above */
	cachesize = read_extended_cpuid(1,0,0,0);

	printk("CPU%u: %s cache: %d bytes, associativity %d, %d byte lines, %d sets,\n      supports%s%s%s%s\n",
	       cpu, type,
	       CACHE_SIZE_V7(cachesize),CACHE_ASSOC_V7(cachesize),
	       CACHE_LINE_V7(cachesize),CACHE_SETS_V7(cachesize),
	       CACHE_WA_V7(cachesize) ? " WA" : "",
	       CACHE_RA_V7(cachesize) ? " RA" : "",
	       CACHE_WB_V7(cachesize) ? " WB" : "",
	       CACHE_WT_V7(cachesize) ? " WT" : "");
}
332
/*
 * Print cache information for one CPU at boot.
 * Distinguishes the ARMv7-style cache type register (bit 31 set) from
 * the older format; a cache type register equal to the processor ID
 * means no cache type register is implemented, so nothing is printed.
 */
static void __init dump_cpu_info(int cpu)
{
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	if (info != processor_id && (info & (1 << 31))) {
		/* ARMv7 style of cache info register */
		unsigned int id = read_extended_cpuid(1,0,0,1);
		unsigned int level = 0;
		printk("CPU%u: L1 I %s cache. Caches unified at level %u, coherent at level %u\n",
		       cpu,
		       v7_cache_policy[CACHE_TYPE_V7(info)],
		       CACHE_UNIFIED(id),
		       CACHE_COHERENT(id));

		/* Walk 3-bit per-level type fields until an empty one. */
		while (id & CACHE_ID_LEVEL_MASK) {
			printk("CPU%u: Level %u cache is %s\n",
			       cpu, (level >> 1)+1, v7_cache_type[id & CACHE_ID_LEVEL_MASK]);

			if (id & 1) {
				/* Dump I at this level (bit 0 selects I). */
				dump_v7_cache("I", cpu, level | 1);
			}

			if (id & (4 | 2)) {
				/* Dump D or unified at this level */
				dump_v7_cache((id & 4) ? "unified" : "D", cpu, level);
			}

			/* Next level out: selection value advances by 2. */
			level += 2;
			id >>= CACHE_ID_LEVEL_BITS;
		}
	} else if (info != processor_id) {
		/* Pre-v7 cache type register. */
		printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
		       cache_types[CACHE_TYPE(info)]);
		if (CACHE_S(info)) {
			/* Separate I and D caches. */
			dump_cache("I cache", cpu, CACHE_ISIZE(info));
			dump_cache("D cache", cpu, CACHE_DSIZE(info));
		} else {
			/* Unified cache: geometry is in the ISIZE field. */
			dump_cache("cache", cpu, CACHE_ISIZE(info));
		}
	}

	if (arch_is_coherent())
		printk("Cache coherency enabled\n");
}
379
/*
 * Decode the CPU architecture version (CPU_ARCH_*) from the main ID
 * register value cached in processor_id.
 */
int cpu_architecture(void)
{
	int cpu_arch;

	if ((processor_id & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((processor_id & 0x0008f000) == 0x00007000) {
		/* ARM710-style ID: bit 23 distinguishes v4T from v3. */
		cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x00080000) == 0x00000000) {
		/* Old-style ID: architecture in bits [18:16], v3-relative. */
		cpu_arch = (processor_id >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
		    (mmfr0 & 0x000000f0) == 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
412
413 /*
414  * These functions re-use the assembly code in head.S, which
415  * already provide the required functionality.
416  */
417 extern struct proc_info_list *lookup_processor_type(unsigned int);
418 extern struct machine_desc *lookup_machine_type(unsigned int);
419
/*
 * Identify the boot CPU from processor_id, hook up the matching
 * MULTI_* function tables, publish the CPU/ELF names and hwcaps,
 * and run the processor-specific init hook.  Halts (infinite loop)
 * if the CPU is not in the compiled-in processor table.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(processor_id);
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);
	}

	cpu_name = list->cpu_name;

	/* Copy the per-CPU operation tables for multi-CPU kernels. */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* ENDIANNESS appends 'l' or 'b' to the utsname/ELF platform. */
	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	cpu_proc_init();
}
464
465 /*
466  * cpu_init - initialise one CPU.
467  *
468  * cpu_init dumps the cache information, initialises SMP specific
469  * information, and sets up the per-CPU stacks.
470  */
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/* Only dump cache info on the first (boot-time) pass. */
	if (system_state == SYSTEM_BOOTING)
		dump_cpu_info(cpu);

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * Switches through IRQ, abort and undef modes in turn (IRQs/FIQs
	 * masked), pointing each mode's SP at its slot in 'stk', then
	 * returns to SVC mode.  r14 is clobbered by the mode switches.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	sp, %0, %2\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	sp, %0, %4\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	sp, %0, %6\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
506
507 static struct machine_desc * __init setup_machine(unsigned int nr)
508 {
509         struct machine_desc *list;
510
511         /*
512          * locate machine in the list of supported machines.
513          */
514         list = lookup_machine_type(nr);
515         if (!list) {
516                 printk("Machine configuration botched (nr %d), unable "
517                        "to continue.\n", nr);
518                 while (1);
519         }
520
521         printk("Machine: %s\n", list->name);
522
523         return list;
524 }
525
526 static void __init early_initrd(char **p)
527 {
528         unsigned long start, size;
529
530         start = memparse(*p, p);
531         if (**p == ',') {
532                 size = memparse((*p) + 1, p);
533
534                 phys_initrd_start = start;
535                 phys_initrd_size = size;
536         }
537 }
538 __early_param("initrd=", early_initrd);
539
540 static void __init arm_add_memory(unsigned long start, unsigned long size)
541 {
542         struct membank *bank;
543
544         /*
545          * Ensure that start/size are aligned to a page boundary.
546          * Size is appropriately rounded down, start is rounded up.
547          */
548         size -= start & ~PAGE_MASK;
549
550         bank = &meminfo.bank[meminfo.nr_banks++];
551
552         bank->start = PAGE_ALIGN(start);
553         bank->size  = size & PAGE_MASK;
554         bank->node  = PHYS_TO_NID(start);
555 }
556
557 /*
558  * Pick out the memory size.  We look for mem=size@start,
559  * where start and size are "size[KkMm]"
560  */
561 static void __init early_mem(char **p)
562 {
563         static int usermem __initdata = 0;
564         unsigned long size, start;
565
566         /*
567          * If the user specifies memory size, we
568          * blow away any automatically generated
569          * size.
570          */
571         if (usermem == 0) {
572                 usermem = 1;
573                 meminfo.nr_banks = 0;
574         }
575
576         start = PHYS_OFFSET;
577         size  = memparse(*p, p);
578         if (**p == '@')
579                 start = memparse(*p + 1, p);
580
581         arm_add_memory(start, size);
582 }
583 __early_param("mem=", early_mem);
584
585 /*
586  * vmalloc=size forces the vmalloc area to be exactly 'size'
587  * bytes. This can be used to increase (or decrease) the vmalloc
588  * area - the default is 128m.
589  */
/* Parse "vmalloc=<size>" and record the requested vmalloc area size. */
static void __init early_vmalloc(char **arg)
{
	vmalloc_reserve = memparse(*arg, arg);
}
__early_param("vmalloc=", early_vmalloc);
595
596 /*
597  * Initial parsing of the command line.
598  */
/*
 * Walk the boot command line in 'from': words matching a registered
 * __early_param prefix are handed to their handler and stripped;
 * everything else is copied into command_line, which is returned via
 * *cmdline_p.  Output is truncated at COMMAND_LINE_SIZE-1 characters.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;	/* characters copied to command_line so far */

	for (;;) {
		/* A space means the next characters start a new word. */
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				/* NOTE(review): intentionally shadows the
				 * outer 'len'; only this prefix length is
				 * meant here. */
				int len = strlen(p->arg);

				if (memcmp(from, p->arg, len) == 0) {
					/* Un-copy the space preceding the
					 * consumed option. */
					if (to != command_line)
						to -= 1;
					from += len;
					p->fn(&from);

					/* Skip the remainder of the option. */
					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}
634
635 static void __init
636 setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
637 {
638 #ifdef CONFIG_BLK_DEV_RAM
639         extern int rd_size, rd_image_start, rd_prompt, rd_doload;
640
641         rd_image_start = image_start;
642         rd_prompt = prompt;
643         rd_doload = doload;
644
645         if (rd_sz)
646                 rd_size = rd_sz;
647 #endif
648 }
649
/*
 * Register the standard iomem/ioport resources: one "System RAM"
 * resource per non-empty memory bank (with kernel text/data nested
 * inside the bank that contains them), the machine's video RAM if
 * declared, and the legacy lp0-lp2 port ranges the machine reserves.
 */
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	/* Physical extents of the kernel image sections. */
	kernel_code.start   = virt_to_phys(&_text);
	kernel_code.end     = virt_to_phys(&_etext - 1);
	kernel_data.start   = virt_to_phys(&__data_start);
	kernel_data.end     = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end   = virt_start + mi->bank[i].size - 1;

		/* Resource lives for the kernel's lifetime; never freed. */
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end   = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest the kernel sections under the bank holding them. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
703
704 /*
705  *  Tag parsing.
706  *
707  * This is the new way of passing data to the kernel at boot time.  Rather
708  * than passing a fixed inflexible structure to the kernel, we pass a list
709  * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
710  * tag for the list to be recognised (to distinguish the tagged list from
711  * a param_struct).  The list is terminated with a zero-length tag (this tag
712  * is not parsed in any way).
713  */
714 static int __init parse_tag_core(const struct tag *tag)
715 {
716         if (tag->hdr.size > 2) {
717                 if ((tag->u.core.flags & 1) == 0)
718                         root_mountflags &= ~MS_RDONLY;
719                 ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
720         }
721         return 0;
722 }
723
724 __tagtable(ATAG_CORE, parse_tag_core);
725
726 static int __init parse_tag_mem32(const struct tag *tag)
727 {
728         if (meminfo.nr_banks >= NR_BANKS) {
729                 printk(KERN_WARNING
730                        "Ignoring memory bank 0x%08x size %dKB\n",
731                         tag->u.mem.start, tag->u.mem.size / 1024);
732                 return -EINVAL;
733         }
734         arm_add_memory(tag->u.mem.start, tag->u.mem.size);
735         return 0;
736 }
737
738 __tagtable(ATAG_MEM, parse_tag_mem32);
739
740 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default console geometry (80x30 VGA); overridden by ATAG_VIDEOTEXT. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
749
/* ATAG_VIDEOTEXT: copy the bootloader's console geometry field by field. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
765 #endif
766
767 static int __init parse_tag_ramdisk(const struct tag *tag)
768 {
769         setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
770                       (tag->u.ramdisk.flags & 2) == 0,
771                       tag->u.ramdisk.start, tag->u.ramdisk.size);
772         return 0;
773 }
774
775 __tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
776
/*
 * ATAG_INITRD (deprecated): the start address arrives as a virtual
 * address and must be converted to physical, unlike ATAG_INITRD2.
 */
static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);
787
/* ATAG_INITRD2: initrd location given directly as a physical address. */
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
796
/* ATAG_SERIAL: record the 64-bit board serial number (two 32-bit halves). */
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);
805
/* ATAG_REVISION: record the board revision number. */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);
813
/* ATAG_CMDLINE: replace the built-in default command line (truncating). */
static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
821
822 /*
823  * Scan the tag table for this tag, and call its parse function.
824  * The tag table is built by the linker from all the __tagtable
825  * declarations.
826  */
827 static int __init parse_tag(const struct tag *tag)
828 {
829         extern struct tagtable __tagtable_begin, __tagtable_end;
830         struct tagtable *t;
831
832         for (t = &__tagtable_begin; t < &__tagtable_end; t++)
833                 if (tag->hdr.tag == t->tag) {
834                         t->parse(tag);
835                         break;
836                 }
837
838         return t < &__tagtable_end;
839 }
840
841 /*
842  * Parse all tags in the list, checking both the global and architecture
843  * specific tag tables.
844  */
845 static void __init parse_tags(const struct tag *t)
846 {
847         for (; t->hdr.size; t = tag_next(t))
848                 if (!parse_tag(t))
849                         printk(KERN_WARNING
850                                 "Ignoring unrecognised tag 0x%08x\n",
851                                 t->hdr.tag);
852 }
853
854 /*
855  * This holds our defaults.
856  */
/*
 * Fallback tag list used when the bootloader supplies none:
 * an ATAG_CORE, a single default memory bank, and the terminator.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },			/* core payload — flags/pagesize/rootdev, presumably; verify field order */
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },		/* default bank: size, start */
	{ 0, ATAG_NONE }			/* zero-size terminator */
};
870
/* Machine-specific init hook, captured from the mdesc by setup_arch(). */
static void (*init_machine)(void) __initdata;

/* Run the machine hook at arch_initcall time, if one was registered. */
static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);
881
/*
 * Main architecture-specific boot setup: identify CPU and machine,
 * locate and parse the ATAG list (or a converted param_struct, or the
 * built-in defaults), assemble the command line, initialise paging
 * and resources, and capture the machine's irq/timer/init hooks.
 */
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	/* Prefer the bootloader-provided atags pointer over the
	 * machine's static boot_params address. */
	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	/* Conversion failed: fall back to the built-in defaults. */
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	/* Machine fixup may rewrite tags, cmdline and meminfo. */
	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* fixup-provided banks win over ATAG_MEM entries. */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code   = (unsigned long) &_etext;
	init_mm.end_data   = (unsigned long) &_edata;
	init_mm.brk        = (unsigned long) &_end;

	/* Keep an unmodified copy before early params are stripped. */
	memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();
}
952
953
954 static int __init topology_init(void)
955 {
956         int cpu;
957
958         for_each_possible_cpu(cpu) {
959                 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
960                 cpuinfo->cpu.hotpluggable = 1;
961                 register_cpu(&cpuinfo->cpu, cpu);
962         }
963
964         return 0;
965 }
966
967 subsys_initcall(topology_init);
968
/*
 * Feature names corresponding to elf_hwcap bits, lowest bit first;
 * NULL-terminated.  Presumably consumed by the /proc/cpuinfo show
 * routine (not visible in this chunk).
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	NULL
};
983
/*
 * Print the size, associativity, line length and number of sets of
 * one pre-v7 cache (type is "I", "D" or "Cache") in /proc/cpuinfo
 * format.  The geometry is decoded from the CP15 cache type
 * register fields via the CACHE_* macros.
 */
static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
        /* multiplier field: M set means 3x base size, clear means 2x */
        unsigned int mult = CACHE_M(cache) ? 3 : 2;
        unsigned int size = mult << (8 + CACHE_SIZE(cache));
        unsigned int assoc = (mult << CACHE_ASSOC(cache)) >> 1;
        unsigned int linelen = 8 << CACHE_LINE(cache);
        unsigned int sets = 1 << (6 + CACHE_SIZE(cache) -
                                  CACHE_ASSOC(cache) - CACHE_LINE(cache));

        seq_printf(m, "%s size\t\t: %d\n"
                      "%s assoc\t\t: %d\n"
                      "%s line length\t: %d\n"
                      "%s sets\t\t: %d\n",
                type, size,
                type, assoc,
                type, linelen,
                type, sets);
}
999
/*
 * Dump the geometry and policy flags of one ARMv7 cache level to
 * /proc/cpuinfo.
 *
 * @levelselect: raw value written to the cache size selection
 *               register; bits [31:1] encode (level - 1) << 1 and
 *               bit 0 selects the instruction cache (the caller
 *               passes levelselect | 1 for the I side) — presumably
 *               the CSSELR encoding; confirm against the ARM ARM.
 */
static void c_show_v7_cache(struct seq_file *m, const char *type, unsigned int levelselect)
{
        unsigned int cachesize;
        unsigned int level = (levelselect >> 1) + 1;

        /*
         * Select the wanted cache level/type, then issue a prefetch
         * flush so the subsequent size read observes the new
         * selection.  The order of these three accesses matters.
         */
        write_extended_cpuid(2,0,0,0,levelselect);  /* Set the cache size selection register */
        write_extended_cpuid(0,7,5,4,0);      /* Prefetch flush to wait for above */
        cachesize = read_extended_cpuid(1,0,0,0);

        /* Decode and print size/assoc/line/sets plus WA/RA/WB/WT support. */
        seq_printf(m, "L%u %s size\t\t: %d bytes\n"
                   "L%u %s assoc\t\t: %d\n"
                   "L%u %s line length\t: %d\n"
                   "L%u %s sets\t\t: %d\n"
                   "L%u %s supports\t\t:%s%s%s%s\n",
                   level, type, CACHE_SIZE_V7(cachesize),
                   level, type, CACHE_ASSOC_V7(cachesize),
                   level, type, CACHE_LINE_V7(cachesize),
                   level, type, CACHE_SETS_V7(cachesize),
                   level, type, CACHE_WA_V7(cachesize) ? " WA" : "",
                   CACHE_RA_V7(cachesize) ? " RA" : "",
                   CACHE_WB_V7(cachesize) ? " WB" : "",
                   CACHE_WT_V7(cachesize) ? " WT" : "");
}
1023
1024 static int c_show(struct seq_file *m, void *v)
1025 {
1026         int i;
1027
1028         seq_printf(m, "Processor\t: %s rev %d (%s)\n",
1029                    cpu_name, (int)processor_id & 15, elf_platform);
1030
1031 #if defined(CONFIG_SMP)
1032         for_each_online_cpu(i) {
1033                 /*
1034                  * glibc reads /proc/cpuinfo to determine the number of
1035                  * online processors, looking for lines beginning with
1036                  * "processor".  Give glibc what it expects.
1037                  */
1038                 seq_printf(m, "processor\t: %d\n", i);
1039                 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
1040                            per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1041                            (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1042         }
1043 #else /* CONFIG_SMP */
1044         seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1045                    loops_per_jiffy / (500000/HZ),
1046                    (loops_per_jiffy / (5000/HZ)) % 100);
1047 #endif
1048
1049         /* dump out the processor features */
1050         seq_puts(m, "Features\t: ");
1051
1052         for (i = 0; hwcap_str[i]; i++)
1053                 if (elf_hwcap & (1 << i))
1054                         seq_printf(m, "%s ", hwcap_str[i]);
1055
1056         seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
1057         seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);
1058
1059         if ((processor_id & 0x0008f000) == 0x00000000) {
1060                 /* pre-ARM7 */
1061                 seq_printf(m, "CPU part\t: %07x\n", processor_id >> 4);
1062         } else {
1063                 if ((processor_id & 0x0008f000) == 0x00007000) {
1064                         /* ARM7 */
1065                         seq_printf(m, "CPU variant\t: 0x%02x\n",
1066                                    (processor_id >> 16) & 127);
1067                 } else {
1068                         /* post-ARM7 */
1069                         seq_printf(m, "CPU variant\t: 0x%x\n",
1070                                    (processor_id >> 20) & 15);
1071                 }
1072                 seq_printf(m, "CPU part\t: 0x%03x\n",
1073                            (processor_id >> 4) & 0xfff);
1074         }
1075         seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);
1076
1077         {
1078                 unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
1079                 if (cache_info != processor_id && (cache_info & (1<<31))) {
1080                         /* V7 style of cache info register */
1081                         unsigned int id = read_extended_cpuid(1,0,0,1);
1082                         unsigned int levelselect = 0;
1083                         seq_printf(m, "L1 I cache\t:%s\n"
1084                                    "Cache unification level\t: %u\n"
1085                                    "Cache coherency level\t: %u\n",
1086                                    v7_cache_policy[CACHE_TYPE_V7(cache_info)],
1087                                    CACHE_UNIFIED(id),
1088                                    CACHE_COHERENT(id));
1089
1090                         while (id & CACHE_ID_LEVEL_MASK) {
1091                                 seq_printf(m, "Level %u cache\t\t: %s\n",
1092                                            (levelselect >> 1)+1, v7_cache_type[id & CACHE_ID_LEVEL_MASK]);
1093
1094                                 if (id & 1) {
1095                                         /* Dump I at this level */
1096                                         c_show_v7_cache(m, "I", levelselect | 1);
1097                                 }
1098
1099                                 if (id & (4 | 2)) {
1100                                         /* Dump D or unified at this level */
1101                                         c_show_v7_cache(m, (id & 4) ? "cache" : "D", levelselect);
1102                                 }
1103
1104                                 /* Next level out */
1105                                 levelselect += 2;
1106                                 id >>= CACHE_ID_LEVEL_BITS;
1107                         }
1108                 } else if (cache_info != processor_id) {
1109                         seq_printf(m, "Cache type\t: %s\n"
1110                                       "Cache clean\t: %s\n"
1111                                       "Cache lockdown\t: %s\n"
1112                                       "Cache format\t: %s\n",
1113                                    cache_types[CACHE_TYPE(cache_info)],
1114                                    cache_clean[CACHE_TYPE(cache_info)],
1115                                    cache_lockdown[CACHE_TYPE(cache_info)],
1116                                    CACHE_S(cache_info) ? "Harvard" : "Unified");
1117
1118                         if (CACHE_S(cache_info)) {
1119                                 c_show_cache(m, "I", CACHE_ISIZE(cache_info));
1120                                 c_show_cache(m, "D", CACHE_DSIZE(cache_info));
1121                         } else {
1122                                 c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
1123                         }
1124                 }
1125         }
1126
1127         seq_puts(m, "\n");
1128
1129         seq_printf(m, "Hardware\t: %s\n", machine_name);
1130         seq_printf(m, "Revision\t: %04x\n", system_rev);
1131         seq_printf(m, "Serial\t\t: %08x%08x\n",
1132                    system_serial_high, system_serial_low);
1133
1134         return 0;
1135 }
1136
1137 static void *c_start(struct seq_file *m, loff_t *pos)
1138 {
1139         return *pos < 1 ? (void *)1 : NULL;
1140 }
1141
1142 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1143 {
1144         ++*pos;
1145         return NULL;
1146 }
1147
/* seq_file stop: nothing was allocated in c_start, so nothing to free. */
static void c_stop(struct seq_file *m, void *v)
{
}
1151
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};