/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
/* Default RAM size, used for the ATAG_MEM entry in init_tags below. */
#define MEM_SIZE (16*1024*1024)
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
    memcpy(fpe_type, line, 8);
    return 1;
}

__setup("fpe=", fpe_setup);
#endif
extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);

unsigned long __initdata vmalloc_reserve = 128 << 20;
#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
#endif

struct stack {
    u32 irq[3];
    u32 abt[3];
    u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char __initdata command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
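/*
 * Run-time endianness probe: the union below stores the bytes 'l','?','?','b',
 * so ENDIANNESS (the first byte of the long) evaluates to 'l' on a
 * little-endian kernel and 'b' on a big-endian one.  setup_processor()
 * appends this character to the machine and ELF platform names.
 */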
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
    {
        .name = "Video RAM",
        .flags = IORESOURCE_MEM
    },
    {
        .name = "Kernel text",
        .flags = IORESOURCE_MEM
    },
    {
        .name = "Kernel data",
        .flags = IORESOURCE_MEM
    }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
    {
        .name = "reserved",
        .start = 0x3bc,
        .end = 0x3be,
        .flags = IORESOURCE_IO | IORESOURCE_BUSY
    },
    {
        .name = "reserved",
        .start = 0x378,
        .end = 0x37f,
        .flags = IORESOURCE_IO | IORESOURCE_BUSY
    },
    {
        .name = "reserved",
        .start = 0x278,
        .end = 0x27f,
        .flags = IORESOURCE_IO | IORESOURCE_BUSY
    }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
static const char *cache_types[16] = {
static const char *cache_clean[16] = {
static const char *cache_lockdown[16] = {
static const char *proc_arch[] = {
static const char *v7_cache_policy[4] = {
static const char *v7_cache_type[8] = {
    "separate instruction and data",
#define CACHE_TYPE(x)   (((x) >> 25) & 15)
#define CACHE_S(x)      ((x) & (1 << 24))
#define CACHE_DSIZE(x)  (((x) >> 12) & 4095)    /* only if S=1 */
#define CACHE_ISIZE(x)  ((x) & 4095)

#define CACHE_SIZE(y)   (((y) >> 6) & 7)
#define CACHE_ASSOC(y)  (((y) >> 3) & 7)
#define CACHE_M(y)      ((y) & (1 << 2))
#define CACHE_LINE(y)   ((y) & 3)

#define CACHE_TYPE_V7(x)    (((x) >> 14) & 3)
#define CACHE_UNIFIED(x)    ((((x) >> 27) & 7) + 1)
#define CACHE_COHERENT(x)   ((((x) >> 24) & 7) + 1)

#define CACHE_ID_LEVEL_MASK 7
#define CACHE_ID_LEVEL_BITS 3

#define CACHE_LINE_V7(v)    (1 << (((v) & 7) + 4))
#define CACHE_ASSOC_V7(v)   ((((v) >> 3) & ((1 << 10) - 1)) + 1)
#define CACHE_SETS_V7(v)    ((((v) >> 13) & ((1 << 15) - 1)) + 1)
#define CACHE_SIZE_V7(v)    (CACHE_LINE_V7(v) * CACHE_ASSOC_V7(v) * CACHE_SETS_V7(v))
#define CACHE_WA_V7(v)      (((v) & (1 << 28)) != 0)
#define CACHE_RA_V7(v)      (((v) & (1 << 29)) != 0)
#define CACHE_WB_V7(v)      (((v) & (1 << 30)) != 0)
#define CACHE_WT_V7(v)      (((v) & (1 << 31)) != 0)
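/*
 * Example decode using the ARMv7 macros above: a cache size ID value whose
 * fields are LineSize = 1, Associativity = 3 and NumSets = 255 gives
 * CACHE_LINE_V7 = 1 << (1 + 4) = 32 byte lines, CACHE_ASSOC_V7 = 4 ways and
 * CACHE_SETS_V7 = 256 sets, i.e. CACHE_SIZE_V7 = 32 * 4 * 256 = 32KB.
 */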
static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
    unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

    printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
        cpu, prefix,
        mult << (8 + CACHE_SIZE(cache)),
        (mult << CACHE_ASSOC(cache)) >> 1,
        8 << CACHE_LINE(cache),
        1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
              CACHE_LINE(cache)));
}
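/*
 * Note: read_extended_cpuid()/write_extended_cpuid() are helpers defined
 * elsewhere in this tree (presumably thin wrappers around CP15 MRC/MCR with
 * the given register operands).  The sequence below, and the identical one in
 * c_show_v7_cache(), selects a cache level in the ARMv7 Cache Size Selection
 * Register, waits for the selection to take effect with a prefetch flush, and
 * then reads the Cache Size ID Register, which the CACHE_*_V7() macros decode.
 */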
static void dump_v7_cache(const char *type, int cpu, unsigned int level)
{
    unsigned int cachesize;

    write_extended_cpuid(2,0,0,0,level);    /* Set the cache size selection register */
    write_extended_cpuid(0,7,5,4,0);        /* Prefetch flush to wait for above */
    cachesize = read_extended_cpuid(1,0,0,0);

    printk("CPU%u: %s cache: %d bytes, associativity %d, %d byte lines, %d sets,\n supports%s%s%s%s\n",
        cpu, type,
        CACHE_SIZE_V7(cachesize), CACHE_ASSOC_V7(cachesize),
        CACHE_LINE_V7(cachesize), CACHE_SETS_V7(cachesize),
        CACHE_WA_V7(cachesize) ? " WA" : "",
        CACHE_RA_V7(cachesize) ? " RA" : "",
        CACHE_WB_V7(cachesize) ? " WB" : "",
        CACHE_WT_V7(cachesize) ? " WT" : "");
}
static void __init dump_cpu_info(int cpu)
{
    unsigned int info = read_cpuid(CPUID_CACHETYPE);

    if (info != processor_id && (info & (1 << 31))) {
        /* ARMv7 style of cache info register */
        unsigned int id = read_extended_cpuid(1,0,0,1);
        unsigned int level = 0;

        printk("CPU%u: L1 I %s cache. Caches unified at level %u, coherent at level %u\n",
            cpu,
            v7_cache_policy[CACHE_TYPE_V7(info)],
            CACHE_UNIFIED(id), CACHE_COHERENT(id));

        while (id & CACHE_ID_LEVEL_MASK) {
            printk("CPU%u: Level %u cache is %s\n",
                cpu, (level >> 1)+1, v7_cache_type[id & CACHE_ID_LEVEL_MASK]);

            /* Dump I at this level */
            dump_v7_cache("I", cpu, level | 1);

            /* Dump D or unified at this level */
            dump_v7_cache((id & 4) ? "unified" : "D", cpu, level);

            /* Next level out */
            level += 2;
            id >>= CACHE_ID_LEVEL_BITS;
        }
    } else if (info != processor_id) {
        printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
            cache_types[CACHE_TYPE(info)]);
        if (CACHE_S(info)) {
            dump_cache("I cache", cpu, CACHE_ISIZE(info));
            dump_cache("D cache", cpu, CACHE_DSIZE(info));
        } else {
            dump_cache("cache", cpu, CACHE_ISIZE(info));
        }
    }

    if (arch_is_coherent())
        printk("Cache coherency enabled\n");
}
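/*
 * cpu_architecture() decodes the main ID register held in processor_id:
 * bits [31:24] implementer, [23:20] variant, [19:16] architecture,
 * [15:4] part number and [3:0] revision.  An architecture field of 0xf
 * selects the revised CPUID scheme, in which case MMFR0 is read below to
 * distinguish ARMv6 from ARMv7.
 */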
int cpu_architecture(void)
{
    int cpu_arch;

    if ((processor_id & 0x0008f000) == 0) {
        cpu_arch = CPU_ARCH_UNKNOWN;
    } else if ((processor_id & 0x0008f000) == 0x00007000) {
        cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
    } else if ((processor_id & 0x00080000) == 0x00000000) {
        cpu_arch = (processor_id >> 16) & 7;
        if (cpu_arch)
            cpu_arch += CPU_ARCH_ARMv3;
    } else if ((processor_id & 0x000f0000) == 0x000f0000) {
        unsigned int mmfr0;

        /* Revised CPUID format.  Read the Memory Model Feature
         * Register 0 and check for VMSAv7 or PMSAv7 */
        asm("mrc p15, 0, %0, c0, c1, 4"
            : "=r" (mmfr0));
        if ((mmfr0 & 0x0000000f) == 0x00000003 ||
            (mmfr0 & 0x000000f0) == 0x00000030)
            cpu_arch = CPU_ARCH_ARMv7;
        else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                 (mmfr0 & 0x000000f0) == 0x00000020)
            cpu_arch = CPU_ARCH_ARMv6;
        else
            cpu_arch = CPU_ARCH_UNKNOWN;
    } else
        cpu_arch = CPU_ARCH_UNKNOWN;

    return cpu_arch;
}
/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);
static void __init setup_processor(void)
{
    struct proc_info_list *list;

    /*
     * locate processor in the list of supported processor
     * types.  The linker builds this table for us from the
     * entries in arch/arm/mm/proc-*.S
     */
    list = lookup_processor_type(processor_id);
    if (!list) {
        printk("CPU configuration botched (ID %08x), unable "
               "to continue.\n", processor_id);
        while (1);
    }

    cpu_name = list->cpu_name;

#ifdef MULTI_CPU
    processor = *list->proc;
#endif
#ifdef MULTI_TLB
    cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
    cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
    cpu_cache = *list->cache;
#endif

    printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
           cpu_name, processor_id, (int)processor_id & 15,
           proc_arch[cpu_architecture()], cr_alignment);

    sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
    sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
    elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
    elf_hwcap &= ~HWCAP_THUMB;
#endif

    cpu_proc_init();
}
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
    unsigned int cpu = smp_processor_id();
    struct stack *stk = &stacks[cpu];

    if (cpu >= NR_CPUS) {
        printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
        BUG();
    }

    if (system_state == SYSTEM_BOOTING)
        dump_cpu_info(cpu);

    /*
     * setup stacks for re-entrant exception handlers
     */
    __asm__ (
    "msr    cpsr_c, %1\n\t"
    "add    sp, %0, %2\n\t"
    "msr    cpsr_c, %3\n\t"
    "add    sp, %0, %4\n\t"
    "msr    cpsr_c, %5\n\t"
    "add    sp, %0, %6\n\t"
    "msr    cpsr_c, %7"
        :
        : "r" (stk),
          "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
          "I" (offsetof(struct stack, irq[0])),
          "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
          "I" (offsetof(struct stack, abt[0])),
          "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
          "I" (offsetof(struct stack, und[0])),
          "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
        : "r14");
}
static struct machine_desc * __init setup_machine(unsigned int nr)
{
    struct machine_desc *list;

    /*
     * locate machine in the list of supported machines.
     */
    list = lookup_machine_type(nr);
    if (!list) {
        printk("Machine configuration botched (nr %d), unable "
               "to continue.\n", nr);
        while (1);
    }

    printk("Machine: %s\n", list->name);

    return list;
}
static void __init early_initrd(char **p)
{
    unsigned long start, size;

    start = memparse(*p, p);
    if (**p == ',') {
        size = memparse((*p) + 1, p);

        phys_initrd_start = start;
        phys_initrd_size = size;
    }
}
__early_param("initrd=", early_initrd);
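/*
 * Worked example for arm_add_memory() below (assuming 4KB pages): a call with
 * start = 0x10000234 and size = 0x100000 first trims the leading partial page
 * (size -= 0x234) and then records a bank starting at the next page boundary,
 * 0x10001000, with the size rounded down to whole pages (0xff000 bytes).
 */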
static void __init arm_add_memory(unsigned long start, unsigned long size)
{
    struct membank *bank;

    /*
     * Ensure that start/size are aligned to a page boundary.
     * Size is appropriately rounded down, start is rounded up.
     */
    size -= start & ~PAGE_MASK;

    bank = &meminfo.bank[meminfo.nr_banks++];

    bank->start = PAGE_ALIGN(start);
    bank->size  = size & PAGE_MASK;
    bank->node  = PHYS_TO_NID(start);
}
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]".
 */
static void __init early_mem(char **p)
{
    static int usermem __initdata = 0;
    unsigned long size, start;

    /*
     * If the user specifies memory size, we
     * blow away any automatically generated
     * size.
     */
    if (usermem == 0) {
        usermem = 1;
        meminfo.nr_banks = 0;
    }

    start = PHYS_OFFSET;
    size  = memparse(*p, p);
    if (**p == '@')
        start = memparse(*p + 1, p);

    arm_add_memory(start, size);
}
__early_param("mem=", early_mem);
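/*
 * Example: booting with "mem=64M@0xc0000000 mem=64M@0xc8000000" (addresses
 * purely illustrative) discards the bank list supplied by the boot loader and
 * registers two 64MB banks in its place.
 */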
/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 128m.
 */
static void __init early_vmalloc(char **arg)
{
    vmalloc_reserve = memparse(*arg, arg);
}
__early_param("vmalloc=", early_vmalloc);
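/*
 * Each __early_param() above contributes an entry to a linker-assembled table
 * bounded by __early_begin/__early_end.  parse_cmdline() below matches every
 * word of the boot command line against that table, invokes the corresponding
 * handler, and strips the matched option from the command line that is passed
 * on to the rest of the kernel.
 */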
/*
 * Initial parsing of the command line.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
    char c = ' ', *to = command_line;
    int len = 0;

    for (;;) {
        if (c == ' ') {
            extern struct early_params __early_begin, __early_end;
            struct early_params *p;

            for (p = &__early_begin; p < &__early_end; p++) {
                int len = strlen(p->arg);

                if (memcmp(from, p->arg, len) == 0) {
                    if (to != command_line)
                        to -= 1;
                    from += len;
                    p->fn(&from);

                    while (*from != ' ' && *from != '\0')
                        from++;
                    break;
                }
            }
        }
        c = *from++;
        if (!c)
            break;
        if (COMMAND_LINE_SIZE <= ++len)
            break;
        *to++ = c;
    }
    *to = '\0';
    *cmdline_p = command_line;
}
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
    extern int rd_size, rd_image_start, rd_prompt, rd_doload;

    rd_image_start = image_start;
    rd_prompt = prompt;
    rd_doload = doload;

    if (rd_sz)
        rd_size = rd_sz;
#endif
}
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
    struct resource *res;
    int i;

    kernel_code.start = virt_to_phys(&_text);
    kernel_code.end   = virt_to_phys(&_etext - 1);
    kernel_data.start = virt_to_phys(&__data_start);
    kernel_data.end   = virt_to_phys(&_end - 1);

    for (i = 0; i < mi->nr_banks; i++) {
        unsigned long virt_start, virt_end;

        if (mi->bank[i].size == 0)
            continue;

        virt_start = __phys_to_virt(mi->bank[i].start);
        virt_end   = virt_start + mi->bank[i].size - 1;

        res = alloc_bootmem_low(sizeof(*res));
        res->name  = "System RAM";
        res->start = __virt_to_phys(virt_start);
        res->end   = __virt_to_phys(virt_end);
        res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

        request_resource(&iomem_resource, res);

        if (kernel_code.start >= res->start &&
            kernel_code.end <= res->end)
            request_resource(res, &kernel_code);
        if (kernel_data.start >= res->start &&
            kernel_data.end <= res->end)
            request_resource(res, &kernel_data);
    }

    if (mdesc->video_start) {
        video_ram.start = mdesc->video_start;
        video_ram.end   = mdesc->video_end;
        request_resource(&iomem_resource, &video_ram);
    }

    /*
     * Some machines can never possess lp0, lp1 or lp2.
     */
    if (mdesc->reserve_lp0)
        request_resource(&ioport_resource, &lp0);
    if (mdesc->reserve_lp1)
        request_resource(&ioport_resource, &lp1);
    if (mdesc->reserve_lp2)
        request_resource(&ioport_resource, &lp2);
}
/*
 * Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
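/*
 * A minimal tag list therefore looks like this (see init_tags further down
 * for the defaults used when the boot loader supplies nothing):
 *
 *   ATAG_CORE  (flags, pagesize, rootdev)
 *   ATAG_MEM   (size, start)    -- one per memory bank
 *   ATAG_NONE  (zero-sized terminator)
 */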
static int __init parse_tag_core(const struct tag *tag)
{
    if (tag->hdr.size > 2) {
        if ((tag->u.core.flags & 1) == 0)
            root_mountflags &= ~MS_RDONLY;
        ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
    }
    return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);
static int __init parse_tag_mem32(const struct tag *tag)
{
    if (meminfo.nr_banks >= NR_BANKS) {
        printk(KERN_WARNING
               "Ignoring memory bank 0x%08x size %dKB\n",
               tag->u.mem.start, tag->u.mem.size / 1024);
        return -EINVAL;
    }
    arm_add_memory(tag->u.mem.start, tag->u.mem.size);
    return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
    .orig_video_lines  = 30,
    .orig_video_cols   = 80,
    .orig_video_mode   = 0,
    .orig_video_ega_bx = 0,
    .orig_video_isVGA  = 1,
    .orig_video_points = 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
    screen_info.orig_x            = tag->u.videotext.x;
    screen_info.orig_y            = tag->u.videotext.y;
    screen_info.orig_video_page   = tag->u.videotext.video_page;
    screen_info.orig_video_mode   = tag->u.videotext.video_mode;
    screen_info.orig_video_cols   = tag->u.videotext.video_cols;
    screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
    screen_info.orig_video_lines  = tag->u.videotext.video_lines;
    screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
    screen_info.orig_video_points = tag->u.videotext.video_points;
    return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
#ifdef CONFIG_BLK_DEV_RAM
static int __init parse_tag_ramdisk(const struct tag *tag)
{
    setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
                  (tag->u.ramdisk.flags & 2) == 0,
                  tag->u.ramdisk.start, tag->u.ramdisk.size);
    return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
#endif
static int __init parse_tag_initrd(const struct tag *tag)
{
    printk(KERN_WARNING "ATAG_INITRD is deprecated; "
           "please update your bootloader.\n");
    phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
    phys_initrd_size = tag->u.initrd.size;
    return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
    phys_initrd_start = tag->u.initrd.start;
    phys_initrd_size = tag->u.initrd.size;
    return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
static int __init parse_tag_serialnr(const struct tag *tag)
{
    system_serial_low = tag->u.serialnr.low;
    system_serial_high = tag->u.serialnr.high;
    return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
    system_rev = tag->u.revision.rev;
    return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
    strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
    return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
    extern struct tagtable __tagtable_begin, __tagtable_end;
    struct tagtable *t;

    for (t = &__tagtable_begin; t < &__tagtable_end; t++)
        if (tag->hdr.tag == t->tag) {
            t->parse(tag);
            break;
        }

    return t < &__tagtable_end;
}
/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
    for (; t->hdr.size; t = tag_next(t))
        if (!parse_tag(t))
            printk(KERN_WARNING
                   "Ignoring unrecognised tag 0x%08x\n",
                   t->hdr.tag);
}
/*
 * This holds our defaults.
 */
static struct init_tags {
    struct tag_header hdr1;
    struct tag_core   core;
    struct tag_header hdr2;
    struct tag_mem32  mem;
    struct tag_header hdr3;
} init_tags __initdata = {
    { tag_size(tag_core), ATAG_CORE },
    { 1, PAGE_SIZE, 0xff },
    { tag_size(tag_mem32), ATAG_MEM },
    { MEM_SIZE, PHYS_OFFSET },
    { 0, ATAG_NONE }
};
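/*
 * These defaults describe a single MEM_SIZE (16MB) bank starting at
 * PHYS_OFFSET; setup_arch() falls back to them only when neither an ATAGS
 * pointer from the boot loader nor machine-specific boot_params is available.
 */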
static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
    /* customizes platform devices, or adds new ones */
    if (init_machine)
        init_machine();
    return 0;
}
arch_initcall(customize_machine);
void __init setup_arch(char **cmdline_p)
{
    struct tag *tags = (struct tag *)&init_tags;
    struct machine_desc *mdesc;
    char *from = default_command_line;

    setup_processor();
    mdesc = setup_machine(machine_arch_type);
    machine_name = mdesc->name;

    if (mdesc->soft_reboot)
        reboot_setup("s");

    if (__atags_pointer)
        tags = phys_to_virt(__atags_pointer);
    else if (mdesc->boot_params)
        tags = phys_to_virt(mdesc->boot_params);

    /*
     * If we have the old style parameters, convert them to
     * a tag list.
     */
    if (tags->hdr.tag != ATAG_CORE)
        convert_to_tag_list(tags);
    if (tags->hdr.tag != ATAG_CORE)
        tags = (struct tag *)&init_tags;

    if (mdesc->fixup)
        mdesc->fixup(mdesc, tags, &from, &meminfo);

    if (tags->hdr.tag == ATAG_CORE) {
        if (meminfo.nr_banks != 0)
            squash_mem_tags(tags);
        parse_tags(tags);
    }

    init_mm.start_code = (unsigned long) &_text;
    init_mm.end_code   = (unsigned long) &_etext;
    init_mm.end_data   = (unsigned long) &_edata;
    init_mm.brk        = (unsigned long) &_end;

    memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
    boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
    parse_cmdline(cmdline_p, from);
    paging_init(&meminfo, mdesc);
    request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
    smp_init_cpus();
#endif

    cpu_init();

    /*
     * Set up various architecture-specific pointers
     */
    init_arch_irq = mdesc->init_irq;
    system_timer = mdesc->timer;
    init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
    conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
    conswitchp = &dummy_con;
#endif
#endif
    early_trap_init();
}
static int __init topology_init(void)
{
    int cpu;

    for_each_possible_cpu(cpu) {
        struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
        cpuinfo->cpu.hotpluggable = 1;
        register_cpu(&cpuinfo->cpu, cpu);
    }

    return 0;
}
subsys_initcall(topology_init);
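/*
 * register_cpu() gives each possible CPU an entry under
 * /sys/devices/system/cpu; marking it hotpluggable additionally exposes the
 * per-CPU "online" attribute there.
 */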
static const char *hwcap_str[] = {
static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
    unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

    seq_printf(m, "%s size\t\t: %d\n"
                  "%s assoc\t\t: %d\n"
                  "%s line length\t: %d\n"
                  "%s sets\t\t: %d\n",
        type, mult << (8 + CACHE_SIZE(cache)),
        type, (mult << CACHE_ASSOC(cache)) >> 1,
        type, 8 << CACHE_LINE(cache),
        type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
                    CACHE_LINE(cache)));
}
static void c_show_v7_cache(struct seq_file *m, const char *type, unsigned int levelselect)
{
    unsigned int cachesize;
    unsigned int level = (levelselect >> 1) + 1;

    write_extended_cpuid(2,0,0,0,levelselect);  /* Set the cache size selection register */
    write_extended_cpuid(0,7,5,4,0);            /* Prefetch flush to wait for above */
    cachesize = read_extended_cpuid(1,0,0,0);

    seq_printf(m, "L%u %s size\t\t: %d bytes\n"
                  "L%u %s assoc\t\t: %d\n"
                  "L%u %s line length\t: %d\n"
                  "L%u %s sets\t\t: %d\n"
                  "L%u %s supports\t\t:%s%s%s%s\n",
        level, type, CACHE_SIZE_V7(cachesize),
        level, type, CACHE_ASSOC_V7(cachesize),
        level, type, CACHE_LINE_V7(cachesize),
        level, type, CACHE_SETS_V7(cachesize),
        level, type, CACHE_WA_V7(cachesize) ? " WA" : "",
        CACHE_RA_V7(cachesize) ? " RA" : "",
        CACHE_WB_V7(cachesize) ? " WB" : "",
        CACHE_WT_V7(cachesize) ? " WT" : "");
}
static int c_show(struct seq_file *m, void *v)
{
    int i;

    seq_printf(m, "Processor\t: %s rev %d (%s)\n",
               cpu_name, (int)processor_id & 15, elf_platform);

#if defined(CONFIG_SMP)
    for_each_online_cpu(i) {
        /*
         * glibc reads /proc/cpuinfo to determine the number of
         * online processors, looking for lines beginning with
         * "processor".  Give glibc what it expects.
         */
        seq_printf(m, "processor\t: %d\n", i);
        seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
                   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
    }
#else /* CONFIG_SMP */
    seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
               loops_per_jiffy / (500000/HZ),
               (loops_per_jiffy / (5000/HZ)) % 100);
#endif
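    /*
     * The BogoMIPS value printed above is loops_per_jiffy * HZ / 500000,
     * shown with two decimal places: e.g. with HZ = 100 and
     * loops_per_jiffy = 49152, 49152 / (500000/100) = 9 and
     * (49152 / (5000/100)) % 100 = 83, so "9.83" is printed.
     */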
    /* dump out the processor features */
    seq_puts(m, "Features\t: ");

    for (i = 0; hwcap_str[i]; i++)
        if (elf_hwcap & (1 << i))
            seq_printf(m, "%s ", hwcap_str[i]);

    seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
    seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

    if ((processor_id & 0x0008f000) == 0x00000000) {
        /* pre-ARM7 */
        seq_printf(m, "CPU part\t: %07x\n", processor_id >> 4);
    } else {
        if ((processor_id & 0x0008f000) == 0x00007000) {
            /* ARM7 */
            seq_printf(m, "CPU variant\t: 0x%02x\n",
                       (processor_id >> 16) & 127);
        } else {
            /* post-ARM7 */
            seq_printf(m, "CPU variant\t: 0x%x\n",
                       (processor_id >> 20) & 15);
        }
        seq_printf(m, "CPU part\t: 0x%03x\n",
                   (processor_id >> 4) & 0xfff);
    }
    seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);
    {
        unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
        if (cache_info != processor_id && (cache_info & (1 << 31))) {
            /* V7 style of cache info register */
            unsigned int id = read_extended_cpuid(1,0,0,1);
            unsigned int levelselect = 0;

            seq_printf(m, "L1 I cache\t:%s\n"
                          "Cache unification level\t: %u\n"
                          "Cache coherency level\t: %u\n",
                       v7_cache_policy[CACHE_TYPE_V7(cache_info)],
                       CACHE_UNIFIED(id),
                       CACHE_COHERENT(id));

            while (id & CACHE_ID_LEVEL_MASK) {
                seq_printf(m, "Level %u cache\t\t: %s\n",
                           (levelselect >> 1)+1, v7_cache_type[id & CACHE_ID_LEVEL_MASK]);

                /* Dump I at this level */
                c_show_v7_cache(m, "I", levelselect | 1);

                /* Dump D or unified at this level */
                c_show_v7_cache(m, (id & 4) ? "cache" : "D", levelselect);

                /* Next level out */
                levelselect += 2;
                id >>= CACHE_ID_LEVEL_BITS;
            }
        } else if (cache_info != processor_id) {
            seq_printf(m, "Cache type\t: %s\n"
                          "Cache clean\t: %s\n"
                          "Cache lockdown\t: %s\n"
                          "Cache format\t: %s\n",
                       cache_types[CACHE_TYPE(cache_info)],
                       cache_clean[CACHE_TYPE(cache_info)],
                       cache_lockdown[CACHE_TYPE(cache_info)],
                       CACHE_S(cache_info) ? "Harvard" : "Unified");

            if (CACHE_S(cache_info)) {
                c_show_cache(m, "I", CACHE_ISIZE(cache_info));
                c_show_cache(m, "D", CACHE_DSIZE(cache_info));
            } else {
                c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
            }
        }
    }

    seq_puts(m, "\n");
    seq_printf(m, "Hardware\t: %s\n", machine_name);
    seq_printf(m, "Revision\t: %04x\n", system_rev);
    seq_printf(m, "Serial\t\t: %08x%08x\n",
               system_serial_high, system_serial_low);

    return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
    return *pos < 1 ? (void *)1 : NULL;
}
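/*
 * /proc/cpuinfo is produced as a single record: c_start() returns a non-NULL
 * token only for position 0 and c_next() immediately ends the iteration, so
 * c_show() above runs exactly once per read of the file.
 */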
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
    ++*pos;
    return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
    .start = c_start,
    .next  = c_next,
    .stop  = c_stop,
    .show  = c_show
};