1 /*  $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
2  *  arch/sparc64/mm/init.c
3  *
4  *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
5  *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6  */
7  
8 #include <linux/config.h>
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/string.h>
12 #include <linux/init.h>
13 #include <linux/bootmem.h>
14 #include <linux/mm.h>
15 #include <linux/hugetlb.h>
16 #include <linux/slab.h>
17 #include <linux/initrd.h>
18 #include <linux/swap.h>
19 #include <linux/pagemap.h>
20 #include <linux/fs.h>
21 #include <linux/seq_file.h>
22 #include <linux/kprobes.h>
23 #include <linux/cache.h>
24
25 #include <asm/head.h>
26 #include <asm/system.h>
27 #include <asm/page.h>
28 #include <asm/pgalloc.h>
29 #include <asm/pgtable.h>
30 #include <asm/oplib.h>
31 #include <asm/iommu.h>
32 #include <asm/io.h>
33 #include <asm/uaccess.h>
34 #include <asm/mmu_context.h>
35 #include <asm/tlbflush.h>
36 #include <asm/dma.h>
37 #include <asm/starfire.h>
38 #include <asm/tlb.h>
39 #include <asm/spitfire.h>
40 #include <asm/sections.h>
41
42 extern void device_scan(void);
43
44 struct sparc_phys_banks {
45         unsigned long base_addr;
46         unsigned long num_bytes;
47 };
48
49 #define SPARC_PHYS_BANKS 32
50
51 static struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
52
53 unsigned long *sparc64_valid_addr_bitmap __read_mostly;
54
55 /* Ugly, but necessary... -DaveM */
56 unsigned long phys_base __read_mostly;
57 unsigned long kern_base __read_mostly;
58 unsigned long kern_size __read_mostly;
59 unsigned long pfn_base __read_mostly;
60
61 /* get_new_mmu_context() uses "cache + 1".  */
62 DEFINE_SPINLOCK(ctx_alloc_lock);
63 unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
64 #define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6))
65 unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
66
67 /* References to special section boundaries */
68 extern char  _start[], _end[];
69
70 /* Initial ramdisk setup */
71 extern unsigned long sparc_ramdisk_image64;
72 extern unsigned int sparc_ramdisk_image;
73 extern unsigned int sparc_ramdisk_size;
74
75 struct page *mem_map_zero __read_mostly;
76
77 int bigkernel = 0;
78
79 /* XXX Tune this... */
80 #define PGT_CACHE_LOW   25
81 #define PGT_CACHE_HIGH  50
82
83 void check_pgt_cache(void)
84 {
85         preempt_disable();
86         if (pgtable_cache_size > PGT_CACHE_HIGH) {
87                 do {
88                         if (pgd_quicklist)
89                                 free_pgd_slow(get_pgd_fast());
90                         if (pte_quicklist[0])
91                                 free_pte_slow(pte_alloc_one_fast(NULL, 0));
92                         if (pte_quicklist[1])
93                                 free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
94                 } while (pgtable_cache_size > PGT_CACHE_LOW);
95         }
96         preempt_enable();
97 }
98
99 #ifdef CONFIG_DEBUG_DCFLUSH
100 atomic_t dcpage_flushes = ATOMIC_INIT(0);
101 #ifdef CONFIG_SMP
102 atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
103 #endif
104 #endif
105
106 __inline__ void flush_dcache_page_impl(struct page *page)
107 {
108 #ifdef CONFIG_DEBUG_DCFLUSH
109         atomic_inc(&dcpage_flushes);
110 #endif
111
112 #ifdef DCACHE_ALIASING_POSSIBLE
113         __flush_dcache_page(page_address(page),
114                             ((tlb_type == spitfire) &&
115                              page_mapping(page) != NULL));
116 #else
117         if (page_mapping(page) != NULL &&
118             tlb_type == spitfire)
119                 __flush_icache_page(__pa(page_address(page)));
120 #endif
121 }
122
123 #define PG_dcache_dirty         PG_arch_1
124 #define PG_dcache_cpu_shift     24
125 #define PG_dcache_cpu_mask      (256 - 1)
126
127 #if NR_CPUS > 256
128 #error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
129 #endif
130
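/* D-cache dirty state is folded into page->flags: the PG_dcache_dirty
 * bit (aliased onto PG_arch_1 above) marks a page whose data may still
 * be sitting in some cpu's D-cache, and the eight bits starting at
 * PG_dcache_cpu_shift record which cpu that is (hence the 256 cpu
 * limit checked above).  For example, a page dirtied on cpu 3 ends up
 * with ((3UL << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty)) set
 * by set_dcache_dirty() below.
 */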
131 #define dcache_dirty_cpu(page) \
132         (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
133
134 static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
135 {
136         unsigned long mask = this_cpu;
137         unsigned long non_cpu_bits;
138
139         non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
140         mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
141
142         __asm__ __volatile__("1:\n\t"
143                              "ldx       [%2], %%g7\n\t"
144                              "and       %%g7, %1, %%g1\n\t"
145                              "or        %%g1, %0, %%g1\n\t"
146                              "casx      [%2], %%g7, %%g1\n\t"
147                              "cmp       %%g7, %%g1\n\t"
148                              "membar    #StoreLoad | #StoreStore\n\t"
149                              "bne,pn    %%xcc, 1b\n\t"
150                              " nop"
151                              : /* no outputs */
152                              : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
153                              : "g1", "g7");
154 }
155
156 static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
157 {
158         unsigned long mask = (1UL << PG_dcache_dirty);
159
160         __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
161                              "1:\n\t"
162                              "ldx       [%2], %%g7\n\t"
163                              "srlx      %%g7, %4, %%g1\n\t"
164                              "and       %%g1, %3, %%g1\n\t"
165                              "cmp       %%g1, %0\n\t"
166                              "bne,pn    %%icc, 2f\n\t"
167                              " andn     %%g7, %1, %%g1\n\t"
168                              "casx      [%2], %%g7, %%g1\n\t"
169                              "cmp       %%g7, %%g1\n\t"
170                              "membar    #StoreLoad | #StoreStore\n\t"
171                              "bne,pn    %%xcc, 1b\n\t"
172                              " nop\n"
173                              "2:"
174                              : /* no outputs */
175                              : "r" (cpu), "r" (mask), "r" (&page->flags),
176                                "i" (PG_dcache_cpu_mask),
177                                "i" (PG_dcache_cpu_shift)
178                              : "g1", "g7");
179 }
180
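/* Called when a pte is installed for a user mapping.  If the backing
 * page was left marked PG_dcache_dirty by flush_dcache_page(), the
 * deferred D-cache flush is performed now, on the cpu that owns the
 * dirty lines (cross-calling it if that is not the current cpu), and
 * the dirty mark is cleared.
 */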
181 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
182 {
183         struct page *page;
184         unsigned long pfn;
185         unsigned long pg_flags;
186
187         pfn = pte_pfn(pte);
188         if (pfn_valid(pfn) &&
189             (page = pfn_to_page(pfn), page_mapping(page)) &&
190             ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
191                 int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
192                            PG_dcache_cpu_mask);
193                 int this_cpu = get_cpu();
194
195                 /* This is just to optimize away some function calls
196                  * in the SMP case.
197                  */
198                 if (cpu == this_cpu)
199                         flush_dcache_page_impl(page);
200                 else
201                         smp_flush_dcache_page_impl(page, cpu);
202
203                 clear_dcache_dirty_cpu(page, cpu);
204
205                 put_cpu();
206         }
207 }
208
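/* Generic code calls this when the kernel has written into page cache
 * data.  If the page belongs to a mapping that is not mmap()'d
 * anywhere, the flush is deferred: the page is only tagged with
 * PG_dcache_dirty and the current cpu, and update_mmu_cache() does the
 * real flush if and when the page is ever mapped into user space.
 * Otherwise the page is flushed immediately.
 */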
209 void flush_dcache_page(struct page *page)
210 {
211         struct address_space *mapping;
212         int this_cpu;
213
214         /* Do not bother with the expensive D-cache flush if it
215          * is merely the zero page.  The 'bigcore' testcase in GDB
216          * causes this case to run millions of times.
217          */
218         if (page == ZERO_PAGE(0))
219                 return;
220
221         this_cpu = get_cpu();
222
223         mapping = page_mapping(page);
224         if (mapping && !mapping_mapped(mapping)) {
225                 int dirty = test_bit(PG_dcache_dirty, &page->flags);
226                 if (dirty) {
227                         int dirty_cpu = dcache_dirty_cpu(page);
228
229                         if (dirty_cpu == this_cpu)
230                                 goto out;
231                         smp_flush_dcache_page_impl(page, dirty_cpu);
232                 }
233                 set_dcache_dirty(page, this_cpu);
234         } else {
235                 /* We could delay the flush for the !page_mapping
236                  * case too.  But that case is for exec env/arg
237                  * pages and those are 99% certain to get
238                  * faulted into the tlb (and thus flushed) anyway.
239                  */
240                 flush_dcache_page_impl(page);
241         }
242
243 out:
244         put_cpu();
245 }
246
247 void __kprobes flush_icache_range(unsigned long start, unsigned long end)
248 {
249         /* Cheetah has coherent I-cache. */
250         if (tlb_type == spitfire) {
251                 unsigned long kaddr;
252
253                 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
254                         __flush_icache_page(__get_phys(kaddr));
255         }
256 }
257
258 unsigned long page_to_pfn(struct page *page)
259 {
260         return (unsigned long) ((page - mem_map) + pfn_base);
261 }
262
263 struct page *pfn_to_page(unsigned long pfn)
264 {
265         return (mem_map + (pfn - pfn_base));
266 }
267
268 void show_mem(void)
269 {
270         printk("Mem-info:\n");
271         show_free_areas();
272         printk("Free swap:       %6ldkB\n",
273                nr_swap_pages << (PAGE_SHIFT-10));
274         printk("%ld pages of RAM\n", num_physpages);
275         printk("%d free pages\n", nr_free_pages());
276         printk("%d pages in page table cache\n",pgtable_cache_size);
277 }
278
279 void mmu_info(struct seq_file *m)
280 {
281         if (tlb_type == cheetah)
282                 seq_printf(m, "MMU Type\t: Cheetah\n");
283         else if (tlb_type == cheetah_plus)
284                 seq_printf(m, "MMU Type\t: Cheetah+\n");
285         else if (tlb_type == spitfire)
286                 seq_printf(m, "MMU Type\t: Spitfire\n");
287         else
288                 seq_printf(m, "MMU Type\t: ???\n");
289
290 #ifdef CONFIG_DEBUG_DCFLUSH
291         seq_printf(m, "DCPageFlushes\t: %d\n",
292                    atomic_read(&dcpage_flushes));
293 #ifdef CONFIG_SMP
294         seq_printf(m, "DCPageFlushesXC\t: %d\n",
295                    atomic_read(&dcpage_flushes_xcall));
296 #endif /* CONFIG_SMP */
297 #endif /* CONFIG_DEBUG_DCFLUSH */
298 }
299
300 struct linux_prom_translation {
301         unsigned long virt;
302         unsigned long size;
303         unsigned long data;
304 };
305 static struct linux_prom_translation prom_trans[512] __initdata;
306
307 extern unsigned long prom_boot_page;
308 extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
309 extern int prom_get_mmu_ihandle(void);
310 extern void register_prom_callbacks(void);
311
312 /* Exported for SMP bootup purposes. */
313 unsigned long kern_locked_tte_data;
314
315 /* Exported for kernel TLB miss handling in ktlb.S */
316 unsigned long prom_pmd_phys __read_mostly;
317 unsigned int swapper_pgd_zero __read_mostly;
318
319 /* Allocate power-of-2 aligned chunks from the end of the
320  * kernel image.  Return physical address.
321  */
322 static inline unsigned long early_alloc_phys(unsigned long size)
323 {
324         unsigned long base;
325
326         BUILD_BUG_ON(size & (size - 1));
327
328         kern_size = (kern_size + (size - 1)) & ~(size - 1);
329         base = kern_base + kern_size;
330         kern_size += size;
331
332         return base;
333 }
334
335 static inline unsigned long load_phys32(unsigned long pa)
336 {
337         unsigned long val;
338
339         __asm__ __volatile__("lduwa     [%1] %2, %0"
340                              : "=&r" (val)
341                              : "r" (pa), "i" (ASI_PHYS_USE_EC));
342
343         return val;
344 }
345
346 static inline unsigned long load_phys64(unsigned long pa)
347 {
348         unsigned long val;
349
350         __asm__ __volatile__("ldxa      [%1] %2, %0"
351                              : "=&r" (val)
352                              : "r" (pa), "i" (ASI_PHYS_USE_EC));
353
354         return val;
355 }
356
357 static inline void store_phys32(unsigned long pa, unsigned long val)
358 {
359         __asm__ __volatile__("stwa      %0, [%1] %2"
360                              : /* no outputs */
361                              : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
362 }
363
364 static inline void store_phys64(unsigned long pa, unsigned long val)
365 {
366         __asm__ __volatile__("stxa      %0, [%1] %2"
367                              : /* no outputs */
368                              : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
369 }
370
371 #define BASE_PAGE_SIZE 8192
372
373 /*
374  * Translate PROM's mapping we capture at boot time into physical address.
375  * The second parameter is only set from prom_callback() invocations.
376  */
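/* The captured OBP page table is a simple two-level structure built on
 * 8K (BASE_PAGE_SIZE) pages: bits [33:23] of the virtual address select
 * a 32-bit pmd entry (which stores the pte page's physical address
 * shifted right by 11), bits [22:13] select one of the 1024 64-bit
 * ptes within that page, and the low 13 bits are the offset inside the
 * 8K page.
 */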
377 unsigned long prom_virt_to_phys(unsigned long promva, int *error)
378 {
379         unsigned long pmd_phys = (prom_pmd_phys +
380                                   ((promva >> 23) & 0x7ff) * sizeof(pmd_t));
381         unsigned long pte_phys;
382         pmd_t pmd_ent;
383         pte_t pte_ent;
384         unsigned long base;
385
386         pmd_val(pmd_ent) = load_phys32(pmd_phys);
387         if (pmd_none(pmd_ent)) {
388                 if (error)
389                         *error = 1;
390                 return 0;
391         }
392
393         pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
394         pte_phys += ((promva >> 13) & 0x3ff) * sizeof(pte_t);
395         pte_val(pte_ent) = load_phys64(pte_phys);
396         if (!pte_present(pte_ent)) {
397                 if (error)
398                         *error = 1;
399                 return 0;
400         }
401         if (error) {
402                 *error = 0;
403                 return pte_val(pte_ent);
404         }
405         base = pte_val(pte_ent) & _PAGE_PADDR;
406         return (base + (promva & (BASE_PAGE_SIZE - 1)));
407 }
408
409 /* The obp translations are saved based on 8k pagesize, since obp can
410  * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
411  * HI_OBP_ADDRESS range are handled in entry.S and do not use the vpte
412  * scheme (also, see rant in inherit_locked_prom_mappings()).
413  */
414 static void __init build_obp_range(unsigned long start, unsigned long end, unsigned long data)
415 {
416         unsigned long vaddr;
417
418         for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) {
419                 unsigned long val, pte_phys, pmd_phys;
420                 pmd_t pmd_ent;
421                 int i;
422
423                 pmd_phys = (prom_pmd_phys +
424                             (((vaddr >> 23) & 0x7ff) * sizeof(pmd_t)));
425                 pmd_val(pmd_ent) = load_phys32(pmd_phys);
426                 if (pmd_none(pmd_ent)) {
427                         pte_phys = early_alloc_phys(BASE_PAGE_SIZE);
428
429                         for (i = 0; i < BASE_PAGE_SIZE / sizeof(pte_t); i++)
430                                 store_phys64(pte_phys+i*sizeof(pte_t),0);
431
432                         pmd_val(pmd_ent) = pte_phys >> 11UL;
433                         store_phys32(pmd_phys, pmd_val(pmd_ent));
434                 }
435
436                 pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
437                 pte_phys += (((vaddr >> 13) & 0x3ff) * sizeof(pte_t));
438
439                 val = data;
440
441                 /* Clear diag TTE bits. */
442                 if (tlb_type == spitfire)
443                         val &= ~0x0003fe0000000000UL;
444
445                 store_phys64(pte_phys, val | _PAGE_MODIFIED);
446
447                 data += BASE_PAGE_SIZE;
448         }
449 }
450
451 static inline int in_obp_range(unsigned long vaddr)
452 {
453         return (vaddr >= LOW_OBP_ADDRESS &&
454                 vaddr < HI_OBP_ADDRESS);
455 }
456
457 #define OBP_PMD_SIZE 2048
458 static void __init build_obp_pgtable(int prom_trans_ents)
459 {
460         unsigned long i;
461
462         prom_pmd_phys = early_alloc_phys(OBP_PMD_SIZE);
463         for (i = 0; i < OBP_PMD_SIZE; i += 4)
464                 store_phys32(prom_pmd_phys + i, 0);
465
466         for (i = 0; i < prom_trans_ents; i++) {
467                 unsigned long start, end;
468
469                 if (!in_obp_range(prom_trans[i].virt))
470                         continue;
471
472                 start = prom_trans[i].virt;
473                 end = start + prom_trans[i].size;
474                 if (end > HI_OBP_ADDRESS)
475                         end = HI_OBP_ADDRESS;
476
477                 build_obp_range(start, end, prom_trans[i].data);
478         }
479 }
480
481 /* Read OBP translations property into 'prom_trans[]'.
482  * Return the number of entries.
483  */
484 static int __init read_obp_translations(void)
485 {
486         int n, node;
487
488         node = prom_finddevice("/virtual-memory");
489         n = prom_getproplen(node, "translations");
490         if (unlikely(n == 0 || n == -1)) {
491                 prom_printf("prom_mappings: Couldn't get size.\n");
492                 prom_halt();
493         }
494         if (unlikely(n > sizeof(prom_trans))) {
495                 prom_printf("prom_mappings: Size %d is too big.\n", n);
496                 prom_halt();
497         }
498
499         if ((n = prom_getproperty(node, "translations",
500                                   (char *)&prom_trans[0],
501                                   sizeof(prom_trans))) == -1) {
502                 prom_printf("prom_mappings: Couldn't get property.\n");
503                 prom_halt();
504         }
505         n = n / sizeof(struct linux_prom_translation);
506         return n;
507 }
508
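/* Build the locked TTE that covers the kernel image (a single 4MB
 * page, or two of them when 'bigkernel' is set) and have OBP load it
 * into the highest locked entry of both the I- and D-TLB, so the
 * kernel image stays mapped at KERNBASE across calls into the
 * firmware.
 */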
509 static void __init remap_kernel(void)
510 {
511         unsigned long phys_page, tte_vaddr, tte_data;
512         int tlb_ent = sparc64_highest_locked_tlbent();
513
514         tte_vaddr = (unsigned long) KERNBASE;
515         phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
516         tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
517                                  _PAGE_CP | _PAGE_CV | _PAGE_P |
518                                  _PAGE_L | _PAGE_W));
519
520         kern_locked_tte_data = tte_data;
521
522         /* Now lock us into the TLBs via OBP. */
523         prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
524         prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
525         if (bigkernel) {
526                 prom_dtlb_load(tlb_ent - 1,
527                                tte_data + 0x400000, 
528                                tte_vaddr + 0x400000);
529                 prom_itlb_load(tlb_ent - 1,
530                                tte_data + 0x400000, 
531                                tte_vaddr + 0x400000);
532         }
533 }
534
535 static void __init inherit_prom_mappings(void)
536 {
537         int n;
538
539         n = read_obp_translations();
540         build_obp_pgtable(n);
541
542         /* Now fixup OBP's idea about where we really are mapped. */
543         prom_printf("Remapping the kernel... ");
544         remap_kernel();
545
546         prom_printf("done.\n");
547
548         register_prom_callbacks();
549 }
550
551 /* The OBP specifications for sun4u mark 0xfffffffc00000000 and
552  * upwards as reserved for use by the firmware (I wonder if this
553  * will be the same on Cheetah...).  We use this virtual address
554  * range for the VPTE table mappings of the nucleus so we need
555  * to zap them when we enter the PROM.  -DaveM
556  */
557 static void __flush_nucleus_vptes(void)
558 {
559         unsigned long prom_reserved_base = 0xfffffffc00000000UL;
560         int i;
561
562         /* Only DTLB must be checked for VPTE entries. */
563         if (tlb_type == spitfire) {
564                 for (i = 0; i < 63; i++) {
565                         unsigned long tag;
566
567                         /* Spitfire Errata #32 workaround */
568                         /* NOTE: Always runs on spitfire, so no cheetah+
569                          *       page size encodings.
570                          */
571                         __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
572                                              "flush     %%g6"
573                                              : /* No outputs */
574                                              : "r" (0),
575                                              "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
576
577                         tag = spitfire_get_dtlb_tag(i);
578                         if (((tag & ~(PAGE_MASK)) == 0) &&
579                             ((tag &  (PAGE_MASK)) >= prom_reserved_base)) {
580                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
581                                                      "membar #Sync"
582                                                      : /* no outputs */
583                                                      : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
584                                 spitfire_put_dtlb_data(i, 0x0UL);
585                         }
586                 }
587         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
588                 for (i = 0; i < 512; i++) {
589                         unsigned long tag = cheetah_get_dtlb_tag(i, 2);
590
591                         if ((tag & ~PAGE_MASK) == 0 &&
592                             (tag & PAGE_MASK) >= prom_reserved_base) {
593                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
594                                                      "membar #Sync"
595                                                      : /* no outputs */
596                                                      : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
597                                 cheetah_put_dtlb_data(i, 0x0UL, 2);
598                         }
599
600                         if (tlb_type != cheetah_plus)
601                                 continue;
602
603                         tag = cheetah_get_dtlb_tag(i, 3);
604
605                         if ((tag & ~PAGE_MASK) == 0 &&
606                             (tag & PAGE_MASK) >= prom_reserved_base) {
607                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
608                                                      "membar #Sync"
609                                                      : /* no outputs */
610                                                      : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
611                                 cheetah_put_dtlb_data(i, 0x0UL, 3);
612                         }
613                 }
614         } else {
615                 /* Implement me :-) */
616                 BUG();
617         }
618 }
619
620 static int prom_ditlb_set;
621 struct prom_tlb_entry {
622         int             tlb_ent;
623         unsigned long   tlb_tag;
624         unsigned long   tlb_data;
625 };
626 struct prom_tlb_entry prom_itlb[16], prom_dtlb[16];
627
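/* Switch the TLBs between the kernel's view and the PROM's view.
 * prom_world(1) is called before dropping into the firmware: it zaps
 * the nucleus VPTE entries living in the firmware-reserved address
 * range and re-installs the PROM's saved locked I/D-TLB entries.
 * prom_world(0) removes those entries again on the way back and
 * restores the current thread's address-space limit.
 */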
628 void prom_world(int enter)
629 {
630         unsigned long pstate;
631         int i;
632
633         if (!enter)
634                 set_fs((mm_segment_t) { get_thread_current_ds() });
635
636         if (!prom_ditlb_set)
637                 return;
638
639         /* Make sure the following runs atomically. */
640         __asm__ __volatile__("flushw\n\t"
641                              "rdpr      %%pstate, %0\n\t"
642                              "wrpr      %0, %1, %%pstate"
643                              : "=r" (pstate)
644                              : "i" (PSTATE_IE));
645
646         if (enter) {
647                 /* Kick out nucleus VPTEs. */
648                 __flush_nucleus_vptes();
649
650                 /* Install PROM world. */
651                 for (i = 0; i < 16; i++) {
652                         if (prom_dtlb[i].tlb_ent != -1) {
653                                 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
654                                                      "membar #Sync"
655                                         : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
656                                         "i" (ASI_DMMU));
657                                 if (tlb_type == spitfire)
658                                         spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
659                                                                prom_dtlb[i].tlb_data);
660                                 else if (tlb_type == cheetah || tlb_type == cheetah_plus)
661                                         cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
662                                                                prom_dtlb[i].tlb_data);
663                         }
664                         if (prom_itlb[i].tlb_ent != -1) {
665                                 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
666                                                      "membar #Sync"
667                                                      : : "r" (prom_itlb[i].tlb_tag),
668                                                      "r" (TLB_TAG_ACCESS),
669                                                      "i" (ASI_IMMU));
670                                 if (tlb_type == spitfire)
671                                         spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
672                                                                prom_itlb[i].tlb_data);
673                                 else if (tlb_type == cheetah || tlb_type == cheetah_plus)
674                                         cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
675                                                                prom_itlb[i].tlb_data);
676                         }
677                 }
678         } else {
679                 for (i = 0; i < 16; i++) {
680                         if (prom_dtlb[i].tlb_ent != -1) {
681                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
682                                                      "membar #Sync"
683                                         : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
684                                 if (tlb_type == spitfire)
685                                         spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
686                                 else
687                                         cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
688                         }
689                         if (prom_itlb[i].tlb_ent != -1) {
690                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
691                                                      "membar #Sync"
692                                                      : : "r" (TLB_TAG_ACCESS),
693                                                      "i" (ASI_IMMU));
694                                 if (tlb_type == spitfire)
695                                         spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
696                                 else
697                                         cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL);
698                         }
699                 }
700         }
701         __asm__ __volatile__("wrpr      %0, 0, %%pstate"
702                              : : "r" (pstate));
703 }
704
705 void inherit_locked_prom_mappings(int save_p)
706 {
707         int i;
708         int dtlb_seen = 0;
709         int itlb_seen = 0;
710
711         /* Fucking losing PROM has more mappings in the TLB, but
712          * it (conveniently) fails to mention any of these in the
713          * translations property.  The only ones that matter are
714          * the locked PROM tlb entries, so we impose the following
715          * irrevocable rule on the PROM: it is allowed 8 locked
716          * entries in the ITLB and 8 in the DTLB.
717          *
718          * Supposedly the upper 16GB of the address space is
719          * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED
720          * SOMEWHERE!!!!!!!!!!!!!!!!!  Furthermore the entire interface
721          * used between the client program and the firmware on sun5
722          * systems to coordinate mmu mappings is also COMPLETELY
723          * UNDOCUMENTED!!!!!! Thanks S(t)un!
724          */
725         if (save_p) {
726                 for (i = 0; i < 16; i++) {
727                         prom_itlb[i].tlb_ent = -1;
728                         prom_dtlb[i].tlb_ent = -1;
729                 }
730         }
731         if (tlb_type == spitfire) {
732                 int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel;
733                 for (i = 0; i < high; i++) {
734                         unsigned long data;
735
736                         /* Spitfire Errata #32 workaround */
737                         /* NOTE: Always runs on spitfire, so no cheetah+
738                          *       page size encodings.
739                          */
740                         __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
741                                              "flush     %%g6"
742                                              : /* No outputs */
743                                              : "r" (0),
744                                              "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
745
746                         data = spitfire_get_dtlb_data(i);
747                         if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
748                                 unsigned long tag;
749
750                                 /* Spitfire Errata #32 workaround */
751                                 /* NOTE: Always runs on spitfire, so no
752                                  *       cheetah+ page size encodings.
753                                  */
754                                 __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
755                                                      "flush     %%g6"
756                                                      : /* No outputs */
757                                                      : "r" (0),
758                                                      "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
759
760                                 tag = spitfire_get_dtlb_tag(i);
761                                 if (save_p) {
762                                         prom_dtlb[dtlb_seen].tlb_ent = i;
763                                         prom_dtlb[dtlb_seen].tlb_tag = tag;
764                                         prom_dtlb[dtlb_seen].tlb_data = data;
765                                 }
766                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
767                                                      "membar #Sync"
768                                                      : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
769                                 spitfire_put_dtlb_data(i, 0x0UL);
770
771                                 dtlb_seen++;
772                                 if (dtlb_seen > 15)
773                                         break;
774                         }
775                 }
776
777                 for (i = 0; i < high; i++) {
778                         unsigned long data;
779
780                         /* Spitfire Errata #32 workaround */
781                         /* NOTE: Always runs on spitfire, so no
782                          *       cheetah+ page size encodings.
783                          */
784                         __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
785                                              "flush     %%g6"
786                                              : /* No outputs */
787                                              : "r" (0),
788                                              "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
789
790                         data = spitfire_get_itlb_data(i);
791                         if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
792                                 unsigned long tag;
793
794                                 /* Spitfire Errata #32 workaround */
795                                 /* NOTE: Always runs on spitfire, so no
796                                  *       cheetah+ page size encodings.
797                                  */
798                                 __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
799                                                      "flush     %%g6"
800                                                      : /* No outputs */
801                                                      : "r" (0),
802                                                      "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
803
804                                 tag = spitfire_get_itlb_tag(i);
805                                 if (save_p) {
806                                         prom_itlb[itlb_seen].tlb_ent = i;
807                                         prom_itlb[itlb_seen].tlb_tag = tag;
808                                         prom_itlb[itlb_seen].tlb_data = data;
809                                 }
810                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
811                                                      "membar #Sync"
812                                                      : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
813                                 spitfire_put_itlb_data(i, 0x0UL);
814
815                                 itlb_seen++;
816                                 if (itlb_seen > 15)
817                                         break;
818                         }
819                 }
820         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
821                 int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel;
822
823                 for (i = 0; i < high; i++) {
824                         unsigned long data;
825
826                         data = cheetah_get_ldtlb_data(i);
827                         if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
828                                 unsigned long tag;
829
830                                 tag = cheetah_get_ldtlb_tag(i);
831                                 if (save_p) {
832                                         prom_dtlb[dtlb_seen].tlb_ent = i;
833                                         prom_dtlb[dtlb_seen].tlb_tag = tag;
834                                         prom_dtlb[dtlb_seen].tlb_data = data;
835                                 }
836                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
837                                                      "membar #Sync"
838                                                      : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
839                                 cheetah_put_ldtlb_data(i, 0x0UL);
840
841                                 dtlb_seen++;
842                                 if (dtlb_seen > 15)
843                                         break;
844                         }
845                 }
846
847                 for (i = 0; i < high; i++) {
848                         unsigned long data;
849
850                         data = cheetah_get_litlb_data(i);
851                         if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
852                                 unsigned long tag;
853
854                                 tag = cheetah_get_litlb_tag(i);
855                                 if (save_p) {
856                                         prom_itlb[itlb_seen].tlb_ent = i;
857                                         prom_itlb[itlb_seen].tlb_tag = tag;
858                                         prom_itlb[itlb_seen].tlb_data = data;
859                                 }
860                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
861                                                      "membar #Sync"
862                                                      : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
863                                 cheetah_put_litlb_data(i, 0x0UL);
864
865                                 itlb_seen++;
866                                 if (itlb_seen > 15)
867                                         break;
868                         }
869                 }
870         } else {
871                 /* Implement me :-) */
872                 BUG();
873         }
874         if (save_p)
875                 prom_ditlb_set = 1;
876 }
877
878 /* Give PROM back his world, done during reboots... */
879 void prom_reload_locked(void)
880 {
881         int i;
882
883         for (i = 0; i < 16; i++) {
884                 if (prom_dtlb[i].tlb_ent != -1) {
885                         __asm__ __volatile__("stxa %0, [%1] %2\n\t"
886                                              "membar #Sync"
887                                 : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
888                                 "i" (ASI_DMMU));
889                         if (tlb_type == spitfire)
890                                 spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
891                                                        prom_dtlb[i].tlb_data);
892                         else if (tlb_type == cheetah || tlb_type == cheetah_plus)
893                                 cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
894                                                       prom_dtlb[i].tlb_data);
895                 }
896
897                 if (prom_itlb[i].tlb_ent != -1) {
898                         __asm__ __volatile__("stxa %0, [%1] %2\n\t"
899                                              "membar #Sync"
900                                              : : "r" (prom_itlb[i].tlb_tag),
901                                              "r" (TLB_TAG_ACCESS),
902                                              "i" (ASI_IMMU));
903                         if (tlb_type == spitfire)
904                                 spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
905                                                        prom_itlb[i].tlb_data);
906                         else
907                                 cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
908                                                        prom_itlb[i].tlb_data);
909                 }
910         }
911 }
912
913 #ifdef DCACHE_ALIASING_POSSIBLE
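/* Spitfire's D-cache is 16K, direct mapped, with 32-byte lines, so it
 * is enough to clear the tag at each line index (va & 0x3fe0), and at
 * most 512 lines ever need touching.  Cheetah instead takes physical
 * addresses through the ASI_DCACHE_INVALIDATE ASI.
 */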
914 void __flush_dcache_range(unsigned long start, unsigned long end)
915 {
916         unsigned long va;
917
918         if (tlb_type == spitfire) {
919                 int n = 0;
920
921                 for (va = start; va < end; va += 32) {
922                         spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
923                         if (++n >= 512)
924                                 break;
925                 }
926         } else {
927                 start = __pa(start);
928                 end = __pa(end);
929                 for (va = start; va < end; va += 32)
930                         __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
931                                              "membar #Sync"
932                                              : /* no outputs */
933                                              : "r" (va),
934                                                "i" (ASI_DCACHE_INVALIDATE));
935         }
936 }
937 #endif /* DCACHE_ALIASING_POSSIBLE */
938
939 /* If not locked, zap it. */
940 void __flush_tlb_all(void)
941 {
942         unsigned long pstate;
943         int i;
944
945         __asm__ __volatile__("flushw\n\t"
946                              "rdpr      %%pstate, %0\n\t"
947                              "wrpr      %0, %1, %%pstate"
948                              : "=r" (pstate)
949                              : "i" (PSTATE_IE));
950         if (tlb_type == spitfire) {
951                 for (i = 0; i < 64; i++) {
952                         /* Spitfire Errata #32 workaround */
953                         /* NOTE: Always runs on spitfire, so no
954                          *       cheetah+ page size encodings.
955                          */
956                         __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
957                                              "flush     %%g6"
958                                              : /* No outputs */
959                                              : "r" (0),
960                                              "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
961
962                         if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
963                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
964                                                      "membar #Sync"
965                                                      : /* no outputs */
966                                                      : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
967                                 spitfire_put_dtlb_data(i, 0x0UL);
968                         }
969
970                         /* Spitfire Errata #32 workaround */
971                         /* NOTE: Always runs on spitfire, so no
972                          *       cheetah+ page size encodings.
973                          */
974                         __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
975                                              "flush     %%g6"
976                                              : /* No outputs */
977                                              : "r" (0),
978                                              "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
979
980                         if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
981                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
982                                                      "membar #Sync"
983                                                      : /* no outputs */
984                                                      : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
985                                 spitfire_put_itlb_data(i, 0x0UL);
986                         }
987                 }
988         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
989                 cheetah_flush_dtlb_all();
990                 cheetah_flush_itlb_all();
991         }
992         __asm__ __volatile__("wrpr      %0, 0, %%pstate"
993                              : : "r" (pstate));
994 }
995
996 /* Caller does TLB context flushing on local CPU if necessary.
997  * The caller also ensures that CTX_VALID(mm->context) is false.
998  *
999  * We must be careful about boundary cases so that we never
1000  * let the user have CTX 0 (nucleus) or we ever use a CTX
1001  * version of zero (and thus NO_CONTEXT would not be caught
1002  * by version mis-match tests in mmu_context.h).
1003  */
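/* A context value is the context number in the low CTX_NR_BITS bits
 * plus a generation ("version") in the bits above.  Numbers are handed
 * out from mmu_context_bmap; when the number space wraps, the version
 * is bumped, the bitmap is cleared, and contexts 0 and 1 are
 * re-reserved (mmu_context_bmap[0] = 3), which forces every mm holding
 * an old version to come back here for a fresh context.
 */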
1004 void get_new_mmu_context(struct mm_struct *mm)
1005 {
1006         unsigned long ctx, new_ctx;
1007         unsigned long orig_pgsz_bits;
1008         
1009
1010         spin_lock(&ctx_alloc_lock);
1011         orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
1012         ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
1013         new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
1014         if (new_ctx >= (1 << CTX_NR_BITS)) {
1015                 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
1016                 if (new_ctx >= ctx) {
1017                         int i;
1018                         new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
1019                                 CTX_FIRST_VERSION;
1020                         if (new_ctx == 1)
1021                                 new_ctx = CTX_FIRST_VERSION;
1022
1023                         /* Don't call memset, for 16 entries that's just
1024                          * plain silly...
1025                          */
1026                         mmu_context_bmap[0] = 3;
1027                         mmu_context_bmap[1] = 0;
1028                         mmu_context_bmap[2] = 0;
1029                         mmu_context_bmap[3] = 0;
1030                         for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
1031                                 mmu_context_bmap[i + 0] = 0;
1032                                 mmu_context_bmap[i + 1] = 0;
1033                                 mmu_context_bmap[i + 2] = 0;
1034                                 mmu_context_bmap[i + 3] = 0;
1035                         }
1036                         goto out;
1037                 }
1038         }
1039         mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
1040         new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
1041 out:
1042         tlb_context_cache = new_ctx;
1043         mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
1044         spin_unlock(&ctx_alloc_lock);
1045 }
1046
1047 #ifndef CONFIG_SMP
1048 struct pgtable_cache_struct pgt_quicklists;
1049 #endif
1050
1051 /* OK, we have to color these pages. The page tables are accessed
1052  * by non-Dcache enabled mapping in the VPTE area by the dtlb_backend.S
1053  * code, as well as by PAGE_OFFSET range direct-mapped addresses by 
1054  * other parts of the kernel. By coloring, we make sure that the tlbmiss 
1055  * fast handlers do not get data from old/garbage dcache lines that 
1056  * correspond to an old/stale virtual address (user/kernel) that 
1057  * previously mapped the pagetable page while accessing vpte range 
1058  * addresses. The idea is that if the vpte color and PAGE_OFFSET range 
1059  * color is the same, then when the kernel initializes the pagetable 
1060  * using the later address range, accesses with the first address
1061  * range will see the newly initialized data rather than the garbage.
1062  */
1063 #ifdef DCACHE_ALIASING_POSSIBLE
1064 #define DC_ALIAS_SHIFT  1
1065 #else
1066 #define DC_ALIAS_SHIFT  0
1067 #endif
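/* With D-cache aliasing possible an order-1 (two page) block is
 * allocated; VPTE_COLOR(address) says which of the two pages shares a
 * D-cache color with the vpte address.  That page becomes the new pte
 * page and its sibling is pushed onto the quicklist of the opposite
 * color for a later allocation.
 */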
1068 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
1069 {
1070         struct page *page;
1071         unsigned long color;
1072
1073         {
1074                 pte_t *ptep = pte_alloc_one_fast(mm, address);
1075
1076                 if (ptep)
1077                         return ptep;
1078         }
1079
1080         color = VPTE_COLOR(address);
1081         page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT);
1082         if (page) {
1083                 unsigned long *to_free;
1084                 unsigned long paddr;
1085                 pte_t *pte;
1086
1087 #ifdef DCACHE_ALIASING_POSSIBLE
1088                 set_page_count(page, 1);
1089                 ClearPageCompound(page);
1090
1091                 set_page_count((page + 1), 1);
1092                 ClearPageCompound(page + 1);
1093 #endif
1094                 paddr = (unsigned long) page_address(page);
1095                 memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));
1096
1097                 if (!color) {
1098                         pte = (pte_t *) paddr;
1099                         to_free = (unsigned long *) (paddr + PAGE_SIZE);
1100                 } else {
1101                         pte = (pte_t *) (paddr + PAGE_SIZE);
1102                         to_free = (unsigned long *) paddr;
1103                 }
1104
1105 #ifdef DCACHE_ALIASING_POSSIBLE
1106                 /* Now free the other one up, adjust cache size. */
1107                 preempt_disable();
1108                 *to_free = (unsigned long) pte_quicklist[color ^ 0x1];
1109                 pte_quicklist[color ^ 0x1] = to_free;
1110                 pgtable_cache_size++;
1111                 preempt_enable();
1112 #endif
1113
1114                 return pte;
1115         }
1116         return NULL;
1117 }
1118
1119 void sparc_ultra_dump_itlb(void)
1120 {
1121         int slot;
1122
1123         if (tlb_type == spitfire) {
1124                 printk ("Contents of itlb: ");
1125                 for (slot = 0; slot < 14; slot++) printk ("    ");
1126                 printk ("%2x:%016lx,%016lx\n",
1127                         0,
1128                         spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
1129                 for (slot = 1; slot < 64; slot+=3) {
1130                         printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n", 
1131                                 slot,
1132                                 spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
1133                                 slot+1,
1134                                 spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
1135                                 slot+2,
1136                                 spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
1137                 }
1138         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1139                 printk ("Contents of itlb0:\n");
1140                 for (slot = 0; slot < 16; slot+=2) {
1141                         printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1142                                 slot,
1143                                 cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
1144                                 slot+1,
1145                                 cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
1146                 }
1147                 printk ("Contents of itlb2:\n");
1148                 for (slot = 0; slot < 128; slot+=2) {
1149                         printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1150                                 slot,
1151                                 cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
1152                                 slot+1,
1153                                 cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
1154                 }
1155         }
1156 }
1157
1158 void sparc_ultra_dump_dtlb(void)
1159 {
1160         int slot;
1161
1162         if (tlb_type == spitfire) {
1163                 printk ("Contents of dtlb: ");
1164                 for (slot = 0; slot < 14; slot++) printk ("    ");
1165                 printk ("%2x:%016lx,%016lx\n", 0,
1166                         spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
1167                 for (slot = 1; slot < 64; slot+=3) {
1168                         printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n", 
1169                                 slot,
1170                                 spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
1171                                 slot+1,
1172                                 spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
1173                                 slot+2,
1174                                 spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
1175                 }
1176         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1177                 printk ("Contents of dtlb0:\n");
1178                 for (slot = 0; slot < 16; slot+=2) {
1179                         printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1180                                 slot,
1181                                 cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
1182                                 slot+1,
1183                                 cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
1184                 }
1185                 printk ("Contents of dtlb2:\n");
1186                 for (slot = 0; slot < 512; slot+=2) {
1187                         printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1188                                 slot,
1189                                 cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
1190                                 slot+1,
1191                                 cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
1192                 }
1193                 if (tlb_type == cheetah_plus) {
1194                         printk ("Contents of dtlb3:\n");
1195                         for (slot = 0; slot < 512; slot+=2) {
1196                                 printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
1197                                         slot,
1198                                         cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
1199                                         slot+1,
1200                                         cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
1201                         }
1202                 }
1203         }
1204 }
1205
1206 extern unsigned long cmdline_memory_size;
1207
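/* Set up the boot-time allocator: walk sp_banks[] (trimming it when a
 * memory-size limit was given on the command line), place the bootmem
 * bitmap just above the kernel image (or above the initrd if the
 * initrd was loaded right there), register every physical bank as
 * free, then reserve the initrd, the kernel text/data/bss and the
 * bootmem bitmap itself.  Returns end_pfn, the first pfn past physical
 * memory; *pages_avail is set to the number of pages left usable.
 */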
1208 unsigned long __init bootmem_init(unsigned long *pages_avail)
1209 {
1210         unsigned long bootmap_size, start_pfn, end_pfn;
1211         unsigned long end_of_phys_memory = 0UL;
1212         unsigned long bootmap_pfn, bytes_avail, size;
1213         int i;
1214
1215 #ifdef CONFIG_DEBUG_BOOTMEM
1216         prom_printf("bootmem_init: Scan sp_banks, ");
1217 #endif
1218
1219         bytes_avail = 0UL;
1220         for (i = 0; sp_banks[i].num_bytes != 0; i++) {
1221                 end_of_phys_memory = sp_banks[i].base_addr +
1222                         sp_banks[i].num_bytes;
1223                 bytes_avail += sp_banks[i].num_bytes;
1224                 if (cmdline_memory_size) {
1225                         if (bytes_avail > cmdline_memory_size) {
1226                                 unsigned long slack = bytes_avail - cmdline_memory_size;
1227
1228                                 bytes_avail -= slack;
1229                                 end_of_phys_memory -= slack;
1230
1231                                 sp_banks[i].num_bytes -= slack;
1232                                 if (sp_banks[i].num_bytes == 0) {
1233                                         sp_banks[i].base_addr = 0xdeadbeef;
1234                                 } else {
1235                                         sp_banks[i+1].num_bytes = 0;
1236                                         sp_banks[i+1].base_addr = 0xdeadbeef;
1237                                 }
1238                                 break;
1239                         }
1240                 }
1241         }
1242
1243         *pages_avail = bytes_avail >> PAGE_SHIFT;
1244
1245         /* Start with page aligned address of last symbol in kernel
1246          * image.  The kernel is hard mapped below PAGE_OFFSET in a
1247          * 4MB locked TLB translation.
1248          */
1249         start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;
1250
1251         bootmap_pfn = start_pfn;
1252
1253         end_pfn = end_of_phys_memory >> PAGE_SHIFT;
1254
1255 #ifdef CONFIG_BLK_DEV_INITRD
1256         /* Now have to check initial ramdisk, so that bootmap does not overwrite it */
1257         if (sparc_ramdisk_image || sparc_ramdisk_image64) {
1258                 unsigned long ramdisk_image = sparc_ramdisk_image ?
1259                         sparc_ramdisk_image : sparc_ramdisk_image64;
1260                 if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
1261                         ramdisk_image -= KERNBASE;
1262                 initrd_start = ramdisk_image + phys_base;
1263                 initrd_end = initrd_start + sparc_ramdisk_size;
1264                 if (initrd_end > end_of_phys_memory) {
1265                         printk(KERN_CRIT "initrd extends beyond end of memory "
1266                                          "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
1267                                initrd_end, end_of_phys_memory);
1268                         initrd_start = 0;
1269                 }
1270                 if (initrd_start) {
1271                         if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
1272                             initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
1273                                 bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
1274                 }
1275         }
1276 #endif  
1277         /* Initialize the boot-time allocator. */
1278         max_pfn = max_low_pfn = end_pfn;
1279         min_low_pfn = pfn_base;
1280
1281 #ifdef CONFIG_DEBUG_BOOTMEM
1282         prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
1283                     min_low_pfn, bootmap_pfn, max_low_pfn);
1284 #endif
1285         bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);
1286
1287         /* Now register the available physical memory with the
1288          * allocator.
1289          */
1290         for (i = 0; sp_banks[i].num_bytes != 0; i++) {
1291 #ifdef CONFIG_DEBUG_BOOTMEM
1292                 prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
1293                             i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
1294 #endif
1295                 free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
1296         }
1297
1298 #ifdef CONFIG_BLK_DEV_INITRD
1299         if (initrd_start) {
1300                 size = initrd_end - initrd_start;
1301
1302                 /* Reserve the initrd image area. */
1303 #ifdef CONFIG_DEBUG_BOOTMEM
1304                 prom_printf("reserve_bootmem(initrd): base[%lx] size[%lx]\n",
1305                         initrd_start, size);
1306 #endif
1307                 reserve_bootmem(initrd_start, size);
1308                 *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
1309
1310                 initrd_start += PAGE_OFFSET;
1311                 initrd_end += PAGE_OFFSET;
1312         }
1313 #endif
1314         /* Reserve the kernel text/data/bss. */
1315 #ifdef CONFIG_DEBUG_BOOTMEM
1316         prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
1317 #endif
1318         reserve_bootmem(kern_base, kern_size);
1319         *pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;
1320
1321         /* Reserve the bootmem map.   We do not account for it
1322          * in pages_avail because we will release that memory
1323          * in free_all_bootmem.
1324          */
1325         size = bootmap_size;
1326 #ifdef CONFIG_DEBUG_BOOTMEM
1327         prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
1328                     (bootmap_pfn << PAGE_SHIFT), size);
1329 #endif
1330         reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
1331         *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
1332
1333         return end_pfn;
1334 }
1335
1336 #ifdef CONFIG_DEBUG_PAGEALLOC
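/* Establish PTEs with protection 'prot' for the physical range
 * [pstart, pend) in the kernel linear mapping at PAGE_OFFSET, allocating
 * any missing intermediate page tables from bootmem.  Returns the number
 * of bytes allocated for those page tables.
 */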
1337 static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
1338 {
1339         unsigned long vstart = PAGE_OFFSET + pstart;
1340         unsigned long vend = PAGE_OFFSET + pend;
1341         unsigned long alloc_bytes = 0UL;
1342
1343         if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
1344                 prom_printf("kernel_map: Unaligned virtual range [%lx:%lx]\n",
1345                             vstart, vend);
1346                 prom_halt();
1347         }
1348
1349         while (vstart < vend) {
1350                 unsigned long this_end, paddr = __pa(vstart);
1351                 pgd_t *pgd = pgd_offset_k(vstart);
1352                 pud_t *pud;
1353                 pmd_t *pmd;
1354                 pte_t *pte;
1355
1356                 pud = pud_offset(pgd, vstart);
1357                 if (pud_none(*pud)) {
1358                         pmd_t *new;
1359
1360                         new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1361                         alloc_bytes += PAGE_SIZE;
1362                         pud_populate(&init_mm, pud, new);
1363                 }
1364
1365                 pmd = pmd_offset(pud, vstart);
1366                 if (!pmd_present(*pmd)) {
1367                         pte_t *new;
1368
1369                         new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1370                         alloc_bytes += PAGE_SIZE;
1371                         pmd_populate_kernel(&init_mm, pmd, new);
1372                 }
1373
1374                 pte = pte_offset_kernel(pmd, vstart);
1375                 this_end = (vstart + PMD_SIZE) & PMD_MASK;
1376                 if (this_end > vend)
1377                         this_end = vend;
1378
1379                 while (vstart < this_end) {
1380                         pte_val(*pte) = (paddr | pgprot_val(prot));
1381
1382                         vstart += PAGE_SIZE;
1383                         paddr += PAGE_SIZE;
1384                         pte++;
1385                 }
1386         }
1387
1388         return alloc_bytes;
1389 }
1390
1391 extern struct linux_mlist_p1275 *prom_ptot_ptr;
1392 extern unsigned int kvmap_linear_patch[1];
1393
1394 static void __init kernel_physical_mapping_init(void)
1395 {
1396         struct linux_mlist_p1275 *p = prom_ptot_ptr;
1397         unsigned long mem_alloced = 0UL;
1398
1399         while (p) {
1400                 unsigned long phys_start, phys_end;
1401
1402                 phys_start = p->start_adr;
1403                 phys_end = phys_start + p->num_bytes;
1404                 mem_alloced += kernel_map_range(phys_start, phys_end,
1405                                                 PAGE_KERNEL);
1406
1407                 p = p->theres_more;
1408         }
1409
1410         printk("Allocated %ld bytes for kernel page tables.\n",
1411                mem_alloced);
1412
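        /* 0x01000000 is the SPARC "nop" opcode; flushi() makes the
         * patched instruction visible to the instruction cache.
         */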
1413         kvmap_linear_patch[0] = 0x01000000; /* nop */
1414         flushi(&kvmap_linear_patch[0]);
1415
1416         __flush_tlb_all();
1417 }
1418
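/* CONFIG_DEBUG_PAGEALLOC hook: map pages back in with PAGE_KERNEL when
 * enabling, otherwise clear their protections so that stray accesses to
 * freed pages fault.
 */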
1419 void kernel_map_pages(struct page *page, int numpages, int enable)
1420 {
1421         unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1422         unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1423
1424         kernel_map_range(phys_start, phys_end,
1425                          (enable ? PAGE_KERNEL : __pgprot(0)));
1426
1427         /* Ideally we would IPI all cpus and flush their TLBs, but
1428          * that can deadlock here, so we only flush the current cpu.
1429          */
1430         __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1431                                  PAGE_OFFSET + phys_end);
1432 }
1433 #endif
1434
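/* Return the base address of the first physical memory bank that is at
 * least 'size' bytes long, or ~0UL if no bank is big enough.
 */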
1435 unsigned long __init find_ecache_flush_span(unsigned long size)
1436 {
1437         unsigned long i;
1438
1439         for (i = 0; ; i++) {
1440                 if (sp_banks[i].num_bytes == 0)
1441                         break;
1442                 if (sp_banks[i].num_bytes >= size)
1443                         return sp_banks[i].base_addr;
1444         }
1445
1446         return ~0UL;
1447 }
1448
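/* Read the PROM "available" memory list into sp_banks[], terminating the
 * array with a zero-length sentinel entry.
 */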
1449 static void __init prom_probe_memory(void)
1450 {
1451         struct linux_mlist_p1275 *mlist;
1452         unsigned long bytes, base_paddr, tally;
1453         int i;
1454
1455         i = 0;
1456         mlist = *prom_meminfo()->p1275_available;
1457         bytes = tally = mlist->num_bytes;
1458         base_paddr = mlist->start_adr;
1459   
1460         sp_banks[0].base_addr = base_paddr;
1461         sp_banks[0].num_bytes = bytes;
1462
1463         while (mlist->theres_more != (void *) 0) {
1464                 i++;
1465                 mlist = mlist->theres_more;
1466                 bytes = mlist->num_bytes;
1467                 tally += bytes;
1468                 if (i >= SPARC_PHYS_BANKS-1) {
1469                         printk ("The machine has more banks than "
1470                                 "this kernel can support\n"
1471                                 "Increase the SPARC_PHYS_BANKS "
1472                                 "setting (currently %d)\n",
1473                                 SPARC_PHYS_BANKS);
1474                         i = SPARC_PHYS_BANKS-1;
1475                         break;
1476                 }
1477     
1478                 sp_banks[i].base_addr = mlist->start_adr;
1479                 sp_banks[i].num_bytes = mlist->num_bytes;
1480         }
1481
1482         i++;
1483         sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
1484         sp_banks[i].num_bytes = 0;
1485
1486         /* Now mask all bank sizes down to a page boundary; the
1487          * partial page at the end of a bank is unusable anyway.
1488          */
1489         for (i = 0; sp_banks[i].num_bytes != 0; i++)
1490                 sp_banks[i].num_bytes &= PAGE_MASK;
1491 }
1492
1493 /* paging_init() sets up the page tables */
1494
1495 extern void cheetah_ecache_flush_init(void);
1496
1497 static unsigned long last_valid_pfn;
1498 pgd_t swapper_pg_dir[2048];
1499
1500 void __init paging_init(void)
1501 {
1502         unsigned long end_pfn, pages_avail, shift;
1503         unsigned long real_end, i;
1504
1505         prom_probe_memory();
1506
1507         phys_base = 0xffffffffffffffffUL;
1508         for (i = 0; sp_banks[i].num_bytes != 0; i++) {
1509                 unsigned long top;
1510
1511                 if (sp_banks[i].base_addr < phys_base)
1512                         phys_base = sp_banks[i].base_addr;
1513                 top = sp_banks[i].base_addr +
1514                         sp_banks[i].num_bytes;
1515         }
1516         pfn_base = phys_base >> PAGE_SHIFT;
1517
1518         kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
1519         kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
1520
1521         set_bit(0, mmu_context_bmap);
1522
1523         shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
1524
1525         real_end = (unsigned long)_end;
1526         if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
1527                 bigkernel = 1;
1528         if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
1529                 prom_printf("paging_init: Kernel > 8MB, too large.\n");
1530                 prom_halt();
1531         }
1532
1533         /* Set kernel pgd to upper alias so physical page computations
1534          * work.
1535          */
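        /* shift == (kern_base + PAGE_OFFSET) - KERNBASE, i.e. the byte
         * delta between a KERNBASE-relative kernel virtual address and
         * its linear-mapping alias at PAGE_OFFSET; advancing init_mm.pgd
         * by that many bytes makes it reference swapper_pg_dir through
         * the linear alias.
         */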
1536         init_mm.pgd += ((shift) / (sizeof(pgd_t)));
1537         
1538         memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
1539
1540         /* Now can init the kernel/bad page tables. */
1541         pud_set(pud_offset(&swapper_pg_dir[0], 0),
1542                 swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
1543         
1544         swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
1545         
1546         /* Inherit non-locked OBP mappings. */
1547         inherit_prom_mappings();
1548         
1549         /* Ok, we can use our TLB miss and window trap handlers safely.
1550          * We need a quick peek here to see whether we are on StarFire,
1551          * so that setup_tba() can set up the IRQ globals correctly (it
1552          * needs the hard smp processor id).
1553          */
1554         {
1555                 extern void setup_tba(int);
1556                 setup_tba(this_is_starfire);
1557         }
1558
1559         inherit_locked_prom_mappings(1);
1560
1561         __flush_tlb_all();
1562
1563         /* Setup bootmem... */
1564         pages_avail = 0;
1565         last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
1566
1567 #ifdef CONFIG_DEBUG_PAGEALLOC
1568         kernel_physical_mapping_init();
1569 #endif
1570
1571         {
1572                 unsigned long zones_size[MAX_NR_ZONES];
1573                 unsigned long zholes_size[MAX_NR_ZONES];
1574                 unsigned long npages;
1575                 int znum;
1576
1577                 for (znum = 0; znum < MAX_NR_ZONES; znum++)
1578                         zones_size[znum] = zholes_size[znum] = 0;
1579
1580                 npages = end_pfn - pfn_base;
1581                 zones_size[ZONE_DMA] = npages;
1582                 zholes_size[ZONE_DMA] = npages - pages_avail;
1583
1584                 free_area_init_node(0, &contig_page_data, zones_size,
1585                                     phys_base >> PAGE_SHIFT, zholes_size);
1586         }
1587
1588         device_scan();
1589 }
1590
1591 /* Ok, it seems that the prom can allocate some more memory chunks
1592  * as a side effect of some prom calls we perform during the
1593  * boot sequence.  My most likely theory is that it is from the
1594  * prom_set_traptable() call, and OBP is allocating a scratchpad
1595  * for saving client program register state etc.
1596  */
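/* Sort the PROM memory list in place into ascending start_adr order. */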
1597 static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
1598 {
1599         int swapi = 0;
1600         int i, mitr;
1601         unsigned long tmpaddr, tmpsize;
1602         unsigned long lowest;
1603
1604         for (i = 0; thislist[i].theres_more != 0; i++) {
1605                 lowest = thislist[i].start_adr;
1606                 for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
1607                         if (thislist[mitr].start_adr < lowest) {
1608                                 lowest = thislist[mitr].start_adr;
1609                                 swapi = mitr;
1610                         }
1611                 if (lowest == thislist[i].start_adr)
1612                         continue;
1613                 tmpaddr = thislist[swapi].start_adr;
1614                 tmpsize = thislist[swapi].num_bytes;
1615                 for (mitr = swapi; mitr > i; mitr--) {
1616                         thislist[mitr].start_adr = thislist[mitr-1].start_adr;
1617                         thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
1618                 }
1619                 thislist[i].start_adr = tmpaddr;
1620                 thislist[i].num_bytes = tmpsize;
1621         }
1622 }
1623
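/* Re-read the "available" property of the PROM /memory node and rebuild
 * sp_banks[], picking up any memory that OBP has consumed since the
 * initial probe.
 */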
1624 void __init rescan_sp_banks(void)
1625 {
1626         struct linux_prom64_registers memlist[64];
1627         struct linux_mlist_p1275 avail[64], *mlist;
1628         unsigned long bytes, base_paddr;
1629         int num_regs, node = prom_finddevice("/memory");
1630         int i;
1631
1632         num_regs = prom_getproperty(node, "available",
1633                                     (char *) memlist, sizeof(memlist));
1634         num_regs = (num_regs / sizeof(struct linux_prom64_registers));
1635         for (i = 0; i < num_regs; i++) {
1636                 avail[i].start_adr = memlist[i].phys_addr;
1637                 avail[i].num_bytes = memlist[i].reg_size;
1638                 avail[i].theres_more = &avail[i + 1];
1639         }
1640         avail[i - 1].theres_more = NULL;
1641         sort_memlist(avail);
1642
1643         mlist = &avail[0];
1644         i = 0;
1645         bytes = mlist->num_bytes;
1646         base_paddr = mlist->start_adr;
1647   
1648         sp_banks[0].base_addr = base_paddr;
1649         sp_banks[0].num_bytes = bytes;
1650
1651         while (mlist->theres_more != NULL){
1652                 i++;
1653                 mlist = mlist->theres_more;
1654                 bytes = mlist->num_bytes;
1655                 if (i >= SPARC_PHYS_BANKS-1) {
1656                         printk ("The machine has more banks than "
1657                                 "this kernel can support\n"
1658                                 "Increase the SPARC_PHYS_BANKS "
1659                                 "setting (currently %d)\n",
1660                                 SPARC_PHYS_BANKS);
1661                         i = SPARC_PHYS_BANKS-1;
1662                         break;
1663                 }
1664     
1665                 sp_banks[i].base_addr = mlist->start_adr;
1666                 sp_banks[i].num_bytes = mlist->num_bytes;
1667         }
1668
1669         i++;
1670         sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
1671         sp_banks[i].num_bytes = 0;
1672
1673         for (i = 0; sp_banks[i].num_bytes != 0; i++)
1674                 sp_banks[i].num_bytes &= PAGE_MASK;
1675 }
1676
1677 static void __init taint_real_pages(void)
1678 {
1679         struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
1680         int i;
1681
1682         for (i = 0; i < SPARC_PHYS_BANKS; i++) {
1683                 saved_sp_banks[i].base_addr =
1684                         sp_banks[i].base_addr;
1685                 saved_sp_banks[i].num_bytes =
1686                         sp_banks[i].num_bytes;
1687         }
1688
1689         rescan_sp_banks();
1690
1691         /* Compare against the rescanned sp_banks and reserve in the
1692          * bootmem map any portions that have disappeared.
1693          */
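        /* sparc64_valid_addr_bitmap has one bit per 4MB chunk (hence the
         * ">> 22" below); a set bit means the chunk is still backed by
         * valid RAM.  Pages that have disappeared in the rescan are
         * reserved so the allocator never hands them out.
         */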
1694         for (i = 0; saved_sp_banks[i].num_bytes; i++) {
1695                 unsigned long old_start, old_end;
1696
1697                 old_start = saved_sp_banks[i].base_addr;
1698                 old_end = old_start +
1699                         saved_sp_banks[i].num_bytes;
1700                 while (old_start < old_end) {
1701                         int n;
1702
1703                         for (n = 0; sp_banks[n].num_bytes; n++) {
1704                                 unsigned long new_start, new_end;
1705
1706                                 new_start = sp_banks[n].base_addr;
1707                                 new_end = new_start + sp_banks[n].num_bytes;
1708
1709                                 if (new_start <= old_start &&
1710                                     new_end >= (old_start + PAGE_SIZE)) {
1711                                         set_bit (old_start >> 22,
1712                                                  sparc64_valid_addr_bitmap);
1713                                         goto do_next_page;
1714                                 }
1715                         }
1716                         reserve_bootmem(old_start, PAGE_SIZE);
1717
1718                 do_next_page:
1719                         old_start += PAGE_SIZE;
1720                 }
1721         }
1722 }
1723
1724 void __init mem_init(void)
1725 {
1726         unsigned long codepages, datapages, initpages;
1727         unsigned long addr, last;
1728         int i;
1729
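        /* Size the valid address bitmap: one bit per 4MB of physical
         * memory, packed into 64-bit words (the extra ">> 6"), and
         * allocated below as i << 3 bytes.
         */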
1730         i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
1731         i += 1;
1732         sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
1733         if (sparc64_valid_addr_bitmap == NULL) {
1734                 prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
1735                 prom_halt();
1736         }
1737         memset(sparc64_valid_addr_bitmap, 0, i << 3);
1738
1739         addr = PAGE_OFFSET + kern_base;
1740         last = PAGE_ALIGN(kern_size) + addr;
1741         while (addr < last) {
1742                 set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
1743                 addr += PAGE_SIZE;
1744         }
1745
1746         taint_real_pages();
1747
1748         max_mapnr = last_valid_pfn - pfn_base;
1749         high_memory = __va(last_valid_pfn << PAGE_SHIFT);
1750
1751 #ifdef CONFIG_DEBUG_BOOTMEM
1752         prom_printf("mem_init: Calling free_all_bootmem().\n");
1753 #endif
1754         totalram_pages = num_physpages = free_all_bootmem() - 1;
1755
1756         /*
1757          * Set up the zero page, mark it reserved, so that page count
1758          * is not manipulated when freeing the page from user ptes.
1759          */
1760         mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
1761         if (mem_map_zero == NULL) {
1762                 prom_printf("mem_init: Cannot alloc zero page.\n");
1763                 prom_halt();
1764         }
1765         SetPageReserved(mem_map_zero);
1766
1767         codepages = (((unsigned long) _etext) - ((unsigned long) _start));
1768         codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
1769         datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
1770         datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
1771         initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
1772         initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;
1773
1774         printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
1775                nr_free_pages() << (PAGE_SHIFT-10),
1776                codepages << (PAGE_SHIFT-10),
1777                datapages << (PAGE_SHIFT-10), 
1778                initpages << (PAGE_SHIFT-10), 
1779                PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));
1780
1781         if (tlb_type == cheetah || tlb_type == cheetah_plus)
1782                 cheetah_ecache_flush_init();
1783 }
1784
1785 void free_initmem(void)
1786 {
1787         unsigned long addr, initend;
1788
1789         /*
1790          * The init section is aligned to 8k in vmlinux.lds. Page align for >8k page sizes.
1791          */
1792         addr = PAGE_ALIGN((unsigned long)(__init_begin));
1793         initend = (unsigned long)(__init_end) & PAGE_MASK;
1794         for (; addr < initend; addr += PAGE_SIZE) {
1795                 unsigned long page;
1796                 struct page *p;
1797
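                /* Translate the KERNBASE-relative init address into its
                 * PAGE_OFFSET linear alias so that virt_to_page() works.
                 */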
1798                 page = (addr +
1799                         ((unsigned long) __va(kern_base)) -
1800                         ((unsigned long) KERNBASE));
1801                 memset((void *)addr, 0xcc, PAGE_SIZE);
1802                 p = virt_to_page(page);
1803
1804                 ClearPageReserved(p);
1805                 set_page_count(p, 1);
1806                 __free_page(p);
1807                 num_physpages++;
1808                 totalram_pages++;
1809         }
1810 }
1811
1812 #ifdef CONFIG_BLK_DEV_INITRD
1813 void free_initrd_mem(unsigned long start, unsigned long end)
1814 {
1815         if (start < end)
1816                 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
1817         for (; start < end; start += PAGE_SIZE) {
1818                 struct page *p = virt_to_page(start);
1819
1820                 ClearPageReserved(p);
1821                 set_page_count(p, 1);
1822                 __free_page(p);
1823                 num_physpages++;
1824                 totalram_pages++;
1825         }
1826 }
1827 #endif