/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map) {
                x -= __START_KERNEL_map;
                VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
                x += phys_base;
        } else {
                VIRTUAL_BUG_ON(x < PAGE_OFFSET);
                x -= PAGE_OFFSET;
                VIRTUAL_BUG_ON(!phys_addr_valid(x));
        }
        return x;
}
EXPORT_SYMBOL(__phys_addr);

bool __virt_addr_valid(unsigned long x)
{
        if (x >= __START_KERNEL_map) {
                x -= __START_KERNEL_map;
                if (x >= KERNEL_IMAGE_SIZE)
                        return false;
                x += phys_base;
        } else {
                if (x < PAGE_OFFSET)
                        return false;
                x -= PAGE_OFFSET;
                if (!phys_addr_valid(x))
                        return false;
        }

        return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
        /* VMALLOC_* aren't constants */
        VIRTUAL_BUG_ON(x < PAGE_OFFSET);
        VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#endif

bool __virt_addr_valid(unsigned long x)
{
        if (x < PAGE_OFFSET)
                return false;
        if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
                return false;
        return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#endif
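
/*
 * Illustrative sketch (not part of the original file): __virt_addr_valid()
 * lets debugging code check an address before trusting __pa(), instead of
 * tripping VIRTUAL_BUG_ON() in __phys_addr(). 'ptr' is hypothetical:
 *
 *        if (__virt_addr_valid((unsigned long)ptr))
 *                printk(KERN_DEBUG "phys: %lx\n", __pa(ptr));
 */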

int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * A special case is the first 4KB of memory:
         * this is a BIOS-owned area, not kernel RAM, but it is generally
         * not listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: some BIOSes report the PC BIOS
         * area (640KB->1MB) as RAM even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped.
         */
        if (is_ISA_range(phys_addr, last_addr))
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Check if the request spans more than one BAR in the iomem
         * resource tree.
         */
        WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
                  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

        /*
         * Don't allow anybody to remap normal RAM that we're using.
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
                                (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
                                pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fall back to certain memory types with certain
                 * requested type:
                 * - request is uc-, return cannot be write-back
                 * - request is uc-, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC_MINUS &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_IO_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_IO_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_IO_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL_IO;
                break;
        }

        /*
         * OK, go for it.
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

        return ret_addr;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
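
/*
 * Usage sketch (illustrative, not part of the original file): a PCI driver
 * mapping BAR 0 uncached. 'pdev' and CTRL_REG are hypothetical:
 *
 *        void __iomem *regs;
 *
 *        regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *                               pci_resource_len(pdev, 0));
 *        if (!regs)
 *                return -ENOMEM;
 *        writel(1, regs + CTRL_REG);
 *        ...
 *        iounmap(regs);
 */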

/**
 * ioremap_wc   -       map memory into CPU space write combined
 * @phys_addr:  bus address of the memory
 * @size:       size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
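
/*
 * Usage sketch (illustrative): write-combining suits large, write-mostly
 * apertures such as framebuffers. 'fb_start' and 'fb_len' are hypothetical;
 * note the function itself falls back to an uncached mapping without PAT:
 *
 *        void __iomem *fb = ioremap_wc(fb_start, fb_len);
 *        if (fb) {
 *                memset_io(fb, 0, fb_len);
 *                ...
 *                iounmap(fb);
 *        }
 */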

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
                                        unsigned long size)
{
        unsigned long flags;
        void __iomem *ret;
        int err;

        /*
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
        err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
        if (err < 0)
                return NULL;

        ret = __ioremap_caller(phys_addr, size, flags,
                               __builtin_return_address(0));

        free_memtype(phys_addr, phys_addr + size);
        return ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                                unsigned long prot_val)
{
        return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
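
/*
 * Usage sketch (illustrative): ioremap_prot() takes a raw protection value
 * and only honours its cache-attribute bits, e.g.:
 *
 *        void __iomem *p = ioremap_prot(addr, len,
 *                                       pgprot_val(PAGE_KERNEL_IO_NOCACHE));
 *
 * 'addr' and 'len' are hypothetical.
 */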

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        mmiotrace_iounmap(addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there isn't
         * another iounmap for the same address in parallel. Reuse of the
         * virtual address is prevented by leaving it in the global lists
         * until we're done with it. cpa takes care of the direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void __force *)ioremap_default(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
        return;
}
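
/*
 * Usage sketch (illustrative): a /dev/mem-style read path pairing the two
 * helpers. 'phys', 'buf' and 'count' are hypothetical, with count small
 * enough to stay within the single mapped page:
 *
 *        void *ptr = xlate_dev_mem_ptr(phys);
 *        if (ptr) {
 *                memcpy(buf, ptr, count);
 *                unxlate_dev_mem_ptr(phys, ptr);
 *        }
 */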

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_reset(void)
{
        after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                   unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys, pgprot_t prot)
{
        if (after_paging_init)
                __set_fixmap(idx, phys, prot);
        else
                __early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static int __init check_early_ioremap_leak(void)
{
        int count = 0;
        int i;

        for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
                if (prev_map[i])
                        count++;

        if (!count)
                return 0;
        WARN(1, KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
                count);
        printk(KERN_WARNING
                "Please boot with early_ioremap_debug and report the dmesg.\n");

        return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *__early_ioremap(unsigned long phys_addr,
                                            unsigned long size, pgprot_t prot)
{
        unsigned long offset, last_addr;
        unsigned int nrpages;
        enum fixed_addresses idx0, idx;
        int i, slot;

        WARN_ON(system_state != SYSTEM_BOOTING);

        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (!prev_map[i]) {
                        slot = i;
                        break;
                }
        }

        if (slot < 0) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx): no free slot\n",
                         phys_addr, size);
                WARN_ON(1);
                return NULL;
        }

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, slot);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        prev_size[slot] = size;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * OK, go for it.
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr, prot);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
        return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
{
        return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
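
/*
 * Usage sketch (illustrative): boot code reading a firmware table before
 * the normal ioremap machinery is available. 'table_phys' and 'table_len'
 * are hypothetical:
 *
 *        void __iomem *p = early_ioremap(table_phys, table_len);
 *        if (p) {
 *                ... parse the table ...
 *                early_iounmap(p, table_len);
 *        }
 */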

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int i, slot;

        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (prev_map[i] == addr) {
                        slot = i;
                        break;
                }
        }

        if (slot < 0) {
                printk(KERN_INFO "early_iounmap(%p, %08lx): slot not found\n",
                         addr, size);
                WARN_ON(1);
                return;
        }

        if (prev_size[slot] != size) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]: size inconsistent with %08lx\n",
                         addr, size, slot, prev_size[slot]);
                WARN_ON(1);
                return;
        }

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, slot);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
        prev_map[slot] = NULL;
}

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}