/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
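
/*
 * Illustrative sketch (not part of the original file): __phys_addr()
 * undoes both 64-bit kernel virtual mappings. A direct-map address
 * translates back by subtracting PAGE_OFFSET, a kernel-text address by
 * subtracting __START_KERNEL_map and adding phys_base. Assuming "phys"
 * is a valid RAM address:
 *
 *      void *va = __va(phys);
 *      BUG_ON(__phys_addr((unsigned long)va) != phys);
 */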

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif

int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * A special case is the first 4KB of memory:
         * this is a BIOS-owned area, not kernel RAM, but generally
         * not listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: some BIOSes report the PC BIOS
         * area (640KB->1MB) as RAM even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}
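
/*
 * Illustrative usage (hypothetical caller, not from this file): the
 * argument is a page frame number, not a byte address, so a physical
 * address must be shifted first:
 *
 *      if (page_is_ram(phys >> PAGE_SHIFT))
 *              ... the page is E820-usable RAM ...
 */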

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}
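
/*
 * Sketch of why this matters (illustrative, with a hypothetical caller):
 * if a page is mapped UC through ioremap while the kernel direct map
 * still maps the same physical page WB, the CPU sees two conflicting
 * memory types for one page. A caller holding the direct-map alias
 * could keep it coherent with:
 *
 *      ioremap_change_attr((unsigned long)__va(phys), size,
 *                          _PAGE_CACHE_UC);
 */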

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped.
         */
        if (is_ISA_range(phys_addr, last_addr))
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Check if the request spans more than any BAR in the iomem resource
         * tree.
         */
        WARN_ON(iomem_map_sanity_check(phys_addr, size));

        /*
         * Don't allow anybody to remap normal RAM that we're using.
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
                                (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
                                pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fall back to certain memory types with certain
                 * requested type:
                 * - request is uc-, return cannot be write-back
                 * - request is uc-, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC_MINUS &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * Ok, go for it.
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

        return ret_addr;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * buses; in particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
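
/*
 * Typical driver usage (illustrative sketch; the device, BAR layout and
 * CTRL_REG_OFFSET are hypothetical):
 *
 *      void __iomem *regs;
 *
 *      regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *                             pci_resource_len(pdev, 0));
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(0x1, regs + CTRL_REG_OFFSET);
 *      ...
 *      iounmap(regs);
 */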

/**
 * ioremap_wc   -       map memory into CPU space write combined
 * @phys_addr:  bus address of the memory
 * @size:       size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
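
/*
 * Illustrative sketch (fb_phys_base and fb_size are hypothetical): WC
 * suits large, write-mostly apertures where individual stores may be
 * combined into bursts, e.g. clearing a linear framebuffer:
 *
 *      void __iomem *fb = ioremap_wc(fb_phys_base, fb_size);
 *
 *      if (fb) {
 *              memset_io(fb, 0, fb_size);
 *              iounmap(fb);
 *      }
 */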

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
                                        unsigned long size)
{
        unsigned long flags;
        void *ret;
        int err;

        /*
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
        err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
        if (err < 0)
                return NULL;

        ret = (void *) __ioremap_caller(phys_addr, size, flags,
                                        __builtin_return_address(0));

        free_memtype(phys_addr, phys_addr + size);
        return (void __iomem *)ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                                unsigned long prot_val)
{
        return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        mmiotrace_iounmap(addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there isn't
         * another iounmap for the same address in parallel. Reuse of the
         * virtual address is prevented by leaving it in the global lists
         * until we're done with it. cpa takes care of the direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void __force *)ioremap_default(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
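
/*
 * How /dev/mem uses the pair above (simplified sketch of the read path;
 * p, buf and sz stand in for the real locals, and error handling is
 * omitted):
 *
 *      void *ptr = xlate_dev_mem_ptr(p);
 *
 *      if (ptr) {
 *              if (copy_to_user(buf, ptr, sz))
 *                      err = -EFAULT;
 *              unxlate_dev_mem_ptr(p, ptr);
 *      }
 */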

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                   unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                        unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;
        WARN(1, KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
                early_ioremap_nested);
        printk(KERN_WARNING
                "please boot with early_ioremap_debug and report the dmesg.\n");

        return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it.
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}
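
/*
 * Boot-time usage sketch (illustrative; table_phys and table_len stand
 * for values discovered from firmware). Before the normal ioremap
 * machinery is up, early_ioremap()/early_iounmap() must be used, and
 * the calls must be strictly nested:
 *
 *      void *p = early_ioremap(table_phys, table_len);
 *
 *      if (p) {
 *              ... parse the firmware table through p ...
 *              early_iounmap(p, table_len);
 *      }
 */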

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int nesting;

        nesting = --early_ioremap_nested;
        if (WARN_ON(nesting < 0))
                return;

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}
665
666 void __this_fixmap_does_not_exist(void)
667 {
668         WARN_ON(1);
669 }