[linux-2.6-omap-h63xx.git] arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map) {
                x -= __START_KERNEL_map;
                VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
                x += phys_base;
        } else {
                VIRTUAL_BUG_ON(x < PAGE_OFFSET);
                x -= PAGE_OFFSET;
                VIRTUAL_BUG_ON(system_state == SYSTEM_BOOTING ? x > MAXMEM :
                                        !phys_addr_valid(x));
        }
        return x;
}
EXPORT_SYMBOL(__phys_addr);

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

unsigned long __phys_addr(unsigned long x)
{
        /* VMALLOC_* aren't constants; not available at boot time */
        VIRTUAL_BUG_ON(x < PAGE_OFFSET || (system_state != SYSTEM_BOOTING &&
                                        is_vmalloc_addr((void *)x)));
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

#endif

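/*
 * Illustrative sketch, not part of the original file: __pa() and
 * virt_to_phys() resolve to __phys_addr() above for directly mapped
 * kernel addresses.  The static object and the helper name below are
 * invented purely for the example.
 */
static unsigned long __maybe_unused example_phys_of_static(void)
{
        static int example_object;

        /* &example_object lives in the kernel image / direct mapping */
        return __pa(&example_object);
}
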
int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * A special case is the first 4KB of memory: it is a BIOS-owned
         * area, not kernel RAM, but it is generally not listed as such
         * in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: some BIOSes report the PC BIOS
         * area (640KB-1MB) as RAM even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

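/*
 * Illustrative sketch, not part of the original file: walking a physical
 * range page frame by page frame and asking page_is_ram() about each one,
 * much as __ioremap_caller() does below.  The helper name is invented for
 * the example.
 */
static int __maybe_unused example_range_is_ram(resource_size_t start,
                                               unsigned long size)
{
        unsigned long pfn;
        unsigned long last_pfn;

        if (!size)
                return 0;

        last_pfn = (start + size - 1) >> PAGE_SHIFT;
        for (pfn = start >> PAGE_SHIFT; pfn <= last_pfn; pfn++)
                if (!page_is_ram(pfn))
                        return 0;
        return 1;
}
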
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area; it's always mapped.
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using.
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
                                (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
                                pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fall back to certain memory types for certain
                 * requested types:
                 * - request is uc-, return cannot be write-back
                 * - request is uc-, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC_MINUS &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        return (void __iomem *) (vaddr + offset);
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * buses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Until we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

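/*
 * Illustrative sketch, not part of the original file: how a driver
 * typically uses ioremap_nocache() for a register window and the MMIO
 * accessors on the returned cookie.  The base address, length and
 * register offsets are invented for the example.
 */
static int __maybe_unused example_nocache_usage(void)
{
        void __iomem *regs = ioremap_nocache(0xfebf0000, 0x1000);

        if (!regs)
                return -ENOMEM;

        writel(1, regs + 0x04);         /* hypothetical control register */
        (void)readl(regs);              /* hypothetical status register */

        iounmap(regs);
        return 0;
}
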
/**
 * ioremap_wc - map memory into CPU space, write-combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write-combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_wc_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

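/*
 * Illustrative sketch, not part of the original file: mapping a
 * hypothetical framebuffer aperture write-combined so that streaming
 * writes can be merged into larger bursts; ioremap_wc() itself falls
 * back to ioremap_nocache() when PAT is unavailable.  The address and
 * size are invented for the example.
 */
static void __iomem * __maybe_unused example_wc_usage(void)
{
        void __iomem *fb = ioremap_wc(0xd0000000, 8 * 1024 * 1024);

        if (fb)
                memset_io(fb, 0, 8 * 1024 * 1024);      /* clear the frame */

        return fb;
}
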
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

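/*
 * Illustrative sketch, not part of the original file: ioremap_cache()
 * suits memory-like regions (for instance a firmware table living in
 * reserved, non-RAM address space) where ordinary caching is safe.
 * The address and the signature check are invented for the example.
 */
static int __maybe_unused example_cache_usage(void)
{
        void __iomem *tbl = ioremap_cache(0xbf000000, PAGE_SIZE);
        u32 sig;

        if (!tbl)
                return -ENOMEM;

        sig = readl(tbl);               /* hypothetical table signature */
        iounmap(tbl);

        return sig ? 0 : -ENODEV;
}
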
/**
 * iounmap - free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
           leaving it in the global lists until we're done with it.
           cpa takes care of the direct mappings. */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void *)ioremap(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
        return;
}

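/*
 * Illustrative sketch, not part of the original file: the pattern the
 * /dev/mem driver follows around the pair above - translate, copy,
 * untranslate.  The kernel-buffer copy and the helper name are invented
 * for the example (the real /dev/mem code copies to user space).
 */
static int __maybe_unused example_dev_mem_peek(unsigned long phys,
                                               void *buf, size_t count)
{
        void *ptr = xlate_dev_mem_ptr(phys);

        if (!ptr)
                return -EFAULT;

        memcpy(buf, ptr, count);
        unxlate_dev_mem_ptr(phys, ptr);
        return 0;
}
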
#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
                __section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                   unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(NULL, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                        unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int nesting;

        nesting = --early_ioremap_nested;
        if (WARN_ON(nesting < 0))
                return;

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}

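/*
 * Illustrative sketch, not part of the original file: the usual
 * boot-time pattern of mapping a small firmware area with
 * early_ioremap(), peeking at it, and dropping the mapping again
 * before the normal ioremap() machinery is up.  The physical address
 * and the 16-byte probe size are invented for the example.
 */
static void __init __maybe_unused example_early_probe(void)
{
        void *p = early_ioremap(0x000f0000, 16);        /* hypothetical table */

        if (p) {
                unsigned int sig = *(unsigned int *)p;

                printk(KERN_DEBUG "example: signature %08x\n", sig);
                early_iounmap(p, 16);
        }
}
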
void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}

#endif /* CONFIG_X86_32 */