/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

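/*
 * Translate a kernel virtual address from either the direct mapping
 * or the kernel text mapping back to its physical address.
 */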
unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif

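/*
 * Note: page_is_ram() takes a page frame number, not a byte address;
 * callers shift a physical address down by PAGE_SHIFT first, as the
 * call sites in this file do.
 */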
int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * A special case is the first 4KB of memory:
         * this is a BIOS-owned area, not kernel RAM, but generally
         * not listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: some BIOSes report the PC BIOS
         * area (640KB->1MB) as RAM even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
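/*
 * Background: x86 requires that all mappings of a given physical page
 * use a consistent cache attribute, so a UC/WC ioremap() must also
 * adjust any alias of the same pages in the kernel direct mapping.
 */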
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT; pfn < max_pfn_mapped &&
                (pfn << PAGE_SHIFT) < last_addr; pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fall back to certain memory types with certain
                 * requested types:
                 * - request is uc-, return cannot be write-back
                 * - request is uc-, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC_MINUS &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        return (void __iomem *) (vaddr + offset);
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

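/*
 * Usage sketch (illustrative only; pdev and MY_STATUS_REG are
 * hypothetical): a PCI driver typically maps a register BAR like this:
 *
 *      void __iomem *regs;
 *      u32 status;
 *
 *      regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *                             pci_resource_len(pdev, 0));
 *      if (!regs)
 *              return -ENOMEM;
 *      status = readl(regs + MY_STATUS_REG);
 *      ...
 *      iounmap(regs);
 */
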
/**
 * ioremap_wc   -       map memory into CPU space write combined
 * @phys_addr:  bus address of the memory
 * @size:       size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_wc_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

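/*
 * Usage sketch (illustrative; fb_start and fb_len are hypothetical):
 * write combining is typically used for framebuffer-style apertures,
 * where batched writes matter and reads have no side effects:
 *
 *      void __iomem *fb = ioremap_wc(fb_start, fb_len);
 *
 * Note the graceful fallback above: without PAT this degrades to an
 * uncached mapping rather than failing.
 */
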
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there
         * isn't another iounmap for the same address in parallel. Reuse
         * of the virtual address is prevented by leaving it in the
         * global lists until we're done with it. cpa takes care of the
         * direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void *)ioremap(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}

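/*
 * Usage sketch (illustrative): the /dev/mem read path pairs these two
 * helpers around each access, roughly:
 *
 *      ptr = xlate_dev_mem_ptr(p);
 *      if (copy_to_user(buf, ptr, sz))
 *              ...
 *      unxlate_dev_mem_ptr(p, ptr);
 */
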
#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
                __section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                   unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(NULL, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                        unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
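        /*
         * Fixmap indices map to decreasing virtual addresses, so
         * walking idx downwards yields consecutive, increasing
         * virtual pages matching the increasing physical pages.
         */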
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}

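/*
 * Usage sketch (illustrative; table_phys and hdr are hypothetical):
 * boot code uses these temporary mappings to inspect firmware tables
 * before the real ioremap() is available:
 *
 *      hdr = early_ioremap(table_phys, sizeof(*hdr));
 *      ... read the header fields ...
 *      early_iounmap(hdr, sizeof(*hdr));
 */
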
void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int nesting;    /* signed, so the underflow WARN_ON below can fire */

        nesting = --early_ioremap_nested;
        WARN_ON(nesting < 0);

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}

#endif /* CONFIG_X86_32 */