/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#endif

int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	/*
	 * A special case is the first 4 KB of memory;
	 * this is a BIOS-owned area, not kernel RAM, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: some BIOSes report the PC BIOS
	 * area (640 KB to 1 MB) as RAM even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long pfn, offset, last_addr, vaddr;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < last_addr; pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap: reserve_memtype returned %d\n",
		       retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fall back to certain memory types with certain
		 * requested type:
		 * - request is uncached, return cannot be write-back
		 * - request is uncached, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_NOCACHE;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		free_memtype(phys_addr, phys_addr + size);
		vunmap(area->addr);
		return NULL;
	}

	return (void __iomem *) (vaddr + offset);
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
}
EXPORT_SYMBOL(ioremap_nocache);
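
/*
 * A minimal usage sketch (not part of this file's interface): a driver maps
 * a device's MMIO window uncached, pokes a register through the mmio
 * accessors, and tears the mapping down again. The physical address, length
 * and register offsets below are hypothetical placeholders.
 */
#if 0	/* illustrative example only */
static int example_probe(void)
{
	void __iomem *regs;

	regs = ioremap_nocache(0xfebf0000, 0x1000);	/* hypothetical BAR */
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x04);	/* hypothetical enable register */
	(void)readl(regs + 0x08);	/* hypothetical status register */

	iounmap(regs);
	return 0;
}
#endif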

/**
 * ioremap_wc   -       map memory into CPU space write combined
 * @phys_addr:     bus address of the memory
 * @size:       size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
	if (pat_wc_enabled)
		return __ioremap(phys_addr, size, _PAGE_CACHE_WC);
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
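
/*
 * Illustrative sketch only: write-combining is typically used for linear
 * framebuffers or other large, write-mostly apertures. The aperture base and
 * size below are hypothetical; note that ioremap_wc() silently falls back to
 * an uncached mapping when PAT write-combining is not available.
 */
#if 0	/* illustrative example only */
static void __iomem *example_map_framebuffer(void)
{
	/* hypothetical 16 MB graphics aperture */
	void __iomem *fb = ioremap_wc(0xd0000000, 16 * 1024 * 1024);

	if (fb)
		memset_io(fb, 0, 16 * 1024 * 1024);	/* clear the screen */
	return fb;
}
#endif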

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
}
EXPORT_SYMBOL(ioremap_cache);
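
/*
 * Illustrative sketch only: a cacheable mapping suits read-mostly firmware
 * data (ACPI/SMBIOS-style tables) that lives outside the kernel's direct
 * mapping. The table address and the caller-supplied length are hypothetical.
 */
#if 0	/* illustrative example only */
static int example_copy_firmware_table(void *buf, unsigned long len)
{
	void __iomem *tbl = ioremap_cache(0x7fee0000, len);	/* hypothetical */

	if (!tbl)
		return -ENOMEM;
	memcpy_fromio(buf, tbl, len);
	iounmap(tbl);
	return 0;
}
#endif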

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space.   So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
		__section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				   unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(NULL, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(early_ioremap_nested < 0);

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
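
/*
 * Illustrative sketch only: early_ioremap() is for short-lived boot-time
 * mappings, for example peeking at a BIOS/firmware structure before the
 * normal ioremap machinery is available. The physical address and length
 * are hypothetical, and the mapping must be balanced with early_iounmap().
 */
#if 0	/* illustrative example only */
static u32 __init example_read_boot_table(void)
{
	u32 val = 0;
	void *p = early_ioremap(0x000f5a00, 64);	/* hypothetical table */

	if (p) {
		val = *(u32 *)p;		/* first dword of the table */
		early_iounmap(p, 64);
	}
	return val;
}
#endif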

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */