/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

enum ioremap_mode {
        IOR_MODE_UNCACHED,
        IOR_MODE_CACHED,
};

#ifdef CONFIG_X86_64
/*
 * Translate a kernel virtual address back to a physical address, for both
 * the direct mapping and the kernel text mapping.
 */
unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif
int page_is_ram(unsigned long pagenr)
{
        unsigned long addr, end;
        int i;

        /*
         * A special case is the first 4Kb of memory; this is a BIOS owned
         * area, not kernel RAM, but generally not listed as such in the
         * E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: some BIOSes report the PC BIOS area
         * (640k->1Mb) as RAM even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
            pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /* Not usable memory: */
                if (e820.map[i].type != E820_RAM)
                        continue;
                /* Partial pages at either end do not count as RAM: */
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}
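/*
 * Worked example (hypothetical E820 layout, not from real firmware):
 * given a single E820_RAM entry covering [0x100000, 0x7ff0000), the
 * loop above computes addr = 0x100 and end = 0x7ff0, so
 * page_is_ram(0x100) returns 1 while page_is_ram(0x7ff0) returns 0.
 */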
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               enum ioremap_mode mode)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (mode) {
        case IOR_MODE_UNCACHED:
        default:
                err = set_memory_uc(vaddr, nrpages);
                break;
        case IOR_MODE_CACHED:
                err = set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
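/*
 * Worked example of the alignment fix-up below (illustrative numbers):
 * for phys_addr = 0x10000004 and size = 8, last_addr = 0x1000000b and
 * offset = 4; the page-aligned base becomes 0x10000000 with size one
 * page, and the caller receives the mapped base plus the offset of 4.
 */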
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                               enum ioremap_mode mode)
{
        unsigned long pfn, offset, last_addr, vaddr;
        struct vm_struct *area;
        pgprot_t prot;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                return NULL;
        }

        /* Don't remap the low PCI/ISA area, it's always mapped.. */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /* Don't allow anybody to remap normal RAM that we're using.. */
        for (pfn = phys_addr >> PAGE_SHIFT;
             (pfn << PAGE_SHIFT) < last_addr; pfn++) {
                if (page_is_ram(pfn) && pfn_valid(pfn) &&
                    !PageReserved(pfn_to_page(pfn)))
                        return NULL;
        }

        switch (mode) {
        case IOR_MODE_UNCACHED:
        default:
                /* FIXME: we will use UC MINUS for now, as video fb drivers
                 * depend on it. Upcoming ioremap_wc() will fix this behavior. */
                prot = PAGE_KERNEL_UC_MINUS;
                break;
        case IOR_MODE_CACHED:
                prot = PAGE_KERNEL;
                break;
        }

        /* Mappings have to be page-aligned */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, mode) < 0) {
                vunmap(area->addr);
                return NULL;
        }

        return (void __iomem *) (vaddr + offset);
}
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses, so in particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
}
EXPORT_SYMBOL(ioremap_nocache);
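/*
 * Usage sketch (illustrative only; pdev and the BAR layout are
 * hypothetical, not part of this file):
 *
 *      void __iomem *regs;
 *      u32 status;
 *
 *      regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *                             pci_resource_len(pdev, 0));
 *      if (!regs)
 *              return -ENOMEM;
 *      status = readl(regs);
 *      iounmap(regs);
 */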
/*
 * ioremap_cache - variant of ioremap_nocache that creates a cacheable
 * (write-back) mapping.
 */
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, IOR_MODE_CACHED);
}
EXPORT_SYMBOL(ioremap_cache);
/**
 * iounmap - free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there
         * isn't another iounmap for the same address in parallel. Reuse
         * of the virtual address is prevented by leaving it in the global
         * lists until we're done with it. cpa takes care of the direct
         * mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);
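/*
 * Note the PAGE_MASK alignment above: __ioremap returns the mapped base
 * plus any sub-page offset, so a sketch like this (hypothetical device
 * address) still unmaps the right vm area:
 *
 *      void __iomem *p = ioremap_nocache(0xfed00004, 8);
 *      ...
 *      iounmap(p);
 */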
#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
                __section(.bss.page_aligned);
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}
void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                       fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
                       fix_to_virt(FIX_BTMAP_END));
                printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
                       FIX_BTMAP_BEGIN);
        }
}
void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}
void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}
static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(NULL, addr, pte);
        __flush_tlb_one(addr);
}
static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}
int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;

        /* Mappings have to be page-aligned */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /* Mappings have to fit in the FIX_BTMAP area */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /* Ok, go for it.. */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                idx--;
                nrpages--;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}
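/*
 * Boot-time usage sketch, e.g. scanning the BIOS area for firmware
 * tables before the normal ioremap machinery is available (buf is a
 * caller-provided buffer; the address and length are illustrative):
 *
 *      void *p = early_ioremap(0xf0000, 0x10000);
 *      if (p) {
 *              memcpy(buf, p, 0x10000);
 *              early_iounmap(p, 0x10000);
 *      }
 */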
void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int nesting;

        nesting = --early_ioremap_nested;
        WARN_ON(nesting < 0);

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                idx--;
                nrpages--;
        }
}
/* Called via fix_to_virt() for out-of-range fixmap indices; should never run. */
void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}

#endif /* CONFIG_X86_32 */