/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995, 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

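/*
 * Illustration (a sketch, not part of the original file): __phys_addr()
 * is the out-of-line helper behind __pa() on 64-bit. Kernel-text
 * addresses sit above __START_KERNEL_map; everything else is assumed to
 * be in the direct mapping:
 *
 *      phys = __phys_addr((unsigned long)_text);          // text mapping
 *      phys = __phys_addr((unsigned long)__va(0x1000));   // yields 0x1000
 */
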
static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif

int page_is_ram(unsigned long pagenr)
{
        unsigned long addr, end;
        int i;

        /*
         * A special case is the first 4KB of memory:
         * this is a BIOS-owned area, not kernel RAM, but generally
         * not listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: some BIOSen report the PC BIOS
         * area (640k->1MB) as RAM even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
            pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /* Not usable memory: */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                        unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

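/*
 * Illustration (a sketch, not from the original file): without this
 * fixup the same physical page could be reachable with conflicting
 * cache attributes, which x86 forbids:
 *
 *      __va(phys)        -> write-back   (kernel direct mapping)
 *      ioremap'd vaddr   -> uncached     (new mapping)
 *
 * so the direct-mapping PTEs covering the range are switched, via the
 * _set_memory_*() helpers above, to the attribute just established for
 * the ioremap alias.
 */
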
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long pfn, offset, last_addr, vaddr;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
                        (pfn << PAGE_SHIFT) < last_addr; pfn++) {
                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, phys_addr + size,
                                 prot_val, &new_prot_val);
        if (retval) {
                printk(KERN_ERR "reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fall back to certain memory types with certain
                 * requested type:
                 * - request is uncached, return cannot be write-back
                 * - request is uncached, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        return (void __iomem *) (vaddr + offset);
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
}
EXPORT_SYMBOL(ioremap_nocache);

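/*
 * Usage sketch (illustration only, not part of this file): a PCI driver
 * typically maps a BAR once, accesses registers through the mmio
 * helpers, and unmaps on teardown. The register offsets below are
 * hypothetical.
 *
 *      void __iomem *regs;
 *
 *      regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *                             pci_resource_len(pdev, 0));
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(1, regs + 0x10);         // hypothetical control register
 *      (void)readl(regs + 0x14);       // read back to flush the posted write
 *      iounmap(regs);
 */
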
/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_wc_enabled)
                return __ioremap(phys_addr, size, _PAGE_CACHE_WC);
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

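/*
 * Usage sketch (illustration only): write-combined mappings suit large,
 * write-mostly regions such as framebuffers, where streaming stores may
 * be batched by the CPU. fb_phys and fb_len are hypothetical.
 *
 *      u32 __iomem *fb = ioremap_wc(fb_phys, fb_len);
 *
 * Note the fallback above: without PAT support the mapping quietly
 * degrades to uncached.
 */
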
/* Map bus memory into CPU space with write-back caching. */
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
}
EXPORT_SYMBOL(ioremap_cache);

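/*
 * Usage sketch (illustration only): a cacheable mapping fits memory that
 * behaves like RAM, e.g. firmware tables in reserved memory. table_phys
 * and table_len are hypothetical.
 *
 *      void __iomem *tbl = ioremap_cache(table_phys, table_len);
 */
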
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there
         * isn't another iounmap for the same address in parallel. Reuse
         * of the virtual address is prevented by leaving it in the
         * global lists until we're done with it. cpa takes care of the
         * direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
                __section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                       fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                       fix_to_virt(FIX_BTMAP_END));
                printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(NULL, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}

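/*
 * Usage sketch (illustration only): boot code pairs early_ioremap() with
 * early_iounmap() before paging_init(), e.g. to peek at a firmware table
 * whose physical address came from the boot loader. find_table_phys()
 * and parse_table() are hypothetical helpers.
 *
 *      void *tbl = early_ioremap(find_table_phys(), table_len);
 *      if (tbl) {
 *              parse_table(tbl);
 *              early_iounmap(tbl, table_len);
 *      }
 */
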
void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int nesting;

        nesting = --early_ioremap_nested;
        WARN_ON(nesting < 0);

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}

#endif /* CONFIG_X86_32 */