/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

enum ioremap_mode {
	IOR_MODE_UNCACHED,
	IOR_MODE_CACHED,
};

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#endif

int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
	    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
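
/*
 * Worked example (hypothetical E820 entry, 4K pages): for a RAM range
 * with addr = 0x800 and size = 0x200000, i.e. bytes 0x800..0x2007ff,
 *
 *	addr = (0x800 + 0xfff) >> 12    = 0x001
 *	end  = (0x800 + 0x200000) >> 12 = 0x200
 *
 * so only the fully covered pfns 0x001..0x1ff are reported as RAM; the
 * partially covered pages at pfn 0 and pfn 0x200 are excluded, which is
 * the conservative answer for the ioremap checks below.
 */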

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		err = set_memory_uc(vaddr, nrpages);
		break;
	case IOR_MODE_CACHED:
		err = set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long pfn, offset, last_addr, vaddr;
	struct vm_struct *area;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT; pfn < max_pfn_mapped &&
	     (pfn << PAGE_SHIFT) < last_addr; pfn++) {
		if (page_is_ram(pfn) && pfn_valid(pfn) &&
		    !PageReserved(pfn_to_page(pfn)))
			return NULL;
	}

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		/*
		 * FIXME: we will use UC MINUS for now, as video fb drivers
		 * depend on it. Upcoming ioremap_wc() will fix this behavior.
		 */
		prot = PAGE_KERNEL_UC_MINUS;
		break;
	case IOR_MODE_CACHED:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, mode) < 0) {
		vunmap(area->addr);
		return NULL;
	}

	return (void __iomem *) (vaddr + offset);
}
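
/*
 * Alignment example (hypothetical request): ioremap of phys_addr =
 * 0xfebc1004, size = 0x10 gives last_addr = 0xfebc1013 and therefore
 *
 *	offset    = 0x004
 *	phys_addr = 0xfebc1000
 *	size      = PAGE_ALIGN(0xfebc1014) - 0xfebc1000 = 0x1000
 *
 * so exactly one page is mapped and the caller gets back the new
 * virtual address of that page plus 0x004, as promised in the NOTE
 * above.
 */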

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
}
EXPORT_SYMBOL(ioremap_nocache);
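
/*
 * Typical use, as a sketch only (the BAR index and the register offsets
 * below are hypothetical and not defined by this file):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);		(hypothetical control register)
 *	status = readl(regs + 0x14);	(hypothetical status register)
 *	iounmap(regs);
 */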

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
		__section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}
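
/*
 * Sizing note: bm_pte above is a single page of ptes (PAGE_SIZE /
 * sizeof(pte_t) entries), so it can back exactly one pmd's worth of
 * virtual space. That is why early_ioremap_init() below checks that
 * FIX_BTMAP_BEGIN and FIX_BTMAP_END fall under the same pmd.
 */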

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(NULL, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}
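
/*
 * Boot-time use, as a sketch (the table type and physical address are
 * hypothetical; use whatever firmware structure the caller needs):
 *
 *	struct boot_table *tbl;
 *
 *	tbl = early_ioremap(phys, sizeof(*tbl));
 *	if (!tbl)
 *		return;
 *	... read fields out of *tbl ...
 *	early_iounmap(tbl, sizeof(*tbl));
 *
 * Mappings come from the FIX_BTMAP fixmap slots, so at most
 * FIX_BTMAPS_NESTING of them can be live at once, and every mapping
 * must be paired with early_iounmap(); check_early_ioremap_leak()
 * warns during boot if any are left behind.
 */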

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(early_ioremap_nested < 0);

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */