/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000

static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
			  unsigned long phys_addr, const struct mem_type *type)
{
	pgprot_t prot = __pgprot(type->prot_pte);
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;

	do {
		if (!pte_none(*pte))
			goto bad;

		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
		phys_addr += PAGE_SIZE;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;

 bad:
	printk(KERN_CRIT "remap_area_pte: page already exists\n");
	BUG();
}

static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
				 unsigned long end, unsigned long phys_addr,
				 const struct mem_type *type)
{
	unsigned long next;
	pmd_t *pmd;
	int ret = 0;

	pmd = pmd_alloc(&init_mm, pgd, addr);
	if (!pmd)
		return -ENOMEM;

	do {
		next = pmd_addr_end(addr, end);
		ret = remap_area_pte(pmd, addr, next, phys_addr, type);
		if (ret)
			return ret;
		phys_addr += next - addr;
	} while (pmd++, addr = next, addr != end);
	return ret;
}

static int remap_area_pages(unsigned long start, unsigned long pfn,
			    size_t size, const struct mem_type *type)
{
	unsigned long addr = start;
	unsigned long next, end = start + size;
	unsigned long phys_addr = __pfn_to_phys(pfn);
	pgd_t *pgd;
	int err = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_area_pmd(pgd, addr, next, phys_addr, type);
		if (err)
			break;
		phys_addr += next - addr;
	} while (pgd++, addr = next, addr != end);

	return err;
}
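
/*
 * The vmalloc/ioremap region lives in the kernel page tables (init_mm).
 * Whenever ioremap()/iounmap() change a section mapping there,
 * init_mm.context.kvm_seq is incremented.  This helper copies the pgd
 * entries covering VMALLOC_START..VMALLOC_END from init_mm into 'mm',
 * retrying until it observes a stable sequence number, so that other
 * address spaces pick up the change lazily.
 */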
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area() allocates a guard 4K page, so we need to mask
 * the size back to 1MB aligned or we will overflow in the loop below.
 */
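/*
 * Worked example (illustrative numbers, not from the original source):
 * a 1MB ioremap() ends up with a vm_struct of SZ_1M + PAGE_SIZE, and
 * masking that back down gives (0x101000 & ~(SZ_1M - 1)) == 0x100000,
 * i.e. exactly 1MB, which is what the section walk below relies on.
 */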
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		/* Supersection descriptors carry PA[35:32] in bits [23:20]. */
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		/* A supersection must be repeated in 16 consecutive entries. */
		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif	/* !CONFIG_SMP */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'mtype' selects the memory type for this mapping (MT_DEVICE and
 * friends), and hence the protection bits used.  See <asm/mach/map.h>
 * for more information.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	      cpu_is_xsc3()) && pfn >= 0x100000 &&
	    !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = remap_area_pages(addr, pfn, size, type);

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn(pfn, offset, size, mtype);
}
EXPORT_SYMBOL(__arm_ioremap);
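
/*
 * Illustrative sketch, not part of the original file: the usage pattern the
 * top-of-file comment describes - map with ioremap()/__arm_ioremap(), touch
 * the region only through readl()/writel() and friends, then iounmap().
 * The physical base address and register offsets are made-up placeholders.
 */
#if 0
static int example_probe(void)
{
	void __iomem *regs;
	u32 id;

	/* Map 4K of (hypothetical) device registers. */
	regs = __arm_ioremap(0x40000000, SZ_4K, MT_DEVICE);
	if (!regs)
		return -ENOMEM;

	id = readl(regs + 0x00);	/* only readl/writel on __iomem */
	writel(id | 1, regs + 0x04);

	iounmap(regs);
	return 0;
}
#endif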

void __iounmap(volatile void __iomem *addr)
{
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;
#endif
	unsigned int section_mapping = 0;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long)addr);

#ifndef CONFIG_SMP
	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here because we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				*p = tmp->next;
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
				kfree(tmp);
				section_mapping = 1;
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	if (!section_mapping)
		vunmap((void __force *)addr);
}
EXPORT_SYMBOL(__iounmap);