#include <asm/pgtable.h>
#include <asm/sections.h>
+#include <asm/unwind.h>
#ifdef CONFIG_XIP_KERNEL
/*
char *secstrings,
struct module *mod)
{
+#ifdef CONFIG_ARM_UNWIND
+	Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
+
+	for (s = sechdrs; s < sechdrs_end; s++) {
+		if (strcmp(".ARM.exidx.init.text", secstrings + s->sh_name) == 0)
+			mod->arch.unw_sec_init = s;
+		else if (strcmp(".ARM.exidx.devinit.text", secstrings + s->sh_name) == 0)
+			mod->arch.unw_sec_devinit = s;
+		else if (strcmp(".ARM.exidx", secstrings + s->sh_name) == 0)
+			mod->arch.unw_sec_core = s;
+		else if (strcmp(".init.text", secstrings + s->sh_name) == 0)
+			mod->arch.sec_init_text = s;
+		else if (strcmp(".devinit.text", secstrings + s->sh_name) == 0)
+			mod->arch.sec_devinit_text = s;
+		else if (strcmp(".text", secstrings + s->sh_name) == 0)
+			mod->arch.sec_core_text = s;
+	}
+#endif
	return 0;
}
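The mod->arch fields written above must have matching members in struct
mod_arch_specific (arch/arm/include/asm/module.h), which this excerpt does
not show. A minimal sketch of what that header needs, reconstructed from the
call sites here and in register_unwind_tables() below rather than quoted
from the patch:

struct unwind_table;

struct mod_arch_specific {
#ifdef CONFIG_ARM_UNWIND
	Elf_Shdr *unw_sec_init;			/* .ARM.exidx.init.text */
	Elf_Shdr *unw_sec_devinit;		/* .ARM.exidx.devinit.text */
	Elf_Shdr *unw_sec_core;			/* .ARM.exidx */
	Elf_Shdr *sec_init_text;		/* .init.text */
	Elf_Shdr *sec_devinit_text;		/* .devinit.text */
	Elf_Shdr *sec_core_text;		/* .text */
	struct unwind_table *unwind_init;	/* handles from unwind_table_add() */
	struct unwind_table *unwind_devinit;
	struct unwind_table *unwind_core;
#endif
};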
		loc = dstsec->sh_addr + rel->r_offset;
		switch (ELF32_R_TYPE(rel->r_info)) {
+		case R_ARM_NONE:
+			/* ignore */
+			break;
+
		case R_ARM_ABS32:
			*(u32 *)loc += sym->st_value;
			break;
			*(u32 *)loc |= offset & 0x00ffffff;
			break;
+		case R_ARM_V4BX:
+			/* Preserve Rm and the condition code. Alter
+			 * other bits to re-code instruction as
+			 * MOV PC,Rm.
+			 */
+			*(u32 *)loc &= 0xf000000f;
+			*(u32 *)loc |= 0x01a0f000;
+			break;
+
+		case R_ARM_PREL31:
+			/* 31-bit place-relative offset, as used by the
+			 * .ARM.exidx unwind index entries.
+			 */
+			offset = *(u32 *)loc + sym->st_value - loc;
+			*(u32 *)loc = offset & 0x7fffffff;
+			break;
+
		default:
			printk(KERN_ERR "%s: unknown relocation: %u\n",
			       module->name, ELF32_R_TYPE(rel->r_info));
			return -ENOEXEC;
		}
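To see what the two new relocation types do to the words they patch, here is
a stand-alone user-space sketch with illustrative values (the addresses and
the BX r3 instruction are made up for the example; only the masks and the
formula come from the code above):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* R_ARM_V4BX: rewrite BX r3 (0xe12fff13) as MOV PC, r3.
	 * 0xf000000f keeps the condition code (bits 31-28) and Rm
	 * (bits 3-0); 0x01a0f000 is the MOV PC,<Rm> template.
	 */
	uint32_t insn = 0xe12fff13;
	insn = (insn & 0xf000000f) | 0x01a0f000;
	printf("V4BX:   0x%08x\n", insn);	/* prints 0xe1a0f003 */

	/* R_ARM_PREL31: 31-bit place-relative offset.  With an
	 * in-place addend of 0, a symbol at 0xc0101000 and the
	 * relocated word at 0xc0100800:
	 * (0 + 0xc0101000 - 0xc0100800) & 0x7fffffff = 0x00000800.
	 */
	uint32_t addend = 0, sym = 0xc0101000, loc = 0xc0100800;
	printf("PREL31: 0x%08x\n", (addend + sym - loc) & 0x7fffffff);
	return 0;
}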
+#ifdef CONFIG_ARM_UNWIND
+static void register_unwind_tables(struct module *mod)
+{
+	if (mod->arch.unw_sec_init && mod->arch.sec_init_text)
+		mod->arch.unwind_init =
+			unwind_table_add(mod->arch.unw_sec_init->sh_addr,
+					 mod->arch.unw_sec_init->sh_size,
+					 mod->arch.sec_init_text->sh_addr,
+					 mod->arch.sec_init_text->sh_size);
+	if (mod->arch.unw_sec_devinit && mod->arch.sec_devinit_text)
+		mod->arch.unwind_devinit =
+			unwind_table_add(mod->arch.unw_sec_devinit->sh_addr,
+					 mod->arch.unw_sec_devinit->sh_size,
+					 mod->arch.sec_devinit_text->sh_addr,
+					 mod->arch.sec_devinit_text->sh_size);
+	if (mod->arch.unw_sec_core && mod->arch.sec_core_text)
+		mod->arch.unwind_core =
+			unwind_table_add(mod->arch.unw_sec_core->sh_addr,
+					 mod->arch.unw_sec_core->sh_size,
+					 mod->arch.sec_core_text->sh_addr,
+					 mod->arch.sec_core_text->sh_size);
+}
+
+static void unregister_unwind_tables(struct module *mod)
+{
+	unwind_table_del(mod->arch.unwind_init);
+	unwind_table_del(mod->arch.unwind_devinit);
+	unwind_table_del(mod->arch.unwind_core);
+}
+#else
+static inline void register_unwind_tables(struct module *mod) { }
+static inline void unregister_unwind_tables(struct module *mod) { }
+#endif
+
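Both helpers lean on the unwind table API from <asm/unwind.h>. Judging
purely from the call sites, the interface is along these lines (a sketch,
not the verbatim header). Note that unregister_unwind_tables() passes the
mod->arch.unwind_* pointers unconditionally, so unwind_table_del() is
expected to accept a NULL handle for tables that were never registered:

struct unwind_table *unwind_table_add(unsigned long start,
				      unsigned long size,
				      unsigned long text_addr,
				      unsigned long text_size);
void unwind_table_del(struct unwind_table *tab);	/* NULL is a no-op */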
int
module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *module)
{
+	register_unwind_tables(module);
	return 0;
}
void
module_arch_cleanup(struct module *mod)
{
+	unregister_unwind_tables(mod);
}
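For context, the generic loader in kernel/module.c drives the arch hooks
touched by this patch in a fixed order; roughly (simplified, error paths
omitted):

/*
 * load_module():
 *	module_frob_arch_sections()	- record .ARM.exidx* and text
 *					  section headers in mod->arch
 *	apply_relocate()		- resolve R_ARM_PREL31 entries in
 *					  the index tables, among others
 *	module_finalize()		- register_unwind_tables()
 *
 * free_module():
 *	module_arch_cleanup()		- unregister_unwind_tables()
 */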
#include <asm/cputype.h>
#include <asm/mach-types.h>
#include <asm/sections.h>
+#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
+#include <asm/highmem.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
+	[MT_MEMORY_NONCACHED] = {
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+		.domain    = DOMAIN_KERNEL,
+	},
};
const struct mem_type *get_mem_type(unsigned int type)
		kern_pgprot |= L_PTE_SHARED;
		vecs_pgprot |= L_PTE_SHARED;
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
#endif
}
+	/*
+	 * Non-cacheable Normal - intended for memory areas that must
+	 * not cause dirty cache line writebacks when used
+	 */
+	if (cpu_arch >= CPU_ARCH_ARMv6) {
+		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+			/* Non-cacheable Normal is XCB = 001 */
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+				PMD_SECT_BUFFERED;
+		} else {
+			/* For both ARMv6 and non-TEX-remapping ARMv7 */
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+				PMD_SECT_TEX(1);
+		}
+	} else {
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+	}
+
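The net effect of this ladder is easier to see as numbers. Restating the
usual short-descriptor bit positions locally (an assumption for the example;
the real values live in asm/pgtable-hwdef.h), MT_MEMORY_NONCACHED's section
descriptor works out per branch as follows:

#include <stdio.h>

/* assumed short-descriptor bit positions, restated for the example */
#define PMD_TYPE_SECT		(2 << 0)
#define PMD_SECT_BUFFERABLE	(1 << 2)	/* B bit */
#define PMD_SECT_AP_WRITE	(1 << 10)
#define PMD_SECT_TEX(x)		((x) << 12)
#define PMD_SECT_BUFFERED	PMD_SECT_BUFFERABLE

int main(void)
{
	unsigned int base = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;

	/* ARMv7 with TEX remap: XCB = 001 selects Normal non-cacheable */
	printf("TRE:    0x%08x\n", base | PMD_SECT_BUFFERED);	/* 0x00000406 */
	/* ARMv6, or ARMv7 without TEX remap: TEX=1, C=0, B=0 */
	printf("no-TRE: 0x%08x\n", base | PMD_SECT_TEX(1));	/* 0x00001402 */
	/* pre-v6 has no Normal type; plain uncached-but-bufferable */
	printf("pre-v6: 0x%08x\n", base | PMD_SECT_BUFFERABLE);	/* 0x00000406 */
	return 0;
}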
	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
		if (meminfo.nr_banks >= NR_BANKS) {
			printk(KERN_CRIT "NR_BANKS too low, "
			       "ignoring high memory\n");
+		} else if (cache_is_vipt_aliasing()) {
+			printk(KERN_CRIT "HIGHMEM is not yet supported "
+			       "with VIPT aliasing cache, "
+			       "ignoring high memory\n");
		} else {
			memmove(bank + 1, bank,
				(meminfo.nr_banks - i) * sizeof(*bank));
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= VMALLOC_MIN ||
-		    __va(bank->start) < PAGE_OFFSET) {
+		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
			       "(vmalloc region overlap).\n",
			       bank->start, bank->start + bank->size - 1);
	flush_cache_all();
}
+static void __init kmap_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+	pmd_t *pmd = pmd_off_k(PKMAP_BASE);
+	pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
+	BUG_ON(!pmd_none(*pmd) || !pte);
+	__pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
+	pkmap_page_table = pte + PTRS_PER_PTE;
+#endif
+}
+
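The doubled allocation in kmap_init() mirrors the classic (pre-LPAE) ARM PTE
layout, in which each table page carries the hardware PTEs followed by a
shadow "Linux" copy holding the extra software bits. A quick sanity check,
assuming PTRS_PER_PTE == 512 and sizeof(pte_t) == 4 as on such kernels:

/*
 * 2 * 512 * 4 = 4096 bytes, i.e. exactly one page:
 *
 *	pte + 0		h/w PTEs - what __pmd_populate() points the
 *			pmd entries at
 *	pte + 512	Linux PTEs - hence
 *			pkmap_page_table = pte + PTRS_PER_PTE, so the
 *			generic highmem code edits the Linux copy
 *
 * The window itself spans LAST_PKMAP (= PTRS_PER_PTE) pages:
 * 512 * 4K = 2MB of kmap() address space above PKMAP_BASE.
 */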
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
	prepare_page_table();
	bootmem_init();
	devicemaps_init(mdesc);
+	kmap_init();
	top_pmd = pmd_off_k(0xffff0000);