x86: introduce /dev/mem restrictions with a config option
[linux-2.6-omap-h63xx.git] / arch/x86/mm/init_32.c
index 73dd0601166a0d7b94475c677f9318342414cb05..39852d539018cf24c84f9835ec10244cee89499e 100644
@@ -1,5 +1,4 @@
 /*
- *  linux/arch/i386/mm/init.c
  *
  *  Copyright (C) 1995  Linus Torvalds
  *
@@ -51,7 +50,7 @@
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
 
-unsigned long end_pfn_map;
+unsigned long max_pfn_mapped;
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 unsigned long highstart_pfn, highend_pfn;
@@ -181,8 +180,13 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
+                        *
+                        * Don't use a large page for the first 2/4MB of memory
+                        * because there are often fixed-size MTRRs in there
+                        * and MTRRs that overlap large pages can cause
+                        * slowdowns.
                         */
-                       if (cpu_has_pse) {
+                       if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
 
@@ -196,7 +200,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
                                set_pmd(pmd, pfn_pmd(pfn, prot));
 
                                pfn += PTRS_PER_PTE;
-                               end_pfn_map = pfn;
+                               max_pfn_mapped = pfn;
                                continue;
                        }
                        pte = one_page_table_init(pmd);
@@ -211,7 +215,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 
                                set_pte(pte, pfn_pte(pfn, prot));
                        }
-                       end_pfn_map = pfn;
+                       max_pfn_mapped = pfn;
                }
        }
 }
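
The three hunks above form the change to kernel_physical_mapping_init(): the renamed max_pfn_mapped still records how far the direct mapping has been built, and the new pgd_idx/pmd_idx test keeps the very first large-page slot mapped with ordinary 4 KB pages. The following stand-alone sketch (user-space C, not kernel code) illustrates that policy under the assumption of non-PAE x86-32, where PTRS_PER_PTE is 1024 and one large page covers 4 MB; the 16 MB max_pfn is invented for the example:

/*
 * Stand-alone sketch of the mapping policy in the hunks above.
 * Assumes non-PAE x86-32: PTRS_PER_PTE = 1024, large page = 4 MB.
 * The 16 MB "max_pfn" is made up for illustration only.
 */
#include <stdio.h>

#define PTRS_PER_PTE	1024UL		/* 4 KB PTEs per large page */

int main(void)
{
	unsigned long pfn, max_pfn = 4 * PTRS_PER_PTE;	/* pretend 16 MB RAM */
	unsigned long max_pfn_mapped = 0;

	for (pfn = 0; pfn < max_pfn; pfn += PTRS_PER_PTE) {
		if (pfn == 0) {
			/*
			 * First 4 MB: keep 4 KB mappings so the fixed-size
			 * MTRRs that often live there never overlap a large
			 * page and cause slowdowns.
			 */
			printf("pfn %5lu-%5lu: 1024 x 4 KB pages\n",
			       pfn, pfn + PTRS_PER_PTE - 1);
		} else {
			printf("pfn %5lu-%5lu: one 4 MB large page\n",
			       pfn, pfn + PTRS_PER_PTE - 1);
		}
		max_pfn_mapped = pfn + PTRS_PER_PTE;	/* highest pfn mapped so far */
	}
	printf("max_pfn_mapped = %lu\n", max_pfn_mapped);
	return 0;
}

With PAE enabled PTRS_PER_PTE drops to 512 and a large page covers 2 MB, which is why the new comment speaks of the first 2/4MB.
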
@@ -223,6 +227,25 @@ static inline int page_kills_ppro(unsigned long pagenr)
        return 0;
 }
 
+/*
+ * devmem_is_allowed() checks to see if /dev/mem access to a certain address
+ * is valid. The argument is a physical page number.
+ *
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area contains BIOS code and data regions used by X, dosemu and similar
+ * apps.
+ * Access has to be given to non-kernel-RAM areas as well; these contain
+ * the PCI MMIO resources as well as potential BIOS/ACPI data regions.
+ */
+int devmem_is_allowed(unsigned long pagenr)
+{
+       if (pagenr <= 256)
+               return 1;
+       if (!page_is_ram(pagenr))
+               return 1;
+       return 0;
+}
+
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
 pgprot_t kmap_prot;
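
The new devmem_is_allowed() is the policy hook for the /dev/mem restrictions named in the commit title: the low megabyte and everything that is not kernel RAM stay readable, while ordinary RAM above 1 MB does not. Below is a small user-space sketch of the same logic, assuming 4 KB pages (so page 256 sits at the 1 MB boundary) and with page_is_ram() stubbed out, since the real helper consults the kernel's memory map; the sample page numbers and the pretend 128 MB RAM range are invented for illustration:

#include <stdio.h>

/* Stub: the real page_is_ram() walks the kernel's memory map. */
static int page_is_ram(unsigned long pagenr)
{
	/* Pretend pages 256..32767 (1 MB..128 MB) are ordinary RAM. */
	return pagenr >= 256 && pagenr < 32768;
}

/* Same policy as the devmem_is_allowed() added in the hunk above. */
static int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)		/* low megabyte: BIOS, video, X, dosemu */
		return 1;
	if (!page_is_ram(pagenr))	/* non-RAM: PCI MMIO, BIOS/ACPI data */
		return 1;
	return 0;			/* ordinary kernel RAM: refuse */
}

int main(void)
{
	unsigned long samples[] = { 0, 160, 256, 4096, 65536 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("pfn %6lu (phys %#11lx): %s\n", samples[i],
		       samples[i] << 12,	/* 4 KB pages: pfn * 4096 */
		       devmem_is_allowed(samples[i]) ? "allowed" : "denied");
	return 0;
}

Only the 16 MB sample lands in ordinary RAM and is refused; the low megabyte and the non-RAM page at 256 MB (standing in for PCI MMIO space) remain accessible, so X, dosemu and similar applications keep working. Note that the <= comparison also admits page 256 itself, the first page at the 1 MB mark.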