/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/msr.h>
#include <asm/tlbflush.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/pat.h>
#include <asm/e820.h>
#include <asm/cacheflush.h>
#include <asm/fcntl.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

void __cpuinit pat_disable(char *reason)
{
        pat_enabled = 0;
        printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
        pat_disable("PAT support disabled.");
        return 0;
}
early_param("nopat", nopat);
#endif

static int debug_enable;

static int __init pat_debug_setup(char *str)
{
        debug_enable = 1;
        return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
        do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)

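/*
 * Example (illustrative): with "debugpat" on the kernel command line,
 * a call such as
 *
 *      dprintk("Overlap at 0x%Lx-0x%Lx\n", entry->start, entry->end);
 *
 * is logged at KERN_INFO; without the parameter the same call only
 * tests debug_enable and prints nothing.
 */
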
static u64 __read_mostly boot_pat_state;

enum {
        PAT_UC = 0,             /* uncached */
        PAT_WC = 1,             /* Write combining */
        PAT_WT = 4,             /* Write Through */
        PAT_WP = 5,             /* Write Protected */
        PAT_WB = 6,             /* Write Back (default) */
        PAT_UC_MINUS = 7,       /* UC, but can be overridden by MTRR */
};

#define PAT(x, y)       ((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
        u64 pat;

        if (!pat_enabled)
                return;

        /* Paranoia check. */
        if (!cpu_has_pat && boot_pat_state) {
                /*
                 * If this happens we are on a secondary CPU, but
                 * switched to PAT on the boot CPU. We have no way to
                 * undo PAT.
                 */
                printk(KERN_ERR "PAT enabled, "
                       "but not supported by secondary CPU\n");
                BUG();
        }

        /* Set PWT to Write-Combining. All other bits stay the same */
        /*
         * PTE encoding used in Linux:
         *      PAT
         *      |PCD
         *      ||PWT
         *      |||
         *      000 WB          _PAGE_CACHE_WB
         *      001 WC          _PAGE_CACHE_WC
         *      010 UC-         _PAGE_CACHE_UC_MINUS
         *      011 UC          _PAGE_CACHE_UC
         * PAT bit unused
         */
        pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
              PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

        /* Boot CPU check */
        if (!boot_pat_state)
                rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

        wrmsrl(MSR_IA32_CR_PAT, pat);
        printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
               smp_processor_id(), boot_pat_state, pat);
}

#undef PAT

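/*
 * Worked example: the table above programs IA32_PAT to
 * 0x0007010600070106 (byte n holds entry n, so entries 0-3 are WB, WC,
 * UC-, UC and entries 4-7 mirror them). Because the upper half mirrors
 * the lower, only the PWT and PCD page-table bits select the memory
 * type and the PAT bit stays a don't-care, as noted in the encoding
 * comment above.
 */
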
static char *cattr_name(unsigned long flags)
{
        switch (flags & _PAGE_CACHE_MASK) {
        case _PAGE_CACHE_UC:            return "uncached";
        case _PAGE_CACHE_UC_MINUS:      return "uncached-minus";
        case _PAGE_CACHE_WB:            return "write-back";
        case _PAGE_CACHE_WC:            return "write-combining";
        default:                        return "broken";
        }
}

/*
 * The global memtype list keeps track of memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this, we keep
 * track of all mappings.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should become a problem
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */

struct memtype {
        u64                     start;
        u64                     end;
        unsigned long           type;
        struct list_head        nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);   /* protects memtype list */

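/*
 * Illustration (hypothetical addresses): two callers reserving the same
 * WC range plus an unrelated UC- range would leave the list as
 *
 *      write-combining 0xd0000000-0xd0100000
 *      write-combining 0xd0000000-0xd0100000   (alias entry)
 *      uncached-minus  0xfed00000-0xfed01000
 *
 * The duplicate entries are how overlapping reservations are reference
 * counted; each free_memtype() call removes one matching entry.
 */
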
/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (Type in pat and mtrr will not have same value)
 * The intersection is based on "Effective Memory Type" tables in IA-32
 * SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
        /*
         * Look for MTRR hint to get the effective type in case where PAT
         * request is for WB.
         */
        if (req_type == _PAGE_CACHE_WB) {
                u8 mtrr_type;

                mtrr_type = mtrr_type_lookup(start, end);
                if (mtrr_type == MTRR_TYPE_UNCACHABLE)
                        return _PAGE_CACHE_UC;
                if (mtrr_type == MTRR_TYPE_WRCOMB)
                        return _PAGE_CACHE_WC;
        }

        return req_type;
}

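/*
 * Example: a WB request over a range that an MTRR marks UNCACHABLE is
 * demoted to UC here, and one over a WRCOMB range to WC, matching the
 * effective memory type tables; any non-WB request passes through
 * unchanged.
 */
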
static int chk_conflict(struct memtype *new, struct memtype *entry,
                        unsigned long *type)
{
        if (new->type != entry->type) {
                if (type) {
                        new->type = entry->type;
                        *type = entry->type;
                } else
                        goto conflict;
        }

        /* check overlaps with more than one entry in the list */
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (new->end <= entry->start)
                        break;
                else if (new->type != entry->type)
                        goto conflict;
        }
        return 0;

 conflict:
        printk(KERN_INFO "%s:%d conflicting memory types "
               "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
               new->end, cattr_name(new->type), cattr_name(entry->type));
        return -EBUSY;
}

static struct memtype *cached_entry;
static u64 cached_start;

/*
 * RED-PEN: TODO: Add PageReserved() check as well here,
 * once we add SetPageReserved() to all the drivers using
 * set_memory_* or set_pages_*.
 *
 * This will help prevent accidentally freeing pages
 * before setting the attribute back to WB.
 */

/*
 * For RAM pages, mark the pages as non WB memory type using
 * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
 * set_memory_wc() on a RAM page at a time before marking it as WB again.
 * This is OK because only one driver will own the page and do
 * set_memory_*() calls at a time.
 *
 * For now, we use PageNonWB to track that the RAM page is being mapped
 * as non WB. In the future, we will have to use one more flag
 * (or some other mechanism in struct page) to distinguish between
 * UC and WC mappings.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
                                  unsigned long *new_type)
{
        struct page *page;
        u64 pfn, end_pfn;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                if (page_mapped(page) || PageNonWB(page))
                        goto out;

                SetPageNonWB(page);
        }
        return 0;

out:
        /* Roll back the flag on the pages marked so far */
        end_pfn = pfn;
        for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
                page = pfn_to_page(pfn);
                ClearPageNonWB(page);
        }

        return -EINVAL;
}

static int free_ram_pages_type(u64 start, u64 end)
{
        struct page *page;
        u64 pfn, end_pfn;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                if (page_mapped(page) || !PageNonWB(page))
                        goto out;

                ClearPageNonWB(page);
        }
        return 0;

out:
        /* Restore the flag on the pages already cleared */
        end_pfn = pfn;
        for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
                page = pfn_to_page(pfn);
                SetPageNonWB(page);
        }

        return -EINVAL;
}

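/*
 * Sketch of the intended flow (illustrative, not a complete driver): a
 * set_memory_uc() call on a RAM range reaches reserve_ram_pages_type()
 * via reserve_memtype() and tags each page PageNonWB; the matching
 * set_memory_wb() reaches free_ram_pages_type(), which clears the tag.
 * A second non-WB request while the tag is set fails with -EINVAL
 * instead of creating a conflicting alias.
 */
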
/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have the special case value '-1' when the requester wants
 * to inherit the memory type from mtrr (if WB) or existing PAT, defaulting
 * to UC_MINUS.
 *
 * If new_type is NULL, the function will return an error if it cannot
 * reserve the region with req_type. If new_type is non-NULL, the function
 * will return the available type in new_type in case of no error. In case
 * of any error it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                    unsigned long *new_type)
{
        struct memtype *new, *entry;
        unsigned long actual_type;
        struct list_head *where;
        int is_range_ram;
        int err = 0;

        BUG_ON(start >= end); /* end is exclusive */

        if (!pat_enabled) {
                /* This is identical to page table setting without PAT */
                if (new_type) {
                        if (req_type == -1)
                                *new_type = _PAGE_CACHE_WB;
                        else
                                *new_type = req_type & _PAGE_CACHE_MASK;
                }
                return 0;
        }

        /* Low ISA region is always mapped WB in page table. No need to track */
        if (is_ISA_range(start, end - 1)) {
                if (new_type)
                        *new_type = _PAGE_CACHE_WB;
                return 0;
        }

        if (req_type == -1) {
                /*
                 * Call mtrr_lookup to get the type hint. This is an
                 * optimization for /dev/mem mmap'ers into WB memory (BIOS
                 * tools and ACPI tools). Use WB request for WB memory and use
                 * UC_MINUS otherwise.
                 */
                u8 mtrr_type = mtrr_type_lookup(start, end);

                if (mtrr_type == MTRR_TYPE_WRBACK)
                        actual_type = _PAGE_CACHE_WB;
                else
                        actual_type = _PAGE_CACHE_UC_MINUS;
        } else
                actual_type = pat_x_mtrr_type(start, end,
                                              req_type & _PAGE_CACHE_MASK);

        is_range_ram = pagerange_is_ram(start, end);
        if (is_range_ram == 1)
                return reserve_ram_pages_type(start, end, req_type, new_type);
        else if (is_range_ram < 0)
                return -EINVAL;

        new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->start = start;
        new->end   = end;
        new->type  = actual_type;

        if (new_type)
                *new_type = actual_type;

        spin_lock(&memtype_lock);

        if (cached_entry && start >= cached_start)
                entry = cached_entry;
        else
                entry = list_entry(&memtype_list, struct memtype, nd);

        /* Search for existing mapping that overlaps the current range */
        where = NULL;
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (end <= entry->start) {
                        where = entry->nd.prev;
                        cached_entry = list_entry(where, struct memtype, nd);
                        break;
                } else if (start <= entry->start) { /* end > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                where = entry->nd.prev;
                                cached_entry = list_entry(where,
                                                        struct memtype, nd);
                        }
                        break;
                } else if (start < entry->end) { /* start > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                cached_entry = list_entry(entry->nd.prev,
                                                        struct memtype, nd);

                                /*
                                 * Move to right position in the linked
                                 * list to add this new entry
                                 */
                                list_for_each_entry_continue(entry,
                                                        &memtype_list, nd) {
                                        if (start <= entry->start) {
                                                where = entry->nd.prev;
                                                break;
                                        }
                                }
                        }
                        break;
                }
        }

        if (err) {
                printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
                       "track %s, req %s\n",
                       start, end, cattr_name(new->type),
                       cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);
                return err;
        }

        cached_start = start;

        if (where)
                list_add(&new->nd, where);
        else
                list_add_tail(&new->nd, &memtype_list);

        spin_unlock(&memtype_lock);

        dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
                start, end, cattr_name(new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");

        return err;
}

int free_memtype(u64 start, u64 end)
{
        struct memtype *entry;
        int err = -EINVAL;
        int is_range_ram;

        if (!pat_enabled)
                return 0;

        /* Low ISA region is always mapped WB. No need to track */
        if (is_ISA_range(start, end - 1))
                return 0;

        is_range_ram = pagerange_is_ram(start, end);
        if (is_range_ram == 1)
                return free_ram_pages_type(start, end);
        else if (is_range_ram < 0)
                return -EINVAL;

        spin_lock(&memtype_lock);
        list_for_each_entry(entry, &memtype_list, nd) {
                if (entry->start == start && entry->end == end) {
                        if (cached_entry == entry || cached_start == start)
                                cached_entry = NULL;

                        list_del(&entry->nd);
                        kfree(entry);
                        err = 0;
                        break;
                }
        }
        spin_unlock(&memtype_lock);

        if (err) {
                printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
                       current->comm, current->pid, start, end);
        }

        dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
        return err;
}

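/*
 * Usage sketch (hypothetical caller, names illustrative): a driver that
 * wants a write-combining view of a physical aperture would pair the
 * two calls above roughly as
 *
 *      unsigned long type;
 *
 *      if (reserve_memtype(base, base + len, _PAGE_CACHE_WC, &type) < 0)
 *              return -EBUSY;
 *      // map using 'type'; it may differ from the request if an
 *      // existing compatible mapping dictated another attribute
 *      free_memtype(base, base + len);
 */
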
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        if (!pat_enabled)
                return 1;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                                current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                 unsigned long size, pgprot_t *vma_prot)
{
        u64 offset = ((u64) pfn) << PAGE_SHIFT;
        unsigned long flags = -1;
        int retval;

        if (!range_is_allowed(pfn, size))
                return 0;

        if (file->f_flags & O_SYNC) {
                flags = _PAGE_CACHE_UC_MINUS;
        }

        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting UC or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (!pat_enabled &&
            !(boot_cpu_has(X86_FEATURE_MTRR) ||
              boot_cpu_has(X86_FEATURE_K6_MTRR) ||
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
              boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
            (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
                flags = _PAGE_CACHE_UC;
        }

        /*
         * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
         *
         * Without O_SYNC, we want to get
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
        if (flags != -1) {
                retval = reserve_memtype(offset, offset + size, flags, NULL);
        } else {
                retval = reserve_memtype(offset, offset + size, -1, &flags);
        }

        if (retval < 0)
                return 0;

        if (((pfn < max_low_pfn_mapped) ||
             (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
            ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
                free_memtype(offset, offset + size);
                printk(KERN_INFO
                "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
                        current->comm, current->pid,
                        cattr_name(flags),
                        offset, (unsigned long long)(offset + size));
                return 0;
        }

        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
                             flags);
        return 1;
}

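/*
 * Concrete effect (illustrative): an mmap() of /dev/mem opened with
 * O_SYNC is forced to UC_MINUS, and the mmap fails if that reservation
 * conflicts. Without O_SYNC, a region the MTRRs mark write-back is
 * mapped WB, anything else defaults to UC_MINUS, and the type of an
 * existing conflicting reservation is inherited instead.
 */
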
void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
        u64 addr = (u64)pfn << PAGE_SHIFT;
        unsigned long flags;
        unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);

        reserve_memtype(addr, addr + size, want_flags, &flags);
        if (flags != want_flags) {
                printk(KERN_INFO
                "%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
                        current->comm, current->pid,
                        cattr_name(want_flags),
                        addr, (unsigned long long)(addr + size),
                        cattr_name(flags));
        }
}

void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
        u64 addr = (u64)pfn << PAGE_SHIFT;

        free_memtype(addr, addr + size);
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
        struct memtype *list_node, *print_entry;
        int i = 1;

        print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!print_entry)
                return NULL;

        spin_lock(&memtype_lock);
        list_for_each_entry(list_node, &memtype_list, nd) {
                if (pos == i) {
                        *print_entry = *list_node;
                        spin_unlock(&memtype_lock);
                        return print_entry;
                }
                ++i;
        }
        spin_unlock(&memtype_lock);
        kfree(print_entry);

        return NULL;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0) {
                ++*pos;
                seq_printf(seq, "PAT memtype list:\n");
        }

        return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
        struct memtype *print_entry = (struct memtype *)v;

        seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
                   print_entry->start, print_entry->end);
        kfree(print_entry);

        return 0;
}

static struct seq_operations memtype_seq_ops = {
        .start = memtype_seq_start,
        .next  = memtype_seq_next,
        .stop  = memtype_seq_stop,
        .show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
        .open    = memtype_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
        debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
                            NULL, &memtype_fops);
        return 0;
}

late_initcall(pat_memtype_list_init);

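/*
 * Example output (illustrative addresses; with debugfs mounted at
 * /sys/kernel/debug, the file is /sys/kernel/debug/x86/pat_memtype_list):
 *
 *      PAT memtype list:
 *      uncached-minus @ 0xfed00000-0xfed01000
 *      write-combining @ 0xd0000000-0xd0100000
 *
 * One line is emitted per tracked range by memtype_seq_show() above.
 */
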
#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */