/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>

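/*
 * Look up the kernel PTE mapping @address.  *level is set to 3 when the
 * address is covered by a 2MB PMD entry and to 4 when a regular 4K PTE is
 * found.  Returns NULL if no mapping is present.
 */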
pte_t *lookup_address(unsigned long address, int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;
        *level = 3;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        *level = 4;

        pte = pte_offset_kernel(pmd, address);
        if (pte && !pte_present(*pte))
                pte = NULL;

        return pte;
}

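/*
 * Allocate a new page-table page and populate it with 4K PTEs covering the
 * 2MB area around @address: the entry for @address itself gets @prot, all
 * other entries keep @ref_prot.
 */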
static struct page *
split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
{
        unsigned long addr;
        struct page *base;
        pte_t *pbase;
        int i;

        base = alloc_pages(GFP_KERNEL, 0);
        if (!base)
                return NULL;
        /*
         * page_private is used to track the number of entries in
         * the page table page that have non-standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : ref_prot);
        }
        return base;
}

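/*
 * Flush the given address range from the CPU caches, one cache line at a
 * time, using CLFLUSH.
 */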
void clflush_cache_range(void *addr, int size)
{
        int i;

        for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
                clflush(addr+i);
}

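/*
 * Per-CPU flush handler, run on every CPU via on_each_cpu(): invalidate the
 * TLB and then write back and invalidate the CPU caches.
 */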
static void flush_kernel_map(void *arg)
{
        struct list_head *l = (struct list_head *)arg;
        struct page *pg;

        __flush_tlb_all();

        /* When clflush is available, always use it because it is
           much cheaper than WBINVD. */
        /* clflush is still broken. Disable for now. */
        if (1 || !cpu_has_clflush) {
                wbinvd();
        } else {
                list_for_each_entry(pg, l, lru) {
                        void *addr = page_address(pg);

                        clflush_cache_range(addr, PAGE_SIZE);
                }
        }
}

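/* Run flush_kernel_map() on all CPUs, waiting for them to complete. */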
static inline void flush_map(struct list_head *l)
{
        on_each_cpu(flush_kernel_map, l, 1, 1);
}

static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */

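/*
 * Remember a page-table page on the deferred_pages list so that
 * global_flush_tlb() can revisit (and possibly free) it later.  PG_arch_1
 * guards against adding the same page twice.
 */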
static inline void save_page(struct page *fpage)
{
        if (!test_and_set_bit(PG_arch_1, &fpage->flags))
                list_add(&fpage->lru, &deferred_pages);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
        unsigned long pfn;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t large_pte;

        pgd = pgd_offset_k(address);
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
        pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
        large_pte = pfn_pte(pfn, ref_prot);
        large_pte = pte_mkhuge(large_pte);

        set_pte((pte_t *)pmd, large_pte);
}

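/*
 * Change the protection of the single page mapping @address.  If the page
 * is currently covered by a 2MB mapping and the new protection differs from
 * @ref_prot, the large page is split into 4K PTEs first; once no page in
 * the 2MB area carries non-standard protections any more, the mapping is
 * reverted to a large page.
 */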
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                   pgprot_t ref_prot)
{
        struct page *kpte_page;
        pgprot_t ref_prot2;
        pte_t *kpte;
        int level;

        kpte = lookup_address(address, &level);
        if (!kpte)
                return 0;

        kpte_page = virt_to_page(kpte);
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));
        if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                if (!pte_huge(*kpte)) {
                        set_pte(kpte, pfn_pte(pfn, prot));
                } else {
                        /*
                         * split_large_page will take the reference for this
                         * change_page_attr on the split page.
                         */
                        struct page *split;

                        ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
                        split = split_large_page(address, prot, ref_prot2);
                        if (!split)
                                return -ENOMEM;
                        pgprot_val(ref_prot2) &= ~_PAGE_NX;
                        set_pte(kpte, mk_pte(split, ref_prot2));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else {
                if (!pte_huge(*kpte)) {
                        set_pte(kpte, pfn_pte(pfn, ref_prot));
                        BUG_ON(page_private(kpte_page) == 0);
                        page_private(kpte_page)--;
                } else
                        BUG();
        }

        /* On x86-64 the direct mapping set up at boot does not use 4K pages. */
        BUG_ON(PageReserved(kpte_page));

        save_page(kpte_page);
        if (page_private(kpte_page) == 0)
                revert_page(address, ref_prot);
        return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This also changes the page attributes in
 * the kernel's linear mapping.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0, kernel_map = 0, i;

        if (address >= __START_KERNEL_map &&
                        address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

                address = (unsigned long)__va(__pa(address));
                kernel_map = 1;
        }

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                if (!kernel_map || pte_present(pfn_pte(0, prot))) {
                        err = __change_page_attr(address, pfn, prot,
                                                PAGE_KERNEL);
                        if (err)
                                break;
                }
                /* Handle the kernel mapping too, which aliases part of
                 * lowmem. */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2;

                        addr2 = __START_KERNEL_map + __pa(address);
                        /* Make sure the kernel mappings stay executable. */
                        prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
                        err = __change_page_attr(addr2, pfn, prot2,
                                                 PAGE_KERNEL_EXEC);
                }
        }
        up_write(&init_mm.mmap_sem);

        return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);

        return change_page_attr_addr(addr, numpages, prot);
}
EXPORT_SYMBOL(change_page_attr);

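/*
 * Illustrative usage (not taken from an in-tree caller): a driver that
 * wants an uncached view of a page it has allocated could do
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *
 *	if (page && change_page_attr(page, 1, PAGE_KERNEL_NOCACHE) == 0)
 *		global_flush_tlb();
 *
 * and restore PAGE_KERNEL (followed by another global_flush_tlb()) before
 * freeing the page.
 */

/*
 * Flush the TLB and caches on all CPUs and free page-table pages that were
 * split by change_page_attr() but no longer carry non-standard attributes.
 */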
void global_flush_tlb(void)
{
        struct page *pg, *next;
        struct list_head l;

        /*
         * Take the semaphore for writing, to exclude two contexts doing
         * list_replace_init() in parallel and to exclude new additions
         * to the deferred_pages list:
         */
        down_write(&init_mm.mmap_sem);
        list_replace_init(&deferred_pages, &l);
        up_write(&init_mm.mmap_sem);

        flush_map(&l);

        list_for_each_entry_safe(pg, next, &l, lru) {
                list_del(&pg->lru);
                clear_bit(PG_arch_1, &pg->flags);
                if (page_private(pg) != 0)
                        continue;
                ClearPagePrivate(pg);
                __free_page(pg);
        }
}
EXPORT_SYMBOL(global_flush_tlb);