linux-2.6-omap-h63xx.git: mm/memory.c (blob at commit "[PATCH] unpaged: ZERO_PAGE in VM_UNPAGED")
1 /*
2  *  linux/mm/memory.c
3  *
4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5  */
6
7 /*
8  * demand-loading started 01.12.91 - seems it is high on the list of
9  * things wanted, and it should be easy to implement. - Linus
10  */
11
12 /*
13  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
14  * pages started 02.12.91, seems to work. - Linus.
15  *
16  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
17  * would have taken more than the 6M I have free, but it worked well as
18  * far as I could see.
19  *
20  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
21  */
22
23 /*
24  * Real VM (paging to/from disk) started 18.12.91. Much more work and
25  * thought has to go into this. Oh, well..
26  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
27  *              Found it. Everything seems to work now.
28  * 20.12.91  -  Ok, making the swap-device changeable like the root.
29  */
30
31 /*
32  * 05.04.94  -  Multi-page memory management added for v1.1.
33  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
34  *
35  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
36  *              (Gerhard.Wichert@pdb.siemens.de)
37  *
38  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
39  */
40
41 #include <linux/kernel_stat.h>
42 #include <linux/mm.h>
43 #include <linux/hugetlb.h>
44 #include <linux/mman.h>
45 #include <linux/swap.h>
46 #include <linux/highmem.h>
47 #include <linux/pagemap.h>
48 #include <linux/rmap.h>
49 #include <linux/module.h>
50 #include <linux/init.h>
51
52 #include <asm/pgalloc.h>
53 #include <asm/uaccess.h>
54 #include <asm/tlb.h>
55 #include <asm/tlbflush.h>
56 #include <asm/pgtable.h>
57
58 #include <linux/swapops.h>
59 #include <linux/elf.h>
60
61 #ifndef CONFIG_NEED_MULTIPLE_NODES
62 /* use the per-pgdat data instead for discontigmem - mbligh */
63 unsigned long max_mapnr;
64 struct page *mem_map;
65
66 EXPORT_SYMBOL(max_mapnr);
67 EXPORT_SYMBOL(mem_map);
68 #endif
69
70 unsigned long num_physpages;
71 /*
72  * A number of key systems in x86 including ioremap() rely on the assumption
73  * that high_memory defines the upper bound on direct map memory, the end
74  * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
75  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
76  * and ZONE_HIGHMEM.
77  */
78 void * high_memory;
79 unsigned long vmalloc_earlyreserve;
80
81 EXPORT_SYMBOL(num_physpages);
82 EXPORT_SYMBOL(high_memory);
83 EXPORT_SYMBOL(vmalloc_earlyreserve);
84
85 /*
86  * If a p?d_bad entry is found while walking page tables, report
87  * the error, before resetting entry to p?d_none.  Usually (but
88  * very seldom) called out from the p?d_none_or_clear_bad macros.
89  */
90
91 void pgd_clear_bad(pgd_t *pgd)
92 {
93         pgd_ERROR(*pgd);
94         pgd_clear(pgd);
95 }
96
97 void pud_clear_bad(pud_t *pud)
98 {
99         pud_ERROR(*pud);
100         pud_clear(pud);
101 }
102
103 void pmd_clear_bad(pmd_t *pmd)
104 {
105         pmd_ERROR(*pmd);
106         pmd_clear(pmd);
107 }
108
109 /*
110  * Note: this doesn't free the actual pages themselves. That
111  * has been handled earlier when unmapping all the memory regions.
112  */
113 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
114 {
115         struct page *page = pmd_page(*pmd);
116         pmd_clear(pmd);
117         pte_lock_deinit(page);
118         pte_free_tlb(tlb, page);
119         dec_page_state(nr_page_table_pages);
120         tlb->mm->nr_ptes--;
121 }
122
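/*
 * free_pmd_range() and free_pud_range() below each handle one level of
 * the pagetable tree: free everything below them within [addr, end),
 * then free this level's table itself if the floor/ceiling limits show
 * that no neighbouring vma can still be using it.
 */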
123 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
124                                 unsigned long addr, unsigned long end,
125                                 unsigned long floor, unsigned long ceiling)
126 {
127         pmd_t *pmd;
128         unsigned long next;
129         unsigned long start;
130
131         start = addr;
132         pmd = pmd_offset(pud, addr);
133         do {
134                 next = pmd_addr_end(addr, end);
135                 if (pmd_none_or_clear_bad(pmd))
136                         continue;
137                 free_pte_range(tlb, pmd);
138         } while (pmd++, addr = next, addr != end);
139
140         start &= PUD_MASK;
141         if (start < floor)
142                 return;
143         if (ceiling) {
144                 ceiling &= PUD_MASK;
145                 if (!ceiling)
146                         return;
147         }
148         if (end - 1 > ceiling - 1)
149                 return;
150
151         pmd = pmd_offset(pud, start);
152         pud_clear(pud);
153         pmd_free_tlb(tlb, pmd);
154 }
155
156 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
157                                 unsigned long addr, unsigned long end,
158                                 unsigned long floor, unsigned long ceiling)
159 {
160         pud_t *pud;
161         unsigned long next;
162         unsigned long start;
163
164         start = addr;
165         pud = pud_offset(pgd, addr);
166         do {
167                 next = pud_addr_end(addr, end);
168                 if (pud_none_or_clear_bad(pud))
169                         continue;
170                 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
171         } while (pud++, addr = next, addr != end);
172
173         start &= PGDIR_MASK;
174         if (start < floor)
175                 return;
176         if (ceiling) {
177                 ceiling &= PGDIR_MASK;
178                 if (!ceiling)
179                         return;
180         }
181         if (end - 1 > ceiling - 1)
182                 return;
183
184         pud = pud_offset(pgd, start);
185         pgd_clear(pgd);
186         pud_free_tlb(tlb, pud);
187 }
188
189 /*
190  * This function frees user-level page tables of a process.
191  *
192  * Must be called with pagetable lock held.
193  */
194 void free_pgd_range(struct mmu_gather **tlb,
195                         unsigned long addr, unsigned long end,
196                         unsigned long floor, unsigned long ceiling)
197 {
198         pgd_t *pgd;
199         unsigned long next;
200         unsigned long start;
201
202         /*
203          * The next few lines have given us lots of grief...
204          *
205          * Why are we testing PMD* at this top level?  Because often
206          * there will be no work to do at all, and we'd prefer not to
207          * go all the way down to the bottom just to discover that.
208          *
209          * Why all these "- 1"s?  Because 0 represents both the bottom
210          * of the address space and the top of it (using -1 for the
211          * top wouldn't help much: the masks would do the wrong thing).
212          * The rule is that addr 0 and floor 0 refer to the bottom of
213  * the address space, but end 0 and ceiling 0 refer to the top.
214          * Comparisons need to use "end - 1" and "ceiling - 1" (though
215          * that end 0 case should be mythical).
216          *
217          * Wherever addr is brought up or ceiling brought down, we must
218          * be careful to reject "the opposite 0" before it confuses the
219          * subsequent tests.  But what about where end is brought down
220          * by PMD_SIZE below? no, end can't go down to 0 there.
221          *
222          * Whereas we round start (addr) and ceiling down, by different
223          * masks at different levels, in order to test whether a table
224          * now has no other vmas using it, so can be freed, we don't
225          * bother to round floor or end up - the tests don't need that.
226          */
227
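        /*
         * Concretely: if rounding addr down to a PMD boundary takes it
         * below floor, the pte table for that pmd may still be needed
         * for addresses below floor, so step addr up by PMD_SIZE
         * (bailing out if that wraps to 0) instead of freeing a table
         * that is still in use.
         */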
228         addr &= PMD_MASK;
229         if (addr < floor) {
230                 addr += PMD_SIZE;
231                 if (!addr)
232                         return;
233         }
234         if (ceiling) {
235                 ceiling &= PMD_MASK;
236                 if (!ceiling)
237                         return;
238         }
239         if (end - 1 > ceiling - 1)
240                 end -= PMD_SIZE;
241         if (addr > end - 1)
242                 return;
243
244         start = addr;
245         pgd = pgd_offset((*tlb)->mm, addr);
246         do {
247                 next = pgd_addr_end(addr, end);
248                 if (pgd_none_or_clear_bad(pgd))
249                         continue;
250                 free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
251         } while (pgd++, addr = next, addr != end);
252
253         if (!(*tlb)->fullmm)
254                 flush_tlb_pgtables((*tlb)->mm, start, end);
255 }
256
257 void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
258                 unsigned long floor, unsigned long ceiling)
259 {
260         while (vma) {
261                 struct vm_area_struct *next = vma->vm_next;
262                 unsigned long addr = vma->vm_start;
263
264                 /*
265                  * Hide vma from rmap and vmtruncate before freeing pgtables
266                  */
267                 anon_vma_unlink(vma);
268                 unlink_file_vma(vma);
269
270                 if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
271                         hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
272                                 floor, next? next->vm_start: ceiling);
273                 } else {
274                         /*
275                          * Optimization: gather nearby vmas into one call down
276                          */
277                         while (next && next->vm_start <= vma->vm_end + PMD_SIZE
278                           && !is_hugepage_only_range(vma->vm_mm, next->vm_start,
279                                                         HPAGE_SIZE)) {
280                                 vma = next;
281                                 next = vma->vm_next;
282                                 anon_vma_unlink(vma);
283                                 unlink_file_vma(vma);
284                         }
285                         free_pgd_range(tlb, addr, vma->vm_end,
286                                 floor, next? next->vm_start: ceiling);
287                 }
288                 vma = next;
289         }
290 }
291
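/*
 * Allocate a new pte page and hook it into *pmd.  mm->page_table_lock
 * guards against a racing allocation: if another thread populated the
 * pmd first, the freshly allocated page is simply given back.
 */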
292 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
293 {
294         struct page *new = pte_alloc_one(mm, address);
295         if (!new)
296                 return -ENOMEM;
297
298         pte_lock_init(new);
299         spin_lock(&mm->page_table_lock);
300         if (pmd_present(*pmd)) {        /* Another has populated it */
301                 pte_lock_deinit(new);
302                 pte_free(new);
303         } else {
304                 mm->nr_ptes++;
305                 inc_page_state(nr_page_table_pages);
306                 pmd_populate(mm, pmd, new);
307         }
308         spin_unlock(&mm->page_table_lock);
309         return 0;
310 }
311
312 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
313 {
314         pte_t *new = pte_alloc_one_kernel(&init_mm, address);
315         if (!new)
316                 return -ENOMEM;
317
318         spin_lock(&init_mm.page_table_lock);
319         if (pmd_present(*pmd))          /* Another has populated it */
320                 pte_free_kernel(new);
321         else
322                 pmd_populate_kernel(&init_mm, pmd, new);
323         spin_unlock(&init_mm.page_table_lock);
324         return 0;
325 }
326
327 static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
328 {
329         if (file_rss)
330                 add_mm_counter(mm, file_rss, file_rss);
331         if (anon_rss)
332                 add_mm_counter(mm, anon_rss, anon_rss);
333 }
334
335 /*
336  * This function is called to print an error when a pte in a
337  * !VM_UNPAGED region is found pointing to an invalid pfn (which
338  * is an error).
339  *
340  * The calling function must still handle the error.
341  */
342 void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
343 {
344         printk(KERN_ERR "Bad pte = %08llx, process = %s, "
345                         "vm_flags = %lx, vaddr = %lx\n",
346                 (long long)pte_val(pte),
347                 (vma->vm_mm == current->mm ? current->comm : "???"),
348                 vma->vm_flags, vaddr);
349         dump_stack();
350 }
351
352 /*
353  * page_is_anon applies strict checks for an anonymous page belonging to
354  * this vma at this address.  It is used on VM_UNPAGED vmas, which are
355  * usually populated with shared originals (which must not be counted),
356  * but occasionally contain private COWed copies (when !VM_SHARED, or
357  * perhaps via ptrace when VM_SHARED).  An mmap of /dev/mem might window
358  * free pages, pages from other processes, or from other parts of this process:
359  * it's tricky, but try not to be deceived by foreign anonymous pages.
360  */
361 static inline int page_is_anon(struct page *page,
362                         struct vm_area_struct *vma, unsigned long addr)
363 {
364         return page && PageAnon(page) && page_mapped(page) &&
365                 page_address_in_vma(page, vma) == addr;
366 }
367
368 /*
369  * Copy one vm_area from one task to the other. Assumes that the page
370  * tables already present in the new task have been cleared in the whole
371  * range covered by this vma.
372  */
373
374 static inline void
375 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
376                 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
377                 unsigned long addr, int *rss)
378 {
379         unsigned long vm_flags = vma->vm_flags;
380         pte_t pte = *src_pte;
381         struct page *page;
382         unsigned long pfn;
383
384         /* pte contains position in swap or file, so copy. */
385         if (unlikely(!pte_present(pte))) {
386                 if (!pte_file(pte)) {
387                         swap_duplicate(pte_to_swp_entry(pte));
388                         /* make sure dst_mm is on swapoff's mmlist. */
389                         if (unlikely(list_empty(&dst_mm->mmlist))) {
390                                 spin_lock(&mmlist_lock);
391                                 if (list_empty(&dst_mm->mmlist))
392                                         list_add(&dst_mm->mmlist,
393                                                  &src_mm->mmlist);
394                                 spin_unlock(&mmlist_lock);
395                         }
396                 }
397                 goto out_set_pte;
398         }
399
400         pfn = pte_pfn(pte);
401         page = pfn_valid(pfn)? pfn_to_page(pfn): NULL;
402
403         if (unlikely(vm_flags & VM_UNPAGED))
404                 if (!page_is_anon(page, vma, addr))
405                         goto out_set_pte;
406
407         /*
408          * If the pte points outside of valid memory but
409          * the region is not VM_UNPAGED, we have a problem.
410          */
411         if (unlikely(!page)) {
412                 print_bad_pte(vma, pte, addr);
413                 goto out_set_pte; /* try to do something sane */
414         }
415
416         /*
417          * If it's a COW mapping, write protect it both
418          * in the parent and the child
419          */
420         if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) {
421                 ptep_set_wrprotect(src_mm, addr, src_pte);
422                 pte = *src_pte;
423         }
424
425         /*
426          * If it's a shared mapping, mark it clean in
427          * the child
428          */
429         if (vm_flags & VM_SHARED)
430                 pte = pte_mkclean(pte);
431         pte = pte_mkold(pte);
432         get_page(page);
433         page_dup_rmap(page);
434         rss[!!PageAnon(page)]++;
435
436 out_set_pte:
437         set_pte_at(dst_mm, addr, dst_pte, pte);
438 }
439
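/*
 * Copy one pte table's worth of entries from src to dst, accumulating
 * rss counts locally and periodically dropping both pte locks (and
 * rescheduling) so that they are never held for too long; loop back to
 * "again" until the whole range has been copied.
 */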
440 static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
441                 pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
442                 unsigned long addr, unsigned long end)
443 {
444         pte_t *src_pte, *dst_pte;
445         spinlock_t *src_ptl, *dst_ptl;
446         int progress = 0;
447         int rss[2];
448
449 again:
450         rss[1] = rss[0] = 0;
451         dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
452         if (!dst_pte)
453                 return -ENOMEM;
454         src_pte = pte_offset_map_nested(src_pmd, addr);
455         src_ptl = pte_lockptr(src_mm, src_pmd);
456         spin_lock(src_ptl);
457
458         do {
459                 /*
460                  * We are holding two locks at this point - either of them
461                  * could generate latencies in another task on another CPU.
462                  */
463                 if (progress >= 32) {
464                         progress = 0;
465                         if (need_resched() ||
466                             need_lockbreak(src_ptl) ||
467                             need_lockbreak(dst_ptl))
468                                 break;
469                 }
470                 if (pte_none(*src_pte)) {
471                         progress++;
472                         continue;
473                 }
474                 copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
475                 progress += 8;
476         } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
477
478         spin_unlock(src_ptl);
479         pte_unmap_nested(src_pte - 1);
480         add_mm_rss(dst_mm, rss[0], rss[1]);
481         pte_unmap_unlock(dst_pte - 1, dst_ptl);
482         cond_resched();
483         if (addr != end)
484                 goto again;
485         return 0;
486 }
487
488 static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
489                 pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
490                 unsigned long addr, unsigned long end)
491 {
492         pmd_t *src_pmd, *dst_pmd;
493         unsigned long next;
494
495         dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
496         if (!dst_pmd)
497                 return -ENOMEM;
498         src_pmd = pmd_offset(src_pud, addr);
499         do {
500                 next = pmd_addr_end(addr, end);
501                 if (pmd_none_or_clear_bad(src_pmd))
502                         continue;
503                 if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
504                                                 vma, addr, next))
505                         return -ENOMEM;
506         } while (dst_pmd++, src_pmd++, addr = next, addr != end);
507         return 0;
508 }
509
510 static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
511                 pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
512                 unsigned long addr, unsigned long end)
513 {
514         pud_t *src_pud, *dst_pud;
515         unsigned long next;
516
517         dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
518         if (!dst_pud)
519                 return -ENOMEM;
520         src_pud = pud_offset(src_pgd, addr);
521         do {
522                 next = pud_addr_end(addr, end);
523                 if (pud_none_or_clear_bad(src_pud))
524                         continue;
525                 if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
526                                                 vma, addr, next))
527                         return -ENOMEM;
528         } while (dst_pud++, src_pud++, addr = next, addr != end);
529         return 0;
530 }
531
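/*
 * copy_page_range() is called at fork to duplicate one vma's page table
 * entries from the parent mm into the child mm.
 */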
532 int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
533                 struct vm_area_struct *vma)
534 {
535         pgd_t *src_pgd, *dst_pgd;
536         unsigned long next;
537         unsigned long addr = vma->vm_start;
538         unsigned long end = vma->vm_end;
539
540         /*
541          * Don't copy ptes where a page fault will fill them correctly.
542          * Fork becomes much lighter when there are big shared or private
543          * readonly mappings. The tradeoff is that copy_page_range is more
544          * efficient than faulting, so we pay with minor faults later instead.
545          */
546         if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_UNPAGED))) {
547                 if (!vma->anon_vma)
548                         return 0;
549         }
550
551         if (is_vm_hugetlb_page(vma))
552                 return copy_hugetlb_page_range(dst_mm, src_mm, vma);
553
554         dst_pgd = pgd_offset(dst_mm, addr);
555         src_pgd = pgd_offset(src_mm, addr);
556         do {
557                 next = pgd_addr_end(addr, end);
558                 if (pgd_none_or_clear_bad(src_pgd))
559                         continue;
560                 if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
561                                                 vma, addr, next))
562                         return -ENOMEM;
563         } while (dst_pgd++, src_pgd++, addr = next, addr != end);
564         return 0;
565 }
566
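/*
 * zap_pte_range() does the real work of unmapping: clear each pte,
 * transfer the pte's dirty/accessed bits to the page, adjust the rss
 * counts, free swap or release the page, and charge the work done
 * against *zap_work so that unmap_vmas() knows when to take a break.
 */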
567 static unsigned long zap_pte_range(struct mmu_gather *tlb,
568                                 struct vm_area_struct *vma, pmd_t *pmd,
569                                 unsigned long addr, unsigned long end,
570                                 long *zap_work, struct zap_details *details)
571 {
572         struct mm_struct *mm = tlb->mm;
573         pte_t *pte;
574         spinlock_t *ptl;
575         int file_rss = 0;
576         int anon_rss = 0;
577
578         pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
579         do {
580                 pte_t ptent = *pte;
581                 if (pte_none(ptent)) {
582                         (*zap_work)--;
583                         continue;
584                 }
585                 if (pte_present(ptent)) {
586                         struct page *page;
587                         unsigned long pfn;
588
589                         (*zap_work) -= PAGE_SIZE;
590
591                         pfn = pte_pfn(ptent);
592                         page = pfn_valid(pfn)? pfn_to_page(pfn): NULL;
593
594                         if (unlikely(vma->vm_flags & VM_UNPAGED)) {
595                                 if (!page_is_anon(page, vma, addr))
596                                         page = NULL;
597                         } else if (unlikely(!page))
598                                 print_bad_pte(vma, ptent, addr);
599
600                         if (unlikely(details) && page) {
601                                 /*
602                                  * unmap_shared_mapping_pages() wants to
603                                  * invalidate cache without truncating:
604                                  * unmap shared but keep private pages.
605                                  */
606                                 if (details->check_mapping &&
607                                     details->check_mapping != page->mapping)
608                                         continue;
609                                 /*
610                                  * Each page->index must be checked when
611                                  * invalidating or truncating nonlinear.
612                                  */
613                                 if (details->nonlinear_vma &&
614                                     (page->index < details->first_index ||
615                                      page->index > details->last_index))
616                                         continue;
617                         }
618                         ptent = ptep_get_and_clear_full(mm, addr, pte,
619                                                         tlb->fullmm);
620                         tlb_remove_tlb_entry(tlb, pte, addr);
621                         if (unlikely(!page))
622                                 continue;
623                         if (unlikely(details) && details->nonlinear_vma
624                             && linear_page_index(details->nonlinear_vma,
625                                                 addr) != page->index)
626                                 set_pte_at(mm, addr, pte,
627                                            pgoff_to_pte(page->index));
628                         if (PageAnon(page))
629                                 anon_rss--;
630                         else {
631                                 if (pte_dirty(ptent))
632                                         set_page_dirty(page);
633                                 if (pte_young(ptent))
634                                         mark_page_accessed(page);
635                                 file_rss--;
636                         }
637                         page_remove_rmap(page);
638                         tlb_remove_page(tlb, page);
639                         continue;
640                 }
641                 /*
642                  * If details->check_mapping, we leave swap entries;
643                  * if details->nonlinear_vma, we leave file entries.
644                  */
645                 if (unlikely(details))
646                         continue;
647                 if (!pte_file(ptent))
648                         free_swap_and_cache(pte_to_swp_entry(ptent));
649                 pte_clear_full(mm, addr, pte, tlb->fullmm);
650         } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
651
652         add_mm_rss(mm, file_rss, anon_rss);
653         pte_unmap_unlock(pte - 1, ptl);
654
655         return addr;
656 }
657
658 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
659                                 struct vm_area_struct *vma, pud_t *pud,
660                                 unsigned long addr, unsigned long end,
661                                 long *zap_work, struct zap_details *details)
662 {
663         pmd_t *pmd;
664         unsigned long next;
665
666         pmd = pmd_offset(pud, addr);
667         do {
668                 next = pmd_addr_end(addr, end);
669                 if (pmd_none_or_clear_bad(pmd)) {
670                         (*zap_work)--;
671                         continue;
672                 }
673                 next = zap_pte_range(tlb, vma, pmd, addr, next,
674                                                 zap_work, details);
675         } while (pmd++, addr = next, (addr != end && *zap_work > 0));
676
677         return addr;
678 }
679
680 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
681                                 struct vm_area_struct *vma, pgd_t *pgd,
682                                 unsigned long addr, unsigned long end,
683                                 long *zap_work, struct zap_details *details)
684 {
685         pud_t *pud;
686         unsigned long next;
687
688         pud = pud_offset(pgd, addr);
689         do {
690                 next = pud_addr_end(addr, end);
691                 if (pud_none_or_clear_bad(pud)) {
692                         (*zap_work)--;
693                         continue;
694                 }
695                 next = zap_pmd_range(tlb, vma, pud, addr, next,
696                                                 zap_work, details);
697         } while (pud++, addr = next, (addr != end && *zap_work > 0));
698
699         return addr;
700 }
701
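/*
 * unmap_page_range() walks one vma's page tables through the pgd, pud,
 * pmd and pte levels, zapping entries until the range is exhausted or
 * the *zap_work budget runs out.
 */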
702 static unsigned long unmap_page_range(struct mmu_gather *tlb,
703                                 struct vm_area_struct *vma,
704                                 unsigned long addr, unsigned long end,
705                                 long *zap_work, struct zap_details *details)
706 {
707         pgd_t *pgd;
708         unsigned long next;
709
710         if (details && !details->check_mapping && !details->nonlinear_vma)
711                 details = NULL;
712
713         BUG_ON(addr >= end);
714         tlb_start_vma(tlb, vma);
715         pgd = pgd_offset(vma->vm_mm, addr);
716         do {
717                 next = pgd_addr_end(addr, end);
718                 if (pgd_none_or_clear_bad(pgd)) {
719                         (*zap_work)--;
720                         continue;
721                 }
722                 next = zap_pud_range(tlb, vma, pgd, addr, next,
723                                                 zap_work, details);
724         } while (pgd++, addr = next, (addr != end && *zap_work > 0));
725         tlb_end_vma(tlb, vma);
726
727         return addr;
728 }
729
730 #ifdef CONFIG_PREEMPT
731 # define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)
732 #else
733 /* No preempt: go for improved straight-line efficiency */
734 # define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
735 #endif
736
737 /**
738  * unmap_vmas - unmap a range of memory covered by a list of vma's
739  * @tlbp: address of the caller's struct mmu_gather
740  * @vma: the starting vma
741  * @start_addr: virtual address at which to start unmapping
742  * @end_addr: virtual address at which to end unmapping
743  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
744  * @details: details of nonlinear truncation or shared cache invalidation
745  *
746  * Returns the end address of the unmapping (restart addr if interrupted).
747  *
748  * Unmap all pages in the vma list.
749  *
750  * We aim to not hold locks for too long (for scheduling latency reasons).
751  * So zap pages in ZAP_BLOCK_SIZE byte chunks.  This means we need to
752  * return the ending mmu_gather to the caller.
753  *
754  * Only addresses between `start' and `end' will be unmapped.
755  *
756  * The VMA list must be sorted in ascending virtual address order.
757  *
758  * unmap_vmas() assumes that the caller will flush the whole unmapped address
759  * range after unmap_vmas() returns.  So the only responsibility here is to
760  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
761  * drops the lock and schedules.
762  */
763 unsigned long unmap_vmas(struct mmu_gather **tlbp,
764                 struct vm_area_struct *vma, unsigned long start_addr,
765                 unsigned long end_addr, unsigned long *nr_accounted,
766                 struct zap_details *details)
767 {
768         long zap_work = ZAP_BLOCK_SIZE;
769         unsigned long tlb_start = 0;    /* For tlb_finish_mmu */
770         int tlb_start_valid = 0;
771         unsigned long start = start_addr;
772         spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
773         int fullmm = (*tlbp)->fullmm;
774
775         for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
776                 unsigned long end;
777
778                 start = max(vma->vm_start, start_addr);
779                 if (start >= vma->vm_end)
780                         continue;
781                 end = min(vma->vm_end, end_addr);
782                 if (end <= vma->vm_start)
783                         continue;
784
785                 if (vma->vm_flags & VM_ACCOUNT)
786                         *nr_accounted += (end - start) >> PAGE_SHIFT;
787
788                 while (start != end) {
789                         if (!tlb_start_valid) {
790                                 tlb_start = start;
791                                 tlb_start_valid = 1;
792                         }
793
794                         if (unlikely(is_vm_hugetlb_page(vma))) {
795                                 unmap_hugepage_range(vma, start, end);
796                                 zap_work -= (end - start) /
797                                                 (HPAGE_SIZE / PAGE_SIZE);
798                                 start = end;
799                         } else
800                                 start = unmap_page_range(*tlbp, vma,
801                                                 start, end, &zap_work, details);
802
803                         if (zap_work > 0) {
804                                 BUG_ON(start != end);
805                                 break;
806                         }
807
808                         tlb_finish_mmu(*tlbp, tlb_start, start);
809
810                         if (need_resched() ||
811                                 (i_mmap_lock && need_lockbreak(i_mmap_lock))) {
812                                 if (i_mmap_lock) {
813                                         *tlbp = NULL;
814                                         goto out;
815                                 }
816                                 cond_resched();
817                         }
818
819                         *tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
820                         tlb_start_valid = 0;
821                         zap_work = ZAP_BLOCK_SIZE;
822                 }
823         }
824 out:
825         return start;   /* which is now the end (or restart) address */
826 }
827
828 /**
829  * zap_page_range - remove user pages in a given range
830  * @vma: vm_area_struct holding the applicable pages
831  * @address: starting address of pages to zap
832  * @size: number of bytes to zap
833  * @details: details of nonlinear truncation or shared cache invalidation
834  */
835 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
836                 unsigned long size, struct zap_details *details)
837 {
838         struct mm_struct *mm = vma->vm_mm;
839         struct mmu_gather *tlb;
840         unsigned long end = address + size;
841         unsigned long nr_accounted = 0;
842
843         lru_add_drain();
844         tlb = tlb_gather_mmu(mm, 0);
845         update_hiwater_rss(mm);
846         end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
847         if (tlb)
848                 tlb_finish_mmu(tlb, address, end);
849         return end;
850 }
851
852 /*
853  * Do a quick page-table lookup for a single page.
854  */
855 struct page *follow_page(struct mm_struct *mm, unsigned long address,
856                         unsigned int flags)
857 {
858         pgd_t *pgd;
859         pud_t *pud;
860         pmd_t *pmd;
861         pte_t *ptep, pte;
862         spinlock_t *ptl;
863         unsigned long pfn;
864         struct page *page;
865
866         page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
867         if (!IS_ERR(page)) {
868                 BUG_ON(flags & FOLL_GET);
869                 goto out;
870         }
871
872         page = NULL;
873         pgd = pgd_offset(mm, address);
874         if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
875                 goto no_page_table;
876
877         pud = pud_offset(pgd, address);
878         if (pud_none(*pud) || unlikely(pud_bad(*pud)))
879                 goto no_page_table;
880         
881         pmd = pmd_offset(pud, address);
882         if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
883                 goto no_page_table;
884
885         if (pmd_huge(*pmd)) {
886                 BUG_ON(flags & FOLL_GET);
887                 page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
888                 goto out;
889         }
890
891         ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
892         if (!ptep)
893                 goto out;
894
895         pte = *ptep;
896         if (!pte_present(pte))
897                 goto unlock;
898         if ((flags & FOLL_WRITE) && !pte_write(pte))
899                 goto unlock;
900         pfn = pte_pfn(pte);
901         if (!pfn_valid(pfn))
902                 goto unlock;
903
904         page = pfn_to_page(pfn);
905         if (flags & FOLL_GET)
906                 get_page(page);
907         if (flags & FOLL_TOUCH) {
908                 if ((flags & FOLL_WRITE) &&
909                     !pte_dirty(pte) && !PageDirty(page))
910                         set_page_dirty(page);
911                 mark_page_accessed(page);
912         }
913 unlock:
914         pte_unmap_unlock(ptep, ptl);
915 out:
916         return page;
917
918 no_page_table:
919         /*
920          * When core dumping an enormous anonymous area that nobody
921          * has touched so far, we don't want to allocate page tables.
922          */
923         if (flags & FOLL_ANON) {
924                 page = ZERO_PAGE(address);
925                 if (flags & FOLL_GET)
926                         get_page(page);
927                 BUG_ON(flags & FOLL_WRITE);
928         }
929         return page;
930 }
931
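/*
 * get_user_pages() - fault in and optionally pin pages of another (or the
 * current) process.
 * @tsk, @mm: the task and mm whose pages are wanted
 * @start, @len: starting user virtual address and number of pages
 * @write, @force: the access intended; see the permission check below
 * @pages: if non-NULL, filled with the pages, each referenced via get_page()
 * @vmas: if non-NULL, filled with the vma covering each page
 *
 * Returns the number of pages handled, or a negative errno if no progress
 * could be made.  The caller must hold mmap_sem.
 */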
932 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
933                 unsigned long start, int len, int write, int force,
934                 struct page **pages, struct vm_area_struct **vmas)
935 {
936         int i;
937         unsigned int vm_flags;
938
939         /* 
940          * Require read or write permissions.
941          * If 'force' is set, we only require the "MAY" flags.
942          */
943         vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
944         vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
945         i = 0;
946
947         do {
948                 struct vm_area_struct *vma;
949                 unsigned int foll_flags;
950
951                 vma = find_extend_vma(mm, start);
952                 if (!vma && in_gate_area(tsk, start)) {
953                         unsigned long pg = start & PAGE_MASK;
954                         struct vm_area_struct *gate_vma = get_gate_vma(tsk);
955                         pgd_t *pgd;
956                         pud_t *pud;
957                         pmd_t *pmd;
958                         pte_t *pte;
959                         if (write) /* user gate pages are read-only */
960                                 return i ? : -EFAULT;
961                         if (pg > TASK_SIZE)
962                                 pgd = pgd_offset_k(pg);
963                         else
964                                 pgd = pgd_offset_gate(mm, pg);
965                         BUG_ON(pgd_none(*pgd));
966                         pud = pud_offset(pgd, pg);
967                         BUG_ON(pud_none(*pud));
968                         pmd = pmd_offset(pud, pg);
969                         if (pmd_none(*pmd))
970                                 return i ? : -EFAULT;
971                         pte = pte_offset_map(pmd, pg);
972                         if (pte_none(*pte)) {
973                                 pte_unmap(pte);
974                                 return i ? : -EFAULT;
975                         }
976                         if (pages) {
977                                 pages[i] = pte_page(*pte);
978                                 get_page(pages[i]);
979                         }
980                         pte_unmap(pte);
981                         if (vmas)
982                                 vmas[i] = gate_vma;
983                         i++;
984                         start += PAGE_SIZE;
985                         len--;
986                         continue;
987                 }
988
989                 if (!vma || (vma->vm_flags & VM_IO)
990                                 || !(vm_flags & vma->vm_flags))
991                         return i ? : -EFAULT;
992
993                 if (is_vm_hugetlb_page(vma)) {
994                         i = follow_hugetlb_page(mm, vma, pages, vmas,
995                                                 &start, &len, i);
996                         continue;
997                 }
998
999                 foll_flags = FOLL_TOUCH;
1000                 if (pages)
1001                         foll_flags |= FOLL_GET;
1002                 if (!write && !(vma->vm_flags & VM_LOCKED) &&
1003                     (!vma->vm_ops || !vma->vm_ops->nopage))
1004                         foll_flags |= FOLL_ANON;
1005
1006                 do {
1007                         struct page *page;
1008
1009                         if (write)
1010                                 foll_flags |= FOLL_WRITE;
1011
1012                         cond_resched();
1013                         while (!(page = follow_page(mm, start, foll_flags))) {
1014                                 int ret;
1015                                 ret = __handle_mm_fault(mm, vma, start,
1016                                                 foll_flags & FOLL_WRITE);
1017                                 /*
1018                                  * The VM_FAULT_WRITE bit tells us that do_wp_page has
1019                                  * broken COW when necessary, even if maybe_mkwrite
1020                                  * decided not to set pte_write. We can thus safely do
1021                                  * subsequent page lookups as if they were reads.
1022                                  */
1023                                 if (ret & VM_FAULT_WRITE)
1024                                         foll_flags &= ~FOLL_WRITE;
1025                                 
1026                                 switch (ret & ~VM_FAULT_WRITE) {
1027                                 case VM_FAULT_MINOR:
1028                                         tsk->min_flt++;
1029                                         break;
1030                                 case VM_FAULT_MAJOR:
1031                                         tsk->maj_flt++;
1032                                         break;
1033                                 case VM_FAULT_SIGBUS:
1034                                         return i ? i : -EFAULT;
1035                                 case VM_FAULT_OOM:
1036                                         return i ? i : -ENOMEM;
1037                                 default:
1038                                         BUG();
1039                                 }
1040                         }
1041                         if (pages) {
1042                                 pages[i] = page;
1043                                 flush_dcache_page(page);
1044                         }
1045                         if (vmas)
1046                                 vmas[i] = vma;
1047                         i++;
1048                         start += PAGE_SIZE;
1049                         len--;
1050                 } while (len && start < vma->vm_end);
1051         } while (len);
1052         return i;
1053 }
1054 EXPORT_SYMBOL(get_user_pages);
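/*
 * Typical use, as an illustrative sketch rather than code from this file:
 * pin one page of the current process for I/O and release it afterwards
 * (addr, ret and page are the caller's own, hypothetical, variables; the
 * 1, 1, 0 arguments are len, write and force respectively).
 *
 *	down_read(&current->mm->mmap_sem);
 *	ret = get_user_pages(current, current->mm, addr & PAGE_MASK,
 *			     1, 1, 0, &page, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (ret == 1) {
 *		... use the page ...
 *		page_cache_release(page);
 *	}
 */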
1055
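/*
 * The zeromap_* family below fills a range with read-only mappings of
 * ZERO_PAGE: reads see zeroes, and a later write to a writable mapping
 * gets its own page via the COW path in do_wp_page.
 */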
1056 static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
1057                         unsigned long addr, unsigned long end, pgprot_t prot)
1058 {
1059         pte_t *pte;
1060         spinlock_t *ptl;
1061
1062         pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
1063         if (!pte)
1064                 return -ENOMEM;
1065         do {
1066                 struct page *page = ZERO_PAGE(addr);
1067                 pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
1068                 page_cache_get(page);
1069                 page_add_file_rmap(page);
1070                 inc_mm_counter(mm, file_rss);
1071                 BUG_ON(!pte_none(*pte));
1072                 set_pte_at(mm, addr, pte, zero_pte);
1073         } while (pte++, addr += PAGE_SIZE, addr != end);
1074         pte_unmap_unlock(pte - 1, ptl);
1075         return 0;
1076 }
1077
1078 static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
1079                         unsigned long addr, unsigned long end, pgprot_t prot)
1080 {
1081         pmd_t *pmd;
1082         unsigned long next;
1083
1084         pmd = pmd_alloc(mm, pud, addr);
1085         if (!pmd)
1086                 return -ENOMEM;
1087         do {
1088                 next = pmd_addr_end(addr, end);
1089                 if (zeromap_pte_range(mm, pmd, addr, next, prot))
1090                         return -ENOMEM;
1091         } while (pmd++, addr = next, addr != end);
1092         return 0;
1093 }
1094
1095 static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
1096                         unsigned long addr, unsigned long end, pgprot_t prot)
1097 {
1098         pud_t *pud;
1099         unsigned long next;
1100
1101         pud = pud_alloc(mm, pgd, addr);
1102         if (!pud)
1103                 return -ENOMEM;
1104         do {
1105                 next = pud_addr_end(addr, end);
1106                 if (zeromap_pmd_range(mm, pud, addr, next, prot))
1107                         return -ENOMEM;
1108         } while (pud++, addr = next, addr != end);
1109         return 0;
1110 }
1111
1112 int zeromap_page_range(struct vm_area_struct *vma,
1113                         unsigned long addr, unsigned long size, pgprot_t prot)
1114 {
1115         pgd_t *pgd;
1116         unsigned long next;
1117         unsigned long end = addr + size;
1118         struct mm_struct *mm = vma->vm_mm;
1119         int err;
1120
1121         BUG_ON(addr >= end);
1122         pgd = pgd_offset(mm, addr);
1123         flush_cache_range(vma, addr, end);
1124         do {
1125                 next = pgd_addr_end(addr, end);
1126                 err = zeromap_pud_range(mm, pgd, addr, next, prot);
1127                 if (err)
1128                         break;
1129         } while (pgd++, addr = next, addr != end);
1130         return err;
1131 }
1132
1133 /*
1134  * Maps a range of physical memory into the requested pages. The old
1135  * mappings are removed. Any reference to a nonexistent page results
1136  * in a null mapping (currently treated as "copy-on-access").
1137  */
1138 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
1139                         unsigned long addr, unsigned long end,
1140                         unsigned long pfn, pgprot_t prot)
1141 {
1142         pte_t *pte;
1143         spinlock_t *ptl;
1144
1145         pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
1146         if (!pte)
1147                 return -ENOMEM;
1148         do {
1149                 BUG_ON(!pte_none(*pte));
1150                 set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
1151                 pfn++;
1152         } while (pte++, addr += PAGE_SIZE, addr != end);
1153         pte_unmap_unlock(pte - 1, ptl);
1154         return 0;
1155 }
1156
1157 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
1158                         unsigned long addr, unsigned long end,
1159                         unsigned long pfn, pgprot_t prot)
1160 {
1161         pmd_t *pmd;
1162         unsigned long next;
1163
1164         pfn -= addr >> PAGE_SHIFT;
1165         pmd = pmd_alloc(mm, pud, addr);
1166         if (!pmd)
1167                 return -ENOMEM;
1168         do {
1169                 next = pmd_addr_end(addr, end);
1170                 if (remap_pte_range(mm, pmd, addr, next,
1171                                 pfn + (addr >> PAGE_SHIFT), prot))
1172                         return -ENOMEM;
1173         } while (pmd++, addr = next, addr != end);
1174         return 0;
1175 }
1176
1177 static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
1178                         unsigned long addr, unsigned long end,
1179                         unsigned long pfn, pgprot_t prot)
1180 {
1181         pud_t *pud;
1182         unsigned long next;
1183
1184         pfn -= addr >> PAGE_SHIFT;
1185         pud = pud_alloc(mm, pgd, addr);
1186         if (!pud)
1187                 return -ENOMEM;
1188         do {
1189                 next = pud_addr_end(addr, end);
1190                 if (remap_pmd_range(mm, pud, addr, next,
1191                                 pfn + (addr >> PAGE_SHIFT), prot))
1192                         return -ENOMEM;
1193         } while (pud++, addr = next, addr != end);
1194         return 0;
1195 }
1196
1197 /*  Note: this is only safe if the mm semaphore is held when called. */
1198 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1199                     unsigned long pfn, unsigned long size, pgprot_t prot)
1200 {
1201         pgd_t *pgd;
1202         unsigned long next;
1203         unsigned long end = addr + PAGE_ALIGN(size);
1204         struct mm_struct *mm = vma->vm_mm;
1205         int err;
1206
1207         /*
1208          * Physically remapped pages are special. Tell the
1209          * rest of the world about it:
1210          *   VM_IO tells people not to look at these pages
1211          *      (accesses can have side effects).
1212          *   VM_RESERVED is specified all over the place, because
1213          *      in 2.4 it kept swapout's vma scan off this vma; but
1214          *      in 2.6 the LRU scan won't even find its pages, so this
1215          *      flag means no more than count its pages in reserved_vm,
1216  *      and omit it from core dump, even when VM_IO is turned off.
1217          *   VM_UNPAGED tells the core MM not to "manage" these pages
1218          *      (e.g. refcount, mapcount, try to swap them out): in
1219          *      particular, zap_pte_range does not try to free them.
1220          */
1221         vma->vm_flags |= VM_IO | VM_RESERVED | VM_UNPAGED;
1222
1223         BUG_ON(addr >= end);
1224         pfn -= addr >> PAGE_SHIFT;
1225         pgd = pgd_offset(mm, addr);
1226         flush_cache_range(vma, addr, end);
1227         do {
1228                 next = pgd_addr_end(addr, end);
1229                 err = remap_pud_range(mm, pgd, addr, next,
1230                                 pfn + (addr >> PAGE_SHIFT), prot);
1231                 if (err)
1232                         break;
1233         } while (pgd++, addr = next, addr != end);
1234         return err;
1235 }
1236 EXPORT_SYMBOL(remap_pfn_range);
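/*
 * Typical use, as an illustrative sketch rather than code from this file:
 * a character driver's ->mmap() handler exposing a physical region to
 * userspace (mydrv_mmap and mydrv_phys_base are hypothetical names).
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = mydrv_phys_base >> PAGE_SHIFT;
 *
 *		return remap_pfn_range(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */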
1237
1238 /*
1239  * handle_pte_fault chooses page fault handler according to an entry
1240  * which was read non-atomically.  Before making any commitment, on
1241  * those architectures or configurations (e.g. i386 with PAE) which
1242  * might give a mix of unmatched parts, do_swap_page and do_file_page
1243  * must check under lock before unmapping the pte and proceeding
1244  * (but do_wp_page is only called after already making such a check;
1245  * and do_anonymous_page and do_no_page can safely check later on).
1246  */
1247 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
1248                                 pte_t *page_table, pte_t orig_pte)
1249 {
1250         int same = 1;
1251 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
1252         if (sizeof(pte_t) > sizeof(unsigned long)) {
1253                 spinlock_t *ptl = pte_lockptr(mm, pmd);
1254                 spin_lock(ptl);
1255                 same = pte_same(*page_table, orig_pte);
1256                 spin_unlock(ptl);
1257         }
1258 #endif
1259         pte_unmap(page_table);
1260         return same;
1261 }
1262
1263 /*
1264  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
1265  * servicing faults for write access.  In the normal case, we always want
1266  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
1267  * that do not have writing enabled, when used by access_process_vm.
1268  */
1269 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
1270 {
1271         if (likely(vma->vm_flags & VM_WRITE))
1272                 pte = pte_mkwrite(pte);
1273         return pte;
1274 }
1275
1276 /*
1277  * This routine handles present pages, when users try to write
1278  * to a shared page. It is done by copying the page to a new address
1279  * and decrementing the shared-page counter for the old page.
1280  *
1281  * Note that this routine assumes that the protection checks have been
1282  * done by the caller (the low-level page fault routine in most cases).
1283  * Thus we can safely just mark it writable once we've done any necessary
1284  * COW.
1285  *
1286  * We also mark the page dirty at this point even though the page will
1287  * change only once the write actually happens. This avoids a few races,
1288  * and potentially makes it more efficient.
1289  *
1290  * We enter with non-exclusive mmap_sem (to exclude vma changes,
1291  * but allow concurrent faults), with pte both mapped and locked.
1292  * We return with mmap_sem still held, but pte unmapped and unlocked.
1293  */
1294 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
1295                 unsigned long address, pte_t *page_table, pmd_t *pmd,
1296                 spinlock_t *ptl, pte_t orig_pte)
1297 {
1298         struct page *old_page, *src_page, *new_page;
1299         unsigned long pfn = pte_pfn(orig_pte);
1300         pte_t entry;
1301         int ret = VM_FAULT_MINOR;
1302
1303         if (unlikely(!pfn_valid(pfn))) {
1304                 /*
1305                  * Page table corrupted: show pte and kill process.
1306                  * Or it's an attempt to COW an out-of-map VM_UNPAGED
1307                  * entry, which copy_user_highpage does not support.
1308                  */
1309                 print_bad_pte(vma, orig_pte, address);
1310                 ret = VM_FAULT_OOM;
1311                 goto unlock;
1312         }
1313         old_page = pfn_to_page(pfn);
1314         src_page = old_page;
1315
1316         if (unlikely(vma->vm_flags & VM_UNPAGED))
1317                 if (!page_is_anon(old_page, vma, address)) {
1318                         old_page = NULL;
1319                         goto gotten;
1320                 }
1321
1322         if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
1323                 int reuse = can_share_swap_page(old_page);
1324                 unlock_page(old_page);
1325                 if (reuse) {
1326                         flush_cache_page(vma, address, pfn);
1327                         entry = pte_mkyoung(orig_pte);
1328                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1329                         ptep_set_access_flags(vma, address, page_table, entry, 1);
1330                         update_mmu_cache(vma, address, entry);
1331                         lazy_mmu_prot_update(entry);
1332                         ret |= VM_FAULT_WRITE;
1333                         goto unlock;
1334                 }
1335         }
1336
1337         /*
1338          * Ok, we need to copy. Oh, well..
1339          */
1340         page_cache_get(old_page);
1341 gotten:
1342         pte_unmap_unlock(page_table, ptl);
1343
1344         if (unlikely(anon_vma_prepare(vma)))
1345                 goto oom;
1346         if (src_page == ZERO_PAGE(address)) {
1347                 new_page = alloc_zeroed_user_highpage(vma, address);
1348                 if (!new_page)
1349                         goto oom;
1350         } else {
1351                 new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
1352                 if (!new_page)
1353                         goto oom;
1354                 copy_user_highpage(new_page, src_page, address);
1355         }
1356
1357         /*
1358          * Re-check the pte - we dropped the lock
1359          */
1360         page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
1361         if (likely(pte_same(*page_table, orig_pte))) {
1362                 if (old_page) {
1363                         page_remove_rmap(old_page);
1364                         if (!PageAnon(old_page)) {
1365                                 dec_mm_counter(mm, file_rss);
1366                                 inc_mm_counter(mm, anon_rss);
1367                         }
1368                 } else
1369                         inc_mm_counter(mm, anon_rss);
1370                 flush_cache_page(vma, address, pfn);
1371                 entry = mk_pte(new_page, vma->vm_page_prot);
1372                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1373                 ptep_establish(vma, address, page_table, entry);
1374                 update_mmu_cache(vma, address, entry);
1375                 lazy_mmu_prot_update(entry);
1376                 lru_cache_add_active(new_page);
1377                 page_add_anon_rmap(new_page, vma, address);
1378
1379                 /* Free the old page.. */
1380                 new_page = old_page;
1381                 ret |= VM_FAULT_WRITE;
1382         }
1383         if (new_page)
1384                 page_cache_release(new_page);
1385         if (old_page)
1386                 page_cache_release(old_page);
1387 unlock:
1388         pte_unmap_unlock(page_table, ptl);
1389         return ret;
1390 oom:
1391         if (old_page)
1392                 page_cache_release(old_page);
1393         return VM_FAULT_OOM;
1394 }
1395
1396 /*
1397  * Helper functions for unmap_mapping_range().
1398  *
1399  * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
1400  *
1401  * We have to restart searching the prio_tree whenever we drop the lock,
1402  * since the iterator is only valid while the lock is held, and anyway
1403  * a later vma might be split and reinserted earlier while lock dropped.
1404  *
1405  * The list of nonlinear vmas could be handled more efficiently, using
1406  * a placeholder, but handle it in the same way until a need is shown.
1407  * It is important to search the prio_tree before the nonlinear list: a vma
1408  * may become nonlinear and be shifted from prio_tree to nonlinear list
1409  * while the lock is dropped; but never shifted from list to prio_tree.
1410  *
1411  * In order to make forward progress despite restarting the search,
1412  * vm_truncate_count is used to mark a vma as now dealt with, so we can
1413  * quickly skip it next time around.  Since the prio_tree search only
1414  * shows us those vmas affected by unmapping the range in question, we
1415  * can't efficiently keep all vmas in step with mapping->truncate_count:
1416  * so instead reset them all whenever it wraps back to 0 (then go to 1).
1417  * mapping->truncate_count and vma->vm_truncate_count are protected by
1418  * i_mmap_lock.
1419  *
1420  * In order to make forward progress despite repeatedly restarting some
1421  * large vma, note the restart_addr from unmap_vmas when it breaks out:
1422  * and restart from that address when we reach that vma again.  It might
1423  * have been split or merged, shrunk or extended, but never shifted: so
1424  * restart_addr remains valid so long as it remains in the vma's range.
1425  * unmap_mapping_range forces truncate_count to leap over page-aligned
1426  * values so we can save vma's restart_addr in its truncate_count field.
1427  */
1428 #define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
1429
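/*
 * Worked example of the macro above (illustrative, assuming 4K pages, so
 * ~PAGE_MASK == 0xfff): a saved restart address such as 0x12345000 has all
 * of its offset-within-page bits clear, so is_restart_addr() is true;
 * unmap_mapping_range() below skips truncate_count over exactly those
 * page-aligned values, so a live truncate_count can never be confused with
 * a restart address stashed in vma->vm_truncate_count.
 */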
1430 static void reset_vma_truncate_counts(struct address_space *mapping)
1431 {
1432         struct vm_area_struct *vma;
1433         struct prio_tree_iter iter;
1434
1435         vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
1436                 vma->vm_truncate_count = 0;
1437         list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
1438                 vma->vm_truncate_count = 0;
1439 }
1440
1441 static int unmap_mapping_range_vma(struct vm_area_struct *vma,
1442                 unsigned long start_addr, unsigned long end_addr,
1443                 struct zap_details *details)
1444 {
1445         unsigned long restart_addr;
1446         int need_break;
1447
1448 again:
1449         restart_addr = vma->vm_truncate_count;
1450         if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
1451                 start_addr = restart_addr;
1452                 if (start_addr >= end_addr) {
1453                         /* Top of vma has been split off since last time */
1454                         vma->vm_truncate_count = details->truncate_count;
1455                         return 0;
1456                 }
1457         }
1458
1459         restart_addr = zap_page_range(vma, start_addr,
1460                                         end_addr - start_addr, details);
1461         need_break = need_resched() ||
1462                         need_lockbreak(details->i_mmap_lock);
1463
1464         if (restart_addr >= end_addr) {
1465                 /* We have now completed this vma: mark it so */
1466                 vma->vm_truncate_count = details->truncate_count;
1467                 if (!need_break)
1468                         return 0;
1469         } else {
1470                 /* Note restart_addr in vma's truncate_count field */
1471                 vma->vm_truncate_count = restart_addr;
1472                 if (!need_break)
1473                         goto again;
1474         }
1475
1476         spin_unlock(details->i_mmap_lock);
1477         cond_resched();
1478         spin_lock(details->i_mmap_lock);
1479         return -EINTR;
1480 }
1481
1482 static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
1483                                             struct zap_details *details)
1484 {
1485         struct vm_area_struct *vma;
1486         struct prio_tree_iter iter;
1487         pgoff_t vba, vea, zba, zea;
1488
1489 restart:
1490         vma_prio_tree_foreach(vma, &iter, root,
1491                         details->first_index, details->last_index) {
1492                 /* Skip quickly over those we have already dealt with */
1493                 if (vma->vm_truncate_count == details->truncate_count)
1494                         continue;
1495
1496                 vba = vma->vm_pgoff;
1497                 vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
1498                 /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
1499                 zba = details->first_index;
1500                 if (zba < vba)
1501                         zba = vba;
1502                 zea = details->last_index;
1503                 if (zea > vea)
1504                         zea = vea;
1505
1506                 if (unmap_mapping_range_vma(vma,
1507                         ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
1508                         ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
1509                                 details) < 0)
1510                         goto restart;
1511         }
1512 }
1513
1514 static inline void unmap_mapping_range_list(struct list_head *head,
1515                                             struct zap_details *details)
1516 {
1517         struct vm_area_struct *vma;
1518
1519         /*
1520          * In nonlinear VMAs there is no correspondence between virtual address
1521          * offset and file offset.  So we must perform an exhaustive search
1522          * across *all* the pages in each nonlinear VMA, not just the pages
1523          * whose virtual address lies outside the file truncation point.
1524          */
1525 restart:
1526         list_for_each_entry(vma, head, shared.vm_set.list) {
1527                 /* Skip quickly over those we have already dealt with */
1528                 if (vma->vm_truncate_count == details->truncate_count)
1529                         continue;
1530                 details->nonlinear_vma = vma;
1531                 if (unmap_mapping_range_vma(vma, vma->vm_start,
1532                                         vma->vm_end, details) < 0)
1533                         goto restart;
1534         }
1535 }
1536
1537 /**
1538  * unmap_mapping_range - unmap the portion of all mmaps
1539  * in the specified address_space corresponding to the specified
1540  * page range in the underlying file.
1541  * @mapping: the address space containing mmaps to be unmapped.
1542  * @holebegin: byte in first page to unmap, relative to the start of
1543  * the underlying file.  This will be rounded down to a PAGE_SIZE
1544  * boundary.  Note that this is different from vmtruncate(), which
1545  * must keep the partial page.  In contrast, we must get rid of
1546  * partial pages.
1547  * @holelen: size of prospective hole in bytes.  This will be rounded
1548  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
1549  * end of the file.
1550  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
1551  * but 0 when invalidating pagecache, don't throw away private data.
1552  */
1553 void unmap_mapping_range(struct address_space *mapping,
1554                 loff_t const holebegin, loff_t const holelen, int even_cows)
1555 {
1556         struct zap_details details;
1557         pgoff_t hba = holebegin >> PAGE_SHIFT;
1558         pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1559
1560         /* Check for overflow. */
1561         if (sizeof(holelen) > sizeof(hlen)) {
1562                 long long holeend =
1563                         (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1564                 if (holeend & ~(long long)ULONG_MAX)
1565                         hlen = ULONG_MAX - hba + 1;
1566         }
1567
1568         details.check_mapping = even_cows? NULL: mapping;
1569         details.nonlinear_vma = NULL;
1570         details.first_index = hba;
1571         details.last_index = hba + hlen - 1;
1572         if (details.last_index < details.first_index)
1573                 details.last_index = ULONG_MAX;
1574         details.i_mmap_lock = &mapping->i_mmap_lock;
1575
1576         spin_lock(&mapping->i_mmap_lock);
1577
1578         /* serialize i_size write against truncate_count write */
1579         smp_wmb();
1580         /* Protect against page faults, and endless unmapping loops */
1581         mapping->truncate_count++;
1582         /*
1583          * For archs where spin_lock has inclusive semantics, like ia64,
1584          * this smp_mb() prevents pagetable contents from being read
1585          * before the truncate_count increment is visible to
1586          * other cpus.
1587          */
1588         smp_mb();
1589         if (unlikely(is_restart_addr(mapping->truncate_count))) {
1590                 if (mapping->truncate_count == 0)
1591                         reset_vma_truncate_counts(mapping);
1592                 mapping->truncate_count++;
1593         }
1594         details.truncate_count = mapping->truncate_count;
1595
1596         if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
1597                 unmap_mapping_range_tree(&mapping->i_mmap, &details);
1598         if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
1599                 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
1600         spin_unlock(&mapping->i_mmap_lock);
1601 }
1602 EXPORT_SYMBOL(unmap_mapping_range);
1603
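/*
 * Illustrative sketch of a caller (the helper name is hypothetical; only
 * unmap_mapping_range() itself comes from this file): a filesystem that
 * wants to invalidate the mappings over a byte range of its pagecache,
 * without discarding anybody's private COWed copies, passes even_cows == 0
 * and would typically follow up by dropping or revalidating the cached
 * pages themselves (e.g. invalidate_inode_pages2()).
 *
 *	static void example_zap_range(struct address_space *mapping,
 *				      loff_t pos, loff_t len)
 *	{
 *		unmap_mapping_range(mapping, pos, len, 0);
 *	}
 */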
1604 /*
1605  * Handle all mappings that got truncated by a "truncate()"
1606  * system call.
1607  *
1608  * NOTE! We have to be ready to update the memory sharing
1609  * between the file and the memory map for a potential last
1610  * incomplete page.  Ugly, but necessary.
1611  */
1612 int vmtruncate(struct inode * inode, loff_t offset)
1613 {
1614         struct address_space *mapping = inode->i_mapping;
1615         unsigned long limit;
1616
1617         if (inode->i_size < offset)
1618                 goto do_expand;
1619         /*
1620          * truncation of in-use swapfiles is disallowed - it would cause
1621          * subsequent swapout to scribble on the now-freed blocks.
1622          */
1623         if (IS_SWAPFILE(inode))
1624                 goto out_busy;
1625         i_size_write(inode, offset);
1626         unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
1627         truncate_inode_pages(mapping, offset);
1628         goto out_truncate;
1629
1630 do_expand:
1631         limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
1632         if (limit != RLIM_INFINITY && offset > limit)
1633                 goto out_sig;
1634         if (offset > inode->i_sb->s_maxbytes)
1635                 goto out_big;
1636         i_size_write(inode, offset);
1637
1638 out_truncate:
1639         if (inode->i_op && inode->i_op->truncate)
1640                 inode->i_op->truncate(inode);
1641         return 0;
1642 out_sig:
1643         send_sig(SIGXFSZ, current, 0);
1644 out_big:
1645         return -EFBIG;
1646 out_busy:
1647         return -ETXTBSY;
1648 }
1649
1650 EXPORT_SYMBOL(vmtruncate);
1651
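/*
 * Illustrative sketch of how vmtruncate() is normally reached (the
 * example_setattr name is hypothetical; inode_change_ok() and
 * inode_setattr() are the generic helpers, and in this kernel it is
 * inode_setattr() that calls vmtruncate() when ATTR_SIZE is set):
 *
 *	static int example_setattr(struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int error = inode_change_ok(inode, attr);
 *
 *		if (!error)
 *			error = inode_setattr(inode, attr);
 *		return error;
 *	}
 */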
1652 /* 
1653  * Primitive swap readahead code. We simply read an aligned block of
1654  * (1 << page_cluster) entries in the swap area. This method is chosen
1655  * because it doesn't cost us any seek time.  We also make sure to queue
1656  * the 'original' request together with the readahead ones...  
1657  *
1658  * This has been extended to use the NUMA policies from the mm triggering
1659  * the readahead.
1660  *
1661  * Caller must hold a down_read on vma->vm_mm->mmap_sem if vma is not NULL.
1662  */
1663 void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
1664 {
1665 #ifdef CONFIG_NUMA
1666         struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
1667 #endif
1668         int i, num;
1669         struct page *new_page;
1670         unsigned long offset;
1671
1672         /*
1673          * Get the number of swap entries to read ahead, and the start offset.
1674          */
1675         num = valid_swaphandles(entry, &offset);
1676         for (i = 0; i < num; offset++, i++) {
1677                 /* Ok, do the async read-ahead now */
1678                 new_page = read_swap_cache_async(swp_entry(swp_type(entry),
1679                                                            offset), vma, addr);
1680                 if (!new_page)
1681                         break;
1682                 page_cache_release(new_page);
1683 #ifdef CONFIG_NUMA
1684                 /*
1685                  * Find the next applicable VMA for the NUMA policy.
1686                  */
1687                 addr += PAGE_SIZE;
1688                 if (addr == 0)
1689                         vma = NULL;
1690                 if (vma) {
1691                         if (addr >= vma->vm_end) {
1692                                 vma = next_vma;
1693                                 next_vma = vma ? vma->vm_next : NULL;
1694                         }
1695                         if (vma && addr < vma->vm_start)
1696                                 vma = NULL;
1697                 } else {
1698                         if (next_vma && addr >= next_vma->vm_start) {
1699                                 vma = next_vma;
1700                                 next_vma = vma->vm_next;
1701                         }
1702                 }
1703 #endif
1704         }
1705         lru_add_drain();        /* Push any new pages onto the LRU now */
1706 }
1707
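/*
 * Worked example of the readahead window (illustrative; page_cluster is
 * typically 3): a fault on swap offset 37 asks valid_swaphandles() for the
 * aligned block of 1 << 3 = 8 entries starting at offset 32, so offsets
 * 32..39 are read, fewer if the swap area ends or a free/bad slot cuts
 * the run short.
 */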
1708 /*
1709  * We enter with non-exclusive mmap_sem (to exclude vma changes,
1710  * but allow concurrent faults), and pte mapped but not yet locked.
1711  * We return with mmap_sem still held, but pte unmapped and unlocked.
1712  */
1713 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
1714                 unsigned long address, pte_t *page_table, pmd_t *pmd,
1715                 int write_access, pte_t orig_pte)
1716 {
1717         spinlock_t *ptl;
1718         struct page *page;
1719         swp_entry_t entry;
1720         pte_t pte;
1721         int ret = VM_FAULT_MINOR;
1722
1723         if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
1724                 goto out;
1725
1726         entry = pte_to_swp_entry(orig_pte);
1727         page = lookup_swap_cache(entry);
1728         if (!page) {
1729                 swapin_readahead(entry, address, vma);
1730                 page = read_swap_cache_async(entry, vma, address);
1731                 if (!page) {
1732                         /*
1733                          * Back out if somebody else faulted in this pte
1734                          * while we released the pte lock.
1735                          */
1736                         page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
1737                         if (likely(pte_same(*page_table, orig_pte)))
1738                                 ret = VM_FAULT_OOM;
1739                         goto unlock;
1740                 }
1741
1742                 /* Had to read the page from swap area: Major fault */
1743                 ret = VM_FAULT_MAJOR;
1744                 inc_page_state(pgmajfault);
1745                 grab_swap_token();
1746         }
1747
1748         mark_page_accessed(page);
1749         lock_page(page);
1750
1751         /*
1752          * Back out if somebody else already faulted in this pte.
1753          */
1754         page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
1755         if (unlikely(!pte_same(*page_table, orig_pte)))
1756                 goto out_nomap;
1757
1758         if (unlikely(!PageUptodate(page))) {
1759                 ret = VM_FAULT_SIGBUS;
1760                 goto out_nomap;
1761         }
1762
1763         /* The page isn't present yet, go ahead with the fault. */
1764
1765         inc_mm_counter(mm, anon_rss);
1766         pte = mk_pte(page, vma->vm_page_prot);
1767         if (write_access && can_share_swap_page(page)) {
1768                 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
1769                 write_access = 0;
1770         }
1771
1772         flush_icache_page(vma, page);
1773         set_pte_at(mm, address, page_table, pte);
1774         page_add_anon_rmap(page, vma, address);
1775
1776         swap_free(entry);
1777         if (vm_swap_full())
1778                 remove_exclusive_swap_page(page);
1779         unlock_page(page);
1780
1781         if (write_access) {
1782                 if (do_wp_page(mm, vma, address,
1783                                 page_table, pmd, ptl, pte) == VM_FAULT_OOM)
1784                         ret = VM_FAULT_OOM;
1785                 goto out;
1786         }
1787
1788         /* No need to invalidate - it was non-present before */
1789         update_mmu_cache(vma, address, pte);
1790         lazy_mmu_prot_update(pte);
1791 unlock:
1792         pte_unmap_unlock(page_table, ptl);
1793 out:
1794         return ret;
1795 out_nomap:
1796         pte_unmap_unlock(page_table, ptl);
1797         unlock_page(page);
1798         page_cache_release(page);
1799         return ret;
1800 }
1801
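/*
 * Note on the write_access handling above: when the swapcache page can
 * already be made exclusive (can_share_swap_page()), the pte is mapped
 * writable right away and write_access is cleared; otherwise the still
 * held page table lock is handed on to do_wp_page(), which performs the
 * actual copy-on-write.
 */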
1802 /*
1803  * We enter with non-exclusive mmap_sem (to exclude vma changes,
1804  * but allow concurrent faults), and pte mapped but not yet locked.
1805  * We return with mmap_sem still held, but pte unmapped and unlocked.
1806  */
1807 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
1808                 unsigned long address, pte_t *page_table, pmd_t *pmd,
1809                 int write_access)
1810 {
1811         struct page *page;
1812         spinlock_t *ptl;
1813         pte_t entry;
1814
1815         /*
1816          * A VM_UNPAGED vma will normally be filled with present ptes
1817          * by remap_pfn_range, and never arrive here; but it might have
1818          * holes, or if !VM_DONTEXPAND, mremap might have expanded it.
1819          * It's weird enough handling anon pages in unpaged vmas; we do
1820          * not want to worry about ZERO_PAGEs too (it may or may not
1821          * matter if their counts wrap): just give them anon pages.
1822          */
1823
1824         if (write_access || (vma->vm_flags & VM_UNPAGED)) {
1825                 /* Allocate our own private page. */
1826                 pte_unmap(page_table);
1827
1828                 if (unlikely(anon_vma_prepare(vma)))
1829                         goto oom;
1830                 page = alloc_zeroed_user_highpage(vma, address);
1831                 if (!page)
1832                         goto oom;
1833
1834                 entry = mk_pte(page, vma->vm_page_prot);
1835                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1836
1837                 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
1838                 if (!pte_none(*page_table))
1839                         goto release;
1840                 inc_mm_counter(mm, anon_rss);
1841                 lru_cache_add_active(page);
1842                 SetPageReferenced(page);
1843                 page_add_anon_rmap(page, vma, address);
1844         } else {
1845                 /* Map the ZERO_PAGE - vm_page_prot is readonly */
1846                 page = ZERO_PAGE(address);
1847                 page_cache_get(page);
1848                 entry = mk_pte(page, vma->vm_page_prot);
1849
1850                 ptl = pte_lockptr(mm, pmd);
1851                 spin_lock(ptl);
1852                 if (!pte_none(*page_table))
1853                         goto release;
1854                 inc_mm_counter(mm, file_rss);
1855                 page_add_file_rmap(page);
1856         }
1857
1858         set_pte_at(mm, address, page_table, entry);
1859
1860         /* No need to invalidate - it was non-present before */
1861         update_mmu_cache(vma, address, entry);
1862         lazy_mmu_prot_update(entry);
1863 unlock:
1864         pte_unmap_unlock(page_table, ptl);
1865         return VM_FAULT_MINOR;
1866 release:
1867         page_cache_release(page);
1868         goto unlock;
1869 oom:
1870         return VM_FAULT_OOM;
1871 }
1872
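/*
 * Note on the two paths above: a read fault installs the ZERO_PAGE
 * read-only, so the first later write to that address traps into
 * do_wp_page(), whose src_page == ZERO_PAGE case allocates a fresh zeroed
 * anonymous page; a write fault avoids that round trip by allocating the
 * private page immediately.
 */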
1873 /*
1874  * do_no_page() tries to create a new page mapping. It aggressively
1875  * tries to share with existing pages, but makes a separate copy if
1876  * the "write_access" parameter is true in order to avoid the next
1877  * page fault.
1878  *
1879  * As this is called only for pages that do not currently exist, we
1880  * do not need to flush old virtual caches or the TLB.
1881  *
1882  * We enter with non-exclusive mmap_sem (to exclude vma changes,
1883  * but allow concurrent faults), and pte mapped but not yet locked.
1884  * We return with mmap_sem still held, but pte unmapped and unlocked.
1885  */
1886 static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
1887                 unsigned long address, pte_t *page_table, pmd_t *pmd,
1888                 int write_access)
1889 {
1890         spinlock_t *ptl;
1891         struct page *new_page;
1892         struct address_space *mapping = NULL;
1893         pte_t entry;
1894         unsigned int sequence = 0;
1895         int ret = VM_FAULT_MINOR;
1896         int anon = 0;
1897
1898         pte_unmap(page_table);
1899         BUG_ON(vma->vm_flags & VM_UNPAGED);
1900
1901         if (vma->vm_file) {
1902                 mapping = vma->vm_file->f_mapping;
1903                 sequence = mapping->truncate_count;
1904                 smp_rmb(); /* serializes i_size against truncate_count */
1905         }
1906 retry:
1907         new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
1908         /*
1909          * No smp_rmb is needed here as long as there's a full
1910          * spin_lock/unlock sequence inside the ->nopage callback
1911          * (for the pagecache lookup) that acts as an implicit
1912          * smp_mb() and prevents the i_size read to happen
1913          * after the next truncate_count read.
1914          */
1915
1916         /* no page was available -- either SIGBUS or OOM */
1917         if (new_page == NOPAGE_SIGBUS)
1918                 return VM_FAULT_SIGBUS;
1919         if (new_page == NOPAGE_OOM)
1920                 return VM_FAULT_OOM;
1921
1922         /*
1923          * Should we do an early C-O-W break?
1924          */
1925         if (write_access && !(vma->vm_flags & VM_SHARED)) {
1926                 struct page *page;
1927
1928                 if (unlikely(anon_vma_prepare(vma)))
1929                         goto oom;
1930                 page = alloc_page_vma(GFP_HIGHUSER, vma, address);
1931                 if (!page)
1932                         goto oom;
1933                 copy_user_highpage(page, new_page, address);
1934                 page_cache_release(new_page);
1935                 new_page = page;
1936                 anon = 1;
1937         }
1938
1939         page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
1940         /*
1941          * For a file-backed vma, someone could have truncated or otherwise
1942          * invalidated this page.  If unmap_mapping_range got called,
1943          * retry getting the page.
1944          */
1945         if (mapping && unlikely(sequence != mapping->truncate_count)) {
1946                 pte_unmap_unlock(page_table, ptl);
1947                 page_cache_release(new_page);
1948                 cond_resched();
1949                 sequence = mapping->truncate_count;
1950                 smp_rmb();
1951                 goto retry;
1952         }
1953
1954         /*
1955          * This silly early PAGE_DIRTY setting removes a race
1956          * due to the bad i386 page protection. But it's valid
1957          * for other architectures too.
1958          *
1959          * Note that if write_access is true, we either now have
1960          * an exclusive copy of the page, or this is a shared mapping,
1961          * so we can make it writable and dirty to avoid having to
1962          * handle that later.
1963          */
1964         /* Only go through if we didn't race with anybody else... */
1965         if (pte_none(*page_table)) {
1966                 flush_icache_page(vma, new_page);
1967                 entry = mk_pte(new_page, vma->vm_page_prot);
1968                 if (write_access)
1969                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1970                 set_pte_at(mm, address, page_table, entry);
1971                 if (anon) {
1972                         inc_mm_counter(mm, anon_rss);
1973                         lru_cache_add_active(new_page);
1974                         page_add_anon_rmap(new_page, vma, address);
1975                 } else {
1976                         inc_mm_counter(mm, file_rss);
1977                         page_add_file_rmap(new_page);
1978                 }
1979         } else {
1980                 /* One of our sibling threads was faster, back out. */
1981                 page_cache_release(new_page);
1982                 goto unlock;
1983         }
1984
1985         /* no need to invalidate: a not-present page shouldn't be cached */
1986         update_mmu_cache(vma, address, entry);
1987         lazy_mmu_prot_update(entry);
1988 unlock:
1989         pte_unmap_unlock(page_table, ptl);
1990         return ret;
1991 oom:
1992         page_cache_release(new_page);
1993         return VM_FAULT_OOM;
1994 }
1995
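/*
 * Illustrative sketch of the ->nopage() callback invoked above (the device
 * structure and names are hypothetical; only the calling convention comes
 * from this file): return the backing struct page with its refcount
 * raised, or NOPAGE_SIGBUS when the fault is outside the buffer.
 *
 *	static struct page *example_nopage(struct vm_area_struct *vma,
 *					   unsigned long address, int *type)
 *	{
 *		struct example_dev *dev = vma->vm_private_data;
 *		unsigned long pgoff = vma->vm_pgoff +
 *			((address - vma->vm_start) >> PAGE_SHIFT);
 *		struct page *page;
 *
 *		if (pgoff >= dev->npages)
 *			return NOPAGE_SIGBUS;
 *		page = dev->pages[pgoff];
 *		get_page(page);
 *		if (type)
 *			*type = VM_FAULT_MINOR;
 *		return page;
 *	}
 */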
1996 /*
1997  * Fault on a previously existing named mapping. Repopulate the pte
1998  * from the encoded file_pte if possible. This enables swappable
1999  * nonlinear vmas.
2000  *
2001  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2002  * but allow concurrent faults), and pte mapped but not yet locked.
2003  * We return with mmap_sem still held, but pte unmapped and unlocked.
2004  */
2005 static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma,
2006                 unsigned long address, pte_t *page_table, pmd_t *pmd,
2007                 int write_access, pte_t orig_pte)
2008 {
2009         pgoff_t pgoff;
2010         int err;
2011
2012         if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
2013                 return VM_FAULT_MINOR;
2014
2015         if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
2016                 /*
2017                  * Page table corrupted: show pte and kill process.
2018                  */
2019                 print_bad_pte(vma, orig_pte, address);
2020                 return VM_FAULT_OOM;
2021         }
2022         /* We can then assume vma->vm_ops && vma->vm_ops->populate */
2023
2024         pgoff = pte_to_pgoff(orig_pte);
2025         err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE,
2026                                         vma->vm_page_prot, pgoff, 0);
2027         if (err == -ENOMEM)
2028                 return VM_FAULT_OOM;
2029         if (err)
2030                 return VM_FAULT_SIGBUS;
2031         return VM_FAULT_MAJOR;
2032 }
2033
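/*
 * For reference, a hypothetical userspace sequence that creates the
 * encoded file ptes handled above: remap_file_pages(2) rearranges a
 * MAP_SHARED mapping so that, for example, the first virtual page shows
 * file page 3; once such a page has been zapped or swapped, faulting it
 * back in arrives here and is repopulated through ->populate.
 *
 *	addr = mmap(NULL, 4 * page_size, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *	remap_file_pages(addr, page_size, 0, 3, 0);
 */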
2034 /*
2035  * These routines also need to handle stuff like marking pages dirty
2036  * and/or accessed for architectures that don't do it in hardware (most
2037  * RISC architectures).  The early dirtying is also good on the i386.
2038  *
2039  * There is also a hook called "update_mmu_cache()" that architectures
2040  * with external mmu caches can use to update those (i.e. the Sparc or
2041  * PowerPC hashed page tables that act as extended TLBs).
2042  *
2043  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2044  * but allow concurrent faults), and pte mapped but not yet locked.
2045  * We return with mmap_sem still held, but pte unmapped and unlocked.
2046  */
2047 static inline int handle_pte_fault(struct mm_struct *mm,
2048                 struct vm_area_struct *vma, unsigned long address,
2049                 pte_t *pte, pmd_t *pmd, int write_access)
2050 {
2051         pte_t entry;
2052         pte_t old_entry;
2053         spinlock_t *ptl;
2054
2055         old_entry = entry = *pte;
2056         if (!pte_present(entry)) {
2057                 if (pte_none(entry)) {
2058                         if (!vma->vm_ops || !vma->vm_ops->nopage)
2059                                 return do_anonymous_page(mm, vma, address,
2060                                         pte, pmd, write_access);
2061                         return do_no_page(mm, vma, address,
2062                                         pte, pmd, write_access);
2063                 }
2064                 if (pte_file(entry))
2065                         return do_file_page(mm, vma, address,
2066                                         pte, pmd, write_access, entry);
2067                 return do_swap_page(mm, vma, address,
2068                                         pte, pmd, write_access, entry);
2069         }
2070
2071         ptl = pte_lockptr(mm, pmd);
2072         spin_lock(ptl);
2073         if (unlikely(!pte_same(*pte, entry)))
2074                 goto unlock;
2075         if (write_access) {
2076                 if (!pte_write(entry))
2077                         return do_wp_page(mm, vma, address,
2078                                         pte, pmd, ptl, entry);
2079                 entry = pte_mkdirty(entry);
2080         }
2081         entry = pte_mkyoung(entry);
2082         if (!pte_same(old_entry, entry)) {
2083                 ptep_set_access_flags(vma, address, pte, entry, write_access);
2084                 update_mmu_cache(vma, address, entry);
2085                 lazy_mmu_prot_update(entry);
2086         } else {
2087                 /*
2088                  * This is needed only for protection faults but the arch code
2089                  * is not yet telling us if this is a protection fault or not.
2090                  * This still avoids useless tlb flushes for .text page faults
2091                  * with threads.
2092                  */
2093                 if (write_access)
2094                         flush_tlb_page(vma, address);
2095         }
2096 unlock:
2097         pte_unmap_unlock(pte, ptl);
2098         return VM_FAULT_MINOR;
2099 }
2100
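/*
 * Dispatch summary of the !pte_present cases above: pte_none with no
 * ->nopage goes to do_anonymous_page(); pte_none with a ->nopage goes to
 * do_no_page(); a file pte goes to do_file_page(); anything else is a
 * swap entry and goes to do_swap_page().
 */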
2101 /*
2102  * By the time we get here, we already hold the mm semaphore
2103  */
2104 int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2105                 unsigned long address, int write_access)
2106 {
2107         pgd_t *pgd;
2108         pud_t *pud;
2109         pmd_t *pmd;
2110         pte_t *pte;
2111
2112         __set_current_state(TASK_RUNNING);
2113
2114         inc_page_state(pgfault);
2115
2116         if (unlikely(is_vm_hugetlb_page(vma)))
2117                 return hugetlb_fault(mm, vma, address, write_access);
2118
2119         pgd = pgd_offset(mm, address);
2120         pud = pud_alloc(mm, pgd, address);
2121         if (!pud)
2122                 return VM_FAULT_OOM;
2123         pmd = pmd_alloc(mm, pud, address);
2124         if (!pmd)
2125                 return VM_FAULT_OOM;
2126         pte = pte_alloc_map(mm, pmd, address);
2127         if (!pte)
2128                 return VM_FAULT_OOM;
2129
2130         return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
2131 }
2132
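/*
 * Illustrative sketch of how an architecture's page fault handler reaches
 * this point (simplified and hypothetical apart from the mm calls and the
 * VM_FAULT_* codes; real handlers also deal with kernel faults, stack
 * expansion and signal delivery):
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, address);
 *	if (!vma || address < vma->vm_start)
 *		goto bad_area;
 *	switch (handle_mm_fault(mm, vma, address, write)) {
 *	case VM_FAULT_MINOR:	tsk->min_flt++; break;
 *	case VM_FAULT_MAJOR:	tsk->maj_flt++; break;
 *	case VM_FAULT_SIGBUS:	goto do_sigbus;
 *	case VM_FAULT_OOM:	goto out_of_memory;
 *	}
 *	up_read(&mm->mmap_sem);
 */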
2133 #ifndef __PAGETABLE_PUD_FOLDED
2134 /*
2135  * Allocate page upper directory.
2136  * We've already handled the fast-path in-line.
2137  */
2138 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
2139 {
2140         pud_t *new = pud_alloc_one(mm, address);
2141         if (!new)
2142                 return -ENOMEM;
2143
2144         spin_lock(&mm->page_table_lock);
2145         if (pgd_present(*pgd))          /* Another has populated it */
2146                 pud_free(new);
2147         else
2148                 pgd_populate(mm, pgd, new);
2149         spin_unlock(&mm->page_table_lock);
2150         return 0;
2151 }
2152 #endif /* __PAGETABLE_PUD_FOLDED */
2153
2154 #ifndef __PAGETABLE_PMD_FOLDED
2155 /*
2156  * Allocate page middle directory.
2157  * We've already handled the fast-path in-line.
2158  */
2159 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2160 {
2161         pmd_t *new = pmd_alloc_one(mm, address);
2162         if (!new)
2163                 return -ENOMEM;
2164
2165         spin_lock(&mm->page_table_lock);
2166 #ifndef __ARCH_HAS_4LEVEL_HACK
2167         if (pud_present(*pud))          /* Another has populated it */
2168                 pmd_free(new);
2169         else
2170                 pud_populate(mm, pud, new);
2171 #else
2172         if (pgd_present(*pud))          /* Another has populated it */
2173                 pmd_free(new);
2174         else
2175                 pgd_populate(mm, pud, new);
2176 #endif /* __ARCH_HAS_4LEVEL_HACK */
2177         spin_unlock(&mm->page_table_lock);
2178         return 0;
2179 }
2180 #endif /* __PAGETABLE_PMD_FOLDED */
2181
2182 int make_pages_present(unsigned long addr, unsigned long end)
2183 {
2184         int ret, len, write;
2185         struct vm_area_struct * vma;
2186
2187         vma = find_vma(current->mm, addr);
2188         if (!vma)
2189                 return -1;
2190         write = (vma->vm_flags & VM_WRITE) != 0;
2191         if (addr >= end)
2192                 BUG();
2193         if (end > vma->vm_end)
2194                 BUG();
2195         len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
2196         ret = get_user_pages(current, current->mm, addr,
2197                         len, write, 0, NULL, NULL);
2198         if (ret < 0)
2199                 return ret;
2200         return ret == len ? 0 : -1;
2201 }
2202
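/*
 * Context from elsewhere in the tree (e.g. mm/mlock.c): this is the helper
 * the mlock()/VM_LOCKED paths use to fault an entire range into the
 * address space up front, by way of get_user_pages() above.
 */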
2203 /* 
2204  * Map a vmalloc()-space virtual address to the physical page.
2205  */
2206 struct page * vmalloc_to_page(void * vmalloc_addr)
2207 {
2208         unsigned long addr = (unsigned long) vmalloc_addr;
2209         struct page *page = NULL;
2210         pgd_t *pgd = pgd_offset_k(addr);
2211         pud_t *pud;
2212         pmd_t *pmd;
2213         pte_t *ptep, pte;
2214   
2215         if (!pgd_none(*pgd)) {
2216                 pud = pud_offset(pgd, addr);
2217                 if (!pud_none(*pud)) {
2218                         pmd = pmd_offset(pud, addr);
2219                         if (!pmd_none(*pmd)) {
2220                                 ptep = pte_offset_map(pmd, addr);
2221                                 pte = *ptep;
2222                                 if (pte_present(pte))
2223                                         page = pte_page(pte);
2224                                 pte_unmap(ptep);
2225                         }
2226                 }
2227         }
2228         return page;
2229 }
2230
2231 EXPORT_SYMBOL(vmalloc_to_page);
2232
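/*
 * Illustrative use (names hypothetical): a driver exporting a vmalloc()ed
 * buffer to userspace can serve its ->nopage() handler, like the sketch
 * after do_no_page() above, from
 *
 *	page = vmalloc_to_page(dev->vbuf + (pgoff << PAGE_SHIFT));
 *	get_page(page);
 */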
2233 /*
2234  * Map a vmalloc()-space virtual address to the physical page frame number.
2235  */
2236 unsigned long vmalloc_to_pfn(void * vmalloc_addr)
2237 {
2238         return page_to_pfn(vmalloc_to_page(vmalloc_addr));
2239 }
2240
2241 EXPORT_SYMBOL(vmalloc_to_pfn);
2242
2243 #if !defined(__HAVE_ARCH_GATE_AREA)
2244
2245 #if defined(AT_SYSINFO_EHDR)
2246 static struct vm_area_struct gate_vma;
2247
2248 static int __init gate_vma_init(void)
2249 {
2250         gate_vma.vm_mm = NULL;
2251         gate_vma.vm_start = FIXADDR_USER_START;
2252         gate_vma.vm_end = FIXADDR_USER_END;
2253         gate_vma.vm_page_prot = PAGE_READONLY;
2254         gate_vma.vm_flags = 0;
2255         return 0;
2256 }
2257 __initcall(gate_vma_init);
2258 #endif
2259
2260 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
2261 {
2262 #ifdef AT_SYSINFO_EHDR
2263         return &gate_vma;
2264 #else
2265         return NULL;
2266 #endif
2267 }
2268
2269 int in_gate_area_no_task(unsigned long addr)
2270 {
2271 #ifdef AT_SYSINFO_EHDR
2272         if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
2273                 return 1;
2274 #endif
2275         return 0;
2276 }
2277
2278 #endif  /* __HAVE_ARCH_GATE_AREA */