/*
 *      linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/aio.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/syscalls.h>
/*
 * This is needed for the following functions:
 *  - try_to_release_page
 *  - block_invalidatepage
 *  - generic_osync_inode
 *
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for generic_osync_inode */

#include <asm/uaccess.h>
#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_lock               (vmtruncate)
 *    ->private_lock            (__free_pte->__set_page_dirty_buffers)
 *      ->swap_list_lock
 *        ->swap_device_lock    (exclusive_swap_page, others)
 *          ->mapping->tree_lock
 *
 *  ->i_sem
 *    ->i_mmap_lock             (truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_lock
 *      ->page_table_lock       (various places, mainly in mmap.c)
 *        ->mapping->tree_lock  (arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page               (access_process_vm)
 *
 *  ->mmap_sem
 *    ->i_sem                   (msync)
 *
 *  ->i_sem
 *    ->i_alloc_sem             (various)
 *
 *  ->inode_lock
 *    ->sb_lock                 (fs/fs-writeback.c)
 *    ->mapping->tree_lock      (__sync_single_inode)
 *
 *  ->i_mmap_lock
 *    ->anon_vma.lock           (vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock         (anon_vma_prepare and various)
 *
 *  ->page_table_lock
 *    ->swap_device_lock        (try_to_unmap_one)
 *    ->private_lock            (try_to_unmap_one)
 *    ->tree_lock               (try_to_unmap_one)
 *    ->zone.lru_lock           (follow_page->mark_page_accessed)
 *    ->private_lock            (page_remove_rmap->set_page_dirty)
 *    ->tree_lock               (page_remove_rmap->set_page_dirty)
 *    ->inode_lock              (page_remove_rmap->set_page_dirty)
 *    ->inode_lock              (zap_pte_range->set_page_dirty)
 *    ->private_lock            (zap_pte_range->__set_page_dirty_buffers)
 *
 *  ->task->proc_lock
 *    ->dcache_lock             (proc_pid_lookup)
 */
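
/*
 * Illustrative sketch (not code from this file): a truncate-style path
 * must honour the ordering above, taking i_sem before i_mmap_lock:
 *
 *      down(&inode->i_sem);
 *      spin_lock(&mapping->i_mmap_lock);
 *      ... unmap and shrink the mapping ...
 *      spin_unlock(&mapping->i_mmap_lock);
 *      up(&inode->i_sem);
 */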

/*
 * Remove a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold a write_lock on the mapping's tree_lock.
 */
void __remove_from_page_cache(struct page *page)
{
        struct address_space *mapping = page->mapping;

        radix_tree_delete(&mapping->page_tree, page->index);
        page->mapping = NULL;
        mapping->nrpages--;
        pagecache_acct(-1);
}

void remove_from_page_cache(struct page *page)
{
        struct address_space *mapping = page->mapping;

        if (unlikely(!PageLocked(page)))
                PAGE_BUG(page);

        write_lock_irq(&mapping->tree_lock);
        __remove_from_page_cache(page);
        write_unlock_irq(&mapping->tree_lock);
}

static int sync_page(void *word)
{
        struct address_space *mapping;
        struct page *page;

        page = container_of((page_flags_t *)word, struct page, flags);

        /*
         * page_mapping() is being called without PG_locked held.
         * Some knowledge of the state and use of the page is used to
         * reduce the requirements down to a memory barrier.
         * The danger here is of a stale page_mapping() return value
         * indicating a struct address_space different from the one it's
         * associated with when it is associated with one.
         * After smp_mb(), it's either the correct page_mapping() for
         * the page, or an old page_mapping() and the page's own
         * page_mapping() has gone NULL.
         * The ->sync_page() address_space operation must tolerate
         * page_mapping() going NULL. By an amazing coincidence,
         * this comes about because none of the users of the page
         * in the ->sync_page() methods make essential use of the
         * page_mapping(), merely passing the page down to the backing
         * device's unplug functions when it's non-NULL, which in turn
         * ignore it for all cases but swap, where only page->private is
         * of interest. When page_mapping() does go NULL, the entire
         * call stack gracefully ignores the page and returns.
         * -- wli
         */
        smp_mb();
        mapping = page_mapping(page);
        if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
                mapping->a_ops->sync_page(page);
        io_schedule();
        return 0;
}

/**
 * filemap_fdatawrite_range - start writeback against all of a mapping's
 * dirty pages that lie within the byte offsets <start, end>
 * @mapping: address space structure to write
 * @start: offset in bytes where the range starts
 * @end: offset in bytes where the range ends
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
static int __filemap_fdatawrite_range(struct address_space *mapping,
        loff_t start, loff_t end, int sync_mode)
{
        int ret;
        struct writeback_control wbc = {
                .sync_mode = sync_mode,
                .nr_to_write = mapping->nrpages * 2,
                .start = start,
                .end = end,
        };

        if (!mapping_cap_writeback_dirty(mapping))
                return 0;

        ret = do_writepages(mapping, &wbc);
        return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
        int sync_mode)
{
        return __filemap_fdatawrite_range(mapping, 0, 0, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
        return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

static int filemap_fdatawrite_range(struct address_space *mapping,
        loff_t start, loff_t end)
{
        return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}

/*
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
        return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);
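
/*
 * Usage sketch (illustrative, not code from this file): kicking off a
 * non-blocking flush of an inode's dirty pagecache, e.g. from a
 * background writer:
 *
 *      err = filemap_flush(inode->i_mapping);
 */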

/*
 * Wait for writeback to complete against pages indexed by start->end
 * inclusive
 */
static int wait_on_page_writeback_range(struct address_space *mapping,
                                pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        int nr_pages;
        int ret = 0;
        pgoff_t index;

        if (end < start)
                return 0;

        pagevec_init(&pvec, 0);
        index = start;
        while ((index <= end) &&
                        (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                        PAGECACHE_TAG_WRITEBACK,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
                unsigned i;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        /* until radix tree lookup accepts end_index */
                        if (page->index > end)
                                continue;

                        wait_on_page_writeback(page);
                        if (PageError(page))
                                ret = -EIO;
                }
                pagevec_release(&pvec);
                cond_resched();
        }

        /* Check for outstanding write errors */
        if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                ret = -ENOSPC;
        if (test_and_clear_bit(AS_EIO, &mapping->flags))
                ret = -EIO;

        return ret;
}

/*
 * Write and wait upon all the pages in the passed range.  This is a "data
 * integrity" operation.  It waits upon in-flight writeout before starting and
 * waiting upon new writeout.  If there was an IO error, return it.
 *
 * We need to re-take i_sem during the generic_osync_inode list walk because
 * it is otherwise livelockable.
 */
int sync_page_range(struct inode *inode, struct address_space *mapping,
                        loff_t pos, size_t count)
{
        pgoff_t start = pos >> PAGE_CACHE_SHIFT;
        pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
        int ret;

        if (!mapping_cap_writeback_dirty(mapping) || !count)
                return 0;
        ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
        if (ret == 0) {
                down(&inode->i_sem);
                ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
                up(&inode->i_sem);
        }
        if (ret == 0)
                ret = wait_on_page_writeback_range(mapping, start, end);
        return ret;
}
EXPORT_SYMBOL(sync_page_range);
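
/*
 * Usage sketch (an assumption about callers, not code from this file):
 * an O_SYNC buffered write is typically followed by sync_page_range()
 * over the bytes just written:
 *
 *      written = generic_file_buffered_write(...);
 *      if (written > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode)))
 *              err = sync_page_range(inode, mapping, pos, written);
 */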

/*
 * Note: Holding i_sem across sync_page_range_nolock is not a good idea
 * as it forces O_SYNC writers to different parts of the same file
 * to be serialised right until io completion.
 */
int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
                        loff_t pos, size_t count)
{
        pgoff_t start = pos >> PAGE_CACHE_SHIFT;
        pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
        int ret;

        if (!mapping_cap_writeback_dirty(mapping) || !count)
                return 0;
        ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
        if (ret == 0)
                ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
        if (ret == 0)
                ret = wait_on_page_writeback_range(mapping, start, end);
        return ret;
}
EXPORT_SYMBOL(sync_page_range_nolock);

/**
 * filemap_fdatawait - walk the list of under-writeback pages of the given
 *     address space and wait for all of them.
 *
 * @mapping: address space structure to wait for
 */
int filemap_fdatawait(struct address_space *mapping)
{
        loff_t i_size = i_size_read(mapping->host);

        if (i_size == 0)
                return 0;

        return wait_on_page_writeback_range(mapping, 0,
                                (i_size - 1) >> PAGE_CACHE_SHIFT);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
        int retval = 0;

        if (mapping->nrpages) {
                retval = filemap_fdatawrite(mapping);
                if (retval == 0)
                        retval = filemap_fdatawait(mapping);
        }
        return retval;
}

int filemap_write_and_wait_range(struct address_space *mapping,
                                 loff_t lstart, loff_t lend)
{
        int retval = 0;

        if (mapping->nrpages) {
                retval = __filemap_fdatawrite_range(mapping, lstart, lend,
                                                    WB_SYNC_ALL);
                if (retval == 0)
                        retval = wait_on_page_writeback_range(mapping,
                                                    lstart >> PAGE_CACHE_SHIFT,
                                                    lend >> PAGE_CACHE_SHIFT);
        }
        return retval;
}
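
/*
 * Usage sketch: a filesystem's ->fsync() could rely on
 * filemap_write_and_wait() for the data pages and sync its metadata
 * separately (my_fs_sync_metadata() is a hypothetical helper, not a
 * real API):
 *
 *      err = filemap_write_and_wait(inode->i_mapping);
 *      if (!err)
 *              err = my_fs_sync_metadata(inode);
 */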

/*
 * This function is used to add newly allocated pagecache pages:
 * the page is new, so we can just run SetPageLocked() against it.
 * The other page state flags were set by rmqueue().
 *
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache(struct page *page, struct address_space *mapping,
                pgoff_t offset, int gfp_mask)
{
        int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);

        if (error == 0) {
                write_lock_irq(&mapping->tree_lock);
                error = radix_tree_insert(&mapping->page_tree, offset, page);
                if (!error) {
                        page_cache_get(page);
                        SetPageLocked(page);
                        page->mapping = mapping;
                        page->index = offset;
                        mapping->nrpages++;
                        pagecache_acct(1);
                }
                write_unlock_irq(&mapping->tree_lock);
                radix_tree_preload_end();
        }
        return error;
}

EXPORT_SYMBOL(add_to_page_cache);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t offset, int gfp_mask)
{
        int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
        if (ret == 0)
                lru_cache_add(page);
        return ret;
}
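
/*
 * Typical usage sketch (page_cache_read() later in this file does the
 * real thing): allocate a fresh page, try to insert it, and drop it on
 * failure:
 *
 *      page = page_cache_alloc_cold(mapping);
 *      if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL))
 *              page_cache_release(page);
 */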

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues, where the bucket discipline is to keep all waiters
 * on the same queue and wake them all when any of the pages
 * becomes available, and to have the woken contexts check that
 * the page they wanted really became available, this saves space
 * at the cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
        const struct zone *zone = page_zone(page);

        return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
        __wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

void fastcall wait_on_page_bit(struct page *page, int bit_nr)
{
        DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

        if (test_bit(bit_nr, &page->flags))
                __wait_on_bit(page_waitqueue(page), &wait, sync_page,
                                                        TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);
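
/*
 * The usual entry points are the wrappers in <linux/pagemap.h>, which
 * are roughly:
 *
 *      wait_on_page_locked(page)    -> wait_on_page_bit(page, PG_locked)
 *      wait_on_page_writeback(page) -> wait_on_page_bit(page, PG_writeback)
 *
 * each after first checking that the bit is actually set.
 */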

/**
 * unlock_page() - unlock a locked page
 *
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The first mb is necessary to safely close the critical section opened by the
 * TestSetPageLocked(), the second mb is necessary to enforce ordering between
 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
 * parallel wait_on_page_locked()).
 */
void fastcall unlock_page(struct page *page)
{
        smp_mb__before_clear_bit();
        if (!TestClearPageLocked(page))
                BUG();
        smp_mb__after_clear_bit();
        wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/*
 * End writeback against a page.
 */
void end_page_writeback(struct page *page)
{
        if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
                if (!test_clear_page_writeback(page))
                        BUG();
        }
        smp_mb__after_clear_bit();
        wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * Get a lock on the page, assuming we need to sleep to get it.
 *
 * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
 * random driver's requestfn sets TASK_RUNNING, we could busywait.  However
 * chances are that on the second loop, the block layer's plug list is empty,
 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
 */
void fastcall __lock_page(struct page *page)
{
        DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

        __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
                                                        TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

/*
 * a rather lightweight function, finding and getting a reference to a
 * hashed page atomically.
 */
struct page * find_get_page(struct address_space *mapping, unsigned long offset)
{
        struct page *page;

        read_lock_irq(&mapping->tree_lock);
        page = radix_tree_lookup(&mapping->page_tree, offset);
        if (page)
                page_cache_get(page);
        read_unlock_irq(&mapping->tree_lock);
        return page;
}

EXPORT_SYMBOL(find_get_page);

/*
 * Same as above, but trylock it instead of incrementing the count.
 */
struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
{
        struct page *page;

        read_lock_irq(&mapping->tree_lock);
        page = radix_tree_lookup(&mapping->page_tree, offset);
        if (page && TestSetPageLocked(page))
                page = NULL;
        read_unlock_irq(&mapping->tree_lock);
        return page;
}

EXPORT_SYMBOL(find_trylock_page);

/**
 * find_lock_page - locate, pin and lock a pagecache page
 *
 * @mapping - the address_space to search
 * @offset - the page index
 *
 * Locates the desired pagecache page, locks it, increments its reference
 * count and returns its address.
 *
 * Returns NULL if the page was not present. find_lock_page() may sleep.
 */
struct page *find_lock_page(struct address_space *mapping,
                                unsigned long offset)
{
        struct page *page;

        read_lock_irq(&mapping->tree_lock);
repeat:
        page = radix_tree_lookup(&mapping->page_tree, offset);
        if (page) {
                page_cache_get(page);
                if (TestSetPageLocked(page)) {
                        read_unlock_irq(&mapping->tree_lock);
                        lock_page(page);
                        read_lock_irq(&mapping->tree_lock);

                        /* Has the page been truncated while we slept? */
                        if (page->mapping != mapping || page->index != offset) {
                                unlock_page(page);
                                page_cache_release(page);
                                goto repeat;
                        }
                }
        }
        read_unlock_irq(&mapping->tree_lock);
        return page;
}

EXPORT_SYMBOL(find_lock_page);

/**
 * find_or_create_page - locate or add a pagecache page
 *
 * @mapping - the page's address_space
 * @index - the page's index into the mapping
 * @gfp_mask - page allocation mode
 *
 * Locates a page in the pagecache.  If the page is not present, a new page
 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
 * LRU list.  The returned page is locked and has its reference count
 * incremented.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
 * allocation!
 *
 * find_or_create_page() returns the desired page's address, or NULL on
 * memory exhaustion.
 */
struct page *find_or_create_page(struct address_space *mapping,
                unsigned long index, unsigned int gfp_mask)
{
        struct page *page, *cached_page = NULL;
        int err;
repeat:
        page = find_lock_page(mapping, index);
        if (!page) {
                if (!cached_page) {
                        cached_page = alloc_page(gfp_mask);
                        if (!cached_page)
                                return NULL;
                }
                err = add_to_page_cache_lru(cached_page, mapping,
                                        index, gfp_mask);
                if (!err) {
                        page = cached_page;
                        cached_page = NULL;
                } else if (err == -EEXIST)
                        goto repeat;
        }
        if (cached_page)
                page_cache_release(cached_page);
        return page;
}

EXPORT_SYMBOL(find_or_create_page);
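
/*
 * Usage sketch: grab_cache_page() in <linux/pagemap.h> is the common
 * wrapper, roughly equivalent to:
 *
 *      find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
 */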

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:    The address_space to search
 * @start:      The starting page index
 * @nr_pages:   The maximum number of pages
 * @pages:      Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                            unsigned int nr_pages, struct page **pages)
{
        unsigned int i;
        unsigned int ret;

        read_lock_irq(&mapping->tree_lock);
        ret = radix_tree_gang_lookup(&mapping->page_tree,
                                (void **)pages, start, nr_pages);
        for (i = 0; i < ret; i++)
                page_cache_get(pages[i]);
        read_unlock_irq(&mapping->tree_lock);
        return ret;
}
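
/*
 * Usage sketch: callers must drop the reference taken on every returned
 * page, e.g.:
 *
 *      nr = find_get_pages(mapping, start, PAGEVEC_SIZE, pages);
 *      for (i = 0; i < nr; i++) {
 *              ... examine pages[i] ...
 *              page_cache_release(pages[i]);
 *      }
 */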

/*
 * Like find_get_pages, except we only return pages which are tagged with
 * `tag'.   We update *index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                        int tag, unsigned int nr_pages, struct page **pages)
{
        unsigned int i;
        unsigned int ret;

        read_lock_irq(&mapping->tree_lock);
        ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
                                (void **)pages, *index, nr_pages, tag);
        for (i = 0; i < ret; i++)
                page_cache_get(pages[i]);
        if (ret)
                *index = pages[ret - 1]->index + 1;
        read_unlock_irq(&mapping->tree_lock);
        return ret;
}

/*
 * Same as grab_cache_page, but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
struct page *
grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
{
        struct page *page = find_get_page(mapping, index);
        unsigned int gfp_mask;

        if (page) {
                if (!TestSetPageLocked(page))
                        return page;
                page_cache_release(page);
                return NULL;
        }
        gfp_mask = mapping_gfp_mask(mapping) & ~__GFP_FS;
        page = alloc_pages(gfp_mask, 0);
        if (page && add_to_page_cache_lru(page, mapping, index, gfp_mask)) {
                page_cache_release(page);
                page = NULL;
        }
        return page;
}

EXPORT_SYMBOL(grab_cache_page_nowait);

/*
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level
 * stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 *
 * Note the struct file* is only passed for the use of readpage.  It may be
 * NULL.
 */
void do_generic_mapping_read(struct address_space *mapping,
                             struct file_ra_state *_ra,
                             struct file *filp,
                             loff_t *ppos,
                             read_descriptor_t *desc,
                             read_actor_t actor)
{
        struct inode *inode = mapping->host;
        unsigned long index;
        unsigned long end_index;
        unsigned long offset;
        unsigned long last_index;
        unsigned long next_index;
        unsigned long prev_index;
        loff_t isize;
        struct page *cached_page;
        int error;
        struct file_ra_state ra = *_ra;

        cached_page = NULL;
        index = *ppos >> PAGE_CACHE_SHIFT;
        next_index = index;
        prev_index = ra.prev_page;
        last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
        offset = *ppos & ~PAGE_CACHE_MASK;

        isize = i_size_read(inode);
        if (!isize)
                goto out;

        end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
        for (;;) {
                struct page *page;
                unsigned long nr, ret;

                /* nr is the maximum number of bytes to copy from this page */
                nr = PAGE_CACHE_SIZE;
                if (index >= end_index) {
                        if (index > end_index)
                                goto out;
                        nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
                        if (nr <= offset) {
                                goto out;
                        }
                }
                nr = nr - offset;

                cond_resched();
                if (index == next_index)
                        next_index = page_cache_readahead(mapping, &ra, filp,
                                        index, last_index - index);

find_page:
                page = find_get_page(mapping, index);
                if (unlikely(page == NULL)) {
                        handle_ra_miss(mapping, &ra, index);
                        goto no_cached_page;
                }
                if (!PageUptodate(page))
                        goto page_not_up_to_date;
page_ok:

                /* If users can be writing to this page using arbitrary
                 * virtual addresses, take care about potential aliasing
                 * before reading the page on the kernel side.
                 */
                if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page);

                /*
                 * When (part of) the same page is read multiple times
                 * in succession, only mark it as accessed the first time.
                 */
                if (prev_index != index)
                        mark_page_accessed(page);
                prev_index = index;

                /*
                 * Ok, we have the page, and it's up-to-date, so
                 * now we can copy it to user space...
                 *
                 * The actor routine returns how many bytes were actually used..
                 * NOTE! This may not be the same as how much of a user buffer
                 * we filled up (we may be padding etc), so we can only update
                 * "pos" here (the actor routine has to update the user buffer
                 * pointers and the remaining count).
                 */
                ret = actor(desc, page, offset, nr);
                offset += ret;
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;

                page_cache_release(page);
                if (ret == nr && desc->count)
                        continue;
                goto out;

page_not_up_to_date:
                /* Get exclusive access to the page ... */
                lock_page(page);

                /* Did it get unhashed before we got the lock? */
                if (!page->mapping) {
                        unlock_page(page);
                        page_cache_release(page);
                        continue;
                }

                /* Did somebody else fill it already? */
                if (PageUptodate(page)) {
                        unlock_page(page);
                        goto page_ok;
                }

readpage:
                /* Start the actual read. The read will unlock the page. */
                error = mapping->a_ops->readpage(filp, page);

                if (unlikely(error))
                        goto readpage_error;

                if (!PageUptodate(page)) {
                        lock_page(page);
                        if (!PageUptodate(page)) {
                                if (page->mapping == NULL) {
                                        /*
                                         * invalidate_inode_pages got it
                                         */
                                        unlock_page(page);
                                        page_cache_release(page);
                                        goto find_page;
                                }
                                unlock_page(page);
                                error = -EIO;
                                goto readpage_error;
                        }
                        unlock_page(page);
                }

                /*
                 * i_size must be checked after we have done ->readpage.
                 *
                 * Checking i_size after the readpage allows us to calculate
                 * the correct value for "nr", which means the zero-filled
                 * part of the page is not copied back to userspace (unless
                 * another truncate extends the file - this is desired though).
                 */
                isize = i_size_read(inode);
                end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
                if (unlikely(!isize || index > end_index)) {
                        page_cache_release(page);
                        goto out;
                }

                /* nr is the maximum number of bytes to copy from this page */
                nr = PAGE_CACHE_SIZE;
                if (index == end_index) {
                        nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
                        if (nr <= offset) {
                                page_cache_release(page);
                                goto out;
                        }
                }
                nr = nr - offset;
                goto page_ok;

readpage_error:
                /* UHHUH! A synchronous read error occurred. Report it */
                desc->error = error;
                page_cache_release(page);
                goto out;

no_cached_page:
                /*
                 * Ok, it wasn't cached, so we need to create a new
                 * page..
                 */
                if (!cached_page) {
                        cached_page = page_cache_alloc_cold(mapping);
                        if (!cached_page) {
                                desc->error = -ENOMEM;
                                goto out;
                        }
                }
                error = add_to_page_cache_lru(cached_page, mapping,
                                                index, GFP_KERNEL);
                if (error) {
                        if (error == -EEXIST)
                                goto find_page;
                        desc->error = error;
                        goto out;
                }
                page = cached_page;
                cached_page = NULL;
                goto readpage;
        }

out:
        *_ra = ra;

        *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
        if (cached_page)
                page_cache_release(cached_page);
        if (filp)
                file_accessed(filp);
}

EXPORT_SYMBOL(do_generic_mapping_read);
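
/*
 * Note: do_generic_file_read() in <linux/fs.h> is the usual wrapper;
 * it is roughly:
 *
 *      do_generic_mapping_read(filp->f_mapping, &filp->f_ra, filp,
 *                              ppos, desc, actor);
 */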

int file_read_actor(read_descriptor_t *desc, struct page *page,
                        unsigned long offset, unsigned long size)
{
        char *kaddr;
        unsigned long left, count = desc->count;

        if (size > count)
                size = count;

        /*
         * Faults on the destination of a read are common, so do it before
         * taking the kmap.
         */
        if (!fault_in_pages_writeable(desc->arg.buf, size)) {
                kaddr = kmap_atomic(page, KM_USER0);
                left = __copy_to_user_inatomic(desc->arg.buf,
                                                kaddr + offset, size);
                kunmap_atomic(kaddr, KM_USER0);
                if (left == 0)
                        goto success;
        }

        /* Do it the slow way */
        kaddr = kmap(page);
        left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
        kunmap(page);

        if (left) {
                size -= left;
                desc->error = -EFAULT;
        }
success:
        desc->count = count - size;
        desc->written += size;
        desc->arg.buf += size;
        return size;
}

/*
 * This is the "read()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
__generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                unsigned long nr_segs, loff_t *ppos)
{
        struct file *filp = iocb->ki_filp;
        ssize_t retval;
        unsigned long seg;
        size_t count;

        count = 0;
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *iv = &iov[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                count += iv->iov_len;
                if (unlikely((ssize_t)(count|iv->iov_len) < 0))
                        return -EINVAL;
                if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
                        continue;
                if (seg == 0)
                        return -EFAULT;
                nr_segs = seg;
                count -= iv->iov_len;   /* This segment is no good */
                break;
        }

        /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
        if (filp->f_flags & O_DIRECT) {
                loff_t pos = *ppos, size;
                struct address_space *mapping;
                struct inode *inode;

                mapping = filp->f_mapping;
                inode = mapping->host;
                retval = 0;
                if (!count)
                        goto out; /* skip atime */
                size = i_size_read(inode);
                if (pos < size) {
                        retval = generic_file_direct_IO(READ, iocb,
                                                iov, pos, nr_segs);
                        if (retval >= 0 && !is_sync_kiocb(iocb))
                                retval = -EIOCBQUEUED;
                        if (retval > 0)
                                *ppos = pos + retval;
                }
                file_accessed(filp);
                goto out;
        }

        retval = 0;
        if (count) {
                for (seg = 0; seg < nr_segs; seg++) {
                        read_descriptor_t desc;

                        desc.written = 0;
                        desc.arg.buf = iov[seg].iov_base;
                        desc.count = iov[seg].iov_len;
                        if (desc.count == 0)
                                continue;
                        desc.error = 0;
                        do_generic_file_read(filp,ppos,&desc,file_read_actor);
                        retval += desc.written;
                        if (!retval) {
                                retval = desc.error;
                                break;
                        }
                }
        }
out:
        return retval;
}

EXPORT_SYMBOL(__generic_file_aio_read);

ssize_t
generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
{
        struct iovec local_iov = { .iov_base = buf, .iov_len = count };

        BUG_ON(iocb->ki_pos != pos);
        return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
}

EXPORT_SYMBOL(generic_file_aio_read);

ssize_t
generic_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
        struct iovec local_iov = { .iov_base = buf, .iov_len = count };
        struct kiocb kiocb;
        ssize_t ret;

        init_sync_kiocb(&kiocb, filp);
        ret = __generic_file_aio_read(&kiocb, &local_iov, 1, ppos);
        if (-EIOCBQUEUED == ret)
                ret = wait_on_sync_kiocb(&kiocb);
        return ret;
}

EXPORT_SYMBOL(generic_file_read);

int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
{
        ssize_t written;
        unsigned long count = desc->count;
        struct file *file = desc->arg.data;

        if (size > count)
                size = count;

        written = file->f_op->sendpage(file, page, offset,
                                       size, &file->f_pos, size<count);
        if (written < 0) {
                desc->error = written;
                written = 0;
        }
        desc->count = count - written;
        desc->written += written;
        return written;
}

ssize_t generic_file_sendfile(struct file *in_file, loff_t *ppos,
                         size_t count, read_actor_t actor, void *target)
{
        read_descriptor_t desc;

        if (!count)
                return 0;

        desc.written = 0;
        desc.count = count;
        desc.arg.data = target;
        desc.error = 0;

        do_generic_file_read(in_file, ppos, &desc, actor);
        if (desc.written)
                return desc.written;
        return desc.error;
}

EXPORT_SYMBOL(generic_file_sendfile);

static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
             unsigned long index, unsigned long nr)
{
        if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
                return -EINVAL;

        force_page_cache_readahead(mapping, filp, index,
                                        max_sane_readahead(nr));
        return 0;
}

asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
{
        ssize_t ret;
        struct file *file;

        ret = -EBADF;
        file = fget(fd);
        if (file) {
                if (file->f_mode & FMODE_READ) {
                        struct address_space *mapping = file->f_mapping;
                        unsigned long start = offset >> PAGE_CACHE_SHIFT;
                        unsigned long end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
                        unsigned long len = end - start + 1;
                        ret = do_readahead(mapping, file, start, len);
                }
                fput(file);
        }
        return ret;
}
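
/*
 * Userspace sketch: this implements the readahead(2) system call,
 * used for example to prefetch a whole file:
 *
 *      readahead(fd, 0, st.st_size);
 */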

#ifdef CONFIG_MMU
/*
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
static int fastcall page_cache_read(struct file * file, unsigned long offset)
{
        struct address_space *mapping = file->f_mapping;
        struct page *page;
        int error;

        page = page_cache_alloc_cold(mapping);
        if (!page)
                return -ENOMEM;

        error = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
        if (!error) {
                error = mapping->a_ops->readpage(file, page);
                page_cache_release(page);
                return error;
        }

        /*
         * We arrive here in the unlikely event that someone
         * raced with us and added our page to the cache first
         * or we are out of memory for radix-tree nodes.
         */
        page_cache_release(page);
        return error == -EEXIST ? 0 : error;
}

#define MMAP_LOTSAMISS  (100)

/*
 * filemap_nopage() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 */
struct page *filemap_nopage(struct vm_area_struct *area,
                                unsigned long address, int *type)
{
        int error;
        struct file *file = area->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct file_ra_state *ra = &file->f_ra;
        struct inode *inode = mapping->host;
        struct page *page;
        unsigned long size, pgoff;
        int did_readaround = 0, majmin = VM_FAULT_MINOR;

        pgoff = ((address-area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;

retry_all:
        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (pgoff >= size)
                goto outside_data_content;

        /* If we don't want any read-ahead, don't bother */
        if (VM_RandomReadHint(area))
                goto no_cached_page;

        /*
         * The readahead code wants to be told about each and every page
         * so it can build and shrink its windows appropriately
         *
         * For sequential accesses, we use the generic readahead logic.
         */
        if (VM_SequentialReadHint(area))
                page_cache_readahead(mapping, ra, file, pgoff, 1);

        /*
         * Do we have something in the page cache already?
         */
retry_find:
        page = find_get_page(mapping, pgoff);
        if (!page) {
                unsigned long ra_pages;

                if (VM_SequentialReadHint(area)) {
                        handle_ra_miss(mapping, ra, pgoff);
                        goto no_cached_page;
                }
                ra->mmap_miss++;

                /*
                 * Do we miss much more than hit in this file? If so,
                 * stop bothering with read-ahead. It will only hurt.
                 */
                if (ra->mmap_miss > ra->mmap_hit + MMAP_LOTSAMISS)
                        goto no_cached_page;

                /*
                 * To keep the pgmajfault counter straight, we need to
                 * check did_readaround, as this is an inner loop.
                 */
                if (!did_readaround) {
                        majmin = VM_FAULT_MAJOR;
                        inc_page_state(pgmajfault);
                }
                did_readaround = 1;
                ra_pages = max_sane_readahead(file->f_ra.ra_pages);
                if (ra_pages) {
                        pgoff_t start = 0;

                        if (pgoff > ra_pages / 2)
                                start = pgoff - ra_pages / 2;
                        do_page_cache_readahead(mapping, file, start, ra_pages);
                }
                page = find_get_page(mapping, pgoff);
                if (!page)
                        goto no_cached_page;
        }

        if (!did_readaround)
                ra->mmap_hit++;

        /*
         * Ok, found a page in the page cache, now we need to check
         * that it's up-to-date.
         */
        if (!PageUptodate(page))
                goto page_not_uptodate;

success:
        /*
         * Found the page and have a reference on it.
         */
        mark_page_accessed(page);
        if (type)
                *type = majmin;
        return page;

outside_data_content:
        /*
         * An external ptracer can access pages that normally aren't
         * accessible..
         */
        if (area->vm_mm == current->mm)
                return NULL;
        /* Fall through to the non-read-ahead case */
no_cached_page:
        /*
         * We're only likely to ever get here if MADV_RANDOM is in
         * effect.
         */
        error = page_cache_read(file, pgoff);
        grab_swap_token();

        /*
         * The page we want has now been added to the page cache.
         * In the unlikely event that someone removed it in the
         * meantime, we'll just come back here and read it again.
         */
        if (error >= 0)
                goto retry_find;

        /*
         * An error return from page_cache_read can result if the
         * system is low on memory, or a problem occurs while trying
         * to schedule I/O.
         */
        if (error == -ENOMEM)
                return NOPAGE_OOM;
        return NULL;

page_not_uptodate:
        if (!did_readaround) {
                majmin = VM_FAULT_MAJOR;
                inc_page_state(pgmajfault);
        }
        lock_page(page);

        /* Did it get unhashed while we waited for it? */
        if (!page->mapping) {
                unlock_page(page);
                page_cache_release(page);
                goto retry_all;
        }

        /* Did somebody else get it up-to-date? */
        if (PageUptodate(page)) {
                unlock_page(page);
                goto success;
        }

        if (!mapping->a_ops->readpage(file, page)) {
                wait_on_page_locked(page);
                if (PageUptodate(page))
                        goto success;
        }

        /*
         * Umm, take care of errors if the page isn't up-to-date.
         * Try to re-read it _once_. We do this synchronously,
         * because there really aren't any performance issues here
         * and we need to check for errors.
         */
        lock_page(page);

        /* Somebody truncated the page on us? */
        if (!page->mapping) {
                unlock_page(page);
                page_cache_release(page);
                goto retry_all;
        }

        /* Somebody else successfully read it in? */
        if (PageUptodate(page)) {
                unlock_page(page);
                goto success;
        }
        ClearPageError(page);
        if (!mapping->a_ops->readpage(file, page)) {
                wait_on_page_locked(page);
                if (PageUptodate(page))
                        goto success;
        }

        /*
         * Things didn't work out. Return NULL to tell the
         * mm layer so, possibly freeing the page cache page first.
         */
        page_cache_release(page);
        return NULL;
}

EXPORT_SYMBOL(filemap_nopage);

static struct page * filemap_getpage(struct file *file, unsigned long pgoff,
                                        int nonblock)
{
        struct address_space *mapping = file->f_mapping;
        struct page *page;
        int error;

        /*
         * Do we have something in the page cache already?
         */
retry_find:
        page = find_get_page(mapping, pgoff);
        if (!page) {
                if (nonblock)
                        return NULL;
                goto no_cached_page;
        }

        /*
         * Ok, found a page in the page cache, now we need to check
         * that it's up-to-date.
         */
        if (!PageUptodate(page)) {
                if (nonblock) {
                        page_cache_release(page);
                        return NULL;
                }
                goto page_not_uptodate;
        }

success:
        /*
         * Found the page and have a reference on it.
         */
        mark_page_accessed(page);
        return page;

no_cached_page:
        error = page_cache_read(file, pgoff);

        /*
         * The page we want has now been added to the page cache.
         * In the unlikely event that someone removed it in the
         * meantime, we'll just come back here and read it again.
         */
        if (error >= 0)
                goto retry_find;

        /*
         * An error return from page_cache_read can result if the
         * system is low on memory, or a problem occurs while trying
         * to schedule I/O.
         */
        return NULL;

page_not_uptodate:
        lock_page(page);

        /* Did it get unhashed while we waited for it? */
        if (!page->mapping) {
                unlock_page(page);
                goto err;
        }

        /* Did somebody else get it up-to-date? */
        if (PageUptodate(page)) {
                unlock_page(page);
                goto success;
        }

        if (!mapping->a_ops->readpage(file, page)) {
                wait_on_page_locked(page);
                if (PageUptodate(page))
                        goto success;
        }

        /*
         * Umm, take care of errors if the page isn't up-to-date.
         * Try to re-read it _once_. We do this synchronously,
         * because there really aren't any performance issues here
         * and we need to check for errors.
         */
        lock_page(page);

        /* Somebody truncated the page on us? */
        if (!page->mapping) {
                unlock_page(page);
                goto err;
        }
        /* Somebody else successfully read it in? */
        if (PageUptodate(page)) {
                unlock_page(page);
                goto success;
        }

        ClearPageError(page);
        if (!mapping->a_ops->readpage(file, page)) {
                wait_on_page_locked(page);
                if (PageUptodate(page))
                        goto success;
        }

        /*
         * Things didn't work out. Return NULL to tell the
         * mm layer so, possibly freeing the page cache page first.
         */
err:
        page_cache_release(page);

        return NULL;
}
1489
1490 int filemap_populate(struct vm_area_struct *vma, unsigned long addr,
1491                 unsigned long len, pgprot_t prot, unsigned long pgoff,
1492                 int nonblock)
1493 {
1494         struct file *file = vma->vm_file;
1495         struct address_space *mapping = file->f_mapping;
1496         struct inode *inode = mapping->host;
1497         unsigned long size;
1498         struct mm_struct *mm = vma->vm_mm;
1499         struct page *page;
1500         int err;
1501
1502         if (!nonblock)
1503                 force_page_cache_readahead(mapping, vma->vm_file,
1504                                         pgoff, len >> PAGE_CACHE_SHIFT);
1505
1506 repeat:
1507         size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1508         if (pgoff + (len >> PAGE_CACHE_SHIFT) > size)
1509                 return -EINVAL;
1510
1511         page = filemap_getpage(file, pgoff, nonblock);
1512         if (!page && !nonblock)
1513                 return -ENOMEM;
1514         if (page) {
1515                 err = install_page(mm, vma, addr, page, prot);
1516                 if (err) {
1517                         page_cache_release(page);
1518                         return err;
1519                 }
1520         } else {
1521                 err = install_file_pte(mm, vma, addr, pgoff, prot);
1522                 if (err)
1523                         return err;
1524         }
1525
1526         len -= PAGE_SIZE;
1527         addr += PAGE_SIZE;
1528         pgoff++;
1529         if (len)
1530                 goto repeat;
1531
1532         return 0;
1533 }
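
/*
 * Illustrative sketch (user-space, compiled out): the ->populate hook
 * above services nonlinear mappings created with remap_file_pages(2)
 * on a MAP_SHARED file mapping.  Identifiers below are examples only.
 */
#if 0	/* example only */
#include <sys/mman.h>
#include <unistd.h>

static void example_nonlinear(int fd)
{
	long page = sysconf(_SC_PAGESIZE);
	/* Map two file pages; nonlinear remapping requires MAP_SHARED. */
	char *p = mmap(NULL, 2 * page, PROT_READ, MAP_SHARED, fd, 0);

	/*
	 * Rebind the first virtual page to file page 1; the kernel
	 * services this through vma->vm_ops->populate, which for a
	 * generic disk file is filemap_populate() above.
	 */
	remap_file_pages(p, page, 0, 1, 0);
}
#endif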
1534
1535 struct vm_operations_struct generic_file_vm_ops = {
1536         .nopage         = filemap_nopage,
1537         .populate       = filemap_populate,
1538 };
1539
1540 /* This is used for a general mmap of a disk file */
1541
1542 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1543 {
1544         struct address_space *mapping = file->f_mapping;
1545
1546         if (!mapping->a_ops->readpage)
1547                 return -ENOEXEC;
1548         file_accessed(file);
1549         vma->vm_ops = &generic_file_vm_ops;
1550         return 0;
1551 }
1552 EXPORT_SYMBOL(filemap_populate);
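
/*
 * Illustrative sketch: a filesystem typically enables the code above by
 * pointing its file_operations at generic_file_mmap().  The structure
 * below is hypothetical and shown only for the wiring.
 */
#if 0	/* example only */
static struct file_operations example_file_ops = {
	.read	= generic_file_read,
	.write	= generic_file_write,
	.mmap	= generic_file_mmap,
};
#endif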
1553
1554 /*
1555  * This is for filesystems which do not implement ->writepage.
1556  */
1557 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
1558 {
1559         if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
1560                 return -EINVAL;
1561         return generic_file_mmap(file, vma);
1562 }
1563 #else
1564 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1565 {
1566         return -ENOSYS;
1567 }
1568 int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
1569 {
1570         return -ENOSYS;
1571 }
1572 #endif /* CONFIG_MMU */
1573
1574 EXPORT_SYMBOL(generic_file_mmap);
1575 EXPORT_SYMBOL(generic_file_readonly_mmap);
1576
1577 static inline struct page *__read_cache_page(struct address_space *mapping,
1578                                 unsigned long index,
1579                                 int (*filler)(void *,struct page*),
1580                                 void *data)
1581 {
1582         struct page *page, *cached_page = NULL;
1583         int err;
1584 repeat:
1585         page = find_get_page(mapping, index);
1586         if (!page) {
1587                 if (!cached_page) {
1588                         cached_page = page_cache_alloc_cold(mapping);
1589                         if (!cached_page)
1590                                 return ERR_PTR(-ENOMEM);
1591                 }
1592                 err = add_to_page_cache_lru(cached_page, mapping,
1593                                         index, GFP_KERNEL);
1594                 if (err == -EEXIST)
1595                         goto repeat;
1596                 if (err < 0) {
1597                         /* Presumably ENOMEM for radix tree node */
1598                         page_cache_release(cached_page);
1599                         return ERR_PTR(err);
1600                 }
1601                 page = cached_page;
1602                 cached_page = NULL;
1603                 err = filler(data, page);
1604                 if (err < 0) {
1605                         page_cache_release(page);
1606                         page = ERR_PTR(err);
1607                 }
1608         }
1609         if (cached_page)
1610                 page_cache_release(cached_page);
1611         return page;
1612 }
1613
1614 /*
1615  * Read into the page cache. If a page already exists and
1616  * PageUptodate() is not set, try to fill the page.
1617  */
1618 struct page *read_cache_page(struct address_space *mapping,
1619                                 unsigned long index,
1620                                 int (*filler)(void *,struct page*),
1621                                 void *data)
1622 {
1623         struct page *page;
1624         int err;
1625
1626 retry:
1627         page = __read_cache_page(mapping, index, filler, data);
1628         if (IS_ERR(page))
1629                 goto out;
1630         mark_page_accessed(page);
1631         if (PageUptodate(page))
1632                 goto out;
1633
1634         lock_page(page);
1635         if (!page->mapping) {
1636                 unlock_page(page);
1637                 page_cache_release(page);
1638                 goto retry;
1639         }
1640         if (PageUptodate(page)) {
1641                 unlock_page(page);
1642                 goto out;
1643         }
1644         err = filler(data, page);
1645         if (err < 0) {
1646                 page_cache_release(page);
1647                 page = ERR_PTR(err);
1648         }
1649  out:
1650         return page;
1651 }
1652
1653 EXPORT_SYMBOL(read_cache_page);
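
/*
 * Illustrative sketch of a typical read_cache_page() caller: pass the
 * address_space's own ->readpage as the filler and the struct file as
 * the opaque data argument.  The helper below is hypothetical; since
 * ->readpage may complete asynchronously, wait and re-check uptodate.
 */
#if 0	/* example only */
static struct page *example_read_page(struct file *file, unsigned long n)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;

	page = read_cache_page(mapping, n,
		(int (*)(void *, struct page *))mapping->a_ops->readpage,
		file);
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			/* The filler ran but the read failed. */
			page_cache_release(page);
			page = ERR_PTR(-EIO);
		}
	}
	return page;
}
#endif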
1654
1655 /*
1656  * If the page was newly created, increment its refcount and add it to the
1657  * caller's lru-buffering pagevec.  This function is specifically for
1658  * generic_file_write().
1659  */
1660 static inline struct page *
1661 __grab_cache_page(struct address_space *mapping, unsigned long index,
1662                         struct page **cached_page, struct pagevec *lru_pvec)
1663 {
1664         int err;
1665         struct page *page;
1666 repeat:
1667         page = find_lock_page(mapping, index);
1668         if (!page) {
1669                 if (!*cached_page) {
1670                         *cached_page = page_cache_alloc(mapping);
1671                         if (!*cached_page)
1672                                 return NULL;
1673                 }
1674                 err = add_to_page_cache(*cached_page, mapping,
1675                                         index, GFP_KERNEL);
1676                 if (err == -EEXIST)
1677                         goto repeat;
1678                 if (err == 0) {
1679                         page = *cached_page;
1680                         page_cache_get(page);
1681                         if (!pagevec_add(lru_pvec, page))
1682                                 __pagevec_lru_add(lru_pvec);
1683                         *cached_page = NULL;
1684                 }
1685         }
1686         return page;
1687 }
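
/*
 * Illustrative sketch of the pagevec batching pattern used above:
 * pagevec_add() returns 0 once the vector is full, at which point the
 * whole batch is moved to the LRU in one call.  Hypothetical helper.
 */
#if 0	/* example only */
static void example_lru_add_batch(struct page **pages, int nr)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec, 0);
	for (i = 0; i < nr; i++) {
		page_cache_get(pages[i]);
		if (!pagevec_add(&pvec, pages[i]))
			__pagevec_lru_add(&pvec);	/* batch is full */
	}
	pagevec_lru_add(&pvec);			/* drain the remainder */
}
#endif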
1688
1689 /*
1690  * The logic we want is
1691  *
1692  *      if suid or (sgid and xgrp)
1693  *              remove privs
1694  */
1695 int remove_suid(struct dentry *dentry)
1696 {
1697         mode_t mode = dentry->d_inode->i_mode;
1698         int kill = 0;
1699         int result = 0;
1700
1701         /* suid must always be killed */
1702         if (unlikely(mode & S_ISUID))
1703                 kill = ATTR_KILL_SUID;
1704
1705         /*
1706          * sgid without the group-exec bit is just a mandatory locking mark;
1707          * leave it alone.  If group-exec is set, it's a real sgid; kill it.
1708          */
1709         if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1710                 kill |= ATTR_KILL_SGID;
1711
1712         if (unlikely(kill && !capable(CAP_FSETID))) {
1713                 struct iattr newattrs;
1714
1715                 newattrs.ia_valid = ATTR_FORCE | kill;
1716                 result = notify_change(dentry, &newattrs);
1717         }
1718         return result;
1719 }
1720 EXPORT_SYMBOL(remove_suid);
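
/*
 * Worked examples of the rule above (modes in octal):
 *   04700: suid                    -> kill SUID (exec bits irrelevant)
 *   02610: sgid with group-exec    -> kill SGID
 *   02600: sgid, no group-exec     -> mandatory-locking mark, untouched
 */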
1721
1722 /*
1723  * Copy as much as we can into the page and return the number of bytes which
1724  * were successfully copied.  If a fault is encountered then clear the page
1725  * out to (offset+bytes) and return the number of bytes which were copied.
1726  */
1727 static inline size_t
1728 filemap_copy_from_user(struct page *page, unsigned long offset,
1729                         const char __user *buf, unsigned bytes)
1730 {
1731         char *kaddr;
1732         int left;
1733
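        /*
         * Fast path: copy under an atomic kmap.  The in-atomic copy may
         * stop short if the user page is not resident, because no page
         * fault can be taken while we are atomic.
         */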
1734         kaddr = kmap_atomic(page, KM_USER0);
1735         left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
1736         kunmap_atomic(kaddr, KM_USER0);
1737
1738         if (left != 0) {
1739                 /* Do it the slow way */
1740                 kaddr = kmap(page);
1741                 left = __copy_from_user(kaddr + offset, buf, bytes);
1742                 kunmap(page);
1743         }
1744         return bytes - left;
1745 }
1746
1747 static size_t
1748 __filemap_copy_from_user_iovec(char *vaddr,
1749                         const struct iovec *iov, size_t base, size_t bytes)
1750 {
1751         size_t copied = 0, left = 0;
1752
1753         while (bytes) {
1754                 char __user *buf = iov->iov_base + base;
1755                 int copy = min(bytes, iov->iov_len - base);
1756
1757                 base = 0;
1758                 left = __copy_from_user_inatomic(vaddr, buf, copy);
1759                 copied += copy;
1760                 bytes -= copy;
1761                 vaddr += copy;
1762                 iov++;
1763
1764                 if (unlikely(left)) {
1765                         /* zero the rest of the target like __copy_from_user */
1766                         if (bytes)
1767                                 memset(vaddr, 0, bytes);
1768                         break;
1769                 }
1770         }
1771         return copied - left;
1772 }
1773
1774 /*
1775  * This has the same side effects and return value as filemap_copy_from_user().
1776  * The difference is that on a fault we need to memset the remainder of the
1777  * page (out to offset+bytes), to emulate filemap_copy_from_user()'s
1778  * single-segment behaviour.
1779  */
1780 static inline size_t
1781 filemap_copy_from_user_iovec(struct page *page, unsigned long offset,
1782                         const struct iovec *iov, size_t base, size_t bytes)
1783 {
1784         char *kaddr;
1785         size_t copied;
1786
1787         kaddr = kmap_atomic(page, KM_USER0);
1788         copied = __filemap_copy_from_user_iovec(kaddr + offset, iov,
1789                                                 base, bytes);
1790         kunmap_atomic(kaddr, KM_USER0);
1791         if (copied != bytes) {
1792                 kaddr = kmap(page);
1793                 copied = __filemap_copy_from_user_iovec(kaddr + offset, iov,
1794                                                         base, bytes);
1795                 kunmap(page);
1796         }
1797         return copied;
1798 }
1799
1800 static inline void
1801 filemap_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)
1802 {
1803         const struct iovec *iov = *iovp;
1804         size_t base = *basep;
1805
1806         while (bytes) {
1807                 int copy = min(bytes, iov->iov_len - base);
1808
1809                 bytes -= copy;
1810                 base += copy;
1811                 if (iov->iov_len == base) {
1812                         iov++;
1813                         base = 0;
1814                 }
1815         }
1816         *iovp = iov;
1817         *basep = base;
1818 }
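
/*
 * Worked example of the cursor advance above: with iovecs of length 4
 * and 16, starting at (*iovp = &iov[0], *basep = 0) and bytes = 10,
 * the first 4 bytes exhaust iov[0] and the remaining 6 land in iov[1],
 * leaving the cursor at (*iovp = &iov[1], *basep = 6).
 */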
1819
1820 /*
1821  * Performs necessary checks before doing a write
1822  *
1823  * Can adjust the write position or the number of bytes to write.
1824  * Returns an appropriate error code that the caller should return,
1825  * or zero if the write should be allowed.
1826  */
1827 inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
1828 {
1829         struct inode *inode = file->f_mapping->host;
1830         unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
1831
1832         if (unlikely(*pos < 0))
1833                 return -EINVAL;
1834
1835         if (unlikely(file->f_error)) {
1836                 int err = file->f_error;
1837                 file->f_error = 0;
1838                 return err;
1839         }
1840
1841         if (!isblk) {
1842                 /* FIXME: this is for backwards compatibility with 2.4 */
1843                 if (file->f_flags & O_APPEND)
1844                         *pos = i_size_read(inode);
1845
1846                 if (limit != RLIM_INFINITY) {
1847                         if (*pos >= limit) {
1848                                 send_sig(SIGXFSZ, current, 0);
1849                                 return -EFBIG;
1850                         }
1851                         if (*count > limit - (typeof(limit))*pos) {
1852                                 *count = limit - (typeof(limit))*pos;
1853                         }
1854                 }
1855         }
1856
1857         /*
1858          * LFS rule
1859          */
1860         if (unlikely(*pos + *count > MAX_NON_LFS &&
1861                                 !(file->f_flags & O_LARGEFILE))) {
1862                 if (*pos >= MAX_NON_LFS) {
1863                         send_sig(SIGXFSZ, current, 0);
1864                         return -EFBIG;
1865                 }
1866                 if (*count > MAX_NON_LFS - (unsigned long)*pos) {
1867                         *count = MAX_NON_LFS - (unsigned long)*pos;
1868                 }
1869         }
1870
1871         /*
1872          * Are we about to exceed the fs block limit?
1873          *
1874          * If we have written data it becomes a short write.  If we have
1875          * exceeded without writing data we send a signal and return EFBIG.
1876          * Linus's frestrict idea will clean these up nicely.
1877          */
1878         if (likely(!isblk)) {
1879                 if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
1880                         if (*count || *pos > inode->i_sb->s_maxbytes) {
1881                                 send_sig(SIGXFSZ, current, 0);
1882                                 return -EFBIG;
1883                         }
1884                         /* zero-length writes at ->s_maxbytes are OK */
1885                 }
1886
1887                 if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
1888                         *count = inode->i_sb->s_maxbytes - *pos;
1889         } else {
1890                 loff_t isize;
1891                 if (bdev_read_only(I_BDEV(inode)))
1892                         return -EPERM;
1893                 isize = i_size_read(inode);
1894                 if (*pos >= isize) {
1895                         if (*count || *pos > isize)
1896                                 return -ENOSPC;
1897                 }
1898
1899                 if (*pos + *count > isize)
1900                         *count = isize - *pos;
1901         }
1902         return 0;
1903 }
1904 EXPORT_SYMBOL(generic_write_checks);
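
/*
 * Worked example of the RLIMIT_FSIZE clamp above: with the limit at
 * 1MB, a regular-file write of 8KB at *pos = 1MB - 4KB is trimmed to
 * *count = 4KB, while the same write starting at *pos = 1MB raises
 * SIGXFSZ and fails with -EFBIG.
 */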
1905
1906 ssize_t
1907 generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
1908                 unsigned long *nr_segs, loff_t pos, loff_t *ppos,
1909                 size_t count, size_t ocount)
1910 {
1911         struct file     *file = iocb->ki_filp;
1912         struct address_space *mapping = file->f_mapping;
1913         struct inode    *inode = mapping->host;
1914         ssize_t         written;
1915
1916         if (count != ocount)
1917                 *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
1918
1919         written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
1920         if (written > 0) {
1921                 loff_t end = pos + written;
1922                 if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
1923                         i_size_write(inode, end);
1924                         mark_inode_dirty(inode);
1925                 }
1926                 *ppos = end;
1927         }
1928
1929         /*
1930          * Sync the fs metadata but not the minor inode changes, and
1931          * of course not the data, as we did direct DMA for the I/O.
1932          * i_sem is held, which protects generic_osync_inode() from
1933          * livelocking.
1934          */
1935         if (written >= 0 && file->f_flags & O_SYNC)
1936                 generic_osync_inode(inode, mapping, OSYNC_METADATA);
1937         if (written == count && !is_sync_kiocb(iocb))
1938                 written = -EIOCBQUEUED;
1939         return written;
1940 }
1941 EXPORT_SYMBOL(generic_file_direct_write);
1942
1943 ssize_t
1944 generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
1945                 unsigned long nr_segs, loff_t pos, loff_t *ppos,
1946                 size_t count, ssize_t written)
1947 {
1948         struct file *file = iocb->ki_filp;
1949         struct address_space * mapping = file->f_mapping;
1950         struct address_space_operations *a_ops = mapping->a_ops;
1951         struct inode    *inode = mapping->host;
1952         long            status = 0;
1953         struct page     *page;
1954         struct page     *cached_page = NULL;
1955         size_t          bytes;
1956         struct pagevec  lru_pvec;
1957         const struct iovec *cur_iov = iov; /* current iovec */
1958         size_t          iov_base = 0;      /* offset in the current iovec */
1959         char __user     *buf;
1960
1961         pagevec_init(&lru_pvec, 0);
1962
1963         /*
1964          * Handle a partial DIO write: adjust cur_iov if needed.
1965          */
1966         if (likely(nr_segs == 1))
1967                 buf = iov->iov_base + written;
1968         else {
1969                 filemap_set_next_iovec(&cur_iov, &iov_base, written);
1970                 buf = cur_iov->iov_base + iov_base;
1971         }
1972
1973         do {
1974                 unsigned long index;
1975                 unsigned long offset;
1976                 size_t copied;
1977
1978                 offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
1979                 index = pos >> PAGE_CACHE_SHIFT;
1980                 bytes = PAGE_CACHE_SIZE - offset;
1981                 if (bytes > count)
1982                         bytes = count;
1983
1984                 /*
1985                  * Bring in the user page that we will copy from _first_.
1986                  * Otherwise there's a nasty deadlock on copying from the
1987                  * same page as we're writing to, without it being marked
1988                  * up-to-date.
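                 * (Example: write(fd, p, n) where p points into a mmap
                 * of the very file page being written.)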
1989                  */
1990                 fault_in_pages_readable(buf, bytes);
1991
1992                 page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
1993                 if (!page) {
1994                         status = -ENOMEM;
1995                         break;
1996                 }
1997
1998                 status = a_ops->prepare_write(file, page, offset, offset+bytes);
1999                 if (unlikely(status)) {
2000                         loff_t isize = i_size_read(inode);
2001                         /*
2002                          * prepare_write() may have instantiated a few blocks
2003                          * outside i_size.  Trim these off again.
2004                          */
2005                         unlock_page(page);
2006                         page_cache_release(page);
2007                         if (pos + bytes > isize)
2008                                 vmtruncate(inode, isize);
2009                         break;
2010                 }
2011                 if (likely(nr_segs == 1))
2012                         copied = filemap_copy_from_user(page, offset,
2013                                                         buf, bytes);
2014                 else
2015                         copied = filemap_copy_from_user_iovec(page, offset,
2016                                                 cur_iov, iov_base, bytes);
2017                 flush_dcache_page(page);
2018                 status = a_ops->commit_write(file, page, offset, offset+bytes);
2019                 if (likely(copied > 0)) {
2020                         if (!status)
2021                                 status = copied;
2022
2023                         if (status >= 0) {
2024                                 written += status;
2025                                 count -= status;
2026                                 pos += status;
2027                                 buf += status;
2028                                 if (unlikely(nr_segs > 1)) {
2029                                         filemap_set_next_iovec(&cur_iov,
2030                                                         &iov_base, status);
2031                                         buf = cur_iov->iov_base + iov_base;
2032                                 }
2033                         }
2034                 }
2035                 if (unlikely(copied != bytes))
2036                         if (status >= 0)
2037                                 status = -EFAULT;
2038                 unlock_page(page);
2039                 mark_page_accessed(page);
2040                 page_cache_release(page);
2041                 if (status < 0)
2042                         break;
2043                 balance_dirty_pages_ratelimited(mapping);
2044                 cond_resched();
2045         } while (count);
2046         *ppos = pos;
2047
2048         if (cached_page)
2049                 page_cache_release(cached_page);
2050
2051         /*
2052          * For now, when the user asks for O_SYNC, we'll actually give O_DSYNC
2053          */
2054         if (likely(status >= 0)) {
2055                 if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2056                         if (!a_ops->writepage || !is_sync_kiocb(iocb))
2057                                 status = generic_osync_inode(inode, mapping,
2058                                                 OSYNC_METADATA|OSYNC_DATA);
2059                 }
2060         }
2061
2062         /*
2063          * If we get here for O_DIRECT writes then we must have fallen through
2064          * to buffered writes (block instantiation inside i_size).  So we sync
2065          * the file data here, to try to honour O_DIRECT expectations.
2066          */
2067         if (unlikely(file->f_flags & O_DIRECT) && written)
2068                 status = filemap_write_and_wait(mapping);
2069
2070         pagevec_lru_add(&lru_pvec);
2071         return written ? written : status;
2072 }
2073 EXPORT_SYMBOL(generic_file_buffered_write);
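
/*
 * Illustrative sketch of the a_ops contract the loop above drives.  A
 * minimal, hypothetical in-memory filesystem could satisfy it as shown
 * below: prepare_write() readies bytes [from, to) of a locked pagecache
 * page, commit_write() publishes the copied data and updates i_size.
 */
#if 0	/* example only */
static int example_prepare_write(struct file *file, struct page *page,
				unsigned from, unsigned to)
{
	if (!PageUptodate(page)) {
		/* Zero the page so bytes outside [from, to) read as 0. */
		void *kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr, 0, PAGE_CACHE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		SetPageUptodate(page);
	}
	return 0;
}

static int example_commit_write(struct file *file, struct page *page,
				unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	if (pos > i_size_read(inode))
		i_size_write(inode, pos);	/* caller holds i_sem */
	set_page_dirty(page);
	return 0;
}
#endif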
2074
2075 ssize_t
2076 __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
2077                                 unsigned long nr_segs, loff_t *ppos)
2078 {
2079         struct file *file = iocb->ki_filp;
2080         struct address_space * mapping = file->f_mapping;
2081         size_t ocount;          /* original count */
2082         size_t count;           /* after file limit checks */
2083         struct inode    *inode = mapping->host;
2084         unsigned long   seg;
2085         loff_t          pos;
2086         ssize_t         written;
2087         ssize_t         err;
2088
2089         ocount = 0;
2090         for (seg = 0; seg < nr_segs; seg++) {
2091                 const struct iovec *iv = &iov[seg];
2092
2093                 /*
2094                  * If any segment has a negative length, or the cumulative
2095                  * length ever wraps negative, then return -EINVAL.
2096                  */
2097                 ocount += iv->iov_len;
2098                 if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
2099                         return -EINVAL;
2100                 if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
2101                         continue;
2102                 if (seg == 0)
2103                         return -EFAULT;
2104                 nr_segs = seg;
2105                 ocount -= iv->iov_len;  /* This segment is no good */
2106                 break;
2107         }
2108
2109         count = ocount;
2110         pos = *ppos;
2111
2112         vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2113
2114         /* We can write back this queue in page reclaim */
2115         current->backing_dev_info = mapping->backing_dev_info;
2116         written = 0;
2117
2118         err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2119         if (err)
2120                 goto out;
2121
2122         if (count == 0)
2123                 goto out;
2124
2125         err = remove_suid(file->f_dentry);
2126         if (err)
2127                 goto out;
2128
2129         inode_update_time(inode, 1);
2130
2131         /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
2132         if (unlikely(file->f_flags & O_DIRECT)) {
2133                 written = generic_file_direct_write(iocb, iov,
2134                                 &nr_segs, pos, ppos, count, ocount);
2135                 if (written < 0 || written == count)
2136                         goto out;
2137                 /*
2138                  * direct-io write to a hole: fall through to buffered I/O
2139                  * for completing the rest of the request.
2140                  */
2141                 pos += written;
2142                 count -= written;
2143         }
2144
2145         written = generic_file_buffered_write(iocb, iov, nr_segs,
2146                         pos, ppos, count, written);
2147 out:
2148         current->backing_dev_info = NULL;
2149         return written ? written : err;
2150 }
2151 EXPORT_SYMBOL(generic_file_aio_write_nolock);
2152
2153 ssize_t
2154 generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
2155                                 unsigned long nr_segs, loff_t *ppos)
2156 {
2157         struct file *file = iocb->ki_filp;
2158         struct address_space *mapping = file->f_mapping;
2159         struct inode *inode = mapping->host;
2160         ssize_t ret;
2161         loff_t pos = *ppos;
2162
2163         ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs, ppos);
2164
2165         if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2166                 int err;
2167
2168                 err = sync_page_range_nolock(inode, mapping, pos, ret);
2169                 if (err < 0)
2170                         ret = err;
2171         }
2172         return ret;
2173 }
2174
2175 ssize_t
2176 __generic_file_write_nolock(struct file *file, const struct iovec *iov,
2177                                 unsigned long nr_segs, loff_t *ppos)
2178 {
2179         struct kiocb kiocb;
2180         ssize_t ret;
2181
2182         init_sync_kiocb(&kiocb, file);
2183         ret = __generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
2184         if (ret == -EIOCBQUEUED)
2185                 ret = wait_on_sync_kiocb(&kiocb);
2186         return ret;
2187 }
2188
2189 ssize_t
2190 generic_file_write_nolock(struct file *file, const struct iovec *iov,
2191                                 unsigned long nr_segs, loff_t *ppos)
2192 {
2193         struct kiocb kiocb;
2194         ssize_t ret;
2195
2196         init_sync_kiocb(&kiocb, file);
2197         ret = generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
2198         if (ret == -EIOCBQUEUED)
2199                 ret = wait_on_sync_kiocb(&kiocb);
2200         return ret;
2201 }
2202 EXPORT_SYMBOL(generic_file_write_nolock);
2203
2204 ssize_t generic_file_aio_write(struct kiocb *iocb, const char __user *buf,
2205                                size_t count, loff_t pos)
2206 {
2207         struct file *file = iocb->ki_filp;
2208         struct address_space *mapping = file->f_mapping;
2209         struct inode *inode = mapping->host;
2210         ssize_t ret;
2211         struct iovec local_iov = { .iov_base = (void __user *)buf,
2212                                         .iov_len = count };
2213
2214         BUG_ON(iocb->ki_pos != pos);
2215
2216         down(&inode->i_sem);
2217         ret = __generic_file_aio_write_nolock(iocb, &local_iov, 1,
2218                                                 &iocb->ki_pos);
2219         up(&inode->i_sem);
2220
2221         if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2222                 ssize_t err;
2223
2224                 err = sync_page_range(inode, mapping, pos, ret);
2225                 if (err < 0)
2226                         ret = err;
2227         }
2228         return ret;
2229 }
2230 EXPORT_SYMBOL(generic_file_aio_write);
2231
2232 ssize_t generic_file_write(struct file *file, const char __user *buf,
2233                            size_t count, loff_t *ppos)
2234 {
2235         struct address_space *mapping = file->f_mapping;
2236         struct inode *inode = mapping->host;
2237         ssize_t ret;
2238         struct iovec local_iov = { .iov_base = (void __user *)buf,
2239                                         .iov_len = count };
2240
2241         down(&inode->i_sem);
2242         ret = __generic_file_write_nolock(file, &local_iov, 1, ppos);
2243         up(&inode->i_sem);
2244
2245         if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2246                 ssize_t err;
2247
2248                 err = sync_page_range(inode, mapping, *ppos - ret, ret);
2249                 if (err < 0)
2250                         ret = err;
2251         }
2252         return ret;
2253 }
2254 EXPORT_SYMBOL(generic_file_write);
2255
2256 ssize_t generic_file_readv(struct file *filp, const struct iovec *iov,
2257                         unsigned long nr_segs, loff_t *ppos)
2258 {
2259         struct kiocb kiocb;
2260         ssize_t ret;
2261
2262         init_sync_kiocb(&kiocb, filp);
2263         ret = __generic_file_aio_read(&kiocb, iov, nr_segs, ppos);
2264         if (ret == -EIOCBQUEUED)
2265                 ret = wait_on_sync_kiocb(&kiocb);
2266         return ret;
2267 }
2268 EXPORT_SYMBOL(generic_file_readv);
2269
2270 ssize_t generic_file_writev(struct file *file, const struct iovec *iov,
2271                         unsigned long nr_segs, loff_t *ppos)
2272 {
2273         struct address_space *mapping = file->f_mapping;
2274         struct inode *inode = mapping->host;
2275         ssize_t ret;
2276
2277         down(&inode->i_sem);
2278         ret = __generic_file_write_nolock(file, iov, nr_segs, ppos);
2279         up(&inode->i_sem);
2280
2281         if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2282                 int err;
2283
2284                 err = sync_page_range(inode, mapping, *ppos - ret, ret);
2285                 if (err < 0)
2286                         ret = err;
2287         }
2288         return ret;
2289 }
2290 EXPORT_SYMBOL(generic_file_writev);
2291
2292 /*
2293  * Called under i_sem for writes to S_ISREG files.  Returns -EIO if
2294  * something went wrong during pagecache shootdown.
2295  */
2296 ssize_t
2297 generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2298         loff_t offset, unsigned long nr_segs)
2299 {
2300         struct file *file = iocb->ki_filp;
2301         struct address_space *mapping = file->f_mapping;
2302         ssize_t retval;
2303         size_t write_len = 0;
2304
2305         /*
2306          * If it's a write, unmap all mappings of the file up-front.  This
2307          * will cause any pte dirty bits to be propagated into the pageframes
2308          * for the subsequent filemap_write_and_wait().
2309          */
2310         if (rw == WRITE) {
2311                 write_len = iov_length(iov, nr_segs);
2312                 if (mapping_mapped(mapping))
2313                         unmap_mapping_range(mapping, offset, write_len, 0);
2314         }
2315
2316         retval = filemap_write_and_wait(mapping);
2317         if (retval == 0) {
2318                 retval = mapping->a_ops->direct_IO(rw, iocb, iov,
2319                                                 offset, nr_segs);
2320                 if (rw == WRITE && mapping->nrpages) {
2321                         pgoff_t end = (offset + write_len - 1)
2322                                                 >> PAGE_CACHE_SHIFT;
2323                         int err = invalidate_inode_pages2_range(mapping,
2324                                         offset >> PAGE_CACHE_SHIFT, end);
2325                         if (err)
2326                                 retval = err;
2327                 }
2328         }
2329         return retval;
2330 }
2331 EXPORT_SYMBOL_GPL(generic_file_direct_IO);
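
/*
 * Illustrative sketch: a block-backed filesystem commonly implements
 * the ->direct_IO method invoked above by delegating to
 * blockdev_direct_IO() with its own get_block routine.  Both
 * example_direct_IO and example_get_block are hypothetical names.
 */
#if 0	/* example only */
static int example_get_block(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create);

static ssize_t example_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
				iov, offset, nr_segs, example_get_block, NULL);
}
#endif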